-rw-r--r--  config.h.in                                                  |  3
-rwxr-xr-x  sysdeps/x86_64/configure                                     | 27
-rw-r--r--  sysdeps/x86_64/configure.ac                                  | 15
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S       |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S       |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S       |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S       |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S       |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S    |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S     |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S     |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S     |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S     |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S  |  8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S     |  8
15 files changed, 0 insertions, 141 deletions
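The patch below drops the configure-time probe for AVX512DQ assembler support (HAVE_AVX512DQ_ASM_SUPPORT) together with the matching #ifndef fallbacks in the AVX-512 libmvec sources, so the _knl/_skx entry points always carry the AVX-512 code instead of falling back to the WRAPPER_IMPL_AVX512* wrappers around the 256-bit _ZGVdN* variants. For reference, the removed probe only tried to assemble a single AVX512DQ instruction; a minimal stand-alone sketch of that check (file and variable names are illustrative, not part of the patch) would look like:

    # Sketch of the removed assembler probe, run outside of configure.
    cat > conftest.s <<\EOF
            vandpd (%rax), %zmm6, %zmm1
    EOF
    if ${CC-cc} -c $ASFLAGS conftest.s 2>/dev/null; then
      echo "assembler supports AVX512DQ"
    else
      echo "assembler lacks AVX512DQ"
    fi
    rm -f conftest*
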
diff --git a/config.h.in b/config.h.in
index 3752f9a6f7..964873f27e 100644
--- a/config.h.in
+++ b/config.h.in
@@ -62,9 +62,6 @@
 /* Define if _rtld_local structure should be forced into .sdata section.  */
 #undef	HAVE_SDATA_SECTION
 
-/* Define if assembler supports AVX512DQ.  */
-#undef  HAVE_AVX512DQ_ASM_SUPPORT
-
 /* Define if assembler supports z10 zarch instructions as default on S390.  */
 #undef  HAVE_S390_MIN_Z10_ZARCH_ASM_SUPPORT
 
diff --git a/sysdeps/x86_64/configure b/sysdeps/x86_64/configure
index d81accdc07..585279f83d 100755
--- a/sysdeps/x86_64/configure
+++ b/sysdeps/x86_64/configure
@@ -1,33 +1,6 @@
 # This file is generated from configure.ac by Autoconf.  DO NOT EDIT!
  # Local configure fragment for sysdeps/x86_64.
 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for AVX512DQ support in assembler" >&5
-$as_echo_n "checking for AVX512DQ support in assembler... " >&6; }
-if ${libc_cv_asm_avx512dq+:} false; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat > conftest.s <<\EOF
-        vandpd (%rax), %zmm6, %zmm1
-EOF
-if { ac_try='${CC-cc} -c $ASFLAGS conftest.s 1>&5'
-  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; }; then
-  libc_cv_asm_avx512dq=yes
-else
-  libc_cv_asm_avx512dq=no
-fi
-rm -f conftest*
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_asm_avx512dq" >&5
-$as_echo "$libc_cv_asm_avx512dq" >&6; }
-if test $libc_cv_asm_avx512dq = yes; then
-  $as_echo "#define HAVE_AVX512DQ_ASM_SUPPORT 1" >>confdefs.h
-
-fi
-
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking -mprefer-vector-width=128" >&5
 $as_echo_n "checking -mprefer-vector-width=128... " >&6; }
 if ${libc_cv_cc_mprefer_vector_width+:} false; then :
diff --git a/sysdeps/x86_64/configure.ac b/sysdeps/x86_64/configure.ac
index 41baed6999..29e14033c0 100644
--- a/sysdeps/x86_64/configure.ac
+++ b/sysdeps/x86_64/configure.ac
@@ -1,21 +1,6 @@
 GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
 # Local configure fragment for sysdeps/x86_64.
 
-dnl Check if asm supports AVX512DQ.
-AC_CACHE_CHECK(for AVX512DQ support in assembler, libc_cv_asm_avx512dq, [dnl
-cat > conftest.s <<\EOF
-        vandpd (%rax), %zmm6, %zmm1
-EOF
-if AC_TRY_COMMAND(${CC-cc} -c $ASFLAGS conftest.s 1>&AS_MESSAGE_LOG_FD); then
-  libc_cv_asm_avx512dq=yes
-else
-  libc_cv_asm_avx512dq=no
-fi
-rm -f conftest*])
-if test $libc_cv_asm_avx512dq = yes; then
-  AC_DEFINE(HAVE_AVX512DQ_ASM_SUPPORT)
-fi
-
 dnl Check if -mprefer-vector-width=128 works.
 AC_CACHE_CHECK(-mprefer-vector-width=128, libc_cv_cc_mprefer_vector_width, [dnl
 LIBC_TRY_CC_OPTION([-mprefer-vector-width=128],
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S
index 58e588a3d4..0fcb912557 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S
@@ -22,9 +22,6 @@
 
 	.text
 ENTRY (_ZGVeN8v_cos_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
-#else
 /*
   ALGORITHM DESCRIPTION:
 
@@ -232,13 +229,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
         call      JUMPTARGET(cos)
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN8v_cos_knl)
 
 ENTRY (_ZGVeN8v_cos_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -454,5 +447,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
 
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN8v_cos_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S
index 5181b12043..c40d82bf65 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S
@@ -22,9 +22,6 @@
 
 	.text
 ENTRY (_ZGVeN8v_exp_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -234,13 +231,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
         call      JUMPTARGET(exp)
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN8v_exp_knl)
 
 ENTRY (_ZGVeN8v_exp_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -452,5 +445,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN8v_exp_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
index f5f117d474..5596c950ce 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
@@ -22,9 +22,6 @@
 
 	.text
 ENTRY (_ZGVeN8v_log_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_log
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -233,13 +230,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_log
         call      JUMPTARGET(log)
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN8v_log_knl)
 
 ENTRY (_ZGVeN8v_log_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_log
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -459,5 +452,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_log
 
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN8v_log_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S
index d70b4d6061..6062ec8718 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S
@@ -82,9 +82,6 @@
 
 	.text
 ENTRY (_ZGVeN8vv_pow_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -405,13 +402,9 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
         vmovsd    %xmm0, 1280(%rsp,%r15)
         jmp       .LBL_1_7
 
-#endif
 END (_ZGVeN8vv_pow_knl)
 
 ENTRY (_ZGVeN8vv_pow_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -737,5 +730,4 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
         vmovsd    %xmm0, 1280(%rsp,%r15)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN8vv_pow_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S
index 48d251db16..2981f1582e 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S
@@ -22,9 +22,6 @@
 
 	.text
 ENTRY (_ZGVeN8v_sin_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -233,13 +230,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
         call      JUMPTARGET(sin)
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN8v_sin_knl)
 
 ENTRY (_ZGVeN8v_sin_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -456,5 +449,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
 
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN8v_sin_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S
index a4944a4fee..4ad366373b 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S
@@ -37,9 +37,6 @@
 
 	.text
 ENTRY (_ZGVeN8vl8l8_sincos_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_fFF _ZGVdN4vl8l8_sincos
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -303,14 +300,10 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN4vl8l8_sincos
         vmovsd    %xmm0, 1280(%rsp,%r15)
         jmp       .LBL_1_7
 
-#endif
 END (_ZGVeN8vl8l8_sincos_knl)
 libmvec_hidden_def(_ZGVeN8vl8l8_sincos_knl)
 
 ENTRY (_ZGVeN8vl8l8_sincos_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_fFF _ZGVdN4vl8l8_sincos
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -585,7 +578,6 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN4vl8l8_sincos
         vmovsd    %xmm0, 1280(%rsp,%r15)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN8vl8l8_sincos_skx)
 libmvec_hidden_def(_ZGVeN8vl8l8_sincos_skx)
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S
index fe8474fed9..b7d79efb54 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S
@@ -22,9 +22,6 @@
 
 	.text
 ENTRY (_ZGVeN16v_cosf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
-#else
 /*
   ALGORITHM DESCRIPTION:
 
@@ -235,13 +232,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
         call      JUMPTARGET(cosf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN16v_cosf_knl)
 
 ENTRY (_ZGVeN16v_cosf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
-#else
 /*
   ALGORITHM DESCRIPTION:
 
@@ -451,5 +444,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
         call      JUMPTARGET(cosf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN16v_cosf_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S
index 229b7828cd..9f03b9b780 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S
@@ -22,9 +22,6 @@
 
 	.text
 ENTRY (_ZGVeN16v_expf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -223,13 +220,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_1_7
 
-#endif
 END (_ZGVeN16v_expf_knl)
 
 ENTRY (_ZGVeN16v_expf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -438,5 +431,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN16v_expf_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S
index fa2aae986f..2ba38b0f33 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S
@@ -22,9 +22,6 @@
 
 	.text
 ENTRY (_ZGVeN16v_logf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -207,13 +204,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
         call      JUMPTARGET(logf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN16v_logf_knl)
 
 ENTRY (_ZGVeN16v_logf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -407,5 +400,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN16v_logf_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S
index 6aea2a4f11..7f0272c809 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S
@@ -82,9 +82,6 @@
 
 	.text
 ENTRY (_ZGVeN16vv_powf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -355,13 +352,9 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
         call      JUMPTARGET(powf)
         vmovss    %xmm0, 1280(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN16vv_powf_knl)
 
 ENTRY (_ZGVeN16vv_powf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -641,5 +634,4 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
         call      JUMPTARGET(powf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN16vv_powf_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
index a446c504f6..e1d0154441 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
@@ -50,9 +50,6 @@
 
 	.text
 ENTRY (_ZGVeN16vl4l4_sincosf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_fFF _ZGVdN8vl4l4_sincosf
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -266,14 +263,10 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vl4l4_sincosf
 
         vmovss    %xmm0, 1280(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN16vl4l4_sincosf_knl)
 libmvec_hidden_def(_ZGVeN16vl4l4_sincosf_knl)
 
 ENTRY (_ZGVeN16vl4l4_sincosf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -496,7 +489,6 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
 
         vmovss    %xmm0, 1280(%rsp,%r15,8)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN16vl4l4_sincosf_skx)
 libmvec_hidden_def(_ZGVeN16vl4l4_sincosf_skx)
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S
index c1b352d0ad..bcb76ff756 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S
@@ -22,9 +22,6 @@
 
 	.text
 ENTRY(_ZGVeN16v_sinf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -239,13 +236,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
         call      JUMPTARGET(sinf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END(_ZGVeN16v_sinf_knl)
 
 ENTRY (_ZGVeN16v_sinf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -470,5 +463,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
 
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN16v_sinf_skx)