author:    Andrew Senkevich <andrew.senkevich@intel.com>  2015-06-17 15:38:29 +0300
committer: Andrew Senkevich <andrew.senkevich@intel.com>  2015-06-17 15:38:29 +0300
commit:    6af25acc7b6313fd8934c3b2f0eb3da5a1c6eb6b (patch)
tree:      6b667fc73a74f97c43853ce2ffd7b090321412df /sysdeps/x86_64/fpu/multiarch
parent:    c71c89e5c72baf43fd44d08dda8ab846eec5b1d6 (diff)
Vector log for x86_64 and tests.
This patch adds a vectorized implementation of log, with SSE4, AVX,
AVX2 and AVX512 versions, following the Vector ABI
<https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.
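
As a usage sketch (not part of this patch): with a compiler that
implements the Vector ABI, e.g. GCC with -O2 -ffast-math -fopenmp on
x86_64, a loop such as the following may be vectorized into calls to
the _ZGVbN2v_log/_ZGVdN4v_log/_ZGVeN8v_log entry points added here:

    #include <math.h>

    /* Each iteration calls log (); under "#pragma omp simd" the
       compiler may replace the scalar calls with libmvec's vector
       variants.  */
    void
    vector_log (double *restrict out, const double *restrict in, int n)
    {
    #pragma omp simd
      for (int i = 0; i < n; i++)
        out[i] = log (in[i]);
    }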

    * bits/libm-simd-decl-stubs.h: Added stubs for log.
    * math/bits/mathcalls.h: Added log declaration with __MATHCALL_VEC.
    * sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New versions added.
    * sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration and asm
    redirections for log.
    * sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
    * sysdeps/x86_64/fpu/Versions: New versions added.
    * sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
    * sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines): Added
    build of SSE4, AVX2 and AVX512 IFUNC versions.
    * sysdeps/x86_64/fpu/multiarch/svml_d_log2_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_log4_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_log8_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S: New file.
    * sysdeps/x86_64/fpu/svml_d_log2_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_log4_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_log4_core_avx.S: New file.
    * sysdeps/x86_64/fpu/svml_d_log8_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_log_data.S: New file.
    * sysdeps/x86_64/fpu/svml_d_log_data.h: New file.
    * sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c: Added vector log test.
    * sysdeps/x86_64/fpu/test-double-vlen2.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-avx2.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen8.c: Likewise.
    * NEWS: Mention addition of x86_64 vector log.
Diffstat (limited to 'sysdeps/x86_64/fpu/multiarch')
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/Makefile                   |   2
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log2_core.S         |  38
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S    | 229
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log4_core.S         |  38
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S    | 210
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log8_core.S         |  39
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S  | 468
7 files changed, 1024 insertions(+), 0 deletions(-)
diff --git a/sysdeps/x86_64/fpu/multiarch/Makefile b/sysdeps/x86_64/fpu/multiarch/Makefile
index 61759b8d0b..16d93caf57 100644
--- a/sysdeps/x86_64/fpu/multiarch/Makefile
+++ b/sysdeps/x86_64/fpu/multiarch/Makefile
@@ -56,6 +56,8 @@ ifeq ($(subdir),mathvec)
 libmvec-sysdep_routines += svml_d_cos2_core_sse4 svml_d_cos4_core_avx2 \
 			   svml_d_cos8_core_avx512 svml_d_sin2_core_sse4 \
 			   svml_d_sin4_core_avx2 svml_d_sin8_core_avx512 \
+			   svml_d_log2_core_sse4 svml_d_log4_core_avx2 \
+			   svml_d_log8_core_avx512 \
 			   svml_s_cosf4_core_sse4 svml_s_cosf8_core_avx2 \
 			   svml_s_cosf16_core_avx512 svml_s_sinf4_core_sse4 \
 			   svml_s_sinf8_core_avx2 svml_s_sinf16_core_avx512
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core.S
new file mode 100644
index 0000000000..38d369fc3c
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core.S
@@ -0,0 +1,38 @@
+/* Multiple versions of vectorized log.
+   Copyright (C) 2014-2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+	.text
+ENTRY (_ZGVbN2v_log)
+        .type   _ZGVbN2v_log, @gnu_indirect_function
+        cmpl    $0, KIND_OFFSET+__cpu_features(%rip)
+        jne     1f
+        call    __init_cpu_features
+1:      leaq    _ZGVbN2v_log_sse4(%rip), %rax
+        testl   $bit_SSE4_1, __cpu_features+CPUID_OFFSET+index_SSE4_1(%rip)
+        jz      2f
+        ret
+2:      leaq    _ZGVbN2v_log_sse2(%rip), %rax
+        ret
+END (_ZGVbN2v_log)
+libmvec_hidden_def (_ZGVbN2v_log)
+
+#define _ZGVbN2v_log _ZGVbN2v_log_sse2
+#include "../svml_d_log2_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S
new file mode 100644
index 0000000000..82f3d8215d
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log2_core_sse4.S
@@ -0,0 +1,229 @@
+/* Function log vectorized with SSE4.
+   Copyright (C) 2014-2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_d_log_data.h"
+
+	.text
+ENTRY (_ZGVbN2v_log_sse4)
+/*
+   ALGORITHM DESCRIPTION:
+
+   log(x) = -log(Rcp) + log(Rcp*x),
+     where Rcp ~ 1/x (accuracy ~9 bits, obtained by rounding
+     HW approximation to 1+9 mantissa bits)
+
+   Reduced argument R=Rcp*x-1 is used to approximate log(1+R) as polynomial
+
+   log(Rcp) = exponent_Rcp*log(2) + log(mantissa_Rcp)
+     -log(mantissa_Rcp) is obtained from a lookup table,
+     accessed by a 9-bit index
+ */
+        pushq     %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq      %rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq      $-64, %rsp
+        subq      $320, %rsp
+        movaps    %xmm0, %xmm6
+        movq      __svml_dlog_data@GOTPCREL(%rip), %r8
+        movaps    %xmm6, %xmm3
+        movaps    %xmm6, %xmm2
+
+/* isolate exponent bits */
+        movaps    %xmm6, %xmm1
+        psrlq     $20, %xmm1
+        movups _ExpMask(%r8), %xmm5
+
+/* preserve mantissa, set input exponent to 2^(-10) */
+        andps     %xmm6, %xmm5
+        orps _Two10(%r8), %xmm5
+
+/* reciprocal approximation good to at least 11 bits */
+        cvtpd2ps  %xmm5, %xmm7
+        cmpltpd _MinNorm(%r8), %xmm3
+        cmpnlepd _MaxNorm(%r8), %xmm2
+        movlhps   %xmm7, %xmm7
+
+/* combine and get argument value range mask */
+        orps      %xmm2, %xmm3
+        rcpps     %xmm7, %xmm0
+        movmskpd  %xmm3, %eax
+        movups _HalfMask(%r8), %xmm2
+
+/* argument reduction started:  R = Mantissa*Rcp - 1 */
+        andps     %xmm5, %xmm2
+        cvtps2pd  %xmm0, %xmm4
+        subpd     %xmm2, %xmm5
+
+/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
+        roundpd   $0, %xmm4, %xmm4
+        mulpd     %xmm4, %xmm2
+        mulpd     %xmm4, %xmm5
+        subpd _One(%r8), %xmm2
+        addpd     %xmm2, %xmm5
+        movups _Threshold(%r8), %xmm2
+
+/* calculate index for table lookup */
+        movaps    %xmm4, %xmm3
+        cmpltpd   %xmm4, %xmm2
+        pshufd    $221, %xmm1, %xmm7
+        psrlq     $40, %xmm3
+
+/* convert biased exponent to DP format */
+        cvtdq2pd  %xmm7, %xmm0
+        movd      %xmm3, %edx
+        movups _poly_coeff_1(%r8), %xmm4
+
+/* polynomial computation */
+        mulpd     %xmm5, %xmm4
+        andps _Bias(%r8), %xmm2
+        orps _Bias1(%r8), %xmm2
+
+/*
+   Table stores -log(0.5*mantissa) for larger mantissas,
+   adjust exponent accordingly
+ */
+        subpd     %xmm2, %xmm0
+        addpd _poly_coeff_2(%r8), %xmm4
+
+/* exponent*log(2.0) */
+        mulpd _L2(%r8), %xmm0
+        movaps    %xmm5, %xmm2
+        mulpd     %xmm5, %xmm2
+        movups _poly_coeff_3(%r8), %xmm7
+        mulpd     %xmm5, %xmm7
+        mulpd     %xmm2, %xmm4
+        addpd _poly_coeff_4(%r8), %xmm7
+        addpd     %xmm4, %xmm7
+        mulpd     %xmm7, %xmm2
+        movslq    %edx, %rdx
+        pextrd    $2, %xmm3, %ecx
+
+/*
+   reconstruction:
+   (exponent*log(2)) + (LogRcp + (R+poly))
+ */
+        addpd     %xmm2, %xmm5
+        movslq    %ecx, %rcx
+        movsd     _LogRcp_lookup(%r8,%rdx), %xmm1
+        movhpd    _LogRcp_lookup(%r8,%rcx), %xmm1
+        addpd     %xmm5, %xmm1
+        addpd     %xmm1, %xmm0
+        testl     %eax, %eax
+        jne       .LBL_1_3
+
+.LBL_1_2:
+        cfi_remember_state
+        movq      %rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq      %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+
+.LBL_1_3:
+        cfi_restore_state
+        movups    %xmm6, 192(%rsp)
+        movups    %xmm0, 256(%rsp)
+        je        .LBL_1_2
+
+        xorb      %cl, %cl
+        xorl      %edx, %edx
+        movups    %xmm8, 112(%rsp)
+        movups    %xmm9, 96(%rsp)
+        movups    %xmm10, 80(%rsp)
+        movups    %xmm11, 64(%rsp)
+        movups    %xmm12, 48(%rsp)
+        movups    %xmm13, 32(%rsp)
+        movups    %xmm14, 16(%rsp)
+        movups    %xmm15, (%rsp)
+        movq      %rsi, 136(%rsp)
+        movq      %rdi, 128(%rsp)
+        movq      %r12, 168(%rsp)
+        cfi_offset_rel_rsp (12, 168)
+        movb      %cl, %r12b
+        movq      %r13, 160(%rsp)
+        cfi_offset_rel_rsp (13, 160)
+        movl      %eax, %r13d
+        movq      %r14, 152(%rsp)
+        cfi_offset_rel_rsp (14, 152)
+        movl      %edx, %r14d
+        movq      %r15, 144(%rsp)
+        cfi_offset_rel_rsp (15, 144)
+        cfi_remember_state
+
+.LBL_1_6:
+        btl       %r14d, %r13d
+        jc        .LBL_1_12
+
+.LBL_1_7:
+        lea       1(%r14), %esi
+        btl       %esi, %r13d
+        jc        .LBL_1_10
+
+.LBL_1_8:
+        incb      %r12b
+        addl      $2, %r14d
+        cmpb      $16, %r12b
+        jb        .LBL_1_6
+
+        movups    112(%rsp), %xmm8
+        movups    96(%rsp), %xmm9
+        movups    80(%rsp), %xmm10
+        movups    64(%rsp), %xmm11
+        movups    48(%rsp), %xmm12
+        movups    32(%rsp), %xmm13
+        movups    16(%rsp), %xmm14
+        movups    (%rsp), %xmm15
+        movq      136(%rsp), %rsi
+        movq      128(%rsp), %rdi
+        movq      168(%rsp), %r12
+        cfi_restore (%r12)
+        movq      160(%rsp), %r13
+        cfi_restore (%r13)
+        movq      152(%rsp), %r14
+        cfi_restore (%r14)
+        movq      144(%rsp), %r15
+        cfi_restore (%r15)
+        movups    256(%rsp), %xmm0
+        jmp       .LBL_1_2
+
+.LBL_1_10:
+        cfi_restore_state
+        movzbl    %r12b, %r15d
+        shlq      $4, %r15
+        movsd     200(%rsp,%r15), %xmm0
+
+        call      log@PLT
+
+        movsd     %xmm0, 264(%rsp,%r15)
+        jmp       .LBL_1_8
+
+.LBL_1_12:
+        movzbl    %r12b, %r15d
+        shlq      $4, %r15
+        movsd     192(%rsp,%r15), %xmm0
+
+        call      log@PLT
+
+        movsd     %xmm0, 256(%rsp,%r15)
+        jmp       .LBL_1_7
+
+END (_ZGVbN2v_log_sse4)
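
A scalar C model of the algorithm described in the kernel's header
comment (illustrative only: it truncates rather than rounds the
reciprocal, substitutes generic log1p coefficients for
_poly_coeff_1.._poly_coeff_4, and calls log () where the kernel combines
exponent*log(2) with the 9-bit-indexed _LogRcp_lookup table):

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    static double
    log_model (double x)
    {
      /* Reciprocal approximation kept to 1+9 mantissa bits.  */
      double rcp = 1.0 / x;
      uint64_t u;
      memcpy (&u, &rcp, sizeof u);
      u &= ~((UINT64_C (1) << 43) - 1);   /* drop the 52-9 low mantissa bits */
      memcpy (&rcp, &u, sizeof u);

      /* Reduced argument R = Rcp*x - 1, |R| is small.  */
      double r = rcp * x - 1.0;

      /* log(1+R) ~ R - R^2/2 + R^3/3 - R^4/4; the kernel uses tuned
         coefficients instead.  */
      double p = r * r * (-0.5 + r * (1.0 / 3.0 - 0.25 * r));

      /* log(x) = -log(Rcp) + log(Rcp*x); the kernel obtains -log(Rcp)
         from the exponent and the lookup table, here plain log ()
         stands in for that.  */
      return -log (rcp) + (r + p);
    }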
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core.S
new file mode 100644
index 0000000000..ddb6105405
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core.S
@@ -0,0 +1,38 @@
+/* Multiple versions of vectorized log.
+   Copyright (C) 2014-2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+	.text
+ENTRY (_ZGVdN4v_log)
+        .type   _ZGVdN4v_log, @gnu_indirect_function
+        cmpl    $0, KIND_OFFSET+__cpu_features(%rip)
+        jne     1f
+        call    __init_cpu_features
+1:      leaq    _ZGVdN4v_log_avx2(%rip), %rax
+        testl   $bit_AVX2_Usable, __cpu_features+FEATURE_OFFSET+index_AVX2_Usable(%rip)
+        jz      2f
+        ret
+2:      leaq    _ZGVdN4v_log_sse_wrapper(%rip), %rax
+        ret
+END (_ZGVdN4v_log)
+libmvec_hidden_def (_ZGVdN4v_log)
+
+#define _ZGVdN4v_log _ZGVdN4v_log_sse_wrapper
+#include "../svml_d_log4_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S
new file mode 100644
index 0000000000..816aede395
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log4_core_avx2.S
@@ -0,0 +1,210 @@
+/* Function log vectorized with AVX2.
+   Copyright (C) 2014-2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_d_log_data.h"
+
+	.text
+ENTRY (_ZGVdN4v_log_avx2)
+/* ALGORITHM DESCRIPTION:
+
+    log(x) = -log(Rcp) + log(Rcp*x),
+    where Rcp ~ 1/x (accuracy ~9 bits, obtained by rounding
+    HW approximation to 1+9 mantissa bits)
+
+    Reduced argument R=Rcp*x-1 is used to approximate log(1+R) as polynomial
+
+    log(Rcp) = exponent_Rcp*log(2) + log(mantissa_Rcp)
+      -log(mantissa_Rcp) is obtained from a lookup table,
+      accessed by a 9-bit index
+ */
+        pushq     %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq      %rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq      $-64, %rsp
+        subq      $448, %rsp
+        movq      __svml_dlog_data@GOTPCREL(%rip), %rax
+        vmovdqa   %ymm0, %ymm5
+
+/* isolate exponent bits */
+        vpsrlq    $20, %ymm5, %ymm0
+
+/* preserve mantissa, set input exponent to 2^(-10) */
+        vandpd _ExpMask(%rax), %ymm5, %ymm6
+        vorpd _Two10(%rax), %ymm6, %ymm4
+
+/* reciprocal approximation good to at least 11 bits */
+        vcvtpd2ps %ymm4, %xmm7
+        vrcpps    %xmm7, %xmm1
+        vcmplt_oqpd _MinNorm(%rax), %ymm5, %ymm7
+        vcvtps2pd %xmm1, %ymm3
+        vcmpnle_uqpd _MaxNorm(%rax), %ymm5, %ymm1
+        vextracti128 $1, %ymm0, %xmm2
+        vshufps   $221, %xmm2, %xmm0, %xmm6
+
+/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
+        vroundpd  $0, %ymm3, %ymm2
+
+/* convert biased exponent to DP format */
+        vcvtdq2pd %xmm6, %ymm0
+
+/* combine and get argument value range mask */
+        vorpd     %ymm1, %ymm7, %ymm3
+        vmovupd _One(%rax), %ymm1
+        vmovmskpd %ymm3, %ecx
+
+/* calculate index for table lookup */
+        vpsrlq    $40, %ymm2, %ymm3
+
+/* argument reduction started:  R = Mantissa*Rcp - 1 */
+        vfmsub213pd %ymm1, %ymm2, %ymm4
+        vcmpgt_oqpd _Threshold(%rax), %ymm2, %ymm2
+        vpcmpeqd  %ymm6, %ymm6, %ymm6
+        vxorpd    %ymm1, %ymm1, %ymm1
+        vgatherqpd %ymm6, _LogRcp_lookup(%rax,%ymm3), %ymm1
+
+/* exponent*log(2.0) */
+        vmovupd _poly_coeff_1(%rax), %ymm6
+        vmulpd    %ymm4, %ymm4, %ymm3
+
+/* polynomial computation */
+        vfmadd213pd _poly_coeff_2(%rax), %ymm4, %ymm6
+        vandpd _Bias(%rax), %ymm2, %ymm7
+        vorpd _Bias1(%rax), %ymm7, %ymm2
+
+/*
+   Table stores -log(0.5*mantissa) for larger mantissas,
+   adjust exponent accordingly
+ */
+        vsubpd    %ymm2, %ymm0, %ymm0
+        vmovupd _poly_coeff_3(%rax), %ymm2
+        vfmadd213pd _poly_coeff_4(%rax), %ymm4, %ymm2
+        vfmadd213pd %ymm2, %ymm3, %ymm6
+
+/*
+   reconstruction:
+   (exponent*log(2)) + (LogRcp + (R+poly))
+ */
+        vfmadd213pd %ymm4, %ymm3, %ymm6
+        vaddpd    %ymm1, %ymm6, %ymm4
+        vfmadd132pd _L2(%rax), %ymm4, %ymm0
+        testl     %ecx, %ecx
+        jne       .LBL_1_3
+
+.LBL_1_2:
+        cfi_remember_state
+        movq      %rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq      %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+
+.LBL_1_3:
+        cfi_restore_state
+        vmovupd   %ymm5, 320(%rsp)
+        vmovupd   %ymm0, 384(%rsp)
+        je        .LBL_1_2
+
+        xorb      %dl, %dl
+        xorl      %eax, %eax
+        vmovups   %ymm8, 224(%rsp)
+        vmovups   %ymm9, 192(%rsp)
+        vmovups   %ymm10, 160(%rsp)
+        vmovups   %ymm11, 128(%rsp)
+        vmovups   %ymm12, 96(%rsp)
+        vmovups   %ymm13, 64(%rsp)
+        vmovups   %ymm14, 32(%rsp)
+        vmovups   %ymm15, (%rsp)
+        movq      %rsi, 264(%rsp)
+        movq      %rdi, 256(%rsp)
+        movq      %r12, 296(%rsp)
+        cfi_offset_rel_rsp (12, 296)
+        movb      %dl, %r12b
+        movq      %r13, 288(%rsp)
+        cfi_offset_rel_rsp (13, 288)
+        movl      %ecx, %r13d
+        movq      %r14, 280(%rsp)
+        cfi_offset_rel_rsp (14, 280)
+        movl      %eax, %r14d
+        movq      %r15, 272(%rsp)
+        cfi_offset_rel_rsp (15, 272)
+        cfi_remember_state
+
+.LBL_1_6:
+        btl       %r14d, %r13d
+        jc        .LBL_1_12
+
+.LBL_1_7:
+        lea       1(%r14), %esi
+        btl       %esi, %r13d
+        jc        .LBL_1_10
+
+.LBL_1_8:
+        incb      %r12b
+        addl      $2, %r14d
+        cmpb      $16, %r12b
+        jb        .LBL_1_6
+
+        vmovups   224(%rsp), %ymm8
+        vmovups   192(%rsp), %ymm9
+        vmovups   160(%rsp), %ymm10
+        vmovups   128(%rsp), %ymm11
+        vmovups   96(%rsp), %ymm12
+        vmovups   64(%rsp), %ymm13
+        vmovups   32(%rsp), %ymm14
+        vmovups   (%rsp), %ymm15
+        vmovupd   384(%rsp), %ymm0
+        movq      264(%rsp), %rsi
+        movq      256(%rsp), %rdi
+        movq      296(%rsp), %r12
+        cfi_restore (%r12)
+        movq      288(%rsp), %r13
+        cfi_restore (%r13)
+        movq      280(%rsp), %r14
+        cfi_restore (%r14)
+        movq      272(%rsp), %r15
+        cfi_restore (%r15)
+        jmp       .LBL_1_2
+
+.LBL_1_10:
+        cfi_restore_state
+        movzbl    %r12b, %r15d
+        shlq      $4, %r15
+        vmovsd    328(%rsp,%r15), %xmm0
+        vzeroupper
+
+        call      log@PLT
+
+        vmovsd    %xmm0, 392(%rsp,%r15)
+        jmp       .LBL_1_8
+
+.LBL_1_12:
+        movzbl    %r12b, %r15d
+        shlq      $4, %r15
+        vmovsd    320(%rsp,%r15), %xmm0
+        vzeroupper
+
+        call      log@PLT
+
+        vmovsd    %xmm0, 384(%rsp,%r15)
+        jmp       .LBL_1_7
+
+END (_ZGVdN4v_log_avx2)
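
The table lookup in the AVX2 kernel is a single vgatherqpd with an
all-ones mask.  An intrinsics sketch of that one step (function and
parameter names are illustrative; as in the kernel, the index vector
holds byte offsets, so the gather scale is 1):

    #include <immintrin.h>

    static inline __m256d
    gather_log_rcp (const double *tbl, __m256i byte_offsets)
    {
      /* All-ones mask: gather every lane, as the kernel does after
         vpcmpeqd %ymm6, %ymm6, %ymm6.  */
      __m256d ones = _mm256_castsi256_pd (_mm256_set1_epi64x (-1));
      return _mm256_mask_i64gather_pd (_mm256_setzero_pd (), tbl,
                                       byte_offsets, ones, 1);
    }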
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core.S
new file mode 100644
index 0000000000..2f9e9d8892
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core.S
@@ -0,0 +1,39 @@
+/* Multiple versions of vectorized log.
+   Copyright (C) 2014-2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+	.text
+ENTRY (_ZGVeN8v_log)
+        .type   _ZGVeN8v_log, @gnu_indirect_function
+        cmpl    $0, KIND_OFFSET+__cpu_features(%rip)
+        jne     1f
+        call    __init_cpu_features
+1:      leaq    _ZGVeN8v_log_skx(%rip), %rax
+        testl   $bit_AVX512DQ_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512DQ_Usable(%rip)
+        jnz     3f
+2:      leaq    _ZGVeN8v_log_knl(%rip), %rax
+        testl   $bit_AVX512F_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512F_Usable(%rip)
+        jnz     3f
+        leaq    _ZGVeN8v_log_avx2_wrapper(%rip), %rax
+3:      ret
+END (_ZGVeN8v_log)
+
+#define _ZGVeN8v_log _ZGVeN8v_log_avx2_wrapper
+#include "../svml_d_log8_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
new file mode 100644
index 0000000000..b0f3dd580c
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
@@ -0,0 +1,468 @@
+/* Function log vectorized with AVX-512. KNL and SKX versions.
+   Copyright (C) 2014-2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_d_log_data.h"
+#include "svml_d_wrapper_impl.h"
+
+	.text
+ENTRY (_ZGVeN8v_log_knl)
+#ifndef HAVE_AVX512_ASM_SUPPORT
+WRAPPER_IMPL_AVX512 _ZGVdN4v_log
+#else
+/*
+   ALGORITHM DESCRIPTION:
+
+   log(x) = -log(Rcp) + log(Rcp*x),
+     where Rcp ~ 1/x (accuracy ~9 bits, obtained by
+     rounding HW approximation to 1+9 mantissa bits)
+
+   Reduced argument R=Rcp*x-1 is used to approximate log(1+R) as polynomial
+
+   log(Rcp) = exponent_Rcp*log(2) + log(mantissa_Rcp)
+     -log(mantissa_Rcp) is obtained from a lookup table,
+     accessed by a 9-bit index
+ */
+        pushq     %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq      %rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq      $-64, %rsp
+        subq      $1280, %rsp
+        movq      __svml_dlog_data@GOTPCREL(%rip), %rdx
+        movq      $-1, %rax
+
+/* isolate exponent bits */
+        vpsrlq    $20, %zmm0, %zmm2
+        vpsrlq    $32, %zmm2, %zmm3
+        vpxord    %zmm2, %zmm2, %zmm2
+        kxnorw    %k3, %k3, %k3
+        vmovups   _Two10(%rdx), %zmm1
+        vmovups   _One(%rdx), %zmm9
+        vpmovqd   %zmm3, %ymm4
+
+/* convert biased exponent to DP format */
+        vcvtdq2pd %ymm4, %zmm13
+
+/* preserve mantissa, set input exponent to 2^(-10) */
+        vpternlogq $248, _ExpMask(%rdx), %zmm0, %zmm1
+        vcmppd     $17, _MinNorm(%rdx), %zmm0, %k1
+
+/* reciprocal approximation good to at least 11 bits */
+        vrcp28pd  %zmm1, %zmm5
+        vpbroadcastq %rax, %zmm6{%k1}{z}
+        vmovups   _poly_coeff_3(%rdx), %zmm15
+        vcmppd    $22, _MaxNorm(%rdx), %zmm0, %k2
+        vmovups   _Bias1(%rdx), %zmm14
+
+/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
+        vrndscalepd $8, %zmm5, %zmm11
+        vpbroadcastq %rax, %zmm7{%k2}{z}
+
+/* argument reduction started:  R = Mantissa*Rcp - 1 */
+        vfmsub213pd %zmm9, %zmm11, %zmm1
+
+/* calculate index for table lookup */
+        vpsrlq    $40, %zmm11, %zmm10
+        vgatherqpd _LogRcp_lookup(%rdx,%zmm10), %zmm2{%k3}
+        vcmppd    $30, _Threshold(%rdx), %zmm11, %k1
+
+/* combine and get argument value range mask */
+        vporq     %zmm7, %zmm6, %zmm8
+
+/* exponent*log(2.0) */
+        vmovups   _poly_coeff_1(%rdx), %zmm11
+        vmulpd    %zmm1, %zmm1, %zmm10
+        vptestmq  %zmm8, %zmm8, %k0
+        vfmadd213pd _poly_coeff_4(%rdx), %zmm1, %zmm15
+        kmovw     %k0, %ecx
+
+/* polynomial computation */
+        vfmadd213pd  _poly_coeff_2(%rdx), %zmm1, %zmm11
+        movzbl       %cl, %ecx
+        vpbroadcastq %rax, %zmm12{%k1}{z}
+        vfmadd213pd  %zmm15, %zmm10, %zmm11
+        vpternlogq   $248, _Bias(%rdx), %zmm12, %zmm14
+
+/*
+   Table stores -log(0.5*mantissa) for larger mantissas,
+   adjust exponent accordingly
+ */
+        vsubpd    %zmm14, %zmm13, %zmm3
+
+/*
+   reconstruction:
+   (exponent*log(2)) + (LogRcp + (R+poly))
+ */
+        vfmadd213pd %zmm1, %zmm10, %zmm11
+        vaddpd      %zmm2, %zmm11, %zmm1
+        vfmadd132pd _L2(%rdx), %zmm1, %zmm3
+        testl       %ecx, %ecx
+        jne         .LBL_1_3
+
+.LBL_1_2:
+        cfi_remember_state
+        vmovaps   %zmm3, %zmm0
+        movq      %rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq      %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+
+.LBL_1_3:
+        cfi_restore_state
+        vmovups   %zmm0, 1152(%rsp)
+        vmovups   %zmm3, 1216(%rsp)
+        je        .LBL_1_2
+
+        xorb      %dl, %dl
+        kmovw     %k4, 1048(%rsp)
+        xorl      %eax, %eax
+        kmovw     %k5, 1040(%rsp)
+        kmovw     %k6, 1032(%rsp)
+        kmovw     %k7, 1024(%rsp)
+        vmovups   %zmm16, 960(%rsp)
+        vmovups   %zmm17, 896(%rsp)
+        vmovups   %zmm18, 832(%rsp)
+        vmovups   %zmm19, 768(%rsp)
+        vmovups   %zmm20, 704(%rsp)
+        vmovups   %zmm21, 640(%rsp)
+        vmovups   %zmm22, 576(%rsp)
+        vmovups   %zmm23, 512(%rsp)
+        vmovups   %zmm24, 448(%rsp)
+        vmovups   %zmm25, 384(%rsp)
+        vmovups   %zmm26, 320(%rsp)
+        vmovups   %zmm27, 256(%rsp)
+        vmovups   %zmm28, 192(%rsp)
+        vmovups   %zmm29, 128(%rsp)
+        vmovups   %zmm30, 64(%rsp)
+        vmovups   %zmm31, (%rsp)
+        movq      %rsi, 1064(%rsp)
+        movq      %rdi, 1056(%rsp)
+        movq      %r12, 1096(%rsp)
+        cfi_offset_rel_rsp (12, 1096)
+        movb      %dl, %r12b
+        movq      %r13, 1088(%rsp)
+        cfi_offset_rel_rsp (13, 1088)
+        movl      %ecx, %r13d
+        movq      %r14, 1080(%rsp)
+        cfi_offset_rel_rsp (14, 1080)
+        movl      %eax, %r14d
+        movq      %r15, 1072(%rsp)
+        cfi_offset_rel_rsp (15, 1072)
+        cfi_remember_state
+
+.LBL_1_6:
+        btl       %r14d, %r13d
+        jc        .LBL_1_12
+
+.LBL_1_7:
+        lea       1(%r14), %esi
+        btl       %esi, %r13d
+        jc        .LBL_1_10
+
+.LBL_1_8:
+        addb      $1, %r12b
+        addl      $2, %r14d
+        cmpb      $16, %r12b
+        jb        .LBL_1_6
+
+        kmovw     1048(%rsp), %k4
+        movq      1064(%rsp), %rsi
+        kmovw     1040(%rsp), %k5
+        movq      1056(%rsp), %rdi
+        kmovw     1032(%rsp), %k6
+        movq      1096(%rsp), %r12
+        cfi_restore (%r12)
+        movq      1088(%rsp), %r13
+        cfi_restore (%r13)
+        kmovw     1024(%rsp), %k7
+        vmovups   960(%rsp), %zmm16
+        vmovups   896(%rsp), %zmm17
+        vmovups   832(%rsp), %zmm18
+        vmovups   768(%rsp), %zmm19
+        vmovups   704(%rsp), %zmm20
+        vmovups   640(%rsp), %zmm21
+        vmovups   576(%rsp), %zmm22
+        vmovups   512(%rsp), %zmm23
+        vmovups   448(%rsp), %zmm24
+        vmovups   384(%rsp), %zmm25
+        vmovups   320(%rsp), %zmm26
+        vmovups   256(%rsp), %zmm27
+        vmovups   192(%rsp), %zmm28
+        vmovups   128(%rsp), %zmm29
+        vmovups   64(%rsp), %zmm30
+        vmovups   (%rsp), %zmm31
+        movq      1080(%rsp), %r14
+        cfi_restore (%r14)
+        movq      1072(%rsp), %r15
+        cfi_restore (%r15)
+        vmovups   1216(%rsp), %zmm3
+        jmp       .LBL_1_2
+
+.LBL_1_10:
+        cfi_restore_state
+        movzbl    %r12b, %r15d
+        shlq      $4, %r15
+        vmovsd    1160(%rsp,%r15), %xmm0
+        call      log@PLT
+        vmovsd    %xmm0, 1224(%rsp,%r15)
+        jmp       .LBL_1_8
+
+.LBL_1_12:
+        movzbl    %r12b, %r15d
+        shlq      $4, %r15
+        vmovsd    1152(%rsp,%r15), %xmm0
+        call      log@PLT
+        vmovsd    %xmm0, 1216(%rsp,%r15)
+        jmp       .LBL_1_7
+#endif
+END (_ZGVeN8v_log_knl)
+
+ENTRY (_ZGVeN8v_log_skx)
+#ifndef HAVE_AVX512_ASM_SUPPORT
+WRAPPER_IMPL_AVX512 _ZGVdN4v_log
+#else
+/*
+   ALGORITHM DESCRIPTION:
+
+     log(x) = -log(Rcp) + log(Rcp*x),
+       where Rcp ~ 1/x (accuracy ~9 bits,
+       obtained by rounding HW approximation to 1+9 mantissa bits)
+
+     Reduced argument R=Rcp*x-1 is used to approximate log(1+R) as polynomial
+
+     log(Rcp) = exponent_Rcp*log(2) + log(mantissa_Rcp)
+       -log(mantissa_Rcp) is obtained from a lookup table,
+       accessed by a 9-bit index
+ */
+        pushq     %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq      %rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq      $-64, %rsp
+        subq      $1280, %rsp
+        movq      __svml_dlog_data@GOTPCREL(%rip), %rax
+        vmovaps   %zmm0, %zmm3
+        kxnorw    %k3, %k3, %k3
+        vmovups _Two10(%rax), %zmm2
+        vmovups _Threshold(%rax), %zmm14
+        vmovups _One(%rax), %zmm11
+        vcmppd    $21, _MinNorm(%rax), %zmm3, %k1
+        vcmppd    $18, _MaxNorm(%rax), %zmm3, %k2
+
+/* isolate exponent bits */
+        vpsrlq    $20, %zmm3, %zmm4
+
+/* preserve mantissa, set input exponent to 2^(-10) */
+        vpternlogq $248, _ExpMask(%rax), %zmm3, %zmm2
+        vpbroadcastq .L_2il0floatpacket.12(%rip), %zmm1
+        vpsrlq    $32, %zmm4, %zmm6
+
+/* reciprocal approximation good to at least 11 bits */
+        vrcp14pd  %zmm2, %zmm5
+
+/* exponent*log(2.0) */
+        vmovups _poly_coeff_1(%rax), %zmm4
+        vpmovqd   %zmm6, %ymm7
+
+/* round reciprocal to nearest integer, will have 1+9 mantissa bits */
+        vrndscalepd $8, %zmm5, %zmm0
+
+/* calculate index for table lookup */
+        vpsrlq    $40, %zmm0, %zmm12
+
+/* argument reduction started:  R = Mantissa*Rcp - 1 */
+        vfmsub213pd %zmm11, %zmm0, %zmm2
+        vpmovqd   %zmm12, %ymm13
+
+/* polynomial computation */
+        vfmadd213pd _poly_coeff_2(%rax), %zmm2, %zmm4
+        vmovaps   %zmm1, %zmm8
+        vmovaps   %zmm1, %zmm9
+        vpxord    %zmm5, %zmm5, %zmm5
+        vgatherdpd _LogRcp_lookup(%rax,%ymm13), %zmm5{%k3}
+        vmovups _Bias1(%rax), %zmm13
+        vpandnq   %zmm3, %zmm3, %zmm8{%k1}
+        vcmppd    $21, %zmm0, %zmm14, %k1
+        vpandnq   %zmm14, %zmm14, %zmm1{%k1}
+        vmulpd    %zmm2, %zmm2, %zmm14
+        vpternlogq $248, _Bias(%rax), %zmm1, %zmm13
+        vmovups _poly_coeff_3(%rax), %zmm1
+        vfmadd213pd _poly_coeff_4(%rax), %zmm2, %zmm1
+        vfmadd213pd %zmm1, %zmm14, %zmm4
+
+/*
+   reconstruction:
+   (exponent*log(2)) + (LogRcp + (R+poly))
+ */
+        vfmadd213pd %zmm2, %zmm14, %zmm4
+        vaddpd    %zmm5, %zmm4, %zmm2
+        vpandnq   %zmm3, %zmm3, %zmm9{%k2}
+
+/* combine and get argument value range mask */
+        vorpd     %zmm9, %zmm8, %zmm10
+        vcmppd    $3, %zmm10, %zmm10, %k0
+        kmovw     %k0, %ecx
+
+/* convert biased exponent to DP format */
+        vcvtdq2pd %ymm7, %zmm15
+
+/*
+   Table stores -log(0.5*mantissa) for larger mantissas,
+   adjust exponent accordingly
+ */
+        vsubpd    %zmm13, %zmm15, %zmm0
+        vfmadd132pd _L2(%rax), %zmm2, %zmm0
+        testl     %ecx, %ecx
+        jne       .LBL_2_3
+
+.LBL_2_2:
+        cfi_remember_state
+        movq      %rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq      %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+
+.LBL_2_3:
+        cfi_restore_state
+        vmovups   %zmm3, 1152(%rsp)
+        vmovups   %zmm0, 1216(%rsp)
+        je        .LBL_2_2
+
+        xorb      %dl, %dl
+        xorl      %eax, %eax
+        kmovw     %k4, 1048(%rsp)
+        kmovw     %k5, 1040(%rsp)
+        kmovw     %k6, 1032(%rsp)
+        kmovw     %k7, 1024(%rsp)
+        vmovups   %zmm16, 960(%rsp)
+        vmovups   %zmm17, 896(%rsp)
+        vmovups   %zmm18, 832(%rsp)
+        vmovups   %zmm19, 768(%rsp)
+        vmovups   %zmm20, 704(%rsp)
+        vmovups   %zmm21, 640(%rsp)
+        vmovups   %zmm22, 576(%rsp)
+        vmovups   %zmm23, 512(%rsp)
+        vmovups   %zmm24, 448(%rsp)
+        vmovups   %zmm25, 384(%rsp)
+        vmovups   %zmm26, 320(%rsp)
+        vmovups   %zmm27, 256(%rsp)
+        vmovups   %zmm28, 192(%rsp)
+        vmovups   %zmm29, 128(%rsp)
+        vmovups   %zmm30, 64(%rsp)
+        vmovups   %zmm31, (%rsp)
+        movq      %rsi, 1064(%rsp)
+        movq      %rdi, 1056(%rsp)
+        movq      %r12, 1096(%rsp)
+        cfi_offset_rel_rsp (12, 1096)
+        movb      %dl, %r12b
+        movq      %r13, 1088(%rsp)
+        cfi_offset_rel_rsp (13, 1088)
+        movl      %ecx, %r13d
+        movq      %r14, 1080(%rsp)
+        cfi_offset_rel_rsp (14, 1080)
+        movl      %eax, %r14d
+        movq      %r15, 1072(%rsp)
+        cfi_offset_rel_rsp (15, 1072)
+        cfi_remember_state
+
+.LBL_2_6:
+        btl       %r14d, %r13d
+        jc        .LBL_2_12
+
+.LBL_2_7:
+        lea       1(%r14), %esi
+        btl       %esi, %r13d
+        jc        .LBL_2_10
+
+.LBL_2_8:
+        incb      %r12b
+        addl      $2, %r14d
+        cmpb      $16, %r12b
+        jb        .LBL_2_6
+
+        kmovw     1048(%rsp), %k4
+        kmovw     1040(%rsp), %k5
+        kmovw     1032(%rsp), %k6
+        kmovw     1024(%rsp), %k7
+        vmovups   960(%rsp), %zmm16
+        vmovups   896(%rsp), %zmm17
+        vmovups   832(%rsp), %zmm18
+        vmovups   768(%rsp), %zmm19
+        vmovups   704(%rsp), %zmm20
+        vmovups   640(%rsp), %zmm21
+        vmovups   576(%rsp), %zmm22
+        vmovups   512(%rsp), %zmm23
+        vmovups   448(%rsp), %zmm24
+        vmovups   384(%rsp), %zmm25
+        vmovups   320(%rsp), %zmm26
+        vmovups   256(%rsp), %zmm27
+        vmovups   192(%rsp), %zmm28
+        vmovups   128(%rsp), %zmm29
+        vmovups   64(%rsp), %zmm30
+        vmovups   (%rsp), %zmm31
+        vmovups   1216(%rsp), %zmm0
+        movq      1064(%rsp), %rsi
+        movq      1056(%rsp), %rdi
+        movq      1096(%rsp), %r12
+        cfi_restore (%r12)
+        movq      1088(%rsp), %r13
+        cfi_restore (%r13)
+        movq      1080(%rsp), %r14
+        cfi_restore (%r14)
+        movq      1072(%rsp), %r15
+        cfi_restore (%r15)
+        jmp       .LBL_2_2
+
+.LBL_2_10:
+        cfi_restore_state
+        movzbl    %r12b, %r15d
+        shlq      $4, %r15
+        vmovsd    1160(%rsp,%r15), %xmm0
+        vzeroupper
+        vmovsd    1160(%rsp,%r15), %xmm0
+
+        call      log@PLT
+
+        vmovsd    %xmm0, 1224(%rsp,%r15)
+        jmp       .LBL_2_8
+
+.LBL_2_12:
+        movzbl    %r12b, %r15d
+        shlq      $4, %r15
+        vmovsd    1152(%rsp,%r15), %xmm0
+        vzeroupper
+        vmovsd    1152(%rsp,%r15), %xmm0
+
+        call      log@PLT
+
+        vmovsd    %xmm0, 1216(%rsp,%r15)
+        jmp       .LBL_2_7
+#endif
+END (_ZGVeN8v_log_skx)
+
+	.section .rodata, "a"
+.L_2il0floatpacket.12:
+	.long	0xffffffff,0xffffffff
+	.type	.L_2il0floatpacket.12,@object
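
Both AVX-512 kernels detect special-case lanes by comparing the input
against _MinNorm/_MaxNorm into mask registers; a nonzero combined mask
sends those lanes to scalar log@PLT.  A sketch of that range check with
AVX-512 intrinsics (the bounds are passed as parameters here, standing
in for the table constants):

    #include <immintrin.h>

    /* Nonzero bits mark lanes outside [min_norm, max_norm] that need
       the scalar fallback path.  */
    static inline int
    log_special_lanes (__m512d x, __m512d min_norm, __m512d max_norm)
    {
      __mmask8 too_small = _mm512_cmp_pd_mask (x, min_norm, _CMP_LT_OQ);
      __mmask8 too_big   = _mm512_cmp_pd_mask (x, max_norm, _CMP_NLE_UQ);
      return (int) (too_small | too_big);
    }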