author     Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-17 16:22:26 +0300
committer  Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-17 16:22:26 +0300
commit     c10b9b13f7471b08273effc8cd7e51b119df9348 (patch)
tree       ca058c3446a247a5bccea211bd84a9c0130e1388 /sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
parent     1663be053d50c06bb0f971c87d41a7b83f96fe15 (diff)
Vector pow for x86_64 and tests.
Here is an implementation of vectorized pow, containing SSE, AVX,
AVX2 and AVX512 versions, according to the Vector ABI
<https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.
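
As a usage sketch (not part of this patch): with GCC, a loop such as the
one below may be auto-vectorized into calls to the new libmvec variant
_ZGVdN4vv_pow when built with something like -O2 -ffast-math
-fopenmp-simd -mavx2; the file name and exact flag set are illustrative
assumptions.

  /* test_vecpow.c -- hypothetical driver; compile e.g. with
     gcc -O2 -ffast-math -fopenmp-simd -mavx2 test_vecpow.c -lm
     and inspect the binary for calls to _ZGVdN4vv_pow.  */
  #include <math.h>
  #include <stdio.h>

  #define N 1024

  double x[N], y[N], r[N];

  int
  main (void)
  {
    for (int i = 0; i < N; i++)
      {
        x[i] = 1.0 + 0.001 * i;
        y[i] = 2.0;
      }
  #pragma omp simd
    for (int i = 0; i < N; i++)
      r[i] = pow (x[i], y[i]);
    printf ("%f\n", r[N - 1]);  /* Keep the result live.  */
    return 0;
  }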

    * bits/libm-simd-decl-stubs.h: Added stubs for pow.
    * math/bits/mathcalls.h: Added pow declaration with __MATHCALL_VEC.
    * sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New versions added.
    * sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration and asm
    redirections for pow.
    * sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
    * sysdeps/x86_64/fpu/Versions: New versions added.
    * sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
    * sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines): Added
    build of SSE, AVX2 and AVX512 IFUNC versions.
    * sysdeps/x86_64/fpu/svml_d_wrapper_impl.h: Added 2 argument wrappers.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow2_core_sse4.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow4_core_avx2.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow2_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow4_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow4_core_avx.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow8_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow_data.S: New file.
    * sysdeps/x86_64/fpu/svml_d_pow_data.h: New file.
    * sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c: Added vector pow test.
    * sysdeps/x86_64/fpu/test-double-vlen2.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-avx2.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen8.c: Likewise.
    * NEWS: Mention addition of x86_64 vector pow.
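
For reference, the libmvec.abilist additions above correspond to Vector
ABI mangled names like the following (an illustrative sketch, not text
from the patch; 'b'/'c'/'d'/'e' select the SSE4/AVX/AVX2/AVX512 ISA
classes, N2/N4/N8 the vector length, and 'vv' marks two vector
arguments):

  /* Illustrative prototypes for the new GLIBC_2.22 pow entry points;
     GCC vector types stand in for the real register arguments.  */
  typedef double v2df __attribute__ ((vector_size (16)));
  typedef double v4df __attribute__ ((vector_size (32)));
  typedef double v8df __attribute__ ((vector_size (64)));

  v2df _ZGVbN2vv_pow (v2df x, v2df y);  /* SSE4, 2 doubles.    */
  v4df _ZGVcN4vv_pow (v4df x, v4df y);  /* AVX, 4 doubles.     */
  v4df _ZGVdN4vv_pow (v4df x, v4df y);  /* AVX2, 4 doubles.    */
  v8df _ZGVeN8vv_pow (v8df x, v8df y);  /* AVX512, 8 doubles.  */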
Diffstat (limited to 'sysdeps/x86_64/fpu/svml_d_wrapper_impl.h')
-rw-r--r--  sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 110
1 file changed, 110 insertions, 0 deletions
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 4b2e9f5e80..25465cd840 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -34,6 +34,26 @@
         ret
 .endm
 
+/* 2 argument SSE2 ISA version as wrapper to scalar.  */
+.macro WRAPPER_IMPL_SSE2_ff callee
+        subq      $56, %rsp
+        cfi_adjust_cfa_offset(56)
+        movaps    %xmm0, (%rsp)
+        movaps    %xmm1, 16(%rsp)
+        call      \callee@PLT
+        movsd     %xmm0, 32(%rsp)
+        movsd     8(%rsp), %xmm0
+        movsd     24(%rsp), %xmm1
+        call      \callee@PLT
+        movsd     32(%rsp), %xmm1
+        movsd     %xmm0, 40(%rsp)
+        unpcklpd  %xmm0, %xmm1
+        movaps    %xmm1, %xmm0
+        addq      $56, %rsp
+        cfi_adjust_cfa_offset(-56)
+        ret
+.endm
+
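
In C terms, this wrapper computes roughly the following (a sketch only;
vec2d and callee stand in for the two-lane xmm arguments and the \callee
macro parameter):

  /* Rough C model of WRAPPER_IMPL_SSE2_ff: spill both vector arguments,
     run the scalar callee once per lane, then pack the two results back
     into one vector (the job unpcklpd does above).  */
  typedef struct { double d[2]; } vec2d;

  vec2d
  wrapper_sse2_ff (double (*callee) (double, double), vec2d x, vec2d y)
  {
    vec2d r;
    r.d[0] = callee (x.d[0], y.d[0]);  /* First call: lane 0 of both args.  */
    r.d[1] = callee (x.d[1], y.d[1]);  /* Second call: lane 1.  */
    return r;
  }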
 /* AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
 .macro WRAPPER_IMPL_AVX callee
         pushq		%rbp
@@ -60,6 +80,34 @@
         ret
 .endm
 
+/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
+.macro WRAPPER_IMPL_AVX_ff callee
+        pushq     %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq      %rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq      $-32, %rsp
+        subq      $64, %rsp
+        vextractf128 $1, %ymm0, 16(%rsp)
+        vextractf128 $1, %ymm1, (%rsp)
+        vzeroupper
+        call      HIDDEN_JUMPTARGET(\callee)
+        vmovaps   %xmm0, 32(%rsp)
+        vmovaps   16(%rsp), %xmm0
+        vmovaps   (%rsp), %xmm1
+        call      HIDDEN_JUMPTARGET(\callee)
+        vmovaps   %xmm0, %xmm1
+        vmovaps   32(%rsp), %xmm0
+        vinsertf128 $1, %xmm1, %ymm0, %ymm0
+        movq      %rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq      %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+.endm
+
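
In intrinsics form, the flow above is roughly this (a sketch; callee_sse
is a stand-in for HIDDEN_JUMPTARGET(\callee)): the high 128-bit halves
are spilled with vextractf128, the SSE-width callee first runs on the
low halves still live in xmm0/xmm1 after vzeroupper, then on the spilled
high halves, and vinsertf128 reassembles the 256-bit result.

  /* Rough intrinsics model of WRAPPER_IMPL_AVX_ff; compile with -mavx.  */
  #include <immintrin.h>

  extern __m128d callee_sse (__m128d, __m128d);  /* Hypothetical SSE-width callee.  */

  __m256d
  wrapper_avx_ff (__m256d x, __m256d y)
  {
    __m128d xhi = _mm256_extractf128_pd (x, 1);  /* Spill high halves.  */
    __m128d yhi = _mm256_extractf128_pd (y, 1);
    __m128d lo = callee_sse (_mm256_castpd256_pd128 (x),
                             _mm256_castpd256_pd128 (y));
    __m128d hi = callee_sse (xhi, yhi);
    /* Low result stays in the low lane, high result goes to the upper lane.  */
    return _mm256_insertf128_pd (_mm256_castpd128_pd256 (lo), hi, 1);
  }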
 /* AVX512 ISA version as wrapper to AVX2 ISA version.  */
 .macro WRAPPER_IMPL_AVX512 callee
         pushq	%rbp
@@ -99,3 +147,65 @@
         cfi_restore (%rbp)
         ret
 .endm
+
+/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
+.macro WRAPPER_IMPL_AVX512_ff callee
+        pushq	%rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq	%rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq	$-64, %rsp
+        subq	$128, %rsp
+/* Below is encoding for vmovaps %zmm0, (%rsp).  */
+        .byte	0x62
+        .byte	0xf1
+        .byte	0x7c
+        .byte	0x48
+        .byte	0x29
+        .byte	0x04
+        .byte	0x24
+/* Below is encoding for vmovaps %zmm1, 64(%rsp).  */
+        .byte	0x62
+        .byte	0xf1
+        .byte	0x7c
+        .byte	0x48
+        .byte	0x29
+        .byte	0x4c
+        .byte	0x24
+        .byte	0x01
+/* Below is encoding for vmovapd (%rsp), %ymm0.  */
+        .byte	0xc5
+        .byte	0xfd
+        .byte	0x28
+        .byte	0x04
+        .byte	0x24
+/* Below is encoding for vmovapd 64(%rsp), %ymm1.  */
+        .byte	0xc5
+        .byte	0xfd
+        .byte	0x28
+        .byte	0x4c
+        .byte	0x24
+        .byte	0x40
+        call	HIDDEN_JUMPTARGET(\callee)
+/* Below is encoding for vmovapd 32(%rsp), %ymm0.  */
+        .byte	0xc5
+        .byte	0xfd
+        .byte	0x28
+        .byte	0x44
+        .byte	0x24
+        .byte	0x20
+/* Below is encoding for vmovapd 96(%rsp), %ymm1.  */
+        .byte	0xc5
+        .byte	0xfd
+        .byte	0x28
+        .byte	0x4c
+        .byte	0x24
+        .byte	0x60
+        call	HIDDEN_JUMPTARGET(\callee)
+        movq	%rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq	%rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+.endm
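
Two notes on this macro. The raw .byte sequences hand-assemble the EVEX
stores and VEX loads, presumably so the file still builds with older
binutils that cannot assemble these instructions; the intended
instruction is spelled out in the comment above each sequence. Also, as
committed, the macro reloads %ymm0/%ymm1 with the high halves right
after the first call, without saving the first result or merging both
halves back into %zmm0; the AVX512 wrappers were reworked in later glibc
revisions. For reference, here is a hedged intrinsics model of the
intended split/call/recombine flow (callee_avx2 stands in for
HIDDEN_JUMPTARGET(\callee)):

  /* Rough intrinsics model of a two-argument AVX512-to-AVX2 wrapper;
     compile with -mavx512f.  */
  #include <immintrin.h>

  extern __m256d callee_avx2 (__m256d, __m256d);  /* Hypothetical AVX2-width callee.  */

  __m512d
  wrapper_avx512_ff (__m512d x, __m512d y)
  {
    /* First call: low 256-bit halves of both arguments.  */
    __m256d rlo = callee_avx2 (_mm512_castpd512_pd256 (x),
                               _mm512_castpd512_pd256 (y));
    /* Second call: high halves, as spilled by the zmm stores above.  */
    __m256d rhi = callee_avx2 (_mm512_extractf64x4_pd (x, 1),
                               _mm512_extractf64x4_pd (y, 1));
    /* Merge the two 256-bit results into the 512-bit return value.  */
    return _mm512_insertf64x4 (_mm512_castpd256_pd512 (rlo), rhi, 1);
  }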