author     Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-18 17:55:55 +0300
committer  Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-18 17:55:55 +0300
commit     c9a8c526acd185176e486bee4624039740f8c435
tree       1f199d8fb0bb0ce9b0bdb21c86c3c213a514c22a
parent     8aa92022e2e7cb5470b6e252020140c05b8013ed
Vector sincos for x86_64 and tests.
This is an implementation of vectorized sincos, containing SSE, AVX,
AVX2 and AVX512 versions, following the Vector ABI
<https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.
(A usage sketch follows the ChangeLog entries below.)

    * NEWS: Mention addition of x86_64 vector sincos.
    * bits/libm-simd-decl-stubs.h: Added stubs for sincos.
    * math/math.h (__MATHDECL_VEC): New macro.
    * math/bits/mathcalls.h: Added sincos declaration with __MATHDECL_VEC.
    * math/gen-libm-have-vector-test.sh: Added conditional generation of
    the sincos wrapper declaration.
    * math/test-vec-loop.h (TEST_VEC_LOOP): Refactored.
    * math/test-double-vlen2.h: Added wrapper for sincos tests, reflected
    TEST_VEC_LOOP change.
    * math/test-double-vlen4.h: Likewise.
    * math/test-double-vlen8.h: Likewise.
    * math/test-float-vlen16.h: Reflected TEST_VEC_LOOP change.
    * math/test-float-vlen4.h: Likewise.
    * math/test-float-vlen8.h: Likewise.
    * sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New symbols added.
    * sysdeps/x86/fpu/bits/math-vector.h: Added sincos SIMD declaration.
    * sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
    * sysdeps/x86_64/fpu/Versions: New versions added.
    * sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
    * sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines):
    Added build of SSE, AVX2 and AVX512 IFUNC versions.
    * sysdeps/x86_64/fpu/multiarch/svml_d_sincos2_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_sincos2_core_sse4.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_sincos4_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_sincos4_core_avx2.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S: New file.
    * sysdeps/x86_64/fpu/svml_d_sincos2_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_sincos4_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_sincos4_core_avx.S: New file.
    * sysdeps/x86_64/fpu/svml_d_sincos8_core.S: New file.
    * sysdeps/x86_64/fpu/svml_d_sincos_data.S: New file.
    * sysdeps/x86_64/fpu/svml_d_sincos_data.h: New file.
    * sysdeps/x86_64/fpu/svml_d_wrapper_impl.h: Added wrappers for sincos.
    * sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c: Vector sincos tests.
    * sysdeps/x86_64/fpu/test-double-vlen2.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-avx2.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen4.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-double-vlen8.c: Likewise.
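
As a usage sketch (not part of this commit): with libmvec installed and a
GCC new enough to honor the OpenMP SIMD declarations that math.h now carries
for sincos, a plain scalar loop can be auto-vectorized into calls to the
_ZGV*_sincos entry points added here.  The function name sincos_array and
the exact compiler flags below are illustrative, not mandated by the patch.

    #define _GNU_SOURCE            /* sincos is a GNU extension */
    #include <math.h>

    /* With e.g. gcc -O2 -ffast-math -fopenmp-simd -mavx2 demo.c -lmvec -lm,
       the compiler may replace the scalar sincos calls in this loop with
       calls to vector variants such as _ZGVdN4vvv_sincos.  */
    void
    sincos_array (const double *x, double *s, double *c, int n)
    {
    #pragma omp simd
      for (int i = 0; i < n; i++)
        sincos (x[i], &s[i], &c[i]);
    }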
Diffstat (limited to 'sysdeps/x86_64/fpu/svml_d_wrapper_impl.h')
-rw-r--r--   sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 170
1 file changed, 170 insertions(+), 0 deletions(-)
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 25465cd840..bd93b8edfa 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -54,6 +54,47 @@
         ret
 .endm
 
+/* 3 argument SSE2 ISA version as wrapper to scalar.  */
+.macro WRAPPER_IMPL_SSE2_fFF callee
+        pushq   %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        pushq   %rbx
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbx, 0)
+        movq    %rdi, %rbp
+        movq    %rsi, %rbx
+        subq    $40, %rsp
+        cfi_adjust_cfa_offset (40)
+        leaq    16(%rsp), %rsi
+        leaq    24(%rsp), %rdi
+        movaps  %xmm0, (%rsp)
+        call    \callee@PLT
+        leaq    16(%rsp), %rsi
+        leaq    24(%rsp), %rdi
+        movsd   24(%rsp), %xmm0
+        movapd  (%rsp), %xmm1
+        movsd   %xmm0, 0(%rbp)
+        unpckhpd        %xmm1, %xmm1
+        movsd   16(%rsp), %xmm0
+        movsd   %xmm0, (%rbx)
+        movapd  %xmm1, %xmm0
+        call    \callee@PLT
+        movsd   24(%rsp), %xmm0
+        movsd   %xmm0, 8(%rbp)
+        movsd   16(%rsp), %xmm0
+        movsd   %xmm0, 8(%rbx)
+        addq    $40, %rsp
+        cfi_adjust_cfa_offset (-40)
+        popq    %rbx
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbx)
+        popq    %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+.endm
+
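As a rough C sketch (not part of the committed diff; names are illustrative,
and rdi/rsi are taken as plain output arrays, the pointer semantics this
commit uses), the macro above behaves like the following:

    #define _GNU_SOURCE
    #include <math.h>
    #include <emmintrin.h>

    /* WRAPPER_IMPL_SSE2_fFF expanded with callee = sincos: spill the two
       input lanes, call the scalar routine once per lane, and write each
       result pair through the two output pointers.  */
    static void
    wrapper_sse2_sincos (__m128d x, double *sinp, double *cosp)
    {
      double lanes[2];
      _mm_storeu_pd (lanes, x);               /* movaps %xmm0, (%rsp) */
      sincos (lanes[0], &sinp[0], &cosp[0]);  /* first scalar call */
      sincos (lanes[1], &sinp[1], &cosp[1]);  /* second call, high lane
                                                 (unpckhpd) */
    }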
 /* AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
 .macro WRAPPER_IMPL_AVX callee
         pushq		%rbp
@@ -108,6 +149,49 @@
         ret
 .endm
 
+/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
+.macro WRAPPER_IMPL_AVX_fFF callee
+        pushq     %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq      %rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq      $-32, %rsp
+        pushq     %r13
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%r13, 0)
+        pushq     %r14
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%r14, 0)
+        subq      $48, %rsp
+        movq      %rsi, %r14
+        movq      %rdi, %r13
+        vextractf128 $1, %ymm0, 32(%rsp)
+        vzeroupper
+        call      HIDDEN_JUMPTARGET(\callee)
+        vmovaps   32(%rsp), %xmm0
+        lea       (%rsp), %rdi
+        lea       16(%rsp), %rsi
+        call      HIDDEN_JUMPTARGET(\callee)
+        vmovapd   (%rsp), %xmm0
+        vmovapd   16(%rsp), %xmm1
+        vmovapd   %xmm0, 16(%r13)
+        vmovapd   %xmm1, 16(%r14)
+        addq      $48, %rsp
+        popq      %r14
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%r14)
+        popq      %r13
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%r13)
+        movq      %rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq      %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+.endm
+
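Again as a rough C sketch (not part of the committed diff; names are
hypothetical, pointer semantics simplified as above): the AVX wrapper
extracts the high 128-bit half, lets the SSE kernel write the low two lanes
straight into the caller's arrays, and copies the high two lanes out of
stack temporaries.

    #include <immintrin.h>

    /* sse_kernel_sincos stands in for HIDDEN_JUMPTARGET(\callee), the
       2-lane SSE implementation being wrapped.  */
    extern void sse_kernel_sincos (__m128d x, double *sinp, double *cosp);

    static void
    wrapper_avx_sincos (__m256d x, double *sinp, double *cosp)
    {
      double s_hi[2], c_hi[2];
      __m128d lo = _mm256_castpd256_pd128 (x);
      __m128d hi = _mm256_extractf128_pd (x, 1); /* vextractf128 $1, %ymm0 */

      sse_kernel_sincos (lo, sinp, cosp);    /* lanes 0-1 written in place */
      sse_kernel_sincos (hi, s_hi, c_hi);    /* lanes 2-3 into temporaries */

      sinp[2] = s_hi[0]; sinp[3] = s_hi[1];  /* vmovapd %xmm0, 16(%r13) */
      cosp[2] = c_hi[0]; cosp[3] = c_hi[1];  /* vmovapd %xmm1, 16(%r14) */
    }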
 /* AVX512 ISA version as wrapper to AVX2 ISA version.  */
 .macro WRAPPER_IMPL_AVX512 callee
         pushq	%rbp
@@ -209,3 +293,89 @@
         cfi_restore (%rbp)
         ret
 .endm
+
+/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
+.macro WRAPPER_IMPL_AVX512_fFF callee
+        pushq     %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq      %rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq      $-64, %rsp
+        pushq     %r12
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%r12, 0)
+        pushq     %r13
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%r13, 0)
+        subq      $176, %rsp
+        movq      %rsi, %r13
+/* Below is encoding for vmovaps %zmm0, (%rsp).  */
+        .byte	0x62
+        .byte	0xf1
+        .byte	0x7c
+        .byte	0x48
+        .byte	0x29
+        .byte	0x04
+        .byte	0x24
+        movq    %rdi, %r12
+/* Below is encoding for vmovapd (%rsp), %ymm0.  */
+        .byte	0xc5
+        .byte	0xfd
+        .byte	0x28
+        .byte	0x04
+        .byte	0x24
+        call      HIDDEN_JUMPTARGET(\callee)
+/* Below is encoding for vmovapd 32(%rsp), %ymm0.  */
+        .byte	0xc5
+        .byte	0xfd
+        .byte	0x28
+        .byte	0x44
+        .byte	0x24
+        .byte	0x20
+        lea       64(%rsp), %rdi
+        lea       96(%rsp), %rsi
+        call      HIDDEN_JUMPTARGET(\callee)
+/* Below is encoding for vmovapd 64(%rsp), %ymm0.  */
+        .byte	0xc5
+        .byte	0xfd
+        .byte	0x28
+        .byte	0x44
+        .byte	0x24
+        .byte	0x40
+/* Below is encoding for vmovapd   96(%rsp), %ymm1.  */
+        .byte	0xc5
+        .byte	0xfd
+        .byte	0x28
+        .byte	0x4c
+        .byte	0x24
+        .byte	0x60
+/* Below is encoding for vmovapd   %ymm0, 32(%r12).  */
+        .byte	0xc4
+        .byte	0xc1
+        .byte	0x7d
+        .byte	0x29
+        .byte	0x44
+        .byte	0x24
+        .byte	0x20
+/* Below is encoding for vmovapd   %ymm1, 32(%r13).  */
+        .byte	0xc4
+        .byte	0xc1
+        .byte	0x7d
+        .byte	0x29
+        .byte	0x4d
+        .byte	0x20
+        addq      $176, %rsp
+        popq      %r13
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%r13)
+        popq      %r12
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%r12)
+        movq      %rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq      %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+.endm
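
The .byte sequences above hand-encode the vector moves (the comments give
the intended mnemonics) so that the file still assembles with assemblers
lacking AVX-512 support.  Logically the macro mirrors the AVX wrapper one
level up, as this rough C sketch shows (not part of the committed diff;
names hypothetical, pointer semantics simplified as above):

    #include <immintrin.h>

    /* avx2_kernel_sincos stands in for HIDDEN_JUMPTARGET(\callee), the
       4-lane AVX2 implementation being wrapped.  */
    extern void avx2_kernel_sincos (__m256d x, double *sinp, double *cosp);

    static void
    wrapper_avx512_sincos (__m512d x, double *sinp, double *cosp)
    {
      double s_hi[4], c_hi[4];
      __m256d lo = _mm512_castpd512_pd256 (x);    /* vmovapd (%rsp), %ymm0 */
      __m256d hi = _mm512_extractf64x4_pd (x, 1); /* vmovapd 32(%rsp), %ymm0 */

      avx2_kernel_sincos (lo, sinp, cosp);   /* lanes 0-3 written in place */
      avx2_kernel_sincos (hi, s_hi, c_hi);   /* lanes 4-7 into temporaries */

      for (int i = 0; i < 4; i++)
        {
          sinp[4 + i] = s_hi[i];             /* vmovapd %ymm0, 32(%r12) */
          cosp[4 + i] = c_hi[i];             /* vmovapd %ymm1, 32(%r13) */
        }
    }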