author     Andrew Senkevich <andrew.senkevich@intel.com>  2015-06-18 17:04:07 +0300
committer  Andrew Senkevich <andrew.senkevich@intel.com>  2015-06-18 17:04:07 +0300
commit     8aa92022e2e7cb5470b6e252020140c05b8013ed
tree       dfedc663faa6a67fee4bd9a65ec2227fc0e4c534
parent     2f3184451dc9daf8c15be10f190071409d93232e
Vector powf for x86_64 and tests.
Here is an implementation of vectorized powf, containing SSE, AVX,
AVX2 and AVX512 versions built according to the Vector ABI
<https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.
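
With the SIMD declarations in bits/math-vector.h in place, GCC can
auto-vectorize scalar powf calls into the new _ZGVbN4vv_powf,
_ZGVdN8vv_powf and _ZGVeN16vv_powf entry points.  A minimal usage
sketch (the exact flags are illustrative; something like
"gcc -O2 -ffast-math -fopenmp-simd" with a libmvec-enabled glibc):

    /* glibc's <math.h> pulls in bits/math-vector.h, whose SIMD
       declarations let the compiler turn this loop into calls to
       the vector powf entry points.  */
    #include <math.h>

    void
    pow_array (const float *x, const float *y, float *z, int n)
    {
      for (int i = 0; i < n; i++)
        z[i] = powf (x[i], y[i]);
    }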

    * sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New symbols added.
    * sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration and asm
    redirections for powf.
    * sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
    * sysdeps/x86_64/fpu/Versions: New versions added.
    * sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
    * sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines):
    Added build of SSE, AVX2 and AVX512 IFUNC versions.
    * sysdeps/x86_64/fpu/svml_s_wrapper_impl.h: Added 2 argument wrappers.
    * sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_powf4_core_sse4.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core.S: New file.
    * sysdeps/x86_64/fpu/multiarch/svml_s_powf8_core_avx2.S: New file.
    * sysdeps/x86_64/fpu/svml_s_powf16_core.S: New file.
    * sysdeps/x86_64/fpu/svml_s_powf4_core.S: New file.
    * sysdeps/x86_64/fpu/svml_s_powf8_core.S: New file.
    * sysdeps/x86_64/fpu/svml_s_powf8_core_avx.S: New file.
    * sysdeps/x86_64/fpu/svml_s_powf_data.S: New file.
    * sysdeps/x86_64/fpu/svml_s_powf_data.h: New file.
    * sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c: Vector powf tests.
    * sysdeps/x86_64/fpu/test-float-vlen16.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen4.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen8-avx2.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c: Likewise.
    * sysdeps/x86_64/fpu/test-float-vlen8.c: Likewise.
    * math/test-float-vlen16.h: Fixed 2 argument macro.
    * math/test-float-vlen4.h: Likewise.
    * math/test-float-vlen8.h: Likewise.
    * NEWS: Mention addition of x86_64 vector powf.
Diffstat (limited to 'sysdeps/x86_64/fpu/svml_s_wrapper_impl.h')
-rw-r--r--  sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 135
1 file changed, 135 insertions(+), 0 deletions(-)
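
All three wrappers added below follow one pattern: spill the wide
vector arguments to the stack, call the next-narrower kernel (or the
scalar routine) once per piece, and knit the pieces back into a full
vector result.  For the two-argument SSE2 wrapper that means four
scalar calls; in C it is equivalent to the following lane-by-lane
loop (a sketch using GCC vector extensions, not glibc code):

    /* What WRAPPER_IMPL_SSE2_ff computes: apply the scalar callee to
       each of the four float lanes and repack the results.  glibc
       does this in assembly so the callee is reached via the PLT and
       no caller-visible state is disturbed.  */
    typedef float v4sf __attribute__ ((vector_size (16)));

    static v4sf
    wrapper_sse2_ff (v4sf x, v4sf y, float (*callee) (float, float))
    {
      v4sf r;
      for (int i = 0; i < 4; i++)
        r[i] = callee (x[i], y[i]);
      return r;
    }

In the assembly version the four scalar results are reassembled with
three unpcklps steps: two interleaves build the pairs {r0,r2} and
{r1,r3}, and a final interleave merges them into {r0,r1,r2,r3}.
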
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index d5b62eea42..f88e30f054 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -44,6 +44,38 @@
         ret
 .endm
 
+/* 2 argument SSE2 ISA version as wrapper to scalar.  */
+.macro WRAPPER_IMPL_SSE2_ff callee
+        subq      $56, %rsp
+        cfi_adjust_cfa_offset(56)
+        movaps    %xmm0, (%rsp)
+        movaps    %xmm1, 16(%rsp)
+        call      \callee@PLT
+        movss     %xmm0, 32(%rsp)
+        movss     4(%rsp), %xmm0
+        movss     20(%rsp), %xmm1
+        call      \callee@PLT
+        movss     %xmm0, 36(%rsp)
+        movss     8(%rsp), %xmm0
+        movss     24(%rsp), %xmm1
+        call      \callee@PLT
+        movss     %xmm0, 40(%rsp)
+        movss     12(%rsp), %xmm0
+        movss     28(%rsp), %xmm1
+        call      \callee@PLT
+        movss     32(%rsp), %xmm3
+        movss     36(%rsp), %xmm2
+        movss     40(%rsp), %xmm1
+        movss     %xmm0, 44(%rsp)
+        unpcklps  %xmm1, %xmm3
+        unpcklps  %xmm0, %xmm2
+        unpcklps  %xmm2, %xmm3
+        movaps    %xmm3, %xmm0
+        addq      $56, %rsp
+        cfi_adjust_cfa_offset(-56)
+        ret
+.endm
+
 /* AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
 .macro WRAPPER_IMPL_AVX callee
         pushq     	%rbp
@@ -70,6 +102,34 @@
         ret
 .endm
 
+/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version.  */
+.macro WRAPPER_IMPL_AVX_ff callee
+        pushq     %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq      %rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq      $-32, %rsp
+        subq      $64, %rsp
+        vextractf128 $1, %ymm0, 16(%rsp)
+        vextractf128 $1, %ymm1, (%rsp)
+        vzeroupper
+        call      HIDDEN_JUMPTARGET(\callee)
+        vmovaps   %xmm0, 32(%rsp)
+        vmovaps   16(%rsp), %xmm0
+        vmovaps   (%rsp), %xmm1
+        call      HIDDEN_JUMPTARGET(\callee)
+        vmovaps   %xmm0, %xmm1
+        vmovaps   32(%rsp), %xmm0
+        vinsertf128 $1, %xmm1, %ymm0, %ymm0
+        movq      %rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq      %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+.endm
+
 /* AVX512 ISA version as wrapper to AVX2 ISA version.  */
 .macro WRAPPER_IMPL_AVX512 callee
         pushq	%rbp
@@ -109,3 +169,78 @@
         cfi_restore (%rbp)
         ret
 .endm
+
+/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version.  */
+.macro WRAPPER_IMPL_AVX512_ff callee
+        pushq     %rbp
+        cfi_adjust_cfa_offset (8)
+        cfi_rel_offset (%rbp, 0)
+        movq      %rsp, %rbp
+        cfi_def_cfa_register (%rbp)
+        andq      $-64, %rsp
+        subq      $128, %rsp
+/* Below is encoding for vmovaps %zmm0, (%rsp).  */
+        .byte	0x62
+        .byte	0xf1
+        .byte	0x7c
+        .byte	0x48
+        .byte	0x29
+        .byte	0x04
+        .byte	0x24
+/* Below is encoding for vmovaps %zmm1, 64(%rsp).  */
+        .byte	0x62
+        .byte	0xf1
+        .byte	0x7c
+        .byte	0x48
+        .byte	0x29
+        .byte	0x4c
+        .byte	0x24
+        .byte	0x01
+/* Below is encoding for vmovaps (%rsp), %ymm0.  */
+        .byte	0xc5
+        .byte	0xfc
+        .byte	0x28
+        .byte	0x04
+        .byte	0x24
+/* Below is encoding for vmovaps 64(%rsp), %ymm1.  */
+        .byte	0xc5
+        .byte	0xfc
+        .byte	0x28
+        .byte	0x4c
+        .byte	0x24
+        .byte	0x40
+        call      HIDDEN_JUMPTARGET(\callee)
+/* Save the first-half result; the second call below clobbers %ymm0
+   and the low half of the first argument at (%rsp) is dead by now.  */
+        vmovaps   %ymm0, (%rsp)
+/* Below is encoding for vmovaps 32(%rsp), %ymm0.  */
+        .byte	0xc5
+        .byte	0xfc
+        .byte	0x28
+        .byte	0x44
+        .byte	0x24
+        .byte	0x20
+/* Below is encoding for vmovaps 96(%rsp), %ymm1.  */
+        .byte	0xc5
+        .byte	0xfc
+        .byte	0x28
+        .byte	0x4c
+        .byte	0x24
+        .byte	0x60
+        call      HIDDEN_JUMPTARGET(\callee)
+/* Join the halves: store the second result next to the first.  */
+        vmovaps   %ymm0, 32(%rsp)
+/* Below is encoding for vmovaps (%rsp), %zmm0.  */
+        .byte	0x62
+        .byte	0xf1
+        .byte	0x7c
+        .byte	0x48
+        .byte	0x28
+        .byte	0x04
+        .byte	0x24
+        movq      %rbp, %rsp
+        cfi_def_cfa_register (%rsp)
+        popq      %rbp
+        cfi_adjust_cfa_offset (-8)
+        cfi_restore (%rbp)
+        ret
+.endm
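
The .byte sequences above hand-encode the AVX512 instructions because
binutils at the time could not yet assemble them.  Note that EVEX
compresses 8-bit displacements by the operand size, so the 64-byte
offset in "vmovaps %zmm1, 64(%rsp)" is encoded as a disp8 of 0x01,
while the plain AVX ymm loads keep an uncompressed disp8 (0x40 for
64).  Functionally the wrapper is one more level of the same
split-call-reassemble scheme; an intrinsics sketch (powf8 is a
hypothetical stand-in for the AVX2 callee, and the 256-bit
extract/insert forms assume -mavx512dq):

    /* Intrinsics sketch of WRAPPER_IMPL_AVX512_ff: split each
       16-float argument into 256-bit halves, run the 8-lane kernel
       twice, and reassemble the 512-bit result.  */
    #include <immintrin.h>

    extern __m256 powf8 (__m256 x, __m256 y);  /* hypothetical kernel */

    __m512
    powf16_via_powf8 (__m512 x, __m512 y)
    {
      __m256 lo = powf8 (_mm512_castps512_ps256 (x),
                         _mm512_castps512_ps256 (y));
      __m256 hi = powf8 (_mm512_extractf32x8_ps (x, 1),
                         _mm512_extractf32x8_ps (y, 1));
      return _mm512_insertf32x8 (_mm512_castps256_ps512 (lo), hi, 1);
    }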