author     Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-17 15:58:05 +0300
committer  Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-17 15:58:05 +0300
commit     9c02f663f6b387b3905b629ffe584c9abf2030dc (patch)
tree       587a88eca7b4c3abd7c5482c07c7a35778025785 /sysdeps/x86_64/fpu/multiarch
parent     774488f88aeed6b838fe29c3c7561433c242a3c9 (diff)
Vector exp for x86_64 and tests.
Here is an implementation of vectorized exp containing SSE, AVX, AVX2 and
AVX512 versions according to the Vector ABI
<https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.

	* bits/libm-simd-decl-stubs.h: Added stubs for exp.
	* math/bits/mathcalls.h: Added exp declaration with __MATHCALL_VEC.
	* sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New versions added.
	* sysdeps/x86/fpu/bits/math-vector.h: Added SIMD declaration and asm
	redirections for exp.
	* sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
	* sysdeps/x86_64/fpu/Versions: New versions added.
	* sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
	* sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines):
	Added build of SSE, AVX2 and AVX512 IFUNC versions.
	* sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core.S: New file.
	* sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S: New file.
	* sysdeps/x86_64/fpu/svml_d_exp2_core.S: New file.
	* sysdeps/x86_64/fpu/svml_d_exp4_core.S: New file.
	* sysdeps/x86_64/fpu/svml_d_exp4_core_avx.S: New file.
	* sysdeps/x86_64/fpu/svml_d_exp8_core.S: New file.
	* sysdeps/x86_64/fpu/svml_d_exp_data.S: New file.
	* sysdeps/x86_64/fpu/svml_d_exp_data.h: New file.
	* sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c: Added vector exp test.
	* sysdeps/x86_64/fpu/test-double-vlen2.c: Likewise.
	* sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c: Likewise.
	* sysdeps/x86_64/fpu/test-double-vlen4-avx2.c: Likewise.
	* sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c: Likewise.
	* sysdeps/x86_64/fpu/test-double-vlen4.c: Likewise.
	* sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c: Likewise.
	* sysdeps/x86_64/fpu/test-double-vlen8.c: Likewise.
	* NEWS: Mention addition of x86_64 vector exp.
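The new symbols (_ZGVbN2v_exp, _ZGVdN4v_exp, _ZGVeN8v_exp and friends) are not
meant to be called directly; the SIMD declaration added to
sysdeps/x86/fpu/bits/math-vector.h lets a vectorizing compiler substitute them
for scalar exp calls in independent loops. A minimal usage sketch follows; the
file name vec_exp_demo.c and the exact compiler flags are assumptions, not
part of this commit.

/* vec_exp_demo.c - sketch, assuming GCC with OpenMP SIMD support and a glibc
   built with libmvec; build roughly as
     gcc -O2 -ffast-math -fopenmp-simd vec_exp_demo.c -lmvec -lm
   (flag spelling may differ between compiler versions).  */
#include <math.h>
#include <stdio.h>

#define N 1024
static double in[N], out[N];

int
main (void)
{
  for (int i = 0; i < N; i++)
    in[i] = i * 0.01;

  /* Each iteration is independent, so the vectorizer may replace the scalar
     exp call with _ZGVbN2v_exp/_ZGVdN4v_exp/_ZGVeN8v_exp as declared through
     <bits/math-vector.h>.  */
#pragma omp simd
  for (int i = 0; i < N; i++)
    out[i] = exp (in[i]);

  printf ("out[100] = %f\n", out[100]);
  return 0;
}

Which of the per-ISA kernels actually runs is decided at load time by the
dispatch stubs shown in the diff below.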
Diffstat (limited to 'sysdeps/x86_64/fpu/multiarch')
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/Makefile                     3
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core.S          38
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S    225
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core.S          38
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S    212
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core.S          39
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S  456
7 files changed, 1010 insertions(+), 1 deletion(-)
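The svml_d_exp{2,4,8}_core.S files in the diff below are small dispatch stubs:
each exports a _ZGV*_exp symbol as a GNU indirect function whose resolver
inspects glibc's __cpu_features table and returns the SSE4.1, AVX2, AVX-512 or
wrapper fallback kernel. The following is only a rough C analogue of that
mechanism: vec_exp2, vec_exp2_sse4 and vec_exp2_fallback are hypothetical
names, and __builtin_cpu_supports stands in for the internal
__init_cpu_features/__cpu_features checks performed by the real assembly.

/* C-level analogue of the assembly IFUNC stubs; not glibc code.  */
#include <math.h>

typedef double v2df __attribute__ ((vector_size (16)));

/* Fallback: element-wise scalar exp, like the *_sse2 wrapper path.  */
static v2df
vec_exp2_fallback (v2df x)
{
  v2df r = { exp (x[0]), exp (x[1]) };
  return r;
}

/* Stand-in for the SSE4.1 kernel (_ZGVbN2v_exp_sse4 in the real code).  */
static v2df
__attribute__ ((target ("sse4.1")))
vec_exp2_sse4 (v2df x)
{
  return vec_exp2_fallback (x);	/* The real kernel uses roundpd etc.  */
}

/* Resolver: runs once at symbol-resolution time, mirroring the
   cmpl/testl/leaq sequence in svml_d_exp2_core.S.  */
static v2df (*resolve_vec_exp2 (void)) (v2df)
{
  __builtin_cpu_init ();
  return __builtin_cpu_supports ("sse4.1")
	 ? vec_exp2_sse4 : vec_exp2_fallback;
}

v2df vec_exp2 (v2df) __attribute__ ((ifunc ("resolve_vec_exp2")));

A caller then uses vec_exp2 like any ordinary function; the dynamic linker has
already bound it to the widest supported kernel.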
diff --git a/sysdeps/x86_64/fpu/multiarch/Makefile b/sysdeps/x86_64/fpu/multiarch/Makefile index 5fc6ea3d23..d6355ae98d 100644 --- a/sysdeps/x86_64/fpu/multiarch/Makefile +++ b/sysdeps/x86_64/fpu/multiarch/Makefile @@ -62,5 +62,6 @@ libmvec-sysdep_routines += svml_d_cos2_core_sse4 svml_d_cos4_core_avx2 \ svml_s_cosf16_core_avx512 svml_s_sinf4_core_sse4 \ svml_s_sinf8_core_avx2 svml_s_sinf16_core_avx512 \ svml_s_logf4_core_sse4 svml_s_logf8_core_avx2 \ - svml_s_logf16_core_avx512 + svml_s_logf16_core_avx512 svml_d_exp2_core_sse4 \ + svml_d_exp4_core_avx2 svml_d_exp8_core_avx512 endif diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core.S new file mode 100644 index 0000000000..ef3dc49a1c --- /dev/null +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core.S @@ -0,0 +1,38 @@ +/* Multiple versions of vectorized exp. + Copyright (C) 2014-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> +#include <init-arch.h> + + .text +ENTRY (_ZGVbN2v_exp) + .type _ZGVbN2v_exp, @gnu_indirect_function + cmpl $0, KIND_OFFSET+__cpu_features(%rip) + jne 1f + call __init_cpu_features +1: leaq _ZGVbN2v_exp_sse4(%rip), %rax + testl $bit_SSE4_1, __cpu_features+CPUID_OFFSET+index_SSE4_1(%rip) + jz 2f + ret +2: leaq _ZGVbN2v_exp_sse2(%rip), %rax + ret +END (_ZGVbN2v_exp) +libmvec_hidden_def (_ZGVbN2v_exp) + +#define _ZGVbN2v_exp _ZGVbN2v_exp_sse2 +#include "../svml_d_exp2_core.S" diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S new file mode 100644 index 0000000000..1f5445924a --- /dev/null +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp2_core_sse4.S @@ -0,0 +1,225 @@ +/* Function exp vectorized with SSE4. + Copyright (C) 2014-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. 
*/ + +#include <sysdep.h> +#include "svml_d_exp_data.h" + + .text +ENTRY (_ZGVbN2v_exp_sse4) +/* + ALGORITHM DESCRIPTION: + + Argument representation: + N = rint(X*2^k/ln2) = 2^k*M+j + X = N*ln2/2^k + r = M*ln2 + ln2*(j/2^k) + r + then -ln2/2^(k+1) < r < ln2/2^(k+1) + Alternatively: + N = trunc(X*2^k/ln2) + then 0 < r < ln2/2^k + + Result calculation: + exp(X) = exp(M*ln2 + ln2*(j/2^k) + r) + = 2^M * 2^(j/2^k) * exp(r) + 2^M is calculated by bit manipulation + 2^(j/2^k) is stored in table + exp(r) is approximated by polynomial. + + The table lookup is skipped if k = 0. */ + + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-64, %rsp + subq $320, %rsp + movaps %xmm0, %xmm3 + movq __svml_dexp_data@GOTPCREL(%rip), %r8 + +/* iAbsX = (int)(lX>>32), lX = *(longlong*)&X */ + pshufd $221, %xmm3, %xmm7 + movups __dbInvLn2(%r8), %xmm0 + +/* dK = X*dbInvLn2 */ + mulpd %xmm3, %xmm0 + movq __iAbsMask(%r8), %xmm5 + movq __iDomainRange(%r8), %xmm6 + +/* iAbsX = iAbsX&iAbsMask */ + pand %xmm5, %xmm7 + +/* iRangeMask = (iAbsX>iDomainRange) */ + pcmpgtd %xmm6, %xmm7 + +/* Mask = iRangeMask?1:0, set mask for overflow/underflow */ + movmskps %xmm7, %eax + +/* dN = rint(X*2^k/Ln2) */ + xorps %xmm7, %xmm7 + movups __dbLn2hi(%r8), %xmm5 + movups __dbLn2lo(%r8), %xmm6 + roundpd $0, %xmm0, %xmm7 + +/* dR = X - dN*dbLn2hi, dbLn2hi is 52-8-k hi bits of ln2/2^k */ + mulpd %xmm7, %xmm5 + +/* dR = dR - dN*dbLn2lo, dbLn2lo is 40..94 bits of lo part of ln2/2^k */ + mulpd %xmm6, %xmm7 + movups __dbShifter(%r8), %xmm4 + +/* dM = X*dbInvLn2+dbShifter */ + addpd %xmm0, %xmm4 + movaps %xmm3, %xmm0 + subpd %xmm5, %xmm0 + subpd %xmm7, %xmm0 + movups __dPC2(%r8), %xmm5 + +/* exp(r) = b0+r*(b0+r*(b1+r*b2)) */ + mulpd %xmm0, %xmm5 + addpd __dPC1(%r8), %xmm5 + mulpd %xmm0, %xmm5 + movups __dPC0(%r8), %xmm6 + addpd %xmm6, %xmm5 + mulpd %xmm5, %xmm0 + movdqu __lIndexMask(%r8), %xmm2 + +/* lIndex = (*(longlong*)&dM)&lIndexMask, lIndex is the lower K bits of lM */ + movdqa %xmm2, %xmm1 + +/* lM = (*(longlong*)&dM)&(~lIndexMask) */ + pandn %xmm4, %xmm2 + pand %xmm4, %xmm1 + +/* lM = lM<<(52-K), 2^M */ + psllq $42, %xmm2 + +/* table lookup for dT[j] = 2^(j/2^k) */ + movd %xmm1, %edx + pextrw $4, %xmm1, %ecx + addpd %xmm0, %xmm6 + shll $3, %edx + shll $3, %ecx + movq (%r8,%rdx), %xmm0 + andl $3, %eax + movhpd (%r8,%rcx), %xmm0 + +/* 2^(j/2^k) * exp(r) */ + mulpd %xmm6, %xmm0 + +/* multiply by 2^M through integer add */ + paddq %xmm2, %xmm0 + jne .LBL_1_3 + +.LBL_1_2: + cfi_remember_state + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret + +.LBL_1_3: + cfi_restore_state + movups %xmm3, 192(%rsp) + movups %xmm0, 256(%rsp) + je .LBL_1_2 + + xorb %cl, %cl + xorl %edx, %edx + movups %xmm8, 112(%rsp) + movups %xmm9, 96(%rsp) + movups %xmm10, 80(%rsp) + movups %xmm11, 64(%rsp) + movups %xmm12, 48(%rsp) + movups %xmm13, 32(%rsp) + movups %xmm14, 16(%rsp) + movups %xmm15, (%rsp) + movq %rsi, 136(%rsp) + movq %rdi, 128(%rsp) + movq %r12, 168(%rsp) + cfi_offset_rel_rsp (12, 168) + movb %cl, %r12b + movq %r13, 160(%rsp) + cfi_offset_rel_rsp (13, 160) + movl %eax, %r13d + movq %r14, 152(%rsp) + cfi_offset_rel_rsp (14, 152) + movl %edx, %r14d + movq %r15, 144(%rsp) + cfi_offset_rel_rsp (15, 144) + cfi_remember_state + +.LBL_1_6: + btl %r14d, %r13d + jc .LBL_1_12 + +.LBL_1_7: + lea 1(%r14), %esi + btl %esi, %r13d + jc .LBL_1_10 + +.LBL_1_8: + incb %r12b + addl $2, %r14d + cmpb $16, %r12b + jb .LBL_1_6 + + movups 
112(%rsp), %xmm8 + movups 96(%rsp), %xmm9 + movups 80(%rsp), %xmm10 + movups 64(%rsp), %xmm11 + movups 48(%rsp), %xmm12 + movups 32(%rsp), %xmm13 + movups 16(%rsp), %xmm14 + movups (%rsp), %xmm15 + movq 136(%rsp), %rsi + movq 128(%rsp), %rdi + movq 168(%rsp), %r12 + cfi_restore (%r12) + movq 160(%rsp), %r13 + cfi_restore (%r13) + movq 152(%rsp), %r14 + cfi_restore (%r14) + movq 144(%rsp), %r15 + cfi_restore (%r15) + movups 256(%rsp), %xmm0 + jmp .LBL_1_2 + +.LBL_1_10: + cfi_restore_state + movzbl %r12b, %r15d + shlq $4, %r15 + movsd 200(%rsp,%r15), %xmm0 + + call exp@PLT + + movsd %xmm0, 264(%rsp,%r15) + jmp .LBL_1_8 + +.LBL_1_12: + movzbl %r12b, %r15d + shlq $4, %r15 + movsd 192(%rsp,%r15), %xmm0 + + call exp@PLT + + movsd %xmm0, 256(%rsp,%r15) + jmp .LBL_1_7 + +END (_ZGVbN2v_exp_sse4) diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core.S new file mode 100644 index 0000000000..7f2ebdef67 --- /dev/null +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core.S @@ -0,0 +1,38 @@ +/* Multiple versions of vectorized exp. + Copyright (C) 2014-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> +#include <init-arch.h> + + .text +ENTRY (_ZGVdN4v_exp) + .type _ZGVdN4v_exp, @gnu_indirect_function + cmpl $0, KIND_OFFSET+__cpu_features(%rip) + jne 1f + call __init_cpu_features +1: leaq _ZGVdN4v_exp_avx2(%rip), %rax + testl $bit_AVX2_Usable, __cpu_features+FEATURE_OFFSET+index_AVX2_Usable(%rip) + jz 2f + ret +2: leaq _ZGVdN4v_exp_sse_wrapper(%rip), %rax + ret +END (_ZGVdN4v_exp) +libmvec_hidden_def (_ZGVdN4v_exp) + +#define _ZGVdN4v_exp _ZGVdN4v_exp_sse_wrapper +#include "../svml_d_exp4_core.S" diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S new file mode 100644 index 0000000000..a34e267433 --- /dev/null +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp4_core_avx2.S @@ -0,0 +1,212 @@ +/* Function exp vectorized with AVX2. + Copyright (C) 2014-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. 
*/ + +#include <sysdep.h> +#include "svml_d_exp_data.h" + + .text +ENTRY (_ZGVdN4v_exp_avx2) +/* + ALGORITHM DESCRIPTION: + + Argument representation: + N = rint(X*2^k/ln2) = 2^k*M+j + X = N*ln2/2^k + r = M*ln2 + ln2*(j/2^k) + r + then -ln2/2^(k+1) < r < ln2/2^(k+1) + Alternatively: + N = trunc(X*2^k/ln2) + then 0 < r < ln2/2^k + + Result calculation: + exp(X) = exp(M*ln2 + ln2*(j/2^k) + r) + = 2^M * 2^(j/2^k) * exp(r) + 2^M is calculated by bit manipulation + 2^(j/2^k) is stored in table + exp(r) is approximated by polynomial + + The table lookup is skipped if k = 0. */ + + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-64, %rsp + subq $448, %rsp + movq __svml_dexp_data@GOTPCREL(%rip), %rax + vmovdqa %ymm0, %ymm2 + vmovupd __dbInvLn2(%rax), %ymm3 + vmovupd __dbShifter(%rax), %ymm1 + vmovupd __lIndexMask(%rax), %ymm4 + +/* dM = X*dbInvLn2+dbShifter, dbInvLn2 = 2^k/Ln2 */ + vfmadd213pd %ymm1, %ymm2, %ymm3 + +/* iAbsX = (int)(lX>>32), lX = *(longlong*)&X */ + vextracti128 $1, %ymm2, %xmm5 + vshufps $221, %xmm5, %xmm2, %xmm6 + +/* iAbsX = iAbsX&iAbsMask */ + vandps __iAbsMask(%rax), %xmm6, %xmm7 + +/* dN = dM-dbShifter, dN = rint(X*2^k/Ln2) */ + vsubpd %ymm1, %ymm3, %ymm6 + +/* iRangeMask = (iAbsX>iDomainRange) */ + vpcmpgtd __iDomainRange(%rax), %xmm7, %xmm0 + vmovupd __dbLn2hi(%rax), %ymm1 + vmovupd __dPC0(%rax), %ymm7 + +/* Mask = iRangeMask?1:0, set mask for overflow/underflow */ + vmovmskps %xmm0, %ecx + vmovupd __dPC2(%rax), %ymm0 + +/* dR = X - dN*dbLn2hi, dbLn2hi is 52-8-k hi bits of ln2/2^k */ + vmovdqa %ymm2, %ymm5 + vfnmadd231pd %ymm6, %ymm1, %ymm5 + +/* dR = dR - dN*dbLn2lo, dbLn2lo is 40..94 bits of lo part of ln2/2^k */ + vfnmadd132pd __dbLn2lo(%rax), %ymm5, %ymm6 + +/* exp(r) = b0+r*(b0+r*(b1+r*b2)) */ + vfmadd213pd __dPC1(%rax), %ymm6, %ymm0 + vfmadd213pd %ymm7, %ymm6, %ymm0 + vfmadd213pd %ymm7, %ymm6, %ymm0 + +/* lIndex = (*(longlong*)&dM)&lIndexMask, lIndex is the lower K bits of lM */ + vandps %ymm4, %ymm3, %ymm1 + +/* table lookup for dT[j] = 2^(j/2^k) */ + vxorpd %ymm6, %ymm6, %ymm6 + vpcmpeqd %ymm5, %ymm5, %ymm5 + vgatherqpd %ymm5, (%rax,%ymm1,8), %ymm6 + +/* lM = (*(longlong*)&dM)&(~lIndexMask) */ + vpandn %ymm3, %ymm4, %ymm3 + +/* 2^(j/2^k) * exp(r) */ + vmulpd %ymm0, %ymm6, %ymm0 + +/* lM = lM<<(52-K), 2^M */ + vpsllq $42, %ymm3, %ymm4 + +/* multiply by 2^M through integer add */ + vpaddq %ymm4, %ymm0, %ymm0 + testl %ecx, %ecx + jne .LBL_1_3 + +.LBL_1_2: + cfi_remember_state + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret + +.LBL_1_3: + cfi_restore_state + vmovupd %ymm2, 320(%rsp) + vmovupd %ymm0, 384(%rsp) + je .LBL_1_2 + + xorb %dl, %dl + xorl %eax, %eax + vmovups %ymm8, 224(%rsp) + vmovups %ymm9, 192(%rsp) + vmovups %ymm10, 160(%rsp) + vmovups %ymm11, 128(%rsp) + vmovups %ymm12, 96(%rsp) + vmovups %ymm13, 64(%rsp) + vmovups %ymm14, 32(%rsp) + vmovups %ymm15, (%rsp) + movq %rsi, 264(%rsp) + movq %rdi, 256(%rsp) + movq %r12, 296(%rsp) + cfi_offset_rel_rsp (12, 296) + movb %dl, %r12b + movq %r13, 288(%rsp) + cfi_offset_rel_rsp (13, 288) + movl %ecx, %r13d + movq %r14, 280(%rsp) + cfi_offset_rel_rsp (14, 280) + movl %eax, %r14d + movq %r15, 272(%rsp) + cfi_offset_rel_rsp (15, 272) + cfi_remember_state + +.LBL_1_6: + btl %r14d, %r13d + jc .LBL_1_12 + +.LBL_1_7: + lea 1(%r14), %esi + btl %esi, %r13d + jc .LBL_1_10 + +.LBL_1_8: + incb %r12b + addl $2, %r14d + cmpb $16, %r12b + jb .LBL_1_6 + + vmovups 224(%rsp), %ymm8 + vmovups 
192(%rsp), %ymm9 + vmovups 160(%rsp), %ymm10 + vmovups 128(%rsp), %ymm11 + vmovups 96(%rsp), %ymm12 + vmovups 64(%rsp), %ymm13 + vmovups 32(%rsp), %ymm14 + vmovups (%rsp), %ymm15 + vmovupd 384(%rsp), %ymm0 + movq 264(%rsp), %rsi + movq 256(%rsp), %rdi + movq 296(%rsp), %r12 + cfi_restore (%r12) + movq 288(%rsp), %r13 + cfi_restore (%r13) + movq 280(%rsp), %r14 + cfi_restore (%r14) + movq 272(%rsp), %r15 + cfi_restore (%r15) + jmp .LBL_1_2 + +.LBL_1_10: + cfi_restore_state + movzbl %r12b, %r15d + shlq $4, %r15 + vmovsd 328(%rsp,%r15), %xmm0 + vzeroupper + + call exp@PLT + + vmovsd %xmm0, 392(%rsp,%r15) + jmp .LBL_1_8 + +.LBL_1_12: + movzbl %r12b, %r15d + shlq $4, %r15 + vmovsd 320(%rsp,%r15), %xmm0 + vzeroupper + + call exp@PLT + + vmovsd %xmm0, 384(%rsp,%r15) + jmp .LBL_1_7 + +END (_ZGVdN4v_exp_avx2) diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core.S new file mode 100644 index 0000000000..8f837fbfb9 --- /dev/null +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core.S @@ -0,0 +1,39 @@ +/* Multiple versions of vectorized exp. + Copyright (C) 2014-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> +#include <init-arch.h> + + .text +ENTRY (_ZGVeN8v_exp) + .type _ZGVeN8v_exp, @gnu_indirect_function + cmpl $0, KIND_OFFSET+__cpu_features(%rip) + jne 1 + call __init_cpu_features +1: leaq _ZGVeN8v_exp_skx(%rip), %rax + testl $bit_AVX512DQ_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512DQ_Usable(%rip) + jnz 3 +2: leaq _ZGVeN8v_exp_knl(%rip), %rax + testl $bit_AVX512F_Usable, __cpu_features+FEATURE_OFFSET+index_AVX512F_Usable(%rip) + jnz 3 + leaq _ZGVeN8v_exp_avx2_wrapper(%rip), %rax +3: ret +END (_ZGVeN8v_exp) + +#define _ZGVeN8v_exp _ZGVeN8v_exp_avx2_wrapper +#include "../svml_d_exp8_core.S" diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S new file mode 100644 index 0000000000..049a7e49cd --- /dev/null +++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S @@ -0,0 +1,456 @@ +/* Function exp vectorized with AVX-512. KNL and SKX versions. + Copyright (C) 2014-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> +#include "svml_d_exp_data.h" +#include "svml_d_wrapper_impl.h" + + .text +ENTRY (_ZGVeN8v_exp_knl) +#ifndef HAVE_AVX512_ASM_SUPPORT +WRAPPER_IMPL_AVX512 _ZGVdN4v_exp +#else +/* + ALGORITHM DESCRIPTION: + + Argument representation: + N = rint(X*2^k/ln2) = 2^k*M+j + X = N*ln2/2^k + r = M*ln2 + ln2*(j/2^k) + r + then -ln2/2^(k+1) < r < ln2/2^(k+1) + Alternatively: + N = trunc(X*2^k/ln2) + then 0 < r < ln2/2^k + + Result calculation: + exp(X) = exp(M*ln2 + ln2*(j/2^k) + r) + = 2^M * 2^(j/2^k) * exp(r) + 2^M is calculated by bit manipulation + 2^(j/2^k) is stored in table + exp(r) is approximated by polynomial + + The table lookup is skipped if k = 0. */ + + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-64, %rsp + subq $1280, %rsp + movq __svml_dexp_data@GOTPCREL(%rip), %rax + +/* dR = X - dN*dbLn2hi, dbLn2hi is 52-8-k hi bits of ln2/2^k */ + vmovaps %zmm0, %zmm8 + +/* iAbsX = (int)(lX>>32), lX = *(longlong*)&X */ + vpsrlq $32, %zmm0, %zmm1 + +/* iAbsX = iAbsX&iAbsMask */ + movl $255, %edx + vpmovqd %zmm1, %ymm2 + kmovw %edx, %k2 + +/* iRangeMask = (iAbsX>iDomainRange) */ + movl $-1, %ecx + +/* table lookup for dT[j] = 2^(j/2^k) */ + vpxord %zmm11, %zmm11, %zmm11 + vmovups __dbInvLn2(%rax), %zmm5 + vmovups __dbLn2hi(%rax), %zmm7 + kxnorw %k3, %k3, %k3 + +/* dM = X*dbInvLn2+dbShifter, dbInvLn2 = 2^k/Ln2 */ + vfmadd213pd __dbShifter(%rax), %zmm0, %zmm5 + vmovups __dPC2(%rax), %zmm12 + +/* dN = dM-dbShifter, dN = rint(X*2^k/Ln2) */ + vsubpd __dbShifter(%rax), %zmm5, %zmm9 + vmovups __lIndexMask(%rax), %zmm4 + vfnmadd231pd %zmm9, %zmm7, %zmm8 + vpandd __iAbsMask(%rax), %zmm2, %zmm2{%k2} + +/* lIndex = (*(longlong*)&dM)&lIndexMask, lIndex is the lower K bits of lM */ + vpandq %zmm4, %zmm5, %zmm10 + vgatherqpd (%rax,%zmm10,8), %zmm11{%k3} + vpcmpgtd __iDomainRange(%rax), %zmm2, %k1{%k2} + +/* lM = (*(longlong*)&dM)&(~lIndexMask) */ + vpandnq %zmm5, %zmm4, %zmm6 + vpbroadcastd %ecx, %zmm3{%k1}{z} + +/* lM = lM<<(52-K), 2^M */ + vpsllq $42, %zmm6, %zmm14 + +/* dR = dR - dN*dbLn2lo, dbLn2lo is 40..94 bits of lo part of ln2/2^k */ + vfnmadd132pd __dbLn2lo(%rax), %zmm8, %zmm9 + +/* Mask = iRangeMask?1:0, set mask for overflow/underflow */ + vptestmd %zmm3, %zmm3, %k0{%k2} + +/* exp(r) = b0+r*(b0+r*(b1+r*b2)) */ + vfmadd213pd __dPC1(%rax), %zmm9, %zmm12 + kmovw %k0, %ecx + movzbl %cl, %ecx + vfmadd213pd __dPC0(%rax), %zmm9, %zmm12 + vfmadd213pd __dPC0(%rax), %zmm9, %zmm12 + +/* 2^(j/2^k) * exp(r) */ + vmulpd %zmm12, %zmm11, %zmm13 + +/* multiply by 2^M through integer add */ + vpaddq %zmm14, %zmm13, %zmm1 + testl %ecx, %ecx + jne .LBL_1_3 + +.LBL_1_2: + cfi_remember_state + vmovaps %zmm1, %zmm0 + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret + +.LBL_1_3: + cfi_restore_state + vmovups %zmm0, 1152(%rsp) + vmovups %zmm1, 1216(%rsp) + je .LBL_1_2 + + xorb %dl, %dl + kmovw %k4, 1048(%rsp) + xorl %eax, %eax + kmovw %k5, 1040(%rsp) + kmovw %k6, 1032(%rsp) + kmovw %k7, 1024(%rsp) + vmovups %zmm16, 960(%rsp) + vmovups %zmm17, 896(%rsp) + vmovups %zmm18, 832(%rsp) + vmovups %zmm19, 768(%rsp) + vmovups %zmm20, 704(%rsp) + vmovups %zmm21, 640(%rsp) + vmovups %zmm22, 576(%rsp) + vmovups %zmm23, 512(%rsp) + vmovups %zmm24, 448(%rsp) + vmovups %zmm25, 384(%rsp) + vmovups %zmm26, 320(%rsp) + vmovups 
%zmm27, 256(%rsp) + vmovups %zmm28, 192(%rsp) + vmovups %zmm29, 128(%rsp) + vmovups %zmm30, 64(%rsp) + vmovups %zmm31, (%rsp) + movq %rsi, 1064(%rsp) + movq %rdi, 1056(%rsp) + movq %r12, 1096(%rsp) + cfi_offset_rel_rsp (12, 1096) + movb %dl, %r12b + movq %r13, 1088(%rsp) + cfi_offset_rel_rsp (13, 1088) + movl %ecx, %r13d + movq %r14, 1080(%rsp) + cfi_offset_rel_rsp (14, 1080) + movl %eax, %r14d + movq %r15, 1072(%rsp) + cfi_offset_rel_rsp (15, 1072) + cfi_remember_state + +.LBL_1_6: + btl %r14d, %r13d + jc .LBL_1_12 + +.LBL_1_7: + lea 1(%r14), %esi + btl %esi, %r13d + jc .LBL_1_10 + +.LBL_1_8: + addb $1, %r12b + addl $2, %r14d + cmpb $16, %r12b + jb .LBL_1_6 + + kmovw 1048(%rsp), %k4 + movq 1064(%rsp), %rsi + kmovw 1040(%rsp), %k5 + movq 1056(%rsp), %rdi + kmovw 1032(%rsp), %k6 + movq 1096(%rsp), %r12 + cfi_restore (%r12) + movq 1088(%rsp), %r13 + cfi_restore (%r13) + kmovw 1024(%rsp), %k7 + vmovups 960(%rsp), %zmm16 + vmovups 896(%rsp), %zmm17 + vmovups 832(%rsp), %zmm18 + vmovups 768(%rsp), %zmm19 + vmovups 704(%rsp), %zmm20 + vmovups 640(%rsp), %zmm21 + vmovups 576(%rsp), %zmm22 + vmovups 512(%rsp), %zmm23 + vmovups 448(%rsp), %zmm24 + vmovups 384(%rsp), %zmm25 + vmovups 320(%rsp), %zmm26 + vmovups 256(%rsp), %zmm27 + vmovups 192(%rsp), %zmm28 + vmovups 128(%rsp), %zmm29 + vmovups 64(%rsp), %zmm30 + vmovups (%rsp), %zmm31 + movq 1080(%rsp), %r14 + cfi_restore (%r14) + movq 1072(%rsp), %r15 + cfi_restore (%r15) + vmovups 1216(%rsp), %zmm1 + jmp .LBL_1_2 + +.LBL_1_10: + cfi_restore_state + movzbl %r12b, %r15d + shlq $4, %r15 + vmovsd 1160(%rsp,%r15), %xmm0 + call exp@PLT + vmovsd %xmm0, 1224(%rsp,%r15) + jmp .LBL_1_8 + +.LBL_1_12: + movzbl %r12b, %r15d + shlq $4, %r15 + vmovsd 1152(%rsp,%r15), %xmm0 + call exp@PLT + vmovsd %xmm0, 1216(%rsp,%r15) + jmp .LBL_1_7 +#endif +END (_ZGVeN8v_exp_knl) + +ENTRY (_ZGVeN8v_exp_skx) +#ifndef HAVE_AVX512_ASM_SUPPORT +WRAPPER_IMPL_AVX512 _ZGVdN4v_exp +#else +/* + ALGORITHM DESCRIPTION: + + Argument representation: + N = rint(X*2^k/ln2) = 2^k*M+j + X = N*ln2/2^k + r = M*ln2 + ln2*(j/2^k) + r + then -ln2/2^(k+1) < r < ln2/2^(k+1) + Alternatively: + N = trunc(X*2^k/ln2) + then 0 < r < ln2/2^k + + Result calculation: + exp(X) = exp(M*ln2 + ln2*(j/2^k) + r) + = 2^M * 2^(j/2^k) * exp(r) + 2^M is calculated by bit manipulation + 2^(j/2^k) is stored in table + exp(r) is approximated by polynomial + + The table lookup is skipped if k = 0. 
*/ + + pushq %rbp + cfi_adjust_cfa_offset (8) + cfi_rel_offset (%rbp, 0) + movq %rsp, %rbp + cfi_def_cfa_register (%rbp) + andq $-64, %rsp + subq $1280, %rsp + movq __svml_dexp_data@GOTPCREL(%rip), %rax + +/* table lookup for dT[j] = 2^(j/2^k) */ + kxnorw %k1, %k1, %k1 + +/* iAbsX = (int)(lX>>32), lX = *(longlong*)&X */ + vpsrlq $32, %zmm0, %zmm1 + vmovups __dbInvLn2(%rax), %zmm7 + vmovups __dbShifter(%rax), %zmm5 + vmovups __lIndexMask(%rax), %zmm6 + vmovups __dbLn2hi(%rax), %zmm9 + vmovups __dPC0(%rax), %zmm12 + +/* dM = X*dbInvLn2+dbShifter, dbInvLn2 = 2^k/Ln2 */ + vfmadd213pd %zmm5, %zmm0, %zmm7 + vpmovqd %zmm1, %ymm2 + +/* dN = dM-dbShifter, dN = rint(X*2^k/Ln2) */ + vsubpd %zmm5, %zmm7, %zmm11 + +/* iAbsX = iAbsX&iAbsMask */ + vpand __iAbsMask(%rax), %ymm2, %ymm3 + +/* dR = X - dN*dbLn2hi, dbLn2hi is 52-8-k hi bits of ln2/2^k */ + vmovaps %zmm0, %zmm10 + vfnmadd231pd %zmm11, %zmm9, %zmm10 + vmovups __dPC2(%rax), %zmm9 + +/* dR = dR - dN*dbLn2lo, dbLn2lo is 40..94 bits of lo part of ln2/2^k */ + vfnmadd132pd __dbLn2lo(%rax), %zmm10, %zmm11 + +/* exp(r) = b0+r*(b0+r*(b1+r*b2)) */ + vfmadd213pd __dPC1(%rax), %zmm11, %zmm9 + vfmadd213pd %zmm12, %zmm11, %zmm9 + vfmadd213pd %zmm12, %zmm11, %zmm9 + +/* iRangeMask = (iAbsX>iDomainRange) */ + vpcmpgtd __iDomainRange(%rax), %ymm3, %ymm4 + +/* Mask = iRangeMask?1:0, set mask for overflow/underflow */ + vmovmskps %ymm4, %ecx + +/* lIndex = (*(longlong*)&dM)&lIndexMask, lIndex is the lower K bits of lM */ + vpandq %zmm6, %zmm7, %zmm13 + vpmovqd %zmm13, %ymm14 + vpxord %zmm15, %zmm15, %zmm15 + vgatherdpd (%rax,%ymm14,8), %zmm15{%k1} + +/* 2^(j/2^k) * exp(r) */ + vmulpd %zmm9, %zmm15, %zmm10 + +/* lM = (*(longlong*)&dM)&(~lIndexMask) */ + vpandnq %zmm7, %zmm6, %zmm8 + +/* lM = lM<<(52-K), 2^M */ + vpsllq $42, %zmm8, %zmm1 + +/* multiply by 2^M through integer add */ + vpaddq %zmm1, %zmm10, %zmm1 + testl %ecx, %ecx + jne .LBL_2_3 + +.LBL_2_2: + cfi_remember_state + vmovaps %zmm1, %zmm0 + movq %rbp, %rsp + cfi_def_cfa_register (%rsp) + popq %rbp + cfi_adjust_cfa_offset (-8) + cfi_restore (%rbp) + ret + +.LBL_2_3: + cfi_restore_state + vmovups %zmm0, 1152(%rsp) + vmovups %zmm1, 1216(%rsp) + je .LBL_2_2 + + xorb %dl, %dl + xorl %eax, %eax + kmovw %k4, 1048(%rsp) + kmovw %k5, 1040(%rsp) + kmovw %k6, 1032(%rsp) + kmovw %k7, 1024(%rsp) + vmovups %zmm16, 960(%rsp) + vmovups %zmm17, 896(%rsp) + vmovups %zmm18, 832(%rsp) + vmovups %zmm19, 768(%rsp) + vmovups %zmm20, 704(%rsp) + vmovups %zmm21, 640(%rsp) + vmovups %zmm22, 576(%rsp) + vmovups %zmm23, 512(%rsp) + vmovups %zmm24, 448(%rsp) + vmovups %zmm25, 384(%rsp) + vmovups %zmm26, 320(%rsp) + vmovups %zmm27, 256(%rsp) + vmovups %zmm28, 192(%rsp) + vmovups %zmm29, 128(%rsp) + vmovups %zmm30, 64(%rsp) + vmovups %zmm31, (%rsp) + movq %rsi, 1064(%rsp) + movq %rdi, 1056(%rsp) + movq %r12, 1096(%rsp) + cfi_offset_rel_rsp (12, 1096) + movb %dl, %r12b + movq %r13, 1088(%rsp) + cfi_offset_rel_rsp (13, 1088) + movl %ecx, %r13d + movq %r14, 1080(%rsp) + cfi_offset_rel_rsp (14, 1080) + movl %eax, %r14d + movq %r15, 1072(%rsp) + cfi_offset_rel_rsp (15, 1072) + cfi_remember_state + +.LBL_2_6: + btl %r14d, %r13d + jc .LBL_2_12 + +.LBL_2_7: + lea 1(%r14), %esi + btl %esi, %r13d + jc .LBL_2_10 + +.LBL_2_8: + incb %r12b + addl $2, %r14d + cmpb $16, %r12b + jb .LBL_2_6 + + kmovw 1048(%rsp), %k4 + kmovw 1040(%rsp), %k5 + kmovw 1032(%rsp), %k6 + kmovw 1024(%rsp), %k7 + vmovups 960(%rsp), %zmm16 + vmovups 896(%rsp), %zmm17 + vmovups 832(%rsp), %zmm18 + vmovups 768(%rsp), %zmm19 + vmovups 704(%rsp), %zmm20 + vmovups 640(%rsp), 
%zmm21 + vmovups 576(%rsp), %zmm22 + vmovups 512(%rsp), %zmm23 + vmovups 448(%rsp), %zmm24 + vmovups 384(%rsp), %zmm25 + vmovups 320(%rsp), %zmm26 + vmovups 256(%rsp), %zmm27 + vmovups 192(%rsp), %zmm28 + vmovups 128(%rsp), %zmm29 + vmovups 64(%rsp), %zmm30 + vmovups (%rsp), %zmm31 + vmovups 1216(%rsp), %zmm1 + movq 1064(%rsp), %rsi + movq 1056(%rsp), %rdi + movq 1096(%rsp), %r12 + cfi_restore (%r12) + movq 1088(%rsp), %r13 + cfi_restore (%r13) + movq 1080(%rsp), %r14 + cfi_restore (%r14) + movq 1072(%rsp), %r15 + cfi_restore (%r15) + jmp .LBL_2_2 + +.LBL_2_10: + cfi_restore_state + movzbl %r12b, %r15d + shlq $4, %r15 + vmovsd 1160(%rsp,%r15), %xmm0 + vzeroupper + vmovsd 1160(%rsp,%r15), %xmm0 + call exp@PLT + vmovsd %xmm0, 1224(%rsp,%r15) + jmp .LBL_2_8 + +.LBL_2_12: + movzbl %r12b, %r15d + shlq $4, %r15 + vmovsd 1152(%rsp,%r15), %xmm0 + vzeroupper + vmovsd 1152(%rsp,%r15), %xmm0 + call exp@PLT + vmovsd %xmm0, 1216(%rsp,%r15) + jmp .LBL_2_7 + +#endif +END (_ZGVeN8v_exp_skx) |
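For readers following the ALGORITHM DESCRIPTION comments above, here is a
scalar C model of the same scheme: reduce X against ln2/2^K, look up
2^(j/2^K) in a table, evaluate a short polynomial for exp(r), and scale by
2^M through the exponent field. Everything below is an illustrative sketch:
the index width K = 10 is only inferred from the psllq $42 (i.e. 52 - K)
shift in the kernels, the Taylor coefficients replace the fitted dPC0..dPC2
constants from svml_d_exp_data.S, and the special-case handling (the
"call exp@PLT" paths for overflow/underflow lanes) is omitted.

/* exp_model.c - scalar sketch of the vectorized algorithm; build with
   gcc -O2 exp_model.c -lm.  */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define K 10			/* index bits: table has 2^K entries */
#define TBL_SIZE (1 << K)

static double dT[TBL_SIZE];	/* dT[j] = 2^(j/2^K), the __svml_dexp_data role */

static void
init_table (void)
{
  for (int j = 0; j < TBL_SIZE; j++)
    dT[j] = exp2 ((double) j / TBL_SIZE);
}

static double
exp_model (double x)
{
  const double ln2 = 0.6931471805599453094;
  const double ln2_2k = ln2 / TBL_SIZE;

  /* N = rint(X*2^K/ln2) = 2^K*M + j; the kernels get rint via the dbShifter
     add (or roundpd) instead of an explicit rint call.  */
  double dN = rint (x / ln2_2k);
  int64_t n = (int64_t) dN;
  int64_t j = n & (TBL_SIZE - 1);	/* lIndex: low K bits */
  int64_t m = (n - j) / TBL_SIZE;	/* lM: remaining bits, binary exponent */

  /* r = X - N*ln2/2^K; the kernels subtract dbLn2hi and dbLn2lo separately
     to keep r accurate.  */
  double r = x - dN * ln2_2k;

  /* exp(r) approximated by a short polynomial (plain Taylor here; the
     kernels use fitted coefficients dPC0..dPC2).  */
  double p = 1.0 + r * (1.0 + r * (0.5 + r * (1.0 / 6.0)));

  /* 2^M * 2^(j/2^K) * exp(r); ldexp plays the role of adding lM << (52-K)
     into the exponent field with an integer add.  */
  return ldexp (dT[j] * p, (int) m);
}

int
main (void)
{
  init_table ();
  for (double x = -8.0; x <= 8.0; x += 1.6)
    printf ("x=% 5.2f  model=%.17g  exp=%.17g\n", x, exp_model (x), exp (x));
  return 0;
}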