Diffstat (limited to 'sysdeps')
-rw-r--r-- | sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S | 871
1 file changed, 435 insertions, 436 deletions
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S
index 11523ef485..5bdc6859f0 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_asinh8_core_avx512.S
@@ -34,477 +34,476 @@ /* Offsets for data table __svml_dasinh_data_internal_avx512 */
-#define Log_tbl_H 0
-#define Log_tbl_L 128
-#define One 256
-#define AbsMask 320
-#define SmallThreshold 384
-#define Threshold 448
-#define LargeThreshold 512
-#define ca2 576
-#define ca1 640
-#define c4s 704
-#define c3s 768
-#define c2s 832
-#define c1s 896
-#define AddB5 960
-#define RcpBitMask 1024
-#define OneEighth 1088
-#define Four 1152
-#define poly_coeff9 1216
-#define poly_coeff8 1280
-#define poly_coeff7 1344
-#define poly_coeff6 1408
-#define poly_coeff5 1472
-#define poly_coeff4 1536
-#define poly_coeff3 1600
-#define poly_coeff2 1664
-#define poly_coeff1 1728
-#define L2H 1792
-#define L2L 1856
+#define Log_tbl_H	0
+#define Log_tbl_L	128
+#define One	256
+#define AbsMask	320
+#define SmallThreshold	384
+#define Threshold	448
+#define LargeThreshold	512
+#define ca2	576
+#define ca1	640
+#define c4s	704
+#define c3s	768
+#define c2s	832
+#define c1s	896
+#define AddB5	960
+#define RcpBitMask	1024
+#define OneEighth	1088
+#define Four	1152
+#define poly_coeff9	1216
+#define poly_coeff8	1280
+#define poly_coeff7	1344
+#define poly_coeff6	1408
+#define poly_coeff5	1472
+#define poly_coeff4	1536
+#define poly_coeff3	1600
+#define poly_coeff2	1664
+#define poly_coeff1	1728
+#define L2H	1792
+#define L2L	1856
 
 #include <sysdep.h>
 
-        .text
-        .section .text.evex512,"ax",@progbits
+	.section .text.evex512, "ax", @progbits
 
 ENTRY(_ZGVeN8v_asinh_skx)
-        pushq %rbp
-        cfi_def_cfa_offset(16)
-        movq %rsp, %rbp
-        cfi_def_cfa(6, 16)
-        cfi_offset(6, -16)
-        andq $-64, %rsp
-        subq $192, %rsp
-        vmovaps %zmm0, %zmm3
+	pushq	%rbp
+	cfi_def_cfa_offset(16)
+	movq	%rsp, %rbp
+	cfi_def_cfa(6, 16)
+	cfi_offset(6, -16)
+	andq	$-64, %rsp
+	subq	$192, %rsp
+	vmovaps	%zmm0, %zmm3
 
-/* x^2 */
-        vmulpd {rn-sae}, %zmm3, %zmm3, %zmm14
-        vmovups One+__svml_dasinh_data_internal_avx512(%rip), %zmm9
+	/* x^2 */
+	vmulpd	{rn-sae}, %zmm3, %zmm3, %zmm14
+	vmovups	One+__svml_dasinh_data_internal_avx512(%rip), %zmm9
 
-/* polynomial computation for small inputs */
-        vmovups ca2+__svml_dasinh_data_internal_avx512(%rip), %zmm10
-        vmovups ca1+__svml_dasinh_data_internal_avx512(%rip), %zmm11
+	/* polynomial computation for small inputs */
+	vmovups	ca2+__svml_dasinh_data_internal_avx512(%rip), %zmm10
+	vmovups	ca1+__svml_dasinh_data_internal_avx512(%rip), %zmm11
 
-/* not a very small input ? */
-        vmovups SmallThreshold+__svml_dasinh_data_internal_avx512(%rip), %zmm0
+	/* not a very small input ? */
+	vmovups	SmallThreshold+__svml_dasinh_data_internal_avx512(%rip), %zmm0
 
-/* A=max(x^2, 1); */
-        vmaxpd {sae}, %zmm14, %zmm9, %zmm4
+	/* A=max(x^2, 1); */
+	vmaxpd	{sae}, %zmm14, %zmm9, %zmm4
 
-/* B=min(x^2, 1); */
-        vminpd {sae}, %zmm14, %zmm9, %zmm5
-        vfmadd231pd {rn-sae}, %zmm14, %zmm10, %zmm11
+	/* B=min(x^2, 1); */
+	vminpd	{sae}, %zmm14, %zmm9, %zmm5
+	vfmadd231pd {rn-sae}, %zmm14, %zmm10, %zmm11
 
-/* 1+x^2 */
-        vaddpd {rn-sae}, %zmm9, %zmm14, %zmm8
+	/* 1+x^2 */
+	vaddpd	{rn-sae}, %zmm9, %zmm14, %zmm8
 
-/* |input| */
-        vandpd AbsMask+__svml_dasinh_data_internal_avx512(%rip), %zmm3, %zmm1
-        vrsqrt14pd %zmm8, %zmm6
-        vcmppd $21, {sae}, %zmm0, %zmm1, %k2
+	/* |input| */
+	vandpd	AbsMask+__svml_dasinh_data_internal_avx512(%rip), %zmm3, %zmm1
+	vrsqrt14pd %zmm8, %zmm6
+	vcmppd	$21, {sae}, %zmm0, %zmm1, %k2
 
-/* B_high */
-        vsubpd {rn-sae}, %zmm4, %zmm8, %zmm7
+	/* B_high */
+	vsubpd	{rn-sae}, %zmm4, %zmm8, %zmm7
 
-/* sign bit */
-        vxorpd %zmm3, %zmm1, %zmm2
-        vmulpd {rn-sae}, %zmm14, %zmm11, %zmm4
+	/* sign bit */
+	vxorpd	%zmm3, %zmm1, %zmm2
+	vmulpd	{rn-sae}, %zmm14, %zmm11, %zmm4
 
-/* B_low */
-        vsubpd {rn-sae}, %zmm7, %zmm5, %zmm13
-        vmovups c2s+__svml_dasinh_data_internal_avx512(%rip), %zmm5
-        vmovups c1s+__svml_dasinh_data_internal_avx512(%rip), %zmm7
+	/* B_low */
+	vsubpd	{rn-sae}, %zmm7, %zmm5, %zmm13
+	vmovups	c2s+__svml_dasinh_data_internal_avx512(%rip), %zmm5
+	vmovups	c1s+__svml_dasinh_data_internal_avx512(%rip), %zmm7
 
-/* polynomial computation for small inputs */
-        vfmadd213pd {rn-sae}, %zmm1, %zmm1, %zmm4
+	/* polynomial computation for small inputs */
+	vfmadd213pd {rn-sae}, %zmm1, %zmm1, %zmm4
 
-/* (x^2)_low */
-        vmovaps %zmm3, %zmm15
-        vfmsub213pd {rn-sae}, %zmm14, %zmm3, %zmm15
-
-/* Sh ~sqrt(1+x^2) */
-        vmulpd {rn-sae}, %zmm6, %zmm8, %zmm14
-
-/* Yl = (x^2)_low + B_low */
-        vaddpd {rn-sae}, %zmm15, %zmm13, %zmm13
-
-/* very large inputs ? */
-        vmovups Threshold+__svml_dasinh_data_internal_avx512(%rip), %zmm15
-
-/* (Yh*R0)_low */
-        vfmsub213pd {rn-sae}, %zmm14, %zmm6, %zmm8
-        vcmppd $21, {sae}, %zmm15, %zmm1, %k1
-
-/* Sl = (Yh*R0)_low+(R0*Yl) */
-        vfmadd213pd {rn-sae}, %zmm8, %zmm6, %zmm13
-        vmovups LargeThreshold+__svml_dasinh_data_internal_avx512(%rip), %zmm8
-
-/* rel. error term: Eh=1-Sh*R0 */
-        vmovaps %zmm9, %zmm12
-        vfnmadd231pd {rn-sae}, %zmm14, %zmm6, %zmm12
-        vcmppd $22, {sae}, %zmm8, %zmm1, %k0
-
-/* rel. error term: Eh=(1-Sh*R0)-Sl*R0 */
-        vfnmadd231pd {rn-sae}, %zmm13, %zmm6, %zmm12
-
-/*
- * sqrt(1+x^2) ~ Sh + Sl + Sh*Eh*poly_s
- * poly_s = c1+c2*Eh+c3*Eh^2
- */
-        vmovups c4s+__svml_dasinh_data_internal_avx512(%rip), %zmm6
-        vmovups c3s+__svml_dasinh_data_internal_avx512(%rip), %zmm8
-
-/* Sh*Eh */
-        vmulpd {rn-sae}, %zmm12, %zmm14, %zmm11
-        vfmadd231pd {rn-sae}, %zmm12, %zmm6, %zmm8
-
-/* Sh+x */
-        vaddpd {rn-sae}, %zmm1, %zmm14, %zmm6
-        kmovw %k0, %edx
-        vfmadd213pd {rn-sae}, %zmm5, %zmm12, %zmm8
-        vfmadd213pd {rn-sae}, %zmm7, %zmm12, %zmm8
-
-/* Xh */
-        vsubpd {rn-sae}, %zmm14, %zmm6, %zmm12
-
-/* Sl + Sh*Eh*poly_s */
-        vfmadd213pd {rn-sae}, %zmm13, %zmm8, %zmm11
-
-/* fixup for very large inputs */
-        vmovups OneEighth+__svml_dasinh_data_internal_avx512(%rip), %zmm8
-
-/* Xl */
-        vsubpd {rn-sae}, %zmm12, %zmm1, %zmm12
-
-/* Xin0+Sl+Sh*Eh*poly_s ~ x+sqrt(1+x^2) */
-        vaddpd {rn-sae}, %zmm11, %zmm6, %zmm10
-
-/* Sl_high */
-        vsubpd {rn-sae}, %zmm6, %zmm10, %zmm5
-        vmulpd {rn-sae}, %zmm8, %zmm1, %zmm10{%k1}
-
-/* Table lookups */
-        vmovups __svml_dasinh_data_internal_avx512(%rip), %zmm6
-
-/* Sl_l */
-        vsubpd {rn-sae}, %zmm5, %zmm11, %zmm7
-        vrcp14pd %zmm10, %zmm13
-
-/* Xin_low */
-        vaddpd {rn-sae}, %zmm12, %zmm7, %zmm14
-        vmovups Log_tbl_L+__svml_dasinh_data_internal_avx512(%rip), %zmm7
-        vmovups poly_coeff6+__svml_dasinh_data_internal_avx512(%rip), %zmm12
-
-/* round reciprocal to 1+4b mantissas */
-        vpaddq AddB5+__svml_dasinh_data_internal_avx512(%rip), %zmm13, %zmm11
-
-/* fixup for very large inputs */
-        vxorpd %zmm14, %zmm14, %zmm14{%k1}
-        vmovups poly_coeff5+__svml_dasinh_data_internal_avx512(%rip), %zmm13
-        vandpd RcpBitMask+__svml_dasinh_data_internal_avx512(%rip), %zmm11, %zmm15
-        vmovups poly_coeff7+__svml_dasinh_data_internal_avx512(%rip), %zmm11
-
-/* Prepare table index */
-        vpsrlq $48, %zmm15, %zmm5
-
-/* reduced argument for log(): (Rcp*Xin-1)+Rcp*Xin_low */
-        vfmsub231pd {rn-sae}, %zmm15, %zmm10, %zmm9
-
-/* exponents */
-        vgetexppd {sae}, %zmm15, %zmm8
-        vmovups Four+__svml_dasinh_data_internal_avx512(%rip), %zmm10
-        vpermt2pd Log_tbl_H+64+__svml_dasinh_data_internal_avx512(%rip), %zmm5, %zmm6
-        vpermt2pd Log_tbl_L+64+__svml_dasinh_data_internal_avx512(%rip), %zmm5, %zmm7
-        vsubpd {rn-sae}, %zmm10, %zmm8, %zmm8{%k1}
-        vfmadd231pd {rn-sae}, %zmm15, %zmm14, %zmm9
-
-/* polynomials */
-        vmovups poly_coeff9+__svml_dasinh_data_internal_avx512(%rip), %zmm10
-        vmovups poly_coeff8+__svml_dasinh_data_internal_avx512(%rip), %zmm5
-        vmovups poly_coeff4+__svml_dasinh_data_internal_avx512(%rip), %zmm14
-
-/* -K*L2H + Th */
-        vmovups L2H+__svml_dasinh_data_internal_avx512(%rip), %zmm15
-        vfmadd231pd {rn-sae}, %zmm9, %zmm10, %zmm5
-
-/* -K*L2L + Tl */
-        vmovups L2L+__svml_dasinh_data_internal_avx512(%rip), %zmm10
-        vfnmadd231pd {rn-sae}, %zmm8, %zmm15, %zmm6
-        vfmadd213pd {rn-sae}, %zmm11, %zmm9, %zmm5
-        vfnmadd213pd {rn-sae}, %zmm7, %zmm10, %zmm8
-        vmovups poly_coeff3+__svml_dasinh_data_internal_avx512(%rip), %zmm7
-        vmovups poly_coeff1+__svml_dasinh_data_internal_avx512(%rip), %zmm10
-
-/* R^2 */
-        vmulpd {rn-sae}, %zmm9, %zmm9, %zmm11
-        vfmadd213pd {rn-sae}, %zmm12, %zmm9, %zmm5
-        vfmadd213pd {rn-sae}, %zmm13, %zmm9, %zmm5
-        vfmadd213pd {rn-sae}, %zmm14, %zmm9, %zmm5
-        vfmadd213pd {rn-sae}, %zmm7, %zmm9, %zmm5
-        vmovups poly_coeff2+__svml_dasinh_data_internal_avx512(%rip), %zmm7
-        vfmadd213pd {rn-sae}, %zmm7, %zmm9, %zmm5
-        vfmadd213pd {rn-sae}, %zmm10, %zmm9, %zmm5
-
-/* Tl + R^2*Poly */
-        vfmadd213pd {rn-sae}, %zmm8, %zmm11, %zmm5
-
-/* R+Tl + R^2*Poly */
-        vaddpd {rn-sae}, %zmm9, %zmm5, %zmm9
-        vaddpd {rn-sae}, %zmm9, %zmm6, %zmm4{%k2}
-        vxorpd %zmm2, %zmm4, %zmm0
-        testl %edx, %edx
-
-/* Go to special inputs processing branch */
-        jne L(SPECIAL_VALUES_BRANCH)
-        # LOE rbx r12 r13 r14 r15 edx zmm0 zmm3
-
-/* Restore registers
- * and exit the function
- */
+	/* (x^2)_low */
+	vmovaps	%zmm3, %zmm15
+	vfmsub213pd {rn-sae}, %zmm14, %zmm3, %zmm15
+
+	/* Sh ~sqrt(1+x^2) */
+	vmulpd	{rn-sae}, %zmm6, %zmm8, %zmm14
+
+	/* Yl = (x^2)_low + B_low */
+	vaddpd	{rn-sae}, %zmm15, %zmm13, %zmm13
+
+	/* very large inputs ? */
+	vmovups	Threshold+__svml_dasinh_data_internal_avx512(%rip), %zmm15
+
+	/* (Yh*R0)_low */
+	vfmsub213pd {rn-sae}, %zmm14, %zmm6, %zmm8
+	vcmppd	$21, {sae}, %zmm15, %zmm1, %k1
+
+	/* Sl = (Yh*R0)_low+(R0*Yl) */
+	vfmadd213pd {rn-sae}, %zmm8, %zmm6, %zmm13
+	vmovups	LargeThreshold+__svml_dasinh_data_internal_avx512(%rip), %zmm8
+
+	/* rel. error term: Eh=1-Sh*R0 */
+	vmovaps	%zmm9, %zmm12
+	vfnmadd231pd {rn-sae}, %zmm14, %zmm6, %zmm12
+	vcmppd	$22, {sae}, %zmm8, %zmm1, %k0
+
+	/* rel. error term: Eh=(1-Sh*R0)-Sl*R0 */
+	vfnmadd231pd {rn-sae}, %zmm13, %zmm6, %zmm12
+
+	/*
+	 * sqrt(1+x^2) ~ Sh + Sl + Sh*Eh*poly_s
+	 * poly_s = c1+c2*Eh+c3*Eh^2
+	 */
+	vmovups	c4s+__svml_dasinh_data_internal_avx512(%rip), %zmm6
+	vmovups	c3s+__svml_dasinh_data_internal_avx512(%rip), %zmm8
+
+	/* Sh*Eh */
+	vmulpd	{rn-sae}, %zmm12, %zmm14, %zmm11
+	vfmadd231pd {rn-sae}, %zmm12, %zmm6, %zmm8
+
+	/* Sh+x */
+	vaddpd	{rn-sae}, %zmm1, %zmm14, %zmm6
+	kmovw	%k0, %edx
+	vfmadd213pd {rn-sae}, %zmm5, %zmm12, %zmm8
+	vfmadd213pd {rn-sae}, %zmm7, %zmm12, %zmm8
+
+	/* Xh */
+	vsubpd	{rn-sae}, %zmm14, %zmm6, %zmm12
+
+	/* Sl + Sh*Eh*poly_s */
+	vfmadd213pd {rn-sae}, %zmm13, %zmm8, %zmm11
+
+	/* fixup for very large inputs */
+	vmovups	OneEighth+__svml_dasinh_data_internal_avx512(%rip), %zmm8
+
+	/* Xl */
+	vsubpd	{rn-sae}, %zmm12, %zmm1, %zmm12
+
+	/* Xin0+Sl+Sh*Eh*poly_s ~ x+sqrt(1+x^2) */
+	vaddpd	{rn-sae}, %zmm11, %zmm6, %zmm10
+
+	/* Sl_high */
+	vsubpd	{rn-sae}, %zmm6, %zmm10, %zmm5
+	vmulpd	{rn-sae}, %zmm8, %zmm1, %zmm10{%k1}
+
+	/* Table lookups */
+	vmovups	__svml_dasinh_data_internal_avx512(%rip), %zmm6
+
+	/* Sl_l */
+	vsubpd	{rn-sae}, %zmm5, %zmm11, %zmm7
+	vrcp14pd %zmm10, %zmm13
+
+	/* Xin_low */
+	vaddpd	{rn-sae}, %zmm12, %zmm7, %zmm14
+	vmovups	Log_tbl_L+__svml_dasinh_data_internal_avx512(%rip), %zmm7
+	vmovups	poly_coeff6+__svml_dasinh_data_internal_avx512(%rip), %zmm12
+
+	/* round reciprocal to 1+4b mantissas */
+	vpaddq	AddB5+__svml_dasinh_data_internal_avx512(%rip), %zmm13, %zmm11
+
+	/* fixup for very large inputs */
+	vxorpd	%zmm14, %zmm14, %zmm14{%k1}
+	vmovups	poly_coeff5+__svml_dasinh_data_internal_avx512(%rip), %zmm13
+	vandpd	RcpBitMask+__svml_dasinh_data_internal_avx512(%rip), %zmm11, %zmm15
+	vmovups	poly_coeff7+__svml_dasinh_data_internal_avx512(%rip), %zmm11
+
+	/* Prepare table index */
+	vpsrlq	$48, %zmm15, %zmm5
+
+	/* reduced argument for log(): (Rcp*Xin-1)+Rcp*Xin_low */
+	vfmsub231pd {rn-sae}, %zmm15, %zmm10, %zmm9
+
+	/* exponents */
+	vgetexppd {sae}, %zmm15, %zmm8
+	vmovups	Four+__svml_dasinh_data_internal_avx512(%rip), %zmm10
+	vpermt2pd Log_tbl_H+64+__svml_dasinh_data_internal_avx512(%rip), %zmm5, %zmm6
+	vpermt2pd Log_tbl_L+64+__svml_dasinh_data_internal_avx512(%rip), %zmm5, %zmm7
+	vsubpd	{rn-sae}, %zmm10, %zmm8, %zmm8{%k1}
+	vfmadd231pd {rn-sae}, %zmm15, %zmm14, %zmm9
+
+	/* polynomials */
+	vmovups	poly_coeff9+__svml_dasinh_data_internal_avx512(%rip), %zmm10
+	vmovups	poly_coeff8+__svml_dasinh_data_internal_avx512(%rip), %zmm5
+	vmovups	poly_coeff4+__svml_dasinh_data_internal_avx512(%rip), %zmm14
+
+	/* -K*L2H + Th */
+	vmovups	L2H+__svml_dasinh_data_internal_avx512(%rip), %zmm15
+	vfmadd231pd {rn-sae}, %zmm9, %zmm10, %zmm5
+
+	/* -K*L2L + Tl */
+	vmovups	L2L+__svml_dasinh_data_internal_avx512(%rip), %zmm10
+	vfnmadd231pd {rn-sae}, %zmm8, %zmm15, %zmm6
+	vfmadd213pd {rn-sae}, %zmm11, %zmm9, %zmm5
+	vfnmadd213pd {rn-sae}, %zmm7, %zmm10, %zmm8
+	vmovups	poly_coeff3+__svml_dasinh_data_internal_avx512(%rip), %zmm7
+	vmovups	poly_coeff1+__svml_dasinh_data_internal_avx512(%rip), %zmm10
+
+	/* R^2 */
+	vmulpd	{rn-sae}, %zmm9, %zmm9, %zmm11
+	vfmadd213pd {rn-sae}, %zmm12, %zmm9, %zmm5
+	vfmadd213pd {rn-sae}, %zmm13, %zmm9, %zmm5
+	vfmadd213pd {rn-sae}, %zmm14, %zmm9, %zmm5
+	vfmadd213pd {rn-sae}, %zmm7, %zmm9, %zmm5
+	vmovups	poly_coeff2+__svml_dasinh_data_internal_avx512(%rip), %zmm7
+	vfmadd213pd {rn-sae}, %zmm7, %zmm9, %zmm5
+	vfmadd213pd {rn-sae}, %zmm10, %zmm9, %zmm5
+
+	/* Tl + R^2*Poly */
+	vfmadd213pd {rn-sae}, %zmm8, %zmm11, %zmm5
+
+	/* R+Tl + R^2*Poly */
+	vaddpd	{rn-sae}, %zmm9, %zmm5, %zmm9
+	vaddpd	{rn-sae}, %zmm9, %zmm6, %zmm4{%k2}
+	vxorpd	%zmm2, %zmm4, %zmm0
+	testl	%edx, %edx
+
+	/* Go to special inputs processing branch */
+	jne	L(SPECIAL_VALUES_BRANCH)
+	# LOE rbx r12 r13 r14 r15 edx zmm0 zmm3
+
+	/* Restore registers
+	 * and exit the function
+	 */
 L(EXIT):
-        movq %rbp, %rsp
-        popq %rbp
-        cfi_def_cfa(7, 8)
-        cfi_restore(6)
-        ret
-        cfi_def_cfa(6, 16)
-        cfi_offset(6, -16)
-
-/* Branch to process
- * special inputs
- */
+	movq	%rbp, %rsp
+	popq	%rbp
+	cfi_def_cfa(7, 8)
+	cfi_restore(6)
+	ret
+	cfi_def_cfa(6, 16)
+	cfi_offset(6, -16)
+
+	/* Branch to process
+	 * special inputs
+	 */
 L(SPECIAL_VALUES_BRANCH):
-        vmovups %zmm3, 64(%rsp)
-        vmovups %zmm0, 128(%rsp)
-        # LOE rbx r12 r13 r14 r15 edx zmm0
-
-        xorl %eax, %eax
-        # LOE rbx r12 r13 r14 r15 eax edx
-
-        vzeroupper
-        movq %r12, 16(%rsp)
-        /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
-        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
-        movl %eax, %r12d
-        movq %r13, 8(%rsp)
-        /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
-        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
-        movl %edx, %r13d
-        movq %r14, (%rsp)
-        /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
-        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
-        # LOE rbx r15 r12d r13d
-
-/* Range mask
- * bits check
- */
+	vmovups	%zmm3, 64(%rsp)
+	vmovups	%zmm0, 128(%rsp)
+	# LOE rbx r12 r13 r14 r15 edx zmm0
+
+	xorl	%eax, %eax
+	# LOE rbx r12 r13 r14 r15 eax edx
+
+	vzeroupper
+	movq	%r12, 16(%rsp)
+	/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
+	.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
+	movl	%eax, %r12d
+	movq	%r13, 8(%rsp)
+	/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
+	.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
+	movl	%edx, %r13d
+	movq	%r14, (%rsp)
+	/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
+	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
+	# LOE rbx r15 r12d r13d
+
+	/* Range mask
+	 * bits check
+	 */
 L(RANGEMASK_CHECK):
-        btl %r12d, %r13d
-/* Call scalar math function */
-        jc L(SCALAR_MATH_CALL)
-        # LOE rbx r15 r12d r13d
-/* Special inputs
- * processing loop
- */
+	btl	%r12d, %r13d
+	/* Call scalar math function */
+	jc	L(SCALAR_MATH_CALL)
+	# LOE rbx r15 r12d r13d
+	/* Special inputs
+	 * processing loop
+	 */
 L(SPECIAL_VALUES_LOOP):
-        incl %r12d
-        cmpl $8, %r12d
-
-/* Check bits in range mask */
-        jl L(RANGEMASK_CHECK)
-        # LOE rbx r15 r12d r13d
-
-        movq 16(%rsp), %r12
-        cfi_restore(12)
-        movq 8(%rsp), %r13
-        cfi_restore(13)
-        movq (%rsp), %r14
-        cfi_restore(14)
-        vmovups 128(%rsp), %zmm0
-
-/* Go to exit */
-        jmp L(EXIT)
-        /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
-        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
-        /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
-        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
-        /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
-        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
-        # LOE rbx r12 r13 r14 r15 zmm0
-
-/* Scalar math fucntion call
- * to process special input
- */
+	incl	%r12d
+	cmpl	$8, %r12d
+
+	/* Check bits in range mask */
+	jl	L(RANGEMASK_CHECK)
+	# LOE rbx r15 r12d r13d
+
+	movq	16(%rsp), %r12
+	cfi_restore(12)
+	movq	8(%rsp), %r13
+	cfi_restore(13)
+	movq	(%rsp), %r14
+	cfi_restore(14)
+	vmovups	128(%rsp), %zmm0
+
+	/* Go to exit */
+	jmp	L(EXIT)
+	/* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */
+	.cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
+	/* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */
+	.cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
+	/* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */
+	.cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
+	# LOE rbx r12 r13 r14 r15 zmm0
+
+	/* Scalar math fucntion call
+	 * to process special input
+	 */
 L(SCALAR_MATH_CALL):
-        movl %r12d, %r14d
-        movsd 64(%rsp,%r14,8), %xmm0
-        call asinh@PLT
-        # LOE rbx r14 r15 r12d r13d xmm0
+	movl	%r12d, %r14d
+	movsd	64(%rsp, %r14, 8), %xmm0
+	call	asinh@PLT
+	# LOE rbx r14 r15 r12d r13d xmm0
 
-        movsd %xmm0, 128(%rsp,%r14,8)
+	movsd	%xmm0, 128(%rsp, %r14, 8)
 
-/* Process special inputs in loop */
-        jmp L(SPECIAL_VALUES_LOOP)
-        # LOE rbx r15 r12d r13d
+	/* Process special inputs in loop */
+	jmp	L(SPECIAL_VALUES_LOOP)
+	# LOE rbx r15 r12d r13d
 END(_ZGVeN8v_asinh_skx)
 
-        .section .rodata, "a"
-        .align 64
+	.section .rodata, "a"
+	.align 64
 
 #ifdef __svml_dasinh_data_internal_avx512_typedef
 typedef unsigned int VUINT32;
 typedef struct {
-        __declspec(align(64)) VUINT32 Log_tbl_H[16][2];
-        __declspec(align(64)) VUINT32 Log_tbl_L[16][2];
-        __declspec(align(64)) VUINT32 One[8][2];
-        __declspec(align(64)) VUINT32 AbsMask[8][2];
-        __declspec(align(64)) VUINT32 SmallThreshold[8][2];
-        __declspec(align(64)) VUINT32 Threshold[8][2];
-        __declspec(align(64)) VUINT32 LargeThreshold[8][2];
-        __declspec(align(64)) VUINT32 ca2[8][2];
-        __declspec(align(64)) VUINT32 ca1[8][2];
-        __declspec(align(64)) VUINT32 c4s[8][2];
-        __declspec(align(64)) VUINT32 c3s[8][2];
-        __declspec(align(64)) VUINT32 c2s[8][2];
-        __declspec(align(64)) VUINT32 c1s[8][2];
-        __declspec(align(64)) VUINT32 AddB5[8][2];
-        __declspec(align(64)) VUINT32 RcpBitMask[8][2];
-        __declspec(align(64)) VUINT32 OneEighth[8][2];
-        __declspec(align(64)) VUINT32 Four[8][2];
-        __declspec(align(64)) VUINT32 poly_coeff9[8][2];
-        __declspec(align(64)) VUINT32 poly_coeff8[8][2];
-        __declspec(align(64)) VUINT32 poly_coeff7[8][2];
-        __declspec(align(64)) VUINT32 poly_coeff6[8][2];
-        __declspec(align(64)) VUINT32 poly_coeff5[8][2];
-        __declspec(align(64)) VUINT32 poly_coeff4[8][2];
-        __declspec(align(64)) VUINT32 poly_coeff3[8][2];
-        __declspec(align(64)) VUINT32 poly_coeff2[8][2];
-        __declspec(align(64)) VUINT32 poly_coeff1[8][2];
-        __declspec(align(64)) VUINT32 L2H[8][2];
-        __declspec(align(64)) VUINT32 L2L[8][2];
-        } __svml_dasinh_data_internal_avx512;
+	__declspec(align(64)) VUINT32 Log_tbl_H[16][2];
+	__declspec(align(64)) VUINT32 Log_tbl_L[16][2];
+	__declspec(align(64)) VUINT32 One[8][2];
+	__declspec(align(64)) VUINT32 AbsMask[8][2];
+	__declspec(align(64)) VUINT32 SmallThreshold[8][2];
+	__declspec(align(64)) VUINT32 Threshold[8][2];
+	__declspec(align(64)) VUINT32 LargeThreshold[8][2];
+	__declspec(align(64)) VUINT32 ca2[8][2];
+	__declspec(align(64)) VUINT32 ca1[8][2];
+	__declspec(align(64)) VUINT32 c4s[8][2];
+	__declspec(align(64)) VUINT32 c3s[8][2];
+	__declspec(align(64)) VUINT32 c2s[8][2];
+	__declspec(align(64)) VUINT32 c1s[8][2];
+	__declspec(align(64)) VUINT32 AddB5[8][2];
+	__declspec(align(64)) VUINT32 RcpBitMask[8][2];
+	__declspec(align(64)) VUINT32 OneEighth[8][2];
+	__declspec(align(64)) VUINT32 Four[8][2];
+	__declspec(align(64)) VUINT32 poly_coeff9[8][2];
+	__declspec(align(64)) VUINT32 poly_coeff8[8][2];
+	__declspec(align(64)) VUINT32 poly_coeff7[8][2];
+	__declspec(align(64)) VUINT32 poly_coeff6[8][2];
+	__declspec(align(64)) VUINT32 poly_coeff5[8][2];
+	__declspec(align(64)) VUINT32 poly_coeff4[8][2];
+	__declspec(align(64)) VUINT32 poly_coeff3[8][2];
+	__declspec(align(64)) VUINT32 poly_coeff2[8][2];
+	__declspec(align(64)) VUINT32 poly_coeff1[8][2];
+	__declspec(align(64)) VUINT32 L2H[8][2];
+	__declspec(align(64)) VUINT32 L2L[8][2];
+} __svml_dasinh_data_internal_avx512;
 #endif
 __svml_dasinh_data_internal_avx512:
-        /*== Log_tbl_H ==*/
-        .quad 0x0000000000000000
-        .quad 0xbfaf0a30c0120000
-        .quad 0xbfbe27076e2b0000
-        .quad 0xbfc5ff3070a78000
-        .quad 0xbfcc8ff7c79a8000
-        .quad 0xbfd1675cababc000
-        .quad 0xbfd4618bc21c4000
-        .quad 0xbfd739d7f6bbc000
-        .quad 0xbfd9f323ecbf8000
-        .quad 0xbfdc8ff7c79a8000
-        .quad 0xbfdf128f5faf0000
-        .quad 0xbfe0be72e4252000
-        .quad 0xbfe1e85f5e704000
-        .quad 0xbfe307d7334f2000
-        .quad 0xbfe41d8fe8468000
-        .quad 0xbfe52a2d265bc000
-        /*== Log_tbl_L ==*/
-        .align 64
-        .quad 0x0000000000000000
-        .quad 0x3d53ab33d066d1d2
-        .quad 0x3d2a342c2af0003c
-        .quad 0xbd43d3c873e20a07
-        .quad 0xbd4a21ac25d81ef3
-        .quad 0x3d59f1fc63382a8f
-        .quad 0xbd5ec27d0b7b37b3
-        .quad 0xbd50069ce24c53fb
-        .quad 0xbd584bf2b68d766f
-        .quad 0xbd5a21ac25d81ef3
-        .quad 0xbd3bb2cd720ec44c
-        .quad 0xbd55056d312f7668
-        .quad 0xbd1a07bd8b34be7c
-        .quad 0x3d5e83c094debc15
-        .quad 0x3d5aa33736867a17
-        .quad 0xbd46abb9df22bc57
-        /*== One ==*/
-        .align 64
-        .quad 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000
-        /*== AbsMask ==*/
-        .align 64
-        .quad 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff
-        /*== SmallThreshold ==*/
-        .align 64
-        .quad 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000
-        /*== Threshold ==*/
-        .align 64
-        .quad 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000
-        /*== LargeThreshold ==*/
-        .align 64
-        .quad 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff
-        /*== ca2 ==*/
-        .align 64
-        .quad 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7
-        /*== ca1 ==*/
-        .align 64
-        .quad 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e
-        /*== c4s ==*/
-        .align 64
-        .quad 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612
-        /*== c3s ==*/
-        .align 64
-        .quad 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000
-        /*== c2s ==*/
-        .align 64
-        .quad 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000
-        /*== c1s ==*/
-        .align 64
-        .quad 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000
-        /*== AddB5 ==*/
-        .align 64
-        .quad 0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000
-        /*== RcpBitMask ==*/
-        .align 64
-        .quad 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000
-        /*==OneEighth ==*/
-        .align 64
-        .quad 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000
-        /*== Four ==*/
-        .align 64
-        .quad 0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000
-        /*== poly_coeff9 ==*/
-        .align 64
-        .quad 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368
-        /*== poly_coeff8 ==*/
-        .align 64
-        .quad 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778
-        /*== poly_coeff7 ==*/
-        .align 64
-        .quad 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9
-        /*== poly_coeff6 ==*/
-        .align 64
-        .quad 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1
-        /*== poly_coeff5 ==*/
-        .align 64
-        .quad 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736
-        /*== poly_coeff4 ==*/
-        .align 64
-        .quad 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af
-        /*== poly_coeff3 ==*/
-        .align 64
-        .quad 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65
-        /*== poly_coeff2 ==*/
-        .align 64
-        .quad 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1
-        /*== poly_coeff1 ==*/
-        .align 64
-        .quad 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000
-        /*== L2H = log(2)_high ==*/
-        .align 64
-        .quad 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000
-        /*== L2L = log(2)_low ==*/
-        .align 64
-        .quad 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000
-        .align 64
-        .type __svml_dasinh_data_internal_avx512,@object
-        .size __svml_dasinh_data_internal_avx512,.-__svml_dasinh_data_internal_avx512
+	/* Log_tbl_H */
+	.quad	0x0000000000000000
+	.quad	0xbfaf0a30c0120000
+	.quad	0xbfbe27076e2b0000
+	.quad	0xbfc5ff3070a78000
+	.quad	0xbfcc8ff7c79a8000
+	.quad	0xbfd1675cababc000
+	.quad	0xbfd4618bc21c4000
+	.quad	0xbfd739d7f6bbc000
+	.quad	0xbfd9f323ecbf8000
+	.quad	0xbfdc8ff7c79a8000
+	.quad	0xbfdf128f5faf0000
+	.quad	0xbfe0be72e4252000
+	.quad	0xbfe1e85f5e704000
+	.quad	0xbfe307d7334f2000
+	.quad	0xbfe41d8fe8468000
+	.quad	0xbfe52a2d265bc000
+	/* Log_tbl_L */
+	.align	64
+	.quad	0x0000000000000000
+	.quad	0x3d53ab33d066d1d2
+	.quad	0x3d2a342c2af0003c
+	.quad	0xbd43d3c873e20a07
+	.quad	0xbd4a21ac25d81ef3
+	.quad	0x3d59f1fc63382a8f
+	.quad	0xbd5ec27d0b7b37b3
+	.quad	0xbd50069ce24c53fb
+	.quad	0xbd584bf2b68d766f
+	.quad	0xbd5a21ac25d81ef3
+	.quad	0xbd3bb2cd720ec44c
+	.quad	0xbd55056d312f7668
+	.quad	0xbd1a07bd8b34be7c
+	.quad	0x3d5e83c094debc15
+	.quad	0x3d5aa33736867a17
+	.quad	0xbd46abb9df22bc57
+	/* One */
+	.align	64
+	.quad	0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000
+	/* AbsMask */
+	.align	64
+	.quad	0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff
+	/* SmallThreshold */
+	.align	64
+	.quad	0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000, 0x3f70000000000000
+	/* Threshold */
+	.align	64
+	.quad	0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000, 0x5fe0000000000000
+	/* LargeThreshold */
+	.align	64
+	.quad	0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff, 0x7fefffffffffffff
+	/* ca2 */
+	.align	64
+	.quad	0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7, 0x3fb333220eaf02e7
+	/* ca1 */
+	.align	64
+	.quad	0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e, 0xbfc5555555521e7e
+	/* c4s */
+	.align	64
+	.quad	0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612, 0x3fd1800001943612
+	/* c3s */
+	.align	64
+	.quad	0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000, 0x3fd40000013b0000
+	/* c2s */
+	.align	64
+	.quad	0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000, 0x3fd8000000000000
+	/* c1s */
+	.align	64
+	.quad	0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000, 0x3fe0000000000000
+	/* AddB5 */
+	.align	64
+	.quad	0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000, 0x0000800000000000
+	/* RcpBitMask */
+	.align	64
+	.quad	0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000, 0xffff000000000000
+	/* OneEighth */
+	.align	64
+	.quad	0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000, 0x3fc0000000000000
+	/* Four */
+	.align	64
+	.quad	0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000, 0x4010000000000000
+	/* poly_coeff9 */
+	.align	64
+	.quad	0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368, 0xbfb9a9b040214368
+	/* poly_coeff8 */
+	.align	64
+	.quad	0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778, 0x3fbc80666e249778
+	/* poly_coeff7 */
+	.align	64
+	.quad	0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9, 0xbfbffffb8a054bc9
+	/* poly_coeff6 */
+	.align	64
+	.quad	0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1, 0x3fc24922f71256f1
+	/* poly_coeff5 */
+	.align	64
+	.quad	0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736, 0xbfc55555559ba736
+	/* poly_coeff4 */
+	.align	64
+	.quad	0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af, 0x3fc9999999be77af
+	/* poly_coeff3 */
+	.align	64
+	.quad	0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65, 0xbfcffffffffffc65
+	/* poly_coeff2 */
+	.align	64
+	.quad	0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1, 0x3fd55555555554c1
+	/* poly_coeff1 */
+	.align	64
+	.quad	0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000, 0xbfe0000000000000
+	/* L2H = log(2)_high */
+	.align	64
+	.quad	0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000, 0x3fe62E42FEFA0000
+	/* L2L = log(2)_low */
+	.align	64
+	.quad	0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000, 0x3d7cf79abc9e0000
+	.align	64
+	.type	__svml_dasinh_data_internal_avx512, @object
+	.size	__svml_dasinh_data_internal_avx512, .-__svml_dasinh_data_internal_avx512
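
For orientation, the kernel above evaluates asinh(x) = copysign(log(|x| + sqrt(x^2 + 1)), x) with three paths: a short polynomial below SmallThreshold (2^-8), an overflow-safe rescaling above Threshold (2^511), and the log-based main path in between; lanes flagged against LargeThreshold are reprocessed one at a time through the scalar asinh@PLT call. The following scalar C sketch is illustrative only, not the glibc implementation; the constants mirror the data-table entries (ca1 ~ -1/6, ca2 ~ 3/40, and the OneEighth/Four fixup, which uses log(2*|x|) = log(|x|/8) + 4*ln2):

#include <math.h>

/* Illustrative scalar model of the vector kernel's branch structure.  */
static double
asinh_sketch (double x)
{
  double ax = fabs (x);

  /* SmallThreshold path: asinh(x) ~ x*(1 + ca1*x^2 + ca2*x^4).  */
  if (ax < 0x1p-8)
    {
      double t = x * x;
      return x * (1.0 + t * (-1.0 / 6.0 + t * (3.0 / 40.0)));
    }

  /* Threshold path: x + sqrt(1+x^2) ~ 2*|x|, so compute
     log(|x|/8) + 4*ln2 == log(2*|x|) without squaring |x|
     (the OneEighth/Four fixup).  */
  if (ax > 0x1p511)
    return copysign (log (ax * 0.125) + 4.0 * M_LN2, x);

  /* Main path; the kernel restores the sign with the final vxorpd.  */
  return copysign (log (ax + sqrt (ax * ax + 1.0)), x);
}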
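The log() on the main path uses the table-driven reduction visible in the vrcp14pd/vpaddq/vandpd/vpsrlq/vpermt2pd sequence: a reciprocal estimate of y = x + sqrt(1+x^2) is rounded to its sign, exponent, and 4 explicit mantissa bits (the AddB5/RcpBitMask step), those 4 bits index the 16-entry Log_tbl_H/Log_tbl_L tables, and a polynomial in the reduced argument R = Rcp*y - 1 supplies the rest. A scalar sketch under the same assumptions; tbl plays the role of Log_tbl_H (tbl[i] ~ -log(1 + i/16)), and the kernel's degree-9 polynomial (poly_coeff1..9) is truncated to degree 3 here:

#include <math.h>
#include <stdint.h>
#include <string.h>

/* Illustrative table-driven log reduction; not the glibc code.  */
static double
log_sketch (double y, const double tbl[16])
{
  double r = 1.0 / y;                /* stands in for vrcp14pd */
  uint64_t bits;
  memcpy (&bits, &r, sizeof bits);
  /* Round the reciprocal to 1+4 mantissa bits (AddB5 + RcpBitMask).  */
  bits = (bits + 0x0000800000000000ULL) & 0xffff000000000000ULL;
  memcpy (&r, &bits, sizeof r);

  int idx = (bits >> 48) & 0xf;      /* table index, as in vpsrlq $48 */
  int k = ilogb (r);                 /* exponent, as in vgetexppd */
  double R = fma (r, y, -1.0);       /* reduced argument, |R| small */

  /* With r = mant * 2^k and y*r = 1 + R:
     log(y) = -k*ln2 - log(mant) + log1p(R),
     where tbl[idx] ~ -log(mant) and a short polynomial replaces log1p.  */
  return -k * M_LN2 + tbl[idx] + R * (1.0 + R * (-0.5 + R / 3.0));
}

For a vector of eight doubles the kernel does exactly this per lane, keeping head/low pairs (L2H/L2L, Log_tbl_H/Log_tbl_L) so the -K*ln2 and table terms are accumulated in double-double style before the final R + Tl + R^2*Poly sum.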