author     Sunil K Pandey <skpgkp2@gmail.com>  2021-12-29 09:05:18 -0800
committer  Sunil K Pandey <skpgkp2@gmail.com>  2021-12-29 11:37:55 -0800
commit     aa1809a1dfde88e5df73edba14b30e488b267343
tree       e22646d81ab1bd03aa0b3295aeba2569b6f3a5c5
parent     76ddc74e86f7bc36468736dd22c4c29e39cd62d8
x86-64: Add vector sinh/sinhf implementation to libmvec
Implement vectorized sinh/sinhf with SSE, AVX, AVX2 and AVX512 versions
for libmvec, as specified by the vector ABI.  The patch also adds accuracy
and ABI tests for vector sinh/sinhf, with regenerated ulps.

Reviewed-by: H.J. Lu <hjl.tools@gmail.com>
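
The new entry points are normally reached through compiler auto-vectorization
rather than called by hand.  A minimal usage sketch, assuming GCC on x86-64
with the updated headers installed (the flag set shown is one common choice,
not the only one):

    /* sinh_vec.c -- illustration only.
       Build e.g.: gcc -O2 -mavx2 -mfma -fno-math-errno -fopenmp-simd \
                       sinh_vec.c -lmvec -lm  */
    #include <math.h>

    void
    vector_sinh (const double *in, double *out, int n)
    {
      /* With math.h now declaring a SIMD variant of sinh (see the
         __DECL_SIMD_sinh changes below), the compiler may turn this loop
         into calls to _ZGVdN4v_sinh or _ZGVeN8v_sinh instead of scalar
         sinh.  */
    #pragma omp simd
      for (int i = 0; i < n; i++)
        out[i] = sinh (in[i]);
    }
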
-rw-r--r--  bits/libm-simd-decl-stubs.h | 11
-rw-r--r--  math/bits/mathcalls.h | 2
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/libmvec.abilist | 8
-rw-r--r--  sysdeps/x86/fpu/bits/math-vector.h | 4
-rw-r--r--  sysdeps/x86/fpu/finclude/math-vector-fortran.h | 4
-rw-r--r--  sysdeps/x86_64/fpu/Makeconfig | 1
-rw-r--r--  sysdeps/x86_64/fpu/Versions | 2
-rw-r--r--  sysdeps/x86_64/fpu/libm-test-ulps | 20
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core-sse2.S | 20
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core.c | 27
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S | 456
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core-sse.S | 20
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core.c | 27
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S | 470
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core-avx2.S | 20
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core.c | 27
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S | 461
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core-avx2.S | 20
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core.c | 28
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S | 318
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core-sse2.S | 20
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core.c | 28
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S | 308
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core-sse.S | 20
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core.c | 28
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S | 309
-rw-r--r--  sysdeps/x86_64/fpu/svml_d_sinh2_core.S | 29
-rw-r--r--  sysdeps/x86_64/fpu/svml_d_sinh4_core.S | 29
-rw-r--r--  sysdeps/x86_64/fpu/svml_d_sinh4_core_avx.S | 25
-rw-r--r--  sysdeps/x86_64/fpu/svml_d_sinh8_core.S | 25
-rw-r--r--  sysdeps/x86_64/fpu/svml_s_sinhf16_core.S | 25
-rw-r--r--  sysdeps/x86_64/fpu/svml_s_sinhf4_core.S | 29
-rw-r--r--  sysdeps/x86_64/fpu/svml_s_sinhf8_core.S | 29
-rw-r--r--  sysdeps/x86_64/fpu/svml_s_sinhf8_core_avx.S | 25
-rw-r--r--  sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx2.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx512f.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-double-libmvec-sinh.c | 3
-rw-r--r--  sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx2.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx512f.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-float-libmvec-sinhf.c | 3
-rw-r--r--  sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c | 1
-rw-r--r--  sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c | 1
50 files changed, 2894 insertions(+), 1 deletion(-)
diff --git a/bits/libm-simd-decl-stubs.h b/bits/libm-simd-decl-stubs.h
index 28dc4a82c5..6347320521 100644
--- a/bits/libm-simd-decl-stubs.h
+++ b/bits/libm-simd-decl-stubs.h
@@ -186,4 +186,15 @@
 #define __DECL_SIMD_expm1f32x
 #define __DECL_SIMD_expm1f64x
 #define __DECL_SIMD_expm1f128x
+
+#define __DECL_SIMD_sinh
+#define __DECL_SIMD_sinhf
+#define __DECL_SIMD_sinhl
+#define __DECL_SIMD_sinhf16
+#define __DECL_SIMD_sinhf32
+#define __DECL_SIMD_sinhf64
+#define __DECL_SIMD_sinhf128
+#define __DECL_SIMD_sinhf32x
+#define __DECL_SIMD_sinhf64x
+#define __DECL_SIMD_sinhf128x
 #endif
diff --git a/math/bits/mathcalls.h b/math/bits/mathcalls.h
index c57adc8ace..673b3a93ba 100644
--- a/math/bits/mathcalls.h
+++ b/math/bits/mathcalls.h
@@ -70,7 +70,7 @@ __MATHCALL (tan,, (_Mdouble_ __x));
 /* Hyperbolic cosine of X.  */
 __MATHCALL_VEC (cosh,, (_Mdouble_ __x));
 /* Hyperbolic sine of X.  */
-__MATHCALL (sinh,, (_Mdouble_ __x));
+__MATHCALL_VEC (sinh,, (_Mdouble_ __x));
 /* Hyperbolic tangent of X.  */
 __MATHCALL (tanh,, (_Mdouble_ __x));
 
diff --git a/sysdeps/unix/sysv/linux/x86_64/libmvec.abilist b/sysdeps/unix/sysv/linux/x86_64/libmvec.abilist
index c9d3213bd3..f9d7b085ab 100644
--- a/sysdeps/unix/sysv/linux/x86_64/libmvec.abilist
+++ b/sysdeps/unix/sysv/linux/x86_64/libmvec.abilist
@@ -53,6 +53,7 @@ GLIBC_2.35 _ZGVbN2v_cosh F
 GLIBC_2.35 _ZGVbN2v_exp10 F
 GLIBC_2.35 _ZGVbN2v_exp2 F
 GLIBC_2.35 _ZGVbN2v_expm1 F
+GLIBC_2.35 _ZGVbN2v_sinh F
 GLIBC_2.35 _ZGVbN2vv_hypot F
 GLIBC_2.35 _ZGVbN4v_acosf F
 GLIBC_2.35 _ZGVbN4v_asinf F
@@ -61,6 +62,7 @@ GLIBC_2.35 _ZGVbN4v_coshf F
 GLIBC_2.35 _ZGVbN4v_exp10f F
 GLIBC_2.35 _ZGVbN4v_exp2f F
 GLIBC_2.35 _ZGVbN4v_expm1f F
+GLIBC_2.35 _ZGVbN4v_sinhf F
 GLIBC_2.35 _ZGVbN4vv_hypotf F
 GLIBC_2.35 _ZGVcN4v_acos F
 GLIBC_2.35 _ZGVcN4v_asin F
@@ -69,6 +71,7 @@ GLIBC_2.35 _ZGVcN4v_cosh F
 GLIBC_2.35 _ZGVcN4v_exp10 F
 GLIBC_2.35 _ZGVcN4v_exp2 F
 GLIBC_2.35 _ZGVcN4v_expm1 F
+GLIBC_2.35 _ZGVcN4v_sinh F
 GLIBC_2.35 _ZGVcN4vv_hypot F
 GLIBC_2.35 _ZGVcN8v_acosf F
 GLIBC_2.35 _ZGVcN8v_asinf F
@@ -77,6 +80,7 @@ GLIBC_2.35 _ZGVcN8v_coshf F
 GLIBC_2.35 _ZGVcN8v_exp10f F
 GLIBC_2.35 _ZGVcN8v_exp2f F
 GLIBC_2.35 _ZGVcN8v_expm1f F
+GLIBC_2.35 _ZGVcN8v_sinhf F
 GLIBC_2.35 _ZGVcN8vv_hypotf F
 GLIBC_2.35 _ZGVdN4v_acos F
 GLIBC_2.35 _ZGVdN4v_asin F
@@ -85,6 +89,7 @@ GLIBC_2.35 _ZGVdN4v_cosh F
 GLIBC_2.35 _ZGVdN4v_exp10 F
 GLIBC_2.35 _ZGVdN4v_exp2 F
 GLIBC_2.35 _ZGVdN4v_expm1 F
+GLIBC_2.35 _ZGVdN4v_sinh F
 GLIBC_2.35 _ZGVdN4vv_hypot F
 GLIBC_2.35 _ZGVdN8v_acosf F
 GLIBC_2.35 _ZGVdN8v_asinf F
@@ -93,6 +98,7 @@ GLIBC_2.35 _ZGVdN8v_coshf F
 GLIBC_2.35 _ZGVdN8v_exp10f F
 GLIBC_2.35 _ZGVdN8v_exp2f F
 GLIBC_2.35 _ZGVdN8v_expm1f F
+GLIBC_2.35 _ZGVdN8v_sinhf F
 GLIBC_2.35 _ZGVdN8vv_hypotf F
 GLIBC_2.35 _ZGVeN16v_acosf F
 GLIBC_2.35 _ZGVeN16v_asinf F
@@ -101,6 +107,7 @@ GLIBC_2.35 _ZGVeN16v_coshf F
 GLIBC_2.35 _ZGVeN16v_exp10f F
 GLIBC_2.35 _ZGVeN16v_exp2f F
 GLIBC_2.35 _ZGVeN16v_expm1f F
+GLIBC_2.35 _ZGVeN16v_sinhf F
 GLIBC_2.35 _ZGVeN16vv_hypotf F
 GLIBC_2.35 _ZGVeN8v_acos F
 GLIBC_2.35 _ZGVeN8v_asin F
@@ -109,4 +116,5 @@ GLIBC_2.35 _ZGVeN8v_cosh F
 GLIBC_2.35 _ZGVeN8v_exp10 F
 GLIBC_2.35 _ZGVeN8v_exp2 F
 GLIBC_2.35 _ZGVeN8v_expm1 F
+GLIBC_2.35 _ZGVeN8v_sinh F
 GLIBC_2.35 _ZGVeN8vv_hypot F
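
The symbols added above follow the x86_64 vector function ABI mangling
_ZGV<isa><mask><len><args>_<name>: 'b' = SSE, 'c' = AVX, 'd' = AVX2,
'e' = AVX512, 'N' = unmasked, the digit is the vector length, and 'v'
marks one vector argument.  Auto-vectorization is the intended way to
reach them, but they can also be declared and called directly; a sketch
with a prototype assumed from that naming:

    /* Direct call to the AVX2 variant ('d', unmasked, 4 doubles, one
       vector argument).  Prototype assumed from the ABI mangling; link
       with -lmvec.  */
    #include <immintrin.h>

    __m256d _ZGVdN4v_sinh (__m256d);

    __m256d
    sinh4 (__m256d x)
    {
      return _ZGVdN4v_sinh (x);
    }
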
diff --git a/sysdeps/x86/fpu/bits/math-vector.h b/sysdeps/x86/fpu/bits/math-vector.h
index e2f98e176f..51a41cfebc 100644
--- a/sysdeps/x86/fpu/bits/math-vector.h
+++ b/sysdeps/x86/fpu/bits/math-vector.h
@@ -90,6 +90,10 @@
 #  define __DECL_SIMD_expm1 __DECL_SIMD_x86_64
 #  undef __DECL_SIMD_expm1f
 #  define __DECL_SIMD_expm1f __DECL_SIMD_x86_64
+#  undef __DECL_SIMD_sinh
+#  define __DECL_SIMD_sinh __DECL_SIMD_x86_64
+#  undef __DECL_SIMD_sinhf
+#  define __DECL_SIMD_sinhf __DECL_SIMD_x86_64
 
 # endif
 #endif
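
__DECL_SIMD_x86_64 is what turns the plain math.h prototype into a
declaration the vectorizer can use.  Depending on the compiler and on
whether OpenMP is enabled, the effect on sinh is roughly the following
(a simplified sketch, not the literal header text):

    /* Simplified view of the expansion; GCC also accepts an equivalent
       __attribute__ ((__simd__ ("notinbranch"))) spelling.  */
    #pragma omp declare simd notinbranch
    extern double sinh (double __x);
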
diff --git a/sysdeps/x86/fpu/finclude/math-vector-fortran.h b/sysdeps/x86/fpu/finclude/math-vector-fortran.h
index 43233059f6..91e9b4fc83 100644
--- a/sysdeps/x86/fpu/finclude/math-vector-fortran.h
+++ b/sysdeps/x86/fpu/finclude/math-vector-fortran.h
@@ -44,6 +44,8 @@
 !GCC$ builtin (coshf) attributes simd (notinbranch) if('x86_64')
 !GCC$ builtin (expm1) attributes simd (notinbranch) if('x86_64')
 !GCC$ builtin (expm1f) attributes simd (notinbranch) if('x86_64')
+!GCC$ builtin (sinh) attributes simd (notinbranch) if('x86_64')
+!GCC$ builtin (sinhf) attributes simd (notinbranch) if('x86_64')
 
 !GCC$ builtin (cos) attributes simd (notinbranch) if('x32')
 !GCC$ builtin (cosf) attributes simd (notinbranch) if('x32')
@@ -73,3 +75,5 @@
 !GCC$ builtin (coshf) attributes simd (notinbranch) if('x32')
 !GCC$ builtin (expm1) attributes simd (notinbranch) if('x32')
 !GCC$ builtin (expm1f) attributes simd (notinbranch) if('x32')
+!GCC$ builtin (sinh) attributes simd (notinbranch) if('x32')
+!GCC$ builtin (sinhf) attributes simd (notinbranch) if('x32')
diff --git a/sysdeps/x86_64/fpu/Makeconfig b/sysdeps/x86_64/fpu/Makeconfig
index 8de8214971..81e9fc95b2 100644
--- a/sysdeps/x86_64/fpu/Makeconfig
+++ b/sysdeps/x86_64/fpu/Makeconfig
@@ -36,6 +36,7 @@ libmvec-funcs = \
   pow \
   sin \
   sincos \
+  sinh \
 
 # Define libmvec function for benchtests directory.
 libmvec-bench-funcs = \
diff --git a/sysdeps/x86_64/fpu/Versions b/sysdeps/x86_64/fpu/Versions
index 58debb2dbe..2710446d12 100644
--- a/sysdeps/x86_64/fpu/Versions
+++ b/sysdeps/x86_64/fpu/Versions
@@ -21,6 +21,7 @@ libmvec {
     _ZGVbN2v_exp10; _ZGVcN4v_exp10; _ZGVdN4v_exp10; _ZGVeN8v_exp10;
     _ZGVbN2v_exp2; _ZGVcN4v_exp2; _ZGVdN4v_exp2; _ZGVeN8v_exp2;
     _ZGVbN2v_expm1; _ZGVcN4v_expm1; _ZGVdN4v_expm1; _ZGVeN8v_expm1;
+    _ZGVbN2v_sinh; _ZGVcN4v_sinh; _ZGVdN4v_sinh; _ZGVeN8v_sinh;
     _ZGVbN2vv_hypot; _ZGVcN4vv_hypot; _ZGVdN4vv_hypot; _ZGVeN8vv_hypot;
     _ZGVbN4v_acosf; _ZGVcN8v_acosf; _ZGVdN8v_acosf; _ZGVeN16v_acosf;
     _ZGVbN4v_asinf; _ZGVcN8v_asinf; _ZGVdN8v_asinf; _ZGVeN16v_asinf;
@@ -29,6 +30,7 @@ libmvec {
     _ZGVbN4v_exp10f; _ZGVcN8v_exp10f; _ZGVdN8v_exp10f; _ZGVeN16v_exp10f;
     _ZGVbN4v_exp2f; _ZGVcN8v_exp2f; _ZGVdN8v_exp2f; _ZGVeN16v_exp2f;
     _ZGVbN4v_expm1f; _ZGVcN8v_expm1f; _ZGVdN8v_expm1f; _ZGVeN16v_expm1f;
+    _ZGVbN4v_sinhf; _ZGVcN8v_sinhf; _ZGVdN8v_sinhf; _ZGVeN16v_sinhf;
     _ZGVbN4vv_hypotf; _ZGVcN8vv_hypotf; _ZGVdN8vv_hypotf; _ZGVeN16vv_hypotf;
   }
 }
diff --git a/sysdeps/x86_64/fpu/libm-test-ulps b/sysdeps/x86_64/fpu/libm-test-ulps
index f05ece8c8a..f4b313119d 100644
--- a/sysdeps/x86_64/fpu/libm-test-ulps
+++ b/sysdeps/x86_64/fpu/libm-test-ulps
@@ -1840,6 +1840,26 @@ float: 3
 float128: 4
 ldouble: 5
 
+Function: "sinh_vlen16":
+float: 1
+
+Function: "sinh_vlen2":
+double: 2
+
+Function: "sinh_vlen4":
+double: 2
+float: 1
+
+Function: "sinh_vlen4_avx2":
+double: 2
+
+Function: "sinh_vlen8":
+double: 2
+float: 1
+
+Function: "sinh_vlen8_avx2":
+float: 1
+
 Function: "tan":
 float: 1
 float128: 1
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core-sse2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core-sse2.S
new file mode 100644
index 0000000000..ca12ad6678
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core-sse2.S
@@ -0,0 +1,20 @@
+/* SSE2 version of vectorized sinh, vector length is 2.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define _ZGVbN2v_sinh _ZGVbN2v_sinh_sse2
+#include "../svml_d_sinh2_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core.c b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core.c
new file mode 100644
index 0000000000..c0344b2902
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core.c
@@ -0,0 +1,27 @@
+/* Multiple versions of vectorized sinh, vector length is 2.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define SYMBOL_NAME _ZGVbN2v_sinh
+#include "ifunc-mathvec-sse4_1.h"
+
+libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
+
+#ifdef SHARED
+__hidden_ver1 (_ZGVbN2v_sinh, __GI__ZGVbN2v_sinh, __redirect__ZGVbN2v_sinh)
+  __attribute__ ((visibility ("hidden")));
+#endif
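
The *_core.c files select between the SSE2 baseline and the SSE4.1 (or
AVX2/AVX512) kernel at load time through an IFUNC resolver.  A stand-alone
sketch of that mechanism, with hypothetical my_sinh_* names standing in
for glibc's REDIRECT_NAME/IFUNC_SELECTOR machinery:

    /* GCC/ELF only; glibc's real selector checks CPU_FEATURE_USABLE
       and tunables rather than __builtin_cpu_supports.  */
    #include <math.h>

    static double my_sinh_sse2 (double x) { return sinh (x); }  /* baseline    */
    static double my_sinh_sse4 (double x) { return sinh (x); }  /* placeholder */

    /* The resolver runs once, when the dynamic loader binds my_sinh.  */
    static __typeof__ (my_sinh_sse2) *
    my_sinh_resolver (void)
    {
      __builtin_cpu_init ();  /* needed this early, before constructors run */
      return __builtin_cpu_supports ("sse4.1") ? my_sinh_sse4 : my_sinh_sse2;
    }

    double my_sinh (double) __attribute__ ((ifunc ("my_sinh_resolver")));
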
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S
new file mode 100644
index 0000000000..80d19e9dba
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh2_core_sse4.S
@@ -0,0 +1,456 @@
+/* Function sinh vectorized with SSE4.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   https://www.gnu.org/licenses/.  */
+
+/*
+ * ALGORITHM DESCRIPTION:
+ *
+ *   Compute sinh(x) as (exp(x)-exp(-x))/2,
+ *   where exp is calculated as
+ *   exp(M*ln2 + ln2*(j/2^k) + r) = 2^M * 2^(j/2^k) * exp(r)
+ *
+ *   Special cases:
+ *
+ *   sinh(NaN) = quiet NaN, and raise invalid exception
+ *   sinh(INF) = that INF
+ *   sinh(x)   = x for subnormals
+ *   sinh(x) overflows for big x and returns MAXLOG+log(2)
+ *
+ */
+
+/* Offsets for data table __svml_dsinh_data_internal
+ */
+#define _dbInvLn2                     	0
+#define _dbLn2hi                      	16
+#define _dbLn2lo                      	32
+#define _dSign                        	48
+#define _dbT                          	64
+#define _dbShifter                    	2112
+#define _iDomainRange                 	2128
+#define _dPC2                         	2144
+#define _dPC3                         	2160
+#define _dPC4                         	2176
+#define _dPC5                         	2192
+#define _lIndexMask                   	2208
+
+#include <sysdep.h>
+
+        .text
+	.section .text.sse4,"ax",@progbits
+ENTRY(_ZGVbN2v_sinh_sse4)
+        subq      $72, %rsp
+        cfi_def_cfa_offset(80)
+        movaps    %xmm0, %xmm2
+
+/*  Abs argument  */
+        movups    _dSign+__svml_dsinh_data_internal(%rip), %xmm0
+        lea       _dbT+8+__svml_dsinh_data_internal(%rip), %rsi
+        andps     %xmm2, %xmm0
+        movaps    %xmm0, %xmm1
+
+/*
+ *  Load argument
+ * dM = x*2^K/log(2) + RShifter
+ */
+        movups    _dbInvLn2+__svml_dsinh_data_internal(%rip), %xmm10
+        pxor      %xmm2, %xmm1
+        mulpd     %xmm1, %xmm10
+        movups    _dbShifter+__svml_dsinh_data_internal(%rip), %xmm5
+        addpd     %xmm5, %xmm10
+
+/*
+ *  R
+ * dN = dM - RShifter
+ */
+        movaps    %xmm10, %xmm7
+        subpd     %xmm5, %xmm7
+
+/* dR = dX - dN*Log2_hi/2^K */
+        movups    _dbLn2hi+__svml_dsinh_data_internal(%rip), %xmm6
+        mulpd     %xmm7, %xmm6
+
+/* dR = (dX - dN*Log2_hi/2^K) - dN*Log2_lo/2^K */
+        movups    _dbLn2lo+__svml_dsinh_data_internal(%rip), %xmm8
+        mulpd     %xmm7, %xmm8
+
+/*
+ * Check for overflow/underflow
+ *
+ */
+        pshufd    $221, %xmm1, %xmm4
+        subpd     %xmm6, %xmm1
+        subpd     %xmm8, %xmm1
+
+/* VLOAD_CONST( D, dPC[0],         TAB._dPC1 ); */
+        movq      _iDomainRange+__svml_dsinh_data_internal(%rip), %xmm3
+        pcmpgtd   %xmm3, %xmm4
+
+/* dR2 = dR^2 */
+        movaps    %xmm1, %xmm3
+        mulpd     %xmm1, %xmm3
+        movmskps  %xmm4, %edx
+
+/*
+ * sinh(r) = r*((a1=1)+r^2*(a3+r^2*a5)) = r + r*(r^2*(a3+r^2*a5)) ....
+ * dSinh_r = (a3+r^2*a5)
+ */
+        movups    _dPC5+__svml_dsinh_data_internal(%rip), %xmm12
+
+/*
+ * poly(r) = (dG2+dG1)+dG3*sinh(dR)+dG1*sinh(dR)+(dG1+dG2)*dR2*(a2 +a4*dR2)
+ * dOut = (a2 +a4*dR2)
+ */
+        movups    _dPC4+__svml_dsinh_data_internal(%rip), %xmm13
+        mulpd     %xmm3, %xmm12
+        mulpd     %xmm3, %xmm13
+        addpd     _dPC3+__svml_dsinh_data_internal(%rip), %xmm12
+        addpd     _dPC2+__svml_dsinh_data_internal(%rip), %xmm13
+
+/* dSinh_r = r^2*(a3+r^2*a5) */
+        mulpd     %xmm3, %xmm12
+
+/* dOut = dR2*(a2 +a4*dR2) */
+        mulpd     %xmm13, %xmm3
+
+/* dSinh_r = r + r*(r^2*(a3+r^2*a5)) */
+        mulpd     %xmm1, %xmm12
+
+/*
+ *  Index and lookup
+ * j
+ */
+        movups    _lIndexMask+__svml_dsinh_data_internal(%rip), %xmm9
+        andps     %xmm10, %xmm9
+        movd      %xmm9, %eax
+
+/* split j and N */
+        pxor      %xmm9, %xmm10
+
+/*
+ *  G1,G2,G3: dTdif,dTn * 2^N,2^(-N)
+ * lM now is an EXP(2^N)
+ */
+        psllq     $45, %xmm10
+
+/*  */
+        movaps    %xmm10, %xmm4
+        pextrw    $4, %xmm9, %ecx
+        addpd     %xmm12, %xmm1
+        shll      $4, %eax
+        shll      $4, %ecx
+        movq      (%rax,%rsi), %xmm11
+        movhpd    (%rcx,%rsi), %xmm11
+        paddq     %xmm11, %xmm4
+
+/*  */
+        psubq     %xmm10, %xmm11
+
+/* dG3 = dTn*2^N + dTn*2^-N */
+        movdqa    %xmm4, %xmm14
+        addpd     %xmm11, %xmm14
+
+/* dG2 = dTn*2^N - dTn*2^-N */
+        subpd     %xmm11, %xmm4
+        movq      -8(%rax,%rsi), %xmm15
+        movhpd    -8(%rcx,%rsi), %xmm15
+        paddq     %xmm10, %xmm15
+
+/* dG2 += dG1 */
+        addpd     %xmm15, %xmm4
+
+/* dG1 += dG3 */
+        addpd     %xmm14, %xmm15
+
+/* dOut = dG2*dR2*(a2 +a4*dR2) */
+        mulpd     %xmm4, %xmm3
+
+/* dOut = dG1*sinh(dR)+dG2*dR2*(a2 +a4*dR2) */
+        mulpd     %xmm15, %xmm1
+        addpd     %xmm1, %xmm3
+
+/* dOut = dG2 + dG1*sinh(dR)+dG2*dR2*(a2 +a4*dR2) */
+        addpd     %xmm3, %xmm4
+
+/*  Ret H  */
+        orps      %xmm4, %xmm0
+        andl      $3, %edx
+
+/* Go to special inputs processing branch */
+        jne       L(SPECIAL_VALUES_BRANCH)
+                                # LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm2
+
+/* Restore registers
+ * and exit the function
+ */
+
+L(EXIT):
+        addq      $72, %rsp
+        cfi_def_cfa_offset(8)
+        ret
+        cfi_def_cfa_offset(80)
+
+/* Branch to process
+ * special inputs
+ */
+
+L(SPECIAL_VALUES_BRANCH):
+        movups    %xmm2, 32(%rsp)
+        movups    %xmm0, 48(%rsp)
+                                # LOE rbx rbp r12 r13 r14 r15 edx xmm0
+
+        xorl      %eax, %eax
+        movq      %r12, 16(%rsp)
+        cfi_offset(12, -64)
+        movl      %eax, %r12d
+        movq      %r13, 8(%rsp)
+        cfi_offset(13, -72)
+        movl      %edx, %r13d
+        movq      %r14, (%rsp)
+        cfi_offset(14, -80)
+                                # LOE rbx rbp r15 r12d r13d
+
+/* Range mask
+ * bits check
+ */
+
+L(RANGEMASK_CHECK):
+        btl       %r12d, %r13d
+
+/* Call scalar math function */
+        jc        L(SCALAR_MATH_CALL)
+                                # LOE rbx rbp r15 r12d r13d
+
+/* Special inputs
+ * processing loop
+ */
+
+L(SPECIAL_VALUES_LOOP):
+        incl      %r12d
+        cmpl      $2, %r12d
+
+/* Check bits in range mask */
+        jl        L(RANGEMASK_CHECK)
+                                # LOE rbx rbp r15 r12d r13d
+
+        movq      16(%rsp), %r12
+        cfi_restore(12)
+        movq      8(%rsp), %r13
+        cfi_restore(13)
+        movq      (%rsp), %r14
+        cfi_restore(14)
+        movups    48(%rsp), %xmm0
+
+/* Go to exit */
+        jmp       L(EXIT)
+        cfi_offset(12, -64)
+        cfi_offset(13, -72)
+        cfi_offset(14, -80)
+                                # LOE rbx rbp r12 r13 r14 r15 xmm0
+
+/* Scalar math function call
+ * to process special input
+ */
+
+L(SCALAR_MATH_CALL):
+        movl      %r12d, %r14d
+        movsd     32(%rsp,%r14,8), %xmm0
+        call      sinh@PLT
+                                # LOE rbx rbp r14 r15 r12d r13d xmm0
+
+        movsd     %xmm0, 48(%rsp,%r14,8)
+
+/* Process special inputs in loop */
+        jmp       L(SPECIAL_VALUES_LOOP)
+                                # LOE rbx rbp r15 r12d r13d
+END(_ZGVbN2v_sinh_sse4)
+
+        .section .rodata, "a"
+        .align 16
+
+#ifdef __svml_dsinh_data_internal_typedef
+typedef unsigned int VUINT32;
+typedef struct
+{
+        __declspec(align(16)) VUINT32 _dbInvLn2[2][2];
+        __declspec(align(16)) VUINT32 _dbLn2hi[2][2];
+        __declspec(align(16)) VUINT32 _dbLn2lo[2][2];
+        __declspec(align(16)) VUINT32 _dSign[2][2];                //0x8000000000000000
+        __declspec(align(16)) VUINT32 _dbT[(1<<7)][2][2]; //precalc poly coeff
+        __declspec(align(16)) VUINT32 _dbShifter[2][2];
+        __declspec(align(16)) VUINT32 _iDomainRange[4][1];
+        __declspec(align(16)) VUINT32 _dPC2[2][2];
+        __declspec(align(16)) VUINT32 _dPC3[2][2];
+        __declspec(align(16)) VUINT32 _dPC4[2][2];
+        __declspec(align(16)) VUINT32 _dPC5[2][2];
+        __declspec(align(16)) VUINT32 _lIndexMask[2][2];
+} __svml_dsinh_data_internal;
+#endif
+__svml_dsinh_data_internal:
+        .quad 0x3FF71547652B82FE, 0x3FF71547652B82FE /* _dbInvLn2 = 1/log(2) */
+        .align 16
+        .quad 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000 /* _dbLn2hi  = log(2) hi*/
+        .align 16
+        .quad 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A /* _dbLn2lo  = log(2) lo*/
+        .align 16
+        .quad 0x8000000000000000, 0x8000000000000000 /* _dSign */
+        //_dbT
+        .align 16
+        .quad 0x0000000000000000, 0x3FE0000000000000  //2^( 0 /128-1) - 2^(- 0 /128-1), 2^(- 0 /128-1)
+        .quad 0x3F762E4A19BD1E74, 0x3FDFD3C22B8F71F1  //2^( 1 /128-1) - 2^(- 1 /128-1), 2^(- 1 /128-1)
+        .quad 0x3F862E5F6A0DFD36, 0x3FDFA7C1819E90D8  //2^( 2 /128-1) - 2^(- 2 /128-1), 2^(- 2 /128-1)
+        .quad 0x3F90A2E234040F5F, 0x3FDF7BFDAD9CBE14  //2^( 3 /128-1) - 2^(- 3 /128-1), 2^(- 3 /128-1)
+        .quad 0x3F962EB4ABCC5A81, 0x3FDF50765B6E4540  //2^( 4 /128-1) - 2^(- 4 /128-1), 2^(- 4 /128-1)
+        .quad 0x3F9BBAB1C5033244, 0x3FDF252B376BBA97  //2^( 5 /128-1) - 2^(- 5 /128-1), 2^(- 5 /128-1)
+        .quad 0x3FA0A372144EEB45, 0x3FDEFA1BEE615A27  //2^( 6 /128-1) - 2^(- 6 /128-1), 2^(- 6 /128-1)
+        .quad 0x3FA369AB3FFBF8B0, 0x3FDECF482D8E67F1  //2^( 7 /128-1) - 2^(- 7 /128-1), 2^(- 7 /128-1)
+        .quad 0x3FA63009BA740A2A, 0x3FDEA4AFA2A490DA  //2^( 8 /128-1) - 2^(- 8 /128-1), 2^(- 8 /128-1)
+        .quad 0x3FA8F692D8EA1B5A, 0x3FDE7A51FBC74C83  //2^( 9 /128-1) - 2^(- 9 /128-1), 2^(- 9 /128-1)
+        .quad 0x3FABBD4BF0E31A6F, 0x3FDE502EE78B3FF6  //2^( 10 /128-1) - 2^(- 10 /128-1), 2^(- 10 /128-1)
+        .quad 0x3FAE843A5840286A, 0x3FDE264614F5A129  //2^( 11 /128-1) - 2^(- 11 /128-1), 2^(- 11 /128-1)
+        .quad 0x3FB0A5B1B2A46D0A, 0x3FDDFC97337B9B5F  //2^( 12 /128-1) - 2^(- 12 /128-1), 2^(- 12 /128-1)
+        .quad 0x3FB20966375ABCDF, 0x3FDDD321F301B460  //2^( 13 /128-1) - 2^(- 13 /128-1), 2^(- 13 /128-1)
+        .quad 0x3FB36D3D65DCA4E8, 0x3FDDA9E603DB3285  //2^( 14 /128-1) - 2^(- 14 /128-1), 2^(- 14 /128-1)
+        .quad 0x3FB4D139EA06642A, 0x3FDD80E316C98398  //2^( 15 /128-1) - 2^(- 15 /128-1), 2^(- 15 /128-1)
+        .quad 0x3FB6355E6FFBF9BA, 0x3FDD5818DCFBA487  //2^( 16 /128-1) - 2^(- 16 /128-1), 2^(- 16 /128-1)
+        .quad 0x3FB799ADA42E4788, 0x3FDD2F87080D89F2  //2^( 17 /128-1) - 2^(- 17 /128-1), 2^(- 17 /128-1)
+        .quad 0x3FB8FE2A336035BC, 0x3FDD072D4A07897C  //2^( 18 /128-1) - 2^(- 18 /128-1), 2^(- 18 /128-1)
+        .quad 0x3FBA62D6CAABD6B6, 0x3FDCDF0B555DC3FA  //2^( 19 /128-1) - 2^(- 19 /128-1), 2^(- 19 /128-1)
+        .quad 0x3FBBC7B617878BAF, 0x3FDCB720DCEF9069  //2^( 20 /128-1) - 2^(- 20 /128-1), 2^(- 20 /128-1)
+        .quad 0x3FBD2CCAC7CB2A11, 0x3FDC8F6D9406E7B5  //2^( 21 /128-1) - 2^(- 21 /128-1), 2^(- 21 /128-1)
+        .quad 0x3FBE921789B52185, 0x3FDC67F12E57D14B  //2^( 22 /128-1) - 2^(- 22 /128-1), 2^(- 22 /128-1)
+        .quad 0x3FBFF79F0BEFA2C7, 0x3FDC40AB5FFFD07A  //2^( 23 /128-1) - 2^(- 23 /128-1), 2^(- 23 /128-1)
+        .quad 0x3FC0AEB1FECAE3A9, 0x3FDC199BDD85529C  //2^( 24 /128-1) - 2^(- 24 /128-1), 2^(- 24 /128-1)
+        .quad 0x3FC161B4871C5CEC, 0x3FDBF2C25BD71E09  //2^( 25 /128-1) - 2^(- 25 /128-1), 2^(- 25 /128-1)
+        .quad 0x3FC214D876F26FD0, 0x3FDBCC1E904BC1D2  //2^( 26 /128-1) - 2^(- 26 /128-1), 2^(- 26 /128-1)
+        .quad 0x3FC2C81F2693816F, 0x3FDBA5B030A1064A  //2^( 27 /128-1) - 2^(- 27 /128-1), 2^(- 27 /128-1)
+        .quad 0x3FC37B89EE88BEF7, 0x3FDB7F76F2FB5E47  //2^( 28 /128-1) - 2^(- 28 /128-1), 2^(- 28 /128-1)
+        .quad 0x3FC42F1A27A0B3CD, 0x3FDB59728DE5593A  //2^( 29 /128-1) - 2^(- 29 /128-1), 2^(- 29 /128-1)
+        .quad 0x3FC4E2D12AF1E037, 0x3FDB33A2B84F15FB  //2^( 30 /128-1) - 2^(- 30 /128-1), 2^(- 30 /128-1)
+        .quad 0x3FC596B051DD508D, 0x3FDB0E07298DB666  //2^( 31 /128-1) - 2^(- 31 /128-1), 2^(- 31 /128-1)
+        .quad 0x3FC64AB8F61134FA, 0x3FDAE89F995AD3AD  //2^( 32 /128-1) - 2^(- 32 /128-1), 2^(- 32 /128-1)
+        .quad 0x3FC6FEEC718B79D1, 0x3FDAC36BBFD3F37A  //2^( 33 /128-1) - 2^(- 33 /128-1), 2^(- 33 /128-1)
+        .quad 0x3FC7B34C1E9C607F, 0x3FDA9E6B5579FDBF  //2^( 34 /128-1) - 2^(- 34 /128-1), 2^(- 34 /128-1)
+        .quad 0x3FC867D957E91912, 0x3FDA799E1330B358  //2^( 35 /128-1) - 2^(- 35 /128-1), 2^(- 35 /128-1)
+        .quad 0x3FC91C95786E5C72, 0x3FDA5503B23E255D  //2^( 36 /128-1) - 2^(- 36 /128-1), 2^(- 36 /128-1)
+        .quad 0x3FC9D181DB83072F, 0x3FDA309BEC4A2D33  //2^( 37 /128-1) - 2^(- 37 /128-1), 2^(- 37 /128-1)
+        .quad 0x3FCA869FDCDAB512, 0x3FDA0C667B5DE565  //2^( 38 /128-1) - 2^(- 38 /128-1), 2^(- 38 /128-1)
+        .quad 0x3FCB3BF0D8885D4C, 0x3FD9E86319E32323  //2^( 39 /128-1) - 2^(- 39 /128-1), 2^(- 39 /128-1)
+        .quad 0x3FCBF1762B00EF69, 0x3FD9C49182A3F090  //2^( 40 /128-1) - 2^(- 40 /128-1), 2^(- 40 /128-1)
+        .quad 0x3FCCA731311DF0FB, 0x3FD9A0F170CA07BA  //2^( 41 /128-1) - 2^(- 41 /128-1), 2^(- 41 /128-1)
+        .quad 0x3FCD5D2348201C09, 0x3FD97D829FDE4E50  //2^( 42 /128-1) - 2^(- 42 /128-1), 2^(- 42 /128-1)
+        .quad 0x3FCE134DCDB1FE3E, 0x3FD95A44CBC8520F  //2^( 43 /128-1) - 2^(- 43 /128-1), 2^(- 43 /128-1)
+        .quad 0x3FCEC9B21FEA98EA, 0x3FD93737B0CDC5E5  //2^( 44 /128-1) - 2^(- 44 /128-1), 2^(- 44 /128-1)
+        .quad 0x3FCF80519D5001D3, 0x3FD9145B0B91FFC6  //2^( 45 /128-1) - 2^(- 45 /128-1), 2^(- 45 /128-1)
+        .quad 0x3FD01B96D26D026A, 0x3FD8F1AE99157736  //2^( 46 /128-1) - 2^(- 46 /128-1), 2^(- 46 /128-1)
+        .quad 0x3FD07723CAFA6331, 0x3FD8CF3216B5448C  //2^( 47 /128-1) - 2^(- 47 /128-1), 2^(- 47 /128-1)
+        .quad 0x3FD0D2D06841B373, 0x3FD8ACE5422AA0DB  //2^( 48 /128-1) - 2^(- 48 /128-1), 2^(- 48 /128-1)
+        .quad 0x3FD12E9D5A715381, 0x3FD88AC7D98A6699  //2^( 49 /128-1) - 2^(- 49 /128-1), 2^(- 49 /128-1)
+        .quad 0x3FD18A8B51F5C661, 0x3FD868D99B4492ED  //2^( 50 /128-1) - 2^(- 50 /128-1), 2^(- 50 /128-1)
+        .quad 0x3FD1E69AFF7B04D7, 0x3FD8471A4623C7AD  //2^( 51 /128-1) - 2^(- 51 /128-1), 2^(- 51 /128-1)
+        .quad 0x3FD242CD13EDD0F1, 0x3FD82589994CCE13  //2^( 52 /128-1) - 2^(- 52 /128-1), 2^(- 52 /128-1)
+        .quad 0x3FD29F22407D0A0C, 0x3FD80427543E1A12  //2^( 53 /128-1) - 2^(- 53 /128-1), 2^(- 53 /128-1)
+        .quad 0x3FD2FB9B369B0153, 0x3FD7E2F336CF4E62  //2^( 54 /128-1) - 2^(- 54 /128-1), 2^(- 54 /128-1)
+        .quad 0x3FD35838A7FECEC8, 0x3FD7C1ED0130C132  //2^( 55 /128-1) - 2^(- 55 /128-1), 2^(- 55 /128-1)
+        .quad 0x3FD3B4FB46A5A6CC, 0x3FD7A11473EB0187  //2^( 56 /128-1) - 2^(- 56 /128-1), 2^(- 56 /128-1)
+        .quad 0x3FD411E3C4D4302F, 0x3FD780694FDE5D3F  //2^( 57 /128-1) - 2^(- 57 /128-1), 2^(- 57 /128-1)
+        .quad 0x3FD46EF2D517DAC8, 0x3FD75FEB564267C9  //2^( 58 /128-1) - 2^(- 58 /128-1), 2^(- 58 /128-1)
+        .quad 0x3FD4CC292A48369E, 0x3FD73F9A48A58174  //2^( 59 /128-1) - 2^(- 59 /128-1), 2^(- 59 /128-1)
+        .quad 0x3FD5298777884B96, 0x3FD71F75E8EC5F74  //2^( 60 /128-1) - 2^(- 60 /128-1), 2^(- 60 /128-1)
+        .quad 0x3FD5870E7047F1BC, 0x3FD6FF7DF9519484  //2^( 61 /128-1) - 2^(- 61 /128-1), 2^(- 61 /128-1)
+        .quad 0x3FD5E4BEC8452A1A, 0x3FD6DFB23C651A2F  //2^( 62 /128-1) - 2^(- 62 /128-1), 2^(- 62 /128-1)
+        .quad 0x3FD64299338D7827, 0x3FD6C012750BDABF  //2^( 63 /128-1) - 2^(- 63 /128-1), 2^(- 63 /128-1)
+        .quad 0x3FD6A09E667F3BCD, 0x3FD6A09E667F3BCD  //2^( 64 /128-1) - 2^(- 64 /128-1), 2^(- 64 /128-1)
+        .quad 0x3FD6FECF15CB0C0B, 0x3FD68155D44CA973  //2^( 65 /128-1) - 2^(- 65 /128-1), 2^(- 65 /128-1)
+        .quad 0x3FD75D2BF6751239, 0x3FD6623882552225  //2^( 66 /128-1) - 2^(- 66 /128-1), 2^(- 66 /128-1)
+        .quad 0x3FD7BBB5BDD665E8, 0x3FD6434634CCC320  //2^( 67 /128-1) - 2^(- 67 /128-1), 2^(- 67 /128-1)
+        .quad 0x3FD81A6D219E6963, 0x3FD6247EB03A5585  //2^( 68 /128-1) - 2^(- 68 /128-1), 2^(- 68 /128-1)
+        .quad 0x3FD87952D7D426DF, 0x3FD605E1B976DC09  //2^( 69 /128-1) - 2^(- 69 /128-1), 2^(- 69 /128-1)
+        .quad 0x3FD8D86796D7AE49, 0x3FD5E76F15AD2148  //2^( 70 /128-1) - 2^(- 70 /128-1), 2^(- 70 /128-1)
+        .quad 0x3FD937AC156373C8, 0x3FD5C9268A5946B7  //2^( 71 /128-1) - 2^(- 71 /128-1), 2^(- 71 /128-1)
+        .quad 0x3FD997210A8DAEE4, 0x3FD5AB07DD485429  //2^( 72 /128-1) - 2^(- 72 /128-1), 2^(- 72 /128-1)
+        .quad 0x3FD9F6C72DC9BA68, 0x3FD58D12D497C7FD  //2^( 73 /128-1) - 2^(- 73 /128-1), 2^(- 73 /128-1)
+        .quad 0x3FDA569F36E974EA, 0x3FD56F4736B527DA  //2^( 74 /128-1) - 2^(- 74 /128-1), 2^(- 74 /128-1)
+        .quad 0x3FDAB6A9DE1EA215, 0x3FD551A4CA5D920F  //2^( 75 /128-1) - 2^(- 75 /128-1), 2^(- 75 /128-1)
+        .quad 0x3FDB16E7DBFC4CA3, 0x3FD5342B569D4F82  //2^( 76 /128-1) - 2^(- 76 /128-1), 2^(- 76 /128-1)
+        .quad 0x3FDB7759E9782918, 0x3FD516DAA2CF6642  //2^( 77 /128-1) - 2^(- 77 /128-1), 2^(- 77 /128-1)
+        .quad 0x3FDBD800BFEBF932, 0x3FD4F9B2769D2CA7  //2^( 78 /128-1) - 2^(- 78 /128-1), 2^(- 78 /128-1)
+        .quad 0x3FDC38DD1916F025, 0x3FD4DCB299FDDD0D  //2^( 79 /128-1) - 2^(- 79 /128-1), 2^(- 79 /128-1)
+        .quad 0x3FDC99EFAF1F1790, 0x3FD4BFDAD5362A27  //2^( 80 /128-1) - 2^(- 80 /128-1), 2^(- 80 /128-1)
+        .quad 0x3FDCFB393C92B539, 0x3FD4A32AF0D7D3DE  //2^( 81 /128-1) - 2^(- 81 /128-1), 2^(- 81 /128-1)
+        .quad 0x3FDD5CBA7C69B19C, 0x3FD486A2B5C13CD0  //2^( 82 /128-1) - 2^(- 82 /128-1), 2^(- 82 /128-1)
+        .quad 0x3FDDBE742A06FF34, 0x3FD46A41ED1D0057  //2^( 83 /128-1) - 2^(- 83 /128-1), 2^(- 83 /128-1)
+        .quad 0x3FDE2067013A029D, 0x3FD44E086061892D  //2^( 84 /128-1) - 2^(- 84 /128-1), 2^(- 84 /128-1)
+        .quad 0x3FDE8293BE3FFB87, 0x3FD431F5D950A897  //2^( 85 /128-1) - 2^(- 85 /128-1), 2^(- 85 /128-1)
+        .quad 0x3FDEE4FB1DC56E75, 0x3FD4160A21F72E2A  //2^( 86 /128-1) - 2^(- 86 /128-1), 2^(- 86 /128-1)
+        .quad 0x3FDF479DDCE78F58, 0x3FD3FA4504AC801C  //2^( 87 /128-1) - 2^(- 87 /128-1), 2^(- 87 /128-1)
+        .quad 0x3FDFAA7CB935ACFE, 0x3FD3DEA64C123422  //2^( 88 /128-1) - 2^(- 88 /128-1), 2^(- 88 /128-1)
+        .quad 0x3FE006CC38594EB1, 0x3FD3C32DC313A8E5  //2^( 89 /128-1) - 2^(- 89 /128-1), 2^(- 89 /128-1)
+        .quad 0x3FE03878E0EB1569, 0x3FD3A7DB34E59FF7  //2^( 90 /128-1) - 2^(- 90 /128-1), 2^(- 90 /128-1)
+        .quad 0x3FE06A44B5C74101, 0x3FD38CAE6D05D866  //2^( 91 /128-1) - 2^(- 91 /128-1), 2^(- 91 /128-1)
+        .quad 0x3FE09C3016A0D077, 0x3FD371A7373AA9CB  //2^( 92 /128-1) - 2^(- 92 /128-1), 2^(- 92 /128-1)
+        .quad 0x3FE0CE3B63676360, 0x3FD356C55F929FF1  //2^( 93 /128-1) - 2^(- 93 /128-1), 2^(- 93 /128-1)
+        .quad 0x3FE10066FC47F240, 0x3FD33C08B26416FF  //2^( 94 /128-1) - 2^(- 94 /128-1), 2^(- 94 /128-1)
+        .quad 0x3FE132B341AD8761, 0x3FD32170FC4CD831  //2^( 95 /128-1) - 2^(- 95 /128-1), 2^(- 95 /128-1)
+        .quad 0x3FE165209441F823, 0x3FD306FE0A31B715  //2^( 96 /128-1) - 2^(- 96 /128-1), 2^(- 96 /128-1)
+        .quad 0x3FE197AF54EE9EBB, 0x3FD2ECAFA93E2F56  //2^( 97 /128-1) - 2^(- 97 /128-1), 2^(- 97 /128-1)
+        .quad 0x3FE1CA5FE4DD1475, 0x3FD2D285A6E4030B  //2^( 98 /128-1) - 2^(- 98 /128-1), 2^(- 98 /128-1)
+        .quad 0x3FE1FD32A577EC72, 0x3FD2B87FD0DAD990  //2^( 99 /128-1) - 2^(- 99 /128-1), 2^(- 99 /128-1)
+        .quad 0x3FE23027F86B6ED6, 0x3FD29E9DF51FDEE1  //2^( 100 /128-1) - 2^(- 100 /128-1), 2^(- 100 /128-1)
+        .quad 0x3FE263403FA65489, 0x3FD284DFE1F56381  //2^( 101 /128-1) - 2^(- 101 /128-1), 2^(- 101 /128-1)
+        .quad 0x3FE2967BDD5A8364, 0x3FD26B4565E27CDD  //2^( 102 /128-1) - 2^(- 102 /128-1), 2^(- 102 /128-1)
+        .quad 0x3FE2C9DB33FDCAE9, 0x3FD251CE4FB2A63F  //2^( 103 /128-1) - 2^(- 103 /128-1), 2^(- 103 /128-1)
+        .quad 0x3FE2FD5EA64AA180, 0x3FD2387A6E756238  //2^( 104 /128-1) - 2^(- 104 /128-1), 2^(- 104 /128-1)
+        .quad 0x3FE331069740E22F, 0x3FD21F49917DDC96  //2^( 105 /128-1) - 2^(- 105 /128-1), 2^(- 105 /128-1)
+        .quad 0x3FE364D36A268AE0, 0x3FD2063B88628CD6  //2^( 106 /128-1) - 2^(- 106 /128-1), 2^(- 106 /128-1)
+        .quad 0x3FE398C582887B27, 0x3FD1ED5022FCD91D  //2^( 107 /128-1) - 2^(- 107 /128-1), 2^(- 107 /128-1)
+        .quad 0x3FE3CCDD443B3394, 0x3FD1D4873168B9AA  //2^( 108 /128-1) - 2^(- 108 /128-1), 2^(- 108 /128-1)
+        .quad 0x3FE4011B135B9590, 0x3FD1BBE084045CD4  //2^( 109 /128-1) - 2^(- 109 /128-1), 2^(- 109 /128-1)
+        .quad 0x3FE4357F544FA3C1, 0x3FD1A35BEB6FCB75  //2^( 110 /128-1) - 2^(- 110 /128-1), 2^(- 110 /128-1)
+        .quad 0x3FE46A0A6BC742FD, 0x3FD18AF9388C8DEA  //2^( 111 /128-1) - 2^(- 111 /128-1), 2^(- 111 /128-1)
+        .quad 0x3FE49EBCBEBCFBCA, 0x3FD172B83C7D517B  //2^( 112 /128-1) - 2^(- 112 /128-1), 2^(- 112 /128-1)
+        .quad 0x3FE4D396B276BC6F, 0x3FD15A98C8A58E51  //2^( 113 /128-1) - 2^(- 113 /128-1), 2^(- 113 /128-1)
+        .quad 0x3FE50898AC869B96, 0x3FD1429AAEA92DE0  //2^( 114 /128-1) - 2^(- 114 /128-1), 2^(- 114 /128-1)
+        .quad 0x3FE53DC312CB9B7A, 0x3FD12ABDC06C31CC  //2^( 115 /128-1) - 2^(- 115 /128-1), 2^(- 115 /128-1)
+        .quad 0x3FE573164B726DB6, 0x3FD11301D0125B51  //2^( 116 /128-1) - 2^(- 116 /128-1), 2^(- 116 /128-1)
+        .quad 0x3FE5A892BCF6379B, 0x3FD0FB66AFFED31B  //2^( 117 /128-1) - 2^(- 117 /128-1), 2^(- 117 /128-1)
+        .quad 0x3FE5DE38CE215725, 0x3FD0E3EC32D3D1A2  //2^( 118 /128-1) - 2^(- 118 /128-1), 2^(- 118 /128-1)
+        .quad 0x3FE61408E60E2888, 0x3FD0CC922B7247F7  //2^( 119 /128-1) - 2^(- 119 /128-1), 2^(- 119 /128-1)
+        .quad 0x3FE64A036C27CC52, 0x3FD0B5586CF9890F  //2^( 120 /128-1) - 2^(- 120 /128-1), 2^(- 120 /128-1)
+        .quad 0x3FE68028C82AEE2F, 0x3FD09E3ECAC6F383  //2^( 121 /128-1) - 2^(- 121 /128-1), 2^(- 121 /128-1)
+        .quad 0x3FE6B67962268C43, 0x3FD0874518759BC8  //2^( 122 /128-1) - 2^(- 122 /128-1), 2^(- 122 /128-1)
+        .quad 0x3FE6ECF5A27CBF28, 0x3FD0706B29DDF6DE  //2^( 123 /128-1) - 2^(- 123 /128-1), 2^(- 123 /128-1)
+        .quad 0x3FE7239DF1E38286, 0x3FD059B0D3158574  //2^( 124 /128-1) - 2^(- 124 /128-1), 2^(- 124 /128-1)
+        .quad 0x3FE75A72B9657E51, 0x3FD04315E86E7F85  //2^( 125 /128-1) - 2^(- 125 /128-1), 2^(- 125 /128-1)
+        .quad 0x3FE791746262D0A8, 0x3FD02C9A3E778061  //2^( 126 /128-1) - 2^(- 126 /128-1), 2^(- 126 /128-1)
+        .quad 0x3FE7C8A35691D856, 0x3FD0163DA9FB3335 //2^( 127 /128-1) - 2^(- 127 /128-1), 2^(- 127 /128-1)
+        .align 16
+        .quad 0x42C8000000000000, 0x42C8000000000000 /* _dbShifter = 1.5 * 2^(52-k)*/
+        .align 16
+        .long 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99         /* _iDomainRange 0x40861d9ac12a3e85 =(1021*2^K-0.5)*log(2)/2^K -needed for quick exp*/
+        .align 16
+        .quad 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD /* _dPC2 */
+        .align 16
+        .quad 0x3FC55555555554AD, 0x3FC55555555554AD /* _dPC3 */
+        .align 16
+        .quad 0x3FA55555CF16D299, 0x3FA55555CF16D299 /* _dPC4 */
+        .align 16
+        .quad 0x3F8111115712F425, 0x3F8111115712F425 /* _dPC5 */
+        .align 16
+        .quad 0x000000000000007f, 0x000000000000007f /* _lIndexMask */
+        .align 16
+        .type	__svml_dsinh_data_internal,@object
+        .size	__svml_dsinh_data_internal,.-__svml_dsinh_data_internal
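
The SSE4 kernel above, and the AVX2/AVX512 kernels that follow, all
implement the scheme from the ALGORITHM DESCRIPTION comment:
sinh(x) = (exp(x) - exp(-x))/2, with exp(|x|) decomposed as
2^M * 2^(j/128) * exp(r) and the 2^(±j/128-1) pairs taken from the _dbT
table.  A scalar sketch of that decomposition (illustration only:
single-step argument reduction, libm calls in place of the table and the
_dPC* polynomial, no overflow/NaN handling):

    #include <math.h>
    #include <stdint.h>

    static double
    sinh_sketch (double x)
    {
      const double ln2 = 0x1.62e42fefa39efp-1;   /* log(2) */
      double ax = fabs (x);

      /* |x| = n * log(2)/128 + r,  n = 128*m + j,  |r| <= log(2)/256.  */
      double n = nearbyint (ax * (128.0 / ln2));
      int64_t ni = (int64_t) n;
      int64_t m = ni >> 7;                        /* exponent part       */
      int64_t j = ni & 127;                       /* table index, 0..127 */
      double r = ax - n * (ln2 / 128.0);

      /* exp(|x|)  ~  2^m  * 2^(j/128)  * exp(r)
         exp(-|x|) ~  2^-m * 2^(-j/128) * exp(-r)  */
      double ep = ldexp (exp2 ((double) j / 128.0) * exp (r), (int) m);
      double em = ldexp (exp2 ((double) j / -128.0) * exp (-r), (int) -m);

      double s = 0.5 * (ep - em);
      return x < 0.0 ? -s : s;
    }

Keeping the 2^(j/128) factors in a table and using a short polynomial for
exp(r) is what lets the vector kernels stay branch-free until the final
special-case check.
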
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core-sse.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core-sse.S
new file mode 100644
index 0000000000..ae531575fe
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core-sse.S
@@ -0,0 +1,20 @@
+/* SSE version of vectorized sinh, vector length is 4.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define _ZGVdN4v_sinh _ZGVdN4v_sinh_sse_wrapper
+#include "../svml_d_sinh4_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core.c b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core.c
new file mode 100644
index 0000000000..bdf10b664b
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core.c
@@ -0,0 +1,27 @@
+/* Multiple versions of vectorized sinh, vector length is 4.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define SYMBOL_NAME _ZGVdN4v_sinh
+#include "ifunc-mathvec-avx2.h"
+
+libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
+
+#ifdef SHARED
+__hidden_ver1 (_ZGVdN4v_sinh, __GI__ZGVdN4v_sinh, __redirect__ZGVdN4v_sinh)
+  __attribute__ ((visibility ("hidden")));
+#endif
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S
new file mode 100644
index 0000000000..27b50d31a8
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh4_core_avx2.S
@@ -0,0 +1,470 @@
+/* Function sinh vectorized with AVX2.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   https://www.gnu.org/licenses/.  */
+
+/*
+ * ALGORITHM DESCRIPTION:
+ *
+ *   Compute sinh(x) as (exp(x)-exp(-x))/2,
+ *   where exp is calculated as
+ *   exp(M*ln2 + ln2*(j/2^k) + r) = 2^M * 2^(j/2^k) * exp(r)
+ *
+ *   Special cases:
+ *
+ *   sinh(NaN) = quiet NaN, and raise invalid exception
+ *   sinh(INF) = that INF
+ *   sinh(x)   = x for subnormals
+ *   sinh(x) overflows for big x and returns MAXLOG+log(2)
+ *
+ */
+
+/* Offsets for data table __svml_dsinh_data_internal
+ */
+#define _dbInvLn2                     	0
+#define _dbLn2hi                      	32
+#define _dbLn2lo                      	64
+#define _dSign                        	96
+#define _dbT                          	128
+#define _dbShifter                    	2176
+#define _iDomainRange                 	2208
+#define _dPC2                         	2240
+#define _dPC3                         	2272
+#define _dPC4                         	2304
+#define _dPC5                         	2336
+#define _lIndexMask                   	2368
+
+#include <sysdep.h>
+
+        .text
+	.section .text.avx2,"ax",@progbits
+ENTRY(_ZGVdN4v_sinh_avx2)
+        pushq     %rbp
+        cfi_def_cfa_offset(16)
+        movq      %rsp, %rbp
+        cfi_def_cfa(6, 16)
+        cfi_offset(6, -16)
+        andq      $-32, %rsp
+        subq      $96, %rsp
+        lea       _dbT+8+__svml_dsinh_data_internal(%rip), %r8
+        vmovupd   _dbShifter+__svml_dsinh_data_internal(%rip), %ymm12
+
+/*
+ *  Load argument
+ * dM = x*2^K/log(2) + RShifter
+ */
+        vmovupd   _dbInvLn2+__svml_dsinh_data_internal(%rip), %ymm5
+        vmovupd   _dbLn2hi+__svml_dsinh_data_internal(%rip), %ymm13
+        vmovapd   %ymm0, %ymm8
+
+/*
+ * VLOAD_CONST( D, dPC[0],         TAB._dPC1 );
+ *  Abs argument
+ */
+        vandpd    _dSign+__svml_dsinh_data_internal(%rip), %ymm8, %ymm7
+        vxorpd    %ymm8, %ymm7, %ymm6
+        vfmadd213pd %ymm12, %ymm6, %ymm5
+
+/*
+ *  R
+ * dN = dM - RShifter
+ */
+        vsubpd    %ymm12, %ymm5, %ymm3
+
+/*
+ *  Index and lookup
+ * j
+ */
+        vandps    _lIndexMask+__svml_dsinh_data_internal(%rip), %ymm5, %ymm4
+
+/*
+ * Check for overflow/underflow
+ *
+ */
+        vextractf128 $1, %ymm6, %xmm9
+        vshufps   $221, %xmm9, %xmm6, %xmm10
+
+/* dR = dX - dN*Log2_hi/2^K */
+        vfnmadd231pd %ymm13, %ymm3, %ymm6
+        vpcmpgtd  _iDomainRange+__svml_dsinh_data_internal(%rip), %xmm10, %xmm11
+        vmovmskps %xmm11, %eax
+
+/* dR = (dX - dN*Log2_hi/2^K) - dN*Log2_lo/2^K */
+        vfnmadd231pd _dbLn2lo+__svml_dsinh_data_internal(%rip), %ymm3, %ymm6
+        vextractf128 $1, %ymm4, %xmm0
+        vmovd     %xmm4, %edx
+        vmovd     %xmm0, %esi
+        shll      $4, %edx
+        vpextrd   $2, %xmm4, %ecx
+
+/* split j and N */
+        vxorps    %ymm4, %ymm5, %ymm3
+        shll      $4, %esi
+        vpextrd   $2, %xmm0, %edi
+        shll      $4, %ecx
+
+/*
+ *  G1,G2,G3: dTdif,dTn * 2^N,2^(-N)
+ * lM now is an EXP(2^N)
+ */
+        vpsllq    $45, %ymm3, %ymm4
+        vmovq     (%rdx,%r8), %xmm14
+        vmovq     (%rsi,%r8), %xmm1
+        vmovhpd   (%rcx,%r8), %xmm14, %xmm15
+        shll      $4, %edi
+        vmovhpd   (%rdi,%r8), %xmm1, %xmm2
+
+/* dR2 = dR^2 */
+        vmulpd    %ymm6, %ymm6, %ymm1
+        vmovq     -8(%rdx,%r8), %xmm9
+        vmovq     -8(%rsi,%r8), %xmm11
+        vmovhpd   -8(%rcx,%r8), %xmm9, %xmm10
+        vmovhpd   -8(%rdi,%r8), %xmm11, %xmm12
+        vinsertf128 $1, %xmm2, %ymm15, %ymm2
+
+/*  */
+        vpaddq    %ymm4, %ymm2, %ymm5
+
+/*  */
+        vpsubq    %ymm4, %ymm2, %ymm14
+
+/* dG3 = dTn*2^N + dTn*2^-N */
+        vaddpd    %ymm14, %ymm5, %ymm2
+
+/* dG2 = dTn*2^N - dTn*2^-N */
+        vsubpd    %ymm14, %ymm5, %ymm14
+
+/*
+ * sinh(r) = r*((a1=1)+r^2*(a3+r^2*a5)) = r + r*(r^2*(a3+r^2*a5)) ....
+ * dSinh_r = (a3+r^2*a5)
+ */
+        vmovupd   _dPC5+__svml_dsinh_data_internal(%rip), %ymm5
+        vfmadd213pd _dPC3+__svml_dsinh_data_internal(%rip), %ymm1, %ymm5
+        vinsertf128 $1, %xmm12, %ymm10, %ymm13
+        vpaddq    %ymm4, %ymm13, %ymm0
+
+/* dSinh_r = r^2*(a3+r^2*a5) */
+        vmulpd    %ymm5, %ymm1, %ymm4
+
+/* dG2 += dG1 */
+        vaddpd    %ymm14, %ymm0, %ymm3
+
+/* dG1 += dG3 */
+        vaddpd    %ymm2, %ymm0, %ymm0
+
+/* dSinh_r = r + r*(r^2*(a3+r^2*a5)) */
+        vfmadd213pd %ymm6, %ymm6, %ymm4
+
+/*
+ * poly(r) = (dG2+dG1)+dG3*sinh(dR)+dG1*sinh(dR)+(dG1+dG2)*dR2*(a2 +a4*dR2)
+ * dOut = (a2 +a4*dR2)
+ */
+        vmovupd   _dPC4+__svml_dsinh_data_internal(%rip), %ymm6
+        vfmadd213pd _dPC2+__svml_dsinh_data_internal(%rip), %ymm1, %ymm6
+
+/* dOut = dR2*(a2 +a4*dR2) */
+        vmulpd    %ymm6, %ymm1, %ymm1
+
+/* dOut = dG2*dR2*(a2 +a4*dR2) */
+        vmulpd    %ymm3, %ymm1, %ymm6
+
+/* dOut = dG1*sinh(dR)+dG2*dR2*(a2 +a4*dR2) */
+        vfmadd213pd %ymm6, %ymm0, %ymm4
+
+/* dOut = dG2 + dG1*sinh(dR)+dG2*dR2*(a2 +a4*dR2) */
+        vaddpd    %ymm4, %ymm3, %ymm5
+
+/*  Ret H  */
+        vorpd     %ymm5, %ymm7, %ymm0
+        testl     %eax, %eax
+
+/* Go to special inputs processing branch */
+        jne       L(SPECIAL_VALUES_BRANCH)
+                                # LOE rbx r12 r13 r14 r15 eax ymm0 ymm8
+
+/* Restore registers
+ * and exit the function
+ */
+
+L(EXIT):
+        movq      %rbp, %rsp
+        popq      %rbp
+        cfi_def_cfa(7, 8)
+        cfi_restore(6)
+        ret
+        cfi_def_cfa(6, 16)
+        cfi_offset(6, -16)
+
+/* Branch to process
+ * special inputs
+ */
+
+L(SPECIAL_VALUES_BRANCH):
+        vmovupd   %ymm8, 32(%rsp)
+        vmovupd   %ymm0, 64(%rsp)
+                                # LOE rbx r12 r13 r14 r15 eax ymm0
+
+        xorl      %edx, %edx
+                                # LOE rbx r12 r13 r14 r15 eax edx
+
+        vzeroupper
+        movq      %r12, 16(%rsp)
+        /*  DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
+        movl      %edx, %r12d
+        movq      %r13, 8(%rsp)
+        /*  DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
+        movl      %eax, %r13d
+        movq      %r14, (%rsp)
+        /*  DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
+                                # LOE rbx r15 r12d r13d
+
+/* Range mask
+ * bits check
+ */
+
+L(RANGEMASK_CHECK):
+        btl       %r12d, %r13d
+
+/* Call scalar math function */
+        jc        L(SCALAR_MATH_CALL)
+                                # LOE rbx r15 r12d r13d
+
+/* Special inputs
+ * processing loop
+ */
+
+L(SPECIAL_VALUES_LOOP):
+        incl      %r12d
+        cmpl      $4, %r12d
+
+/* Check bits in range mask */
+        jl        L(RANGEMASK_CHECK)
+                                # LOE rbx r15 r12d r13d
+
+        movq      16(%rsp), %r12
+        cfi_restore(12)
+        movq      8(%rsp), %r13
+        cfi_restore(13)
+        movq      (%rsp), %r14
+        cfi_restore(14)
+        vmovupd   64(%rsp), %ymm0
+
+/* Go to exit */
+        jmp       L(EXIT)
+        /*  DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
+        /*  DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
+        /*  DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
+                                # LOE rbx r12 r13 r14 r15 ymm0
+
+/* Scalar math function call
+ * to process special input
+ */
+
+L(SCALAR_MATH_CALL):
+        movl      %r12d, %r14d
+        movsd     32(%rsp,%r14,8), %xmm0
+        call      sinh@PLT
+                                # LOE rbx r14 r15 r12d r13d xmm0
+
+        movsd     %xmm0, 64(%rsp,%r14,8)
+
+/* Process special inputs in loop */
+        jmp       L(SPECIAL_VALUES_LOOP)
+                                # LOE rbx r15 r12d r13d
+END(_ZGVdN4v_sinh_avx2)
+
+        .section .rodata, "a"
+        .align 32
+
+#ifdef __svml_dsinh_data_internal_typedef
+typedef unsigned int VUINT32;
+typedef struct
+{
+        __declspec(align(32)) VUINT32 _dbInvLn2[4][2];
+        __declspec(align(32)) VUINT32 _dbLn2hi[4][2];
+        __declspec(align(32)) VUINT32 _dbLn2lo[4][2];
+        __declspec(align(32)) VUINT32 _dSign[4][2];                //0x8000000000000000
+        __declspec(align(32)) VUINT32 _dbT[(1<<7)][2][2]; //precalc poly coeff
+        __declspec(align(32)) VUINT32 _dbShifter[4][2];
+        __declspec(align(32)) VUINT32 _iDomainRange[8][1];
+        __declspec(align(32)) VUINT32 _dPC2[4][2];
+        __declspec(align(32)) VUINT32 _dPC3[4][2];
+        __declspec(align(32)) VUINT32 _dPC4[4][2];
+        __declspec(align(32)) VUINT32 _dPC5[4][2];
+        __declspec(align(32)) VUINT32 _lIndexMask[4][2];
+} __svml_dsinh_data_internal;
+#endif
+__svml_dsinh_data_internal:
+        .quad 0x3FF71547652B82FE, 0x3FF71547652B82FE, 0x3FF71547652B82FE, 0x3FF71547652B82FE /* _dbInvLn2 = 1/log(2) */
+        .align 32
+        .quad 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000 /* _dbLn2hi  = log(2) hi*/
+        .align 32
+        .quad 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A /* _dbLn2lo  = log(2) lo*/
+        .align 32
+        .quad 0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000 /* _dSign */
+        //_dbT
+        .align 32
+        .quad 0x0000000000000000, 0x3FE0000000000000  //2^( 0 /128-1) - 2^(- 0 /128-1), 2^(- 0 /128-1)
+        .quad 0x3F762E4A19BD1E74, 0x3FDFD3C22B8F71F1  //2^( 1 /128-1) - 2^(- 1 /128-1), 2^(- 1 /128-1)
+        .quad 0x3F862E5F6A0DFD36, 0x3FDFA7C1819E90D8  //2^( 2 /128-1) - 2^(- 2 /128-1), 2^(- 2 /128-1)
+        .quad 0x3F90A2E234040F5F, 0x3FDF7BFDAD9CBE14  //2^( 3 /128-1) - 2^(- 3 /128-1), 2^(- 3 /128-1)
+        .quad 0x3F962EB4ABCC5A81, 0x3FDF50765B6E4540  //2^( 4 /128-1) - 2^(- 4 /128-1), 2^(- 4 /128-1)
+        .quad 0x3F9BBAB1C5033244, 0x3FDF252B376BBA97  //2^( 5 /128-1) - 2^(- 5 /128-1), 2^(- 5 /128-1)
+        .quad 0x3FA0A372144EEB45, 0x3FDEFA1BEE615A27  //2^( 6 /128-1) - 2^(- 6 /128-1), 2^(- 6 /128-1)
+        .quad 0x3FA369AB3FFBF8B0, 0x3FDECF482D8E67F1  //2^( 7 /128-1) - 2^(- 7 /128-1), 2^(- 7 /128-1)
+        .quad 0x3FA63009BA740A2A, 0x3FDEA4AFA2A490DA  //2^( 8 /128-1) - 2^(- 8 /128-1), 2^(- 8 /128-1)
+        .quad 0x3FA8F692D8EA1B5A, 0x3FDE7A51FBC74C83  //2^( 9 /128-1) - 2^(- 9 /128-1), 2^(- 9 /128-1)
+        .quad 0x3FABBD4BF0E31A6F, 0x3FDE502EE78B3FF6  //2^( 10 /128-1) - 2^(- 10 /128-1), 2^(- 10 /128-1)
+        .quad 0x3FAE843A5840286A, 0x3FDE264614F5A129  //2^( 11 /128-1) - 2^(- 11 /128-1), 2^(- 11 /128-1)
+        .quad 0x3FB0A5B1B2A46D0A, 0x3FDDFC97337B9B5F  //2^( 12 /128-1) - 2^(- 12 /128-1), 2^(- 12 /128-1)
+        .quad 0x3FB20966375ABCDF, 0x3FDDD321F301B460  //2^( 13 /128-1) - 2^(- 13 /128-1), 2^(- 13 /128-1)
+        .quad 0x3FB36D3D65DCA4E8, 0x3FDDA9E603DB3285  //2^( 14 /128-1) - 2^(- 14 /128-1), 2^(- 14 /128-1)
+        .quad 0x3FB4D139EA06642A, 0x3FDD80E316C98398  //2^( 15 /128-1) - 2^(- 15 /128-1), 2^(- 15 /128-1)
+        .quad 0x3FB6355E6FFBF9BA, 0x3FDD5818DCFBA487  //2^( 16 /128-1) - 2^(- 16 /128-1), 2^(- 16 /128-1)
+        .quad 0x3FB799ADA42E4788, 0x3FDD2F87080D89F2  //2^( 17 /128-1) - 2^(- 17 /128-1), 2^(- 17 /128-1)
+        .quad 0x3FB8FE2A336035BC, 0x3FDD072D4A07897C  //2^( 18 /128-1) - 2^(- 18 /128-1), 2^(- 18 /128-1)
+        .quad 0x3FBA62D6CAABD6B6, 0x3FDCDF0B555DC3FA  //2^( 19 /128-1) - 2^(- 19 /128-1), 2^(- 19 /128-1)
+        .quad 0x3FBBC7B617878BAF, 0x3FDCB720DCEF9069  //2^( 20 /128-1) - 2^(- 20 /128-1), 2^(- 20 /128-1)
+        .quad 0x3FBD2CCAC7CB2A11, 0x3FDC8F6D9406E7B5  //2^( 21 /128-1) - 2^(- 21 /128-1), 2^(- 21 /128-1)
+        .quad 0x3FBE921789B52185, 0x3FDC67F12E57D14B  //2^( 22 /128-1) - 2^(- 22 /128-1), 2^(- 22 /128-1)
+        .quad 0x3FBFF79F0BEFA2C7, 0x3FDC40AB5FFFD07A  //2^( 23 /128-1) - 2^(- 23 /128-1), 2^(- 23 /128-1)
+        .quad 0x3FC0AEB1FECAE3A9, 0x3FDC199BDD85529C  //2^( 24 /128-1) - 2^(- 24 /128-1), 2^(- 24 /128-1)
+        .quad 0x3FC161B4871C5CEC, 0x3FDBF2C25BD71E09  //2^( 25 /128-1) - 2^(- 25 /128-1), 2^(- 25 /128-1)
+        .quad 0x3FC214D876F26FD0, 0x3FDBCC1E904BC1D2  //2^( 26 /128-1) - 2^(- 26 /128-1), 2^(- 26 /128-1)
+        .quad 0x3FC2C81F2693816F, 0x3FDBA5B030A1064A  //2^( 27 /128-1) - 2^(- 27 /128-1), 2^(- 27 /128-1)
+        .quad 0x3FC37B89EE88BEF7, 0x3FDB7F76F2FB5E47  //2^( 28 /128-1) - 2^(- 28 /128-1), 2^(- 28 /128-1)
+        .quad 0x3FC42F1A27A0B3CD, 0x3FDB59728DE5593A  //2^( 29 /128-1) - 2^(- 29 /128-1), 2^(- 29 /128-1)
+        .quad 0x3FC4E2D12AF1E037, 0x3FDB33A2B84F15FB  //2^( 30 /128-1) - 2^(- 30 /128-1), 2^(- 30 /128-1)
+        .quad 0x3FC596B051DD508D, 0x3FDB0E07298DB666  //2^( 31 /128-1) - 2^(- 31 /128-1), 2^(- 31 /128-1)
+        .quad 0x3FC64AB8F61134FA, 0x3FDAE89F995AD3AD  //2^( 32 /128-1) - 2^(- 32 /128-1), 2^(- 32 /128-1)
+        .quad 0x3FC6FEEC718B79D1, 0x3FDAC36BBFD3F37A  //2^( 33 /128-1) - 2^(- 33 /128-1), 2^(- 33 /128-1)
+        .quad 0x3FC7B34C1E9C607F, 0x3FDA9E6B5579FDBF  //2^( 34 /128-1) - 2^(- 34 /128-1), 2^(- 34 /128-1)
+        .quad 0x3FC867D957E91912, 0x3FDA799E1330B358  //2^( 35 /128-1) - 2^(- 35 /128-1), 2^(- 35 /128-1)
+        .quad 0x3FC91C95786E5C72, 0x3FDA5503B23E255D  //2^( 36 /128-1) - 2^(- 36 /128-1), 2^(- 36 /128-1)
+        .quad 0x3FC9D181DB83072F, 0x3FDA309BEC4A2D33  //2^( 37 /128-1) - 2^(- 37 /128-1), 2^(- 37 /128-1)
+        .quad 0x3FCA869FDCDAB512, 0x3FDA0C667B5DE565  //2^( 38 /128-1) - 2^(- 38 /128-1), 2^(- 38 /128-1)
+        .quad 0x3FCB3BF0D8885D4C, 0x3FD9E86319E32323  //2^( 39 /128-1) - 2^(- 39 /128-1), 2^(- 39 /128-1)
+        .quad 0x3FCBF1762B00EF69, 0x3FD9C49182A3F090  //2^( 40 /128-1) - 2^(- 40 /128-1), 2^(- 40 /128-1)
+        .quad 0x3FCCA731311DF0FB, 0x3FD9A0F170CA07BA  //2^( 41 /128-1) - 2^(- 41 /128-1), 2^(- 41 /128-1)
+        .quad 0x3FCD5D2348201C09, 0x3FD97D829FDE4E50  //2^( 42 /128-1) - 2^(- 42 /128-1), 2^(- 42 /128-1)
+        .quad 0x3FCE134DCDB1FE3E, 0x3FD95A44CBC8520F  //2^( 43 /128-1) - 2^(- 43 /128-1), 2^(- 43 /128-1)
+        .quad 0x3FCEC9B21FEA98EA, 0x3FD93737B0CDC5E5  //2^( 44 /128-1) - 2^(- 44 /128-1), 2^(- 44 /128-1)
+        .quad 0x3FCF80519D5001D3, 0x3FD9145B0B91FFC6  //2^( 45 /128-1) - 2^(- 45 /128-1), 2^(- 45 /128-1)
+        .quad 0x3FD01B96D26D026A, 0x3FD8F1AE99157736  //2^( 46 /128-1) - 2^(- 46 /128-1), 2^(- 46 /128-1)
+        .quad 0x3FD07723CAFA6331, 0x3FD8CF3216B5448C  //2^( 47 /128-1) - 2^(- 47 /128-1), 2^(- 47 /128-1)
+        .quad 0x3FD0D2D06841B373, 0x3FD8ACE5422AA0DB  //2^( 48 /128-1) - 2^(- 48 /128-1), 2^(- 48 /128-1)
+        .quad 0x3FD12E9D5A715381, 0x3FD88AC7D98A6699  //2^( 49 /128-1) - 2^(- 49 /128-1), 2^(- 49 /128-1)
+        .quad 0x3FD18A8B51F5C661, 0x3FD868D99B4492ED  //2^( 50 /128-1) - 2^(- 50 /128-1), 2^(- 50 /128-1)
+        .quad 0x3FD1E69AFF7B04D7, 0x3FD8471A4623C7AD  //2^( 51 /128-1) - 2^(- 51 /128-1), 2^(- 51 /128-1)
+        .quad 0x3FD242CD13EDD0F1, 0x3FD82589994CCE13  //2^( 52 /128-1) - 2^(- 52 /128-1), 2^(- 52 /128-1)
+        .quad 0x3FD29F22407D0A0C, 0x3FD80427543E1A12  //2^( 53 /128-1) - 2^(- 53 /128-1), 2^(- 53 /128-1)
+        .quad 0x3FD2FB9B369B0153, 0x3FD7E2F336CF4E62  //2^( 54 /128-1) - 2^(- 54 /128-1), 2^(- 54 /128-1)
+        .quad 0x3FD35838A7FECEC8, 0x3FD7C1ED0130C132  //2^( 55 /128-1) - 2^(- 55 /128-1), 2^(- 55 /128-1)
+        .quad 0x3FD3B4FB46A5A6CC, 0x3FD7A11473EB0187  //2^( 56 /128-1) - 2^(- 56 /128-1), 2^(- 56 /128-1)
+        .quad 0x3FD411E3C4D4302F, 0x3FD780694FDE5D3F  //2^( 57 /128-1) - 2^(- 57 /128-1), 2^(- 57 /128-1)
+        .quad 0x3FD46EF2D517DAC8, 0x3FD75FEB564267C9  //2^( 58 /128-1) - 2^(- 58 /128-1), 2^(- 58 /128-1)
+        .quad 0x3FD4CC292A48369E, 0x3FD73F9A48A58174  //2^( 59 /128-1) - 2^(- 59 /128-1), 2^(- 59 /128-1)
+        .quad 0x3FD5298777884B96, 0x3FD71F75E8EC5F74  //2^( 60 /128-1) - 2^(- 60 /128-1), 2^(- 60 /128-1)
+        .quad 0x3FD5870E7047F1BC, 0x3FD6FF7DF9519484  //2^( 61 /128-1) - 2^(- 61 /128-1), 2^(- 61 /128-1)
+        .quad 0x3FD5E4BEC8452A1A, 0x3FD6DFB23C651A2F  //2^( 62 /128-1) - 2^(- 62 /128-1), 2^(- 62 /128-1)
+        .quad 0x3FD64299338D7827, 0x3FD6C012750BDABF  //2^( 63 /128-1) - 2^(- 63 /128-1), 2^(- 63 /128-1)
+        .quad 0x3FD6A09E667F3BCD, 0x3FD6A09E667F3BCD  //2^( 64 /128-1) - 2^(- 64 /128-1), 2^(- 64 /128-1)
+        .quad 0x3FD6FECF15CB0C0B, 0x3FD68155D44CA973  //2^( 65 /128-1) - 2^(- 65 /128-1), 2^(- 65 /128-1)
+        .quad 0x3FD75D2BF6751239, 0x3FD6623882552225  //2^( 66 /128-1) - 2^(- 66 /128-1), 2^(- 66 /128-1)
+        .quad 0x3FD7BBB5BDD665E8, 0x3FD6434634CCC320  //2^( 67 /128-1) - 2^(- 67 /128-1), 2^(- 67 /128-1)
+        .quad 0x3FD81A6D219E6963, 0x3FD6247EB03A5585  //2^( 68 /128-1) - 2^(- 68 /128-1), 2^(- 68 /128-1)
+        .quad 0x3FD87952D7D426DF, 0x3FD605E1B976DC09  //2^( 69 /128-1) - 2^(- 69 /128-1), 2^(- 69 /128-1)
+        .quad 0x3FD8D86796D7AE49, 0x3FD5E76F15AD2148  //2^( 70 /128-1) - 2^(- 70 /128-1), 2^(- 70 /128-1)
+        .quad 0x3FD937AC156373C8, 0x3FD5C9268A5946B7  //2^( 71 /128-1) - 2^(- 71 /128-1), 2^(- 71 /128-1)
+        .quad 0x3FD997210A8DAEE4, 0x3FD5AB07DD485429  //2^( 72 /128-1) - 2^(- 72 /128-1), 2^(- 72 /128-1)
+        .quad 0x3FD9F6C72DC9BA68, 0x3FD58D12D497C7FD  //2^( 73 /128-1) - 2^(- 73 /128-1), 2^(- 73 /128-1)
+        .quad 0x3FDA569F36E974EA, 0x3FD56F4736B527DA  //2^( 74 /128-1) - 2^(- 74 /128-1), 2^(- 74 /128-1)
+        .quad 0x3FDAB6A9DE1EA215, 0x3FD551A4CA5D920F  //2^( 75 /128-1) - 2^(- 75 /128-1), 2^(- 75 /128-1)
+        .quad 0x3FDB16E7DBFC4CA3, 0x3FD5342B569D4F82  //2^( 76 /128-1) - 2^(- 76 /128-1), 2^(- 76 /128-1)
+        .quad 0x3FDB7759E9782918, 0x3FD516DAA2CF6642  //2^( 77 /128-1) - 2^(- 77 /128-1), 2^(- 77 /128-1)
+        .quad 0x3FDBD800BFEBF932, 0x3FD4F9B2769D2CA7  //2^( 78 /128-1) - 2^(- 78 /128-1), 2^(- 78 /128-1)
+        .quad 0x3FDC38DD1916F025, 0x3FD4DCB299FDDD0D  //2^( 79 /128-1) - 2^(- 79 /128-1), 2^(- 79 /128-1)
+        .quad 0x3FDC99EFAF1F1790, 0x3FD4BFDAD5362A27  //2^( 80 /128-1) - 2^(- 80 /128-1), 2^(- 80 /128-1)
+        .quad 0x3FDCFB393C92B539, 0x3FD4A32AF0D7D3DE  //2^( 81 /128-1) - 2^(- 81 /128-1), 2^(- 81 /128-1)
+        .quad 0x3FDD5CBA7C69B19C, 0x3FD486A2B5C13CD0  //2^( 82 /128-1) - 2^(- 82 /128-1), 2^(- 82 /128-1)
+        .quad 0x3FDDBE742A06FF34, 0x3FD46A41ED1D0057  //2^( 83 /128-1) - 2^(- 83 /128-1), 2^(- 83 /128-1)
+        .quad 0x3FDE2067013A029D, 0x3FD44E086061892D  //2^( 84 /128-1) - 2^(- 84 /128-1), 2^(- 84 /128-1)
+        .quad 0x3FDE8293BE3FFB87, 0x3FD431F5D950A897  //2^( 85 /128-1) - 2^(- 85 /128-1), 2^(- 85 /128-1)
+        .quad 0x3FDEE4FB1DC56E75, 0x3FD4160A21F72E2A  //2^( 86 /128-1) - 2^(- 86 /128-1), 2^(- 86 /128-1)
+        .quad 0x3FDF479DDCE78F58, 0x3FD3FA4504AC801C  //2^( 87 /128-1) - 2^(- 87 /128-1), 2^(- 87 /128-1)
+        .quad 0x3FDFAA7CB935ACFE, 0x3FD3DEA64C123422  //2^( 88 /128-1) - 2^(- 88 /128-1), 2^(- 88 /128-1)
+        .quad 0x3FE006CC38594EB1, 0x3FD3C32DC313A8E5  //2^( 89 /128-1) - 2^(- 89 /128-1), 2^(- 89 /128-1)
+        .quad 0x3FE03878E0EB1569, 0x3FD3A7DB34E59FF7  //2^( 90 /128-1) - 2^(- 90 /128-1), 2^(- 90 /128-1)
+        .quad 0x3FE06A44B5C74101, 0x3FD38CAE6D05D866  //2^( 91 /128-1) - 2^(- 91 /128-1), 2^(- 91 /128-1)
+        .quad 0x3FE09C3016A0D077, 0x3FD371A7373AA9CB  //2^( 92 /128-1) - 2^(- 92 /128-1), 2^(- 92 /128-1)
+        .quad 0x3FE0CE3B63676360, 0x3FD356C55F929FF1  //2^( 93 /128-1) - 2^(- 93 /128-1), 2^(- 93 /128-1)
+        .quad 0x3FE10066FC47F240, 0x3FD33C08B26416FF  //2^( 94 /128-1) - 2^(- 94 /128-1), 2^(- 94 /128-1)
+        .quad 0x3FE132B341AD8761, 0x3FD32170FC4CD831  //2^( 95 /128-1) - 2^(- 95 /128-1), 2^(- 95 /128-1)
+        .quad 0x3FE165209441F823, 0x3FD306FE0A31B715  //2^( 96 /128-1) - 2^(- 96 /128-1), 2^(- 96 /128-1)
+        .quad 0x3FE197AF54EE9EBB, 0x3FD2ECAFA93E2F56  //2^( 97 /128-1) - 2^(- 97 /128-1), 2^(- 97 /128-1)
+        .quad 0x3FE1CA5FE4DD1475, 0x3FD2D285A6E4030B  //2^( 98 /128-1) - 2^(- 98 /128-1), 2^(- 98 /128-1)
+        .quad 0x3FE1FD32A577EC72, 0x3FD2B87FD0DAD990  //2^( 99 /128-1) - 2^(- 99 /128-1), 2^(- 99 /128-1)
+        .quad 0x3FE23027F86B6ED6, 0x3FD29E9DF51FDEE1  //2^( 100 /128-1) - 2^(- 100 /128-1), 2^(- 100 /128-1)
+        .quad 0x3FE263403FA65489, 0x3FD284DFE1F56381  //2^( 101 /128-1) - 2^(- 101 /128-1), 2^(- 101 /128-1)
+        .quad 0x3FE2967BDD5A8364, 0x3FD26B4565E27CDD  //2^( 102 /128-1) - 2^(- 102 /128-1), 2^(- 102 /128-1)
+        .quad 0x3FE2C9DB33FDCAE9, 0x3FD251CE4FB2A63F  //2^( 103 /128-1) - 2^(- 103 /128-1), 2^(- 103 /128-1)
+        .quad 0x3FE2FD5EA64AA180, 0x3FD2387A6E756238  //2^( 104 /128-1) - 2^(- 104 /128-1), 2^(- 104 /128-1)
+        .quad 0x3FE331069740E22F, 0x3FD21F49917DDC96  //2^( 105 /128-1) - 2^(- 105 /128-1), 2^(- 105 /128-1)
+        .quad 0x3FE364D36A268AE0, 0x3FD2063B88628CD6  //2^( 106 /128-1) - 2^(- 106 /128-1), 2^(- 106 /128-1)
+        .quad 0x3FE398C582887B27, 0x3FD1ED5022FCD91D  //2^( 107 /128-1) - 2^(- 107 /128-1), 2^(- 107 /128-1)
+        .quad 0x3FE3CCDD443B3394, 0x3FD1D4873168B9AA  //2^( 108 /128-1) - 2^(- 108 /128-1), 2^(- 108 /128-1)
+        .quad 0x3FE4011B135B9590, 0x3FD1BBE084045CD4  //2^( 109 /128-1) - 2^(- 109 /128-1), 2^(- 109 /128-1)
+        .quad 0x3FE4357F544FA3C1, 0x3FD1A35BEB6FCB75  //2^( 110 /128-1) - 2^(- 110 /128-1), 2^(- 110 /128-1)
+        .quad 0x3FE46A0A6BC742FD, 0x3FD18AF9388C8DEA  //2^( 111 /128-1) - 2^(- 111 /128-1), 2^(- 111 /128-1)
+        .quad 0x3FE49EBCBEBCFBCA, 0x3FD172B83C7D517B  //2^( 112 /128-1) - 2^(- 112 /128-1), 2^(- 112 /128-1)
+        .quad 0x3FE4D396B276BC6F, 0x3FD15A98C8A58E51  //2^( 113 /128-1) - 2^(- 113 /128-1), 2^(- 113 /128-1)
+        .quad 0x3FE50898AC869B96, 0x3FD1429AAEA92DE0  //2^( 114 /128-1) - 2^(- 114 /128-1), 2^(- 114 /128-1)
+        .quad 0x3FE53DC312CB9B7A, 0x3FD12ABDC06C31CC  //2^( 115 /128-1) - 2^(- 115 /128-1), 2^(- 115 /128-1)
+        .quad 0x3FE573164B726DB6, 0x3FD11301D0125B51  //2^( 116 /128-1) - 2^(- 116 /128-1), 2^(- 116 /128-1)
+        .quad 0x3FE5A892BCF6379B, 0x3FD0FB66AFFED31B  //2^( 117 /128-1) - 2^(- 117 /128-1), 2^(- 117 /128-1)
+        .quad 0x3FE5DE38CE215725, 0x3FD0E3EC32D3D1A2  //2^( 118 /128-1) - 2^(- 118 /128-1), 2^(- 118 /128-1)
+        .quad 0x3FE61408E60E2888, 0x3FD0CC922B7247F7  //2^( 119 /128-1) - 2^(- 119 /128-1), 2^(- 119 /128-1)
+        .quad 0x3FE64A036C27CC52, 0x3FD0B5586CF9890F  //2^( 120 /128-1) - 2^(- 120 /128-1), 2^(- 120 /128-1)
+        .quad 0x3FE68028C82AEE2F, 0x3FD09E3ECAC6F383  //2^( 121 /128-1) - 2^(- 121 /128-1), 2^(- 121 /128-1)
+        .quad 0x3FE6B67962268C43, 0x3FD0874518759BC8  //2^( 122 /128-1) - 2^(- 122 /128-1), 2^(- 122 /128-1)
+        .quad 0x3FE6ECF5A27CBF28, 0x3FD0706B29DDF6DE  //2^( 123 /128-1) - 2^(- 123 /128-1), 2^(- 123 /128-1)
+        .quad 0x3FE7239DF1E38286, 0x3FD059B0D3158574  //2^( 124 /128-1) - 2^(- 124 /128-1), 2^(- 124 /128-1)
+        .quad 0x3FE75A72B9657E51, 0x3FD04315E86E7F85  //2^( 125 /128-1) - 2^(- 125 /128-1), 2^(- 125 /128-1)
+        .quad 0x3FE791746262D0A8, 0x3FD02C9A3E778061  //2^( 126 /128-1) - 2^(- 126 /128-1), 2^(- 126 /128-1)
+        .quad 0x3FE7C8A35691D856, 0x3FD0163DA9FB3335 //2^( 127 /128-1) - 2^(- 127 /128-1), 2^(- 127 /128-1)
+        .align 32
+        .quad 0x42C8000000000000, 0x42C8000000000000, 0x42C8000000000000, 0x42C8000000000000 /* _dbShifter = 1.5 * 2^(52-k)*/
+        .align 32
+        .long 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99         /* _iDomainRange 0x40861d9ac12a3e85 =(1021*2^K-0.5)*log(2)/2^K -needed for quick exp*/
+        .align 32
+        .quad 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD /* _dPC2 */
+        .align 32
+        .quad 0x3FC55555555554AD, 0x3FC55555555554AD, 0x3FC55555555554AD, 0x3FC55555555554AD /* _dPC3 */
+        .align 32
+        .quad 0x3FA55555CF16D299, 0x3FA55555CF16D299, 0x3FA55555CF16D299, 0x3FA55555CF16D299 /* _dPC4 */
+        .align 32
+        .quad 0x3F8111115712F425, 0x3F8111115712F425, 0x3F8111115712F425, 0x3F8111115712F425 /* _dPC5 */
+        .align 32
+        .quad 0x000000000000007f, 0x000000000000007f, 0x000000000000007f, 0x000000000000007f /* _lIndexMask */
+        .align 32
+        .type	__svml_dsinh_data_internal,@object
+        .size	__svml_dsinh_data_internal,.-__svml_dsinh_data_internal
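For reference, the paired _dbT rows above (one 16-byte entry per index j, holding 2^(j/128-1) - 2^(-j/128-1) and 2^(-j/128-1)) can be regenerated with a small standalone C program along the lines of the sketch below.  It is not part of the patch, and its output should match the table only up to the rounding of the host libm's exp2.

#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t
bits (double x)
{
  uint64_t u;
  memcpy (&u, &x, sizeof u);
  return u;
}

int
main (void)
{
  for (int j = 0; j < 128; j++)
    {
      double hi = exp2 (j / 128.0 - 1.0);   /* 2^(j/128-1) */
      double lo = exp2 (-j / 128.0 - 1.0);  /* 2^(-j/128-1) */
      printf ("        .quad 0x%016" PRIX64 ", 0x%016" PRIX64 "\n",
              bits (hi - lo), bits (lo));
    }
  return 0;
}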
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core-avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core-avx2.S
new file mode 100644
index 0000000000..d767d25080
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core-avx2.S
@@ -0,0 +1,20 @@
+/* AVX2 version of vectorized sinh, vector length is 8.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define _ZGVeN8v_sinh _ZGVeN8v_sinh_avx2_wrapper
+#include "../svml_d_sinh8_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core.c b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core.c
new file mode 100644
index 0000000000..427d07bce2
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core.c
@@ -0,0 +1,27 @@
+/* Multiple versions of vectorized sinh, vector length is 8.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define SYMBOL_NAME _ZGVeN8v_sinh
+#include "ifunc-mathvec-avx512-skx.h"
+
+libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
+
+#ifdef SHARED
+__hidden_ver1 (_ZGVeN8v_sinh, __GI__ZGVeN8v_sinh, __redirect__ZGVeN8v_sinh)
+  __attribute__ ((visibility ("hidden")));
+#endif
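The _core.c file above only routes the exported _ZGVeN8v_sinh symbol to the implementation picked by IFUNC_SELECTOR from ifunc-mathvec-avx512-skx.h.  As a rough mental model only (the selector and CPU probe below are hypothetical and are not the actual glibc selector), the dispatch amounts to choosing between the AVX-512 kernel and the AVX2 wrapper defined elsewhere in this patch:

typedef double vec8d __attribute__ ((vector_size (64)));
typedef vec8d (*sinh8_fn) (vec8d);

extern vec8d _ZGVeN8v_sinh_skx (vec8d);           /* AVX-512 kernel */
extern vec8d _ZGVeN8v_sinh_avx2_wrapper (vec8d);  /* AVX2 fallback wrapper */

/* Hypothetical helper: the real selector uses glibc's CPU feature
   framework to decide whether the SKX-level AVX-512 kernel is usable.  */
static sinh8_fn
select_sinh8 (int avx512_skx_usable)
{
  return avx512_skx_usable ? _ZGVeN8v_sinh_skx : _ZGVeN8v_sinh_avx2_wrapper;
}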
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S
new file mode 100644
index 0000000000..d057d6c7eb
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_sinh8_core_avx512.S
@@ -0,0 +1,461 @@
+/* Function sinh vectorized with AVX-512.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   https://www.gnu.org/licenses/.  */
+
+/*
+ * ALGORITHM DESCRIPTION:
+ *
+ *   Compute sinh(x) as (exp(x)-exp(-x))/2,
+ *   where exp is calculated as
+ *   exp(M*ln2 + ln2*(j/2^k) + r) = 2^M * 2^(j/2^k) * exp(r)
+ *
+ *   Special cases:
+ *
+ *   sinh(NaN) = quiet NaN, and raise invalid exception
+ *   sinh(INF) = that INF
+ *   sinh(x)   = x for subnormals
+ *   sinh(x) overflows for big x (|x| above roughly MAXLOG+log(2))
+ *
+ */
+
+/* Offsets for data table __svml_dsinh_data_internal
+ */
+#define _dbInvLn2                     	0
+#define _dbLn2hi                      	64
+#define _dbLn2lo                      	128
+#define _dSign                        	192
+#define _dbT                          	256
+#define _dbShifter                    	2304
+#define _iDomainRange                 	2368
+#define _dPC2                         	2432
+#define _dPC3                         	2496
+#define _dPC4                         	2560
+#define _dPC5                         	2624
+#define _lIndexMask                   	2688
+
+#include <sysdep.h>
+
+        .text
+	.section .text.evex512,"ax",@progbits
+ENTRY(_ZGVeN8v_sinh_skx)
+        pushq     %rbp
+        cfi_def_cfa_offset(16)
+        movq      %rsp, %rbp
+        cfi_def_cfa(6, 16)
+        cfi_offset(6, -16)
+        andq      $-64, %rsp
+        subq      $192, %rsp
+        lea       _dbT+8+__svml_dsinh_data_internal(%rip), %rax
+        vmovaps   %zmm0, %zmm8
+
+/*  Abs argument  */
+        vandpd    _dSign+__svml_dsinh_data_internal(%rip), %zmm8, %zmm7
+        vmovups   _dbShifter+__svml_dsinh_data_internal(%rip), %zmm13
+
+/*
+ *  Load argument
+ * dM = x*2^K/log(2) + RShifter
+ */
+        vmovups   _dbInvLn2+__svml_dsinh_data_internal(%rip), %zmm12
+        vmovups   _dbLn2hi+__svml_dsinh_data_internal(%rip), %zmm14
+        vmovups   _dPC5+__svml_dsinh_data_internal(%rip), %zmm6
+
+/* VLOAD_CONST( D, dPC[0],         TAB._dPC1 ); */
+        vmovups   _dPC4+__svml_dsinh_data_internal(%rip), %zmm4
+        vxorpd    %zmm8, %zmm7, %zmm5
+        kxnorw    %k0, %k0, %k1
+        kxnorw    %k0, %k0, %k2
+        vfmadd213pd {rn-sae}, %zmm13, %zmm5, %zmm12
+
+/*
+ * Check for overflow/underflow
+ *
+ */
+        vpsrlq    $32, %zmm5, %zmm9
+
+/*
+ *  R
+ * dN = dM - RShifter
+ */
+        vsubpd    {rn-sae}, %zmm13, %zmm12, %zmm2
+        vpmovqd   %zmm9, %ymm10
+        vmovups   _dbLn2lo+__svml_dsinh_data_internal(%rip), %zmm9
+
+/* dR = dX - dN*Log2_hi/2^K */
+        vfnmadd231pd {rn-sae}, %zmm14, %zmm2, %zmm5
+
+/*
+ * sinh(r) = r*((a1=1)+r^2*(a3+r^2*a5)) = r + r*(r^2*(a3+r^2*a5)) ....
+ * dSinh_r = (a3+r^2*a5)
+ */
+        vmovups   _dPC3+__svml_dsinh_data_internal(%rip), %zmm14
+
+/* dR = (dX - dN*Log2_hi/2^K) - dN*Log2_lo/2^K */
+        vfnmadd231pd {rn-sae}, %zmm9, %zmm2, %zmm5
+        vpcmpgtd  _iDomainRange+__svml_dsinh_data_internal(%rip), %ymm10, %ymm11
+        vmovmskps %ymm11, %edx
+
+/* dR2 = dR^2 */
+        vmulpd    {rn-sae}, %zmm5, %zmm5, %zmm2
+        vfmadd231pd {rn-sae}, %zmm2, %zmm6, %zmm14
+
+/*
+ *  Index and lookup
+ * j
+ */
+        vpandq    _lIndexMask+__svml_dsinh_data_internal(%rip), %zmm12, %zmm15
+        vpsllq    $4, %zmm15, %zmm1
+        vpmovqd   %zmm1, %ymm0
+        vpxord    %zmm11, %zmm11, %zmm11
+        vpxord    %zmm10, %zmm10, %zmm10
+        vgatherdpd (%rax,%ymm0), %zmm11{%k1}
+        vgatherdpd -8(%rax,%ymm0), %zmm10{%k2}
+
+/* split j and N */
+        vpxorq    %zmm15, %zmm12, %zmm3
+
+/*
+ *  G1,G2,G3: dTdif,dTn * 2^N,2^(-N)
+ * lM now is an EXP(2^N)
+ */
+        vpsllq    $45, %zmm3, %zmm3
+        vpaddq    %zmm3, %zmm10, %zmm1
+
+/*  */
+        vpaddq    %zmm3, %zmm11, %zmm12
+
+/*  */
+        vpsubq    %zmm3, %zmm11, %zmm13
+
+/* dSinh_r = r^2*(a3+r^2*a5) */
+        vmulpd    {rn-sae}, %zmm2, %zmm14, %zmm3
+
+/* dG2 = dTn*2^N - dTn*2^-N */
+        vsubpd    {rn-sae}, %zmm13, %zmm12, %zmm15
+
+/* dG3 = dTn*2^N + dTn*2^-N */
+        vaddpd    {rn-sae}, %zmm13, %zmm12, %zmm0
+
+/* dSinh_r = r + r*(r^2*(a3+r^2*a5)) */
+        vfmadd213pd {rn-sae}, %zmm5, %zmm5, %zmm3
+
+/*
+ * poly(r) = (dG2+dG1)+dG3*sinh(dR)+dG1*sinh(dR)+(dG1+dG2)*dR2*(a2 +a4*dR2)
+ * dOut = (a2 +a4*dR2)
+ */
+        vmovups   _dPC2+__svml_dsinh_data_internal(%rip), %zmm5
+
+/* dG1 += dG3 */
+        vaddpd    {rn-sae}, %zmm0, %zmm1, %zmm6
+        vfmadd231pd {rn-sae}, %zmm2, %zmm4, %zmm5
+
+/* dOut = dR2*(a2 +a4*dR2) */
+        vmulpd    {rn-sae}, %zmm2, %zmm5, %zmm4
+
+/* dG2 += dG1 */
+        vaddpd    {rn-sae}, %zmm15, %zmm1, %zmm2
+
+/* dOut = dG2*dR2*(a2 +a4*dR2) */
+        vmulpd    {rn-sae}, %zmm2, %zmm4, %zmm4
+
+/* dOut = dG1*sinh(dR)+dG2*dR2*(a2 +a4*dR2) */
+        vfmadd213pd {rn-sae}, %zmm4, %zmm6, %zmm3
+
+/* dOut = dG2 + dG1*sinh(dR)+dG2*dR2*(a2 +a4*dR2) */
+        vaddpd    {rn-sae}, %zmm2, %zmm3, %zmm0
+
+/*  Ret H  */
+        vorpd     %zmm0, %zmm7, %zmm0
+        testl     %edx, %edx
+
+/* Go to special inputs processing branch */
+        jne       L(SPECIAL_VALUES_BRANCH)
+                                # LOE rbx r12 r13 r14 r15 edx zmm0 zmm8
+
+/* Restore registers
+ * and exit the function
+ */
+
+L(EXIT):
+        movq      %rbp, %rsp
+        popq      %rbp
+        cfi_def_cfa(7, 8)
+        cfi_restore(6)
+        ret
+        cfi_def_cfa(6, 16)
+        cfi_offset(6, -16)
+
+/* Branch to process
+ * special inputs
+ */
+
+L(SPECIAL_VALUES_BRANCH):
+        vmovups   %zmm8, 64(%rsp)
+        vmovups   %zmm0, 128(%rsp)
+                                # LOE rbx r12 r13 r14 r15 edx zmm0
+
+        xorl      %eax, %eax
+                                # LOE rbx r12 r13 r14 r15 eax edx
+
+        vzeroupper
+        movq      %r12, 16(%rsp)
+        /*  DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
+        movl      %eax, %r12d
+        movq      %r13, 8(%rsp)
+        /*  DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
+        movl      %edx, %r13d
+        movq      %r14, (%rsp)
+        /*  DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
+                                # LOE rbx r15 r12d r13d
+
+/* Range mask
+ * bits check
+ */
+
+L(RANGEMASK_CHECK):
+        btl       %r12d, %r13d
+
+/* Call scalar math function */
+        jc        L(SCALAR_MATH_CALL)
+                                # LOE rbx r15 r12d r13d
+
+/* Special inputs
+ * processing loop
+ */
+
+L(SPECIAL_VALUES_LOOP):
+        incl      %r12d
+        cmpl      $8, %r12d
+
+/* Check bits in range mask */
+        jl        L(RANGEMASK_CHECK)
+                                # LOE rbx r15 r12d r13d
+
+        movq      16(%rsp), %r12
+        cfi_restore(12)
+        movq      8(%rsp), %r13
+        cfi_restore(13)
+        movq      (%rsp), %r14
+        cfi_restore(14)
+        vmovups   128(%rsp), %zmm0
+
+/* Go to exit */
+        jmp       L(EXIT)
+        /*  DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
+        /*  DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
+        /*  DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
+                                # LOE rbx r12 r13 r14 r15 zmm0
+
+/* Scalar math function call
+ * to process special input
+ */
+
+L(SCALAR_MATH_CALL):
+        movl      %r12d, %r14d
+        movsd     64(%rsp,%r14,8), %xmm0
+        call      sinh@PLT
+                                # LOE rbx r14 r15 r12d r13d xmm0
+
+        movsd     %xmm0, 128(%rsp,%r14,8)
+
+/* Process special inputs in loop */
+        jmp       L(SPECIAL_VALUES_LOOP)
+                                # LOE rbx r15 r12d r13d
+END(_ZGVeN8v_sinh_skx)
+
+        .section .rodata, "a"
+        .align 64
+
+#ifdef __svml_dsinh_data_internal_typedef
+typedef unsigned int VUINT32;
+typedef struct
+{
+        __declspec(align(64)) VUINT32 _dbInvLn2[8][2];
+        __declspec(align(64)) VUINT32 _dbLn2hi[8][2];
+        __declspec(align(64)) VUINT32 _dbLn2lo[8][2];
+        __declspec(align(64)) VUINT32 _dSign[8][2];                //0x8000000000000000
+        __declspec(align(64)) VUINT32 _dbT[(1<<7)][2][2]; //precomputed table: 2^(j/128-1) - 2^(-j/128-1), 2^(-j/128-1)
+        __declspec(align(64)) VUINT32 _dbShifter[8][2];
+        __declspec(align(64)) VUINT32 _iDomainRange[16][1];
+        __declspec(align(64)) VUINT32 _dPC2[8][2];
+        __declspec(align(64)) VUINT32 _dPC3[8][2];
+        __declspec(align(64)) VUINT32 _dPC4[8][2];
+        __declspec(align(64)) VUINT32 _dPC5[8][2];
+        __declspec(align(64)) VUINT32 _lIndexMask[8][2];
+} __svml_dsinh_data_internal;
+#endif
+__svml_dsinh_data_internal:
+        .quad 0x3FF71547652B82FE, 0x3FF71547652B82FE, 0x3FF71547652B82FE, 0x3FF71547652B82FE, 0x3FF71547652B82FE, 0x3FF71547652B82FE, 0x3FF71547652B82FE, 0x3FF71547652B82FE /* _dbInvLn2 = 1/log(2) */
+        .align 64
+        .quad 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000, 0x3FE62E42FEFA0000 /* _dbLn2hi  = log(2) hi*/
+        .align 64
+        .quad 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A, 0x3D7CF79ABC9E3B3A /* _dbLn2lo  = log(2) lo*/
+        .align 64
+        .quad 0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000, 0x8000000000000000 /* _dSign */
+        //_dbT
+        .align 64
+        .quad 0x0000000000000000, 0x3FE0000000000000  //2^( 0 /128-1) - 2^(- 0 /128-1), 2^(- 0 /128-1)
+        .quad 0x3F762E4A19BD1E74, 0x3FDFD3C22B8F71F1  //2^( 1 /128-1) - 2^(- 1 /128-1), 2^(- 1 /128-1)
+        .quad 0x3F862E5F6A0DFD36, 0x3FDFA7C1819E90D8  //2^( 2 /128-1) - 2^(- 2 /128-1), 2^(- 2 /128-1)
+        .quad 0x3F90A2E234040F5F, 0x3FDF7BFDAD9CBE14  //2^( 3 /128-1) - 2^(- 3 /128-1), 2^(- 3 /128-1)
+        .quad 0x3F962EB4ABCC5A81, 0x3FDF50765B6E4540  //2^( 4 /128-1) - 2^(- 4 /128-1), 2^(- 4 /128-1)
+        .quad 0x3F9BBAB1C5033244, 0x3FDF252B376BBA97  //2^( 5 /128-1) - 2^(- 5 /128-1), 2^(- 5 /128-1)
+        .quad 0x3FA0A372144EEB45, 0x3FDEFA1BEE615A27  //2^( 6 /128-1) - 2^(- 6 /128-1), 2^(- 6 /128-1)
+        .quad 0x3FA369AB3FFBF8B0, 0x3FDECF482D8E67F1  //2^( 7 /128-1) - 2^(- 7 /128-1), 2^(- 7 /128-1)
+        .quad 0x3FA63009BA740A2A, 0x3FDEA4AFA2A490DA  //2^( 8 /128-1) - 2^(- 8 /128-1), 2^(- 8 /128-1)
+        .quad 0x3FA8F692D8EA1B5A, 0x3FDE7A51FBC74C83  //2^( 9 /128-1) - 2^(- 9 /128-1), 2^(- 9 /128-1)
+        .quad 0x3FABBD4BF0E31A6F, 0x3FDE502EE78B3FF6  //2^( 10 /128-1) - 2^(- 10 /128-1), 2^(- 10 /128-1)
+        .quad 0x3FAE843A5840286A, 0x3FDE264614F5A129  //2^( 11 /128-1) - 2^(- 11 /128-1), 2^(- 11 /128-1)
+        .quad 0x3FB0A5B1B2A46D0A, 0x3FDDFC97337B9B5F  //2^( 12 /128-1) - 2^(- 12 /128-1), 2^(- 12 /128-1)
+        .quad 0x3FB20966375ABCDF, 0x3FDDD321F301B460  //2^( 13 /128-1) - 2^(- 13 /128-1), 2^(- 13 /128-1)
+        .quad 0x3FB36D3D65DCA4E8, 0x3FDDA9E603DB3285  //2^( 14 /128-1) - 2^(- 14 /128-1), 2^(- 14 /128-1)
+        .quad 0x3FB4D139EA06642A, 0x3FDD80E316C98398  //2^( 15 /128-1) - 2^(- 15 /128-1), 2^(- 15 /128-1)
+        .quad 0x3FB6355E6FFBF9BA, 0x3FDD5818DCFBA487  //2^( 16 /128-1) - 2^(- 16 /128-1), 2^(- 16 /128-1)
+        .quad 0x3FB799ADA42E4788, 0x3FDD2F87080D89F2  //2^( 17 /128-1) - 2^(- 17 /128-1), 2^(- 17 /128-1)
+        .quad 0x3FB8FE2A336035BC, 0x3FDD072D4A07897C  //2^( 18 /128-1) - 2^(- 18 /128-1), 2^(- 18 /128-1)
+        .quad 0x3FBA62D6CAABD6B6, 0x3FDCDF0B555DC3FA  //2^( 19 /128-1) - 2^(- 19 /128-1), 2^(- 19 /128-1)
+        .quad 0x3FBBC7B617878BAF, 0x3FDCB720DCEF9069  //2^( 20 /128-1) - 2^(- 20 /128-1), 2^(- 20 /128-1)
+        .quad 0x3FBD2CCAC7CB2A11, 0x3FDC8F6D9406E7B5  //2^( 21 /128-1) - 2^(- 21 /128-1), 2^(- 21 /128-1)
+        .quad 0x3FBE921789B52185, 0x3FDC67F12E57D14B  //2^( 22 /128-1) - 2^(- 22 /128-1), 2^(- 22 /128-1)
+        .quad 0x3FBFF79F0BEFA2C7, 0x3FDC40AB5FFFD07A  //2^( 23 /128-1) - 2^(- 23 /128-1), 2^(- 23 /128-1)
+        .quad 0x3FC0AEB1FECAE3A9, 0x3FDC199BDD85529C  //2^( 24 /128-1) - 2^(- 24 /128-1), 2^(- 24 /128-1)
+        .quad 0x3FC161B4871C5CEC, 0x3FDBF2C25BD71E09  //2^( 25 /128-1) - 2^(- 25 /128-1), 2^(- 25 /128-1)
+        .quad 0x3FC214D876F26FD0, 0x3FDBCC1E904BC1D2  //2^( 26 /128-1) - 2^(- 26 /128-1), 2^(- 26 /128-1)
+        .quad 0x3FC2C81F2693816F, 0x3FDBA5B030A1064A  //2^( 27 /128-1) - 2^(- 27 /128-1), 2^(- 27 /128-1)
+        .quad 0x3FC37B89EE88BEF7, 0x3FDB7F76F2FB5E47  //2^( 28 /128-1) - 2^(- 28 /128-1), 2^(- 28 /128-1)
+        .quad 0x3FC42F1A27A0B3CD, 0x3FDB59728DE5593A  //2^( 29 /128-1) - 2^(- 29 /128-1), 2^(- 29 /128-1)
+        .quad 0x3FC4E2D12AF1E037, 0x3FDB33A2B84F15FB  //2^( 30 /128-1) - 2^(- 30 /128-1), 2^(- 30 /128-1)
+        .quad 0x3FC596B051DD508D, 0x3FDB0E07298DB666  //2^( 31 /128-1) - 2^(- 31 /128-1), 2^(- 31 /128-1)
+        .quad 0x3FC64AB8F61134FA, 0x3FDAE89F995AD3AD  //2^( 32 /128-1) - 2^(- 32 /128-1), 2^(- 32 /128-1)
+        .quad 0x3FC6FEEC718B79D1, 0x3FDAC36BBFD3F37A  //2^( 33 /128-1) - 2^(- 33 /128-1), 2^(- 33 /128-1)
+        .quad 0x3FC7B34C1E9C607F, 0x3FDA9E6B5579FDBF  //2^( 34 /128-1) - 2^(- 34 /128-1), 2^(- 34 /128-1)
+        .quad 0x3FC867D957E91912, 0x3FDA799E1330B358  //2^( 35 /128-1) - 2^(- 35 /128-1), 2^(- 35 /128-1)
+        .quad 0x3FC91C95786E5C72, 0x3FDA5503B23E255D  //2^( 36 /128-1) - 2^(- 36 /128-1), 2^(- 36 /128-1)
+        .quad 0x3FC9D181DB83072F, 0x3FDA309BEC4A2D33  //2^( 37 /128-1) - 2^(- 37 /128-1), 2^(- 37 /128-1)
+        .quad 0x3FCA869FDCDAB512, 0x3FDA0C667B5DE565  //2^( 38 /128-1) - 2^(- 38 /128-1), 2^(- 38 /128-1)
+        .quad 0x3FCB3BF0D8885D4C, 0x3FD9E86319E32323  //2^( 39 /128-1) - 2^(- 39 /128-1), 2^(- 39 /128-1)
+        .quad 0x3FCBF1762B00EF69, 0x3FD9C49182A3F090  //2^( 40 /128-1) - 2^(- 40 /128-1), 2^(- 40 /128-1)
+        .quad 0x3FCCA731311DF0FB, 0x3FD9A0F170CA07BA  //2^( 41 /128-1) - 2^(- 41 /128-1), 2^(- 41 /128-1)
+        .quad 0x3FCD5D2348201C09, 0x3FD97D829FDE4E50  //2^( 42 /128-1) - 2^(- 42 /128-1), 2^(- 42 /128-1)
+        .quad 0x3FCE134DCDB1FE3E, 0x3FD95A44CBC8520F  //2^( 43 /128-1) - 2^(- 43 /128-1), 2^(- 43 /128-1)
+        .quad 0x3FCEC9B21FEA98EA, 0x3FD93737B0CDC5E5  //2^( 44 /128-1) - 2^(- 44 /128-1), 2^(- 44 /128-1)
+        .quad 0x3FCF80519D5001D3, 0x3FD9145B0B91FFC6  //2^( 45 /128-1) - 2^(- 45 /128-1), 2^(- 45 /128-1)
+        .quad 0x3FD01B96D26D026A, 0x3FD8F1AE99157736  //2^( 46 /128-1) - 2^(- 46 /128-1), 2^(- 46 /128-1)
+        .quad 0x3FD07723CAFA6331, 0x3FD8CF3216B5448C  //2^( 47 /128-1) - 2^(- 47 /128-1), 2^(- 47 /128-1)
+        .quad 0x3FD0D2D06841B373, 0x3FD8ACE5422AA0DB  //2^( 48 /128-1) - 2^(- 48 /128-1), 2^(- 48 /128-1)
+        .quad 0x3FD12E9D5A715381, 0x3FD88AC7D98A6699  //2^( 49 /128-1) - 2^(- 49 /128-1), 2^(- 49 /128-1)
+        .quad 0x3FD18A8B51F5C661, 0x3FD868D99B4492ED  //2^( 50 /128-1) - 2^(- 50 /128-1), 2^(- 50 /128-1)
+        .quad 0x3FD1E69AFF7B04D7, 0x3FD8471A4623C7AD  //2^( 51 /128-1) - 2^(- 51 /128-1), 2^(- 51 /128-1)
+        .quad 0x3FD242CD13EDD0F1, 0x3FD82589994CCE13  //2^( 52 /128-1) - 2^(- 52 /128-1), 2^(- 52 /128-1)
+        .quad 0x3FD29F22407D0A0C, 0x3FD80427543E1A12  //2^( 53 /128-1) - 2^(- 53 /128-1), 2^(- 53 /128-1)
+        .quad 0x3FD2FB9B369B0153, 0x3FD7E2F336CF4E62  //2^( 54 /128-1) - 2^(- 54 /128-1), 2^(- 54 /128-1)
+        .quad 0x3FD35838A7FECEC8, 0x3FD7C1ED0130C132  //2^( 55 /128-1) - 2^(- 55 /128-1), 2^(- 55 /128-1)
+        .quad 0x3FD3B4FB46A5A6CC, 0x3FD7A11473EB0187  //2^( 56 /128-1) - 2^(- 56 /128-1), 2^(- 56 /128-1)
+        .quad 0x3FD411E3C4D4302F, 0x3FD780694FDE5D3F  //2^( 57 /128-1) - 2^(- 57 /128-1), 2^(- 57 /128-1)
+        .quad 0x3FD46EF2D517DAC8, 0x3FD75FEB564267C9  //2^( 58 /128-1) - 2^(- 58 /128-1), 2^(- 58 /128-1)
+        .quad 0x3FD4CC292A48369E, 0x3FD73F9A48A58174  //2^( 59 /128-1) - 2^(- 59 /128-1), 2^(- 59 /128-1)
+        .quad 0x3FD5298777884B96, 0x3FD71F75E8EC5F74  //2^( 60 /128-1) - 2^(- 60 /128-1), 2^(- 60 /128-1)
+        .quad 0x3FD5870E7047F1BC, 0x3FD6FF7DF9519484  //2^( 61 /128-1) - 2^(- 61 /128-1), 2^(- 61 /128-1)
+        .quad 0x3FD5E4BEC8452A1A, 0x3FD6DFB23C651A2F  //2^( 62 /128-1) - 2^(- 62 /128-1), 2^(- 62 /128-1)
+        .quad 0x3FD64299338D7827, 0x3FD6C012750BDABF  //2^( 63 /128-1) - 2^(- 63 /128-1), 2^(- 63 /128-1)
+        .quad 0x3FD6A09E667F3BCD, 0x3FD6A09E667F3BCD  //2^( 64 /128-1) - 2^(- 64 /128-1), 2^(- 64 /128-1)
+        .quad 0x3FD6FECF15CB0C0B, 0x3FD68155D44CA973  //2^( 65 /128-1) - 2^(- 65 /128-1), 2^(- 65 /128-1)
+        .quad 0x3FD75D2BF6751239, 0x3FD6623882552225  //2^( 66 /128-1) - 2^(- 66 /128-1), 2^(- 66 /128-1)
+        .quad 0x3FD7BBB5BDD665E8, 0x3FD6434634CCC320  //2^( 67 /128-1) - 2^(- 67 /128-1), 2^(- 67 /128-1)
+        .quad 0x3FD81A6D219E6963, 0x3FD6247EB03A5585  //2^( 68 /128-1) - 2^(- 68 /128-1), 2^(- 68 /128-1)
+        .quad 0x3FD87952D7D426DF, 0x3FD605E1B976DC09  //2^( 69 /128-1) - 2^(- 69 /128-1), 2^(- 69 /128-1)
+        .quad 0x3FD8D86796D7AE49, 0x3FD5E76F15AD2148  //2^( 70 /128-1) - 2^(- 70 /128-1), 2^(- 70 /128-1)
+        .quad 0x3FD937AC156373C8, 0x3FD5C9268A5946B7  //2^( 71 /128-1) - 2^(- 71 /128-1), 2^(- 71 /128-1)
+        .quad 0x3FD997210A8DAEE4, 0x3FD5AB07DD485429  //2^( 72 /128-1) - 2^(- 72 /128-1), 2^(- 72 /128-1)
+        .quad 0x3FD9F6C72DC9BA68, 0x3FD58D12D497C7FD  //2^( 73 /128-1) - 2^(- 73 /128-1), 2^(- 73 /128-1)
+        .quad 0x3FDA569F36E974EA, 0x3FD56F4736B527DA  //2^( 74 /128-1) - 2^(- 74 /128-1), 2^(- 74 /128-1)
+        .quad 0x3FDAB6A9DE1EA215, 0x3FD551A4CA5D920F  //2^( 75 /128-1) - 2^(- 75 /128-1), 2^(- 75 /128-1)
+        .quad 0x3FDB16E7DBFC4CA3, 0x3FD5342B569D4F82  //2^( 76 /128-1) - 2^(- 76 /128-1), 2^(- 76 /128-1)
+        .quad 0x3FDB7759E9782918, 0x3FD516DAA2CF6642  //2^( 77 /128-1) - 2^(- 77 /128-1), 2^(- 77 /128-1)
+        .quad 0x3FDBD800BFEBF932, 0x3FD4F9B2769D2CA7  //2^( 78 /128-1) - 2^(- 78 /128-1), 2^(- 78 /128-1)
+        .quad 0x3FDC38DD1916F025, 0x3FD4DCB299FDDD0D  //2^( 79 /128-1) - 2^(- 79 /128-1), 2^(- 79 /128-1)
+        .quad 0x3FDC99EFAF1F1790, 0x3FD4BFDAD5362A27  //2^( 80 /128-1) - 2^(- 80 /128-1), 2^(- 80 /128-1)
+        .quad 0x3FDCFB393C92B539, 0x3FD4A32AF0D7D3DE  //2^( 81 /128-1) - 2^(- 81 /128-1), 2^(- 81 /128-1)
+        .quad 0x3FDD5CBA7C69B19C, 0x3FD486A2B5C13CD0  //2^( 82 /128-1) - 2^(- 82 /128-1), 2^(- 82 /128-1)
+        .quad 0x3FDDBE742A06FF34, 0x3FD46A41ED1D0057  //2^( 83 /128-1) - 2^(- 83 /128-1), 2^(- 83 /128-1)
+        .quad 0x3FDE2067013A029D, 0x3FD44E086061892D  //2^( 84 /128-1) - 2^(- 84 /128-1), 2^(- 84 /128-1)
+        .quad 0x3FDE8293BE3FFB87, 0x3FD431F5D950A897  //2^( 85 /128-1) - 2^(- 85 /128-1), 2^(- 85 /128-1)
+        .quad 0x3FDEE4FB1DC56E75, 0x3FD4160A21F72E2A  //2^( 86 /128-1) - 2^(- 86 /128-1), 2^(- 86 /128-1)
+        .quad 0x3FDF479DDCE78F58, 0x3FD3FA4504AC801C  //2^( 87 /128-1) - 2^(- 87 /128-1), 2^(- 87 /128-1)
+        .quad 0x3FDFAA7CB935ACFE, 0x3FD3DEA64C123422  //2^( 88 /128-1) - 2^(- 88 /128-1), 2^(- 88 /128-1)
+        .quad 0x3FE006CC38594EB1, 0x3FD3C32DC313A8E5  //2^( 89 /128-1) - 2^(- 89 /128-1), 2^(- 89 /128-1)
+        .quad 0x3FE03878E0EB1569, 0x3FD3A7DB34E59FF7  //2^( 90 /128-1) - 2^(- 90 /128-1), 2^(- 90 /128-1)
+        .quad 0x3FE06A44B5C74101, 0x3FD38CAE6D05D866  //2^( 91 /128-1) - 2^(- 91 /128-1), 2^(- 91 /128-1)
+        .quad 0x3FE09C3016A0D077, 0x3FD371A7373AA9CB  //2^( 92 /128-1) - 2^(- 92 /128-1), 2^(- 92 /128-1)
+        .quad 0x3FE0CE3B63676360, 0x3FD356C55F929FF1  //2^( 93 /128-1) - 2^(- 93 /128-1), 2^(- 93 /128-1)
+        .quad 0x3FE10066FC47F240, 0x3FD33C08B26416FF  //2^( 94 /128-1) - 2^(- 94 /128-1), 2^(- 94 /128-1)
+        .quad 0x3FE132B341AD8761, 0x3FD32170FC4CD831  //2^( 95 /128-1) - 2^(- 95 /128-1), 2^(- 95 /128-1)
+        .quad 0x3FE165209441F823, 0x3FD306FE0A31B715  //2^( 96 /128-1) - 2^(- 96 /128-1), 2^(- 96 /128-1)
+        .quad 0x3FE197AF54EE9EBB, 0x3FD2ECAFA93E2F56  //2^( 97 /128-1) - 2^(- 97 /128-1), 2^(- 97 /128-1)
+        .quad 0x3FE1CA5FE4DD1475, 0x3FD2D285A6E4030B  //2^( 98 /128-1) - 2^(- 98 /128-1), 2^(- 98 /128-1)
+        .quad 0x3FE1FD32A577EC72, 0x3FD2B87FD0DAD990  //2^( 99 /128-1) - 2^(- 99 /128-1), 2^(- 99 /128-1)
+        .quad 0x3FE23027F86B6ED6, 0x3FD29E9DF51FDEE1  //2^( 100 /128-1) - 2^(- 100 /128-1), 2^(- 100 /128-1)
+        .quad 0x3FE263403FA65489, 0x3FD284DFE1F56381  //2^( 101 /128-1) - 2^(- 101 /128-1), 2^(- 101 /128-1)
+        .quad 0x3FE2967BDD5A8364, 0x3FD26B4565E27CDD  //2^( 102 /128-1) - 2^(- 102 /128-1), 2^(- 102 /128-1)
+        .quad 0x3FE2C9DB33FDCAE9, 0x3FD251CE4FB2A63F  //2^( 103 /128-1) - 2^(- 103 /128-1), 2^(- 103 /128-1)
+        .quad 0x3FE2FD5EA64AA180, 0x3FD2387A6E756238  //2^( 104 /128-1) - 2^(- 104 /128-1), 2^(- 104 /128-1)
+        .quad 0x3FE331069740E22F, 0x3FD21F49917DDC96  //2^( 105 /128-1) - 2^(- 105 /128-1), 2^(- 105 /128-1)
+        .quad 0x3FE364D36A268AE0, 0x3FD2063B88628CD6  //2^( 106 /128-1) - 2^(- 106 /128-1), 2^(- 106 /128-1)
+        .quad 0x3FE398C582887B27, 0x3FD1ED5022FCD91D  //2^( 107 /128-1) - 2^(- 107 /128-1), 2^(- 107 /128-1)
+        .quad 0x3FE3CCDD443B3394, 0x3FD1D4873168B9AA  //2^( 108 /128-1) - 2^(- 108 /128-1), 2^(- 108 /128-1)
+        .quad 0x3FE4011B135B9590, 0x3FD1BBE084045CD4  //2^( 109 /128-1) - 2^(- 109 /128-1), 2^(- 109 /128-1)
+        .quad 0x3FE4357F544FA3C1, 0x3FD1A35BEB6FCB75  //2^( 110 /128-1) - 2^(- 110 /128-1), 2^(- 110 /128-1)
+        .quad 0x3FE46A0A6BC742FD, 0x3FD18AF9388C8DEA  //2^( 111 /128-1) - 2^(- 111 /128-1), 2^(- 111 /128-1)
+        .quad 0x3FE49EBCBEBCFBCA, 0x3FD172B83C7D517B  //2^( 112 /128-1) - 2^(- 112 /128-1), 2^(- 112 /128-1)
+        .quad 0x3FE4D396B276BC6F, 0x3FD15A98C8A58E51  //2^( 113 /128-1) - 2^(- 113 /128-1), 2^(- 113 /128-1)
+        .quad 0x3FE50898AC869B96, 0x3FD1429AAEA92DE0  //2^( 114 /128-1) - 2^(- 114 /128-1), 2^(- 114 /128-1)
+        .quad 0x3FE53DC312CB9B7A, 0x3FD12ABDC06C31CC  //2^( 115 /128-1) - 2^(- 115 /128-1), 2^(- 115 /128-1)
+        .quad 0x3FE573164B726DB6, 0x3FD11301D0125B51  //2^( 116 /128-1) - 2^(- 116 /128-1), 2^(- 116 /128-1)
+        .quad 0x3FE5A892BCF6379B, 0x3FD0FB66AFFED31B  //2^( 117 /128-1) - 2^(- 117 /128-1), 2^(- 117 /128-1)
+        .quad 0x3FE5DE38CE215725, 0x3FD0E3EC32D3D1A2  //2^( 118 /128-1) - 2^(- 118 /128-1), 2^(- 118 /128-1)
+        .quad 0x3FE61408E60E2888, 0x3FD0CC922B7247F7  //2^( 119 /128-1) - 2^(- 119 /128-1), 2^(- 119 /128-1)
+        .quad 0x3FE64A036C27CC52, 0x3FD0B5586CF9890F  //2^( 120 /128-1) - 2^(- 120 /128-1), 2^(- 120 /128-1)
+        .quad 0x3FE68028C82AEE2F, 0x3FD09E3ECAC6F383  //2^( 121 /128-1) - 2^(- 121 /128-1), 2^(- 121 /128-1)
+        .quad 0x3FE6B67962268C43, 0x3FD0874518759BC8  //2^( 122 /128-1) - 2^(- 122 /128-1), 2^(- 122 /128-1)
+        .quad 0x3FE6ECF5A27CBF28, 0x3FD0706B29DDF6DE  //2^( 123 /128-1) - 2^(- 123 /128-1), 2^(- 123 /128-1)
+        .quad 0x3FE7239DF1E38286, 0x3FD059B0D3158574  //2^( 124 /128-1) - 2^(- 124 /128-1), 2^(- 124 /128-1)
+        .quad 0x3FE75A72B9657E51, 0x3FD04315E86E7F85  //2^( 125 /128-1) - 2^(- 125 /128-1), 2^(- 125 /128-1)
+        .quad 0x3FE791746262D0A8, 0x3FD02C9A3E778061  //2^( 126 /128-1) - 2^(- 126 /128-1), 2^(- 126 /128-1)
+        .quad 0x3FE7C8A35691D856, 0x3FD0163DA9FB3335 //2^( 127 /128-1) - 2^(- 127 /128-1), 2^(- 127 /128-1)
+        .align 64
+        .quad 0x42C8000000000000, 0x42C8000000000000, 0x42C8000000000000, 0x42C8000000000000, 0x42C8000000000000, 0x42C8000000000000, 0x42C8000000000000, 0x42C8000000000000 /* _dbShifter = 1.5 * 2^(52-k)*/
+        .align 64
+        .long 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99, 0x40861d99         /* _iDomainRange 0x40861d9ac12a3e85 =(1021*2^K-0.5)*log(2)/2^K -needed for quick exp*/
+        .align 64
+        .quad 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD, 0x3FDFFFFFFFFFFDBD /* _dPC2 */
+        .align 64
+        .quad 0x3FC55555555554AD, 0x3FC55555555554AD, 0x3FC55555555554AD, 0x3FC55555555554AD, 0x3FC55555555554AD, 0x3FC55555555554AD, 0x3FC55555555554AD, 0x3FC55555555554AD /* _dPC3 */
+        .align 64
+        .quad 0x3FA55555CF16D299, 0x3FA55555CF16D299, 0x3FA55555CF16D299, 0x3FA55555CF16D299, 0x3FA55555CF16D299, 0x3FA55555CF16D299, 0x3FA55555CF16D299, 0x3FA55555CF16D299 /* _dPC4 */
+        .align 64
+        .quad 0x3F8111115712F425, 0x3F8111115712F425, 0x3F8111115712F425, 0x3F8111115712F425, 0x3F8111115712F425, 0x3F8111115712F425, 0x3F8111115712F425, 0x3F8111115712F425 /* _dPC5 */
+        .align 64
+        .quad 0x000000000000007f, 0x000000000000007f, 0x000000000000007f, 0x000000000000007f, 0x000000000000007f, 0x000000000000007f, 0x000000000000007f, 0x000000000000007f /* _lIndexMask */
+        .align 64
+        .type	__svml_dsinh_data_internal,@object
+        .size	__svml_dsinh_data_internal,.-__svml_dsinh_data_internal
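Putting the pieces of the kernel above together, the per-lane arithmetic can be summarized by the scalar model below.  This is an illustration only, not a drop-in replacement: Taylor coefficients stand in for the tuned _dPC2.._dPC5 constants, a plain exp2 stands in for the _dbT lookup plus exponent add, and the special-case path is ignored.

#include <math.h>

static double
sinh_model (double x)
{
  double ax = fabs (x);
  /* m = N + j/128 = round (128*ax/ln2) / 128 and r = ax - m*ln2, so that
     sinh(ax) = 2^(m-1)*e^r - 2^(-m-1)*e^(-r)
              = (2^(m-1) - 2^(-m-1))*cosh(r) + (2^(m-1) + 2^(-m-1))*sinh(r).  */
  double m = nearbyint (ax * (128.0 / M_LN2)) * (1.0 / 128.0);
  double r = ax - m * M_LN2;  /* the kernel splits ln2 into _dbLn2hi/_dbLn2lo */
  double r2 = r * r;
  /* Short even/odd polynomials; Taylor stand-ins for _dPC2.._dPC5.  */
  double cosh_r = 1.0 + r2 * (0.5 + r2 * (1.0 / 24.0));
  double sinh_r = r + r * (r2 * (1.0 / 6.0 + r2 * (1.0 / 120.0)));
  /* The kernel forms these from the _dbT entry for j and an exponent add;
     exp2 stands in for that here.  */
  double gp = exp2 (m - 1.0) + exp2 (-m - 1.0);
  double gm = exp2 (m - 1.0) - exp2 (-m - 1.0);
  return copysign (gm * cosh_r + gp * sinh_r, x);
}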
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core-avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core-avx2.S
new file mode 100644
index 0000000000..06525b7b37
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core-avx2.S
@@ -0,0 +1,20 @@
+/* AVX2 version of vectorized sinhf.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define _ZGVeN16v_sinhf _ZGVeN16v_sinhf_avx2_wrapper
+#include "../svml_s_sinhf16_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core.c b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core.c
new file mode 100644
index 0000000000..6a954caa37
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core.c
@@ -0,0 +1,28 @@
+/* Multiple versions of vectorized sinhf, vector length is 16.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define SYMBOL_NAME _ZGVeN16v_sinhf
+#include "ifunc-mathvec-avx512-skx.h"
+
+libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
+
+#ifdef SHARED
+__hidden_ver1 (_ZGVeN16v_sinhf, __GI__ZGVeN16v_sinhf,
+	       __redirect__ZGVeN16v_sinhf)
+  __attribute__ ((visibility ("hidden")));
+#endif
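User code does not call these symbols directly: once the math-vector declarations from this patch are in place, a vectorizing compiler that implements the x86-64 vector function ABI can map a plain sinhf loop onto _ZGVeN16v_sinhf.  A typical (compiler-dependent) way to get there is something like -O2 -ffast-math -fopenmp-simd -mavx512f; the exact options are not mandated by this patch.

#include <math.h>

void
sinhf_array (float *restrict out, const float *restrict in, int n)
{
#pragma omp simd
  for (int i = 0; i < n; i++)
    out[i] = sinhf (in[i]);
}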
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
new file mode 100644
index 0000000000..1119c00259
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf16_core_avx512.S
@@ -0,0 +1,318 @@
+/* Function sinhf vectorized with AVX-512.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   https://www.gnu.org/licenses/.  */
+
+/*
+ * ALGORITHM DESCRIPTION:
+ *
+ *   Compute sinh(x) as (exp(x)-exp(-x))/2,
+ *   where exp is calculated as
+ *   exp(M*ln2 + ln2*(j/2^k) + r) = 2^M * 2^(j/2^k) * exp(r)
+ *
+ *   Special cases:
+ *
+ *   sinh(NaN) = quiet NaN, and raise invalid exception
+ *   sinh(INF) = that INF
+ *   sinh(x)   = x for subnormals
+ *   sinh(x) overflows for big x (|x| above roughly MAXLOG+log(2))
+ *
+ */
+
+/* Offsets for data table __svml_ssinh_data_internal
+ */
+#define _sInvLn2                      	0
+#define _sLn2hi                       	64
+#define _sLn2lo                       	128
+#define _sSign                        	192
+#define _sShifter                     	256
+#define _iDomainRange                 	320
+#define _sPC1                         	384
+#define _sPC2                         	448
+#define _sPC3                         	512
+#define _sPC4                         	576
+#define _sPC5                         	640
+#define _sPC6                         	704
+#define _iHalf                        	768
+
+#include <sysdep.h>
+
+        .text
+	.section .text.evex512,"ax",@progbits
+ENTRY(_ZGVeN16v_sinhf_skx)
+        pushq     %rbp
+        cfi_def_cfa_offset(16)
+        movq      %rsp, %rbp
+        cfi_def_cfa(6, 16)
+        cfi_offset(6, -16)
+        andq      $-64, %rsp
+        subq      $192, %rsp
+        vmovaps   %zmm0, %zmm5
+
+/*
+ *  Implementation
+ *  Abs argument
+ */
+        vandps    _sSign+__svml_ssinh_data_internal(%rip), %zmm5, %zmm4
+
+/*
+ * Check for overflow/underflow
+ * Is this faster than a GE compare?
+ */
+        vpternlogd $255, %zmm6, %zmm6, %zmm6
+        vmovups   _sShifter+__svml_ssinh_data_internal(%rip), %zmm7
+
+/*
+ *  Load argument
+ * dM = x/log(2) + RShifter
+ */
+        vmovups   _sInvLn2+__svml_ssinh_data_internal(%rip), %zmm11
+        vmovups   _sLn2hi+__svml_ssinh_data_internal(%rip), %zmm8
+        vmovups   _sLn2lo+__svml_ssinh_data_internal(%rip), %zmm10
+        vmovups   _iHalf+__svml_ssinh_data_internal(%rip), %zmm12
+        vmovups   _sPC5+__svml_ssinh_data_internal(%rip), %zmm0
+        vmovups   _sPC6+__svml_ssinh_data_internal(%rip), %zmm3
+
+/* x^2 */
+        vmovups   _sPC2+__svml_ssinh_data_internal(%rip), %zmm2
+        vxorps    %zmm5, %zmm4, %zmm1
+        vfmadd213ps {rn-sae}, %zmm7, %zmm1, %zmm11
+        vpcmpd    $2, _iDomainRange+__svml_ssinh_data_internal(%rip), %zmm1, %k1
+
+/*
+ *  G1,G2 2^N,2^(-N)
+ * iM now is an EXP(2^N)
+ */
+        vpslld    $23, %zmm11, %zmm13
+
+/*
+ *  R
+ * sN = sM - RShifter
+ */
+        vsubps    {rn-sae}, %zmm7, %zmm11, %zmm9
+        vpaddd    %zmm13, %zmm12, %zmm14
+        vpsubd    %zmm13, %zmm12, %zmm15
+
+/* sG1 = 2^(N-1)+2^(-N-1) */
+        vaddps    {rn-sae}, %zmm15, %zmm14, %zmm7
+        vpandnd   %zmm1, %zmm1, %zmm6{%k1}
+
+/* sR = sX - sN*Log2_hi */
+        vfnmadd231ps {rn-sae}, %zmm8, %zmm9, %zmm1
+        vptestmd  %zmm6, %zmm6, %k0
+
+/* sG2 = 2^(N-1)-2^(-N-1) */
+        vsubps    {rn-sae}, %zmm15, %zmm14, %zmm8
+
+/* sR = (sX - sN*Log2_hi) - sN*Log2_lo */
+        vfnmadd231ps {rn-sae}, %zmm10, %zmm9, %zmm1
+
+/*
+ * sinh(r) = r*((a1=1)+r^2*(a3+r^2*a5)) = r + r*(r^2*(a3+r^2*a5)) ...
+ * sSinh_r = (a3+r^2*a5)
+ */
+        vmovups   _sPC3+__svml_ssinh_data_internal(%rip), %zmm14
+        kmovw     %k0, %edx
+
+/* sR2 = sR^2 */
+        vmulps    {rn-sae}, %zmm1, %zmm1, %zmm6
+        vfmadd231ps {rn-sae}, %zmm6, %zmm0, %zmm14
+
+/* sSinh_r = r^2*(a3+r^2*a5) */
+        vmulps    {rn-sae}, %zmm6, %zmm14, %zmm0
+
+/* sSinh_r = r + r*(r^2*(a3+r^2*a5)) */
+        vfmadd213ps {rn-sae}, %zmm1, %zmm1, %zmm0
+
+/*
+ * sinh(X) = sG2 + sG1*sinh(dR) + sG2*sR2*(a2+sR2*(a4+a6*sR2))
+ * sOut = (a4+a6*sR2)
+ */
+        vmovups   _sPC4+__svml_ssinh_data_internal(%rip), %zmm1
+        vfmadd231ps {rn-sae}, %zmm6, %zmm3, %zmm1
+
+/* sOut = a2+sR2*(a4+a6*sR2) */
+        vfmadd213ps {rn-sae}, %zmm2, %zmm6, %zmm1
+
+/* sOut = sR2*(a2+sR2*(a4+a6*sR2)) */
+        vmulps    {rn-sae}, %zmm6, %zmm1, %zmm2
+
+/* sOut = sG2*sR2*(a2+sR2*(a4+a6*sR2)) */
+        vmulps    {rn-sae}, %zmm8, %zmm2, %zmm3
+
+/* sOut = sG1*sinh(dR)+sG2*sR2*(a2+sR2*(a4+a6*sR2)) */
+        vfmadd213ps {rn-sae}, %zmm3, %zmm0, %zmm7
+
+/* sOut = sG2 + sG1*sinh(dR) + sG2*sR2*(a2+sR2*(a4+a6*sR2)) */
+        vaddps    {rn-sae}, %zmm8, %zmm7, %zmm9
+
+/*  Ret H  */
+        vorps     %zmm9, %zmm4, %zmm0
+        testl     %edx, %edx
+
+/* Go to special inputs processing branch */
+        jne       L(SPECIAL_VALUES_BRANCH)
+                                # LOE rbx r12 r13 r14 r15 edx zmm0 zmm5
+
+/* Restore registers
+ * and exit the function
+ */
+
+L(EXIT):
+        movq      %rbp, %rsp
+        popq      %rbp
+        cfi_def_cfa(7, 8)
+        cfi_restore(6)
+        ret
+        cfi_def_cfa(6, 16)
+        cfi_offset(6, -16)
+
+/* Branch to process
+ * special inputs
+ */
+
+L(SPECIAL_VALUES_BRANCH):
+        vmovups   %zmm5, 64(%rsp)
+        vmovups   %zmm0, 128(%rsp)
+                                # LOE rbx r12 r13 r14 r15 edx zmm0
+
+        xorl      %eax, %eax
+                                # LOE rbx r12 r13 r14 r15 eax edx
+
+        vzeroupper
+        movq      %r12, 16(%rsp)
+        /*  DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
+        movl      %eax, %r12d
+        movq      %r13, 8(%rsp)
+        /*  DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
+        movl      %edx, %r13d
+        movq      %r14, (%rsp)
+        /*  DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
+                                # LOE rbx r15 r12d r13d
+
+/* Range mask
+ * bits check
+ */
+
+L(RANGEMASK_CHECK):
+        btl       %r12d, %r13d
+
+/* Call scalar math function */
+        jc        L(SCALAR_MATH_CALL)
+                                # LOE rbx r15 r12d r13d
+
+/* Special inputs
+ * processing loop
+ */
+
+L(SPECIAL_VALUES_LOOP):
+        incl      %r12d
+        cmpl      $16, %r12d
+
+/* Check bits in range mask */
+        jl        L(RANGEMASK_CHECK)
+                                # LOE rbx r15 r12d r13d
+
+        movq      16(%rsp), %r12
+        cfi_restore(12)
+        movq      8(%rsp), %r13
+        cfi_restore(13)
+        movq      (%rsp), %r14
+        cfi_restore(14)
+        vmovups   128(%rsp), %zmm0
+
+/* Go to exit */
+        jmp       L(EXIT)
+        /*  DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22
+        /*  DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22
+        /*  DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22
+                                # LOE rbx r12 r13 r14 r15 zmm0
+
+/* Scalar math function call
+ * to process special input
+ */
+
+L(SCALAR_MATH_CALL):
+        movl      %r12d, %r14d
+        movss     64(%rsp,%r14,4), %xmm0
+        call      sinhf@PLT
+                                # LOE rbx r14 r15 r12d r13d xmm0
+
+        movss     %xmm0, 128(%rsp,%r14,4)
+
+/* Process special inputs in loop */
+        jmp       L(SPECIAL_VALUES_LOOP)
+                                # LOE rbx r15 r12d r13d
+END(_ZGVeN16v_sinhf_skx)
+
+        .section .rodata, "a"
+        .align 64
+
+#ifdef __svml_ssinh_data_internal_typedef
+typedef unsigned int VUINT32;
+typedef struct
+{
+        __declspec(align(64)) VUINT32 _sInvLn2[16][1];
+        __declspec(align(64)) VUINT32 _sLn2hi[16][1];
+        __declspec(align(64)) VUINT32 _sLn2lo[16][1];
+        __declspec(align(64)) VUINT32 _sSign[16][1];
+        __declspec(align(64)) VUINT32 _sShifter[16][1];
+        __declspec(align(64)) VUINT32 _iDomainRange[16][1];
+        __declspec(align(64)) VUINT32 _sPC1[16][1];
+        __declspec(align(64)) VUINT32 _sPC2[16][1];
+        __declspec(align(64)) VUINT32 _sPC3[16][1];
+        __declspec(align(64)) VUINT32 _sPC4[16][1];
+        __declspec(align(64)) VUINT32 _sPC5[16][1];
+        __declspec(align(64)) VUINT32 _sPC6[16][1];
+        __declspec(align(64)) VUINT32 _iHalf[16][1];
+} __svml_ssinh_data_internal;
+#endif
+__svml_ssinh_data_internal:
+        .long 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B           /* _sInvLn2  */  //k=0
+        .align 64
+        .long 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000           /* _sLn2hi   */
+        .align 64
+        .long 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4           /* _sLn2lo   */
+        .align 64
+        .long 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000           /* _sSign    */
+        .align 64
+        .long 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000           /* _sShifter */
+        .align 64
+        .long 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E           /* _iDomainRange */
+        .align 64
+        .long 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000         /* _sPC1=1  */
+        .align 64
+        .long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000         /* _sPC2  */
+        .align 64
+        .long 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57         /* _sPC3  */
+        .align 64
+        .long 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72         /* _sPC4  */
+        .align 64
+        .long 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461         /* _sPC5  */
+        .align 64
+        .long 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3         /* _sPC6  */
+        // Integer constants
+        .align 64
+        .long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000 /* _iHalf*/
+        .align 64
+        .type	__svml_ssinh_data_internal,@object
+        .size	__svml_ssinh_data_internal,.-__svml_ssinh_data_internal
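The single-precision kernel above follows the same scheme as the double version but with k = 0: there is no table, N = round(x/ln2), and 2^(N-1) and 2^(-N-1) are built by adding or subtracting N<<23 to the bit pattern of 0.5f (_iHalf).  A scalar model, again for illustration only (Taylor coefficients stand in for _sPC2.._sPC6, the unsplit ln2 stands in for _sLn2hi/_sLn2lo, and out-of-range inputs are ignored):

#include <math.h>
#include <stdint.h>
#include <string.h>

static float
half_pow2 (int n)   /* 2^(n-1) via an exponent-field add on 0.5f (_iHalf) */
{
  /* Unsigned wraparound handles negative n; no range checking here.  */
  uint32_t u = 0x3f000000u + ((uint32_t) n << 23);
  float f;
  memcpy (&f, &u, sizeof f);
  return f;
}

static float
sinhf_model (float x)
{
  float ax = fabsf (x);
  float n = nearbyintf (ax * (float) (1.0 / M_LN2));
  float r = ax - n * (float) M_LN2;
  float r2 = r * r;
  /* Even/odd polynomials; Taylor stand-ins for _sPC2.._sPC6.  */
  float cosh_r = 1.0f + r2 * (0.5f + r2 * (1.0f / 24.0f + r2 * (1.0f / 720.0f)));
  float sinh_r = r + r * (r2 * (1.0f / 6.0f + r2 * (1.0f / 120.0f)));
  float gp = half_pow2 ((int) n) + half_pow2 (-(int) n);  /* 2^(N-1)+2^(-N-1) */
  float gm = half_pow2 ((int) n) - half_pow2 (-(int) n);  /* 2^(N-1)-2^(-N-1) */
  return copysignf (gm * cosh_r + gp * sinh_r, x);
}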
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core-sse2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core-sse2.S
new file mode 100644
index 0000000000..1b31095fe1
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core-sse2.S
@@ -0,0 +1,20 @@
+/* SSE2 version of vectorized sinhf, vector length is 4.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define _ZGVbN4v_sinhf _ZGVbN4v_sinhf_sse2
+#include "../svml_s_sinhf4_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core.c b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core.c
new file mode 100644
index 0000000000..9d4297c2c9
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core.c
@@ -0,0 +1,28 @@
+/* Multiple versions of vectorized sinhf, vector length is 4.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define SYMBOL_NAME _ZGVbN4v_sinhf
+#include "ifunc-mathvec-sse4_1.h"
+
+libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
+
+#ifdef SHARED
+__hidden_ver1 (_ZGVbN4v_sinhf, __GI__ZGVbN4v_sinhf,
+	       __redirect__ZGVbN4v_sinhf)
+  __attribute__ ((visibility ("hidden")));
+#endif
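The .c file above only sets up the IFUNC dispatch: at load time a resolver picks the SSE4 kernel when the CPU supports it and otherwise falls back to the SSE2 wrapper. The sketch below illustrates the same GNU IFUNC pattern in generic form; glibc's real selector lives in ifunc-mathvec-sse4_1.h and uses internal CPU-feature data, and every name here is hypothetical.

#include <immintrin.h>
#include <math.h>

static __m128
my_sinhf4_sse2 (__m128 x)                 /* lane-by-lane fallback */
{
  float in[4], out[4];
  _mm_storeu_ps (in, x);
  for (int i = 0; i < 4; i++)
    out[i] = sinhf (in[i]);
  return _mm_loadu_ps (out);
}

static __m128
my_sinhf4_sse4 (__m128 x)                 /* stand-in for the tuned kernel */
{
  return my_sinhf4_sse2 (x);
}

/* Resolver: runs once at relocation time and returns the variant to use.  */
static __m128 (*resolve_my_sinhf4 (void)) (__m128)
{
  __builtin_cpu_init ();
  return __builtin_cpu_supports ("sse4.1") ? my_sinhf4_sse4 : my_sinhf4_sse2;
}

__m128 my_sinhf4 (__m128 x) __attribute__ ((ifunc ("resolve_my_sinhf4")));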
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
new file mode 100644
index 0000000000..82d6f55d33
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf4_core_sse4.S
@@ -0,0 +1,308 @@
+/* Function sinhf vectorized with SSE4.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   https://www.gnu.org/licenses/.  */
+
+/*
+ * ALGORITHM DESCRIPTION:
+ *
+ *   Compute sinh(x) as (exp(x)-exp(-x))/2,
+ *   where exp is calculated as
+ *   exp(M*ln2 + ln2*(j/2^k) + r) = 2^M * 2^(j/2^k) * exp(r)
+ *
+ *   Special cases:
+ *
+ *   sinh(NaN)    = quiet NaN, and raises the invalid exception
+ *   sinh(+/-INF) = +/-INF
+ *   sinh(x)      = x for subnormal x
+ *   sinh(x) overflows for large x (|x| above about MAXLOG + log(2))
+ *
+ */
+
+/* Offsets for data table __svml_ssinh_data_internal
+ */
+#define _sInvLn2                      	0
+#define _sLn2hi                       	16
+#define _sLn2lo                       	32
+#define _sSign                        	48
+#define _sShifter                     	64
+#define _iDomainRange                 	80
+#define _sPC1                         	96
+#define _sPC2                         	112
+#define _sPC3                         	128
+#define _sPC4                         	144
+#define _sPC5                         	160
+#define _sPC6                         	176
+#define _iHalf                        	192
+
+#include <sysdep.h>
+
+        .text
+	.section .text.sse4,"ax",@progbits
+ENTRY(_ZGVbN4v_sinhf_sse4)
+        subq      $72, %rsp
+        cfi_def_cfa_offset(80)
+
+/*
+ *  Implementation
+ *  Abs argument
+ */
+        movups    _sSign+__svml_ssinh_data_internal(%rip), %xmm14
+        andps     %xmm0, %xmm14
+        movaps    %xmm14, %xmm10
+
+/*
+ *  Load argument
+ * dM = x/log(2) + RShifter
+ */
+        movups    _sInvLn2+__svml_ssinh_data_internal(%rip), %xmm7
+        pxor      %xmm0, %xmm10
+        mulps     %xmm10, %xmm7
+
+/*
+ * Check for overflow/underflow
+ * (is GT faster than GE?)
+ */
+        movaps    %xmm10, %xmm1
+        movups    _sShifter+__svml_ssinh_data_internal(%rip), %xmm2
+
+/* sR = sX - sN*Log2_hi */
+        movups    _sLn2hi+__svml_ssinh_data_internal(%rip), %xmm3
+        addps     %xmm2, %xmm7
+
+/*
+ *  R
+ * sN = sM - RShifter
+ */
+        movaps    %xmm7, %xmm4
+
+/*
+ *  G1,G2 2^N,2^(-N)
+ * iM now holds N shifted into the exponent field (N << 23)
+ */
+        pslld     $23, %xmm7
+
+/* sR = (sX - sN*Log2_hi) - sN*Log2_lo */
+        movups    _sLn2lo+__svml_ssinh_data_internal(%rip), %xmm5
+        subps     %xmm2, %xmm4
+        mulps     %xmm4, %xmm3
+        mulps     %xmm4, %xmm5
+        subps     %xmm3, %xmm10
+
+/*
+ * sinh(r) = r*(1 + r^2*(a3 + r^2*a5)) = r + r*(r^2*(a3 + r^2*a5))
+ * sSinh_r = (a3+r^2*a5)
+ */
+        movups    _sPC5+__svml_ssinh_data_internal(%rip), %xmm8
+        subps     %xmm5, %xmm10
+
+/* sR2 = sR^2 */
+        movaps    %xmm10, %xmm12
+        mulps     %xmm10, %xmm12
+
+/*
+ * sinh(X) = sG2 + sG1*sinh(dR) + sG2*sR2*(a2+sR2*(a4+a6*sR2))
+ * sOut = (a4 + a6*sR2)
+ */
+        movups    _sPC6+__svml_ssinh_data_internal(%rip), %xmm9
+        mulps     %xmm12, %xmm8
+        mulps     %xmm12, %xmm9
+        addps     _sPC3+__svml_ssinh_data_internal(%rip), %xmm8
+        addps     _sPC4+__svml_ssinh_data_internal(%rip), %xmm9
+
+/* sSinh_r = r^2*(a3+r^2*a5) */
+        mulps     %xmm12, %xmm8
+
+/* sOut = a2+sR2*(a4+a6*sR2) */
+        mulps     %xmm12, %xmm9
+
+/* sSinh_r = r + r*(r^2*(a3+r^2*a5)) */
+        mulps     %xmm10, %xmm8
+        addps     _sPC2+__svml_ssinh_data_internal(%rip), %xmm9
+        addps     %xmm8, %xmm10
+
+/* sOut = sR2*(a2+sR2*(a4+a6*sR2)) */
+        mulps     %xmm9, %xmm12
+        movdqu    _iHalf+__svml_ssinh_data_internal(%rip), %xmm6
+        movdqa    %xmm6, %xmm13
+        psubd     %xmm7, %xmm6
+        paddd     %xmm7, %xmm13
+
+/* sG1 = 2^(N-1)+2^(-N-1) */
+        movdqa    %xmm13, %xmm11
+
+/* sG2 = 2^(N-1)-2^(-N-1) */
+        subps     %xmm6, %xmm13
+        addps     %xmm6, %xmm11
+
+/* sOut = sG2*sR2*(a2+sR2*(a4+a6*sR2)) */
+        mulps     %xmm13, %xmm12
+
+/* sOut = sG1*sinh(dR)+sG2*sR2*(a2+sR2*(a4+a6*sR2)) */
+        mulps     %xmm10, %xmm11
+        pcmpgtd   _iDomainRange+__svml_ssinh_data_internal(%rip), %xmm1
+        addps     %xmm11, %xmm12
+        movmskps  %xmm1, %edx
+
+/* sOut = sG2 + sG1*sinh(dR) + sG2*sR2*(a2+sR2*(a4+a6*sR2)) */
+        addps     %xmm12, %xmm13
+
+/*  Ret H  */
+        orps      %xmm13, %xmm14
+        testl     %edx, %edx
+
+/* Go to special inputs processing branch */
+        jne       L(SPECIAL_VALUES_BRANCH)
+                                # LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm14
+
+/* Restore registers
+ * and exit the function
+ */
+
+L(EXIT):
+        movaps    %xmm14, %xmm0
+        addq      $72, %rsp
+        cfi_def_cfa_offset(8)
+        ret
+        cfi_def_cfa_offset(80)
+
+/* Branch to process
+ * special inputs
+ */
+
+L(SPECIAL_VALUES_BRANCH):
+        movups    %xmm0, 32(%rsp)
+        movups    %xmm14, 48(%rsp)
+                                # LOE rbx rbp r12 r13 r14 r15 edx
+
+        xorl      %eax, %eax
+        movq      %r12, 16(%rsp)
+        cfi_offset(12, -64)
+        movl      %eax, %r12d
+        movq      %r13, 8(%rsp)
+        cfi_offset(13, -72)
+        movl      %edx, %r13d
+        movq      %r14, (%rsp)
+        cfi_offset(14, -80)
+                                # LOE rbx rbp r15 r12d r13d
+
+/* Range mask
+ * bits check
+ */
+
+L(RANGEMASK_CHECK):
+        btl       %r12d, %r13d
+
+/* Call scalar math function */
+        jc        L(SCALAR_MATH_CALL)
+                                # LOE rbx rbp r15 r12d r13d
+
+/* Special inputs
+ * processing loop
+ */
+
+L(SPECIAL_VALUES_LOOP):
+        incl      %r12d
+        cmpl      $4, %r12d
+
+/* Check bits in range mask */
+        jl        L(RANGEMASK_CHECK)
+                                # LOE rbx rbp r15 r12d r13d
+
+        movq      16(%rsp), %r12
+        cfi_restore(12)
+        movq      8(%rsp), %r13
+        cfi_restore(13)
+        movq      (%rsp), %r14
+        cfi_restore(14)
+        movups    48(%rsp), %xmm14
+
+/* Go to exit */
+        jmp       L(EXIT)
+        cfi_offset(12, -64)
+        cfi_offset(13, -72)
+        cfi_offset(14, -80)
+                                # LOE rbx rbp r12 r13 r14 r15 xmm14
+
+/* Scalar math function call
+ * to process special input
+ */
+
+L(SCALAR_MATH_CALL):
+        movl      %r12d, %r14d
+        movss     32(%rsp,%r14,4), %xmm0
+        call      sinhf@PLT
+                                # LOE rbx rbp r14 r15 r12d r13d xmm0
+
+        movss     %xmm0, 48(%rsp,%r14,4)
+
+/* Process special inputs in loop */
+        jmp       L(SPECIAL_VALUES_LOOP)
+                                # LOE rbx rbp r15 r12d r13d
+END(_ZGVbN4v_sinhf_sse4)
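The SPECIAL_VALUES path above amounts to a per-lane fixup: each bit set in the range mask marks a lane whose input fell outside the fast-path domain, and that lane is recomputed with the scalar routine. A minimal C sketch of that loop (not glibc code):

#include <math.h>

/* Mirrors the btl/RANGEMASK_CHECK loop and the sinhf@PLT call above.  */
void
fixup_special_lanes (float res[4], const float arg[4], unsigned int mask)
{
  for (int i = 0; i < 4; i++)
    if (mask & (1u << i))
      res[i] = sinhf (arg[i]);
}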
+
+        .section .rodata, "a"
+        .align 16
+
+#ifdef __svml_ssinh_data_internal_typedef
+typedef unsigned int VUINT32;
+typedef struct
+{
+        __declspec(align(16)) VUINT32 _sInvLn2[4][1];
+        __declspec(align(16)) VUINT32 _sLn2hi[4][1];
+        __declspec(align(16)) VUINT32 _sLn2lo[4][1];
+        __declspec(align(16)) VUINT32 _sSign[4][1];
+        __declspec(align(16)) VUINT32 _sShifter[4][1];
+        __declspec(align(16)) VUINT32 _iDomainRange[4][1];
+        __declspec(align(16)) VUINT32 _sPC1[4][1];
+        __declspec(align(16)) VUINT32 _sPC2[4][1];
+        __declspec(align(16)) VUINT32 _sPC3[4][1];
+        __declspec(align(16)) VUINT32 _sPC4[4][1];
+        __declspec(align(16)) VUINT32 _sPC5[4][1];
+        __declspec(align(16)) VUINT32 _sPC6[4][1];
+        __declspec(align(16)) VUINT32 _iHalf[4][1];
+} __svml_ssinh_data_internal;
+#endif
+__svml_ssinh_data_internal:
+        .long 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B           /* _sInvLn2  */  //k=0
+        .align 16
+        .long 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000           /* _sLn2hi   */
+        .align 16
+        .long 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4           /* _sLn2lo   */
+        .align 16
+        .long 0x80000000, 0x80000000, 0x80000000, 0x80000000           /* _sSign    */
+        .align 16
+        .long 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000           /* _sShifter */
+        .align 16
+        .long 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E           /* _iDomainRange */
+        .align 16
+        .long 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000         /* _sPC1=1  */
+        .align 16
+        .long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000         /* _sPC2  */
+        .align 16
+        .long 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57         /* _sPC3  */
+        .align 16
+        .long 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72         /* _sPC4  */
+        .align 16
+        .long 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461         /* _sPC5  */
+        .align 16
+        .long 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3         /* _sPC6  */
+        // Integer constants
+        .align 16
+        .long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000 /* _iHalf*/
+        .align 16
+        .type	__svml_ssinh_data_internal,@object
+        .size	__svml_ssinh_data_internal,.-__svml_ssinh_data_internal
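Putting the pieces of the SSE4 kernel together, here is a scalar C sketch of the same float algorithm, based on a reading of the assembly above rather than on glibc source. It assumes IEEE-754 binary32 and inputs inside _iDomainRange; larger inputs take the special-values path instead. The constants are the data-table values rewritten as hex floats.

#include <math.h>
#include <stdint.h>
#include <string.h>

float
sinhf_sketch (float x)
{
  const float inv_ln2 = 0x1.715476p+0f;        /* _sInvLn2  */
  const float ln2_hi  = 0x1.62ep-1f;           /* _sLn2hi   */
  const float ln2_lo  = 0x1.0bfbe8p-15f;       /* _sLn2lo   */
  const float shifter = 0x1.8p+23f;            /* _sShifter */
  const float a2 = 0.5f;                       /* _sPC2     */
  const float a3 = 0x1.5554aep-3f;             /* _sPC3     */
  const float a4 = 0x1.5554e4p-5f;             /* _sPC4     */
  const float a5 = 0x1.1228c2p-7f;             /* _sPC5     */
  const float a6 = 0x1.6d5146p-10f;            /* _sPC6     */

  float ax = fabsf (x);

  /* dM = x/log(2) + RShifter; the shifter trick leaves N = round(ax/ln2)
     in the low mantissa bits of m.  */
  float m = ax * inv_ln2 + shifter;
  float n = m - shifter;

  /* sR = (sX - sN*Log2_hi) - sN*Log2_lo  */
  float r = (ax - n * ln2_hi) - n * ln2_lo;

  /* 2^(N-1) and 2^(-N-1): add/subtract N<<23 to the bits of 0.5f (_iHalf),
     the same thing pslld/paddd/psubd do above.  */
  uint32_t mbits;
  memcpy (&mbits, &m, sizeof mbits);
  uint32_t e = mbits << 23;
  uint32_t hi_bits = 0x3f000000u + e;
  uint32_t lo_bits = 0x3f000000u - e;
  float p_hi, p_lo;
  memcpy (&p_hi, &hi_bits, sizeof p_hi);
  memcpy (&p_lo, &lo_bits, sizeof p_lo);

  float g1 = p_hi + p_lo;                      /* 2^(N-1) + 2^(-N-1) */
  float g2 = p_hi - p_lo;                      /* 2^(N-1) - 2^(-N-1) */

  float r2 = r * r;
  float sinh_r = r + r * (r2 * (a3 + r2 * a5));
  float out = r2 * (a2 + r2 * (a4 + a6 * r2));

  return copysignf (g2 + g1 * sinh_r + g2 * out, x);
}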
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core-sse.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core-sse.S
new file mode 100644
index 0000000000..d3c9c607a0
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core-sse.S
@@ -0,0 +1,20 @@
+/* SSE version of vectorized sinhf, vector length is 8.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define _ZGVdN8v_sinhf _ZGVdN8v_sinhf_sse_wrapper
+#include "../svml_s_sinhf8_core.S"
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core.c b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core.c
new file mode 100644
index 0000000000..2a2e21e742
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core.c
@@ -0,0 +1,28 @@
+/* Multiple versions of vectorized sinhf, vector length is 8.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define SYMBOL_NAME _ZGVdN8v_sinhf
+#include "ifunc-mathvec-avx2.h"
+
+libc_ifunc_redirected (REDIRECT_NAME, SYMBOL_NAME, IFUNC_SELECTOR ());
+
+#ifdef SHARED
+__hidden_ver1 (_ZGVdN8v_sinhf, __GI__ZGVdN8v_sinhf,
+	       __redirect__ZGVdN8v_sinhf)
+  __attribute__ ((visibility ("hidden")));
+#endif
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
new file mode 100644
index 0000000000..ea13fb60d4
--- /dev/null
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sinhf8_core_avx2.S
@@ -0,0 +1,309 @@
+/* Function sinhf vectorized with AVX2.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   https://www.gnu.org/licenses/.  */
+
+/*
+ * ALGORITHM DESCRIPTION:
+ *
+ *   Compute sinh(x) as (exp(x)-exp(-x))/2,
+ *   where exp is calculated as
+ *   exp(M*ln2 + ln2*(j/2^k) + r) = 2^M * 2^(j/2^k) * exp(r)
+ *
+ *   Special cases:
+ *
+ *   sinh(NaN)    = quiet NaN, and raises the invalid exception
+ *   sinh(+/-INF) = +/-INF
+ *   sinh(x)      = x for subnormal x
+ *   sinh(x) overflows for large x (|x| above about MAXLOG + log(2))
+ *
+ */
+
+/* Offsets for data table __svml_ssinh_data_internal
+ */
+#define _sInvLn2                      	0
+#define _sLn2hi                       	32
+#define _sLn2lo                       	64
+#define _sSign                        	96
+#define _sShifter                     	128
+#define _iDomainRange                 	160
+#define _sPC1                         	192
+#define _sPC2                         	224
+#define _sPC3                         	256
+#define _sPC4                         	288
+#define _sPC5                         	320
+#define _sPC6                         	352
+#define _iHalf                        	384
+
+#include <sysdep.h>
+
+        .text
+	.section .text.avx2,"ax",@progbits
+ENTRY(_ZGVdN8v_sinhf_avx2)
+        pushq     %rbp
+        cfi_def_cfa_offset(16)
+        movq      %rsp, %rbp
+        cfi_def_cfa(6, 16)
+        cfi_offset(6, -16)
+        andq      $-32, %rsp
+        subq      $96, %rsp
+        vmovups   _sInvLn2+__svml_ssinh_data_internal(%rip), %ymm7
+        vmovups   _sShifter+__svml_ssinh_data_internal(%rip), %ymm4
+        vmovups   _sLn2hi+__svml_ssinh_data_internal(%rip), %ymm5
+
+/*
+ * sinh(X) = sG2 + sG1*sinh(dR) + sG2*sR2*(a2+sR2*(a4+a6*sR2))
+ * sOut = (a4 + a6*sR2)
+ */
+        vmovups   _sPC6+__svml_ssinh_data_internal(%rip), %ymm14
+
+/*
+ * sinh(r) = r*(1 + r^2*(a3 + r^2*a5)) = r + r*(r^2*(a3 + r^2*a5))
+ * sSinh_r = (a3+r^2*a5)
+ */
+        vmovups   _sPC5+__svml_ssinh_data_internal(%rip), %ymm12
+        vmovups   _iHalf+__svml_ssinh_data_internal(%rip), %ymm8
+        vmovaps   %ymm0, %ymm2
+
+/*
+ *  Implementation
+ *  Abs argument
+ */
+        vandps    _sSign+__svml_ssinh_data_internal(%rip), %ymm2, %ymm1
+        vxorps    %ymm2, %ymm1, %ymm0
+
+/*
+ *  Load argument
+ * dM = x/log(2) + RShifter
+ */
+        vfmadd213ps %ymm4, %ymm0, %ymm7
+
+/*
+ *  R
+ * sN = sM - RShifter
+ */
+        vsubps    %ymm4, %ymm7, %ymm6
+
+/*
+ *  G1,G2 2^N,2^(-N)
+ * iM now holds N shifted into the exponent field (N << 23)
+ */
+        vpslld    $23, %ymm7, %ymm9
+
+/*
+ * Check for overflow/underflow
+ * (is GT faster than GE?)
+ */
+        vpcmpgtd  _iDomainRange+__svml_ssinh_data_internal(%rip), %ymm0, %ymm3
+
+/* sR = sX - sN*Log2_hi */
+        vfnmadd231ps %ymm5, %ymm6, %ymm0
+        vpaddd    %ymm9, %ymm8, %ymm10
+        vpsubd    %ymm9, %ymm8, %ymm11
+
+/* sR = (sX - sN*Log2_hi) - sN*Log2_lo */
+        vfnmadd231ps _sLn2lo+__svml_ssinh_data_internal(%rip), %ymm6, %ymm0
+
+/* sR2 = sR^2 */
+        vmulps    %ymm0, %ymm0, %ymm13
+        vfmadd213ps _sPC4+__svml_ssinh_data_internal(%rip), %ymm13, %ymm14
+        vfmadd213ps _sPC3+__svml_ssinh_data_internal(%rip), %ymm13, %ymm12
+
+/* sOut = a2+sR2*(a4+a6*sR2) */
+        vfmadd213ps _sPC2+__svml_ssinh_data_internal(%rip), %ymm13, %ymm14
+
+/* sSinh_r = r^2*(a3+r^2*a5) */
+        vmulps    %ymm12, %ymm13, %ymm12
+
+/* sOut = sR2*(a2+sR2*(a4+a6*sR2)) */
+        vmulps    %ymm14, %ymm13, %ymm15
+
+/* sSinh_r = r + r*(r^2*(a3+r^2*a5)) */
+        vfmadd213ps %ymm0, %ymm0, %ymm12
+        vmovmskps %ymm3, %edx
+
+/* sG1 = 2^(N-1)+2^(-N-1) */
+        vaddps    %ymm11, %ymm10, %ymm3
+
+/* sG2 = 2^(N-1)-2^(-N-1) */
+        vsubps    %ymm11, %ymm10, %ymm10
+
+/* sOut = sG2*sR2*(a2+sR2*(a4+a6*sR2)) */
+        vmulps    %ymm15, %ymm10, %ymm0
+
+/* sOut = sG1*sinh(dR)+sG2*sR2*(a2+sR2*(a4+a6*sR2)) */
+        vfmadd213ps %ymm0, %ymm12, %ymm3
+
+/* sOut = sG2 + sG1*sinh(dR) + sG2*sR2*(a2+sR2*(a4+a6*sR2)) */
+        vaddps    %ymm3, %ymm10, %ymm4
+
+/*  Ret H  */
+        vorps     %ymm4, %ymm1, %ymm0
+        testl     %edx, %edx
+
+/* Go to special inputs processing branch */
+        jne       L(SPECIAL_VALUES_BRANCH)
+                                # LOE rbx r12 r13 r14 r15 edx ymm0 ymm2
+
+/* Restore registers
+ * and exit the function
+ */
+
+L(EXIT):
+        movq      %rbp, %rsp
+        popq      %rbp
+        cfi_def_cfa(7, 8)
+        cfi_restore(6)
+        ret
+        cfi_def_cfa(6, 16)
+        cfi_offset(6, -16)
+
+/* Branch to process
+ * special inputs
+ */
+
+L(SPECIAL_VALUES_BRANCH):
+        vmovups   %ymm2, 32(%rsp)
+        vmovups   %ymm0, 64(%rsp)
+                                # LOE rbx r12 r13 r14 r15 edx ymm0
+
+        xorl      %eax, %eax
+                                # LOE rbx r12 r13 r14 r15 eax edx
+
+        vzeroupper
+        movq      %r12, 16(%rsp)
+        /*  DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
+        movl      %eax, %r12d
+        movq      %r13, 8(%rsp)
+        /*  DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
+        movl      %edx, %r13d
+        movq      %r14, (%rsp)
+        /*  DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
+                                # LOE rbx r15 r12d r13d
+
+/* Range mask
+ * bits check
+ */
+
+L(RANGEMASK_CHECK):
+        btl       %r12d, %r13d
+
+/* Call scalar math function */
+        jc        L(SCALAR_MATH_CALL)
+                                # LOE rbx r15 r12d r13d
+
+/* Special inputs
+ * processing loop
+ */
+
+L(SPECIAL_VALUES_LOOP):
+        incl      %r12d
+        cmpl      $8, %r12d
+
+/* Check bits in range mask */
+        jl        L(RANGEMASK_CHECK)
+                                # LOE rbx r15 r12d r13d
+
+        movq      16(%rsp), %r12
+        cfi_restore(12)
+        movq      8(%rsp), %r13
+        cfi_restore(13)
+        movq      (%rsp), %r14
+        cfi_restore(14)
+        vmovups   64(%rsp), %ymm0
+
+/* Go to exit */
+        jmp       L(EXIT)
+        /*  DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -80; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xb0, 0xff, 0xff, 0xff, 0x22
+        /*  DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -88; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa8, 0xff, 0xff, 0xff, 0x22
+        /*  DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -32; DW_OP_and; DW_OP_const4s: -96; DW_OP_plus)  */
+        .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xe0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0xa0, 0xff, 0xff, 0xff, 0x22
+                                # LOE rbx r12 r13 r14 r15 ymm0
+
+/* Scalar math function call
+ * to process special input
+ */
+
+L(SCALAR_MATH_CALL):
+        movl      %r12d, %r14d
+        movss     32(%rsp,%r14,4), %xmm0
+        call      sinhf@PLT
+                                # LOE rbx r14 r15 r12d r13d xmm0
+
+        movss     %xmm0, 64(%rsp,%r14,4)
+
+/* Process special inputs in loop */
+        jmp       L(SPECIAL_VALUES_LOOP)
+                                # LOE rbx r15 r12d r13d
+END(_ZGVdN8v_sinhf_avx2)
+
+        .section .rodata, "a"
+        .align 32
+
+#ifdef __svml_ssinh_data_internal_typedef
+typedef unsigned int VUINT32;
+typedef struct
+{
+        __declspec(align(32)) VUINT32 _sInvLn2[8][1];
+        __declspec(align(32)) VUINT32 _sLn2hi[8][1];
+        __declspec(align(32)) VUINT32 _sLn2lo[8][1];
+        __declspec(align(32)) VUINT32 _sSign[8][1];
+        __declspec(align(32)) VUINT32 _sShifter[8][1];
+        __declspec(align(32)) VUINT32 _iDomainRange[8][1];
+        __declspec(align(32)) VUINT32 _sPC1[8][1];
+        __declspec(align(32)) VUINT32 _sPC2[8][1];
+        __declspec(align(32)) VUINT32 _sPC3[8][1];
+        __declspec(align(32)) VUINT32 _sPC4[8][1];
+        __declspec(align(32)) VUINT32 _sPC5[8][1];
+        __declspec(align(32)) VUINT32 _sPC6[8][1];
+        __declspec(align(32)) VUINT32 _iHalf[8][1];
+} __svml_ssinh_data_internal;
+#endif
+__svml_ssinh_data_internal:
+        .long 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B, 0x3FB8AA3B           /* _sInvLn2  */  //k=0
+        .align 32
+        .long 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000, 0x3F317000           /* _sLn2hi   */
+        .align 32
+        .long 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4, 0x3805FDF4           /* _sLn2lo   */
+        .align 32
+        .long 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000           /* _sSign    */
+        .align 32
+        .long 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000, 0x4b400000           /* _sShifter */
+        .align 32
+        .long 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E, 0x42AEAC4E           /* _iDomainRange */
+        .align 32
+        .long 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000         /* _sPC1=1  */
+        .align 32
+        .long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000         /* _sPC2  */
+        .align 32
+        .long 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57, 0x3e2aaa57         /* _sPC3  */
+        .align 32
+        .long 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72, 0x3d2aaa72         /* _sPC4  */
+        .align 32
+        .long 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461, 0x3c091461         /* _sPC5  */
+        .align 32
+        .long 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3, 0x3ab6a8a3         /* _sPC6  */
+        // Integer constants
+        .align 32
+        .long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000 /* _iHalf*/
+        .align 32
+        .type	__svml_ssinh_data_internal,@object
+        .size	__svml_ssinh_data_internal,.-__svml_ssinh_data_internal
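The AVX2 variant follows the same math as the SSE4 kernel but folds the Cody-Waite reduction and the Horner steps into FMAs (vfmadd213ps, vfnmadd231ps). The reduction step alone, written as scalar C with fmaf (a sketch; constants are the data-table values above):

#include <math.h>

/* Returns N = round(ax/ln2) and stores the reduced argument r,
   using three FMAs exactly as the AVX2 kernel does.  */
float
reduce_sketch (float ax, float *r)
{
  const float inv_ln2 = 0x1.715476p+0f;        /* _sInvLn2  */
  const float ln2_hi  = 0x1.62ep-1f;           /* _sLn2hi   */
  const float ln2_lo  = 0x1.0bfbe8p-15f;       /* _sLn2lo   */
  const float shifter = 0x1.8p+23f;            /* _sShifter */

  float m = fmaf (ax, inv_ln2, shifter);       /* vfmadd213ps  */
  float n = m - shifter;                       /* nearest int  */
  *r = fmaf (-n, ln2_lo, fmaf (-n, ln2_hi, ax));  /* vfnmadd231ps x2 */
  return n;
}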
diff --git a/sysdeps/x86_64/fpu/svml_d_sinh2_core.S b/sysdeps/x86_64/fpu/svml_d_sinh2_core.S
new file mode 100644
index 0000000000..91bda7318c
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_d_sinh2_core.S
@@ -0,0 +1,29 @@
+/* Function sinh vectorized with SSE2.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_d_wrapper_impl.h"
+
+	.text
+ENTRY (_ZGVbN2v_sinh)
+WRAPPER_IMPL_SSE2 sinh
+END (_ZGVbN2v_sinh)
+
+#ifndef USE_MULTIARCH
+ libmvec_hidden_def (_ZGVbN2v_sinh)
+#endif
diff --git a/sysdeps/x86_64/fpu/svml_d_sinh4_core.S b/sysdeps/x86_64/fpu/svml_d_sinh4_core.S
new file mode 100644
index 0000000000..7b8091946a
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_d_sinh4_core.S
@@ -0,0 +1,29 @@
+/* Function sinh vectorized with AVX2, wrapper version.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_d_wrapper_impl.h"
+
+	.text
+ENTRY (_ZGVdN4v_sinh)
+WRAPPER_IMPL_AVX _ZGVbN2v_sinh
+END (_ZGVdN4v_sinh)
+
+#ifndef USE_MULTIARCH
+ libmvec_hidden_def (_ZGVdN4v_sinh)
+#endif
diff --git a/sysdeps/x86_64/fpu/svml_d_sinh4_core_avx.S b/sysdeps/x86_64/fpu/svml_d_sinh4_core_avx.S
new file mode 100644
index 0000000000..f773bf110c
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_d_sinh4_core_avx.S
@@ -0,0 +1,25 @@
+/* Function sinh vectorized in AVX ISA as wrapper to SSE4 ISA version.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_d_wrapper_impl.h"
+
+	.text
+ENTRY (_ZGVcN4v_sinh)
+WRAPPER_IMPL_AVX _ZGVbN2v_sinh
+END (_ZGVcN4v_sinh)
diff --git a/sysdeps/x86_64/fpu/svml_d_sinh8_core.S b/sysdeps/x86_64/fpu/svml_d_sinh8_core.S
new file mode 100644
index 0000000000..153a18429c
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_d_sinh8_core.S
@@ -0,0 +1,25 @@
+/* Function sinh vectorized with AVX-512, wrapper to AVX2.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_d_wrapper_impl.h"
+
+	.text
+ENTRY (_ZGVeN8v_sinh)
+WRAPPER_IMPL_AVX512 _ZGVdN4v_sinh
+END (_ZGVeN8v_sinh)
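The AVX-512 double wrapper above simply runs the AVX2 kernel on each 256-bit half of the input. A conceptual C equivalent of what WRAPPER_IMPL_AVX512 does here (illustrative only, not the actual macro; the outer function name is made up, while _ZGVdN4v_sinh is the real libmvec symbol declared by hand):

#include <immintrin.h>

__m256d _ZGVdN4v_sinh (__m256d x);   /* AVX2 entry point, vector length 4 */

__m512d
sinh8_via_avx2 (__m512d x)
{
  __m256d lo = _mm512_castpd512_pd256 (x);
  __m256d hi = _mm512_extractf64x4_pd (x, 1);
  __m512d r  = _mm512_castpd256_pd512 (_ZGVdN4v_sinh (lo));
  return _mm512_insertf64x4 (r, _ZGVdN4v_sinh (hi), 1);
}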
diff --git a/sysdeps/x86_64/fpu/svml_s_sinhf16_core.S b/sysdeps/x86_64/fpu/svml_s_sinhf16_core.S
new file mode 100644
index 0000000000..f8dc7da336
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_s_sinhf16_core.S
@@ -0,0 +1,25 @@
+/* Function sinhf vectorized with AVX-512. Wrapper to AVX2 version.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_s_wrapper_impl.h"
+
+	.text
+ENTRY (_ZGVeN16v_sinhf)
+WRAPPER_IMPL_AVX512 _ZGVdN8v_sinhf
+END (_ZGVeN16v_sinhf)
diff --git a/sysdeps/x86_64/fpu/svml_s_sinhf4_core.S b/sysdeps/x86_64/fpu/svml_s_sinhf4_core.S
new file mode 100644
index 0000000000..d065d03eb6
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_s_sinhf4_core.S
@@ -0,0 +1,29 @@
+/* Function sinhf vectorized with SSE2, wrapper version.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_s_wrapper_impl.h"
+
+	.text
+ENTRY (_ZGVbN4v_sinhf)
+WRAPPER_IMPL_SSE2 sinhf
+END (_ZGVbN4v_sinhf)
+
+#ifndef USE_MULTIARCH
+ libmvec_hidden_def (_ZGVbN4v_sinhf)
+#endif
diff --git a/sysdeps/x86_64/fpu/svml_s_sinhf8_core.S b/sysdeps/x86_64/fpu/svml_s_sinhf8_core.S
new file mode 100644
index 0000000000..1194699a76
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_s_sinhf8_core.S
@@ -0,0 +1,29 @@
+/* Function sinhf vectorized with AVX2, wrapper version.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_s_wrapper_impl.h"
+
+	.text
+ENTRY (_ZGVdN8v_sinhf)
+WRAPPER_IMPL_AVX _ZGVbN4v_sinhf
+END (_ZGVdN8v_sinhf)
+
+#ifndef USE_MULTIARCH
+ libmvec_hidden_def (_ZGVdN8v_sinhf)
+#endif
diff --git a/sysdeps/x86_64/fpu/svml_s_sinhf8_core_avx.S b/sysdeps/x86_64/fpu/svml_s_sinhf8_core_avx.S
new file mode 100644
index 0000000000..82c6b9b239
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_s_sinhf8_core_avx.S
@@ -0,0 +1,25 @@
+/* Function sinhf vectorized in AVX ISA as wrapper to SSE4 ISA version.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "svml_s_wrapper_impl.h"
+
+        .text
+ENTRY (_ZGVcN8v_sinhf)
+WRAPPER_IMPL_AVX _ZGVbN4v_sinhf
+END (_ZGVcN8v_sinhf)
diff --git a/sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx.c b/sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx.c
new file mode 100644
index 0000000000..55aa36d866
--- /dev/null
+++ b/sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx.c
@@ -0,0 +1 @@
+#include "test-double-libmvec-sinh.c"
diff --git a/sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx2.c b/sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx2.c
new file mode 100644
index 0000000000..55aa36d866
--- /dev/null
+++ b/sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx2.c
@@ -0,0 +1 @@
+#include "test-double-libmvec-sinh.c"
diff --git a/sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx512f.c b/sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx512f.c
new file mode 100644
index 0000000000..55aa36d866
--- /dev/null
+++ b/sysdeps/x86_64/fpu/test-double-libmvec-sinh-avx512f.c
@@ -0,0 +1 @@
+#include "test-double-libmvec-sinh.c"
diff --git a/sysdeps/x86_64/fpu/test-double-libmvec-sinh.c b/sysdeps/x86_64/fpu/test-double-libmvec-sinh.c
new file mode 100644
index 0000000000..82dcaf745d
--- /dev/null
+++ b/sysdeps/x86_64/fpu/test-double-libmvec-sinh.c
@@ -0,0 +1,3 @@
+#define LIBMVEC_TYPE double
+#define LIBMVEC_FUNC sinh
+#include "test-vector-abi-arg1.h"
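These ABI tests exercise the new entry points the same way user code reaches them: a SIMD loop over the scalar function that the compiler maps onto the _ZGV* symbols. An illustrative user-side example (compiled with something like gcc -O2 -ffast-math -fopenmp-simd -march=x86-64-v3, linking against libmvec and libm; whether the substitution happens depends on compiler and flags):

#include <math.h>

/* With glibc's math-vector.h declarations in effect, GCC may turn this
   loop into calls to _ZGVbN2v_sinh, _ZGVdN4v_sinh or _ZGVeN8v_sinh.  */
void
sinh_array (double *restrict y, const double *restrict x, int n)
{
#pragma omp simd
  for (int i = 0; i < n; i++)
    y[i] = sinh (x[i]);
}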
diff --git a/sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c b/sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c
index 0222f9f5b8..db136cc901 100644
--- a/sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c
+++ b/sysdeps/x86_64/fpu/test-double-vlen2-wrappers.c
@@ -35,6 +35,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp2), _ZGVbN2v_exp2)
 VECTOR_WRAPPER (WRAPPER_NAME (exp10), _ZGVbN2v_exp10)
 VECTOR_WRAPPER (WRAPPER_NAME (cosh), _ZGVbN2v_cosh)
 VECTOR_WRAPPER (WRAPPER_NAME (expm1), _ZGVbN2v_expm1)
+VECTOR_WRAPPER (WRAPPER_NAME (sinh), _ZGVbN2v_sinh)
 
 #define VEC_INT_TYPE __m128i
 
diff --git a/sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c b/sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c
index 1aad9faf9c..5fc09ac8c0 100644
--- a/sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c
+++ b/sysdeps/x86_64/fpu/test-double-vlen4-avx2-wrappers.c
@@ -38,6 +38,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp2), _ZGVdN4v_exp2)
 VECTOR_WRAPPER (WRAPPER_NAME (exp10), _ZGVdN4v_exp10)
 VECTOR_WRAPPER (WRAPPER_NAME (cosh), _ZGVdN4v_cosh)
 VECTOR_WRAPPER (WRAPPER_NAME (expm1), _ZGVdN4v_expm1)
+VECTOR_WRAPPER (WRAPPER_NAME (sinh), _ZGVdN4v_sinh)
 
 #ifndef __ILP32__
 # define VEC_INT_TYPE __m256i
diff --git a/sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c b/sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c
index e404bf899d..26ef7fb365 100644
--- a/sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c
+++ b/sysdeps/x86_64/fpu/test-double-vlen4-wrappers.c
@@ -35,6 +35,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp2), _ZGVcN4v_exp2)
 VECTOR_WRAPPER (WRAPPER_NAME (exp10), _ZGVcN4v_exp10)
 VECTOR_WRAPPER (WRAPPER_NAME (cosh), _ZGVcN4v_cosh)
 VECTOR_WRAPPER (WRAPPER_NAME (expm1), _ZGVcN4v_expm1)
+VECTOR_WRAPPER (WRAPPER_NAME (sinh), _ZGVcN4v_sinh)
 
 #define VEC_INT_TYPE __m128i
 
diff --git a/sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c b/sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c
index 2b4de59343..c7055fca76 100644
--- a/sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c
+++ b/sysdeps/x86_64/fpu/test-double-vlen8-wrappers.c
@@ -35,6 +35,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp2), _ZGVeN8v_exp2)
 VECTOR_WRAPPER (WRAPPER_NAME (exp10), _ZGVeN8v_exp10)
 VECTOR_WRAPPER (WRAPPER_NAME (cosh), _ZGVeN8v_cosh)
 VECTOR_WRAPPER (WRAPPER_NAME (expm1), _ZGVeN8v_expm1)
+VECTOR_WRAPPER (WRAPPER_NAME (sinh), _ZGVeN8v_sinh)
 
 #ifndef __ILP32__
 # define VEC_INT_TYPE __m512i
diff --git a/sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx.c b/sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx.c
new file mode 100644
index 0000000000..93986945f3
--- /dev/null
+++ b/sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx.c
@@ -0,0 +1 @@
+#include "test-float-libmvec-sinhf.c"
diff --git a/sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx2.c b/sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx2.c
new file mode 100644
index 0000000000..93986945f3
--- /dev/null
+++ b/sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx2.c
@@ -0,0 +1 @@
+#include "test-float-libmvec-sinhf.c"
diff --git a/sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx512f.c b/sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx512f.c
new file mode 100644
index 0000000000..93986945f3
--- /dev/null
+++ b/sysdeps/x86_64/fpu/test-float-libmvec-sinhf-avx512f.c
@@ -0,0 +1 @@
+#include "test-float-libmvec-sinhf.c"
diff --git a/sysdeps/x86_64/fpu/test-float-libmvec-sinhf.c b/sysdeps/x86_64/fpu/test-float-libmvec-sinhf.c
new file mode 100644
index 0000000000..fb1f3c5c48
--- /dev/null
+++ b/sysdeps/x86_64/fpu/test-float-libmvec-sinhf.c
@@ -0,0 +1,3 @@
+#define LIBMVEC_TYPE float
+#define LIBMVEC_FUNC sinhf
+#include "test-vector-abi-arg1.h"
diff --git a/sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c b/sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c
index 9a4a1b84a9..d353bcb0f2 100644
--- a/sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c
+++ b/sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c
@@ -35,6 +35,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp2f), _ZGVeN16v_exp2f)
 VECTOR_WRAPPER (WRAPPER_NAME (exp10f), _ZGVeN16v_exp10f)
 VECTOR_WRAPPER (WRAPPER_NAME (coshf), _ZGVeN16v_coshf)
 VECTOR_WRAPPER (WRAPPER_NAME (expm1f), _ZGVeN16v_expm1f)
+VECTOR_WRAPPER (WRAPPER_NAME (sinhf), _ZGVeN16v_sinhf)
 
 #define VEC_INT_TYPE __m512i
 
diff --git a/sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c b/sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c
index eb4e36d0e2..5e59117626 100644
--- a/sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c
+++ b/sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c
@@ -35,6 +35,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp2f), _ZGVbN4v_exp2f)
 VECTOR_WRAPPER (WRAPPER_NAME (exp10f), _ZGVbN4v_exp10f)
 VECTOR_WRAPPER (WRAPPER_NAME (coshf), _ZGVbN4v_coshf)
 VECTOR_WRAPPER (WRAPPER_NAME (expm1f), _ZGVbN4v_expm1f)
+VECTOR_WRAPPER (WRAPPER_NAME (sinhf), _ZGVbN4v_sinhf)
 
 #define VEC_INT_TYPE __m128i
 
diff --git a/sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c b/sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c
index d8adab59e6..e884a5f4df 100644
--- a/sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c
+++ b/sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c
@@ -38,6 +38,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp2f), _ZGVdN8v_exp2f)
 VECTOR_WRAPPER (WRAPPER_NAME (exp10f), _ZGVdN8v_exp10f)
 VECTOR_WRAPPER (WRAPPER_NAME (coshf), _ZGVdN8v_coshf)
 VECTOR_WRAPPER (WRAPPER_NAME (expm1f), _ZGVdN8v_expm1f)
+VECTOR_WRAPPER (WRAPPER_NAME (sinhf), _ZGVdN8v_sinhf)
 
 /* Redefinition of wrapper to be compatible with _ZGVdN8vvv_sincosf.  */
 #undef VECTOR_WRAPPER_fFF
diff --git a/sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c b/sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c
index e6e1a90c72..95910d39e9 100644
--- a/sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c
+++ b/sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c
@@ -35,6 +35,7 @@ VECTOR_WRAPPER (WRAPPER_NAME (exp2f), _ZGVcN8v_exp2f)
 VECTOR_WRAPPER (WRAPPER_NAME (exp10f), _ZGVcN8v_exp10f)
 VECTOR_WRAPPER (WRAPPER_NAME (coshf), _ZGVcN8v_coshf)
 VECTOR_WRAPPER (WRAPPER_NAME (expm1f), _ZGVcN8v_expm1f)
+VECTOR_WRAPPER (WRAPPER_NAME (sinhf), _ZGVcN8v_sinhf)
 
 #define VEC_INT_TYPE __m128i