Diffstat (limited to 'sysdeps/aarch64/fpu/tanf_sve.c')
 -rw-r--r--  sysdeps/aarch64/fpu/tanf_sve.c  118
 1 file changed, 118 insertions, 0 deletions
diff --git a/sysdeps/aarch64/fpu/tanf_sve.c b/sysdeps/aarch64/fpu/tanf_sve.c
new file mode 100644
index 0000000000..856cbece7e
--- /dev/null
+++ b/sysdeps/aarch64/fpu/tanf_sve.c
@@ -0,0 +1,118 @@
+/* Single-precision vector (SVE) tan function
+
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "sv_math.h"
+
+static const struct data
+{
+  float pio2_1, pio2_2, pio2_3, invpio2;
+  float c1, c3, c5;
+  float c0, c2, c4, range_val, shift;
+} data = {
+  /* Coefficients generated using:
+     poly = fpminimax((tan(sqrt(x))-sqrt(x))/x^(3/2),
+		      deg,
+		      [|single ...|],
+		      [a*a;b*b]);
+     optimize relative error
+     final prec : 23 bits
+     deg : 5
+     a : 0x1p-126 ^ 2
+     b : ((pi) / 0x1p2) ^ 2
+     dirty rel error: 0x1.f7c2e4p-25
+     dirty abs error: 0x1.f7c2ecp-25.  */
+  .c0 = 0x1.55555p-2,	      .c1 = 0x1.11166p-3,
+  .c2 = 0x1.b88a78p-5,	      .c3 = 0x1.7b5756p-6,
+  .c4 = 0x1.4ef4cep-8,	      .c5 = 0x1.0e1e74p-7,
+
+  .pio2_1 = 0x1.921fb6p+0f,   .pio2_2 = -0x1.777a5cp-25f,
+  .pio2_3 = -0x1.ee59dap-50f, .invpio2 = 0x1.45f306p-1f,
+  .range_val = 0x1p15f,	      .shift = 0x1.8p+23f
+};
+
+static svfloat32_t NOINLINE
+special_case (svfloat32_t x, svfloat32_t y, svbool_t cmp)
+{
+  return sv_call_f32 (tanf, x, y, cmp);
+}
+
+/* Fast implementation of SVE tanf.
+   Maximum error is 3.45 ULP:
+   SV_NAME_F1 (tan)(-0x1.e5f0cap+13) got 0x1.ff9856p-1
+				    want 0x1.ff9850p-1.  */
+svfloat32_t SV_NAME_F1 (tan) (svfloat32_t x, const svbool_t pg)
+{
+  const struct data *d = ptr_barrier (&data);
+
+  /* Determine whether input is too large for the fast range reduction.  */
+  svbool_t cmp = svacge (pg, x, d->range_val);
+
+  svfloat32_t odd_coeffs = svld1rq (svptrue_b32 (), &d->c1);
+  svfloat32_t pi_vals = svld1rq (svptrue_b32 (), &d->pio2_1);
+
+  /* n = rint(x/(pi/2)).  */
+  svfloat32_t q = svmla_lane (sv_f32 (d->shift), x, pi_vals, 3);
+  svfloat32_t n = svsub_x (pg, q, d->shift);
+  /* n already holds an integral value, so simply convert it.  */
+  svint32_t in = svcvt_s32_x (pg, n);
+  /* Determine if x lives in an interval where |tan(x)| grows to infinity.  */
+  svint32_t alt = svand_x (pg, in, 1);
+  svbool_t pred_alt = svcmpne (pg, alt, 0);
+
+  /* r = x - n * (pi/2)  (range reduction into -pi/4 .. pi/4).  */
+  svfloat32_t r;
+  r = svmls_lane (x, n, pi_vals, 0);
+  r = svmls_lane (r, n, pi_vals, 1);
+  r = svmls_lane (r, n, pi_vals, 2);
+
+  /* If x lives in an interval where |tan(x)|
+     - is finite, then use a polynomial approximation of the form
+       tan(r) ~ r + r^3 * P(r^2) = r + r * r^2 * P(r^2).
+     - grows to infinity, then use symmetries of tangent and the identity
+       tan(r) = cotan(pi/2 - r) to express tan(x) as 1/tan(-r). Finally, use
+       the same polynomial approximation of tan as above.  */
+
+  /* Perform additional reduction if required.  */
+  svfloat32_t z = svneg_m (r, pred_alt, r);
+
+  /* Evaluate polynomial approximation of tangent on [-pi/4, pi/4],
+     using Estrin on z^2.  */
+  svfloat32_t z2 = svmul_x (pg, z, z);
+  svfloat32_t p01 = svmla_lane (sv_f32 (d->c0), z2, odd_coeffs, 0);
+  svfloat32_t p23 = svmla_lane (sv_f32 (d->c2), z2, odd_coeffs, 1);
+  svfloat32_t p45 = svmla_lane (sv_f32 (d->c4), z2, odd_coeffs, 2);
+
+  svfloat32_t z4 = svmul_x (pg, z2, z2);
+  svfloat32_t p = svmla_x (pg, p01, z4, p23);
+
+  svfloat32_t z8 = svmul_x (pg, z4, z4);
+  p = svmla_x (pg, p, z8, p45);
+
+  svfloat32_t y = svmla_x (pg, z, p, svmul_x (pg, z, z2));
+
+  /* Transform result back, if necessary.  */
+  svfloat32_t inv_y = svdivr_x (pg, y, 1.0f);
+
+  /* No need to pass pg to special_case here since cmp is a strict subset,
+     guaranteed by the svacge above.  */
+  if (__glibc_unlikely (svptest_any (pg, cmp)))
+    return special_case (x, svsel (pred_alt, inv_y, y), cmp);
+
+  return svsel (pred_alt, inv_y, y);
+}
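
For reference, the steps described in the comments above can be collected into a plain scalar C sketch. This is illustrative only and not part of the commit: the helper name tanf_sketch is invented, the |x| >= 0x1p15f special case that falls back to scalar tanf is omitted, and the polynomial is evaluated with Horner rather than the Estrin scheme used by the vector code. The constants are taken directly from the data struct in the diff.

/* Hypothetical scalar sketch of the algorithm above; no special-case
   handling, so only meaningful for |x| < 0x1p15f.  */
static float
tanf_sketch (float x)
{
  /* Coefficients of P(z^2) from the fpminimax fit quoted above.  */
  const float c[6] = { 0x1.55555p-2f, 0x1.11166p-3f, 0x1.b88a78p-5f,
		       0x1.7b5756p-6f, 0x1.4ef4cep-8f, 0x1.0e1e74p-7f };

  /* n = rint (x / (pi/2)) via the shift trick: adding 0x1.8p+23f makes the
     sum's ulp equal to 1, so the addition rounds to an integer, and
     subtracting the shift again recovers that integer as a float.  */
  const float shift = 0x1.8p+23f;
  float n = (x * 0x1.45f306p-1f + shift) - shift;
  int in = (int) n;		/* n already holds an integral value.  */

  /* r = x - n * (pi/2), using the three-term split of pi/2.  */
  float r = x - n * 0x1.921fb6p+0f;
  r = r - n * -0x1.777a5cp-25f;
  r = r - n * -0x1.ee59dap-50f;

  /* Odd quadrants: express tan(x) as 1/tan(-r), as in the comment above.  */
  int alt = in & 1;
  float z = alt ? -r : r;

  /* tan(z) ~ z + z^3 * P(z^2); Horner here for brevity.  */
  float z2 = z * z;
  float p = c[5];
  for (int i = 4; i >= 0; i--)
    p = p * z2 + c[i];
  float y = z + z * z2 * p;

  return alt ? 1.0f / y : y;
}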
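
A note on the polynomial step: the struct interleaves the coefficients so that svld1rq at &d->c1 picks up c1, c3 and c5 in one replicated quadword, letting the three svmla_lane calls select them by lane. The evaluation is an Estrin-style scheme rather than Horner, shortening the dependency chain at the cost of forming z^4 and z^8 up front. In scalar terms the grouping used is roughly the following (hypothetical helper, c[0..5] as in the struct above):

/* Estrin-style grouping of the degree-5 polynomial P in z^2.  */
static float
estrin_p (float z2, const float c[6])
{
  float z4 = z2 * z2;
  float z8 = z4 * z4;
  float p01 = c[0] + z2 * c[1];
  float p23 = c[2] + z2 * c[3];
  float p45 = c[4] + z2 * c[5];
  return p01 + z4 * p23 + z8 * p45;	/* == c0 + c1*z2 + ... + c5*z2^5.  */
}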