/* Double-precision AdvSIMD atan2

   Copyright (C) 2023-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>. */

#include "v_math.h"
#include "poly_advsimd_f64.h"
static const struct data
{
  float64x2_t pi_over_2;
  float64x2_t poly[20];
} data = {
  /* Coefficients of polynomial P such that atan(x)~x+x*P(x^2) on
     the interval [2**-1022, 1.0]. */
  .poly = { V2 (-0x1.5555555555555p-2), V2 (0x1.99999999996c1p-3),
            V2 (-0x1.2492492478f88p-3), V2 (0x1.c71c71bc3951cp-4),
            V2 (-0x1.745d160a7e368p-4), V2 (0x1.3b139b6a88ba1p-4),
            V2 (-0x1.11100ee084227p-4), V2 (0x1.e1d0f9696f63bp-5),
            V2 (-0x1.aebfe7b418581p-5), V2 (0x1.842dbe9b0d916p-5),
            V2 (-0x1.5d30140ae5e99p-5), V2 (0x1.338e31eb2fbbcp-5),
            V2 (-0x1.00e6eece7de8p-5), V2 (0x1.860897b29e5efp-6),
            V2 (-0x1.0051381722a59p-6), V2 (0x1.14e9dc19a4a4ep-7),
            V2 (-0x1.d0062b42fe3bfp-9), V2 (0x1.17739e210171ap-10),
            V2 (-0x1.ab24da7be7402p-13), V2 (0x1.358851160a528p-16), },
  .pi_over_2 = V2 (0x1.921fb54442d18p+0),
};
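/* Bit mask of the sign bit of an IEEE 754 binary64 value.  */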
#define SignMask v_u64 (0x8000000000000000)

/* Special cases i.e. 0, infinity, NaN (fall back to scalar calls). */
static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t y, float64x2_t x, float64x2_t ret, uint64x2_t cmp)
{
  return v_call2_f64 (atan2, y, x, ret, cmp);
}

/* Returns 1 if input is the bit representation of 0, infinity or nan. */
static inline uint64x2_t
zeroinfnan (uint64x2_t i)
{
  /* (2 * i - 1) >= (2 * asuint64 (INFINITY) - 1). */
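  /* Doubling the bit pattern discards the sign bit, and subtracting 1 makes
     a zero input wrap around to UINT64_MAX, so zeros, infinities and NaNs
     all compare greater than or equal to the threshold.  */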
  return vcgeq_u64 (vsubq_u64 (vaddq_u64 (i, i), v_u64 (1)),
                    v_u64 (2 * asuint64 (INFINITY) - 1));
}

/* Fast implementation of vector atan2.
   Maximum observed error is 2.8 ulps:
   _ZGVnN2vv_atan2 (0x1.9651a429a859ap+5, 0x1.953075f4ee26p+5)
     got 0x1.92d628ab678ccp-1
    want 0x1.92d628ab678cfp-1. */
float64x2_t VPCS_ATTR V_NAME_D2 (atan2) (float64x2_t y, float64x2_t x)
{
  const struct data *data_ptr = ptr_barrier (&data);

  uint64x2_t ix = vreinterpretq_u64_f64 (x);
  uint64x2_t iy = vreinterpretq_u64_f64 (y);

  uint64x2_t special_cases = vorrq_u64 (zeroinfnan (ix), zeroinfnan (iy));
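  /* Lanes with special inputs still go through the fast path below; their
     results are overwritten by scalar atan2 calls in special_case at the
     end.  */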

  uint64x2_t sign_x = vandq_u64 (ix, SignMask);
  uint64x2_t sign_y = vandq_u64 (iy, SignMask);
  uint64x2_t sign_xy = veorq_u64 (sign_x, sign_y);
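  /* sign_xy is set in lanes where x and y have opposite signs; it is xored
     into the result once the quadrant shift has been applied.  */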

  float64x2_t ax = vabsq_f64 (x);
  float64x2_t ay = vabsq_f64 (y);

  uint64x2_t pred_xlt0 = vcltzq_f64 (x);
  uint64x2_t pred_aygtax = vcgtq_f64 (ay, ax);

  /* Set up z for call to atan. */
  float64x2_t n = vbslq_f64 (pred_aygtax, vnegq_f64 (ax), ay);
  float64x2_t d = vbslq_f64 (pred_aygtax, ay, ax);
  float64x2_t z = vdivq_f64 (n, d);
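  /* |z| <= 1 in every lane: the quotient is min (|x|, |y|) / max (|x|, |y|)
     up to sign, so the odd polynomial approximation of atan on [-1, 1]
     below applies.  */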

  /* Work out the correct shift. */
  float64x2_t shift = vreinterpretq_f64_u64 (
      vandq_u64 (pred_xlt0, vreinterpretq_u64_f64 (v_f64 (-2.0))));
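  /* ANDing the bit pattern of -2.0 with the all-ones/all-zeros x < 0
     predicate yields -2.0 in lanes where x < 0 and +0.0 elsewhere, without
     needing a select.  */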
  shift = vbslq_f64 (pred_aygtax, vaddq_f64 (shift, v_f64 (1.0)), shift);
  shift = vmulq_f64 (shift, data_ptr->pi_over_2);
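  /* shift is now one of { 0, pi/2, -pi/2, -pi }.  Before the sign fix-up
     below, atan (z) + shift equals atan2 (|y|, x) when x >= 0 and
     -atan2 (|y|, x) when x < 0.  */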

  /* Calculate the polynomial approximation.
     Use split Estrin scheme for P(z^2) with deg(P)=19. Use split instead of
     full scheme to avoid underflow in x^16.
     The order 19 polynomial P approximates
     (atan(sqrt(x))-sqrt(x))/x^(3/2). */
  float64x2_t z2 = vmulq_f64 (z, z);
  float64x2_t x2 = vmulq_f64 (z2, z2);
  float64x2_t x4 = vmulq_f64 (x2, x2);
  float64x2_t x8 = vmulq_f64 (x4, x4);
  float64x2_t ret
      = vfmaq_f64 (v_estrin_7_f64 (z2, x2, x4, data_ptr->poly),
                   v_estrin_11_f64 (z2, x2, x4, x8, data_ptr->poly + 8), x8);
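  /* Split evaluation: P (z2) = P_lo (z2) + z2^8 * P_hi (z2), with P_lo of
     degree 7 over poly[0..7] and P_hi of degree 11 over poly[8..19]
     (vfmaq_f64 (a, b, c) computes a + b * c).  */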

  /* Finalize. y = shift + z + z^3 * P(z^2). */
  ret = vfmaq_f64 (z, ret, vmulq_f64 (z2, z));
  ret = vaddq_f64 (ret, shift);

  /* Account for the sign of x and y. */
  ret = vreinterpretq_f64_u64 (
      veorq_u64 (vreinterpretq_u64_f64 (ret), sign_xy));

  if (__glibc_unlikely (v_any_u64 (special_cases)))
    return special_case (y, x, ret, special_cases);

  return ret;
}