about summary refs log tree commit diff
diff options
context:
space:
mode:
authorAdhemerval Zanella <adhemerval.zanella@linaro.org>2024-10-25 15:21:39 -0300
committerAdhemerval Zanella <adhemerval.zanella@linaro.org>2024-11-01 11:17:04 -0300
commit345e9c7d0b36922e790e43bc4a75c40664e7981a (patch)
tree0756950c5ca08fa86fe643baceacb75f7a856f97
parent93ced0e1b83ec837f3de70c751180d225fe3f8dc (diff)
downloadglibc-345e9c7d0b36922e790e43bc4a75c40664e7981a.tar.gz
glibc-345e9c7d0b36922e790e43bc4a75c40664e7981a.tar.xz
glibc-345e9c7d0b36922e790e43bc4a75c40664e7981a.zip
math: Add e_gammaf_r to glibc code and style
Also remove the use of builtins in favor of standard names; compilers
already inline them (if supported) with the current compiler options.
It also fixes an issue where __builtin_roundeven is not supported on
gcc older than version 10.

Checked on x86_64-linux-gnu and i686-linux-gnu.

Signed-off-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Reviewed-by: DJ Delorie <dj@redhat.com>
-rw-r--r--sysdeps/ieee754/flt-32/e_gammaf_r.c180
-rw-r--r--sysdeps/m68k/m680x0/fpu/math_errf.c1
2 files changed, 103 insertions, 78 deletions
diff --git a/sysdeps/ieee754/flt-32/e_gammaf_r.c b/sysdeps/ieee754/flt-32/e_gammaf_r.c
index 90ed3b4890..6b1f95d50f 100644
--- a/sysdeps/ieee754/flt-32/e_gammaf_r.c
+++ b/sysdeps/ieee754/flt-32/e_gammaf_r.c
@@ -37,9 +37,7 @@ SOFTWARE.
 #include <stddef.h>
 #include <libm-alias-finite.h>
 #include <math-narrow-eval.h>
-
-typedef union {float f; uint32_t u;} b32u32_u;
-typedef union {double f; uint64_t u;} b64u64_u;
+#include "math_config.h"
 
 float
 __ieee754_gammaf_r (float x, int *signgamp)
@@ -54,97 +52,125 @@ __ieee754_gammaf_r (float x, int *signgamp)
 
   /* List of exceptional cases. Each entry contains the 32-bit encoding u of x,
      a binary32 approximation f of gamma(x), and a correction term df.  */
-  static const struct {uint32_t u; float f, df;} tb[] = {
-    {0x27de86a9u, 0x1.268266p+47f, 0x1p22f},      // x = 0x1.bd0d52p-48
-    {0x27e05475u, 0x1.242422p+47f, 0x1p22f},      // x = 0x1.c0a8eap-48
-    {0xb63befb3u, -0x1.5cb6e4p+18f, 0x1p-7f},     // x = -0x1.77df66p-19
-    {0x3c7bb570u, 0x1.021d9p+6f, 0x1p-19f},       // x = 0x1.f76aep-7
-    {0x41e886d1u, 0x1.33136ap+98f, 0x1p73f},      // x = 0x1.d10da2p+4
-    {0xc067d177u, 0x1.f6850cp-3f, 0x1p-28f},      // x = -0x1.cfa2eep+1
-    {0xbd99da31u, -0x1.befe66p+3, -0x1p-22f},     // x = -0x1.33b462p-4
-    {0xbf54c45au, -0x1.a6b4ecp+2, +0x1p-23f},     // x = -0x1.a988b4p-1
-    {0x41ee77feu, 0x1.d3631cp+101, -0x1p-76f},    // x = 0x1.dceffcp+4
-    {0x3f843a64u, 0x1.f6c638p-1, 0x1p-26f},       // x = 0x1.0874c8p+0
+  static const struct
+  {
+    uint32_t u;
+    float f, df;
+  } tb[] = {
+    { 0x27de86a9u,  0x1.268266p+47f,  0x1p22f },  /* x = 0x1.bd0d52p-48 */
+    { 0x27e05475u,  0x1.242422p+47f,  0x1p22f },  /* x = 0x1.c0a8eap-48 */
+    { 0xb63befb3u, -0x1.5cb6e4p+18f,  0x1p-7f },  /* x = -0x1.77df66p-19 */
+    { 0x3c7bb570u,  0x1.021d9p+6f,    0x1p-19f }, /* x = 0x1.f76aep-7 */
+    { 0x41e886d1u,  0x1.33136ap+98f,  0x1p73f },  /* x = 0x1.d10da2p+4 */
+    { 0xc067d177u,  0x1.f6850cp-3f,   0x1p-28f }, /* x = -0x1.cfa2eep+1 */
+    { 0xbd99da31u, -0x1.befe66p+3,   -0x1p-22f }, /* x = -0x1.33b462p-4 */
+    { 0xbf54c45au, -0x1.a6b4ecp+2,    0x1p-23f }, /* x = -0x1.a988b4p-1 */
+    { 0x41ee77feu,  0x1.d3631cp+101, -0x1p-76f }, /* x = 0x1.dceffcp+4 */
+    { 0x3f843a64u,  0x1.f6c638p-1,    0x1p-26f }, /* x = 0x1.0874c8p+0 */
   };
 
-  b32u32_u t = {.f = x};
-  uint32_t ax = t.u<<1;
-  if(__builtin_expect(ax>=(0xffu<<24), 0)){ /* x=NaN or +/-Inf */
-    if(ax==(0xffu<<24)){ /* x=+/-Inf */
-      if(t.u>>31){ /* x=-Inf */
-        return x / x; /* will raise the "Invalid operation" exception */
-      }
-      return x; /* x=+Inf */
+  uint32_t t = asuint (x);
+  uint32_t ax = t << 1;
+  if (__glibc_unlikely (ax >= (0xffu << 24)))
+    { /* x=NaN or +/-Inf */
+      if (ax == (0xffu << 24))
+	{ /* x=+/-Inf */
+	  if (t >> 31) /* x=-Inf */
+	    return __math_invalidf (x);
+	  return x; /* x=+Inf */
+	}
+      return x + x; /* x=NaN, where x+x ensures the "Invalid operation"
+		       exception is set if x is sNaN */
     }
-    return x + x; /* x=NaN, where x+x ensures the "Invalid operation"
-                     exception is set if x is sNaN */
-  }
   double z = x;
-  if(__builtin_expect(ax<0x6d000000u, 0)){ /* |x| < 0x1p-18 */
-    volatile double d = (0x1.fa658c23b1578p-1 - 0x1.d0a118f324b63p-1*z)*z - 0x1.2788cfc6fb619p-1;
-    double f = 1.0/z + d;
-    float r = f;
-    b64u64_u rt = {.f = f};
-    if(((rt.u+2)&0xfffffff) < 4){
-      for(unsigned i=0;i<sizeof(tb)/sizeof(tb[0]);i++)
-	if(t.u==tb[i].u) return tb[i].f + tb[i].df;
+  if (__glibc_unlikely (ax < 0x6d000000u))
+    { /* |x| < 0x1p-18 */
+      volatile double d = (0x1.fa658c23b1578p-1 - 0x1.d0a118f324b63p-1 * z)
+	* z - 0x1.2788cfc6fb619p-1;
+      double f = 1.0 / z + d;
+      float r = f;
+      uint64_t rt = asuint64 (f);
+      if (((rt + 2) & 0xfffffff) < 4)
+	{
+	  for (unsigned i = 0; i < sizeof (tb) / sizeof (tb[0]); i++)
+	    if (t == tb[i].u)
+	      return tb[i].f + tb[i].df;
+	}
+      return r;
+    }
+  float fx = floorf (x);
+  if (__glibc_unlikely (x >= 0x1.18522p+5f))
+    {
+      /* Overflow case.  The original CORE-MATH code returns
+	 0x1p127f * 0x1p127f, but apparently some compilers replace this
+	 by +Inf.  */
+      return math_narrow_eval (x * 0x1p127f);
     }
-    return r;
-  }
-  float fx = __builtin_floorf(x);
-  if(__builtin_expect(x >= 0x1.18522p+5f, 0)){
-    /* Overflow case. The original CORE-MATH code returns 0x1p127f * 0x1p127f,
-       but apparently some compilers replace this by +Inf.  */
-    return math_narrow_eval (x * 0x1p127f);
-  }
   /* compute k only after the overflow check, otherwise the case to integer
      might overflow */
   int k = fx;
-  if(__builtin_expect(fx==x, 0)){ /* x is integer */
-    if(x == 0.0f){
-      return 1.0f/x;
+  if (__glibc_unlikely (fx == x))
+    { /* x is integer */
+      if (x == 0.0f)
+	return 1.0f / x;
+      if (x < 0.0f)
+	return __math_invalidf (0.0f);
+      double t0 = 1, x0 = 1;
+      for (int i = 1; i < k; i++, x0 += 1.0)
+	t0 *= x0;
+      return t0;
     }
-    if(x < 0.0f){
-      return 0.0f / 0.0f; /* should raise the "Invalid operation" exception */
+  if (__glibc_unlikely (x < -42.0f))
+    { /* negative non-integer */
+      /* For x < -42, x non-integer, |gamma(x)| < 2^-151.  */
+      static const float sgn[2] = { 0x1p-127f, -0x1p-127f };
+      /* Underflows always happens */
+      return math_narrow_eval (0x1p-127f * sgn[k & 1]);
     }
-    double t0 = 1, x0 = 1;
-    for(int i=1; i<k; i++, x0 += 1.0) t0 *= x0;
-    return t0;
-  }
-  if(__builtin_expect(x<-42.0f, 0)){ /* negative non-integer */
-    /* For x < -42, x non-integer, |gamma(x)| < 2^-151.  */
-    static const float sgn[2] = {0x1p-127f, -0x1p-127f};
-    /* Underflows always happens */
-    return math_narrow_eval (0x1p-127f * sgn[k&1]);
-  }
-  /* The array c[] stores a degree-15 polynomial approximation for gamma(x).  */
+  /* The array c[] stores a degree-15 polynomial approximation for
+     gamma(x).  */
   static const double c[] =
-    {0x1.c9a76be577123p+0, 0x1.8f2754ddcf90dp+0, 0x1.0d1191949419bp+0, 0x1.e1f42cf0ae4a1p-2,
-     0x1.82b358a3ab638p-3, 0x1.e1f2b30cd907bp-5, 0x1.240f6d4071bd8p-6, 0x1.1522c9f3cd012p-8,
-     0x1.1fd0051a0525bp-10, 0x1.9808a8b96c37ep-13, 0x1.b3f78e01152b5p-15, 0x1.49c85a7e1fd04p-18,
-     0x1.471ca49184475p-19, -0x1.368f0b7ed9e36p-23, 0x1.882222f9049efp-23, -0x1.a69ed2042842cp-25};
+    {
+       0x1.c9a76be577123p+0,   0x1.8f2754ddcf90dp+0,  0x1.0d1191949419bp+0,
+       0x1.e1f42cf0ae4a1p-2,   0x1.82b358a3ab638p-3,  0x1.e1f2b30cd907bp-5,
+       0x1.240f6d4071bd8p-6,   0x1.1522c9f3cd012p-8,  0x1.1fd0051a0525bp-10,
+       0x1.9808a8b96c37ep-13,  0x1.b3f78e01152b5p-15, 0x1.49c85a7e1fd04p-18,
+       0x1.471ca49184475p-19, -0x1.368f0b7ed9e36p-23, 0x1.882222f9049efp-23,
+      -0x1.a69ed2042842cp-25
+   };
 
-  double m = z - 0x1.7p+1, i = __builtin_roundeven(m), step = __builtin_copysign(1.0,i);
-  double d = m - i, d2 = d*d, d4 = d2*d2, d8 = d4*d4;
-  double f = (c[0] + d*c[1]) + d2*(c[2] + d*c[3]) + d4*((c[4] + d*c[5]) + d2*(c[6] + d*c[7]))
-    + d8*((c[8] + d*c[9]) + d2*(c[10] + d*c[11]) + d4*((c[12] + d*c[13]) + d2*(c[14] + d*c[15])));
-  int jm = __builtin_fabs(i);
+  double m = z - 0x1.7p+1;
+  double i = roundeven (m);
+  double step = copysign (1.0, i);
+  double d = m - i, d2 = d * d, d4 = d2 * d2, d8 = d4 * d4;
+  double f = (c[0] + d * c[1]) + d2 * (c[2] + d * c[3])
+	     + d4 * ((c[4] + d * c[5]) + d2 * (c[6] + d * c[7]))
+	     + d8 * ((c[8] + d * c[9]) + d2 * (c[10] + d * c[11])
+		     + d4 * ((c[12] + d * c[13]) + d2 * (c[14] + d * c[15])));
+  int jm = fabs (i);
   double w = 1;
-  if(jm){
-    z -= 0.5 + step*0.5;
-    w = z;
-    for(int j=jm-1; j; j--) {z -= step; w *= z;}
-  }
-  if(i<=-0.5) w = 1/w;
+  if (jm)
+    {
+      z -= 0.5 + step * 0.5;
+      w = z;
+      for (int j = jm - 1; j; j--)
+	{
+	  z -= step;
+	  w *= z;
+	}
+    }
+  if (i <= -0.5)
+    w = 1 / w;
   f *= w;
-  b64u64_u rt = {.f = f};
+  uint64_t rt = asuint64 (f);
   float r = f;
   /* Deal with exceptional cases.  */
-  if(__builtin_expect(((rt.u+2)&0xfffffff) < 8, 0)){
-    for(unsigned j=0;j<sizeof(tb)/sizeof(tb[0]);j++) {
-      if(t.u==tb[j].u) return tb[j].f + tb[j].df;
+  if (__glibc_unlikely (((rt + 2) & 0xfffffff) < 8))
+    {
+      for (unsigned j = 0; j < sizeof (tb) / sizeof (tb[0]); j++)
+	if (t == tb[j].u)
+	  return tb[j].f + tb[j].df;
     }
-  }
   return r;
 }
 libm_alias_finite (__ieee754_gammaf_r, __gammaf_r)
diff --git a/sysdeps/m68k/m680x0/fpu/math_errf.c b/sysdeps/m68k/m680x0/fpu/math_errf.c
deleted file mode 100644
index 1cc8931700..0000000000
--- a/sysdeps/m68k/m680x0/fpu/math_errf.c
+++ /dev/null
@@ -1 +0,0 @@
-/* Not needed.  */