author     Joseph Myers <joseph@codesourcery.com>  2015-09-23 18:14:57 +0000
committer  Joseph Myers <joseph@codesourcery.com>  2015-09-23 18:14:57 +0000
commit     54142c44e963f410262fa868c159b6df858f3c53 (patch)
tree       cc38bd53701afabc893a378e24fefd25d2062d86 /sysdeps/ieee754/dbl-64/e_gamma_r.c
parent     5df386a5492e00348c0ba4c6520d27783c253816 (diff)
Use math_narrow_eval more consistently.
Where glibc code needs to avoid excess range and precision in
floating-point arithmetic, code variously uses either asms or volatile
to force the results of that arithmetic to memory; mostly this is
conditional on FLT_EVAL_METHOD, but in the case of lrint / llrint
functions some use of volatile is unconditional (and is present
unnecessarily in versions for long double).  This patch makes such code
use the recently-added math_narrow_eval macro consistently, removing
the unnecessary uses of volatile in long double lrint / llrint
implementations completely.

Tested for x86_64, x86, mips64 and powerpc.
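
For reference, the macro being adopted here can be sketched roughly as
below.  This is only an illustrative sketch, assuming GCC statement
expressions and an empty asm with a "+m" constraint; the real
definition lives in glibc's math_private.h and may differ in detail.
The point is that on targets where FLT_EVAL_METHOD is nonzero (x87,
for example), double arithmetic carries excess range and precision,
and forcing the result through a memory operand narrows it to its
nominal type without making the temporary volatile.

    #include <float.h>

    /* Illustrative sketch only; glibc's real math_narrow_eval lives in
       math_private.h and may differ in detail.  When FLT_EVAL_METHOD is
       nonzero, forcing the value through a memory operand via an empty
       asm discards the excess range and precision of the evaluation
       format.  */
    #if FLT_EVAL_METHOD == 0
    # define math_narrow_eval(x) (x)
    #else
    # define math_narrow_eval(x)                        \
      ({                                                \
        __typeof (x) math_narrow_eval_tmp = (x);        \
        __asm__ ("" : "+m" (math_narrow_eval_tmp));     \
        math_narrow_eval_tmp;                           \
      })
    #endif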

	* math/s_nexttowardf.c (__nexttowardf): Use math_narrow_eval.
	* stdlib/strtod_l.c: Include <math_private.h>.
	(overflow_value): Use math_narrow_eval.
	(underflow_value): Likewise.
	* sysdeps/i386/fpu/s_nexttoward.c (__nexttoward): Likewise.
	* sysdeps/i386/fpu/s_nexttowardf.c (__nexttowardf): Likewise.
	* sysdeps/ieee754/dbl-64/e_gamma_r.c (gamma_positive): Likewise.
	(__ieee754_gamma_r): Likewise.
	* sysdeps/ieee754/dbl-64/gamma_productf.c (__gamma_productf):
	Likewise.
	* sysdeps/ieee754/dbl-64/k_rem_pio2.c (__kernel_rem_pio2):
	Likewise.
	* sysdeps/ieee754/dbl-64/lgamma_neg.c (__lgamma_neg): Likewise.
	* sysdeps/ieee754/dbl-64/s_erf.c (__erfc): Likewise.
	* sysdeps/ieee754/dbl-64/s_llrint.c (__llrint): Likewise.
	* sysdeps/ieee754/dbl-64/s_lrint.c (__lrint): Likewise.
	* sysdeps/ieee754/flt-32/e_gammaf_r.c (gammaf_positive): Likewise.
	(__ieee754_gammaf_r): Likewise.
	* sysdeps/ieee754/flt-32/k_rem_pio2f.c (__kernel_rem_pio2f):
	Likewise.
	* sysdeps/ieee754/flt-32/lgamma_negf.c (__lgamma_negf): Likewise.
	* sysdeps/ieee754/flt-32/s_erff.c (__erfcf): Likewise.
	* sysdeps/ieee754/flt-32/s_llrintf.c (__llrintf): Likewise.
	* sysdeps/ieee754/flt-32/s_lrintf.c (__lrintf): Likewise.
	* sysdeps/ieee754/ldbl-128/s_llrintl.c (__llrintl): Do not use
	volatile.
	* sysdeps/ieee754/ldbl-128/s_lrintl.c (__lrintl): Likewise.
	* sysdeps/ieee754/ldbl-128/s_nexttoward.c (__nexttoward): Use
	math_narrow_eval.
	* sysdeps/ieee754/ldbl-128ibm/s_nexttoward.c (__nexttoward):
	Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_nexttowardf.c (__nexttowardf):
	Likewise.
	* sysdeps/ieee754/ldbl-96/gamma_product.c (__gamma_product):
	Likewise.
	* sysdeps/ieee754/ldbl-96/s_llrintl.c (__llrintl): Do not use
	volatile.
	* sysdeps/ieee754/ldbl-96/s_lrintl.c (__lrintl): Likewise.
	* sysdeps/ieee754/ldbl-96/s_nexttoward.c (__nexttoward): Use
	math_narrow_eval.
	* sysdeps/ieee754/ldbl-96/s_nexttowardf.c (__nexttowardf):
	Likewise.
	* sysdeps/ieee754/ldbl-opt/s_nexttowardfd.c (__nldbl_nexttowardf):
	Likewise.
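
As a usage illustration of the pattern this diff introduces in
e_gamma_r.c (a hypothetical standalone example, not glibc source): on
an x87-style target, DBL_MAX * DBL_MAX can be held in an 80-bit
register without overflowing, so narrowing the result is what makes
the overflow, and hence the infinite return value, actually happen in
double.

    #include <float.h>
    #include <math.h>
    #include <stdio.h>

    /* Assumes a math_narrow_eval macro like the sketch above; fall back
       to a plain expression so the example still compiles elsewhere.  */
    #ifndef math_narrow_eval
    # define math_narrow_eval(x) (x)
    #endif

    int
    main (void)
    {
      /* Without narrowing, lax excess-precision handling could keep this
         product as a finite 80-bit value; narrowing forces the double
         overflow.  */
      double ret = math_narrow_eval (DBL_MAX * DBL_MAX);
      printf ("isinf (ret) = %d\n", isinf (ret) != 0);
      return 0;
    }
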
Diffstat (limited to 'sysdeps/ieee754/dbl-64/e_gamma_r.c')
-rw-r--r--   sysdeps/ieee754/dbl-64/e_gamma_r.c   20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/sysdeps/ieee754/dbl-64/e_gamma_r.c b/sysdeps/ieee754/dbl-64/e_gamma_r.c
index d1acaa6a12..d3fb8af97e 100644
--- a/sysdeps/ieee754/dbl-64/e_gamma_r.c
+++ b/sysdeps/ieee754/dbl-64/e_gamma_r.c
@@ -76,11 +76,7 @@ gamma_positive (double x, int *exp2_adj)
 	  /* Adjust into the range for applying Stirling's
 	     approximation.  */
 	  double n = __ceil (12.0 - x);
-#if FLT_EVAL_METHOD != 0
-	  volatile
-#endif
-	  double x_tmp = x + n;
-	  x_adj = x_tmp;
+	  x_adj = math_narrow_eval (x + n);
 	  x_eps = (x - (x_adj - n));
 	  prod = __gamma_product (x_adj - n, x_eps, n, &eps);
 	}
@@ -119,9 +115,6 @@ __ieee754_gamma_r (double x, int *signgamp)
 {
   int32_t hx;
   u_int32_t lx;
-#if FLT_EVAL_METHOD != 0
-  volatile
-#endif
   double ret;
 
   EXTRACT_WORDS (hx, lx, x);
@@ -157,7 +150,7 @@ __ieee754_gamma_r (double x, int *signgamp)
     {
       /* Overflow.  */
       *signgamp = 0;
-      ret = DBL_MAX * DBL_MAX;
+      ret = math_narrow_eval (DBL_MAX * DBL_MAX);
       return ret;
     }
   else
@@ -201,27 +194,28 @@ __ieee754_gamma_r (double x, int *signgamp)
 		}
 	    }
 	}
+      ret = math_narrow_eval (ret);
     }
   if (isinf (ret) && x != 0)
     {
       if (*signgamp < 0)
 	{
-	  ret = -__copysign (DBL_MAX, ret) * DBL_MAX;
+	  ret = math_narrow_eval (-__copysign (DBL_MAX, ret) * DBL_MAX);
 	  ret = -ret;
 	}
       else
-	ret = __copysign (DBL_MAX, ret) * DBL_MAX;
+	ret = math_narrow_eval (__copysign (DBL_MAX, ret) * DBL_MAX);
       return ret;
     }
   else if (ret == 0)
     {
       if (*signgamp < 0)
 	{
-	  ret = -__copysign (DBL_MIN, ret) * DBL_MIN;
+	  ret = math_narrow_eval (-__copysign (DBL_MIN, ret) * DBL_MIN);
 	  ret = -ret;
 	}
       else
-	ret = __copysign (DBL_MIN, ret) * DBL_MIN;
+	ret = math_narrow_eval (__copysign (DBL_MIN, ret) * DBL_MIN);
       return ret;
     }
   else