about summary refs log tree commit diff
path: root/sysdeps/alpha/fpu/s_ceilf.c
diff options
context:
space:
mode:
Diffstat (limited to 'sysdeps/alpha/fpu/s_ceilf.c')
-rw-r--r-- sysdeps/alpha/fpu/s_ceilf.c | 44
1 file changed, 20 insertions, 24 deletions
diff --git a/sysdeps/alpha/fpu/s_ceilf.c b/sysdeps/alpha/fpu/s_ceilf.c
index 3defaeb01e..35c51a2766 100644
--- a/sysdeps/alpha/fpu/s_ceilf.c
+++ b/sysdeps/alpha/fpu/s_ceilf.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 1998 Free Software Foundation, Inc.
+/* Copyright (C) 1998, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Richard Henderson.
 
@@ -19,39 +19,35 @@
 
 #include <math.h>
 
+/* Use the -inf rounding mode conversion instructions to implement
+   ceil, via something akin to -floor(-x).  This is much faster than
+   playing with the fpcr to achieve +inf rounding mode.  */
+
 float
 __ceilf (float x)
 {
-  if (x != 0 && fabsf (x) < 16777216.0f)  /* 1 << FLT_MANT_DIG */
+  if (isless (fabsf (x), 16777216.0f))	/* 1 << FLT_MANT_DIG */
     {
-      float tmp1, tmp2;
-      unsigned long fpcr0, fpcr1;
-      unsigned long pinf = 3UL << 58;
-
-      /* Set round to +inf.  */
-      __asm __volatile("excb; mf_fpcr %0" : "=f"(fpcr0));
-      __asm __volatile("mt_fpcr %0; excb" : : "f"(fpcr0 | pinf));
+      /* Note that Alpha S_Floating is stored in registers in a
+	 restricted T_Floating format, so we don't even need to
+	 convert back to S_Floating in the end.  The initial
+	 conversion to T_Floating is needed to handle denormals.  */
 
-      /* Calculate!  
-         Note that Alpha S_Floating is stored in registers in a
-         restricted T_Floating format, so we don't even need to
-         convert back to S_Floating in the end.  The initial
-         conversion to T_Floating is needed to handle denormals.  */
+      float tmp1, tmp2, new_x;
 
+      new_x = -x;
+      __asm ("cvtst/s %3,%2\n\t"
 #ifdef _IEEE_FP_INEXACT
-      __asm("cvtst/s %3,%2\n\tcvttq/svid %2,%1\n\tcvtqt/suid %1,%0"
-	    : "=f"(x), "=&f"(tmp1), "=&f"(tmp2)
-	    : "f"(x));
+	     "cvttq/svim %2,%1\n\t"
 #else
-      __asm("cvtst/s %3,%2\n\tcvttq/svd %2,%1\n\tcvtqt/d %1,%0"
-	    : "=f"(x), "=&f"(tmp1), "=&f"(tmp2)
-	    : "f"(x));
+	     "cvttq/svm %2,%1\n\t"
 #endif
+	     "cvtqt/m %1,%0\n\t"
+	     : "=f"(new_x), "=&f"(tmp1), "=&f"(tmp2)
+	     : "f"(new_x));
 
-      /* Reset rounding mode, while retaining new exception bits.  */
-      __asm __volatile("excb; mf_fpcr %0" : "=f"(fpcr1));
-      fpcr0 = (fpcr0 & pinf) | (fpcr1 & ~pinf);
-      __asm __volatile("mt_fpcr %0; excb" : : "f"(fpcr0));
+      /* Fix up the negation we did above, as well as handling -0 properly. */
+      x = copysignf(new_x, x);
     }
   return x;
 }