Diffstat:
 sysdeps/ia64/fpu/libm_support.h | 1008 ++++++++++++++++++++++-----------
 1 file changed, 652 insertions(+), 356 deletions(-)
diff --git a/sysdeps/ia64/fpu/libm_support.h b/sysdeps/ia64/fpu/libm_support.h
index 50dac33133..dc9c0a2648 100644
--- a/sysdeps/ia64/fpu/libm_support.h
+++ b/sysdeps/ia64/fpu/libm_support.h
@@ -1,7 +1,8 @@
 /* file: libm_support.h */
 
 
-// Copyright (c) 2000 - 2002, Intel Corporation
+/*
+// Copyright (c) 2000 - 2004, Intel Corporation
 // All rights reserved.
 //
 // Contributed 2000 by the Intel Numerics Group, Intel Corporation
@@ -21,13 +22,14 @@
 // products derived from this software without specific prior written
 // permission.
 
+//
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR ITS 
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR ITS
 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY OR TORT (INCLUDING
 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@@ -38,17 +40,17 @@
 // http://www.intel.com/software/products/opensource/libraries/num.htm.
 //
 
-// History: 02/02/2000 Initial version 
+// History: 02/02/2000 Initial version
 //          2/28/2000 added tags for logb and nextafter
 //          3/22/2000 Changes to support _LIB_VERSIONIMF variable
-//                    and filled some enum gaps. Added support for C99.  
+//                    and filled some enum gaps. Added support for C99.
 //          5/31/2000 added prototypes for __libm_frexp_4l/8l
 //          8/10/2000 Changed declaration of _LIB_VERSIONIMF to work for library
 //                    builds and other application builds (precompiler directives).
 //          8/11/2000 Added pointers-to-matherr-functions declarations to allow
 //                    for user-defined matherr functions in the dll build.
 //         12/07/2000 Added scalbn error_types values.
-//          5/01/2001 Added error_types values for C99 nearest integer 
+//          5/01/2001 Added error_types values for C99 nearest integer
 //                    functions.
 //          6/07/2001 Added error_types values for fdim.
 //          6/18/2001 Added include of complex_support.h.
@@ -65,232 +67,142 @@
 //         06/27/2002 Added error_types for sinhcosh.
 //         12/05/2002 Added error_types for annuity and compound
 //         04/10/2003 Added error_types for tgammal/tgamma/tgammaf
+//         05/16/2003 FP-treatment macros copied here from IA32 libm_support.h
+//         06/02/2003 Added pad into struct fp80 (12/16 bytes).
+//         08/01/2003 Added struct ker80 and macros for multiprecision addition,
+//                    subtraction, multiplication, division, square root.
+//         08/07/2003 History section updated.
+//         09/03/2003 ALIGN(n) macro added.
+//         10/01/2003 LDOUBLE_ALIGN and fp80 corrected on linux to 16 bytes.
+//         11/24/2004 Added ifdef around definitions of INT32/64
+//         12/15/2004 Added error_types for exp10, nextafter, nexttoward
+//                    underflow.  Moved error codes into libm_error_codes.h.
 //
+*/
 
-void __libm_sincos_pi4(double,double*,double*,int);
-void __libm_y0y1(double , double *, double *);
-void __libm_j0j1(double , double *, double *);
-double __libm_j0(double);
-double __libm_j1(double);
-double __libm_jn(int,double);
-double __libm_y0(double);
-double __libm_y1(double);
-double __libm_yn(int,double);
-double __libm_copysign (double, double);
-float __libm_copysignf (float, float);
-long double __libm_copysignl (long double, long double);
-
-extern double sqrt(double);
-extern double fabs(double);
-extern double log(double);
-extern double log1p(double);
-extern double sqrt(double);
-extern double sin(double);
-extern double exp(double);
-extern double modf(double, double *);
-extern double asinh(double);
-extern double acosh(double);
-extern double atanh(double);
-extern double tanh(double);
-extern double erf(double);
-extern double erfc(double);
-extern double j0(double);
-extern double j1(double);
-extern double jn(int, double);
-extern double y0(double);
-extern double y1(double);
-extern double yn(int, double);
-
-extern float  fabsf(float);
-extern float  asinhf(float);
-extern float  acoshf(float);
-extern float  atanhf(float);
-extern float  tanhf(float);
-extern float  erff(float);
-extern float  erfcf(float);
-extern float  j0f(float);
-extern float  j1f(float);
-extern float  jnf(int, float);
-extern float  y0f(float);
-extern float  y1f(float);
-extern float  ynf(int, float);
-
-extern long double log1pl(long double);
-extern long double logl(long double);
-extern long double sqrtl(long double);
-extern long double expl(long double);
-extern long double fabsl(long double);
+#ifndef __LIBM_SUPPORT_H_INCLUDED__
+#define __LIBM_SUPPORT_H_INCLUDED__
 
-#if !(defined(SIZE_INT_32) || defined(SIZE_INT_64))
-#error integer size not established; define SIZE_INT_32 or SIZE_INT_64
+#ifndef _LIBC
+#if !(defined(_WIN32) || defined(_WIN64))
+# pragma const_seg(".rodata") /* place constant data in text (code) section */
 #endif
 
-#if (defined(SIZE_INT_32) && defined(SIZE_INT_64))
-#error multiple integer size definitions; define SIZE_INT_32 or SIZE_INT_64
+#if defined(__ICC) || defined(__ICL) || defined(__ECC) || defined(__ECL)
+# pragma warning( disable : 1682 )	/* #1682: implicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem) */
+# pragma warning( disable : 1683 )	/* #1683: explicit conversion of a 64-bit integral type to a smaller integral type (potential portability problem) */
 #endif
-
-#if !(defined(SIZE_LONG_INT_32) || defined(SIZE_LONG_INT_64))
-#error long int size not established; define SIZE_LONG_INT_32 or SIZE_LONG_INT_64
 #endif
 
-#if (defined(SIZE_LONG_INT_32) && defined(SIZE_LONG_INT_64))
-#error multiple long int size definitions; define SIZE_LONG_INT_32 or SIZE_LONG_INT_64
+/* macros to form a double value in hex representation (unsigned int type) */
+
+#define DOUBLE_HEX(hi,lo) 0x##lo,0x##hi /*LITTLE_ENDIAN*/
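A minimal usage sketch (illustrative, not part of the patch; little-endian layout assumed as the macro notes): the two hex halves initialize an unsigned int array, low word first, which is then read back through a double pointer, the same pattern the removed INF[] table further below used.

    /* sketch: IEEE-754 double 1.0 is 0x3ff0000000000000 */
    static const unsigned int __demo_one[] = { DOUBLE_HEX(3ff00000, 00000000) };
    #define DEMO_ONE (*(const double *) __demo_one)   /* == 1.0 */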
+
+#include "libm_cpu_defs.h"
+
+#if !(defined (IA64))
+#  include "libm_dll.h"
+#  include "libm_dispatch.h"
 #endif
 
-#if !(defined(SIZE_LONG_LONG_INT_32) || defined(SIZE_LONG_LONG_INT_64))
-#error long long int size not established; define SIZE_LONG_LONG_INT_32 or SIZE_LONG_LONG_INT_64
+#include "libm_error_codes.h"
+
+struct exceptionf
+{
+  int type;
+  char *name;
+  float arg1, arg2, retval;
+};
+
+# ifdef __cplusplus
+struct __exception
+{
+  int type;
+  char *name;
+  double arg1, arg2, retval;
+};
+# else
+
+#  ifndef _LIBC
+struct exception
+{
+  int type;
+  char *name;
+  double arg1, arg2, retval;
+};
+#  endif
+# endif
+
+struct exceptionl
+{
+  int type;
+  char *name;
+  long double arg1, arg2, retval;
+};
+
+#if (defined (_MS_) && defined (IA64))
+#define   MATHERR_F   _matherrf
+#define   MATHERR_D   _matherr
+#else
+#define MATHERR_F   matherrf
+#define MATHERR_D   matherr
 #endif
 
-#if (defined(SIZE_LONG_LONG_INT_32) && defined(SIZE_LONG_LONG_INT_64))
-#error multiple long long int size definitions; define SIZE_LONG_LONG_INT_32 or SIZE_LONG_LONG_INT_64
+# ifdef __cplusplus
+#define EXC_DECL_D  __exception
+#else
+// exception is a reserved name in C++
+#define EXC_DECL_D  exception
 #endif
 
+extern int MATHERR_F(struct exceptionf*);
+extern int MATHERR_D(struct EXC_DECL_D*);
+extern int matherrl(struct exceptionl*);
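A hedged illustration of the hook these declarations expose: an application may supply its own matherrf/matherr/matherrl to override the default SVID error reporting. The sketch below assumes only the exceptionf layout and the SING code defined in this header.

    /* sketch: user-defined handler for float-precision errors */
    int matherrf(struct exceptionf *e)
    {
        if (e->type == SING) {      /* argument singularity, e.g. logf(0.0f) */
            e->retval = -1.0e30f;   /* value handed back to the caller instead */
            return 1;               /* nonzero: handled, skip default reporting */
        }
        return 0;                   /* zero: fall back to default handling */
    }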
+
+#ifndef _LIBC
+// Add code to support _LIB_VERSIONIMF
 typedef enum
 {
-  logl_zero=0,   logl_negative,                  /*  0,  1 */
-  log_zero,      log_negative,                   /*  2,  3 */
-  logf_zero,     logf_negative,                  /*  4,  5 */
-  log10l_zero,   log10l_negative,                /*  6,  7 */
-  log10_zero,    log10_negative,                 /*  8,  9 */
-  log10f_zero,   log10f_negative,                /* 10, 11 */
-  expl_overflow, expl_underflow,                 /* 12, 13 */
-  exp_overflow,  exp_underflow,                  /* 14, 15 */
-  expf_overflow, expf_underflow,                 /* 16, 17 */
-  powl_overflow, powl_underflow,                 /* 18, 19 */
-  powl_zero_to_zero,                             /* 20     */
-  powl_zero_to_negative,                         /* 21     */
-  powl_neg_to_non_integer,                       /* 22     */
-  powl_nan_to_zero,                              /* 23     */
-  pow_overflow,  pow_underflow,                  /* 24, 25 */
-  pow_zero_to_zero,                              /* 26     */ 
-  pow_zero_to_negative,                          /* 27     */
-  pow_neg_to_non_integer,                        /* 28     */
-  pow_nan_to_zero,                               /* 29     */
-  powf_overflow, powf_underflow,                 /* 30, 31 */
-  powf_zero_to_zero,                             /* 32     */
-  powf_zero_to_negative,                         /* 33     */ 
-  powf_neg_to_non_integer,                       /* 34     */ 
-  powf_nan_to_zero,                              /* 35     */
-  atan2l_zero,                                   /* 36     */
-  atan2_zero,                                    /* 37     */
-  atan2f_zero,                                   /* 38     */
-  expm1l_overflow,                               /* 39     */
-  expm1l_underflow,                              /* 40     */
-  expm1_overflow,                                /* 41     */
-  expm1_underflow,                               /* 42     */
-  expm1f_overflow,                               /* 43     */
-  expm1f_underflow,                              /* 44     */
-  hypotl_overflow,                               /* 45     */
-  hypot_overflow,                                /* 46     */
-  hypotf_overflow,                               /* 47     */
-  sqrtl_negative,                                /* 48     */
-  sqrt_negative,                                 /* 49     */
-  sqrtf_negative,                                /* 50     */
-  scalbl_overflow, scalbl_underflow,             /* 51, 52  */
-  scalb_overflow,  scalb_underflow,              /* 53, 54  */
-  scalbf_overflow, scalbf_underflow,             /* 55, 56  */
-  acosl_gt_one, acos_gt_one, acosf_gt_one,       /* 57, 58, 59 */
-  asinl_gt_one, asin_gt_one, asinf_gt_one,       /* 60, 61, 62 */
-  coshl_overflow, cosh_overflow, coshf_overflow, /* 63, 64, 65 */
-  y0l_zero, y0l_negative,y0l_gt_loss,            /* 66, 67, 68 */
-  y0_zero, y0_negative,y0_gt_loss,               /* 69, 70, 71 */
-  y0f_zero, y0f_negative,y0f_gt_loss,            /* 72, 73, 74 */
-  y1l_zero, y1l_negative,y1l_gt_loss,            /* 75, 76, 77 */ 
-  y1_zero, y1_negative,y1_gt_loss,               /* 78, 79, 80 */ 
-  y1f_zero, y1f_negative,y1f_gt_loss,            /* 81, 82, 83 */ 
-  ynl_zero, ynl_negative,ynl_gt_loss,            /* 84, 85, 86 */
-  yn_zero, yn_negative,yn_gt_loss,               /* 87, 88, 89 */
-  ynf_zero, ynf_negative,ynf_gt_loss,            /* 90, 91, 92 */
-  j0l_gt_loss,                                   /* 93 */ 
-  j0_gt_loss,                                    /* 94 */
-  j0f_gt_loss,                                   /* 95 */
-  j1l_gt_loss,                                   /* 96 */
-  j1_gt_loss,                                    /* 97 */
-  j1f_gt_loss,                                   /* 98 */
-  jnl_gt_loss,                                   /* 99 */
-  jn_gt_loss,                                    /* 100 */
-  jnf_gt_loss,                                   /* 101 */
-  lgammal_overflow, lgammal_negative,lgammal_reserve, /* 102, 103, 104 */
-  lgamma_overflow, lgamma_negative,lgamma_reserve,    /* 105, 106, 107 */
-  lgammaf_overflow, lgammaf_negative, lgammaf_reserve,/* 108, 109, 110 */
-  gammal_overflow,gammal_negative, gammal_reserve,    /* 111, 112, 113 */
-  gamma_overflow, gamma_negative, gamma_reserve,      /* 114, 115, 116 */
-  gammaf_overflow,gammaf_negative,gammaf_reserve,     /* 117, 118, 119 */   
-  fmodl_by_zero,                                 /* 120 */
-  fmod_by_zero,                                  /* 121 */
-  fmodf_by_zero,                                 /* 122 */
-  remainderl_by_zero,                            /* 123 */
-  remainder_by_zero,                             /* 124 */
-  remainderf_by_zero,                            /* 125 */
-  sinhl_overflow, sinh_overflow, sinhf_overflow, /* 126, 127, 128 */
-  atanhl_gt_one, atanhl_eq_one,                  /* 129, 130 */
-  atanh_gt_one, atanh_eq_one,                    /* 131, 132 */
-  atanhf_gt_one, atanhf_eq_one,                  /* 133, 134 */
-  acoshl_lt_one,                                 /* 135 */
-  acosh_lt_one,                                  /* 136 */
-  acoshf_lt_one,                                 /* 137 */
-  log1pl_zero,   log1pl_negative,                /* 138, 139 */
-  log1p_zero,    log1p_negative,                 /* 140, 141 */
-  log1pf_zero,   log1pf_negative,                /* 142, 143 */
-  ldexpl_overflow,   ldexpl_underflow,           /* 144, 145 */
-  ldexp_overflow,    ldexp_underflow,            /* 146, 147 */
-  ldexpf_overflow,   ldexpf_underflow,           /* 148, 149 */
-  logbl_zero,   logb_zero, logbf_zero,           /* 150, 151, 152 */
-  nextafterl_overflow,   nextafter_overflow,  
-  nextafterf_overflow,                           /* 153, 154, 155 */
-  ilogbl_zero,  ilogb_zero, ilogbf_zero,         /* 156, 157, 158 */
-  exp2l_overflow, exp2l_underflow,               /* 159, 160 */
-  exp2_overflow,  exp2_underflow,                /* 161, 162 */
-  exp2f_overflow, exp2f_underflow,               /* 163, 164 */
-  exp10l_overflow, exp10_overflow,
-  exp10f_overflow,                               /* 165, 166, 167 */
-  log2l_zero,    log2l_negative,                 /* 168, 169 */
-  log2_zero,     log2_negative,                  /* 170, 171 */
-  log2f_zero,    log2f_negative,                 /* 172, 173 */
-  scalbnl_overflow, scalbnl_underflow,           /* 174, 175 */
-  scalbn_overflow,  scalbn_underflow,            /* 176, 177 */
-  scalbnf_overflow, scalbnf_underflow,           /* 178, 179 */
-  remquol_by_zero,                               /* 180 */
-  remquo_by_zero,                                /* 181 */
-  remquof_by_zero,                               /* 182 */
-  lrintl_large, lrint_large, lrintf_large,       /* 183, 184, 185 */
-  llrintl_large, llrint_large, llrintf_large,    /* 186, 187, 188 */
-  lroundl_large, lround_large, lroundf_large,    /* 189, 190, 191 */
-  llroundl_large, llround_large, llroundf_large, /* 192, 193, 194 */
-  fdiml_overflow, fdim_overflow, fdimf_overflow, /* 195, 196, 197 */
-  nexttowardl_overflow,   nexttoward_overflow,   
-  nexttowardf_overflow,                          /* 198, 199, 200 */
-  scalblnl_overflow, scalblnl_underflow,         /* 201, 202 */
-  scalbln_overflow,  scalbln_underflow,          /* 203, 204 */
-  scalblnf_overflow, scalblnf_underflow,         /* 205, 206 */
-  erfcl_underflow, erfc_underflow, erfcf_underflow, /* 207, 208, 209 */
-  acosdl_gt_one, acosd_gt_one, acosdf_gt_one,    /* 210, 211, 212 */
-  asindl_gt_one, asind_gt_one, asindf_gt_one,    /* 213, 214, 215 */
-  atan2dl_zero, atan2d_zero, atan2df_zero,       /* 216, 217, 218 */
-  tandl_overflow, tand_overflow, tandf_overflow, /* 219, 220, 221 */
-  cotdl_overflow, cotd_overflow, cotdf_overflow, /* 222, 223, 224 */
-  cotl_overflow, cot_overflow, cotf_overflow,    /* 225, 226, 227 */
-  sinhcoshl_overflow, sinhcosh_overflow, sinhcoshf_overflow, /* 228, 229, 230 */
-  annuityl_by_zero, annuity_by_zero, annuityf_by_zero, /* 231, 232, 233 */
-  annuityl_less_m1, annuity_less_m1, annuityf_less_m1, /* 234, 235, 236 */
-  annuityl_overflow, annuity_overflow, annuityf_overflow, /* 237, 238, 239 */
-  annuityl_underflow, annuity_underflow, annuityf_underflow, /* 240, 241, 242 */
-  compoundl_by_zero, compound_by_zero, compoundf_by_zero, /* 243, 244, 245 */
-  compoundl_less_m1, compound_less_m1, compoundf_less_m1, /* 246, 247, 248 */
-  compoundl_overflow, compound_overflow, compoundf_overflow, /* 249, 250, 251 */
-  compoundl_underflow, compound_underflow, compoundf_underflow, /* 252, 253, 254 */
-  tgammal_overflow, tgammal_negative, tgammal_reserve, /* 255, 256, 257 */
-  tgamma_overflow, tgamma_negative, tgamma_reserve, /* 258, 259, 260 */
-  tgammaf_overflow, tgammaf_negative, tgammaf_reserve, /* 261, 262, 263 */
-} error_types;
-
-void __libm_error_support(void*,void*,void*,error_types);
-#ifdef _LIBC
-libc_hidden_proto(__libm_error_support)
+    _IEEE_ = -1, // IEEE-like behavior
+    _SVID_,      // SysV, Rel. 4 behavior
+    _XOPEN_,     // Unix98
+    _POSIX_,     // Posix
+    _ISOC_       // ISO C9X
+} _LIB_VERSION_TYPE;
 #endif
 
-#define HI_SIGNIFICAND_LESS(X, HI) ((X)->hi_significand < 0x ## HI)
-#define f64abs(x) ((x) < 0.0 ? -(x) : (x))
+// This is a run-time variable and may affect
+// floating point behavior of the libm functions
+
+#if !defined( LIBM_BUILD )
+#if defined( _DLL )
+extern _LIB_VERSION_TYPE __declspec(dllimport) _LIB_VERSIONIMF;
+#else
+extern _LIB_VERSION_TYPE _LIB_VERSIONIMF;
+#endif  /* _DLL */
+#else
+extern int (*pmatherrf)(struct exceptionf*);
+extern int (*pmatherr)(struct EXC_DECL_D*);
+extern int (*pmatherrl)(struct exceptionl*);
+#endif  /* LIBM_BUILD */
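As a sketch only (assuming the variable is writable by the application, as the run-time-variable comment below suggests), a program built against this libm could pick the error-handling regime at startup:

    /* sketch: request SVID (matherr-style) error handling */
    static void select_svid_errors(void)
    {
        _LIB_VERSIONIMF = _SVID_;
    }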
+
+/* memory format definitions (LITTLE_ENDIAN only) */
+
+#if !(defined(SIZE_INT_32) || defined(SIZE_INT_64))
+# error "You need to define SIZE_INT_32 or SIZE_INT_64"
+#endif
+
+#if (defined(SIZE_INT_32) && defined(SIZE_INT_64))
+#error multiple integer size definitions; define SIZE_INT_32 or SIZE_INT_64
+#endif
+
+#if !(defined(SIZE_LONG_32) || defined(SIZE_LONG_64))
+# error "You need to define SIZE_LONG_32 or SIZE_LONG_64"
+#endif
+
+#if (defined(SIZE_LONG_32) && defined(SIZE_LONG_64))
+#error multiple integer size definitions; define SIZE_LONG_32 or SIZE_LONG_64
+#endif
 
 #if !defined(__USE_EXTERNAL_FPMEMTYP_H__)
 
@@ -342,22 +254,519 @@ struct fp80 { /*/ sign:1 exponent:15 significand:64 (NO implied bits) */
     unsigned         exponent:15;
     unsigned         sign:1;
 #endif
+    unsigned         pad:16;
+#if !(defined(__unix__) && defined(__i386__))
+    unsigned         padwin:32;
+#endif
 };
 
 #endif /*__USE_EXTERNAL_FPMEMTYP_H__*/
 
-/* macros to form a double value in hex representation (unsigned int type) */
+#if !(defined(opensource))
+typedef          __int32  INT32;
+typedef   signed __int32 SINT32;
+typedef unsigned __int32 UINT32;
 
-#define DOUBLE_HEX(hi,lo) 0x##lo,0x##hi /*LITTLE_ENDIAN*/
+typedef          __int64  INT64;
+typedef   signed __int64 SINT64;
+typedef unsigned __int64 UINT64;
+#else
+typedef          int  INT32;
+typedef   signed int SINT32;
+typedef unsigned int UINT32;
 
-/* macros to form a long double value in hex representation (unsigned short type) */
+typedef          long long  INT64;
+typedef   signed long long SINT64;
+typedef unsigned long long UINT64;
+#endif
 
-#if defined(_WIN32) || defined(_WIN64)
-#define LDOUBLE_ALIGN 16
-#else
-#define LDOUBLE_ALIGN 12
+#if (defined(_WIN32) || defined(_WIN64))        /* Windows */
+# define I64CONST(bits) 0x##bits##i64
+# define U64CONST(bits) 0x##bits##ui64
+#elif (defined(__linux__) && defined(_M_IA64))  /* Linux,64 */
+# define I64CONST(bits) 0x##bits##L
+# define U64CONST(bits) 0x##bits##uL
+#else                                           /* Linux,32 */
+# define I64CONST(bits) 0x##bits##LL
+# define U64CONST(bits) 0x##bits##uLL
+#endif
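A short sketch of the intent: the macros select the 64-bit literal suffix for the current platform, so one spelling works under both the Windows and Linux conventions chosen above.

    /* sketch: the most significant bit of a 64-bit word, spelled portably */
    static const UINT64 __demo_msb64 = U64CONST(8000000000000000);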
+
+struct ker80 {
+    union {
+        long double ldhi;
+        struct fp80 fphi;
+    };
+    union {
+        long double ldlo;
+        struct fp80 fplo;
+    };
+    int ex;
+};
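ker80 carries a value factored as (ldhi + ldlo) * 2^ex. A minimal sketch of loading one from a plain long double (zero tail, zero extra exponent):

    /* sketch: wrap a long double in a ker80, value == v * 2^0 */
    static void __demo_ker80_from_ld(struct ker80 *k, long double v)
    {
        k->ldhi = v;
        k->ldlo = 0.0L;
        k->ex   = 0;
    }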
+
+/* Addition: x+y                                            */
+/* The result is sum rhi+rlo                                */
+/* Temporary variables: t1                                  */
+/* All variables are in long double precision               */
+/* Correct if no overflow (algorithm by D.Knuth)           */
+#define __LIBM_ADDL1_K80( rhi,rlo,x,y, t1 )                 \
+    rhi = x   + y;                                          \
+    rlo = rhi - x;                                          \
+    t1  = rhi - rlo;                                        \
+    rlo = y   - rlo;                                        \
+    t1  = x   - t1;                                         \
+    rlo = rlo + t1;
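A sketch of a call site: after the macro runs, rhi holds the rounded sum and rlo the exact rounding error, so rhi + rlo == x + y whenever no overflow occurs.

    /* sketch: exact two-sum of two long doubles */
    static long double __demo_two_sum(long double x, long double y, long double *err)
    {
        long double rhi, t1;
        __LIBM_ADDL1_K80(rhi, *err, x, y, t1)
        return rhi;     /* rhi + *err == x + y exactly, barring overflow */
    }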
+
+/* Addition: (xhi+xlo) + (yhi+ylo)                          */
+/* The result is sum rhi+rlo                                */
+/* Temporary variables: t1                                  */
+/* All variables are in long double precision               */
+/* Correct if no overflow (algorithm by T.J.Dekker)         */
+#define __LIBM_ADDL2_K80( rhi,rlo,xhi,xlo,yhi,ylo, t1 )     \
+    rlo = xhi+yhi;                                          \
+    if ( VALUE_GT_80(FP80(xhi),FP80(yhi)) ) {               \
+        t1=xhi-rlo;t1=t1+yhi;t1=t1+ylo;t1=t1+xlo;           \
+    } else {                                                \
+        t1=yhi-rlo;t1=t1+xhi;t1=t1+xlo;t1=t1+ylo;           \
+    }                                                       \
+    rhi=rlo+t1;                                             \
+    rlo=rlo-rhi;rlo=rlo+t1;
+
+/* Addition: r=x+y                                          */
+/* Variables r,x,y are pointers to struct ker80,            */
+/* all other variables are in long double precision         */
+/* Temporary variables: t1                                  */
+/* Correct if x and y belong to interval [2^-8000;2^8000],  */
+/* or when one or both of them are zero                     */
+#if   defined(SIZE_INT_32)
+#define __LIBM_ADDL_K80(r,x,y, t1)                          \
+    if ( ((y)->ex+(y)->fphi.exponent-134 <                  \
+          (x)->ex+(x)->fphi.exponent)       &&              \
+         ((x)->ex+(x)->fphi.exponent <                      \
+          (y)->ex+(y)->fphi.exponent+134)   &&              \
+         !SIGNIFICAND_ZERO_80(&((x)->fphi)) &&              \
+         !SIGNIFICAND_ZERO_80(&((y)->fphi)) )               \
+    {                                                       \
+        /* y/2^134 < x < y*2^134,               */          \
+        /* and x,y are nonzero finite numbers   */          \
+        if ( (x)->ex != (y)->ex ) {                         \
+            /* adjust x->ex to y->ex */                     \
+            /* t1 = 2^(x->ex - y->ex) */                    \
+            FP80(t1)->sign = 0;                             \
+            FP80(t1)->exponent = BIAS_80 + (x)->ex-(y)->ex; \
+            /*  exponent is correct because             */  \
+            /*  |x->ex - y->ex| =                       */  \
+            /*  = |  (x->ex + x->fphi.exponent) -       */  \
+            /*      -(y->ex + y->fphi.exponent) +       */  \
+            /*              + y->fphi.exponent  -       */  \
+            /*              - x->fphi.exponent     | <  */  \
+            /*  < |  (x->ex+x->fphi.exponent) -         */  \
+            /*      -(y->ex+y->fphi.exponent)      | +  */  \
+            /*   +|  y->fphi.exponent -                 */  \
+            /*      -x->fphi.exponent              | <  */  \
+            /*  < 134 + 16000                           */  \
+            FP80(t1)->hi_significand = 0x80000000;          \
+            FP80(t1)->lo_significand = 0x00000000;          \
+            (x)->ex = (y)->ex;                              \
+            (x)->ldhi *= t1;                                \
+            (x)->ldlo *= t1;                                \
+        }                                                   \
+        /* r==x+y */                                        \
+        (r)->ex = (y)->ex;                                  \
+        __LIBM_ADDL2_K80( (r)->ldhi,(r)->ldlo,              \
+            (x)->ldhi,(x)->ldlo, (y)->ldhi,(y)->ldlo, t1 ); \
+    } else if ( SIGNIFICAND_ZERO_80(&((x)->fphi)) ||        \
+             ((y)->ex+(y)->fphi.exponent-BIAS_80 - 134 >=   \
+              (x)->ex+(x)->fphi.exponent-BIAS_80) )         \
+    {                                                       \
+        /* |x|<<|y| */                                      \
+        *(r) = *(y);                                        \
+    } else {                                                \
+        /* |y|<<|x| */                                      \
+        *(r) = *(x);                                        \
+    }
+#elif defined(SIZE_INT_64)
+#define __LIBM_ADDL_K80(r,x,y, t1)                          \
+    if ( ((y)->ex+(y)->fphi.exponent-134 <                  \
+          (x)->ex+(x)->fphi.exponent)       &&              \
+         ((x)->ex+(x)->fphi.exponent <                      \
+          (y)->ex+(y)->fphi.exponent+134)   &&              \
+         !SIGNIFICAND_ZERO_80(&((x)->fphi)) &&              \
+         !SIGNIFICAND_ZERO_80(&((y)->fphi)) )               \
+    {                                                       \
+        /* y/2^134 < x < y*2^134,               */          \
+        /* and x,y are nonzero finite numbers   */          \
+        if ( (x)->ex != (y)->ex ) {                         \
+            /* adjust x->ex to y->ex */                     \
+            /* t1 = 2^(x->ex - y->ex) */                    \
+            FP80(t1)->sign = 0;                             \
+            FP80(t1)->exponent = BIAS_80 + (x)->ex-(y)->ex; \
+            /*  exponent is correct because             */  \
+            /*  |x->ex - y->ex| =                       */  \
+            /*  = |  (x->ex + x->fphi.exponent) -       */  \
+            /*      -(y->ex + y->fphi.exponent) +       */  \
+            /*              + y->fphi.exponent  -       */  \
+            /*              - x->fphi.exponent     | <  */  \
+            /*  < |  (x->ex+x->fphi.exponent) -         */  \
+            /*      -(y->ex+y->fphi.exponent)      | +  */  \
+            /*   +|  y->fphi.exponent -                 */  \
+            /*      -x->fphi.exponent              | <  */  \
+            /*  < 134 + 16000                           */  \
+            FP80(t1)->significand = 0x8000000000000000;     \
+            (x)->ex = (y)->ex;                              \
+            (x)->ldhi *= t1;                                \
+            (x)->ldlo *= t1;                                \
+        }                                                   \
+        /* r==x+y */                                        \
+        (r)->ex = (y)->ex;                                  \
+        __LIBM_ADDL2_K80( (r)->ldhi,(r)->ldlo,              \
+            (x)->ldhi,(x)->ldlo, (y)->ldhi,(y)->ldlo, t1 ); \
+    } else if ( SIGNIFICAND_ZERO_80(&((x)->fphi)) ||        \
+             ((y)->ex+(y)->fphi.exponent-BIAS_80 - 134 >=   \
+              (x)->ex+(x)->fphi.exponent-BIAS_80) )         \
+    {                                                       \
+        /* |x|<<|y| */                                      \
+        *(r) = *(y);                                        \
+    } else {                                                \
+        /* |y|<<|x| */                                      \
+        *(r) = *(x);                                        \
+    }
 #endif
 
+/* Addition: r=x+y                                          */
+/* Variables r,x,y are pointers to struct ker80,            */
+/* all other variables are in long double precision         */
+/* Temporary variables: t1                                  */
+/* Correct for any finite x and y                           */
+#define __LIBM_ADDL_NORM_K80(r,x,y, t1)                     \
+    if ( ((x)->fphi.exponent-BIAS_80<-8000) ||              \
+         ((x)->fphi.exponent-BIAS_80>+8000) ||              \
+         ((y)->fphi.exponent-BIAS_80<-8000) ||              \
+         ((y)->fphi.exponent-BIAS_80>+8000) )               \
+    {                                                       \
+        __libm_normalizel_k80(x);                           \
+        __libm_normalizel_k80(y);                           \
+    }                                                       \
+    __LIBM_ADDL_K80(r,x,y, t1)
+
+/* Subtraction: x-y                                         */
+/* The result is sum rhi+rlo                                */
+/* Temporary variables: t1                                  */
+/* All variables are in long double precision               */
+/* Correct if no overflow (algorithm by D.Knuth)           */
+#define __LIBM_SUBL1_K80( rhi, rlo, x, y, t1 )              \
+    rhi = x   - y;                                          \
+    rlo = rhi - x;                                          \
+    t1  = rhi - rlo;                                        \
+    rlo = y   + rlo;                                        \
+    t1  = x   - t1;                                         \
+    rlo = t1  - rlo;
+
+/* Subtraction: (xhi+xlo) - (yhi+ylo)                       */
+/* The result is sum rhi+rlo                                */
+/* Temporary variables: t1                                  */
+/* All variables are in long double precision               */
+/* Correct if no overflow (algorithm by T.J.Dekker)         */
+#define __LIBM_SUBL2_K80( rhi,rlo,xhi,xlo,yhi,ylo, t1 )     \
+    rlo = xhi-yhi;                                          \
+    if ( VALUE_GT_80(FP80(xhi),FP80(yhi)) ) {               \
+        t1=xhi-rlo;t1=t1-yhi;t1=t1-ylo;t1=t1+xlo;           \
+    } else {                                                \
+        t1=yhi+rlo;t1=xhi-t1;t1=t1+xlo;t1=t1-ylo;           \
+    }                                                       \
+    rhi=rlo+t1;                                             \
+    rlo=rlo-rhi;rlo=rlo+t1;
+
+/* Subtraction: r=x-y                                       */
+/* Variables r,x,y are pointers to struct ker80,            */
+/* all other variables are in long double precision         */
+/* Temporary variables: t1                                  */
+/* Correct if x and y belong to interval [2^-8000;2^8000],  */
+/* or when one or both of them are zero                     */
+#if   defined(SIZE_INT_32)
+#define __LIBM_SUBL_K80(r,x,y, t1)                          \
+    if ( ((y)->ex+(y)->fphi.exponent-134 <                  \
+          (x)->ex+(x)->fphi.exponent)       &&              \
+         ((x)->ex+(x)->fphi.exponent <                      \
+          (y)->ex+(y)->fphi.exponent+134)   &&              \
+         !SIGNIFICAND_ZERO_80(&((x)->fphi)) &&              \
+         !SIGNIFICAND_ZERO_80(&((y)->fphi)) )               \
+    {                                                       \
+        /* y/2^134 < x < y*2^134,               */          \
+        /* and x,y are nonzero finite numbers   */          \
+        if ( (x)->ex != (y)->ex ) {                         \
+            /* adjust x->ex to y->ex */                     \
+            /* t1 = 2^(x->ex - y->ex) */                    \
+            FP80(t1)->sign = 0;                             \
+            FP80(t1)->exponent = BIAS_80 + (x)->ex-(y)->ex; \
+            /*  exponent is correct because             */  \
+            /*  |x->ex - y->ex| =                       */  \
+            /*  = |  (x->ex + x->fphi.exponent) -       */  \
+            /*      -(y->ex + y->fphi.exponent) +       */  \
+            /*              + y->fphi.exponent  -       */  \
+            /*              - x->fphi.exponent     | <  */  \
+            /*  < |  (x->ex+x->fphi.exponent) -         */  \
+            /*      -(y->ex+y->fphi.exponent)      | +  */  \
+            /*   +|  y->fphi.exponent -                 */  \
+            /*      -x->fphi.exponent              | <  */  \
+            /*  < 134 + 16000                           */  \
+            FP80(t1)->hi_significand = 0x80000000;          \
+            FP80(t1)->lo_significand = 0x00000000;          \
+            (x)->ex = (y)->ex;                              \
+            (x)->ldhi *= t1;                                \
+            (x)->ldlo *= t1;                                \
+        }                                                   \
+        /* r==x-y */                                        \
+        (r)->ex = (y)->ex;                                  \
+        __LIBM_SUBL2_K80( (r)->ldhi,(r)->ldlo,              \
+            (x)->ldhi,(x)->ldlo, (y)->ldhi,(y)->ldlo, t1 ); \
+    } else if ( SIGNIFICAND_ZERO_80(&((x)->fphi)) ||        \
+             ((y)->ex+(y)->fphi.exponent-BIAS_80 - 134 >=   \
+              (x)->ex+(x)->fphi.exponent-BIAS_80) )         \
+    {                                                       \
+        /* |x|<<|y| */                                      \
+        (r)->ex   =   (y)->ex;                              \
+        (r)->ldhi = -((y)->ldhi);                           \
+        (r)->ldlo = -((y)->ldlo);                           \
+    } else {                                                \
+        /* |y|<<|x| */                                      \
+        *(r) = *(x);                                        \
+    }
+#elif defined(SIZE_INT_64)
+#define __LIBM_SUBL_K80(r,x,y, t1)                          \
+    if ( ((y)->ex+(y)->fphi.exponent-134 <                  \
+          (x)->ex+(x)->fphi.exponent)       &&              \
+         ((x)->ex+(x)->fphi.exponent <                      \
+          (y)->ex+(y)->fphi.exponent+134)   &&              \
+         !SIGNIFICAND_ZERO_80(&((x)->fphi)) &&              \
+         !SIGNIFICAND_ZERO_80(&((y)->fphi)) )               \
+    {                                                       \
+        /* y/2^134 < x < y*2^134,               */          \
+        /* and x,y are nonzero finite numbers   */          \
+        if ( (x)->ex != (y)->ex ) {                         \
+            /* adjust x->ex to y->ex */                     \
+            /* t1 = 2^(x->ex - y->ex) */                    \
+            FP80(t1)->sign = 0;                             \
+            FP80(t1)->exponent = BIAS_80 + (x)->ex-(y)->ex; \
+            /*  exponent is correct because             */  \
+            /*  |x->ex - y->ex| =                       */  \
+            /*  = |  (x->ex + x->fphi.exponent) -       */  \
+            /*      -(y->ex + y->fphi.exponent) +       */  \
+            /*              + y->fphi.exponent  -       */  \
+            /*              - x->fphi.exponent     | <  */  \
+            /*  < |  (x->ex+x->fphi.exponent) -         */  \
+            /*      -(y->ex+y->fphi.exponent)      | +  */  \
+            /*   +|  y->fphi.exponent -                 */  \
+            /*      -x->fphi.exponent              | <  */  \
+            /*  < 134 + 16000                           */  \
+            FP80(t1)->significand = 0x8000000000000000;     \
+            (x)->ex = (y)->ex;                              \
+            (x)->ldhi *= t1;                                \
+            (x)->ldlo *= t1;                                \
+        }                                                   \
+        /* r==x-y */                                        \
+        (r)->ex = (y)->ex;                                  \
+        __LIBM_SUBL2_K80( (r)->ldhi,(r)->ldlo,              \
+            (x)->ldhi,(x)->ldlo, (y)->ldhi,(y)->ldlo, t1 ); \
+    } else if ( SIGNIFICAND_ZERO_80(&((x)->fphi)) ||        \
+             ((y)->ex+(y)->fphi.exponent-BIAS_80 - 134 >=   \
+              (x)->ex+(x)->fphi.exponent-BIAS_80) )         \
+    {                                                       \
+        /* |x|<<|y| */                                      \
+        (r)->ex   =   (y)->ex;                              \
+        (r)->ldhi = -((y)->ldhi);                           \
+        (r)->ldlo = -((y)->ldlo);                           \
+    } else {                                                \
+        /* |y|<<|x| */                                      \
+        *(r) = *(x);                                        \
+    }
+#endif
+
+/* Subtraction: r=x-y                                       */
+/* Variables r,x,y are pointers to struct ker80,            */
+/* all other variables are in long double precision         */
+/* Temporary variables: t1                                  */
+/* Correct for any finite x and y                           */
+#define __LIBM_SUBL_NORM_K80(r,x,y, t1)                     \
+    if ( ((x)->fphi.exponent-BIAS_80<-8000) ||              \
+         ((x)->fphi.exponent-BIAS_80>+8000) ||              \
+         ((y)->fphi.exponent-BIAS_80<-8000) ||              \
+         ((y)->fphi.exponent-BIAS_80>+8000) )               \
+    {                                                       \
+        __libm_normalizel_k80(x);                           \
+        __libm_normalizel_k80(y);                           \
+    }                                                       \
+    __LIBM_SUBL_K80(r,x,y, t1)
+
+/* Multiplication: x*y                                      */
+/* The result is sum rhi+rlo                                */
+/* Here t32 is the constant 2^32+1                          */
+/* Temporary variables: t1,t2,t3,t4,t5,t6                   */
+/* All variables are in long double precision               */
+/* Correct if no over/underflow (algorithm by T.J.Dekker)   */
+#define __LIBM_MULL1_K80(rhi,rlo,x,y,                       \
+                                     t32,t1,t2,t3,t4,t5,t6) \
+    t1=(x)*(t32); t3=x-t1; t3=t3+t1; t4=x-t3;               \
+    t1=(y)*(t32); t5=y-t1; t5=t5+t1; t6=y-t5;               \
+    t1=(t3)*(t5);                                           \
+    t2=(t3)*(t6)+(t4)*(t5);                                 \
+    rhi=t1+t2;                                              \
+    rlo=t1-rhi; rlo=rlo+t2; rlo=rlo+(t4*t6);
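A sketch of a call site, assuming an extended-precision type with a 64-bit significand so that the stated splitting constant t32 = 2^32 + 1 applies:

    /* sketch: Dekker's exact product of two long doubles */
    static long double __demo_two_prod(long double a, long double b, long double *err)
    {
        const long double t32 = 4294967297.0L;   /* 2^32 + 1 */
        long double rhi, t1, t2, t3, t4, t5, t6;
        __LIBM_MULL1_K80(rhi, *err, a, b, t32, t1, t2, t3, t4, t5, t6)
        return rhi;     /* rhi + *err == a * b, absent over/underflow */
    }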
+
+/* Multiplication: (xhi+xlo)*(yhi+ylo)                      */
+/* The result is sum rhi+rlo                                */
+/* Here t32 is the constant 2^32+1                          */
+/* Temporary variables: t1,t2,t3,t4,t5,t6,t7,t8             */
+/* All variables are in long double precision               */
+/* Correct if no over/underflow (algorithm by T.J.Dekker)   */
+#define __LIBM_MULL2_K80(rhi,rlo,xhi,xlo,yhi,ylo,           \
+                               t32,t1,t2,t3,t4,t5,t6,t7,t8) \
+    __LIBM_MULL1_K80(t7,t8,xhi,yhi, t32,t1,t2,t3,t4,t5,t6)  \
+    t1=(xhi)*(ylo)+(xlo)*(yhi); t1=t1+t8;                   \
+    rhi=t7+t1;                                              \
+    rlo=t7-rhi; rlo=rlo+t1;
+
+/* Multiplication: r=x*y                                    */
+/* Variables r,x,y are pointers to struct ker80,            */
+/* all other variables are in long double precision         */
+/* Here t32 is the constant 2^32+1                          */
+/* Temporary variables: t1,t2,t3,t4,t5,t6,t7,t8             */
+/* Correct if x and y belong to interval [2^-8000;2^8000]   */
+#define __LIBM_MULL_K80(r,x,y, t32,t1,t2,t3,t4,t5,t6,t7,t8) \
+    (r)->ex = (x)->ex + (y)->ex;                            \
+    __LIBM_MULL2_K80((r)->ldhi,(r)->ldlo,                   \
+        (x)->ldhi,(x)->ldlo,(y)->ldhi,(y)->ldlo,            \
+        t32,t1,t2,t3,t4,t5,t6,t7,t8)
+
+/* Multiplication: r=x*y                                    */
+/* Variables r,x,y are pointers to struct ker80,            */
+/* all other variables are in long double precision         */
+/* Here t32 is the constant 2^32+1                          */
+/* Temporary variables: t1,t2,t3,t4,t5,t6,t7,t8             */
+/* Correct for any finite x and y                           */
+#define __LIBM_MULL_NORM_K80(r,x,y,                         \
+                               t32,t1,t2,t3,t4,t5,t6,t7,t8) \
+    if ( ((x)->fphi.exponent-BIAS_80<-8000) ||              \
+         ((x)->fphi.exponent-BIAS_80>+8000) ||              \
+         ((y)->fphi.exponent-BIAS_80<-8000) ||              \
+         ((y)->fphi.exponent-BIAS_80>+8000) )               \
+    {                                                       \
+        __libm_normalizel_k80(x);                           \
+        __libm_normalizel_k80(y);                           \
+    }                                                       \
+    __LIBM_MULL_K80(r,x,y, t32,t1,t2,t3,t4,t5,t6,t7,t8)
+
+/* Division: (xhi+xlo)/(yhi+ylo)                            */
+/* The result is sum rhi+rlo                                */
+/* Here t32 is the constant 2^32+1                          */
+/* Temporary variables: t1,t2,t3,t4,t5,t6,t7,t8,t9          */
+/* All variables are in long double precision               */
+/* Correct if no over/underflow (algorithm by T.J.Dekker)   */
+#define __LIBM_DIVL2_K80(rhi,rlo,xhi,xlo,yhi,ylo,           \
+                            t32,t1,t2,t3,t4,t5,t6,t7,t8,t9) \
+    t7=(xhi)/(yhi);                                         \
+    __LIBM_MULL1_K80(t8,t9,t7,yhi, t32,t1,t2,t3,t4,t5,t6)   \
+    t1=xhi-t8; t1=t1-t9; t1=t1+xlo; t1=t1-(t7)*(ylo);       \
+    t1=(t1)/(yhi);                                          \
+    rhi=t7+t1;                                              \
+    rlo=t7-rhi; rlo=rlo+t1;
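A sketch of a call site for the double-width quotient; the same t32 constant feeds the embedded exact multiply:

    /* sketch: (xhi+xlo)/(yhi+ylo) to roughly twice long double precision */
    static long double __demo_div2(long double xhi, long double xlo,
                                   long double yhi, long double ylo,
                                   long double *rlo)
    {
        const long double t32 = 4294967297.0L;   /* 2^32 + 1 */
        long double rhi, t1, t2, t3, t4, t5, t6, t7, t8, t9;
        __LIBM_DIVL2_K80(rhi, *rlo, xhi, xlo, yhi, ylo,
                         t32, t1, t2, t3, t4, t5, t6, t7, t8, t9)
        return rhi;
    }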
+
+/* Division: r=x/y                                          */
+/* Variables r,x,y are pointers to struct ker80,            */
+/* all other variables are in long double precision         */
+/* Here t32 is the constant 2^32+1                          */
+/* Temporary variables: t1,t2,t3,t4,t5,t6,t7,t8,t9          */
+/* Correct if x and y belong to interval [2^-8000;2^8000]   */
+#define __LIBM_DIVL_K80(r,x,y,                              \
+                            t32,t1,t2,t3,t4,t5,t6,t7,t8,t9) \
+    (r)->ex = (x)->ex - (y)->ex;                            \
+    __LIBM_DIVL2_K80( (r)->ldhi,(r)->ldlo,                  \
+        (x)->ldhi,(x)->ldlo,(y)->ldhi,(y)->ldlo,            \
+        t32,t1,t2,t3,t4,t5,t6,t7,t8,t9)
+
+/* Division: r=x/y                                          */
+/* Variables r,x,y are pointers to struct ker80,            */
+/* all other variables are in long double precision         */
+/* Here t32 is the constant 2^32+1                          */
+/* Temporary variables: t1,t2,t3,t4,t5,t6,t7,t8             */
+/* Correct for any finite x and y                           */
+#define __LIBM_DIVL_NORM_K80(r,x,y,                         \
+                            t32,t1,t2,t3,t4,t5,t6,t7,t8,t9) \
+    if ( ((x)->fphi.exponent-BIAS_80<-8000) ||              \
+         ((x)->fphi.exponent-BIAS_80>+8000) ||              \
+         ((y)->fphi.exponent-BIAS_80<-8000) ||              \
+         ((y)->fphi.exponent-BIAS_80>+8000) )               \
+    {                                                       \
+        __libm_normalizel_k80(x);                           \
+        __libm_normalizel_k80(y);                           \
+    }                                                       \
+    __LIBM_DIVL_K80(r,x,y, t32,t1,t2,t3,t4,t5,t6,t7,t8,t9)
+
+/* Square root: sqrt(xhi+xlo)                               */
+/* The result is sum rhi+rlo                                */
+/* Here t32 is the constant 2^32+1                          */
+/*      half is the constant 0.5                            */
+/* Temporary variables: t1,t2,t3,t4,t5,t6,t7,t8,t9          */
+/* All variables are in long double precision               */
+/* Correct for positive xhi+xlo (algorithm by T.J.Dekker)   */
+#define __LIBM_SQRTL2_NORM_K80(rhi,rlo,xhi,xlo,             \
+                       t32,half,t1,t2,t3,t4,t5,t6,t7,t8,t9) \
+    t7=sqrtl(xhi);                                          \
+    __LIBM_MULL1_K80(t8,t9,t7,t7, t32,t1,t2,t3,t4,t5,t6)    \
+    t1=xhi-t8; t1=t1-t9; t1=t1+xlo; t1=(t1)*(half);         \
+    t1=(t1)/(t7);                                           \
+    rhi=t7+t1;                                              \
+    rlo=t7-rhi; rlo=rlo+t1;
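A sketch of a call site; sqrtl must be visible (e.g. via <math.h>), and half is the literal 0.5 the macro expects:

    /* sketch: square root of (xhi+xlo) with one correction step */
    static long double __demo_sqrt2(long double xhi, long double xlo, long double *rlo)
    {
        const long double t32 = 4294967297.0L, half = 0.5L;
        long double rhi, t1, t2, t3, t4, t5, t6, t7, t8, t9;
        __LIBM_SQRTL2_NORM_K80(rhi, *rlo, xhi, xlo,
                               t32, half, t1, t2, t3, t4, t5, t6, t7, t8, t9)
        return rhi;     /* rhi + *rlo ~ sqrt(xhi + xlo), for xhi + xlo > 0 */
    }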
+
+/* Square root: r=sqrt(x)                                   */
+/* Variables r,x are pointers to struct ker80,              */
+/* all other variables are in long double precision         */
+/* Here t32 is the constant 2^32+1                          */
+/*      half is the constant 0.5                            */
+/* Temporary variables: t1,t2,t3,t4,t5,t6,t7,t8,t9          */
+/* Correct if x belongs to interval [2^-16000;2^16000]      */
+#define __LIBM_SQRTL_K80(r,x,                               \
+                       t32,half,t1,t2,t3,t4,t5,t6,t7,t8,t9) \
+    if ( ((x)->ex & 1) == 1 ) {                             \
+        (x)->ex    = (x)->ex + 1;                           \
+        (x)->ldhi *= half;                                  \
+        (x)->ldlo *= half;                                  \
+    }                                                       \
+    (r)->ex = (x)->ex >> 1;                                 \
+    __LIBM_SQRTL2_NORM_K80( (r)->ldhi,(r)->ldlo,            \
+        (x)->ldhi,(x)->ldlo,                                \
+        t32,half,t1,t2,t3,t4,t5,t6,t7,t8,t9)
+
+/* Square root: r=sqrt(x)                                   */
+/* Variables r,x are pointers to struct ker80,              */
+/* all other variables are in long double precision         */
+/* Here t32 is the constant 2^32+1                          */
+/*      half is the constant 0.5                            */
+/* Temporary variables: t1,t2,t3,t4,t5,t6,t7,t8,t9          */
+/* Correct for any positive x                               */
+#define __LIBM_SQRTL_NORM_K80(r,x,                          \
+                       t32,half,t1,t2,t3,t4,t5,t6,t7,t8,t9) \
+    if ( ((x)->fphi.exponent-BIAS_80<-16000) ||             \
+         ((x)->fphi.exponent-BIAS_80>+16000) )              \
+    {                                                       \
+        __libm_normalizel_k80(x);                           \
+    }                                                       \
+    __LIBM_SQRTL_K80(r,x, t32,half,t1,t2,t3,t4,t5,t6,t7,t8,t9)
+
+
+#ifdef __INTEL_COMPILER
+#define ALIGN(n) __declspec(align(n))
+#else /* __INTEL_COMPILER */
+#define ALIGN(n)
+#endif /* __INTEL_COMPILER */
+
+/* macros to form a long double value in hex representation (unsigned short type) */
+
+#if (defined(__unix__) && defined(__i386__))
+# define LDOUBLE_ALIGN 12	/* IA32 Linux: 12-byte alignment */
+#else	/*__linux__ & IA32*/
+# define LDOUBLE_ALIGN 16	/* EFI2/IA32 Win or IPF Win/Linux: 16-byte alignment */
+#endif	/*__linux__ & IA32*/
+
 #if (LDOUBLE_ALIGN == 16)
 #define _XPD_ ,0x0000,0x0000,0x0000
 #else /*12*/
@@ -451,7 +860,7 @@ struct fp80 { /*/ sign:1 exponent:15 significand:64 (NO implied bits) */
 # define SIGNIFICAND_LT_HEX_64(X,HI,LO) ((X)->significand <  0x ## HI ## LO)
 # define SIGNIFICAND_LE_HEX_64(X,HI,LO) ((X)->significand <= 0x ## HI ## LO)
 #endif
-	
+
 #if defined(SIZE_INT_32)
 # define SIGNIFICAND_EQ_HEX_80(X,HI,LO) \
     (((X)->hi_significand == 0x ## HI) && ((X)->lo_significand == 0x ## LO))
@@ -514,15 +923,15 @@ struct fp80 { /*/ sign:1 exponent:15 significand:64 (NO implied bits) */
 
 #if defined(SIZE_INT_32)
 # define SIGNIFICAND_EQ_64(X,Y) \
-	(((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand == (Y)->lo_significand))
+  (((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand == (Y)->lo_significand))
 # define SIGNIFICAND_GT_64(X,Y) (((X)->hi_significand > (Y)->hi_significand) || \
-	(((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand >  (Y)->lo_significand)))
+  (((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand >  (Y)->lo_significand)))
 # define SIGNIFICAND_GE_64(X,Y) (((X)->hi_significand > (Y)->hi_significand) || \
-	(((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand >= (Y)->lo_significand)))
+  (((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand >= (Y)->lo_significand)))
 # define SIGNIFICAND_LT_64(X,Y) (((X)->hi_significand < (Y)->hi_significand) || \
-    (((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand <  (Y)->lo_significand)))
+  (((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand <  (Y)->lo_significand)))
 # define SIGNIFICAND_LE_64(X,Y) (((X)->hi_significand < (Y)->hi_significand) || \
-    (((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand <= (Y)->lo_significand)))
+  (((X)->hi_significand == (Y)->hi_significand) && ((X)->lo_significand <= (Y)->lo_significand)))
 #elif defined(SIZE_INT_64)
 # define SIGNIFICAND_EQ_64(X,Y) ((X)->significand == (Y)->significand)
 # define SIGNIFICAND_GT_64(X,Y) ((X)->significand >  (Y)->significand)
@@ -560,7 +969,7 @@ struct fp80 { /*/ sign:1 exponent:15 significand:64 (NO implied bits) */
    (((X)->exponent == (Y)->exponent) && (SIGNIFICAND_LT_32(X, Y))))
 #define VALUE_LE_32(X,Y) (((X)->exponent < (Y)->exponent) || \
    (((X)->exponent == (Y)->exponent) && (SIGNIFICAND_LE_32(X, Y))))
-   
+
 #define VALUE_EQ_64(X,Y) \
    (((X)->exponent == (Y)->exponent) && (SIGNIFICAND_EQ_64(X, Y)))
 #define VALUE_GT_64(X,Y) (((X)->exponent > (Y)->exponent) || \
@@ -571,7 +980,7 @@ struct fp80 { /*/ sign:1 exponent:15 significand:64 (NO implied bits) */
    (((X)->exponent == (Y)->exponent) && (SIGNIFICAND_LT_64(X, Y))))
 #define VALUE_LE_64(X,Y) (((X)->exponent < (Y)->exponent) || \
    (((X)->exponent == (Y)->exponent) && (SIGNIFICAND_LE_64(X, Y))))
-   
+
 #define VALUE_EQ_80(X,Y) \
    (((X)->exponent == (Y)->exponent) && (SIGNIFICAND_EQ_80(X, Y)))
 #define VALUE_GT_80(X,Y) (((X)->exponent > (Y)->exponent) || \
@@ -622,134 +1031,21 @@ struct fp80 { /*/ sign:1 exponent:15 significand:64 (NO implied bits) */
 #endif
 
 
+/* error codes */
 
-#if (defined(_WIN32) && !defined(_WIN64))
-
-#define FP80_DECLARE()
-#define _FPC_64    0x0300
-static unsigned short __wControlWord, __wNewControlWord;
-#define FP80_SET() { \
-        __asm { fnstcw   word ptr [__wControlWord] }   \
-        __wNewControlWord = __wControlWord | _FPC_64;  \
-        __asm { fldcw   word ptr [__wNewControlWord] } \
-    }
-#define FP80_RESET() { \
-        __asm { fldcw   word ptr [__wControlWord] } \
-    }
-#else /* defined(_WIN32) && !defined(_WIN64) */
-
-#define FP80_DECLARE()
-#define FP80_SET()
-#define FP80_RESET()
-
-#endif  /* defined(_WIN32) && !defined(_WIN64) */
-
-
-#ifdef _LIBC
-# include <math.h>
-#else
-
-static const unsigned INF[] = {
-    DOUBLE_HEX(7ff00000, 00000000),
-    DOUBLE_HEX(fff00000, 00000000)
-};
-
-static const double _zeroo = 0.0;
-static const double _bigg = 1.0e300;
-static const double _ponee = 1.0;
-static const double _nonee = -1.0; 
-
-#define INVALID    (_zeroo * *((double*)&INF[0]))
-#define PINF       *((double*)&INF[0]) 
-#define NINF       -PINF 
-#define PINF_DZ    (_ponee/_zeroo) 
-#define X_TLOSS    1.41484755040568800000e+16
-#endif
-
-struct exceptionf
-{
-  int type;
-  char *name;
-  float arg1, arg2, retval;
-};
-
-# ifdef __cplusplus
-struct __exception
-{
-  int type;
-  char *name;
-  double arg1, arg2, retval;
-};
-# else 
-
-#  ifndef _LIBC
-struct exception
-{
-  int type;
-  char *name;
-  double arg1, arg2, retval;
-};
-#  endif
-# endif
-
-
-
-struct exceptionl
-{
-  int type;
-  char *name;
-  long double arg1, arg2, retval;
-};
-
-#ifdef _MS_
-#define	MATHERR_F	_matherrf
-#define	MATHERR_D	_matherr
-#else
-#define	MATHERR_F	matherrf
-#define	MATHERR_D	matherr
-#endif
-
-# ifdef __cplusplus
-#define	EXC_DECL_D	__exception
-#else
-// exception is a reserved name in C++
-#define	EXC_DECL_D	exception
-#endif
-
-extern int MATHERR_F(struct exceptionf*);
-extern int MATHERR_D(struct EXC_DECL_D*);
-extern int matherrl(struct exceptionl*);
-
+#define DOMAIN     1   /* argument domain error */
+#define SING       2   /* argument singularity */
+#define OVERFLOW   3   /* overflow range error */
+#define UNDERFLOW  4   /* underflow range error */
+#define TLOSS      5   /* total loss of precision */
+#define PLOSS      6   /* partial loss of precision */
 
-/* Set these appropriately to make thread Safe */
-#define ERRNO_RANGE  errno = ERANGE
-#define ERRNO_DOMAIN errno = EDOM
+/* */
 
+#define VOLATILE_32 /*volatile*/
+#define VOLATILE_64 /*volatile*/
+#define VOLATILE_80 /*volatile*/
 
-// Add code to support _LIB_VERSIONIMF
-#ifndef _LIBC
-typedef enum
-{
-    _IEEE_ = -1, // IEEE-like behavior
-    _SVID_,      // SysV, Rel. 4 behavior
-    _XOPEN_,     // Unix98
-    _POSIX_,     // Posix
-    _ISOC_       // ISO C9X
-} _LIB_VERSION_TYPE;
-
-
-#if !defined( LIBM_BUILD )
-#if defined( _DLL )
-extern _LIB_VERSION_TYPE __declspec(dllimport) _LIB_VERSIONIMF;
-#else
-extern _LIB_VERSION_TYPE _LIB_VERSIONIMF;
-#endif	/* _DLL */
-#else
-extern int (*pmatherrf)(struct exceptionf*);
-extern int (*pmatherr)(struct EXC_DECL_D*);
-extern int (*pmatherrl)(struct exceptionl*);
-#endif	/* LIBM_BUILD */
+#define QUAD_TYPE _Quad
 
-// This is a run-time variable and may affect
-// floating point behavior of the libm functions
-#endif
+#endif    /*__LIBM_SUPPORT_H_INCLUDED__*/