author     Ulrich Drepper <drepper@redhat.com>  2001-03-12 00:04:52 +0000
committer  Ulrich Drepper <drepper@redhat.com>  2001-03-12 00:04:52 +0000
commit     e4d8276142b9c07b23043ef44b0fe8fa7bcc3121
tree       f153a80b6ce0fdd3261ff18a16fd80bd965231c3 /sysdeps/ieee754/dbl-64/s_tan.c
parent     d3c8723f6415af59a6ec14fcb918ad0e4d1fb588
Update.
2001-03-11  Ulrich Drepper  <drepper@redhat.com>

Last-bit accurate math library implementation by IBM Haifa. Contributed by Abraham Ziv <ziv@il.ibm.com>, Moshe Olshansky <olshansk@il.ibm.com>, Ealan Henis <ealan@il.ibm.com>, and Anna Reitman <reitman@il.ibm.com>.

* math/Makefile (dbl-only-routines): New variable.
(libm-routines): Add $(dbl-only-routines).
* sysdeps/ieee754/dbl-64/e_acos.c: Empty, definition is in e_asin.c.
* sysdeps/ieee754/dbl-64/e_asin.c: Replaced with accurate asin implementation.
* sysdeps/ieee754/dbl-64/e_atan2.c: Replaced with accurate atan2 implementation.
* sysdeps/ieee754/dbl-64/e_exp.c: Replaced with accurate exp implementation.
* sysdeps/ieee754/dbl-64/e_lgamma_r.c: Don't use __kernel_sin and __kernel_cos.
* sysdeps/ieee754/dbl-64/e_log.c: Replaced with accurate log implementation.
* sysdeps/ieee754/dbl-64/e_remainder.c: Replaced with accurate remainder implementation.
* sysdeps/ieee754/dbl-64/e_pow.c: Replaced with accurate pow implementation.
* sysdeps/ieee754/dbl-64/e_sqrt.c: Replaced with accurate sqrt implementation.
* sysdeps/ieee754/dbl-64/k_cos.c: Empty, definition is in s_sin.c.
* sysdeps/ieee754/dbl-64/k_sin.c: Empty, definition is in s_sin.c.
* sysdeps/ieee754/dbl-64/s_atan.c: Replaced with accurate atan implementation.
* sysdeps/ieee754/dbl-64/s_cos.c: Empty, definition is in s_sin.c.
* sysdeps/ieee754/dbl-64/s_sin.c: Replaced with accurate sin/cos implementation.
* sysdeps/ieee754/dbl-64/s_sincos.c: Rewritten to not use __kernel_sin and __kernel_cos.
* sysdeps/ieee754/dbl-64/s_tan.c: Replaced with accurate tan implementation.
* sysdeps/ieee754/dbl-64/Dist: Add new non-code files.
* sysdeps/ieee754/dbl-64/MathLib.h: New file.
* sysdeps/ieee754/dbl-64/asincos.tbl: New file.
* sysdeps/ieee754/dbl-64/atnat.h: New file.
* sysdeps/ieee754/dbl-64/atnat2.h: New file.
* sysdeps/ieee754/dbl-64/branred.c: New file.
* sysdeps/ieee754/dbl-64/branred.h: New file.
* sysdeps/ieee754/dbl-64/dla.h: New file.
* sysdeps/ieee754/dbl-64/doasin.c: New file.
* sysdeps/ieee754/dbl-64/doasin.h: New file.
* sysdeps/ieee754/dbl-64/dosincos.c: New file.
* sysdeps/ieee754/dbl-64/dosincos.h: New file.
* sysdeps/ieee754/dbl-64/endian.h: New file.
* sysdeps/ieee754/dbl-64/halfulp.c: New file.
* sysdeps/ieee754/dbl-64/mpa.c: New file.
* sysdeps/ieee754/dbl-64/mpa.h: New file.
* sysdeps/ieee754/dbl-64/mpa2.h: New file.
* sysdeps/ieee754/dbl-64/mpatan.c: New file.
* sysdeps/ieee754/dbl-64/mpatan.h: New file.
* sysdeps/ieee754/dbl-64/mpatan2.c: New file.
* sysdeps/ieee754/dbl-64/mpexp.c: New file.
* sysdeps/ieee754/dbl-64/mpexp.h: New file.
* sysdeps/ieee754/dbl-64/mplog.c: New file.
* sysdeps/ieee754/dbl-64/mplog.h: New file.
* sysdeps/ieee754/dbl-64/mpsqrt.c: New file.
* sysdeps/ieee754/dbl-64/mpsqrt.h: New file.
* sysdeps/ieee754/dbl-64/mptan.c: New file.
* sysdeps/ieee754/dbl-64/mydefs.h: New file.
* sysdeps/ieee754/dbl-64/powtwo.tbl: New file.
* sysdeps/ieee754/dbl-64/root.tbl: New file.
* sysdeps/ieee754/dbl-64/sincos.tbl: New file.
* sysdeps/ieee754/dbl-64/sincos32.c: New file.
* sysdeps/ieee754/dbl-64/sincos32.h: New file.
* sysdeps/ieee754/dbl-64/slowexp.c: New file.
* sysdeps/ieee754/dbl-64/slowpow.c: New file.
* sysdeps/ieee754/dbl-64/uasncs.h: New file.
* sysdeps/ieee754/dbl-64/uatan.tbl: New file.
* sysdeps/ieee754/dbl-64/uexp.h: New file.
* sysdeps/ieee754/dbl-64/uexp.tbl: New file.
* sysdeps/ieee754/dbl-64/ulog.h: New file.
* sysdeps/ieee754/dbl-64/ulog.tbl: New file.
* sysdeps/ieee754/dbl-64/upow.h: New file.
* sysdeps/ieee754/dbl-64/upow.tbl: New file.
* sysdeps/ieee754/dbl-64/urem.h: New file.
* sysdeps/ieee754/dbl-64/uroot.h: New file.
* sysdeps/ieee754/dbl-64/usncs.h: New file.
* sysdeps/ieee754/dbl-64/utan.h: New file.
* sysdeps/ieee754/dbl-64/utan.tbl: New file.
* sysdeps/i386/fpu/branred.c: New file.
* sysdeps/i386/fpu/doasin.c: New file.
* sysdeps/i386/fpu/dosincos.c: New file.
* sysdeps/i386/fpu/halfulp.c: New file.
* sysdeps/i386/fpu/mpa.c: New file.
* sysdeps/i386/fpu/mpatan.c: New file.
* sysdeps/i386/fpu/mpatan2.c: New file.
* sysdeps/i386/fpu/mpexp.c: New file.
* sysdeps/i386/fpu/mplog.c: New file.
* sysdeps/i386/fpu/mpsqrt.c: New file.
* sysdeps/i386/fpu/mptan.c: New file.
* sysdeps/i386/fpu/sincos32.c: New file.
* sysdeps/i386/fpu/slowexp.c: New file.
* sysdeps/i386/fpu/slowpow.c: New file.
* sysdeps/ia64/fpu/branred.c: New file.
* sysdeps/ia64/fpu/doasin.c: New file.
* sysdeps/ia64/fpu/dosincos.c: New file.
* sysdeps/ia64/fpu/halfulp.c: New file.
* sysdeps/ia64/fpu/mpa.c: New file.
* sysdeps/ia64/fpu/mpatan.c: New file.
* sysdeps/ia64/fpu/mpatan2.c: New file.
* sysdeps/ia64/fpu/mpexp.c: New file.
* sysdeps/ia64/fpu/mplog.c: New file.
* sysdeps/ia64/fpu/mpsqrt.c: New file.
* sysdeps/ia64/fpu/mptan.c: New file.
* sysdeps/ia64/fpu/sincos32.c: New file.
* sysdeps/ia64/fpu/slowexp.c: New file.
* sysdeps/ia64/fpu/slowpow.c: New file.
* sysdeps/m68k/fpu/branred.c: New file.
* sysdeps/m68k/fpu/doasin.c: New file.
* sysdeps/m68k/fpu/dosincos.c: New file.
* sysdeps/m68k/fpu/halfulp.c: New file.
* sysdeps/m68k/fpu/mpa.c: New file.
* sysdeps/m68k/fpu/mpatan.c: New file.
* sysdeps/m68k/fpu/mpatan2.c: New file.
* sysdeps/m68k/fpu/mpexp.c: New file.
* sysdeps/m68k/fpu/mplog.c: New file.
* sysdeps/m68k/fpu/mpsqrt.c: New file.
* sysdeps/m68k/fpu/mptan.c: New file.
* sysdeps/m68k/fpu/sincos32.c: New file.
* sysdeps/m68k/fpu/slowexp.c: New file.
* sysdeps/m68k/fpu/slowpow.c: New file.
* iconvdata/gconv-modules: Add a number of aliases, mostly for IBM codepages.
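The headline item above, the "last-bit accurate" (correctly rounded) routines, all share one control flow: a fast double or double-double stage is evaluated together with a proven error bound, a rounding test accepts that result only when the bound cannot move the rounded value, and the rare undecided cases fall through to the multiple-precision code (mpa.c and friends). The C sketch below is not from the commit; fast_stage, slow_stage and the error bound are invented stand-ins, shown only to illustrate that structure.

/* Conceptual sketch only -- not code from this commit.  Helper names and
   the error bound are invented for illustration.  */
#include <math.h>
#include <stdio.h>

/* Stand-in for the fast stage: an approximation split into a high and a
   low part plus a rigorous bound on its absolute error.  */
static void fast_stage (double x, double *hi, double *lo, double *bound)
{
  *hi = sin (x);                    /* placeholder for the table/polynomial stage */
  *lo = 0.0;
  *bound = 0x1p-55 * fabs (*hi);    /* invented bound, for illustration */
}

/* Stand-in for the slow stage; the real library converts to mp_no,
   computes in multiple precision and converts the result back.  */
static double slow_stage (double x)
{
  return sin (x);
}

/* Accept the fast result only if perturbing it by +/-bound cannot change
   the rounded double; otherwise escalate.  This is the shape of the test
   that appears throughout the new s_tan.c as
   if ((y=c+(cc-u.d*c)) == c+(cc+u.d*c)) return y;  */
static double accurate_sin (double x)
{
  double hi, lo, bound;
  fast_stage (x, &hi, &lo, &bound);
  double y = hi + (lo - bound);
  if (y == hi + (lo + bound))
    return y;                       /* correctly rounded already */
  return slow_stage (x);            /* rare case: multiple precision */
}

int main (void)
{
  printf ("%.17g\n", accurate_sin (1.0));
  return 0;
}

The equality test works because, when adding and subtracting the bound produces the same double, every value in the error interval rounds to that double, so the fast answer is already the correctly rounded one.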
Diffstat (limited to 'sysdeps/ieee754/dbl-64/s_tan.c')
-rw-r--r--   sysdeps/ieee754/dbl-64/s_tan.c   533
1 files changed, 466 insertions, 67 deletions
diff --git a/sysdeps/ieee754/dbl-64/s_tan.c b/sysdeps/ieee754/dbl-64/s_tan.c
index 714cf27dd2..2db8673389 100644
--- a/sysdeps/ieee754/dbl-64/s_tan.c
+++ b/sysdeps/ieee754/dbl-64/s_tan.c
@@ -1,81 +1,480 @@
-/* @(#)s_tan.c 5.1 93/09/24 */
 /*
- * ====================================================
- * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ * IBM Accurate Mathematical Library
+ * Copyright (c) International Business Machines Corp., 2001
  *
- * Developed at SunPro, a Sun Microsystems, Inc. business.
- * Permission to use, copy, modify, and distribute this
- * software is freely granted, provided that this notice
- * is preserved.
- * ====================================================
- */
-
-#if defined(LIBM_SCCS) && !defined(lint)
-static char rcsid[] = "$NetBSD: s_tan.c,v 1.7 1995/05/10 20:48:18 jtc Exp $";
-#endif
-
-/* tan(x)
- * Return tangent function of x.
- *
- * kernel function:
- *      __kernel_tan            ... tangent function on [-pi/4,pi/4]
- *      __ieee754_rem_pio2      ... argument reduction routine
- *
- * Method.
- *      Let S,C and T denote the sin, cos and tan respectively on
- *      [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
- *      in [-pi/4 , +pi/4], and let n = k mod 4.
- *      We have
- *
- *          n        sin(x)      cos(x)        tan(x)
- *     ----------------------------------------------------------
- *          0          S           C             T
- *          1          C          -S           -1/T
- *          2         -S          -C             T
- *          3         -C           S           -1/T
- *     ----------------------------------------------------------
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
  *
- * Special cases:
- *      Let trig be any of sin, cos, or tan.
- *      trig(+-INF)  is NaN, with signals;
- *      trig(NaN)    is that NaN;
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  *
- * Accuracy:
- *      TRIG(x) returns trig(x) nearly rounded
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
+/*********************************************************************/
+/*  MODULE_NAME: utan.c                                              */
+/*                                                                   */
+/*  FUNCTIONS: utan                                                  */
+/*             tanMp                                                 */
+/*                                                                   */
+/*  FILES NEEDED:dla.h endian.h mpa.h mydefs.h utan.h                */
+/*               branred.c sincos32.c mptan.c                        */
+/*               utan.tbl                                            */
+/*                                                                   */
+/* An ultimate tan routine. Given an IEEE double machine number x    */
+/* it computes the correctly rounded (to nearest) value of tan(x).   */
+/* Assumption: Machine arithmetic operations are performed in        */
+/* round to nearest mode of IEEE 754 standard.                       */
+/*                                                                   */
+/*********************************************************************/
+#include "endian.h"
+#include "dla.h"
+#include "mpa.h"
+#include "MathLib.h"
+static double tanMp(double);
+void __mptan(double, mp_no *, int);
-#include "math.h"
-#include "math_private.h"
+double tan(double x) {
+#include "utan.h"
+#include "utan.tbl"
-#ifdef __STDC__
-	double __tan(double x)
-#else
-	double __tan(x)
-	double x;
-#endif
-{
-	double y[2],z=0.0;
-	int32_t n, ix;
+  int ux,i,n;
+  double a,da,a2,b,db,c,dc,c1,cc1,c2,cc2,c3,cc3,fi,ffi,gi,pz,s,sy,
+  t,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,w,x2,xn,xx2,y,ya,yya,z0,z,zz,z2,zz2;
+  int p;
+  number num,v;
+  mp_no mpa,mpy,mpt1,mpt2;
+
+  int branred(double, double *, double *);
+  int mpranred(double, mp_no *, int);
+
+  /* x=+-INF, x=NaN */
+  num.d = x;  ux = num.i[HIGH_HALF];
+  if ((ux&0x7ff00000)==0x7ff00000) return x-x;
+
+  w=(x<ZERO) ? -x : x;
+
+  /* (I) The case abs(x) <= 1.259e-8 */
+  if (w<=g1.d)  return x;
+
+  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
+  if (w<=g2.d) {
+
+    /* First stage */
+    x2 = x*x;
+    t2 = x*x2*(d3.d+x2*(d5.d+x2*(d7.d+x2*(d9.d+x2*d11.d))));
+    if ((y=x+(t2-u1.d*t2)) == x+(t2+u1.d*t2))  return y;
+
+    /* Second stage */
+    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
+         x2*a27.d))))));
+    EMULV(x,x,x2,xx2,t1,t2,t3,t4,t5)
+    ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    MUL2(x ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(x ,zero.d,c2,cc2,c1,cc1,t1,t2)
+    if ((y=c1+(cc1-u2.d*c1)) == c1+(cc1+u2.d*c1))  return y;
+    return tanMp(x);
+  }
+
+  /* (III) The case 0.0608 < abs(x) <= 0.787 */
+  if (w<=g3.d) {
+
+    /* First stage */
+    i = ((int) (mfftnhf.d+TWO8*w));
+    z = w-xfg[i][0].d;  z2 = z*z;   s = (x<ZERO) ? MONE : ONE;
+    pz = z+z*z2*(e0.d+z2*e1.d);
+    fi = xfg[i][1].d;  gi = xfg[i][2].d;   t2 = pz*(gi+fi)/(gi-pz);
+    if ((y=fi+(t2-fi*u3.d))==fi+(t2+fi*u3.d))  return (s*y);
+    t3 = (t2<ZERO) ? -t2 : t2;
+    if ((y=fi+(t2-(t4=fi*ua3.d+t3*ub3.d)))==fi+(t2+t4))  return (s*y);
+
+    /* Second stage */
+    ffi = xfg[i][3].d;
+    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
+    EMULV(z,z,z2,zz2,t1,t2,t3,t4,t5)
+    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
+    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
+    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    MUL2(z ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(z ,zero.d,c2,cc2,c1,cc1,t1,t2)
+
+    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
+    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
+    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
+    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+
+    if ((y=c3+(cc3-u4.d*c3))==c3+(cc3+u4.d*c3))  return (s*y);
+    return tanMp(x);
+  }
+
+  /* (---) The case 0.787 < abs(x) <= 25 */
+  if (w<=g4.d) {
+    /* Range reduction by algorithm i */
+    t = (x*hpinv.d + toint.d);
+    xn = t - toint.d;
+    v.d = t;
+    t1 = (x - xn*mp1.d) - xn*mp2.d;
+    n =v.i[LOW_HALF] & 0x00000001;
+    da = xn*mp3.d;
+    a=t1-da;
+    da = (t1-a)-da;
+    if (a<ZERO)  {ya=-a;  yya=-da;  sy=MONE;}
+    else         {ya= a;  yya= da;  sy= ONE;}
+
+    /* (IV),(V) The case 0.787 < abs(x) <= 25,    abs(y) <= 1e-7 */
+    if (ya<=gy1.d)  return tanMp(x);
+
+    /* (VI) The case 0.787 < abs(x) <= 25,    1e-7 < abs(y) <= 0.0608 */
+    if (ya<=gy2.d) {
+      a2 = a*a;
+      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
+      if (n) {
+        /* First stage -cot */
+        EADD(a,t2,b,db)
+        DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+        if ((y=c+(dc-u6.d*c))==c+(dc+u6.d*c))  return (-y); }
+      else {
+        /* First stage tan */
+        if ((y=a+(t2-u5.d*a))==a+(t2+u5.d*a))  return y; }
+      /* Second stage */
+      /* Range reduction by algorithm ii */
+      t = (x*hpinv.d + toint.d);
+      xn = t - toint.d;
+      v.d = t;
+      t1 = (x - xn*mp1.d) - xn*mp2.d;
+      n =v.i[LOW_HALF] & 0x00000001;
+      da = xn*pp3.d;
+      t=t1-da;
+      da = (t1-t)-da;
+      t1 = xn*pp4.d;
+      a = t - t1;
+      da = ((t-a)-t1)+da;
+
+      /* Second stage */
+      EADD(a,da,t1,t2)   a=t1;  da=t2;
+      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
+      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
+           x2*a27.d))))));
+      ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
+
+      if (n) {
+        /* Second stage -cot */
+        DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+        if ((y=c2+(cc2-u8.d*c2)) == c2+(cc2+u8.d*c2))  return (-y); }
+      else {
+        /* Second stage tan */
+        if ((y=c1+(cc1-u7.d*c1)) == c1+(cc1+u7.d*c1))  return y; }
+      return tanMp(x);
+    }
+
+    /* (VII) The case 0.787 < abs(x) <= 25,    0.0608 < abs(y) <= 0.787 */
+
+    /* First stage */
+    i = ((int) (mfftnhf.d+TWO8*ya));
+    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
+    pz = z+z*z2*(e0.d+z2*e1.d);
+    fi = xfg[i][1].d;  gi = xfg[i][2].d;
-    /* High word of x. */
-	GET_HIGH_WORD(ix,x);
+    if (n) {
+      /* -cot */
+      t2 = pz*(fi+gi)/(fi+pz);
+      if ((y=gi-(t2-gi*u10.d))==gi-(t2+gi*u10.d))  return (-sy*y);
+      t3 = (t2<ZERO) ? -t2 : t2;
+      if ((y=gi-(t2-(t4=gi*ua10.d+t3*ub10.d)))==gi-(t2+t4))  return (-sy*y); }
+    else {
+      /* tan */
+      t2 = pz*(gi+fi)/(gi-pz);
+      if ((y=fi+(t2-fi*u9.d))==fi+(t2+fi*u9.d))  return (sy*y);
+      t3 = (t2<ZERO) ? -t2 : t2;
+      if ((y=fi+(t2-(t4=fi*ua9.d+t3*ub9.d)))==fi+(t2+t4))  return (sy*y); }
-    /* |x| ~< pi/4 */
-	ix &= 0x7fffffff;
-	if(ix <= 0x3fe921fb) return __kernel_tan(x,z,1);
+    /* Second stage */
+    ffi = xfg[i][3].d;
+    EADD(z0,yya,z,zz)
+    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
+    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
+    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
+    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
+    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
-    /* tan(Inf or NaN) is NaN */
-	else if (ix>=0x7ff00000) return x-x;		/* NaN */
+    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
+    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
+    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
-    /* argument reduction needed */
-	else {
-	    n = __ieee754_rem_pio2(x,y);
-	    return __kernel_tan(y[0],y[1],1-((n&1)<<1)); /*   1 -- n even
-							-1 -- n odd */
-	}
+    if (n) {
+      /* -cot */
+      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+      if ((y=c3+(cc3-u12.d*c3))==c3+(cc3+u12.d*c3))  return (-sy*y); }
+    else {
+      /* tan */
+      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+      if ((y=c3+(cc3-u11.d*c3))==c3+(cc3+u11.d*c3))  return (sy*y); }
+
+    return tanMp(x);
+  }
+
+  /* (---) The case 25 < abs(x) <= 1e8 */
+  if (w<=g5.d) {
+    /* Range reduction by algorithm ii */
+    t = (x*hpinv.d + toint.d);
+    xn = t - toint.d;
+    v.d = t;
+    t1 = (x - xn*mp1.d) - xn*mp2.d;
+    n =v.i[LOW_HALF] & 0x00000001;
+    da = xn*pp3.d;
+    t=t1-da;
+    da = (t1-t)-da;
+    t1 = xn*pp4.d;
+    a = t - t1;
+    da = ((t-a)-t1)+da;
+    EADD(a,da,t1,t2)   a=t1;  da=t2;
+    if (a<ZERO)  {ya=-a;  yya=-da;  sy=MONE;}
+    else         {ya= a;  yya= da;  sy= ONE;}
+
+    /* (+++) The case 25 < abs(x) <= 1e8,    abs(y) <= 1e-7 */
+    if (ya<=gy1.d)  return tanMp(x);
+
+    /* (VIII) The case 25 < abs(x) <= 1e8,    1e-7 < abs(y) <= 0.0608 */
+    if (ya<=gy2.d) {
+      a2 = a*a;
+      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
+      if (n) {
+        /* First stage -cot */
+        EADD(a,t2,b,db)
+        DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+        if ((y=c+(dc-u14.d*c))==c+(dc+u14.d*c))  return (-y); }
+      else {
+        /* First stage tan */
+        if ((y=a+(t2-u13.d*a))==a+(t2+u13.d*a))  return y; }
+
+      /* Second stage */
+      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
+      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
+           x2*a27.d))))));
+      ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
+      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
+      ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
+
+      if (n) {
+        /* Second stage -cot */
+        DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+        if ((y=c2+(cc2-u16.d*c2)) == c2+(cc2+u16.d*c2))  return (-y); }
+      else {
+        /* Second stage tan */
+        if ((y=c1+(cc1-u15.d*c1)) == c1+(cc1+u15.d*c1))  return (y); }
+      return tanMp(x);
+    }
+
+    /* (IX) The case 25 < abs(x) <= 1e8,    0.0608 < abs(y) <= 0.787 */
+    /* First stage */
+    i = ((int) (mfftnhf.d+TWO8*ya));
+    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
+    pz = z+z*z2*(e0.d+z2*e1.d);
+    fi = xfg[i][1].d;  gi = xfg[i][2].d;
+
+    if (n) {
+      /* -cot */
+      t2 = pz*(fi+gi)/(fi+pz);
+      if ((y=gi-(t2-gi*u18.d))==gi-(t2+gi*u18.d))  return (-sy*y);
+      t3 = (t2<ZERO) ? -t2 : t2;
+      if ((y=gi-(t2-(t4=gi*ua18.d+t3*ub18.d)))==gi-(t2+t4))  return (-sy*y); }
+    else {
+      /* tan */
+      t2 = pz*(gi+fi)/(gi-pz);
+      if ((y=fi+(t2-fi*u17.d))==fi+(t2+fi*u17.d))  return (sy*y);
+      t3 = (t2<ZERO) ? -t2 : t2;
+      if ((y=fi+(t2-(t4=fi*ua17.d+t3*ub17.d)))==fi+(t2+t4))  return (sy*y); }
+
+    /* Second stage */
+    ffi = xfg[i][3].d;
+    EADD(z0,yya,z,zz)
+    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
+    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
+    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
+    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
+    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
+
+    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
+    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
+    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
+
+    if (n) {
+      /* -cot */
+      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+      if ((y=c3+(cc3-u20.d*c3))==c3+(cc3+u20.d*c3))  return (-sy*y); }
+    else {
+      /* tan */
+      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+      if ((y=c3+(cc3-u19.d*c3))==c3+(cc3+u19.d*c3))  return (sy*y); }
+    return tanMp(x);
+  }
+
+  /* (---) The case 1e8 < abs(x) < 2**1024 */
+  /* Range reduction by algorithm iii */
+  n = (branred(x,&a,&da)) & 0x00000001;
+  EADD(a,da,t1,t2)   a=t1;  da=t2;
+  if (a<ZERO)  {ya=-a;  yya=-da;  sy=MONE;}
+  else         {ya= a;  yya= da;  sy= ONE;}
+
+  /* (+++) The case 1e8 < abs(x) < 2**1024,    abs(y) <= 1e-7 */
+  if (ya<=gy1.d)  return tanMp(x);
+
+  /* (X) The case 1e8 < abs(x) < 2**1024,    1e-7 < abs(y) <= 0.0608 */
+  if (ya<=gy2.d) {
+    a2 = a*a;
+    t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
+    if (n) {
+      /* First stage -cot */
+      EADD(a,t2,b,db)
+      DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+      if ((y=c+(dc-u22.d*c))==c+(dc+u22.d*c))  return (-y); }
+    else {
+      /* First stage tan */
+      if ((y=a+(t2-u21.d*a))==a+(t2+u21.d*a))  return y; }
+
+    /* Second stage */
+    /* Reduction by algorithm iv */
+    p=10;    n = (mpranred(x,&mpa,p)) & 0x00000001;
+    mp_dbl(&mpa,&a,p);        dbl_mp(a,&mpt1,p);
+    sub(&mpa,&mpt1,&mpt2,p);  mp_dbl(&mpt2,&da,p);
+
+    MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
+    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
+         x2*a27.d))))));
+    ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
+    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+    MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
+    ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
+
+    if (n) {
+      /* Second stage -cot */
+      DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+      if ((y=c2+(cc2-u24.d*c2)) == c2+(cc2+u24.d*c2))  return (-y); }
+    else {
+      /* Second stage tan */
+      if ((y=c1+(cc1-u23.d*c1)) == c1+(cc1+u23.d*c1))  return y; }
+
+    return tanMp(x);
+  }
+
+  /* (XI) The case 1e8 < abs(x) < 2**1024,    0.0608 < abs(y) <= 0.787 */
+  /* First stage */
+  i = ((int) (mfftnhf.d+TWO8*ya));
+  z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
+  pz = z+z*z2*(e0.d+z2*e1.d);
+  fi = xfg[i][1].d;  gi = xfg[i][2].d;
+
+  if (n) {
+    /* -cot */
+    t2 = pz*(fi+gi)/(fi+pz);
+    if ((y=gi-(t2-gi*u26.d))==gi-(t2+gi*u26.d))  return (-sy*y);
+    t3 = (t2<ZERO) ? -t2 : t2;
+    if ((y=gi-(t2-(t4=gi*ua26.d+t3*ub26.d)))==gi-(t2+t4))  return (-sy*y); }
+  else {
+    /* tan */
+    t2 = pz*(gi+fi)/(gi-pz);
+    if ((y=fi+(t2-fi*u25.d))==fi+(t2+fi*u25.d))  return (sy*y);
+    t3 = (t2<ZERO) ? -t2 : t2;
+    if ((y=fi+(t2-(t4=fi*ua25.d+t3*ub25.d)))==fi+(t2+t4))  return (sy*y); }
+
+  /* Second stage */
+  ffi = xfg[i][3].d;
+  EADD(z0,yya,z,zz)
+  MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
+  c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
+  ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
+  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+  ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
+  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
+  MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
+  ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
+
+  ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
+  MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
+  SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
+
+  if (n) {
+    /* -cot */
+    DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+    if ((y=c3+(cc3-u28.d*c3))==c3+(cc3+u28.d*c3))  return (-sy*y); }
+  else {
+    /* tan */
+    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
+    if ((y=c3+(cc3-u27.d*c3))==c3+(cc3+u27.d*c3))  return (sy*y); }
+  return tanMp(x);
+}
+
+
+/* multiple precision stage                                              */
+/* Convert x to multi precision number,compute tan(x) by mptan() routine */
+/* and converts result back to double                                    */
+static double tanMp(double x)
+{
+  int p;
+  double y;
+  mp_no mpy;
+  p=32;
+  __mptan(x, &mpy, p);
+  __mp_dbl(&mpy,&y,p);
+  return y;
 }
-weak_alias (__tan, tan)
+
 #ifdef NO_LONG_DOUBLE
-strong_alias (__tan, __tanl)
-weak_alias (__tan, tanl)
+weak_alias (tan, tanl)
 #endif
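A recurring building block in the new s_tan.c above is the range reduction for medium arguments: x*(2/pi) is rounded to the nearest integer by adding and subtracting a large constant (toint.d in the diff, conventionally 1.5*2^52, so the FPU's round-to-nearest does the work), pi/2 is then subtracted in several exactly representable pieces, and the parity of the integer selects tan versus -cot. The sketch below illustrates only that pattern: the names mirror hpinv, toint and the mp1/mp2/mp3 split from the diff, but the constants here are a textbook Cody-Waite split rather than the commit's tables, and the double-double bookkeeping of the real code is omitted.

/* Illustration of the add-a-big-constant reduction; not the committed code. */
#include <stdio.h>
#include <stdint.h>

static const double hpinv = 0x1.45f306dc9c883p-1;   /* ~ 2/pi */
static const double toint = 6755399441055744.0;     /* 1.5 * 2^52 */
/* pi/2 split into pieces with trailing zero bits so xn*piece is exact
   (simplified three-part split, not the diff's mp1/mp2/mp3 tables).  */
static const double pio2_1 = 0x1.921fb544p+0;
static const double pio2_2 = 0x1.0b4611a6p-34;
static const double pio2_3 = 0x1.3198a2e037073p-69;

/* Reduce x to r ~ x - n*pi/2 and report n's parity; for tan, odd n means
   the result is -cot of the reduced argument.  */
static double reduce (double x, int *odd)
{
  double t  = x * hpinv + toint;        /* forces rounding of x*2/pi ...  */
  double xn = t - toint;                /* ... to the nearest integer     */
  *odd = (int) ((int64_t) xn & 1);
  double r = (x - xn * pio2_1) - xn * pio2_2;
  return r - xn * pio2_3;
}

int main (void)
{
  int odd;
  double r = reduce (10.0, &odd);
  printf ("reduced = %.17g, odd = %d\n", r, odd);
  return 0;
}

With this reduction in hand, tan(x) is tan(r) for even n and -1/tan(r) for odd n, which is exactly the tan / -cot case split the diff performs in its (VI) through (XI) branches.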