/* Optimized cosf function.
   Copyright (C) 2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#define __need_Emath
#include <bits/errno.h>

/* Short algorithm description:
 *
 *  1) if |x| == 0: return 1.0-|x|.
 *  2) if |x| <  2^-27: return 1.0-|x|.
 *  3) if |x| <  2^-5 : return 1.0+x^2*DP_COS2_0+x^4*DP_COS2_1.
 *  4) if |x| <  Pi/4: return 1.0+x^2*(C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4)))).
 *  5) if |x| < 9*Pi/4:
 *      5.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0x0e, n=k+3,
 *           t=|x|-j*Pi/4.
 *      5.2) Reconstruction:
 *          s = (-1.0)^((n>>2)&1)
 *          if(n&2 != 0) {
 *              using cos(t) polynomial for |t|<Pi/4, result is
 *              s     * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4))))).
 *          } else {
 *              using sin(t) polynomial for |t|<Pi/4, result is
 *              s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4))))).
 *          }
 *  6) if |x| < 2^23, large args:
 *      6.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+3,
 *           t=|x|-j*Pi/4.
 *      6.2) Reconstruction same as (5.2).
 *  7) if |x| >= 2^23, very large args:
 *      7.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+3,
 *           t=|x|-j*Pi/4.
 *      7.2) Reconstruction same as (5.2).
 *  8) if x is Inf, return x-x, and set errno=EDOM.
 *  9) if x is NaN, return x-x.
 *
 * Special cases:
 *  cos(+-0) = 1 not raising inexact,
 *  cos(subnormal) raises inexact,
 *  cos(min_normalized) raises inexact,
 *  cos(normalized) raises inexact,
 *  cos(Inf) = NaN, raises invalid, sets errno to EDOM,
 *  cos(NaN) = NaN.
 */

	.text
ENTRY(__cosf)
	/* Input: single precision x in %xmm0 */

	movd	%xmm0, %eax		/* Bits of x */
	movaps	%xmm0, %xmm7		/* Copy of x */
	cvtss2sd %xmm0, %xmm0		/* DP x */
	movss	L(SP_ABS_MASK)(%rip), %xmm3
	andl	$0x7fffffff, %eax	/* |x| */

	cmpl	$0x3f490fdb, %eax	/* |x|<Pi/4?  */
	jb	L(arg_less_pio4)

	/* Here if |x|>=Pi/4 */
	andps	%xmm7, %xmm3		/* SP |x| */
	andpd	L(DP_ABS_MASK)(%rip), %xmm0 /* DP |x| */
	movss	L(SP_INVPIO4)(%rip), %xmm2 /* SP 1/(Pi/4) */

	cmpl	$0x40e231d6, %eax	/* |x|<9*Pi/4?  */
	jae	L(large_args)

	/* Here if Pi/4<=|x|<9*Pi/4 */
	mulss	%xmm3, %xmm2		/* SP |x|/(Pi/4) */
	cvttss2si %xmm2, %eax		/* k, number of Pi/4 in x */
	lea	L(PIO4J)(%rip), %rsi
	addl	$1, %eax		/* k+1 */
	movl	$0x0e, %edx
	andl	%eax, %edx		/* j = (k+1)&0x0e */
	addl	$2, %eax		/* n */
	subsd	(%rsi,%rdx,8), %xmm0	/* t = |x| - j * Pi/4 */
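/* Illustrative note: the fall-through into L(reconstruction) relies on
 * the identity cos(j*Pi/4 + t) = s*cos(t) or s*sin(t), where n&2
 * selects the polynomial and s = (-1.0)^((n>>2)&1) the sign.  A minimal
 * C model of this dispatch, with libm's cos/sin standing in for the
 * polynomials (cosf_quadrant_model is a hypothetical name, not part of
 * this file):
 *
 *	#include <math.h>
 *	static double cosf_quadrant_model (double ax) // Pi/4 <= ax < 9*Pi/4
 *	{
 *	  int k = (int) (ax / (M_PI / 4.0)); // k = trunc(|x|/(Pi/4))
 *	  int j = (k + 1) & 0x0e;            // round k to an even count
 *	  int n = k + 3;
 *	  double t = ax - j * (M_PI / 4.0);  // |t| <= Pi/4
 *	  double s = ((n >> 2) & 1) ? -1.0 : 1.0;
 *	  return (n & 2) ? s * cos (t) : s * sin (t);
 *	}
 *
 * The L(large_args) path further down performs the same reduction with
 * mask 0xfffffffe and a two-part Pi/4 (PIO4HI + PIO4LO) so that t stays
 * accurate for much larger j.
 */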
L(reconstruction):
	/* Input: %eax=n, %xmm0=t */
	testl	$2, %eax		/* n&2 != 0?  */
	jz	L(sin_poly)

/*L(cos_poly):*/
	/* Here if cos(x) calculated using cos(t) polynomial for |t|<Pi/4:
	 * y = t*t; z = y*y;
	 * s = sign(x) * (-1.0)^((n>>2)&1)
	 * result = s * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4)))))
	 */
	shrl	$2, %eax		/* n>>2 */
	mulsd	%xmm0, %xmm0		/* y=t^2 */
	andl	$1, %eax		/* (n>>2)&1 */
	movaps	%xmm0, %xmm1		/* y */
	mulsd	%xmm0, %xmm0		/* z=t^4 */
	movsd	L(DP_C4)(%rip), %xmm4	/* C4 */
	mulsd	%xmm0, %xmm4		/* z*C4 */
	movsd	L(DP_C3)(%rip), %xmm3	/* C3 */
	mulsd	%xmm0, %xmm3		/* z*C3 */
	lea	L(DP_ONES)(%rip), %rsi
	addsd	L(DP_C2)(%rip), %xmm4	/* C2+z*C4 */
	mulsd	%xmm0, %xmm4		/* z*(C2+z*C4) */
	addsd	L(DP_C1)(%rip), %xmm3	/* C1+z*C3 */
	mulsd	%xmm0, %xmm3		/* z*(C1+z*C3) */
	addsd	L(DP_C0)(%rip), %xmm4	/* C0+z*(C2+z*C4) */
	mulsd	%xmm1, %xmm4		/* y*(C0+z*(C2+z*C4)) */
	addsd	%xmm4, %xmm3		/* y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
	/* 1.0+y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
	addsd	L(DP_ONES)(%rip), %xmm3
	mulsd	(%rsi,%rax,8), %xmm3	/* DP result */
	cvtsd2ss %xmm3, %xmm0		/* SP result */
	ret

	.p2align 4
L(sin_poly):
	/* Here if cos(x) calculated using sin(t) polynomial for |t|<Pi/4:
	 * y = t*t; z = y*y;
	 * s = sign(x) * (-1.0)^((n>>2)&1)
	 * result = s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4)))))
	 */
	movaps	%xmm0, %xmm4		/* t */
	shrl	$2, %eax		/* n>>2 */
	mulsd	%xmm0, %xmm0		/* y=t^2 */
	andl	$1, %eax		/* (n>>2)&1 */
	movaps	%xmm0, %xmm1		/* y */
	mulsd	%xmm0, %xmm0		/* z=t^4 */
	movsd	L(DP_S4)(%rip), %xmm2	/* S4 */
	mulsd	%xmm0, %xmm2		/* z*S4 */
	movsd	L(DP_S3)(%rip), %xmm3	/* S3 */
	mulsd	%xmm0, %xmm3		/* z*S3 */
	lea	L(DP_ONES)(%rip), %rsi
	addsd	L(DP_S2)(%rip), %xmm2	/* S2+z*S4 */
	mulsd	%xmm0, %xmm2		/* z*(S2+z*S4) */
	addsd	L(DP_S1)(%rip), %xmm3	/* S1+z*S3 */
	mulsd	%xmm0, %xmm3		/* z*(S1+z*S3) */
	addsd	L(DP_S0)(%rip), %xmm2	/* S0+z*(S2+z*S4) */
	mulsd	%xmm1, %xmm2		/* y*(S0+z*(S2+z*S4)) */
	/* t*s, where s = sign(x) * (-1.0)^((n>>2)&1) */
	mulsd	(%rsi,%rax,8), %xmm4
	/* y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
	addsd	%xmm2, %xmm3
	/* t*s*y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
	mulsd	%xmm4, %xmm3
	/* t*s*(1.0+y*(S0+y*(S1+y*(S2+y*(S3+y*S4))))) */
	addsd	%xmm4, %xmm3
	cvtsd2ss %xmm3, %xmm0		/* SP result */
	ret
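/* Illustrative note: both polynomial blocks above evaluate a degree-4
 * polynomial in y = t^2 with the coefficients split into two chains in
 * z = y^2; the chains carry no data dependence on each other and can
 * execute in parallel.  A C sketch of the cos-branch factorization
 * (C0..C4 stand for the constants in this file's data section):
 *
 *	double y = t * t, z = y * y;
 *	double even = C0 + z * (C2 + z * C4);	// one dependency chain
 *	double odd  = z * (C1 + z * C3);	// the other chain
 *	double r = 1.0 + (y * even + odd);
 *	// == 1.0 + y*(C0+y*(C1+y*(C2+y*(C3+y*C4))))
 *	return s * r;
 */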
	.p2align 4
L(large_args):
	/* Here if |x|>=9*Pi/4 */
	cmpl	$0x7f800000, %eax	/* x is Inf or NaN?  */
	jae	L(arg_inf_or_nan)

	/* Here if finite |x|>=9*Pi/4 */
	cmpl	$0x4b000000, %eax	/* |x|<2^23?  */
	jae	L(very_large_args)

	/* Here if 9*Pi/4<=|x|<2^23 */
	movsd	L(DP_INVPIO4)(%rip), %xmm1 /* 1/(Pi/4) */
	mulsd	%xmm0, %xmm1		/* |x|/(Pi/4) */
	cvttsd2si %xmm1, %eax		/* k=trunc(|x|/(Pi/4)) */
	addl	$1, %eax		/* k+1 */
	movl	%eax, %edx
	andl	$0xfffffffe, %edx	/* j=(k+1)&0xfffffffe */
	cvtsi2sdl %edx, %xmm4		/* DP j */
	movsd	L(DP_PIO4HI)(%rip), %xmm2 /* -PIO4HI = high part of -Pi/4 */
	mulsd	%xmm4, %xmm2		/* -j*PIO4HI */
	movsd	L(DP_PIO4LO)(%rip), %xmm3 /* -PIO4LO = low part of -Pi/4 */
	addsd	%xmm2, %xmm0		/* |x| - j*PIO4HI */
	addl	$2, %eax		/* n */
	mulsd	%xmm3, %xmm4		/* j*PIO4LO */
	addsd	%xmm4, %xmm0		/* t = |x| - j*PIO4HI - j*PIO4LO */
	jmp	L(reconstruction)

	.p2align 4
L(very_large_args):
	/* Here if finite |x|>=2^23 */

	/* bitpos = (ix>>23) - BIAS_32 + 59; */
	shrl	$23, %eax		/* eb = biased exponent of x */
	/* bitpos = eb - 0x7f + 59, where 0x7f is exponent bias */
	subl	$68, %eax
	movl	$28, %ecx		/* %cl=28 */
	movl	%eax, %edx		/* bitpos copy */

	/* j = bitpos/28; */
	div	%cl			/* j in register %al=%ax/%cl */
	movapd	%xmm0, %xmm3		/* |x| */
	/* clear unneeded remainder from %ah */
	andl	$0xff, %eax

	imull	$28, %eax, %ecx		/* j*28 */
	lea	L(_FPI)(%rip), %rsi
	movsd	L(DP_HI_MASK)(%rip), %xmm4 /* DP_HI_MASK */
	movapd	%xmm0, %xmm5		/* |x| */
	mulsd	-16(%rsi,%rax,8), %xmm3	/* tmp3 = FPI[j-2]*|x| */
	movapd	%xmm0, %xmm1		/* |x| */
	mulsd	-8(%rsi,%rax,8), %xmm5	/* tmp2 = FPI[j-1]*|x| */
	mulsd	(%rsi,%rax,8), %xmm0	/* tmp0 = FPI[j]*|x| */
	addl	$19, %ecx		/* j*28+19 */
	mulsd	8(%rsi,%rax,8), %xmm1	/* tmp1 = FPI[j+1]*|x| */
	cmpl	%ecx, %edx		/* bitpos>=j*28+19?  */
	jl	L(very_large_skip1)

	/* Here if bitpos>=j*28+19 */
	andpd	%xmm3, %xmm4		/* HI(tmp3) */
	subsd	%xmm4, %xmm3		/* tmp3 = tmp3 - HI(tmp3) */
L(very_large_skip1):

	movsd	L(DP_2POW52)(%rip), %xmm6
	movapd	%xmm5, %xmm2		/* tmp2 copy */
	addsd	%xmm3, %xmm5		/* tmp5 = tmp3 + tmp2 */
	movl	$1, %edx
	addsd	%xmm5, %xmm6		/* tmp6 = tmp5 + 2^52 */
	movsd	8+L(DP_2POW52)(%rip), %xmm4
	movd	%xmm6, %eax		/* k = I64_LO(tmp6); */
	addsd	%xmm6, %xmm4		/* tmp4 = tmp6 - 2^52 */
	comisd	%xmm5, %xmm4		/* tmp4 > tmp5?  */
	jbe	L(very_large_skip2)

	/* Here if tmp4 > tmp5 */
	subl	$1, %eax		/* k-- */
	addsd	8+L(DP_ONES)(%rip), %xmm4 /* tmp4 -= 1.0 */
L(very_large_skip2):

	andl	%eax, %edx		/* k&1 */
	lea	L(DP_ZERONE)(%rip), %rsi
	subsd	%xmm4, %xmm3		/* tmp3 -= tmp4 */
	addsd	(%rsi,%rdx,8), %xmm3	/* t = DP_ZERONE[k&1] + tmp3 */
	addsd	%xmm2, %xmm3		/* t += tmp2 */
	addsd	%xmm3, %xmm0		/* t += tmp0 */
	addl	$3, %eax		/* n=k+3 */
	addsd	%xmm1, %xmm0		/* t += tmp1 */
	mulsd	L(DP_PIO4)(%rip), %xmm0	/* t *= PIO4 */
	jmp	L(reconstruction)	/* end of very_large_args path */

	.p2align 4
L(arg_less_pio4):
	/* Here if |x|<Pi/4 */
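/* Illustrative note: the source is truncated here; per steps 1)-4) of
 * the header description, L(arg_less_pio4) handles |x|<Pi/4.  A minimal
 * C model of steps 1)-3) (cosf_small_model is a hypothetical name; the
 * coefficients shown are the Taylor values -1/2 and 1/24, standing in
 * for the file's actual DP_COS2_0/DP_COS2_1 constants):
 *
 *	static double cosf_small_model (double ax) // ax = |x| < 2^-5
 *	{
 *	  if (ax < 0x1p-27)            // steps 1)-2): cos(x) ~= 1
 *	    return 1.0 - ax;           // exactly 1.0 only for ax == 0
 *	  double y = ax * ax;          // step 3): short polynomial
 *	  return 1.0 - 0.5 * y + (y * y) * (1.0 / 24.0);
 *	}
 *
 * For 2^-5 <= |x| < Pi/4, step 4) evaluates the same C0..C4 polynomial
 * as the cos branch of L(reconstruction), without the quadrant logic.
 */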
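/* Illustrative note: in L(very_large_args) above, the quadrant count k
 * is recovered from tmp5 (the leading part of |x| times the stored
 * 4/Pi chunks) with the 2^52 trick: adding 2^52 to a double in
 * [0, 2^51) rounds away its fraction bits and leaves the integer part
 * in the low bits of the significand.  A runnable C equivalent
 * (trick_floor is a hypothetical name; assumes round-to-nearest mode):
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	static int trick_floor (double v)  // 0 <= v < 2^31
 *	{
 *	  double tmp6 = v + 0x1p52;        // integer now in low bits
 *	  uint64_t u;
 *	  memcpy (&u, &tmp6, sizeof u);
 *	  int k = (int) (uint32_t) u;      // k = I64_LO(tmp6)
 *	  double tmp4 = tmp6 - 0x1p52;     // v rounded to nearest integer
 *	  if (tmp4 > v)                    // rounded up: correct to floor
 *	    k -= 1;
 *	  return k;
 *	}
 */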