-rw-r--r--  ChangeLog                                      |  28
-rw-r--r--  math/libm-test.inc                             |  21
-rw-r--r--  sysdeps/i386/i686/fpu/multiarch/Makefile       |   2
-rw-r--r--  sysdeps/i386/i686/fpu/multiarch/e_expf.c       |  18
-rw-r--r--  sysdeps/i386/i686/fpu/multiarch/s_cosf-sse2.S  | 559
-rw-r--r--  sysdeps/i386/i686/fpu/multiarch/s_cosf.c       |  29
-rw-r--r--  sysdeps/i386/i686/fpu/multiarch/s_sinf-sse2.S  | 575
-rw-r--r--  sysdeps/i386/i686/fpu/multiarch/s_sinf.c       |  28
-rw-r--r--  sysdeps/ieee754/flt-32/s_cosf.c                |  11
-rw-r--r--  sysdeps/ieee754/flt-32/s_sinf.c                |  11
-rw-r--r--  sysdeps/x86_64/fpu/libm-test-ulps              |  35
-rw-r--r--  sysdeps/x86_64/fpu/s_cosf.S                    | 535
-rw-r--r--  sysdeps/x86_64/fpu/s_sinf.S                    | 558
13 files changed, 2403 insertions, 7 deletions
diff --git a/ChangeLog b/ChangeLog
index ef7477d920..dd40cb8342 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,31 @@
+2012-09-03  Liubov Dmitrieva  <liubov.dmitrieva@gmail.com>
+
+	* sysdeps/i386/i686/fpu/multiarch/Makefile (libm-sysdep_routines):
+	Add s_sinf-sse2, s_cosf-sse2.
+
+	* sysdeps/i386/i686/fpu/multiarch/s_sinf-sse2.S: New file.
+	* sysdeps/i386/i686/fpu/multiarch/s_cosf-sse2.S: New file.
+	* sysdeps/i386/i686/fpu/multiarch/s_sinf.c: New file.
+	* sysdeps/i386/i686/fpu/multiarch/s_cosf.c: New file.
+
+	* sysdeps/ieee754/flt-32/s_sinf.c (SINF, SINF_FUNC): Add macros
+	for using routine as __sinf_ia32.
+	Use macro for function declaration and weak_alias.
+	* sysdeps/ieee754/flt-32/s_cosf.c (COSF, COSF_FUNC): Add macros
+	for using routine as __cosf_ia32.
+	Use macro for function declaration and weak_alias.
+
+	* sysdeps/i386/i686/fpu/multiarch/e_expf-sse2.S: Fix Copyright.
+	* sysdeps/i386/i686/fpu/multiarch/e_expf.c: Fix Copyright.
+
+	* sysdeps/x86_64/fpu/s_sinf.S: New file.
+	* sysdeps/x86_64/fpu/s_cosf.S: New file.
+	* sysdeps/x86_64/fpu/libm-test-ulps: Update.
+
+	* math/libm-test.inc (cos_test): Add more test cases.
+	(sin_test): Likewise.
+	(sincos_test): Likewise.
+
 2012-09-03  Andreas Krebbel  <Andreas.Krebbel@de.ibm.com>
 
 	* sysdeps/s390/s390-32/multiarch/ifunc-resolve.c
diff --git a/math/libm-test.inc b/math/libm-test.inc
index 0f64ea4971..fa781fc592 100644
--- a/math/libm-test.inc
+++ b/math/libm-test.inc
@@ -2752,6 +2752,13 @@ cos_test (void)
   TEST_f_f (cos, 0x1p16383L, 0.9210843909921906206874509522505756251609L);
 #endif
 
+  TEST_f_f (cos, 0x1p+120, -9.25879022854837867303861764107414946730833e-01L);
+  TEST_f_f (cos, 0x1p+127, 7.81914638714960072263910298466369236613162e-01L);
+  TEST_f_f (cos, 0x1.fffff8p+127, 9.98819362551949040703862043664101081064641e-01L);
+  TEST_f_f (cos, 0x1.fffffep+127, 8.53021039830304158051791467692161107353094e-01L);
+  TEST_f_f (cos, 0x1p+50, 8.68095904660550604334592502063501320395739e-01L);
+  TEST_f_f (cos, 0x1p+28, -1.65568979490578758865468278195361551113358e-01L);
+
   END (cos);
 }
 
@@ -8179,6 +8186,13 @@ sin_test (void)
   TEST_f_f (sin, 0x1p16383L, 0.3893629985894208126948115852610595405563L);
 #endif
 
+  TEST_f_f (sin, 0x1p+120, 3.77820109360752022655548470056922991960587e-01L);
+  TEST_f_f (sin, 0x1p+127, 6.23385512955870240370428801097126489001833e-01L);
+  TEST_f_f (sin, 0x1.fffff8p+127, 4.85786063130487339701113680434728152037092e-02L);
+  TEST_f_f (sin, 0x1.fffffep+127, -5.21876523333658540551505357019806722935726e-01L);
+  TEST_f_f (sin, 0x1p+50, 4.96396515208940840876821859865411368093356e-01L);
+  TEST_f_f (sin, 0x1p+28, -9.86198211836975655703110310527108292055548e-01L);
+
   END (sin);
 
 }
@@ -8362,6 +8376,13 @@ sincos_test (void)
   TEST_extra (sincos, 0x1p16383L, 0.3893629985894208126948115852610595405563L, 0.9210843909921906206874509522505756251609L);
 #endif
 
+  TEST_extra (sincos, 0x1p+120, 3.77820109360752022655548470056922991960587e-01L, -9.25879022854837867303861764107414946730833e-01L);
+  TEST_extra (sincos, 0x1p+127, 6.23385512955870240370428801097126489001833e-01L, 7.81914638714960072263910298466369236613162e-01L);
+  TEST_extra (sincos, 0x1.fffff8p+127, 4.85786063130487339701113680434728152037092e-02L, 9.98819362551949040703862043664101081064641e-01L);
+  TEST_extra (sincos, 0x1.fffffep+127, -5.21876523333658540551505357019806722935726e-01L, 8.53021039830304158051791467692161107353094e-01L);
+  TEST_extra (sincos, 0x1p+50, 4.96396515208940840876821859865411368093356e-01L, 8.68095904660550604334592502063501320395739e-01L);
+  TEST_extra (sincos, 0x1p+28, -9.86198211836975655703110310527108292055548e-01L, -1.65568979490578758865468278195361551113358e-01L);
+
   END (sincos);
 }
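
The new cases above push arguments such as 2^28, 2^50, 2^120 and values just below FLT_MAX through the large-argument range-reduction paths added by this patch.  A quick standalone check, not part of the patch, against the reference value quoted in the sin_test hunk could look like this (assumes a C99 compiler and linking with -lm):

#include <math.h>
#include <stdio.h>

int
main (void)
{
  /* 0x1p+28 is exactly representable as a float; the reference value is
     the one used by the new TEST_f_f (sin, 0x1p+28, ...) case above.  */
  float x = 0x1p+28f;
  double reference = -9.86198211836975655703110310527108292055548e-01;
  float got = sinf (x);
  printf ("sinf(2^28) = %.9g  (reference %.9g, diff %.3g)\n",
          (double) got, reference, (double) got - reference);
  return 0;
}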
 
diff --git a/sysdeps/i386/i686/fpu/multiarch/Makefile b/sysdeps/i386/i686/fpu/multiarch/Makefile
index 1de37fe4aa..aa28f72841 100644
--- a/sysdeps/i386/i686/fpu/multiarch/Makefile
+++ b/sysdeps/i386/i686/fpu/multiarch/Makefile
@@ -1,3 +1,3 @@
 ifeq ($(subdir),math)
-libm-sysdep_routines += e_expf-sse2 e_expf-ia32
+libm-sysdep_routines += e_expf-sse2 e_expf-ia32 s_sinf-sse2 s_cosf-sse2
 endif
diff --git a/sysdeps/i386/i686/fpu/multiarch/e_expf.c b/sysdeps/i386/i686/fpu/multiarch/e_expf.c
index 65858a3113..1966400d1b 100644
--- a/sysdeps/i386/i686/fpu/multiarch/e_expf.c
+++ b/sysdeps/i386/i686/fpu/multiarch/e_expf.c
@@ -1,3 +1,21 @@
+/* Multiple versions of expf
+   Copyright (C) 2012 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
 #include <init-arch.h>
 
 extern double __ieee754_expf_sse2 (double);
diff --git a/sysdeps/i386/i686/fpu/multiarch/s_cosf-sse2.S b/sysdeps/i386/i686/fpu/multiarch/s_cosf-sse2.S
new file mode 100644
index 0000000000..2b5a2a5ae2
--- /dev/null
+++ b/sysdeps/i386/i686/fpu/multiarch/s_cosf-sse2.S
@@ -0,0 +1,559 @@
+/* SSE2-optimized version of cosf
+   Copyright (C) 2012 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#define __need_Emath
+#include <bits/errno.h>
+
+/* Short algorithm description:
+ *
+ *  1) if |x| == 0: return 1.0-|x|.
+ *  2) if |x| <  2^-27: return 1.0-|x|.
+ *  3) if |x| <  2^-5 : return 1.0+x^2*DP_COS2_0+x^4*DP_COS2_1.
+ *  4) if |x| <   Pi/4: return 1.0+x^2*(C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4)))).
+ *  5) if |x| < 9*Pi/4:
+ *      5.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0x0e, n=k+3,
+ *           t=|x|-j*Pi/4.
+ *      5.2) Reconstruction:
+ *          s = (-1.0)^((n>>2)&1)
+ *          if(n&2 != 0) {
+ *              using cos(t) polynomial for |t|<Pi/4, result is
+ *              s     * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4))))).
+ *          } else {
+ *              using sin(t) polynomial for |t|<Pi/4, result is
+ *              s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4))))).
+ *          }
+ *  6) if |x| < 2^23, large args:
+ *      6.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+3,
+ *           t=|x|-j*Pi/4.
+ *      6.2) Reconstruction same as (5.2).
+ *  7) if |x| >= 2^23, very large args:
+ *      7.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+3,
+ *           t=|x|-j*Pi/4.
+ *      7.2) Reconstruction same as (5.2).
+ *  8) if x is Inf, return x-x, and set errno=EDOM.
+ *  9) if x is NaN, return x-x.
+ *
+ * Special cases:
+ *  cos(+-0)==1 not raising inexact/underflow,
+ *  cos(subnormal) raises inexact/underflow
+ *  cos(min_normalized) raises inexact/underflow
+ *  cos(normalized) raises inexact
+ *  cos(Inf) = NaN, raises invalid, sets errno to EDOM
+ *  cos(NaN) = NaN
+ */
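
As a rough C sketch of steps 5.1/5.2 above (illustration only, not the patch's code: the helper name cosf_medium_sketch is made up for this note, the libm cos/sin stand in for the DP_C*/DP_S* polynomials, and only the Pi/4 <= |x| < 9*Pi/4 case is covered):

#include <math.h>

static float
cosf_medium_sketch (float x)
{
  const double pio4 = 0x1.921fb54442d18p-1;	/* matches DP_PIO4 */
  double ax = fabs ((double) x);
  int k = (int) (ax / pio4);		/* trunc(|x|/(Pi/4)) */
  int j = (k + 1) & 0x0e;
  int n = k + 3;
  double t = ax - j * pio4;		/* reduced argument, |t| < Pi/4 */
  double s = ((n >> 2) & 1) ? -1.0 : 1.0;
  return (float) (s * ((n & 2) ? cos (t) : sin (t)));
}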
+
+#ifdef	PIC
+# define MO1(symbol)			L(symbol)##@GOTOFF(%ebx)
+# define MO2(symbol,reg2,_scale)	L(symbol)##@GOTOFF(%ebx,reg2,_scale)
+# define SAVE_BX			pushl	%ebx
+# define RESTORE_BX			popl	%ebx
+# define ARG_X				8(%esp)
+#else
+# define MO1(symbol)			L(symbol)
+# define MO2(symbol,reg2,_scale)	L(symbol)(,reg2,_scale)
+# define SAVE_BX
+# define RESTORE_BX
+# define ARG_X				4(%esp)
+#endif
+
+	.text
+ENTRY(__cosf_sse2)
+	/* Input: single precision x on stack at address ARG_X */
+
+#ifdef	PIC
+	SAVE_BX
+	LOAD_PIC_REG(bx)
+#endif
+
+	movl	ARG_X, %eax		/* Bits of x */
+	cvtss2sd ARG_X, %xmm0		/* DP x */
+	andl	$0x7fffffff, %eax	/* |x| */
+
+	cmpl	$0x3f490fdb, %eax	/* |x|<Pi/4?  */
+	jb	L(arg_less_pio4)
+
+	/* Here if |x|>=Pi/4 */
+	movd	%eax, %xmm3		/* SP |x| */
+	andpd	MO1(DP_ABS_MASK),%xmm0	/* DP |x| */
+	movss	MO1(SP_INVPIO4), %xmm2	/* SP 1/(Pi/4) */
+
+	cmpl	$0x40e231d6, %eax	/* |x|<9*Pi/4?  */
+	jae	L(large_args)
+
+	/* Here if Pi/4<=|x|<9*Pi/4 */
+	mulss	%xmm3, %xmm2		/* SP |x|/(Pi/4) */
+	cvttss2si %xmm2, %eax		/* k, number of Pi/4 in x */
+	addl	$1, %eax		/* k+1 */
+	movl	$0x0e, %edx
+	andl	%eax, %edx		/* j = (k+1)&0x0e */
+	addl	$2, %eax		/* n */
+	subsd	MO2(PIO4J,%edx,8), %xmm0 /* t = |x| - j * Pi/4 */
+
+L(reconstruction):
+	/* Input: %eax=n, %xmm0=t */
+	testl	$2, %eax		/* n&2 != 0?  */
+	jz	L(sin_poly)
+
+/*L(cos_poly):*/
+	/* Here if cos(x) calculated using cos(t) polynomial for |t|<Pi/4:
+	 * y = t*t; z = y*y;
+	 * s = sign(x) * (-1.0)^((n>>2)&1)
+	 * result = s * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4)))))
+	 */
+	shrl	$2, %eax		/* n>>2 */
+	mulsd	%xmm0, %xmm0		/* y=t^2 */
+	andl	$1, %eax		/* (n>>2)&1 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=t^4 */
+
+	movsd	MO1(DP_C4), %xmm4	/* C4 */
+	mulsd	%xmm0, %xmm4		/* z*C4 */
+	movsd	MO1(DP_C3), %xmm3	/* C3 */
+	mulsd	%xmm0, %xmm3		/* z*C3 */
+	addsd	MO1(DP_C2), %xmm4	/* C2+z*C4 */
+	mulsd	%xmm0, %xmm4		/* z*(C2+z*C4) */
+	lea	-8(%esp), %esp		/* Borrow 8 bytes of stack frame */
+	addsd	MO1(DP_C1), %xmm3	/* C1+z*C3 */
+	mulsd	%xmm0, %xmm3		/* z*(C1+z*C3) */
+	addsd	MO1(DP_C0), %xmm4	/* C0+z*(C2+z*C4) */
+	mulsd	%xmm1, %xmm4		/* y*(C0+z*(C2+z*C4)) */
+
+	addsd	%xmm4, %xmm3		/* y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	/* 1.0+y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	addsd	MO1(DP_ONES), %xmm3
+
+	mulsd	MO2(DP_ONES,%eax,8), %xmm3 /* DP result */
+	movsd	%xmm3, 0(%esp)		/* Move result from sse...  */
+	fldl	0(%esp)			/* ...to FPU.  */
+	/* Return back 8 bytes of stack frame */
+	lea	8(%esp), %esp
+	RESTORE_BX
+	ret
+
+	.p2align	4
+L(sin_poly):
+	/* Here if cos(x) calculated using sin(t) polynomial for |t|<Pi/4:
+	 * y = t*t; z = y*y;
+	 * s = sign(x) * (-1.0)^((n>>2)&1)
+	 * result = s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4)))))
+	 */
+
+	movaps	%xmm0, %xmm4		/* t */
+	shrl	$2, %eax		/* n>>2 */
+	mulsd	%xmm0, %xmm0		/* y=t^2 */
+	andl	$1, %eax		/* (n>>2)&1 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=t^4 */
+
+	movsd	MO1(DP_S4), %xmm2	/* S4 */
+	mulsd	%xmm0, %xmm2		/* z*S4 */
+	movsd	MO1(DP_S3), %xmm3	/* S3 */
+	mulsd	%xmm0, %xmm3		/* z*S3 */
+	lea	-8(%esp), %esp		/* Borrow 8 bytes of stack frame */
+	addsd	MO1(DP_S2), %xmm2	/* S2+z*S4 */
+	mulsd	%xmm0, %xmm2		/* z*(S2+z*S4) */
+	addsd	MO1(DP_S1), %xmm3	/* S1+z*S3 */
+	mulsd	%xmm0, %xmm3		/* z*(S1+z*S3) */
+	addsd	MO1(DP_S0), %xmm2	/* S0+z*(S2+z*S4) */
+	mulsd	%xmm1, %xmm2		/* y*(S0+z*(S2+z*S4)) */
+	/* t*s, where s = sign(x) * (-1.0)^((n>>2)&1) */
+	mulsd	MO2(DP_ONES,%eax,8), %xmm4
+	addsd	%xmm2, %xmm3		/* y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	/* t*s*y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	mulsd	%xmm4, %xmm3
+	/* t*s*(1.0+y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	addsd	%xmm4, %xmm3
+	movsd	%xmm3, 0(%esp)		/* Move result from sse...   */
+	fldl	0(%esp)			/* ...to FPU.  */
+	/* Return back 8 bytes of stack frame */
+	lea	8(%esp), %esp
+	RESTORE_BX
+	ret
+
+
+	.p2align	4
+L(large_args):
+	/* Here if |x|>=9*Pi/4 */
+	cmpl	$0x7f800000, %eax	/* x is Inf or NaN?  */
+	jae	L(arg_inf_or_nan)
+
+	/* Here if finite |x|>=9*Pi/4 */
+	cmpl	$0x4b000000, %eax	/* |x|<2^23?  */
+	jae	L(very_large_args)
+
+	/* Here if 9*Pi/4<=|x|<2^23 */
+	movsd	MO1(DP_INVPIO4), %xmm1	/* 1/(Pi/4) */
+	mulsd	%xmm0, %xmm1		/* |x|/(Pi/4) */
+	cvttsd2si %xmm1, %eax		/* k=trunc(|x|/(Pi/4)) */
+	addl	$1, %eax		/* k+1 */
+	movl	%eax, %edx
+	andl	$0xfffffffe, %edx	/* j=(k+1)&0xfffffffe */
+	cvtsi2sdl %edx, %xmm4		/* DP j */
+	movsd	MO1(DP_PIO4HI), %xmm2	/* -PIO4HI = high part of -Pi/4 */
+	mulsd	%xmm4, %xmm2		/* -j*PIO4HI */
+	movsd	MO1(DP_PIO4LO), %xmm3	/* -PIO4LO = low part of -Pi/4 */
+	addsd	%xmm2, %xmm0		/* |x| - j*PIO4HI */
+	addl	$2, %eax		/* n */
+	mulsd	%xmm3, %xmm4		/* j*PIO4LO */
+	addsd	%xmm4, %xmm0		/* t = |x| - j*PIO4HI - j*PIO4LO */
+	jmp	L(reconstruction)
+
+	.p2align	4
+L(very_large_args):
+	/* Here if finite |x|>=2^23 */
+
+	/* bitpos = (ix>>23) - BIAS_32 + 59; */
+	shrl	$23, %eax		/* eb = biased exponent of x */
+	/* bitpos = eb - 0x7f + 59, where 0x7f is exponent bias */
+	subl	$68, %eax
+	movl	$28, %ecx		/* %cl=28 */
+	movl	%eax, %edx		/* bitpos copy */
+
+	/* j = bitpos/28; */
+	div	%cl			/* j in register %al=%ax/%cl */
+	movapd	%xmm0, %xmm3		/* |x| */
+	/* clear unneeded remainder from %ah */
+	andl	$0xff, %eax
+
+	imull	$28, %eax, %ecx		/* j*28 */
+	movsd	MO1(DP_HI_MASK), %xmm4	/* DP_HI_MASK */
+	movapd	%xmm0, %xmm5		/* |x| */
+	mulsd	-2*8+MO2(_FPI,%eax,8), %xmm3	/* tmp3 = FPI[j-2]*|x| */
+	movapd	%xmm0, %xmm1		/* |x| */
+	mulsd	-1*8+MO2(_FPI,%eax,8), %xmm5	/* tmp2 = FPI[j-1]*|x| */
+	mulsd	0*8+MO2(_FPI,%eax,8), %xmm0	/* tmp0 = FPI[j]*|x| */
+	addl	$19, %ecx		/* j*28+19 */
+	mulsd	1*8+MO2(_FPI,%eax,8), %xmm1	/* tmp1 = FPI[j+1]*|x| */
+	cmpl	%ecx, %edx		/* bitpos>=j*28+19?  */
+	jl	L(very_large_skip1)
+
+	/* Here if bitpos>=j*28+19 */
+	andpd	%xmm3, %xmm4		/* HI(tmp3) */
+	subsd	%xmm4, %xmm3		/* tmp3 = tmp3 - HI(tmp3) */
+L(very_large_skip1):
+
+	movsd	MO1(DP_2POW52), %xmm6
+	movapd	%xmm5, %xmm2		/* tmp2 copy */
+	addsd	%xmm3, %xmm5		/* tmp5 = tmp3 + tmp2 */
+	movl	$1, %edx
+	addsd	%xmm5, %xmm6		/* tmp6 = tmp5 + 2^52 */
+	movsd	8+MO1(DP_2POW52), %xmm4
+	movd	%xmm6, %eax		/* k = I64_LO(tmp6); */
+	addsd	%xmm6, %xmm4		/* tmp4 = tmp6 - 2^52 */
+	comisd	%xmm5, %xmm4		/* tmp4 > tmp5?  */
+	jbe	L(very_large_skip2)
+
+	/* Here if tmp4 > tmp5 */
+	subl	$1, %eax		/* k-- */
+	addsd	8+MO1(DP_ONES), %xmm4	/* tmp4 -= 1.0 */
+L(very_large_skip2):
+
+	andl	%eax, %edx		/* k&1 */
+	subsd	%xmm4, %xmm3		/* tmp3 -= tmp4 */
+	addsd	MO2(DP_ZERONE,%edx,8), %xmm3 /* t  = DP_ZERONE[k&1] + tmp3 */
+	addsd	%xmm2, %xmm3		/* t += tmp2 */
+	addsd	%xmm3, %xmm0		/* t += tmp0 */
+	addl	$3, %eax		/* n=k+3 */
+	addsd	%xmm1, %xmm0		/* t += tmp1 */
+	mulsd	MO1(DP_PIO4), %xmm0	/* t *= Pi/4 */
+
+	jmp	L(reconstruction)	/* end of very_large_args path */
+
+
+	.p2align	4
+L(arg_less_pio4):
+	/* Here if |x|<Pi/4 */
+	cmpl	$0x3d000000, %eax	/* |x|<2^-5?  */
+	jl	L(arg_less_2pn5)
+
+	/* Here if 2^-5<=|x|<Pi/4 */
+	mulsd	%xmm0, %xmm0		/* y=x^2 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=x^4 */
+	movsd	MO1(DP_C4), %xmm3	/* C4 */
+	mulsd	%xmm0, %xmm3		/* z*C4 */
+	movsd	MO1(DP_C3), %xmm5	/* C3 */
+	mulsd	%xmm0, %xmm5		/* z*C3 */
+	addsd	MO1(DP_C2), %xmm3	/* C2+z*C4 */
+	mulsd	%xmm0, %xmm3		/* z*(C2+z*C4) */
+	addsd	MO1(DP_C1), %xmm5	/* C1+z*C3 */
+	mulsd	%xmm0, %xmm5		/* z*(C1+z*C3) */
+	addsd	MO1(DP_C0), %xmm3	/* C0+z*(C2+z*C4) */
+	mulsd	%xmm1, %xmm3		/* y*(C0+z*(C2+z*C4)) */
+	addsd	%xmm5, %xmm3		/* y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	/* 1.0 + y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	addsd	MO1(DP_ONES), %xmm3
+	cvtsd2ss %xmm3, %xmm3		/* SP result */
+
+L(epilogue):
+	lea	-4(%esp), %esp		/* Borrow 4 bytes of stack frame */
+	movss	%xmm3, 0(%esp)		/* Move result from sse...  */
+	flds	0(%esp)			/* ...to FPU.  */
+	/* Return back 4 bytes of stack frame */
+	lea	4(%esp), %esp
+	RESTORE_BX
+	ret
+
+	.p2align	4
+L(arg_less_2pn5):
+	/* Here if |x|<2^-5 */
+	cmpl	$0x32000000, %eax	/* |x|<2^-27?  */
+	jl	L(arg_less_2pn27)
+
+	/* Here if 2^-27<=|x|<2^-5 */
+	mulsd	%xmm0, %xmm0		/* DP x^2 */
+	movsd	MO1(DP_COS2_1), %xmm3	/* DP DP_COS2_1 */
+	mulsd	%xmm0, %xmm3		/* DP x^2*DP_COS2_1 */
+	addsd	MO1(DP_COS2_0), %xmm3	/* DP DP_COS2_0+x^2*DP_COS2_1 */
+	mulsd	%xmm0, %xmm3		/* DP x^2*DP_COS2_0+x^4*DP_COS2_1 */
+	/* DP 1.0+x^2*DP_COS2_0+x^4*DP_COS2_1 */
+	addsd	MO1(DP_ONES), %xmm3
+	cvtsd2ss %xmm3, %xmm3		/* SP result */
+	jmp	L(epilogue)
+
+	.p2align	4
+L(arg_less_2pn27):
+	/* Here if |x|<2^-27 */
+	movss	ARG_X, %xmm0		/* x */
+	andps	MO1(SP_ABS_MASK),%xmm0	/* |x| */
+	movss	MO1(SP_ONE), %xmm3	/* 1.0 */
+	subss	%xmm0, %xmm3		/* result is 1.0-|x| */
+	jmp	L(epilogue)
+
+	.p2align	4
+L(arg_inf_or_nan):
+	/* Here if |x| is Inf or NAN */
+	jne	L(skip_errno_setting)	/* in case of x is NaN */
+
+	/* Here if x is Inf. Set errno to EDOM.  */
+	call	JUMPTARGET(__errno_location)
+	movl	$EDOM, (%eax)
+
+	.p2align	4
+L(skip_errno_setting):
+	/* Here if |x| is Inf or NAN. Continued.  */
+	movss	ARG_X, %xmm3		/* load x */
+	subss	%xmm3, %xmm3		/* Result is NaN */
+	jmp	L(epilogue)
+END(__cosf_sse2)
+
+
+	.section .rodata, "a"
+	.p2align 3
+L(PIO4J): /* Table of j*Pi/4, for j=0,1,..,10 */
+	.long	0x00000000,0x00000000
+	.long	0x54442d18,0x3fe921fb
+	.long	0x54442d18,0x3ff921fb
+	.long	0x7f3321d2,0x4002d97c
+	.long	0x54442d18,0x400921fb
+	.long	0x2955385e,0x400f6a7a
+	.long	0x7f3321d2,0x4012d97c
+	.long	0xe9bba775,0x4015fdbb
+	.long	0x54442d18,0x401921fb
+	.long	0xbeccb2bb,0x401c463a
+	.long	0x2955385e,0x401f6a7a
+	.type L(PIO4J), @object
+	ASM_SIZE_DIRECTIVE(L(PIO4J))
+
+	.p2align 3
+L(_FPI): /* 4/Pi broken into sum of positive DP values */
+	.long	0x00000000,0x00000000
+	.long	0x6c000000,0x3ff45f30
+	.long	0x2a000000,0x3e3c9c88
+	.long	0xa8000000,0x3c54fe13
+	.long	0xd0000000,0x3aaf47d4
+	.long	0x6c000000,0x38fbb81b
+	.long	0xe0000000,0x3714acc9
+	.long	0x7c000000,0x3560e410
+	.long	0x56000000,0x33bca2c7
+	.long	0xac000000,0x31fbd778
+	.long	0xe0000000,0x300b7246
+	.long	0xe8000000,0x2e5d2126
+	.long	0x48000000,0x2c970032
+	.long	0xe8000000,0x2ad77504
+	.long	0xe0000000,0x290921cf
+	.long	0xb0000000,0x274deb1c
+	.long	0xe0000000,0x25829a73
+	.long	0xbe000000,0x23fd1046
+	.long	0x10000000,0x2224baed
+	.long	0x8e000000,0x20709d33
+	.long	0x80000000,0x1e535a2f
+	.long	0x64000000,0x1cef904e
+	.long	0x30000000,0x1b0d6398
+	.long	0x24000000,0x1964ce7d
+	.long	0x16000000,0x17b908bf
+	.type L(_FPI), @object
+	ASM_SIZE_DIRECTIVE(L(_FPI))
+
+/* Coefficients of polynomial
+ for cos(x)~=1.0+x^2*DP_COS2_0+x^4*DP_COS2_1, |x|<2^-5.  */
+	.p2align 3
+L(DP_COS2_0):
+	.long	0xff5cc6fd,0xbfdfffff
+	.type L(DP_COS2_0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_COS2_0))
+
+	.p2align 3
+L(DP_COS2_1):
+	.long	0xb178dac5,0x3fa55514
+	.type L(DP_COS2_1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_COS2_1))
+
+	.p2align 3
+L(DP_ZERONE):
+	.long	0x00000000,0x00000000	/* 0.0 */
+	.long	0x00000000,0xbff00000	/* -1.0 */
+	.type L(DP_ZERONE),@object
+	ASM_SIZE_DIRECTIVE(L(DP_ZERONE))
+
+	.p2align 3
+L(DP_ONES):
+	.long	0x00000000,0x3ff00000	/* +1.0 */
+	.long	0x00000000,0xbff00000	/* -1.0 */
+	.type L(DP_ONES), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ONES))
+
+/* Coefficients of polynomial
+ for sin(t)~=t+t^3*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4)))), |t|<Pi/4.  */
+	.p2align 3
+L(DP_S3):
+	.long	0x64e6b5b4,0x3ec71d72
+	.type L(DP_S3), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S3))
+
+	.p2align 3
+L(DP_S1):
+	.long	0x10c2688b,0x3f811111
+	.type L(DP_S1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S1))
+
+	.p2align 3
+L(DP_S4):
+	.long	0x1674b58a,0xbe5a947e
+	.type L(DP_S4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S4))
+
+	.p2align 3
+L(DP_S2):
+	.long	0x8b4bd1f9,0xbf2a019f
+	.type L(DP_S2), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S2))
+
+	.p2align 3
+L(DP_S0):
+	.long	0x55551cd9,0xbfc55555
+	.type L(DP_S0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S0))
+
+/* Coefficients of polynomial
+ for cos(t)~=1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4)))), |t|<Pi/4.  */
+	.p2align 3
+L(DP_C3):
+	.long	0x9ac43cc0,0x3efa00eb
+	.type L(DP_C3), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C3))
+
+	.p2align 3
+L(DP_C1):
+	.long	0x545c50c7,0x3fa55555
+	.type L(DP_C1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C1))
+
+	.p2align 3
+L(DP_C4):
+	.long	0xdd8844d7,0xbe923c97
+	.type L(DP_C4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C4))
+
+	.p2align 3
+L(DP_C2):
+	.long	0x348b6874,0xbf56c16b
+	.type L(DP_C2), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C2))
+
+	.p2align 3
+L(DP_C0):
+	.long	0xfffe98ae,0xbfdfffff
+	.type L(DP_C0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C0))
+
+	.p2align 3
+L(DP_PIO4):
+	.long	0x54442d18,0x3fe921fb	/* Pi/4 */
+	.type L(DP_PIO4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4))
+
+	.p2align 3
+L(DP_2POW52):
+	.long	0x00000000,0x43300000	/* +2^52 */
+	.long	0x00000000,0xc3300000	/* -2^52 */
+	.type L(DP_2POW52), @object
+	ASM_SIZE_DIRECTIVE(L(DP_2POW52))
+
+	.p2align 3
+L(DP_INVPIO4):
+	.long	0x6dc9c883,0x3ff45f30	/* 4/Pi */
+	.type L(DP_INVPIO4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_INVPIO4))
+
+	.p2align 3
+L(DP_PIO4HI):
+	.long	0x54000000,0xbfe921fb	/* High part of Pi/4 */
+	.type L(DP_PIO4HI), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4HI))
+
+	.p2align 3
+L(DP_PIO4LO):
+	.long	0x11A62633,0xbe010b46	/* Low part of Pi/4 */
+	.type L(DP_PIO4LO), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4LO))
+
+	.p2align 2
+L(SP_INVPIO4):
+	.long	0x3fa2f983		/* 4/Pi */
+	.type L(SP_INVPIO4), @object
+	ASM_SIZE_DIRECTIVE(L(SP_INVPIO4))
+
+	.p2align 4
+L(DP_ABS_MASK): /* Mask for getting DP absolute value */
+	.long	0xffffffff,0x7fffffff
+	.long	0xffffffff,0x7fffffff
+	.type L(DP_ABS_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ABS_MASK))
+
+	.p2align 3
+L(DP_HI_MASK): /* Mask for getting high 21 bits of DP value */
+	.long	0x00000000,0xffffffff
+	.type L(DP_HI_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(DP_HI_MASK))
+
+	.p2align 4
+L(SP_ABS_MASK): /* Mask for getting SP absolute value */
+	.long	0x7fffffff,0x7fffffff
+	.long	0x7fffffff,0x7fffffff
+	.type L(SP_ABS_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(SP_ABS_MASK))
+
+	.p2align 2
+L(SP_ONE):
+	.long	0x3f800000		/* 1.0 */
+	.type L(SP_ONE), @object
+	ASM_SIZE_DIRECTIVE(L(SP_ONE))
+
+weak_alias (__cosf, cosf)
diff --git a/sysdeps/i386/i686/fpu/multiarch/s_cosf.c b/sysdeps/i386/i686/fpu/multiarch/s_cosf.c
new file mode 100644
index 0000000000..f07c3e5103
--- /dev/null
+++ b/sysdeps/i386/i686/fpu/multiarch/s_cosf.c
@@ -0,0 +1,29 @@
+/* Multiple versions of cosf
+   Copyright (C) 2012 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <init-arch.h>
+
+extern float __cosf_sse2 (float);
+extern float __cosf_ia32 (float);
+float __cosf (float);
+
+libm_ifunc (__cosf, HAS_SSE2 ? __cosf_sse2 : __cosf_ia32);
+weak_alias (__cosf, cosf);
+
+#define COSF __cosf_ia32
+#include <sysdeps/ieee754/flt-32/s_cosf.c>
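
For readers unfamiliar with glibc's IFUNC machinery: the dispatch above resolves __cosf to either __cosf_sse2 or __cosf_ia32 once, at relocation time.  A hedged, roughly equivalent sketch in plain GCC C (the resolver name is made up here, and __builtin_cpu_supports stands in for glibc's internal HAS_SSE2/init-arch check) would be:

extern float __cosf_sse2 (float);
extern float __cosf_ia32 (float);

/* The resolver runs while relocations are processed and returns the
   implementation the symbol should bind to.  */
static float (*resolve_cosf (void)) (float)
{
  __builtin_cpu_init ();
  return __builtin_cpu_supports ("sse2") ? __cosf_sse2 : __cosf_ia32;
}

float __cosf (float) __attribute__ ((ifunc ("resolve_cosf")));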
diff --git a/sysdeps/i386/i686/fpu/multiarch/s_sinf-sse2.S b/sysdeps/i386/i686/fpu/multiarch/s_sinf-sse2.S
new file mode 100644
index 0000000000..cda175094a
--- /dev/null
+++ b/sysdeps/i386/i686/fpu/multiarch/s_sinf-sse2.S
@@ -0,0 +1,575 @@
+/* SSE2-optimized version of sinf
+   Copyright (C) 2012 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#define __need_Emath
+#include <bits/errno.h>
+
+/* Short algorithm description:
+ *
+ *  1) if |x| == 0: return x.
+ *  2) if |x| <  2^-27: return x-x*DP_SMALL, raise underflow only when needed.
+ *  3) if |x| <  2^-5 : return x+x^3*DP_SIN2_0+x^5*DP_SIN2_1.
+ *  4) if |x| <   Pi/4: return x+x^3*(S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4)))).
+ *  5) if |x| < 9*Pi/4:
+ *      5.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0x0e, n=k+1,
+ *           t=|x|-j*Pi/4.
+ *      5.2) Reconstruction:
+ *          s = sign(x) * (-1.0)^((n>>2)&1)
+ *          if(n&2 != 0) {
+ *              using cos(t) polynomial for |t|<Pi/4, result is
+ *              s     * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4))))).
+ *          } else {
+ *              using sin(t) polynomial for |t|<Pi/4, result is
+ *              s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4))))).
+ *          }
+ *  6) if |x| < 2^23, large args:
+ *      6.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+1,
+ *           t=|x|-j*Pi/4.
+ *      6.2) Reconstruction same as (5.2).
+ *  7) if |x| >= 2^23, very large args:
+ *      7.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+1,
+ *           t=|x|-j*Pi/4.
+ *      7.2) Reconstruction same as (5.2).
+ *  8) if x is Inf, return x-x, and set errno=EDOM.
+ *  9) if x is NaN, return x-x.
+ *
+ * Special cases:
+ *  sin(+-0)==+-0 not raising inexact/underflow,
+ *  sin(subnormal) raises inexact/underflow
+ *  sin(min_normalized) raises inexact/underflow
+ *  sin(normalized) raises inexact
+ *  sin(Inf) = NaN, raises invalid, sets errno to EDOM
+ *  sin(NaN) = NaN
+ */
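
The corresponding C sketch of steps 5.1/5.2 for sinf (illustration only; the helper name is made up, note n = k+1 here rather than k+3, and the assembly folds the sign bit of x into the DP_ONES table index with an XOR rather than multiplying):

#include <math.h>

static float
sinf_medium_sketch (float x)
{
  const double pio4 = 0x1.921fb54442d18p-1;	/* matches DP_PIO4 */
  double ax = fabs ((double) x);
  int k = (int) (ax / pio4);
  int j = (k + 1) & 0x0e;
  int n = k + 1;			/* k+1 for sinf, k+3 for cosf */
  double t = ax - j * pio4;
  int negate = ((n >> 2) & 1) ^ (x < 0.0f);	/* index into DP_ONES */
  double s = negate ? -1.0 : 1.0;
  return (float) (s * ((n & 2) ? cos (t) : sin (t)));
}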
+
+#ifdef	PIC
+# define MO1(symbol)			L(symbol)##@GOTOFF(%ebx)
+# define MO2(symbol,reg2,_scale)	L(symbol)##@GOTOFF(%ebx,reg2,_scale)
+# define SAVE_BX			pushl	%ebx
+# define RESTORE_BX			popl	%ebx
+# define ARG_X				8(%esp)
+#else
+# define MO1(symbol)			L(symbol)
+# define MO2(symbol,reg2,_scale)	L(symbol)(,reg2,_scale)
+# define SAVE_BX
+# define RESTORE_BX
+# define ARG_X				4(%esp)
+#endif
+
+	.text
+ENTRY(__sinf_sse2)
+	/* Input: single precision x on stack at address ARG_X */
+
+#ifdef	PIC
+	SAVE_BX
+	LOAD_PIC_REG(bx)
+#endif
+
+	movl	ARG_X, %eax		/* Bits of x */
+	cvtss2sd ARG_X, %xmm0		/* DP x */
+	andl	$0x7fffffff, %eax	/* |x| */
+
+	cmpl	$0x3f490fdb, %eax	/* |x|<Pi/4?  */
+	jb	L(arg_less_pio4)
+
+	/* Here if |x|>=Pi/4 */
+	movd	%eax, %xmm3		/* SP |x| */
+	andpd	MO1(DP_ABS_MASK),%xmm0	/* DP |x| */
+	movss	MO1(SP_INVPIO4), %xmm2	/* SP 1/(Pi/4) */
+
+	cmpl	$0x40e231d6, %eax	/* |x|<9*Pi/4?  */
+	jae	L(large_args)
+
+	/* Here if Pi/4<=|x|<9*Pi/4 */
+	mulss	%xmm3, %xmm2		/* SP |x|/(Pi/4) */
+	movl	ARG_X, %ecx		/* Load x */
+	cvttss2si %xmm2, %eax		/* k, number of Pi/4 in x */
+	shrl	$31, %ecx		/* sign of x */
+	addl	$1, %eax		/* k+1 */
+	movl	$0x0e, %edx
+	andl	%eax, %edx		/* j = (k+1)&0x0e */
+	subsd	MO2(PIO4J,%edx,8), %xmm0 /* t = |x| - j * Pi/4 */
+
+L(reconstruction):
+	/* Input: %eax=n, %xmm0=t, %ecx=sign(x) */
+	testl	$2, %eax		/* n&2 != 0?  */
+	jz	L(sin_poly)
+
+/*L(cos_poly):*/
+	/* Here if sin(x) calculated using cos(t) polynomial for |t|<Pi/4:
+	 * y = t*t; z = y*y;
+	 * s = sign(x) * (-1.0)^((n>>2)&1)
+	 * result = s     * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4)))))
+	 */
+	shrl	$2, %eax		/* n>>2 */
+	mulsd	%xmm0, %xmm0		/* y=t^2 */
+	andl	$1, %eax		/* (n>>2)&1 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=t^4 */
+
+	movsd	MO1(DP_C4), %xmm4	/* C4 */
+	mulsd	%xmm0, %xmm4		/* z*C4 */
+	xorl	%eax, %ecx		/* (-1.0)^((n>>2)&1) XOR sign(x) */
+	movsd	MO1(DP_C3), %xmm3	/* C3 */
+	mulsd	%xmm0, %xmm3		/* z*C3 */
+	addsd	MO1(DP_C2), %xmm4	/* C2+z*C4 */
+	mulsd	%xmm0, %xmm4		/* z*(C2+z*C4) */
+	lea	-8(%esp), %esp		/* Borrow 8 bytes of stack frame */
+	addsd	MO1(DP_C1), %xmm3	/* C1+z*C3 */
+	mulsd	%xmm0, %xmm3		/* z*(C1+z*C3) */
+	addsd	MO1(DP_C0), %xmm4	/* C0+z*(C2+z*C4) */
+	mulsd	%xmm1, %xmm4		/* y*(C0+z*(C2+z*C4)) */
+
+	addsd	%xmm4, %xmm3		/* y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	/* 1.0+y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	addsd	MO1(DP_ONES), %xmm3
+
+	mulsd	MO2(DP_ONES,%ecx,8), %xmm3 /* DP result */
+	movsd	%xmm3, 0(%esp)		/* Move result from sse...  */
+	fldl	0(%esp)			/* ...to FPU.  */
+	/* Return back 8 bytes of stack frame */
+	lea	8(%esp), %esp
+	RESTORE_BX
+	ret
+
+	.p2align	4
+L(sin_poly):
+	/* Here if sin(x) calculated using sin(t) polynomial for |t|<Pi/4:
+	 * y = t*t; z = y*y;
+	 * s = sign(x) * (-1.0)^((n>>2)&1)
+	 * result = s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4)))))
+	 */
+
+	movaps	%xmm0, %xmm4		/* t */
+	shrl	$2, %eax		/* n>>2 */
+	mulsd	%xmm0, %xmm0		/* y=t^2 */
+	andl	$1, %eax		/* (n>>2)&1 */
+	movaps	%xmm0, %xmm1		/* y */
+	xorl	%eax, %ecx		/* (-1.0)^((n>>2)&1) XOR sign(x) */
+	mulsd	%xmm0, %xmm0		/* z=t^4 */
+
+	movsd	MO1(DP_S4), %xmm2	/* S4 */
+	mulsd	%xmm0, %xmm2		/* z*S4 */
+	movsd	MO1(DP_S3), %xmm3	/* S3 */
+	mulsd	%xmm0, %xmm3		/* z*S3 */
+	lea	-8(%esp), %esp		/* Borrow 8 bytes of stack frame */
+	addsd	MO1(DP_S2), %xmm2	/* S2+z*S4 */
+	mulsd	%xmm0, %xmm2		/* z*(S2+z*S4) */
+	addsd	MO1(DP_S1), %xmm3	/* S1+z*S3 */
+	mulsd	%xmm0, %xmm3		/* z*(S1+z*S3) */
+	addsd	MO1(DP_S0), %xmm2	/* S0+z*(S2+z*S4) */
+	mulsd	%xmm1, %xmm2		/* y*(S0+z*(S2+z*S4)) */
+	/* t*s, where s = sign(x) * (-1.0)^((n>>2)&1) */
+	mulsd	MO2(DP_ONES,%ecx,8), %xmm4
+	addsd	%xmm2, %xmm3		/* y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	/* t*s*y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	mulsd	%xmm4, %xmm3
+	/* t*s*(1.0+y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	addsd	%xmm4, %xmm3
+	movsd	%xmm3, 0(%esp)		/* Move result from sse...  */
+	fldl	0(%esp)			/* ...to FPU.  */
+	/* Return back 8 bytes of stack frame */
+	lea	8(%esp), %esp
+	RESTORE_BX
+	ret
+
+
+	.p2align	4
+L(large_args):
+	/* Here if |x|>=9*Pi/4 */
+	cmpl	$0x7f800000, %eax	/* x is Inf or NaN?  */
+	jae	L(arg_inf_or_nan)
+
+	/* Here if finite |x|>=9*Pi/4 */
+	cmpl	$0x4b000000, %eax	/* |x|<2^23?  */
+	jae	L(very_large_args)
+
+	/* Here if 9*Pi/4<=|x|<2^23 */
+	movsd	MO1(DP_INVPIO4), %xmm1	/* 1/(Pi/4) */
+	mulsd	%xmm0, %xmm1		/* |x|/(Pi/4) */
+	cvttsd2si %xmm1, %eax		/* k=trunc(|x|/(Pi/4)) */
+	addl	$1, %eax		/* k+1 */
+	movl	%eax, %edx
+	andl	$0xfffffffe, %edx	/* j=(k+1)&0xfffffffe */
+	cvtsi2sdl %edx, %xmm4		/* DP j */
+	movl	ARG_X, %ecx		/* Load x */
+	movsd	MO1(DP_PIO4HI), %xmm2	/* -PIO4HI = high part of -Pi/4 */
+	shrl	$31, %ecx		/* sign bit of x */
+	mulsd	%xmm4, %xmm2		/* -j*PIO4HI */
+	movsd	MO1(DP_PIO4LO), %xmm3	/* -PIO4LO = low part of -Pi/4 */
+	addsd	%xmm2, %xmm0		/* |x| - j*PIO4HI */
+	mulsd	%xmm3, %xmm4		/* j*PIO4LO */
+	addsd	%xmm4, %xmm0		/* t = |x| - j*PIO4HI - j*PIO4LO */
+	jmp	L(reconstruction)
+
+	.p2align	4
+L(very_large_args):
+	/* Here if finite |x|>=2^23 */
+
+	/* bitpos = (ix>>23) - BIAS_32 + 59; */
+	shrl	$23, %eax		/* eb = biased exponent of x */
+	/* bitpos = eb - 0x7f + 59, where 0x7f is exponent bias */
+	subl	$68, %eax
+	movl	$28, %ecx		/* %cl=28 */
+	movl	%eax, %edx		/* bitpos copy */
+
+	/* j = bitpos/28; */
+	div	%cl			/* j in register %al=%ax/%cl */
+	movapd	%xmm0, %xmm3		/* |x| */
+	/* clear unneeded remainder from %ah */
+	andl	$0xff, %eax
+
+	imull	$28, %eax, %ecx		/* j*28 */
+	movsd	MO1(DP_HI_MASK), %xmm4	/* DP_HI_MASK */
+	movapd	%xmm0, %xmm5		/* |x| */
+	mulsd	-2*8+MO2(_FPI,%eax,8), %xmm3	/* tmp3 = FPI[j-2]*|x| */
+	movapd	%xmm0, %xmm1		/* |x| */
+	mulsd	-1*8+MO2(_FPI,%eax,8), %xmm5	/* tmp2 = FPI[j-1]*|x| */
+	mulsd	0*8+MO2(_FPI,%eax,8), %xmm0	/* tmp0 = FPI[j]*|x| */
+	addl	$19, %ecx		/* j*28+19 */
+	mulsd	1*8+MO2(_FPI,%eax,8), %xmm1	/* tmp1 = FPI[j+1]*|x| */
+	cmpl	%ecx, %edx		/* bitpos>=j*28+19?   */
+	jl	L(very_large_skip1)
+
+	/* Here if bitpos>=j*28+19 */
+	andpd	%xmm3, %xmm4		/* HI(tmp3) */
+	subsd	%xmm4, %xmm3		/* tmp3 = tmp3 - HI(tmp3) */
+L(very_large_skip1):
+
+	movsd	MO1(DP_2POW52), %xmm6
+	movapd	%xmm5, %xmm2		/* tmp2 copy */
+	addsd	%xmm3, %xmm5		/* tmp5 = tmp3 + tmp2 */
+	movl	$1, %edx
+	addsd	%xmm5, %xmm6		/* tmp6 = tmp5 + 2^52 */
+	movsd	8+MO1(DP_2POW52), %xmm4
+	movd	%xmm6, %eax		/* k = I64_LO(tmp6); */
+	addsd	%xmm6, %xmm4		/* tmp4 = tmp6 - 2^52 */
+	movl	ARG_X, %ecx		/* Load x */
+	comisd	%xmm5, %xmm4		/* tmp4 > tmp5?  */
+	jbe	L(very_large_skip2)
+
+	/* Here if tmp4 > tmp5 */
+	subl	$1, %eax		/* k-- */
+	addsd	8+MO1(DP_ONES), %xmm4	/* tmp4 -= 1.0 */
+L(very_large_skip2):
+
+	andl	%eax, %edx		/* k&1 */
+	subsd	%xmm4, %xmm3		/* tmp3 -= tmp4 */
+	addsd	MO2(DP_ZERONE,%edx,8), %xmm3 /* t  = DP_ZERONE[k&1] + tmp3 */
+	addsd	%xmm2, %xmm3		/* t += tmp2 */
+	shrl	$31, %ecx		/* sign of x */
+	addsd	%xmm3, %xmm0		/* t += tmp0 */
+	addl	$1, %eax		/* n=k+1 */
+	addsd	%xmm1, %xmm0		/* t += tmp1 */
+	mulsd	MO1(DP_PIO4), %xmm0	/* t *= Pi/4 */
+
+	jmp	L(reconstruction)	/* end of very_large_args path */
+
+
+
+
+
+	.p2align	4
+L(arg_less_pio4):
+	/* Here if |x|<Pi/4 */
+	cmpl	$0x3d000000, %eax	/* |x|<2^-5?  */
+	jl	L(arg_less_2pn5)
+
+	/* Here if 2^-5<=|x|<Pi/4 */
+	movaps	%xmm0, %xmm3		/* x */
+	mulsd	%xmm0, %xmm0		/* y=x^2 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=x^4 */
+	movsd	MO1(DP_S4), %xmm4	/* S4 */
+	mulsd	%xmm0, %xmm4		/* z*S4 */
+	movsd	MO1(DP_S3), %xmm5	/* S3 */
+	mulsd	%xmm0, %xmm5		/* z*S3 */
+	addsd	MO1(DP_S2), %xmm4	/* S2+z*S4 */
+	mulsd	%xmm0, %xmm4		/* z*(S2+z*S4) */
+	addsd	MO1(DP_S1), %xmm5	/* S1+z*S3 */
+	mulsd	%xmm0, %xmm5		/* z*(S1+z*S3) */
+	addsd	MO1(DP_S0), %xmm4	/* S0+z*(S2+z*S4) */
+	mulsd	%xmm1, %xmm4		/* y*(S0+z*(S2+z*S4)) */
+	mulsd	%xmm3, %xmm5		/* x*z*(S1+z*S3) */
+	mulsd	%xmm3, %xmm4		/* x*y*(S0+z*(S2+z*S4)) */
+	/* x*y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	addsd	%xmm5, %xmm4
+	/* x + x*y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	addsd	%xmm4, %xmm3
+	cvtsd2ss %xmm3, %xmm3		/* SP result */
+
+L(epilogue):
+	lea	-4(%esp), %esp		/* Borrow 4 bytes of stack frame */
+	movss	%xmm3, 0(%esp)		/* Move result from sse...  */
+	flds	0(%esp)			/* ...to FPU.  */
+	/* Return back 4 bytes of stack frame */
+	lea	4(%esp), %esp
+	RESTORE_BX
+	ret
+
+	.p2align	4
+L(arg_less_2pn5):
+	/* Here if |x|<2^-5 */
+	cmpl	$0x32000000, %eax	/* |x|<2^-27?  */
+	jl	L(arg_less_2pn27)
+
+	/* Here if 2^-27<=|x|<2^-5 */
+	movaps	%xmm0, %xmm1		/* DP x */
+	mulsd	%xmm0, %xmm0		/* DP x^2 */
+	movsd	MO1(DP_SIN2_1), %xmm3	/* DP DP_SIN2_1 */
+	mulsd	%xmm0, %xmm3		/* DP x^2*DP_SIN2_1 */
+	addsd	MO1(DP_SIN2_0), %xmm3	/* DP DP_SIN2_0+x^2*DP_SIN2_1 */
+	mulsd	%xmm0, %xmm3		/* DP x^2*DP_SIN2_0+x^4*DP_SIN2_1 */
+	mulsd	%xmm1, %xmm3		/* DP x^3*DP_SIN2_0+x^5*DP_SIN2_1 */
+	addsd	%xmm1, %xmm3		/* DP x+x^3*DP_SIN2_0+x^5*DP_SIN2_1 */
+	cvtsd2ss %xmm3, %xmm3		/* SP result */
+	jmp	L(epilogue)
+
+	.p2align	4
+L(arg_less_2pn27):
+	movss	ARG_X, %xmm3		/* SP x */
+	cmpl	$0, %eax		/* x=0?  */
+	je	L(epilogue)		/* in case x=0 return sin(+-0)==+-0 */
+	/* Here if |x|<2^-27 */
+	/*
+	 * Special cases here:
+	 *  sin(subnormal) raises inexact/underflow
+	 *  sin(min_normalized) raises inexact/underflow
+	 *  sin(normalized) raises inexact
+	 */
+	movaps	%xmm0, %xmm3		/* Copy of DP x */
+	mulsd	MO1(DP_SMALL), %xmm0	/* x*DP_SMALL */
+	subsd	%xmm0, %xmm3		/* Result is x-x*DP_SMALL */
+	cvtsd2ss %xmm3, %xmm3		/* Result converted to SP */
+	jmp	L(epilogue)
+
+	.p2align	4
+L(arg_inf_or_nan):
+	/* Here if |x| is Inf or NAN */
+	jne	L(skip_errno_setting)	/* in case of x is NaN */
+
+	/* Here if x is Inf. Set errno to EDOM.  */
+	call	JUMPTARGET(__errno_location)
+	movl	$EDOM, (%eax)
+
+	.p2align	4
+L(skip_errno_setting):
+	/* Here if |x| is Inf or NAN. Continued.  */
+	movss	ARG_X, %xmm3		/* load x */
+	subss	%xmm3, %xmm3		/* Result is NaN */
+	jmp	L(epilogue)
+END(__sinf_sse2)
+
+
+	.section .rodata, "a"
+	.p2align 3
+L(PIO4J): /* Table of j*Pi/4, for j=0,1,..,10 */
+	.long	0x00000000,0x00000000
+	.long	0x54442d18,0x3fe921fb
+	.long	0x54442d18,0x3ff921fb
+	.long	0x7f3321d2,0x4002d97c
+	.long	0x54442d18,0x400921fb
+	.long	0x2955385e,0x400f6a7a
+	.long	0x7f3321d2,0x4012d97c
+	.long	0xe9bba775,0x4015fdbb
+	.long	0x54442d18,0x401921fb
+	.long	0xbeccb2bb,0x401c463a
+	.long	0x2955385e,0x401f6a7a
+	.type L(PIO4J), @object
+	ASM_SIZE_DIRECTIVE(L(PIO4J))
+
+	.p2align 3
+L(_FPI): /* 4/Pi broken into sum of positive DP values */
+	.long	0x00000000,0x00000000
+	.long	0x6c000000,0x3ff45f30
+	.long	0x2a000000,0x3e3c9c88
+	.long	0xa8000000,0x3c54fe13
+	.long	0xd0000000,0x3aaf47d4
+	.long	0x6c000000,0x38fbb81b
+	.long	0xe0000000,0x3714acc9
+	.long	0x7c000000,0x3560e410
+	.long	0x56000000,0x33bca2c7
+	.long	0xac000000,0x31fbd778
+	.long	0xe0000000,0x300b7246
+	.long	0xe8000000,0x2e5d2126
+	.long	0x48000000,0x2c970032
+	.long	0xe8000000,0x2ad77504
+	.long	0xe0000000,0x290921cf
+	.long	0xb0000000,0x274deb1c
+	.long	0xe0000000,0x25829a73
+	.long	0xbe000000,0x23fd1046
+	.long	0x10000000,0x2224baed
+	.long	0x8e000000,0x20709d33
+	.long	0x80000000,0x1e535a2f
+	.long	0x64000000,0x1cef904e
+	.long	0x30000000,0x1b0d6398
+	.long	0x24000000,0x1964ce7d
+	.long	0x16000000,0x17b908bf
+	.type L(_FPI), @object
+	ASM_SIZE_DIRECTIVE(L(_FPI))
+
+/* Coefficients of polynomial
+   for sin(x)~=x+x^3*DP_SIN2_0+x^5*DP_SIN2_1, |x|<2^-5.  */
+	.p2align 3
+L(DP_SIN2_0):
+	.long	0x5543d49d,0xbfc55555
+	.type L(DP_SIN2_0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_SIN2_0))
+
+	.p2align 3
+L(DP_SIN2_1):
+	.long	0x75cec8c5,0x3f8110f4
+	.type L(DP_SIN2_1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_SIN2_1))
+
+	.p2align 3
+L(DP_ZERONE):
+	.long	0x00000000,0x00000000	/* 0.0 */
+	.long	0x00000000,0xbff00000	/* -1.0 */
+	.type L(DP_ZERONE), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ZERONE))
+
+	.p2align 3
+L(DP_ONES):
+	.long	0x00000000,0x3ff00000	/* +1.0 */
+	.long	0x00000000,0xbff00000	/* -1.0 */
+	.type L(DP_ONES), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ONES))
+
+/* Coefficients of polynomial
+   for sin(t)~=t+t^3*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4)))), |t|<Pi/4.  */
+	.p2align 3
+L(DP_S3):
+	.long	0x64e6b5b4,0x3ec71d72
+	.type L(DP_S3), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S3))
+
+	.p2align 3
+L(DP_S1):
+	.long	0x10c2688b,0x3f811111
+	.type L(DP_S1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S1))
+
+	.p2align 3
+L(DP_S4):
+	.long	0x1674b58a,0xbe5a947e
+	.type L(DP_S4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S4))
+
+	.p2align 3
+L(DP_S2):
+	.long	0x8b4bd1f9,0xbf2a019f
+	.type L(DP_S2), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S2))
+
+	.p2align 3
+L(DP_S0):
+	.long	0x55551cd9,0xbfc55555
+	.type L(DP_S0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S0))
+
+	.p2align 3
+L(DP_SMALL):
+	.long	0x00000000,0x3cd00000	/* 2^(-50) */
+	.type L(DP_SMALL), @object
+	ASM_SIZE_DIRECTIVE(L(DP_SMALL))
+
+/* Coefficients of polynomial
+   for cos(t)~=1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4)))), |t|<Pi/4.  */
+	.p2align 3
+L(DP_C3):
+	.long	0x9ac43cc0,0x3efa00eb
+	.type L(DP_C3), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C3))
+
+	.p2align 3
+L(DP_C1):
+	.long	0x545c50c7,0x3fa55555
+	.type L(DP_C1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C1))
+
+	.p2align 3
+L(DP_C4):
+	.long	0xdd8844d7,0xbe923c97
+	.type L(DP_C4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C4))
+
+	.p2align 3
+L(DP_C2):
+	.long	0x348b6874,0xbf56c16b
+	.type L(DP_C2), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C2))
+
+	.p2align 3
+L(DP_C0):
+	.long	0xfffe98ae,0xbfdfffff
+	.type L(DP_C0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C0))
+
+	.p2align 3
+L(DP_PIO4):
+	.long	0x54442d18,0x3fe921fb	/* Pi/4 */
+	.type L(DP_PIO4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4))
+
+	.p2align 3
+L(DP_2POW52):
+	.long	0x00000000,0x43300000	/* +2^52 */
+	.long	0x00000000,0xc3300000	/* -2^52 */
+	.type L(DP_2POW52), @object
+	ASM_SIZE_DIRECTIVE(L(DP_2POW52))
+
+	.p2align 3
+L(DP_INVPIO4):
+	.long	0x6dc9c883,0x3ff45f30	/* 4/Pi */
+	.type L(DP_INVPIO4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_INVPIO4))
+
+	.p2align 3
+L(DP_PIO4HI):
+	.long	0x54000000,0xbfe921fb	/* High part of Pi/4 */
+	.type L(DP_PIO4HI), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4HI))
+
+	.p2align 3
+L(DP_PIO4LO):
+	.long	0x11A62633,0xbe010b46	/* Low part of Pi/4 */
+	.type L(DP_PIO4LO), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4LO))
+
+	.p2align 2
+L(SP_INVPIO4):
+	.long	0x3fa2f983		/* 4/Pi */
+	.type L(SP_INVPIO4), @object
+	ASM_SIZE_DIRECTIVE(L(SP_INVPIO4))
+
+	.p2align 4
+L(DP_ABS_MASK): /* Mask for getting DP absolute value */
+	.long	0xffffffff,0x7fffffff
+	.long	0xffffffff,0x7fffffff
+	.type L(DP_ABS_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ABS_MASK))
+
+	.p2align 3
+L(DP_HI_MASK): /* Mask for getting high 21 bits of DP value */
+	.long	0x00000000,0xffffffff
+	.type L(DP_HI_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(DP_HI_MASK))
+
+weak_alias (__sinf, sinf)
diff --git a/sysdeps/i386/i686/fpu/multiarch/s_sinf.c b/sysdeps/i386/i686/fpu/multiarch/s_sinf.c
new file mode 100644
index 0000000000..80b2fd6875
--- /dev/null
+++ b/sysdeps/i386/i686/fpu/multiarch/s_sinf.c
@@ -0,0 +1,28 @@
+/* Multiple versions of sinf
+   Copyright (C) 2012 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <init-arch.h>
+
+extern float __sinf_sse2 (float);
+extern float __sinf_ia32 (float);
+float __sinf (float);
+
+libm_ifunc (__sinf, HAS_SSE2 ? __sinf_sse2 : __sinf_ia32);
+weak_alias (__sinf, sinf);
+#define SINF __sinf_ia32
+#include <sysdeps/ieee754/flt-32/s_sinf.c>
diff --git a/sysdeps/ieee754/flt-32/s_cosf.c b/sysdeps/ieee754/flt-32/s_cosf.c
index f4bd81904b..864ab27a4f 100644
--- a/sysdeps/ieee754/flt-32/s_cosf.c
+++ b/sysdeps/ieee754/flt-32/s_cosf.c
@@ -23,7 +23,13 @@ static char rcsid[] = "$NetBSD: s_cosf.c,v 1.4 1995/05/10 20:47:03 jtc Exp $";
 
 static const float one=1.0;
 
-float __cosf(float x)
+#ifndef COSF
+# define COSF_FUNC __cosf
+#else
+# define COSF_FUNC COSF
+#endif
+
+float COSF_FUNC(float x)
 {
 	float y[2],z=0.0;
 	int32_t n,ix;
@@ -53,4 +59,7 @@ float __cosf(float x)
 	    }
 	}
 }
+
+#ifndef COSF
 weak_alias (__cosf, cosf)
+#endif
diff --git a/sysdeps/ieee754/flt-32/s_sinf.c b/sysdeps/ieee754/flt-32/s_sinf.c
index 02fa29f23b..916e345571 100644
--- a/sysdeps/ieee754/flt-32/s_sinf.c
+++ b/sysdeps/ieee754/flt-32/s_sinf.c
@@ -21,7 +21,13 @@ static char rcsid[] = "$NetBSD: s_sinf.c,v 1.4 1995/05/10 20:48:16 jtc Exp $";
 #include <math.h>
 #include <math_private.h>
 
-float __sinf(float x)
+#ifndef SINF
+# define SINF_FUNC __sinf
+#else
+# define SINF_FUNC SINF
+#endif
+
+float SINF_FUNC(float x)
 {
 	float y[2],z=0.0;
 	int32_t n, ix;
@@ -51,4 +57,7 @@ float __sinf(float x)
 	    }
 	}
 }
+
+#ifndef SINF
 weak_alias (__sinf, sinf)
+#endif
diff --git a/sysdeps/x86_64/fpu/libm-test-ulps b/sysdeps/x86_64/fpu/libm-test-ulps
index 8a38221133..eb02222d23 100644
--- a/sysdeps/x86_64/fpu/libm-test-ulps
+++ b/sysdeps/x86_64/fpu/libm-test-ulps
@@ -1208,6 +1208,8 @@ ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "cos_downward (10) == -0.8390715290764524522588639478240648345199":
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "cos_downward (2) == -0.4161468365471423869975682295007621897660":
@@ -1239,6 +1241,8 @@ ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "cos_downward (9) == -0.9111302618846769883682947111811653112463":
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 
@@ -1285,15 +1289,22 @@ ildouble: 1
 ldouble: 1
 
 # cos_upward
+Test "cos_upward (1) == 0.5403023058681397174009366074429766037323":
+float: 1
+ifloat: 1
 Test "cos_upward (10) == -0.8390715290764524522588639478240648345199":
 float: 1
 ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "cos_upward (2) == -0.4161468365471423869975682295007621897660":
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "cos_upward (3) == -0.9899924966004454572715727947312613023937":
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "cos_upward (4) == -0.6536436208636119146391681830977503814241":
@@ -1311,6 +1322,8 @@ Test "cos_upward (7) == 0.7539022543433046381411975217191820122183":
 float: 1
 ifloat: 1
 Test "cos_upward (8) == -0.1455000338086135258688413818311946826093":
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "cos_upward (9) == -0.9111302618846769883682947111811653112463":
@@ -2319,6 +2332,8 @@ Test "sin_downward (8) == 0.9893582466233817778081235982452886721164":
 ildouble: 1
 ldouble: 1
 Test "sin_downward (9) == 0.4121184852417565697562725663524351793439":
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 
@@ -2374,6 +2389,8 @@ Test "sin_upward (1) == 0.8414709848078965066525023216302989996226":
 float: 1
 ifloat: 1
 Test "sin_upward (10) == -0.5440211108893698134047476618513772816836":
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "sin_upward (2) == 0.9092974268256816953960198659117448427023":
@@ -2382,6 +2399,8 @@ ifloat: 2
 ildouble: 1
 ldouble: 1
 Test "sin_upward (3) == 0.1411200080598672221007448028081102798469":
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "sin_upward (4) == -0.7568024953079282513726390945118290941359":
@@ -2390,11 +2409,19 @@ ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "sin_upward (5) == -0.9589242746631384688931544061559939733525":
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 Test "sin_upward (6) == -0.2794154981989258728115554466118947596280":
 ildouble: 1
 ldouble: 1
+Test "sin_upward (7) == 0.6569865987187890903969990915936351779369":
+float: 1
+ifloat: 1
+Test "sin_upward (8) == 0.9893582466233817778081235982452886721164":
+float: 1
+ifloat: 1
 Test "sin_upward (9) == 0.4121184852417565697562725663524351793439":
 float: 1
 ifloat: 1
@@ -3052,8 +3079,8 @@ ildouble: 1
 ldouble: 1
 
 Function: "cos_upward":
-float: 2
-ifloat: 2
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 
@@ -3412,8 +3439,8 @@ ildouble: 1
 ldouble: 1
 
 Function: "sin_upward":
-float: 2
-ifloat: 2
+float: 1
+ifloat: 1
 ildouble: 1
 ldouble: 1
 
diff --git a/sysdeps/x86_64/fpu/s_cosf.S b/sysdeps/x86_64/fpu/s_cosf.S
new file mode 100644
index 0000000000..7eeefe8f30
--- /dev/null
+++ b/sysdeps/x86_64/fpu/s_cosf.S
@@ -0,0 +1,535 @@
+/* Optimized cosf function.
+   Copyright (C) 2012 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#define __need_Emath
+#include <bits/errno.h>
+
+/* Short algorithm description:
+ *
+ *  1) if |x| == 0: return 1.0-|x|.
+ *  2) if |x| <  2^-27: return 1.0-|x|.
+ *  3) if |x| <  2^-5 : return 1.0+x^2*DP_COS2_0+x^4*DP_COS2_1.
+ *  4) if |x| <   Pi/4: return 1.0+x^2*(C0+x^2*(C1+x^2*(C2+x^2*(C3+x^2*C4)))).
+ *  5) if |x| < 9*Pi/4:
+ *      5.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0x0e, n=k+3,
+ *           t=|x|-j*Pi/4.
+ *      5.2) Reconstruction:
+ *          s = (-1.0)^((n>>2)&1)
+ *          if(n&2 != 0) {
+ *              using cos(t) polynomial for |t|<Pi/4, result is
+ *              s     * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4))))).
+ *          } else {
+ *              using sin(t) polynomial for |t|<Pi/4, result is
+ *              s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4))))).
+ *          }
+ *  6) if |x| < 2^23, large args:
+ *      6.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+3,
+ *           t=|x|-j*Pi/4.
+ *      6.2) Reconstruction same as (5.2).
+ *  7) if |x| >= 2^23, very large args:
+ *      7.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+3,
+ *           t=|x|-j*Pi/4.
+ *      7.2) Reconstruction same as (5.2).
+ *  8) if x is Inf, return x-x, and set errno=EDOM.
+ *  9) if x is NaN, return x-x.
+ *
+ * Special cases:
+ *  cos(+-0)==1 not raising inexact/underflow,
+ *  cos(subnormal) raises inexact/underflow
+ *  cos(min_normalized) raises inexact/underflow
+ *  cos(normalized) raises inexact
+ *  cos(Inf) = NaN, raises invalid, sets errno to EDOM
+ *  cos(NaN) = NaN
+ */
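
A worked instance of steps 6.1/6.2 for x = 10, chosen because cos(10) also appears in the libm-test-ulps update above (values are approximate and the snippet is illustrative, not part of the patch):

#include <math.h>
#include <stdio.h>

int
main (void)
{
  const double pio4 = 0x1.921fb54442d18p-1;	/* Pi/4 */
  int k = (int) (10.0 / pio4);		/* 12 */
  int j = (k + 1) & 0xfffffffe;		/* 12 */
  int n = k + 3;			/* 15 */
  double t = 10.0 - j * pio4;		/* ~0.5752220 */
  /* n&2 != 0 selects the cos(t) polynomial and ((n>>2)&1) == 1 gives
     s = -1, so cos(10) ~= -cos(t) ~= -0.8390715.  */
  printf ("t = %f, reconstructed cos(10) = %f\n", t, -cos (t));
  return 0;
}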
+
+	.text
+ENTRY(__cosf)
+	/* Input: single precision x in %xmm0 */
+
+	movd	%xmm0, %eax		/* Bits of x */
+	movaps	%xmm0, %xmm7		/* Copy of x */
+	cvtss2sd %xmm0, %xmm0		/* DP x */
+	movss	L(SP_ABS_MASK)(%rip), %xmm3
+	andl	$0x7fffffff, %eax	/* |x| */
+
+	cmpl	$0x3f490fdb, %eax	/* |x|<Pi/4?  */
+	jb	L(arg_less_pio4)
+
+	/* Here if |x|>=Pi/4 */
+	andps	%xmm7, %xmm3		/* SP |x| */
+	andpd	L(DP_ABS_MASK)(%rip), %xmm0	/* DP |x| */
+	movss	L(SP_INVPIO4)(%rip), %xmm2	/* SP 1/(Pi/4) */
+
+	cmpl	$0x40e231d6, %eax	/* |x|<9*Pi/4?  */
+	jae	L(large_args)
+
+	/* Here if Pi/4<=|x|<9*Pi/4 */
+	mulss	%xmm3, %xmm2		/* SP |x|/(Pi/4) */
+	cvttss2si %xmm2, %eax		/* k, number of Pi/4 in x */
+	lea	L(PIO4J)(%rip), %rsi
+	addl	$1, %eax		/* k+1 */
+	movl	$0x0e, %edx
+	andl	%eax, %edx		/* j = (k+1)&0x0e */
+	addl	$2, %eax		/* n */
+	subsd	(%rsi,%rdx,8), %xmm0	/* t = |x| - j * Pi/4 */
+
+L(reconstruction):
+	/* Input: %eax=n, %xmm0=t */
+	testl	$2, %eax		/* n&2 != 0?  */
+	jz	L(sin_poly)
+
+/*L(cos_poly):*/
+	/* Here if cos(x) calculated using cos(t) polynomial for |t|<Pi/4:
+	 * y = t*t; z = y*y;
+	 * s = sign(x) * (-1.0)^((n>>2)&1)
+	 * result = s     * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4)))))
+	 */
+	shrl	$2, %eax		/* n>>2 */
+	mulsd	%xmm0, %xmm0		/* y=t^2 */
+	andl	$1, %eax		/* (n>>2)&1 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=t^4 */
+
+	movsd	L(DP_C4)(%rip), %xmm4	/* C4 */
+	mulsd	%xmm0, %xmm4		/* z*C4 */
+	movsd	L(DP_C3)(%rip), %xmm3	/* C3 */
+	mulsd	%xmm0, %xmm3		/* z*C3 */
+	lea	L(DP_ONES)(%rip), %rsi
+	addsd	L(DP_C2)(%rip), %xmm4	/* C2+z*C4 */
+	mulsd	%xmm0, %xmm4		/* z*(C2+z*C4) */
+	addsd	L(DP_C1)(%rip), %xmm3	/* C1+z*C3 */
+	mulsd	%xmm0, %xmm3		/* z*(C1+z*C3) */
+	addsd	L(DP_C0)(%rip), %xmm4	/* C0+z*(C2+z*C4) */
+	mulsd	%xmm1, %xmm4		/* y*(C0+z*(C2+z*C4)) */
+
+	addsd	%xmm4, %xmm3		/* y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	/* 1.0+y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	addsd	L(DP_ONES)(%rip), %xmm3
+
+	mulsd	(%rsi,%rax,8), %xmm3	/* DP result */
+	cvtsd2ss %xmm3, %xmm0		/* SP result */
+	ret
+
+	.p2align	4
+L(sin_poly):
+	/* Here if cos(x) calculated using sin(t) polynomial for |t|<Pi/4:
+	 * y = t*t; z = y*y;
+	 * s = sign(x) * (-1.0)^((n>>2)&1)
+	 * result = s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4)))))
+	 */
+
+	movaps	%xmm0, %xmm4		/* t */
+	shrl	$2, %eax		/* n>>2 */
+	mulsd	%xmm0, %xmm0		/* y=t^2 */
+	andl	$1, %eax		/* (n>>2)&1 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=t^4 */
+
+	movsd	L(DP_S4)(%rip), %xmm2	/* S4 */
+	mulsd	%xmm0, %xmm2		/* z*S4 */
+	movsd	L(DP_S3)(%rip), %xmm3	/* S3 */
+	mulsd	%xmm0, %xmm3		/* z*S3 */
+	lea	L(DP_ONES)(%rip), %rsi
+	addsd	L(DP_S2)(%rip), %xmm2	/* S2+z*S4 */
+	mulsd	%xmm0, %xmm2		/* z*(S2+z*S4) */
+	addsd	L(DP_S1)(%rip), %xmm3	/* S1+z*S3 */
+	mulsd	%xmm0, %xmm3		/* z*(S1+z*S3) */
+	addsd	L(DP_S0)(%rip), %xmm2	/* S0+z*(S2+z*S4) */
+	mulsd	%xmm1, %xmm2		/* y*(S0+z*(S2+z*S4)) */
+	/* t*s, where s = sign(x) * (-1.0)^((n>>2)&1) */
+	mulsd	(%rsi,%rax,8), %xmm4
+	/* y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	addsd	%xmm2, %xmm3
+	/* t*s*y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	mulsd	%xmm4, %xmm3
+	/* t*s*(1.0+y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	addsd	%xmm4, %xmm3
+	cvtsd2ss %xmm3, %xmm0		/* SP result */
+	ret
+
+
+
+
+
+	.p2align	4
+L(large_args):
+	/* Here if |x|>=9*Pi/4 */
+	cmpl	$0x7f800000, %eax	/* x is Inf or NaN?  */
+	jae	L(arg_inf_or_nan)
+
+	/* Here if finite |x|>=9*Pi/4 */
+	cmpl	$0x4b000000, %eax	/* |x|<2^23?  */
+	jae	L(very_large_args)
+
+	/* Here if 9*Pi/4<=|x|<2^23 */
+	movsd	L(DP_INVPIO4)(%rip), %xmm1 /* 1/(Pi/4) */
+	mulsd	%xmm0, %xmm1		/* |x|/(Pi/4) */
+	cvttsd2si %xmm1, %eax		/* k=trunc(|x|/(Pi/4)) */
+	addl	$1, %eax		/* k+1 */
+	movl	%eax, %edx
+	andl	$0xfffffffe, %edx	/* j=(k+1)&0xfffffffe */
+	cvtsi2sdl %edx, %xmm4		/* DP j */
+	movsd	L(DP_PIO4HI)(%rip), %xmm2 /* -PIO4HI = high part of -Pi/4 */
+	mulsd	%xmm4, %xmm2		/* -j*PIO4HI */
+	movsd	L(DP_PIO4LO)(%rip), %xmm3 /* -PIO4LO = low part of -Pi/4 */
+	addsd	%xmm2, %xmm0		/* |x| - j*PIO4HI */
+	addl	$2, %eax		/* n */
+	mulsd	%xmm3, %xmm4		/* j*PIO4LO */
+	addsd	%xmm4, %xmm0		/* t = |x| - j*PIO4HI - j*PIO4LO */
+	jmp	L(reconstruction)
+
+	.p2align	4
+L(very_large_args):
+	/* Here if finite |x|>=2^23 */
+
+	/* bitpos = (ix>>23) - BIAS_32 + 59; */
+	shrl	$23, %eax		/* eb = biased exponent of x */
+	/* bitpos = eb - 0x7f + 59, where 0x7f is exponent bias */
+	subl	$68, %eax
+	movl	$28, %ecx		/* %cl=28 */
+	movl	%eax, %edx		/* bitpos copy */
+
+	/* j = bitpos/28; */
+	div	%cl			/* j in register %al=%ax/%cl */
+	movapd	%xmm0, %xmm3		/* |x| */
+	/* clear unneeded remainder from %ah */
+	andl	$0xff, %eax
+
+	imull	$28, %eax, %ecx		/* j*28 */
+	lea	L(_FPI)(%rip), %rsi
+	movsd	L(DP_HI_MASK)(%rip), %xmm4 /* DP_HI_MASK */
+	movapd	%xmm0, %xmm5		/* |x| */
+	mulsd	-16(%rsi,%rax,8), %xmm3	/* tmp3 = FPI[j-2]*|x| */
+	movapd	%xmm0, %xmm1		/* |x| */
+	mulsd	-8(%rsi,%rax,8), %xmm5	/* tmp2 = FPI[j-1]*|x| */
+	mulsd	(%rsi,%rax,8), %xmm0	/* tmp0 = FPI[j]*|x| */
+	addl	$19, %ecx		/* j*28+19 */
+	mulsd	8(%rsi,%rax,8), %xmm1	/* tmp1 = FPI[j+1]*|x| */
+	cmpl	%ecx, %edx		/* bitpos>=j*28+19?  */
+	jl	L(very_large_skip1)
+
+	/* Here if bitpos>=j*28+19 */
+	andpd	%xmm3, %xmm4		/* HI(tmp3) */
+	subsd	%xmm4, %xmm3		/* tmp3 = tmp3 - HI(tmp3) */
+L(very_large_skip1):
+
+	movsd	L(DP_2POW52)(%rip), %xmm6
+	movapd	%xmm5, %xmm2		/* tmp2 copy */
+	addsd	%xmm3, %xmm5		/* tmp5 = tmp3 + tmp2 */
+	movl	$1, %edx
+	addsd	%xmm5, %xmm6		/* tmp6 = tmp5 + 2^52 */
+	movsd	8+L(DP_2POW52)(%rip), %xmm4
+	movd	%xmm6, %eax		/* k = I64_LO(tmp6); */
+	addsd	%xmm6, %xmm4		/* tmp4 = tmp6 - 2^52 */
+	comisd	%xmm5, %xmm4		/* tmp4 > tmp5?  */
+	jbe	L(very_large_skip2)
+
+	/* Here if tmp4 > tmp5 */
+	subl	$1, %eax		/* k-- */
+	addsd	8+L(DP_ONES)(%rip), %xmm4 /* tmp4 -= 1.0 */
+L(very_large_skip2):
+
+	andl	%eax, %edx		/* k&1 */
+	lea	L(DP_ZERONE)(%rip), %rsi
+	subsd	%xmm4, %xmm3		/* tmp3 -= tmp4 */
+	addsd	(%rsi,%rdx,8), %xmm3	/* t  = DP_ZERONE[k&1] + tmp3 */
+	addsd	%xmm2, %xmm3		/* t += tmp2 */
+	addsd	%xmm3, %xmm0		/* t += tmp0 */
+	addl	$3, %eax		/* n=k+3 */
+	addsd	%xmm1, %xmm0		/* t += tmp1 */
+	mulsd	L(DP_PIO4)(%rip), %xmm0	/* t *= Pi/4 */
+
+	jmp	L(reconstruction)	/* end of very_large_args path */
+
+
+	.p2align	4
+L(arg_less_pio4):
+	/* Here if |x|<Pi/4 */
+	cmpl	$0x3d000000, %eax	/* |x|<2^-5?  */
+	jl	L(arg_less_2pn5)
+
+	/* Here if 2^-5<=|x|<Pi/4 */
+	mulsd	%xmm0, %xmm0		/* y=x^2 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=x^4 */
+	movsd	L(DP_C4)(%rip), %xmm3	/* C4 */
+	mulsd	%xmm0, %xmm3		/* z*C4 */
+	movsd	L(DP_C3)(%rip), %xmm5	/* C3 */
+	mulsd	%xmm0, %xmm5		/* z*C3 */
+	addsd	L(DP_C2)(%rip), %xmm3	/* C2+z*C4 */
+	mulsd	%xmm0, %xmm3		/* z*(C2+z*C4) */
+	addsd	L(DP_C1)(%rip), %xmm5	/* C1+z*C3 */
+	mulsd	%xmm0, %xmm5		/* z*(C1+z*C3) */
+	addsd	L(DP_C0)(%rip), %xmm3	/* C0+z*(C2+z*C4) */
+	mulsd	%xmm1, %xmm3		/* y*(C0+z*(C2+z*C4)) */
+	/* y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	addsd	%xmm5, %xmm3
+	/* 1.0 + y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	addsd	L(DP_ONES)(%rip), %xmm3
+	cvtsd2ss %xmm3, %xmm0		/* SP result */
+	ret
+
+	.p2align	4
+L(arg_less_2pn5):
+	/* Here if |x|<2^-5 */
+	cmpl	$0x32000000, %eax	/* |x|<2^-27?  */
+	jl	L(arg_less_2pn27)
+
+	/* Here if 2^-27<=|x|<2^-5 */
+	mulsd	%xmm0, %xmm0		/* DP x^2 */
+	movsd	L(DP_COS2_1)(%rip), %xmm3 /* DP DP_COS2_1 */
+	mulsd	%xmm0, %xmm3		/* DP x^2*DP_COS2_1 */
+	addsd	L(DP_COS2_0)(%rip), %xmm3 /* DP DP_COS2_0+x^2*DP_COS2_1 */
+	mulsd	%xmm0, %xmm3		/* DP x^2*DP_COS2_0+x^4*DP_COS2_1 */
+	/* DP 1.0+x^2*DP_COS2_0+x^4*DP_COS2_1 */
+	addsd	L(DP_ONES)(%rip), %xmm3
+	cvtsd2ss %xmm3, %xmm0		/* SP result */
+	ret
+
+	.p2align	4
+L(arg_less_2pn27):
+	/* Here if |x|<2^-27 */
+	andps	L(SP_ABS_MASK)(%rip),%xmm7 /* |x| */
+	movss	L(SP_ONE)(%rip), %xmm0	/* 1.0 */
+	subss	%xmm7, %xmm0		/* result is 1.0-|x| */
+	ret
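
For these tiny arguments cos(x) rounds to 1.0 in single precision, and computing
the result as 1.0 - |x| instead of loading the constant raises the inexact
exception exactly when x != 0.  A hedged C equivalent of the three instructions
above:

#include <math.h>

static float
cosf_tiny_sketch (float x)	/* assumes |x| < 2^-27 */
{
  return 1.0f - fabsf (x);	/* rounds to 1.0f; inexact iff x != 0 */
}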
+
+	.p2align	4
+L(arg_inf_or_nan):
+	/* Here if |x| is Inf or NAN */
+	jne	L(skip_errno_setting)	/* taken when x is NaN */
+
+	/* Here if x is Inf. Set errno to EDOM.  */
+	call	JUMPTARGET(__errno_location)
+	lea	(%rax), %rax
+	movl	$EDOM, (%rax)
+
+	.p2align	4
+L(skip_errno_setting):
+	/* Here if |x| is Inf or NAN. Continued.  */
+	movaps	%xmm7, %xmm0		/* load x */
+	subss	%xmm0, %xmm0		/* Result is NaN */
+	ret
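
A rough C rendering of this Inf/NaN tail (a sketch; the real code reaches errno
through __errno_location): an infinite argument is a domain error, so errno is set
to EDOM, and in both cases the returned value is x - x, which is a NaN and raises
the invalid exception when x is Inf.

#include <errno.h>
#include <math.h>

static float
cosf_special_sketch (float x)	/* x is Inf or NaN */
{
  if (isinf (x))
    errno = EDOM;		/* only Inf sets errno; NaN does not */
  return x - x;		/* NaN; raises invalid when x is Inf */
}
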
+END(__cosf)
+
+
+
+	.section .rodata, "a"
+	.p2align 3
+L(PIO4J): /* Table of j*Pi/4, for j=0,1,..,10 */
+	.long	0x00000000,0x00000000
+	.long	0x54442d18,0x3fe921fb
+	.long	0x54442d18,0x3ff921fb
+	.long	0x7f3321d2,0x4002d97c
+	.long	0x54442d18,0x400921fb
+	.long	0x2955385e,0x400f6a7a
+	.long	0x7f3321d2,0x4012d97c
+	.long	0xe9bba775,0x4015fdbb
+	.long	0x54442d18,0x401921fb
+	.long	0xbeccb2bb,0x401c463a
+	.long	0x2955385e,0x401f6a7a
+	.type L(PIO4J), @object
+	ASM_SIZE_DIRECTIVE(L(PIO4J))
+
+	.p2align 3
+L(_FPI): /* 4/Pi broken into sum of positive DP values */
+	.long	0x00000000,0x00000000
+	.long	0x6c000000,0x3ff45f30
+	.long	0x2a000000,0x3e3c9c88
+	.long	0xa8000000,0x3c54fe13
+	.long	0xd0000000,0x3aaf47d4
+	.long	0x6c000000,0x38fbb81b
+	.long	0xe0000000,0x3714acc9
+	.long	0x7c000000,0x3560e410
+	.long	0x56000000,0x33bca2c7
+	.long	0xac000000,0x31fbd778
+	.long	0xe0000000,0x300b7246
+	.long	0xe8000000,0x2e5d2126
+	.long	0x48000000,0x2c970032
+	.long	0xe8000000,0x2ad77504
+	.long	0xe0000000,0x290921cf
+	.long	0xb0000000,0x274deb1c
+	.long	0xe0000000,0x25829a73
+	.long	0xbe000000,0x23fd1046
+	.long	0x10000000,0x2224baed
+	.long	0x8e000000,0x20709d33
+	.long	0x80000000,0x1e535a2f
+	.long	0x64000000,0x1cef904e
+	.long	0x30000000,0x1b0d6398
+	.long	0x24000000,0x1964ce7d
+	.long	0x16000000,0x17b908bf
+	.type L(_FPI), @object
+	ASM_SIZE_DIRECTIVE(L(_FPI))
+
+/* Coefficients of polynomial
+   for cos(x)~=1.0+x^2*DP_COS2_0+x^4*DP_COS2_1, |x|<2^-5.  */
+	.p2align 3
+L(DP_COS2_0):
+	.long	0xff5cc6fd,0xbfdfffff
+	.type L(DP_COS2_0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_COS2_0))
+
+	.p2align 3
+L(DP_COS2_1):
+	.long	0xb178dac5,0x3fa55514
+	.type L(DP_COS2_1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_COS2_1))
+
+	.p2align 3
+L(DP_ZERONE):
+	.long	0x00000000,0x00000000	/* 0.0 */
+	.long	0x00000000,0xbff00000	/* -1.0 */
+	.type L(DP_ZERONE), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ZERONE))
+
+	.p2align 3
+L(DP_ONES):
+	.long	0x00000000,0x3ff00000	/* +1.0 */
+	.long	0x00000000,0xbff00000	/* -1.0 */
+	.type L(DP_ONES), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ONES))
+
+/* Coefficients of polynomial
+   for sin(t)~=t+t^3*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4)))), |t|<Pi/4.  */
+	.p2align 3
+L(DP_S3):
+	.long	0x64e6b5b4,0x3ec71d72
+	.type L(DP_S3), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S3))
+
+	.p2align 3
+L(DP_S1):
+	.long	0x10c2688b,0x3f811111
+	.type L(DP_S1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S1))
+
+	.p2align 3
+L(DP_S4):
+	.long	0x1674b58a,0xbe5a947e
+	.type L(DP_S4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S4))
+
+	.p2align 3
+L(DP_S2):
+	.long	0x8b4bd1f9,0xbf2a019f
+	.type L(DP_S2), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S2))
+
+	.p2align 3
+L(DP_S0):
+	.long	0x55551cd9,0xbfc55555
+	.type L(DP_S0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S0))
+
+/* Coefficients of polynomial
+   for cos(t)~=1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4)))), |t|<Pi/4.  */
+	.p2align 3
+L(DP_C3):
+	.long	0x9ac43cc0,0x3efa00eb
+	.type L(DP_C3), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C3))
+
+	.p2align 3
+L(DP_C1):
+	.long	0x545c50c7,0x3fa55555
+	.type L(DP_C1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C1))
+
+	.p2align 3
+L(DP_C4):
+	.long	0xdd8844d7,0xbe923c97
+	.type L(DP_C4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C4))
+
+	.p2align 3
+L(DP_C2):
+	.long	0x348b6874,0xbf56c16b
+	.type L(DP_C2), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C2))
+
+	.p2align 3
+L(DP_C0):
+	.long	0xfffe98ae,0xbfdfffff
+	.type L(DP_C0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C0))
+
+	.p2align 3
+L(DP_PIO4):
+	.long	0x54442d18,0x3fe921fb	/* Pi/4 */
+	.type L(DP_PIO4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4))
+
+	.p2align 3
+L(DP_2POW52):
+	.long	0x00000000,0x43300000	/* +2^52 */
+	.long	0x00000000,0xc3300000	/* -2^52 */
+	.type L(DP_2POW52), @object
+	ASM_SIZE_DIRECTIVE(L(DP_2POW52))
+
+	.p2align 3
+L(DP_INVPIO4):
+	.long	0x6dc9c883,0x3ff45f30	/* 4/Pi */
+	.type L(DP_INVPIO4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_INVPIO4))
+
+	.p2align 3
+L(DP_PIO4HI):
+	.long	0x54000000,0xbfe921fb	/* High part of -Pi/4 */
+	.type L(DP_PIO4HI), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4HI))
+
+	.p2align 3
+L(DP_PIO4LO):
+	.long	0x11A62633,0xbe010b46	/* Low part of -Pi/4 */
+	.type L(DP_PIO4LO), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4LO))
+
+	.p2align 2
+L(SP_INVPIO4):
+	.long	0x3fa2f983		/* 4/Pi */
+	.type L(SP_INVPIO4), @object
+	ASM_SIZE_DIRECTIVE(L(SP_INVPIO4))
+
+	.p2align 4
+L(DP_ABS_MASK): /* Mask for getting DP absolute value */
+	.long	0xffffffff,0x7fffffff
+	.long	0xffffffff,0x7fffffff
+	.type L(DP_ABS_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ABS_MASK))
+
+	.p2align 3
+L(DP_HI_MASK): /* Mask for getting high 32 bits of DP value */
+	.long	0x00000000,0xffffffff
+	.type L(DP_HI_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(DP_HI_MASK))
+
+	.p2align 4
+L(SP_ABS_MASK): /* Mask for getting SP absolute value */
+	.long	0x7fffffff,0x7fffffff
+	.long	0x7fffffff,0x7fffffff
+	.type L(SP_ABS_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(SP_ABS_MASK))
+
+	.p2align 2
+L(SP_ONE):
+	.long	0x3f800000		/* 1.0 */
+	.type L(SP_ONE), @object
+	ASM_SIZE_DIRECTIVE(L(SP_ONE))
+
+weak_alias(__cosf, cosf)
diff --git a/sysdeps/x86_64/fpu/s_sinf.S b/sysdeps/x86_64/fpu/s_sinf.S
new file mode 100644
index 0000000000..295ba3df85
--- /dev/null
+++ b/sysdeps/x86_64/fpu/s_sinf.S
@@ -0,0 +1,558 @@
+/* Optimized sinf function.
+   Copyright (C) 2012 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#define __need_Emath
+#include <bits/errno.h>
+
+/* Short algorithm description:
+ *
+ *  1) if |x| == 0: return x.
+ *  2) if |x| <  2^-27: return x-x*DP_SMALL, raise underflow only when needed.
+ *  3) if |x| <  2^-5 : return x+x^3*DP_SIN2_0+x^5*DP_SIN2_1.
+ *  4) if |x| <   Pi/4: return x+x^3*(S0+x^2*(S1+x^2*(S2+x^2*(S3+x^2*S4)))).
+ *  5) if |x| < 9*Pi/4:
+ *      5.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0x0e, n=k+1,
+ *           t=|x|-j*Pi/4.
+ *      5.2) Reconstruction:
+ *          s = sign(x) * (-1.0)^((n>>2)&1)
+ *          if ((n & 2) != 0) {
+ *              using cos(t) polynomial for |t|<Pi/4, result is
+ *              s     * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4))))).
+ *          } else {
+ *              using sin(t) polynomial for |t|<Pi/4, result is
+ *              s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4))))).
+ *          }
+ *  6) if |x| < 2^23, large args:
+ *      6.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+1,
+ *           t=|x|-j*Pi/4.
+ *      6.2) Reconstruction same as (5.2).
+ *  7) if |x| >= 2^23, very large args:
+ *      7.1) Range reduction: k=trunc(|x|/(Pi/4)), j=(k+1)&0xfffffffe, n=k+1,
+ *           t=|x|-j*Pi/4.
+ *      7.2) Reconstruction same as (5.2).
+ *  8) if x is Inf, return x-x, and set errno=EDOM.
+ *  9) if x is NaN, return x-x.
+ *
+ * Special cases:
+ *  sin(+-0)==+-0 not raising inexact/underflow,
+ *  sin(subnormal) raises inexact/underflow
+ *  sin(min_normalized) raises inexact/underflow
+ *  sin(normalized) raises inexact
+ *  sin(Inf) = NaN, raises invalid, sets errno to EDOM
+ *  sin(NaN) = NaN
+ */
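
The entry code below implements this dispatch by comparing the raw
single-precision bit pattern of |x| against fixed thresholds.  A C sketch of the
resulting classification, using the same bit patterns as the compares; it only
labels which of the paths 1)-9) is taken and is not the shipped code:

#include <stdint.h>
#include <string.h>

enum sinf_path { PATH_ZERO, PATH_TINY, PATH_SMALL, PATH_PIO4,
		 PATH_MEDIUM, PATH_LARGE, PATH_VERY_LARGE, PATH_SPECIAL };

static enum sinf_path
classify (float x)
{
  uint32_t ix;
  memcpy (&ix, &x, sizeof ix);
  uint32_t ax = ix & 0x7fffffff;	/* bits of |x| */

  if (ax == 0)			return PATH_ZERO;	/* 1) +-0          */
  if (ax < 0x32000000)		return PATH_TINY;	/* 2) |x| < 2^-27  */
  if (ax < 0x3d000000)		return PATH_SMALL;	/* 3) |x| < 2^-5   */
  if (ax < 0x3f490fdb)		return PATH_PIO4;	/* 4) |x| < Pi/4   */
  if (ax < 0x40e231d6)		return PATH_MEDIUM;	/* 5) |x| < 9*Pi/4 */
  if (ax < 0x4b000000)		return PATH_LARGE;	/* 6) |x| < 2^23   */
  if (ax < 0x7f800000)		return PATH_VERY_LARGE;	/* 7) finite       */
  return PATH_SPECIAL;					/* 8)-9) Inf, NaN  */
}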
+
+	.text
+ENTRY(__sinf)
+	/* Input: single precision x in %xmm0 */
+
+	movd	%xmm0, %eax		/* Bits of x */
+	movaps	%xmm0, %xmm7		/* Copy of x */
+	cvtss2sd %xmm0, %xmm0		/* DP x */
+	movss	L(SP_ABS_MASK)(%rip), %xmm3
+	movl	%eax, %edi		/* Copy of x bits */
+	andl	$0x7fffffff, %eax	/* |x| */
+
+	cmpl	$0x3f490fdb, %eax	/* |x|<Pi/4?  */
+	jb	L(arg_less_pio4)
+
+	/* Here if |x|>=Pi/4 */
+	andps	%xmm7, %xmm3		/* SP |x| */
+	andpd	L(DP_ABS_MASK)(%rip),%xmm0 /* DP |x| */
+	movss	L(SP_INVPIO4)(%rip), %xmm2 /* SP 1/(Pi/4) */
+
+	cmpl	$0x40e231d6, %eax	/* |x|<9*Pi/4?  */
+	jae	L(large_args)
+
+	/* Here if Pi/4<=|x|<9*Pi/4 */
+	mulss	%xmm3, %xmm2		/* SP |x|/(Pi/4) */
+	movl	%edi, %ecx		/* Load x */
+	cvttss2si %xmm2, %eax		/* k, number of Pi/4 in x */
+	lea	L(PIO4J)(%rip), %rsi
+	shrl	$31, %ecx		/* sign of x */
+	addl	$1, %eax		/* k+1 */
+	movl	$0x0e, %edx
+	andl	%eax, %edx		/* j = (k+1)&0x0e */
+	subsd	(%rsi,%rdx,8), %xmm0	/* t = |x| - j * Pi/4 */
+
+L(reconstruction):
+	/* Input: %eax=n, %xmm0=t, %ecx=sign(x) */
+	testl	$2, %eax		/* n&2 != 0?  */
+	jz	L(sin_poly)
+
+/*L(cos_poly):*/
+	/* Here if sin(x) calculated using cos(t) polynomial for |t|<Pi/4:
+	 * y = t*t; z = y*y;
+	 * s = sign(x) * (-1.0)^((n>>2)&1)
+	 * result = s     * (1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4)))))
+	 */
+	shrl	$2, %eax		/* n>>2 */
+	mulsd	%xmm0, %xmm0		/* y=t^2 */
+	andl	$1, %eax		/* (n>>2)&1 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=t^4 */
+
+	movsd	L(DP_C4)(%rip), %xmm4	/* C4 */
+	mulsd	%xmm0, %xmm4		/* z*C4 */
+	xorl	%eax, %ecx		/* (-1.0)^((n>>2)&1) XOR sign(x) */
+	movsd	L(DP_C3)(%rip), %xmm3	/* C3 */
+	mulsd	%xmm0, %xmm3		/* z*C3 */
+	lea	L(DP_ONES)(%rip), %rsi
+	addsd	L(DP_C2)(%rip), %xmm4	/* C2+z*C4 */
+	mulsd	%xmm0, %xmm4		/* z*(C2+z*C4) */
+	addsd	L(DP_C1)(%rip), %xmm3	/* C1+z*C3 */
+	mulsd	%xmm0, %xmm3		/* z*(C1+z*C3) */
+	addsd	L(DP_C0)(%rip), %xmm4	/* C0+z*(C2+z*C4) */
+	mulsd	%xmm1, %xmm4		/* y*(C0+z*(C2+z*C4)) */
+
+	/* y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	addsd	%xmm4, %xmm3
+	/* 1.0+y*(C0+y*(C1+y*(C2+y*(C3+y*C4)))) */
+	addsd	L(DP_ONES)(%rip), %xmm3
+
+	mulsd	(%rsi,%rcx,8), %xmm3	/* DP result */
+	cvtsd2ss %xmm3, %xmm0 		/* SP result */
+	ret
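
Both reconstruction branches pick the final sign without a branch: the sign bit of
x is XORed with bit 2 of n and the result indexes L(DP_ONES) = {+1.0, -1.0}.  A
minimal C sketch of that selection:

static double
select_sign (unsigned int sign_of_x, unsigned int n)	/* sign_of_x is 0 or 1 */
{
  static const double ones[2] = { 1.0, -1.0 };	/* mirrors L(DP_ONES) */
  /* s = sign(x) * (-1.0)^((n>>2)&1) */
  return ones[((n >> 2) & 1) ^ sign_of_x];
}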
+
+	.p2align	4
+L(sin_poly):
+	/* Here if sin(x) calculated using sin(t) polynomial for |t|<Pi/4:
+	 * y = t*t; z = y*y;
+	 * s = sign(x) * (-1.0)^((n>>2)&1)
+	 * result = s * t * (1.0+t^2*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4)))))
+	 */
+
+	movaps	%xmm0, %xmm4		/* t */
+	shrl	$2, %eax		/* n>>2 */
+	mulsd	%xmm0, %xmm0		/* y=t^2 */
+	andl	$1, %eax		/* (n>>2)&1 */
+	movaps	%xmm0, %xmm1		/* y */
+	xorl	%eax, %ecx		/* (-1.0)^((n>>2)&1) XOR sign(x) */
+	mulsd	%xmm0, %xmm0		/* z=t^4 */
+
+	movsd	L(DP_S4)(%rip), %xmm2	/* S4 */
+	mulsd	%xmm0, %xmm2		/* z*S4 */
+	movsd	L(DP_S3)(%rip), %xmm3	/* S3 */
+	mulsd	%xmm0, %xmm3		/* z*S3 */
+	lea	L(DP_ONES)(%rip), %rsi
+	addsd	L(DP_S2)(%rip), %xmm2	/* S2+z*S4 */
+	mulsd	%xmm0, %xmm2		/* z*(S2+z*S4) */
+	addsd	L(DP_S1)(%rip), %xmm3	/* S1+z*S3 */
+	mulsd	%xmm0, %xmm3		/* z*(S1+z*S3) */
+	addsd	L(DP_S0)(%rip), %xmm2	/* S0+z*(S2+z*S4) */
+	mulsd	%xmm1, %xmm2		/* y*(S0+z*(S2+z*S4)) */
+	/* t*s, where s = sign(x) * (-1.0)^((n>>2)&1) */
+	mulsd	(%rsi,%rcx,8), %xmm4
+	/* y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	addsd	%xmm2, %xmm3
+	/* t*s*y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	mulsd	%xmm4, %xmm3
+	/* t*s*(1.0+y*(S0+y*(S1+y*(S2+y*(S3+y*S4))))) */
+	addsd	%xmm4, %xmm3
+	cvtsd2ss %xmm3, %xmm0 		/* SP result */
+	ret
+
+
+	.p2align	4
+L(large_args):
+	/* Here if |x|>=9*Pi/4 */
+	cmpl	$0x7f800000, %eax	/* x is Inf or NaN?  */
+	jae	L(arg_inf_or_nan)
+
+	/* Here if finite |x|>=9*Pi/4 */
+	cmpl	$0x4b000000, %eax	/* |x|<2^23?  */
+	jae	L(very_large_args)
+
+	/* Here if 9*Pi/4<=|x|<2^23 */
+	movsd	L(DP_INVPIO4)(%rip), %xmm1 /* 1/(Pi/4) */
+	mulsd	%xmm0, %xmm1		/* |x|/(Pi/4) */
+	cvttsd2si %xmm1, %eax		/* k=trunc(|x|/(Pi/4)) */
+	addl	$1, %eax		/* k+1 */
+	movl	%eax, %edx
+	andl	$0xfffffffe, %edx	/* j=(k+1)&0xfffffffe */
+	cvtsi2sdl %edx, %xmm4		/* DP j */
+	movl	%edi, %ecx		/* Load x */
+	movsd	L(DP_PIO4HI)(%rip), %xmm2 /* -PIO4HI = high part of -Pi/4 */
+	shrl	$31, %ecx		/* sign bit of x */
+	mulsd	%xmm4, %xmm2		/* -j*PIO4HI */
+	movsd	L(DP_PIO4LO)(%rip), %xmm3 /* -PIO4LO = low part of -Pi/4 */
+	addsd	%xmm2, %xmm0		/* |x| - j*PIO4HI */
+	mulsd	%xmm3, %xmm4		/* j*PIO4LO */
+	addsd	%xmm4, %xmm0		/* t = |x| - j*PIO4HI - j*PIO4LO */
+	jmp	L(reconstruction)
+
+	.p2align	4
+L(very_large_args):
+	/* Here if finite |x|>=2^23 */
+
+	/* bitpos = (ix>>23) - BIAS_32 + 59; */
+	shrl	$23, %eax		/* eb = biased exponent of x */
+	/* bitpos = eb - 0x7f + 59, where 0x7f is exponent bias */
+	subl	$68, %eax
+	movl	$28, %ecx		/* %cl=28 */
+	movl	%eax, %edx		/* bitpos copy */
+
+	/* j = bitpos/28; */
+	div	%cl			/* j in register %al=%ax/%cl */
+	movapd	%xmm0, %xmm3		/* |x| */
+	/* clear unneeded remainder from %ah */
+	andl	$0xff, %eax
+
+	imull	$28, %eax, %ecx		/* j*28 */
+	lea	L(_FPI)(%rip), %rsi
+	movsd	L(DP_HI_MASK)(%rip), %xmm4 /* DP_HI_MASK */
+	movapd	%xmm0, %xmm5		/* |x| */
+	mulsd	-16(%rsi,%rax,8), %xmm3	/* tmp3 = FPI[j-2]*|x| */
+	movapd	%xmm0, %xmm1		/* |x| */
+	mulsd	-8(%rsi,%rax,8), %xmm5	/* tmp2 = FPI[j-1]*|x| */
+	mulsd	(%rsi,%rax,8), %xmm0	/* tmp0 = FPI[j]*|x| */
+	addl	$19, %ecx		/* j*28+19 */
+	mulsd	8(%rsi,%rax,8), %xmm1	/* tmp1 = FPI[j+1]*|x| */
+	cmpl	%ecx, %edx		/* bitpos>=j*28+19?  */
+	jl	L(very_large_skip1)
+
+	/* Here if bitpos>=j*28+19 */
+	andpd	%xmm3, %xmm4		/* HI(tmp3) */
+	subsd	%xmm4, %xmm3		/* tmp3 = tmp3 - HI(tmp3) */
+L(very_large_skip1):
+
+	movsd	L(DP_2POW52)(%rip), %xmm6
+	movapd	%xmm5, %xmm2		/* tmp2 copy */
+	addsd	%xmm3, %xmm5		/* tmp5 = tmp3 + tmp2 */
+	movl	$1, %edx
+	addsd	%xmm5, %xmm6		/* tmp6 = tmp5 + 2^52 */
+	movsd	8+L(DP_2POW52)(%rip), %xmm4
+	movd	%xmm6, %eax		/* k = I64_LO(tmp6); */
+	addsd	%xmm6, %xmm4		/* tmp4 = tmp6 - 2^52 */
+	movl	%edi, %ecx		/* Load x */
+	comisd	%xmm5, %xmm4		/* tmp4 > tmp5?  */
+	jbe	L(very_large_skip2)
+
+	/* Here if tmp4 > tmp5 */
+	subl	$1, %eax		/* k-- */
+	addsd	8+L(DP_ONES)(%rip), %xmm4 /* tmp4 -= 1.0 */
+L(very_large_skip2):
+
+	andl	%eax, %edx		/* k&1 */
+	lea	L(DP_ZERONE)(%rip), %rsi
+	subsd	%xmm4, %xmm3		/* tmp3 -= tmp4 */
+	addsd	(%rsi,%rdx,8), %xmm3	/* t  = DP_ZERONE[k&1] + tmp3 */
+	addsd	%xmm2, %xmm3		/* t += tmp2 */
+	shrl	$31, %ecx		/* sign of x */
+	addsd	%xmm3, %xmm0		/* t += tmp0 */
+	addl	$1, %eax		/* n=k+1 */
+	addsd	%xmm1, %xmm0		/* t += tmp1 */
+	mulsd	L(DP_PIO4)(%rip), %xmm0	/* t *= PI04 */
+
+	jmp	L(reconstruction)	/* end of very_large_args path */
+
+
+	.p2align	4
+L(arg_less_pio4):
+	/* Here if |x|<Pi/4 */
+	cmpl	$0x3d000000, %eax	/* |x|<2^-5?  */
+	jl	L(arg_less_2pn5)
+
+	/* Here if 2^-5<=|x|<Pi/4 */
+	movaps	%xmm0, %xmm3		/* x */
+	mulsd	%xmm0, %xmm0		/* y=x^2 */
+	movaps	%xmm0, %xmm1		/* y */
+	mulsd	%xmm0, %xmm0		/* z=x^4 */
+	movsd	L(DP_S4)(%rip), %xmm4	/* S4 */
+	mulsd	%xmm0, %xmm4		/* z*S4 */
+	movsd	L(DP_S3)(%rip), %xmm5	/* S3 */
+	mulsd	%xmm0, %xmm5		/* z*S3 */
+	addsd	L(DP_S2)(%rip), %xmm4	/* S2+z*S4 */
+	mulsd	%xmm0, %xmm4		/* z*(S2+z*S4) */
+	addsd	L(DP_S1)(%rip), %xmm5	/* S1+z*S3 */
+	mulsd	%xmm0, %xmm5		/* z*(S1+z*S3) */
+	addsd	L(DP_S0)(%rip), %xmm4	/* S0+z*(S2+z*S4) */
+	mulsd	%xmm1, %xmm4		/* y*(S0+z*(S2+z*S4)) */
+	mulsd	%xmm3, %xmm5		/* x*z*(S1+z*S3) */
+	mulsd	%xmm3, %xmm4		/* x*y*(S0+z*(S2+z*S4)) */
+	/* x*y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	addsd	%xmm5, %xmm4
+	/* x + x*y*(S0+y*(S1+y*(S2+y*(S3+y*S4)))) */
+	addsd	%xmm4, %xmm3
+	cvtsd2ss %xmm3, %xmm0		/* SP result */
+	ret
+
+	.p2align	4
+L(arg_less_2pn5):
+	/* Here if |x|<2^-5 */
+	cmpl	$0x32000000, %eax	/* |x|<2^-27?  */
+	jl	L(arg_less_2pn27)
+
+	/* Here if 2^-27<=|x|<2^-5 */
+	movaps	%xmm0, %xmm1		/* DP x */
+	mulsd	%xmm0, %xmm0		/* DP x^2 */
+	movsd	L(DP_SIN2_1)(%rip), %xmm3 /* DP DP_SIN2_1 */
+	mulsd	%xmm0, %xmm3		/* DP x^2*DP_SIN2_1 */
+	addsd	L(DP_SIN2_0)(%rip), %xmm3 /* DP DP_SIN2_0+x^2*DP_SIN2_1 */
+	mulsd	%xmm0, %xmm3		/* DP x^2*DP_SIN2_0+x^4*DP_SIN2_1 */
+	mulsd	%xmm1, %xmm3		/* DP x^3*DP_SIN2_0+x^5*DP_SIN2_1 */
+	addsd	%xmm1, %xmm3		/* DP x+x^3*DP_SIN2_0+x^5*DP_SIN2_1 */
+	cvtsd2ss %xmm3, %xmm0		/* SP result */
+	ret
+
+	.p2align	4
+L(arg_less_2pn27):
+	cmpl	$0, %eax		/* x=0?  */
+	je	L(arg_zero)		/* in case x=0 return sin(+-0)==+-0 */
+	/* Here if |x|<2^-27 */
+	/*
+	 * Special cases here:
+	 *  sin(subnormal) raises inexact/underflow
+	 *  sin(min_normalized) raises inexact/underflow
+	 *  sin(normalized) raises inexact
+	 */
+	movaps	%xmm0, %xmm3		/* Copy of DP x */
+	mulsd	L(DP_SMALL)(%rip), %xmm0 /* x*DP_SMALL */
+	subsd	%xmm0, %xmm3		/* Result is x-x*DP_SMALL */
+	cvtsd2ss %xmm3, %xmm0		/* Result converted to SP */
+	ret
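
Here sin(x) rounds to x in single precision, and computing x - x*2^-50 in double
before converting back yields that value while still raising inexact (and
underflow when the result is tiny), matching the special cases listed in the
header comment.  A hedged C equivalent (DP_SMALL is the 2^-50 constant defined
below):

static float
sinf_tiny_sketch (float x)	/* assumes 0 < |x| < 2^-27 */
{
  double dx = (double) x;
  return (float) (dx - dx * 0x1p-50);
}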
+
+	.p2align	4
+L(arg_zero):
+	movaps	%xmm7, %xmm0		/* SP x */
+	ret
+
+	.p2align	4
+L(arg_inf_or_nan):
+	/* Here if |x| is Inf or NAN */
+	jne	L(skip_errno_setting)	/* taken when x is NaN */
+
+	/* Here if x is Inf. Set errno to EDOM.  */
+	call	JUMPTARGET(__errno_location)
+	lea	(%rax), %rax
+	movl	$EDOM, (%rax)
+
+	.p2align	4
+L(skip_errno_setting):
+	/* Here if |x| is Inf or NAN. Continued.  */
+	movaps	%xmm7, %xmm0		/* load x */
+	subss	%xmm0, %xmm0		/* Result is NaN */
+	ret
+END(__sinf)
+
+
+
+	.section .rodata, "a"
+	.p2align 3
+L(PIO4J): /* Table of j*Pi/4, for j=0,1,..,10 */
+	.long	0x00000000,0x00000000
+	.long	0x54442d18,0x3fe921fb
+	.long	0x54442d18,0x3ff921fb
+	.long	0x7f3321d2,0x4002d97c
+	.long	0x54442d18,0x400921fb
+	.long	0x2955385e,0x400f6a7a
+	.long	0x7f3321d2,0x4012d97c
+	.long	0xe9bba775,0x4015fdbb
+	.long	0x54442d18,0x401921fb
+	.long	0xbeccb2bb,0x401c463a
+	.long	0x2955385e,0x401f6a7a
+	.type L(PIO4J), @object
+	ASM_SIZE_DIRECTIVE(L(PIO4J))
+
+	.p2align 3
+L(_FPI): /* 4/Pi broken into sum of positive DP values */
+	.long	0x00000000,0x00000000
+	.long	0x6c000000,0x3ff45f30
+	.long	0x2a000000,0x3e3c9c88
+	.long	0xa8000000,0x3c54fe13
+	.long	0xd0000000,0x3aaf47d4
+	.long	0x6c000000,0x38fbb81b
+	.long	0xe0000000,0x3714acc9
+	.long	0x7c000000,0x3560e410
+	.long	0x56000000,0x33bca2c7
+	.long	0xac000000,0x31fbd778
+	.long	0xe0000000,0x300b7246
+	.long	0xe8000000,0x2e5d2126
+	.long	0x48000000,0x2c970032
+	.long	0xe8000000,0x2ad77504
+	.long	0xe0000000,0x290921cf
+	.long	0xb0000000,0x274deb1c
+	.long	0xe0000000,0x25829a73
+	.long	0xbe000000,0x23fd1046
+	.long	0x10000000,0x2224baed
+	.long	0x8e000000,0x20709d33
+	.long	0x80000000,0x1e535a2f
+	.long	0x64000000,0x1cef904e
+	.long	0x30000000,0x1b0d6398
+	.long	0x24000000,0x1964ce7d
+	.long	0x16000000,0x17b908bf
+	.type L(_FPI), @object
+	ASM_SIZE_DIRECTIVE(L(_FPI))
+
+/* Coefficients of polynomial
+   for sin(x)~=x+x^3*DP_SIN2_0+x^5*DP_SIN2_1, |x|<2^-5.  */
+	.p2align 3
+L(DP_SIN2_0):
+	.long	0x5543d49d,0xbfc55555
+	.type L(DP_SIN2_0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_SIN2_0))
+
+	.p2align 3
+L(DP_SIN2_1):
+	.long	0x75cec8c5,0x3f8110f4
+	.type L(DP_SIN2_1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_SIN2_1))
+
+	.p2align 3
+L(DP_ZERONE):
+	.long	0x00000000,0x00000000	/* 0.0 */
+	.long	0x00000000,0xbff00000	/* -1.0 */
+	.type L(DP_ZERONE), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ZERONE))
+
+	.p2align 3
+L(DP_ONES):
+	.long	0x00000000,0x3ff00000	/* +1.0 */
+	.long	0x00000000,0xbff00000	/* -1.0 */
+	.type L(DP_ONES), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ONES))
+
+/* Coefficients of polynomial
+   for sin(t)~=t+t^3*(S0+t^2*(S1+t^2*(S2+t^2*(S3+t^2*S4)))), |t|<Pi/4.  */
+	.p2align 3
+L(DP_S3):
+	.long	0x64e6b5b4,0x3ec71d72
+	.type L(DP_S3), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S3))
+
+	.p2align 3
+L(DP_S1):
+	.long	0x10c2688b,0x3f811111
+	.type L(DP_S1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S1))
+
+	.p2align 3
+L(DP_S4):
+	.long	0x1674b58a,0xbe5a947e
+	.type L(DP_S4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S4))
+
+	.p2align 3
+L(DP_S2):
+	.long	0x8b4bd1f9,0xbf2a019f
+	.type L(DP_S2), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S2))
+
+	.p2align 3
+L(DP_S0):
+	.long	0x55551cd9,0xbfc55555
+	.type L(DP_S0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_S0))
+
+	.p2align 3
+L(DP_SMALL):
+	.long	0x00000000,0x3cd00000	/* 2^(-50) */
+	.type L(DP_SMALL), @object
+	ASM_SIZE_DIRECTIVE(L(DP_SMALL))
+
+/* Coefficients of polynomial
+   for cos(t)~=1.0+t^2*(C0+t^2*(C1+t^2*(C2+t^2*(C3+t^2*C4)))), |t|<Pi/4.  */
+	.p2align 3
+L(DP_C3):
+	.long	0x9ac43cc0,0x3efa00eb
+	.type L(DP_C3), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C3))
+
+	.p2align 3
+L(DP_C1):
+	.long	0x545c50c7,0x3fa55555
+	.type L(DP_C1), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C1))
+
+	.p2align 3
+L(DP_C4):
+	.long	0xdd8844d7,0xbe923c97
+	.type L(DP_C4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C4))
+
+	.p2align 3
+L(DP_C2):
+	.long	0x348b6874,0xbf56c16b
+	.type L(DP_C2), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C2))
+
+	.p2align 3
+L(DP_C0):
+	.long	0xfffe98ae,0xbfdfffff
+	.type L(DP_C0), @object
+	ASM_SIZE_DIRECTIVE(L(DP_C0))
+
+	.p2align 3
+L(DP_PIO4):
+	.long	0x54442d18,0x3fe921fb	/* Pi/4 */
+	.type L(DP_PIO4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4))
+
+	.p2align 3
+L(DP_2POW52):
+	.long	0x00000000,0x43300000	/* +2^52 */
+	.long	0x00000000,0xc3300000	/* -2^52 */
+	.type L(DP_2POW52), @object
+	ASM_SIZE_DIRECTIVE(L(DP_2POW52))
+
+	.p2align 3
+L(DP_INVPIO4):
+	.long	0x6dc9c883,0x3ff45f30	/* 4/Pi */
+	.type L(DP_INVPIO4), @object
+	ASM_SIZE_DIRECTIVE(L(DP_INVPIO4))
+
+	.p2align 3
+L(DP_PIO4HI):
+	.long	0x54000000,0xbfe921fb	/* High part of -Pi/4 */
+	.type L(DP_PIO4HI), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4HI))
+
+	.p2align 3
+L(DP_PIO4LO):
+	.long	0x11A62633,0xbe010b46	/* Low part of -Pi/4 */
+	.type L(DP_PIO4LO), @object
+	ASM_SIZE_DIRECTIVE(L(DP_PIO4LO))
+
+	.p2align 2
+L(SP_INVPIO4):
+	.long	0x3fa2f983		/* 4/Pi */
+	.type L(SP_INVPIO4), @object
+	ASM_SIZE_DIRECTIVE(L(SP_INVPIO4))
+
+	.p2align 4
+L(DP_ABS_MASK): /* Mask for getting DP absolute value */
+	.long	0xffffffff,0x7fffffff
+	.long	0xffffffff,0x7fffffff
+	.type L(DP_ABS_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(DP_ABS_MASK))
+
+	.p2align 3
+L(DP_HI_MASK): /* Mask for getting high 32 bits of DP value */
+	.long	0x00000000,0xffffffff
+	.type L(DP_HI_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(DP_HI_MASK))
+
+	.p2align 4
+L(SP_ABS_MASK): /* Mask for getting SP absolute value */
+	.long	0x7fffffff,0x7fffffff
+	.long	0x7fffffff,0x7fffffff
+	.type L(SP_ABS_MASK), @object
+	ASM_SIZE_DIRECTIVE(L(SP_ABS_MASK))
+
+weak_alias(__sinf, sinf)