about summary refs log tree commit diff
path: root/sysdeps
diff options
context:
space:
mode:
author    Joseph Myers <joseph@codesourcery.com>  2016-06-27 17:23:19 +0000
committer Joseph Myers <joseph@codesourcery.com>  2016-06-27 17:24:30 +0000
commit    26b0bf96000a825ad3381be52ebd8adf2afc785f (patch)
tree      4b150f0e6d04ab864f1f53ce136e7bd925553aae /sysdeps
parent    b87c1ec3fa398646f042a68f0ce0f7d09c1348c7 (diff)
download  glibc-26b0bf96000a825ad3381be52ebd8adf2afc785f.tar.gz
          glibc-26b0bf96000a825ad3381be52ebd8adf2afc785f.tar.xz
          glibc-26b0bf96000a825ad3381be52ebd8adf2afc785f.zip
Avoid "inexact" exceptions in i386/x86_64 ceil functions (bug 15479).
As discussed in
<https://sourceware.org/ml/libc-alpha/2016-05/msg00577.html>, TS
18661-1 disallows ceil, floor, round and trunc functions from raising
the "inexact" exception, in accordance with general IEEE 754 semantics
for when that exception is raised.  Fixing this for x87 floating point
is more complicated than for the other versions of these functions,
because they use the frndint instruction that raises "inexact" and
this can only be avoided by saving and restoring the whole
floating-point environment.

As I noted in
<https://sourceware.org/ml/libc-alpha/2016-06/msg00128.html>, I have
now implemented a GCC option -fno-fp-int-builtin-inexact for GCC 7,
such that GCC will inline these functions on x86, without caring about
"inexact", when the default -ffp-int-builtin-inexact is in effect.
This allows users to get optimized code depending on the options they
pass to the compiler, while making the out-of-line functions follow TS
18661-1 semantics and avoid "inexact".

This patch duly fixes the out-of-line ceil function implementations to
avoid "inexact", in the same way as the nearbyint implementations.

I do not know how the performance of implementations such as these
based on saving the environment and changing the rounding mode
temporarily compares to that of the C versions or SSE 4.1 versions (of
course, for 32-bit x86 SSE implementations still need to get the
return value in an x87 register); it's entirely possible other
implementations could be faster in some cases.

Tested for x86_64 and x86.

	[BZ #15479]
	* sysdeps/i386/fpu/s_ceil.S (__ceil): Save and restore
	floating-point environment rather than just control word.
	* sysdeps/i386/fpu/s_ceilf.S (__ceilf): Likewise.
	* sysdeps/i386/fpu/s_ceill.S (__ceill): Save and restore
	floating-point environment, with "invalid" exceptions merged in,
	rather than just control word.
	* sysdeps/x86_64/fpu/s_ceill.S (__ceill): Likewise.
	* math/libm-test.inc (ceil_test_data): Do not allow spurious
	"inexact" exceptions.
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/i386/fpu/s_ceil.S     12
-rw-r--r--  sysdeps/i386/fpu/s_ceilf.S    12
-rw-r--r--  sysdeps/i386/fpu/s_ceill.S    17
-rw-r--r--  sysdeps/x86_64/fpu/s_ceill.S  15
4 files changed, 33 insertions, 23 deletions
diff --git a/sysdeps/i386/fpu/s_ceil.S b/sysdeps/i386/fpu/s_ceil.S
index f32fa26d34..1226bb2f87 100644
--- a/sysdeps/i386/fpu/s_ceil.S
+++ b/sysdeps/i386/fpu/s_ceil.S
@@ -9,10 +9,10 @@ RCSID("$NetBSD: s_ceil.S,v 1.4 1995/05/08 23:52:13 jtc Exp $")
 
 ENTRY(__ceil)
 	fldl	4(%esp)
-	subl	$8,%esp
-	cfi_adjust_cfa_offset (8)
+	subl	$32,%esp
+	cfi_adjust_cfa_offset (32)
 
-	fstcw	4(%esp)			/* store fpu control word */
+	fnstenv	4(%esp)			/* store fpu environment */
 
 	/* We use here %edx although only the low 1 bits are defined.
 	   But none of the operations should care and they are faster
@@ -25,10 +25,10 @@ ENTRY(__ceil)
 
 	frndint				/* round */
 
-	fldcw	4(%esp)			/* restore original control word */
+	fldenv	4(%esp)			/* restore original environment */
 
-	addl	$8,%esp
-	cfi_adjust_cfa_offset (-8)
+	addl	$32,%esp
+	cfi_adjust_cfa_offset (-32)
 	ret
 END (__ceil)
 weak_alias (__ceil, ceil)
diff --git a/sysdeps/i386/fpu/s_ceilf.S b/sysdeps/i386/fpu/s_ceilf.S
index 4fe703b179..d345c0973b 100644
--- a/sysdeps/i386/fpu/s_ceilf.S
+++ b/sysdeps/i386/fpu/s_ceilf.S
@@ -9,10 +9,10 @@ RCSID("$NetBSD: s_ceilf.S,v 1.3 1995/05/08 23:52:44 jtc Exp $")
 
 ENTRY(__ceilf)
 	flds	4(%esp)
-	subl	$8,%esp
-	cfi_adjust_cfa_offset (8)
+	subl	$32,%esp
+	cfi_adjust_cfa_offset (32)
 
-	fstcw	4(%esp)			/* store fpu control word */
+	fnstenv	4(%esp)			/* store fpu environment */
 
 	/* We use here %edx although only the low 1 bits are defined.
 	   But none of the operations should care and they are faster
@@ -25,10 +25,10 @@ ENTRY(__ceilf)
 
 	frndint				/* round */
 
-	fldcw	4(%esp)			/* restore original control word */
+	fldenv	4(%esp)			/* restore original environment */
 
-	addl	$8,%esp
-	cfi_adjust_cfa_offset (-8)
+	addl	$32,%esp
+	cfi_adjust_cfa_offset (-32)
 	ret
 END (__ceilf)
 weak_alias (__ceilf, ceilf)
diff --git a/sysdeps/i386/fpu/s_ceill.S b/sysdeps/i386/fpu/s_ceill.S
index 4b272c522a..7c08f43b24 100644
--- a/sysdeps/i386/fpu/s_ceill.S
+++ b/sysdeps/i386/fpu/s_ceill.S
@@ -10,10 +10,10 @@ RCSID("$NetBSD: $")
 
 ENTRY(__ceill)
 	fldt	4(%esp)
-	subl	$8,%esp
-	cfi_adjust_cfa_offset (8)
+	subl	$32,%esp
+	cfi_adjust_cfa_offset (32)
 
-	fstcw	4(%esp)			/* store fpu control word */
+	fnstenv	4(%esp)			/* store fpu environment */
 
 	/* We use here %edx although only the low 1 bits are defined.
 	   But none of the operations should care and they are faster
@@ -26,10 +26,15 @@ ENTRY(__ceill)
 
 	frndint				/* round */
 
-	fldcw	4(%esp)			/* restore original control word */
+	/* Preserve "invalid" exceptions from sNaN input.  */
+	fnstsw
+	andl	$0x1, %eax
+	orl	%eax, 8(%esp)
 
-	addl	$8,%esp
-	cfi_adjust_cfa_offset (-8)
+	fldenv	4(%esp)			/* restore original environment */
+
+	addl	$32,%esp
+	cfi_adjust_cfa_offset (-32)
 	ret
 END (__ceill)
 weak_alias (__ceill, ceill)
diff --git a/sysdeps/x86_64/fpu/s_ceill.S b/sysdeps/x86_64/fpu/s_ceill.S
index 910c371d58..9d8b79dbee 100644
--- a/sysdeps/x86_64/fpu/s_ceill.S
+++ b/sysdeps/x86_64/fpu/s_ceill.S
@@ -11,20 +11,25 @@
 ENTRY(__ceill)
 	fldt	8(%rsp)
 
-	fstcw	-4(%rsp)		/* store fpu control word */
+	fnstenv	-28(%rsp)		/* store fpu environment */
 
 	/* We use here %edx although only the low 1 bits are defined.
 	   But none of the operations should care and they are faster
 	   than the 16 bit operations.  */
 	movl	$0x0800,%edx		/* round towards +oo */
-	orl	-4(%rsp),%edx
+	orl	-28(%rsp),%edx
 	andl	$0xfbff,%edx
-	movl	%edx,-8(%rsp)
-	fldcw	-8(%rsp)		/* load modified control word */
+	movl	%edx,-32(%rsp)
+	fldcw	-32(%rsp)		/* load modified control word */
 
 	frndint				/* round */
 
-	fldcw	-4(%rsp)		/* restore original control word */
+	/* Preserve "invalid" exceptions from sNaN input.  */
+	fnstsw
+	andl	$0x1, %eax
+	orl	%eax, -24(%rsp)
+
+	fldenv	-28(%rsp)		/* restore original environment */
 
 	ret
 END (__ceill)