author     Joseph Myers <joseph@codesourcery.com>    2016-06-27 17:25:47 +0000
committer  Joseph Myers <joseph@codesourcery.com>    2016-06-27 17:25:47 +0000
commit     623629de066dc2f404470e76ff074fc5ba643c6c (patch)
tree       8b320bfc1e37b0a2b6bc4419d453948c519e9d2a /sysdeps
parent     26b0bf96000a825ad3381be52ebd8adf2afc785f (diff)
Avoid "inexact" exceptions in i386/x86_64 floor functions (bug 15479).
As discussed in <https://sourceware.org/ml/libc-alpha/2016-05/msg00577.html>, TS 18661-1 disallows ceil, floor, round and trunc functions from raising the "inexact" exception, in accordance with general IEEE 754 semantics for when that exception is raised. Fixing this for x87 floating point is more complicated than for the other versions of these functions, because they use the frndint instruction, which raises "inexact", and this can only be avoided by saving and restoring the whole floating-point environment.

As I noted in <https://sourceware.org/ml/libc-alpha/2016-06/msg00128.html>, I have now implemented a GCC option -fno-fp-int-builtin-inexact for GCC 7, such that GCC will inline these functions on x86, without caring about "inexact", when the default -ffp-int-builtin-inexact is in effect. This allows users to get optimized code depending on the options they pass to the compiler, while making the out-of-line functions follow TS 18661-1 semantics and avoid "inexact".

This patch duly fixes the out-of-line floor function implementations to avoid "inexact", in the same way as the nearbyint implementations. I do not know how the performance of implementations such as these, based on saving the environment and changing the rounding mode temporarily, compares to that of the C versions or SSE 4.1 versions (of course, for 32-bit x86, SSE implementations still need to get the return value in an x87 register); it's entirely possible other implementations could be faster in some cases.

Tested for x86_64 and x86.

        [BZ #15479]
        * sysdeps/i386/fpu/s_floor.S (__floor): Save and restore
        floating-point environment rather than just control word.
        * sysdeps/i386/fpu/s_floorf.S (__floorf): Likewise.
        * sysdeps/i386/fpu/s_floorl.S (__floorl): Save and restore
        floating-point environment, with "invalid" exceptions merged in,
        rather than just control word.
        * sysdeps/x86_64/fpu/s_floorl.S (__floorl): Likewise.
        * math/libm-test.inc (floor_test_data): Do not allow spurious
        "inexact" exceptions.
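As a minimal stand-alone illustration of the requirement the libm-test change enforces (this is a sketch, not the math/libm-test.inc harness; building with something like -fno-builtin-floor may be needed so the compiler does not inline the call and the out-of-line libm function is actually exercised):

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  /* TS 18661-1: floor must not raise "inexact", even for a
     non-integer argument such as 1.5.  */
  feclearexcept (FE_ALL_EXCEPT);
  volatile double r = floor (1.5);
  if (fetestexcept (FE_INEXACT))
    puts ("FAIL: floor raised spurious \"inexact\"");
  else
    printf ("PASS: floor (1.5) = %g, \"inexact\" not raised\n", (double) r);
  return 0;
}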
Diffstat (limited to 'sysdeps')
-rw-r--r--   sysdeps/i386/fpu/s_floor.S    | 12
-rw-r--r--   sysdeps/i386/fpu/s_floorf.S   | 12
-rw-r--r--   sysdeps/i386/fpu/s_floorl.S   | 17
-rw-r--r--   sysdeps/x86_64/fpu/s_floorl.S | 15

4 files changed, 33 insertions, 23 deletions
diff --git a/sysdeps/i386/fpu/s_floor.S b/sysdeps/i386/fpu/s_floor.S
index 2d6287dc79..ed837dae40 100644
--- a/sysdeps/i386/fpu/s_floor.S
+++ b/sysdeps/i386/fpu/s_floor.S
@@ -9,10 +9,10 @@ RCSID("$NetBSD: s_floor.S,v 1.4 1995/05/09 00:01:59 jtc Exp $")
 ENTRY(__floor)
 	fldl	4(%esp)
-	subl	$8,%esp
-	cfi_adjust_cfa_offset (8)
+	subl	$32,%esp
+	cfi_adjust_cfa_offset (32)
-	fstcw	4(%esp)			/* store fpu control word */
+	fnstenv	4(%esp)			/* store fpu environment */
 	/* We use here %edx although only the low 1 bits are defined.
 	   But none of the operations should care and they are faster
@@ -25,10 +25,10 @@ ENTRY(__floor)
 	frndint				/* round */
-	fldcw	4(%esp)			/* restore original control word */
+	fldenv	4(%esp)			/* restore original environment */
-	addl	$8,%esp
-	cfi_adjust_cfa_offset (-8)
+	addl	$32,%esp
+	cfi_adjust_cfa_offset (-32)
 	ret
 END (__floor)
 weak_alias (__floor, floor)

diff --git a/sysdeps/i386/fpu/s_floorf.S b/sysdeps/i386/fpu/s_floorf.S
index e969fbe587..84b6f7ed99 100644
--- a/sysdeps/i386/fpu/s_floorf.S
+++ b/sysdeps/i386/fpu/s_floorf.S
@@ -9,10 +9,10 @@ RCSID("$NetBSD: s_floorf.S,v 1.3 1995/05/09 00:04:32 jtc Exp $")
 ENTRY(__floorf)
 	flds	4(%esp)
-	subl	$8,%esp
-	cfi_adjust_cfa_offset (8)
+	subl	$32,%esp
+	cfi_adjust_cfa_offset (32)
-	fstcw	4(%esp)			/* store fpu control word */
+	fnstenv	4(%esp)			/* store fpu environment */
 	/* We use here %edx although only the low 1 bits are defined.
 	   But none of the operations should care and they are faster
@@ -25,10 +25,10 @@ ENTRY(__floorf)
 	frndint				/* round */
-	fldcw	4(%esp)			/* restore original control word */
+	fldenv	4(%esp)			/* restore original environment */
-	addl	$8,%esp
-	cfi_adjust_cfa_offset (-8)
+	addl	$32,%esp
+	cfi_adjust_cfa_offset (-32)
 	ret
 END (__floorf)
 weak_alias (__floorf, floorf)

diff --git a/sysdeps/i386/fpu/s_floorl.S b/sysdeps/i386/fpu/s_floorl.S
index 1206554c4a..dc74a0c446 100644
--- a/sysdeps/i386/fpu/s_floorl.S
+++ b/sysdeps/i386/fpu/s_floorl.S
@@ -10,10 +10,10 @@ RCSID("$NetBSD: $")
 ENTRY(__floorl)
 	fldt	4(%esp)
-	subl	$8,%esp
-	cfi_adjust_cfa_offset (8)
+	subl	$32,%esp
+	cfi_adjust_cfa_offset (32)
-	fstcw	4(%esp)			/* store fpu control word */
+	fnstenv	4(%esp)			/* store fpu environment */
 	/* We use here %edx although only the low 1 bits are defined.
 	   But none of the operations should care and they are faster
@@ -26,10 +26,15 @@ ENTRY(__floorl)
 	frndint				/* round */
-	fldcw	4(%esp)			/* restore original control word */
+	/* Preserve "invalid" exceptions from sNaN input.  */
+	fnstsw
+	andl	$0x1, %eax
+	orl	%eax, 8(%esp)
-	addl	$8,%esp
-	cfi_adjust_cfa_offset (-8)
+	fldenv	4(%esp)			/* restore original environment */
+
+	addl	$32,%esp
+	cfi_adjust_cfa_offset (-32)
 	ret
 END (__floorl)
 weak_alias (__floorl, floorl)

diff --git a/sysdeps/x86_64/fpu/s_floorl.S b/sysdeps/x86_64/fpu/s_floorl.S
index f9ecc388df..535fdd8571 100644
--- a/sysdeps/x86_64/fpu/s_floorl.S
+++ b/sysdeps/x86_64/fpu/s_floorl.S
@@ -10,20 +10,25 @@
 ENTRY(__floorl)
 	fldt	8(%rsp)
-	fstcw	-4(%rsp)		/* store fpu control word */
+	fnstenv	-28(%rsp)		/* store fpu environment */
 	/* We use here %edx although only the low 1 bits are defined.
 	   But none of the operations should care and they are faster
 	   than the 16 bit operations.  */
 	movl	$0x400,%edx		/* round towards -oo */
-	orl	-4(%rsp),%edx
+	orl	-28(%rsp),%edx
 	andl	$0xf7ff,%edx
-	movl	%edx,-8(%rsp)
-	fldcw	-8(%rsp)		/* load modified control word */
+	movl	%edx,-32(%rsp)
+	fldcw	-32(%rsp)		/* load modified control word */
 	frndint				/* round */
-	fldcw	-4(%rsp)		/* restore original control word */
+	/* Preserve "invalid" exceptions from sNaN input.  */
+	fnstsw
+	andl	$0x1, %eax
+	orl	%eax, -24(%rsp)
+
+	fldenv	-28(%rsp)		/* restore original environment */
 	ret
 END (__floorl)
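For readers more comfortable with <fenv.h> than with x87 assembly, the following is a rough C analogue of what the patched __floorl code above does. It is an illustrative sketch only: my_floor is a hypothetical name, glibc's implementation stays in assembly, and strictly conforming C would also need "#pragma STDC FENV_ACCESS ON".

#include <fenv.h>
#include <math.h>

static double
my_floor (double x)
{
  fenv_t env;

  /* Save the whole floating-point environment and clear the exception
     flags (the assembly saves the environment with fnstenv).  */
  feholdexcept (&env);
  /* Force rounding towards minus infinity, as the 0x400 control-word
     bit does in the assembly.  */
  fesetround (FE_DOWNWARD);
  /* Round to an integer in the current (downward) rounding mode; like
     frndint this may raise "inexact", and "invalid" for an sNaN.  */
  double result = rint (x);
  int invalid = fetestexcept (FE_INVALID);
  /* Restore the saved environment, discarding the spurious "inexact".  */
  fesetenv (&env);
  /* Merge the "invalid" exception back in, as the fnstsw/orl sequence
     does for the saved status word.  */
  if (invalid)
    feraiseexcept (FE_INVALID);
  return result;
}

Whether an environment-save approach like this is faster or slower than the SSE 4.1 or generic C implementations is exactly the open performance question the commit message mentions.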