path: root/sysdeps/aarch64/fpu/fenv_private.h
author    Joseph Myers <joseph@codesourcery.com>  2018-08-28 20:48:49 +0000
committer Joseph Myers <joseph@codesourcery.com>  2018-08-28 20:48:49 +0000
commit    ff6b24501f70da7d6375d6f5929262b9509db39e (patch)
tree      cee9b22e4bf7880c23718a20e63dc64f659e79d4 /sysdeps/aarch64/fpu/fenv_private.h
parent    761404b74d9853ce1608195e24f25b78a910591a (diff)
Split fenv_private.h out of math_private.h more consistently.
On some architectures, the parts of math_private.h relating to the
floating-point environment are in a separate file fenv_private.h
included from math_private.h.  As this is purely an
architecture-specific convention used by several architectures,
however, all such architectures still need their own math_private.h,
even if it has nothing to do beyond #include <fenv_private.h>; and
x86_64 has the peculiarity of including the i386 file directly instead
of having a shared file in sysdeps/x86.

This patch makes the fenv_private.h name an architecture-independent
convention in glibc.  The include of fenv_private.h from
math_private.h becomes architecture-independent (until callers are
updated to include fenv_private.h directly so the include from
math_private.h is no longer needed).  Some architecture math_private.h
headers are removed if no longer needed, or renamed to fenv_private.h
if all they define belongs in that header; architecture fenv_private.h
headers are now required to #include_next <fenv_private.h>.  The i386
fenv_private.h file moves to sysdeps/x86/fpu/ to reflect how it is
actually shared with x86_64.  The generic math_private.h gets a new
include of <stdbool.h>, as needed for bool in some prototypes in that
header (previously that was indirectly included via include/fenv.h,
which now only gets included too late in math_private.h, after those
prototypes).
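After the change, the per-architecture headers follow a common
pattern: define or override whatever is architecture-specific, then
chain to the generic file with #include_next.  A schematic sketch
(ARCH_FENV_PRIVATE_H is a placeholder guard name; the real headers use
the guards listed in the ChangeLog below):

	#ifndef ARCH_FENV_PRIVATE_H
	#define ARCH_FENV_PRIVATE_H 1

	#include <fenv.h>

	/* Architecture-specific libc_fe* inline functions and macro
	   overrides go here.  */

	/* Pick up the generic definitions for anything not overridden.  */
	#include_next <fenv_private.h>

	#endif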

Tested for x86_64 and x86, and tested with build-many-glibcs.py that
installed stripped shared libraries are unchanged by the patch.

	* sysdeps/aarch64/fpu/fenv_private.h: New file.  Based on ....
	* sysdeps/aarch64/fpu/math_private.h: ... this file.  All contents
	moved to fenv_private.h except for ...
	(TOINT_INTRINSICS): Kept in math_private.h.
	(roundtoint): Likewise.
	(converttoint): Likewise.
	* sysdeps/arm/fenv_private.h: Change multiple-include guard to
	[ARM_FENV_PRIVATE_H].  Include next <fenv_private.h>.
	* sysdeps/arm/math_private.h: Remove.
	* sysdeps/generic/fenv_private.h: New file.  Contents moved from
	....
	* sysdeps/generic/math_private.h: ... this file.  Include
	<stdbool.h>.  Do not include <fenv.h> or <get-rounding-mode.h>.
	Include <fenv_private.h>.  Remove functions and macros moved to
	fenv_private.h.
	* sysdeps/i386/fpu/math_private.h: Remove.
	* sysdeps/mips/math_private.h: Move to ....
	* sysdeps/mips/fpu/fenv_private.h: ... here.  Change
	multiple-include guard to [MIPS_FENV_PRIVATE_H].  Remove
	[__mips_hard_float] conditional.  Include next <fenv_private.h>.
	* sysdeps/powerpc/fpu/fenv_private.h: Change multiple-include
	guard to [POWERPC_FENV_PRIVATE_H].  Include next <fenv_private.h>.
	* sysdeps/powerpc/fpu/math_private.h: Do not include
	<fenv_private.h>.
	* sysdeps/riscv/rvf/math_private.h: Move to ....
	* sysdeps/riscv/rvf/fenv_private.h: ... here.  Change
	multiple-include guard to [RISCV_FENV_PRIVATE_H].  Include next
	<fenv_private.h>.
	* sysdeps/sparc/fpu/fenv_private.h: Change multiple-include guard
	to [SPARC_FENV_PRIVATE_H].  Include next <fenv_private.h>.
	* sysdeps/sparc/fpu/math_private.h: Remove.
	* sysdeps/i386/fpu/fenv_private.h: Move to ....
	* sysdeps/x86/fpu/fenv_private.h: ... here.  Change
	multiple-include guard to [X86_FENV_PRIVATE_H].  Include next
	<fenv_private.h>.
	* sysdeps/x86_64/fpu/math_private.h: Do not include
	<sysdeps/i386/fpu/fenv_private.h>.
Diffstat (limited to 'sysdeps/aarch64/fpu/fenv_private.h')
-rw-r--r--  sysdeps/aarch64/fpu/fenv_private.h  303
1 file changed, 303 insertions, 0 deletions
diff --git a/sysdeps/aarch64/fpu/fenv_private.h b/sysdeps/aarch64/fpu/fenv_private.h
new file mode 100644
index 0000000000..e4f2483480
--- /dev/null
+++ b/sysdeps/aarch64/fpu/fenv_private.h
@@ -0,0 +1,303 @@
+/* Private floating point rounding and exceptions handling.  AArch64 version.
+   Copyright (C) 2014-2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef AARCH64_FENV_PRIVATE_H
+#define AARCH64_FENV_PRIVATE_H 1
+
+#include <fenv.h>
+#include <fpu_control.h>
+
+static __always_inline void
+libc_feholdexcept_aarch64 (fenv_t *envp)
+{
+  fpu_control_t fpcr;
+  fpu_control_t new_fpcr;
+  fpu_fpsr_t fpsr;
+  fpu_fpsr_t new_fpsr;
+
+  _FPU_GETCW (fpcr);
+  _FPU_GETFPSR (fpsr);
+  envp->__fpcr = fpcr;
+  envp->__fpsr = fpsr;
+
+  /* Clear exception flags and set all exceptions to non-stop.  */
+  new_fpcr = fpcr & ~(FE_ALL_EXCEPT << FE_EXCEPT_SHIFT);
+  new_fpsr = fpsr & ~FE_ALL_EXCEPT;
+
+  if (__glibc_unlikely (new_fpcr != fpcr))
+    _FPU_SETCW (new_fpcr);
+
+  if (new_fpsr != fpsr)
+    _FPU_SETFPSR (new_fpsr);
+}
+
+#define libc_feholdexcept  libc_feholdexcept_aarch64
+#define libc_feholdexceptf libc_feholdexcept_aarch64
+#define libc_feholdexceptl libc_feholdexcept_aarch64
+
+static __always_inline void
+libc_fesetround_aarch64 (int round)
+{
+  fpu_control_t fpcr;
+
+  _FPU_GETCW (fpcr);
+
+  /* Check whether rounding modes are different.  */
+  round = (fpcr ^ round) & _FPU_FPCR_RM_MASK;
+
+  /* Set new rounding mode if different.  */
+  if (__glibc_unlikely (round != 0))
+    _FPU_SETCW (fpcr ^ round);
+}
+
+#define libc_fesetround  libc_fesetround_aarch64
+#define libc_fesetroundf libc_fesetround_aarch64
+#define libc_fesetroundl libc_fesetround_aarch64
+
+static __always_inline void
+libc_feholdexcept_setround_aarch64 (fenv_t *envp, int round)
+{
+  fpu_control_t fpcr;
+  fpu_control_t new_fpcr;
+  fpu_fpsr_t fpsr;
+  fpu_fpsr_t new_fpsr;
+
+  _FPU_GETCW (fpcr);
+  _FPU_GETFPSR (fpsr);
+  envp->__fpcr = fpcr;
+  envp->__fpsr = fpsr;
+
+  /* Clear exception flags, set all exceptions to non-stop,
+     and set new rounding mode.  */
+  new_fpcr = fpcr & ~((FE_ALL_EXCEPT << FE_EXCEPT_SHIFT) | _FPU_FPCR_RM_MASK);
+  new_fpcr |= round;
+  new_fpsr = fpsr & ~FE_ALL_EXCEPT;
+
+  if (__glibc_unlikely (new_fpcr != fpcr))
+    _FPU_SETCW (new_fpcr);
+
+  if (new_fpsr != fpsr)
+    _FPU_SETFPSR (new_fpsr);
+}
+
+#define libc_feholdexcept_setround  libc_feholdexcept_setround_aarch64
+#define libc_feholdexcept_setroundf libc_feholdexcept_setround_aarch64
+#define libc_feholdexcept_setroundl libc_feholdexcept_setround_aarch64
+
+static __always_inline int
+libc_fetestexcept_aarch64 (int ex)
+{
+  fpu_fpsr_t fpsr;
+
+  _FPU_GETFPSR (fpsr);
+  return fpsr & ex & FE_ALL_EXCEPT;
+}
+
+#define libc_fetestexcept  libc_fetestexcept_aarch64
+#define libc_fetestexceptf libc_fetestexcept_aarch64
+#define libc_fetestexceptl libc_fetestexcept_aarch64
+
+static __always_inline void
+libc_fesetenv_aarch64 (const fenv_t *envp)
+{
+  fpu_control_t fpcr;
+  fpu_control_t new_fpcr;
+
+  _FPU_GETCW (fpcr);
+  new_fpcr = envp->__fpcr;
+
+  if (__glibc_unlikely (fpcr != new_fpcr))
+    _FPU_SETCW (new_fpcr);
+
+  _FPU_SETFPSR (envp->__fpsr);
+}
+
+#define libc_fesetenv  libc_fesetenv_aarch64
+#define libc_fesetenvf libc_fesetenv_aarch64
+#define libc_fesetenvl libc_fesetenv_aarch64
+#define libc_feresetround_noex  libc_fesetenv_aarch64
+#define libc_feresetround_noexf libc_fesetenv_aarch64
+#define libc_feresetround_noexl libc_fesetenv_aarch64
+
+static __always_inline int
+libc_feupdateenv_test_aarch64 (const fenv_t *envp, int ex)
+{
+  fpu_control_t fpcr;
+  fpu_control_t new_fpcr;
+  fpu_fpsr_t fpsr;
+  fpu_fpsr_t new_fpsr;
+  int excepts;
+
+  _FPU_GETCW (fpcr);
+  _FPU_GETFPSR (fpsr);
+
+  /* Merge current exception flags with the saved fenv.  */
+  excepts = fpsr & FE_ALL_EXCEPT;
+  new_fpcr = envp->__fpcr;
+  new_fpsr = envp->__fpsr | excepts;
+
+  if (__glibc_unlikely (fpcr != new_fpcr))
+    _FPU_SETCW (new_fpcr);
+
+  if (fpsr != new_fpsr)
+    _FPU_SETFPSR (new_fpsr);
+
+  /* Raise the exceptions if enabled in the new FP state.  */
+  if (__glibc_unlikely (excepts & (new_fpcr >> FE_EXCEPT_SHIFT)))
+    __feraiseexcept (excepts);
+
+  return excepts & ex;
+}
+
+#define libc_feupdateenv_test  libc_feupdateenv_test_aarch64
+#define libc_feupdateenv_testf libc_feupdateenv_test_aarch64
+#define libc_feupdateenv_testl libc_feupdateenv_test_aarch64
+
+static __always_inline void
+libc_feupdateenv_aarch64 (const fenv_t *envp)
+{
+  libc_feupdateenv_test_aarch64 (envp, 0);
+}
+
+#define libc_feupdateenv  libc_feupdateenv_aarch64
+#define libc_feupdateenvf libc_feupdateenv_aarch64
+#define libc_feupdateenvl libc_feupdateenv_aarch64
+
+static __always_inline void
+libc_feholdsetround_aarch64 (fenv_t *envp, int round)
+{
+  fpu_control_t fpcr;
+  fpu_fpsr_t fpsr;
+
+  _FPU_GETCW (fpcr);
+  _FPU_GETFPSR (fpsr);
+  envp->__fpcr = fpcr;
+  envp->__fpsr = fpsr;
+
+  /* Check whether rounding modes are different.  */
+  round = (fpcr ^ round) & _FPU_FPCR_RM_MASK;
+
+  /* Set new rounding mode if different.  */
+  if (__glibc_unlikely (round != 0))
+    _FPU_SETCW (fpcr ^ round);
+}
+
+#define libc_feholdsetround  libc_feholdsetround_aarch64
+#define libc_feholdsetroundf libc_feholdsetround_aarch64
+#define libc_feholdsetroundl libc_feholdsetround_aarch64
+
+static __always_inline void
+libc_feresetround_aarch64 (fenv_t *envp)
+{
+  fpu_control_t fpcr;
+  int round;
+
+  _FPU_GETCW (fpcr);
+
+  /* Check whether rounding modes are different.  */
+  round = (envp->__fpcr ^ fpcr) & _FPU_FPCR_RM_MASK;
+
+  /* Restore the rounding mode if it was changed.  */
+  if (__glibc_unlikely (round != 0))
+    _FPU_SETCW (fpcr ^ round);
+}
+
+#define libc_feresetround  libc_feresetround_aarch64
+#define libc_feresetroundf libc_feresetround_aarch64
+#define libc_feresetroundl libc_feresetround_aarch64
+
+/* We have support for rounding mode context.  */
+#define HAVE_RM_CTX 1
+
+static __always_inline void
+libc_feholdsetround_aarch64_ctx (struct rm_ctx *ctx, int r)
+{
+  fpu_control_t fpcr;
+  int round;
+
+  _FPU_GETCW (fpcr);
+  ctx->env.__fpcr = fpcr;
+
+  /* Check whether rounding modes are different.  */
+  round = (fpcr ^ r) & _FPU_FPCR_RM_MASK;
+  ctx->updated_status = round != 0;
+
+  /* Set the rounding mode if changed.  */
+  if (__glibc_unlikely (round != 0))
+    _FPU_SETCW (fpcr ^ round);
+}
+
+#define libc_feholdsetround_ctx		libc_feholdsetround_aarch64_ctx
+#define libc_feholdsetroundf_ctx	libc_feholdsetround_aarch64_ctx
+#define libc_feholdsetroundl_ctx	libc_feholdsetround_aarch64_ctx
+
+static __always_inline void
+libc_feresetround_aarch64_ctx (struct rm_ctx *ctx)
+{
+  /* Restore the rounding mode if updated.  */
+  if (__glibc_unlikely (ctx->updated_status))
+    _FPU_SETCW (ctx->env.__fpcr);
+}
+
+#define libc_feresetround_ctx		libc_feresetround_aarch64_ctx
+#define libc_feresetroundf_ctx		libc_feresetround_aarch64_ctx
+#define libc_feresetroundl_ctx		libc_feresetround_aarch64_ctx
+
+static __always_inline void
+libc_feholdsetround_noex_aarch64_ctx (struct rm_ctx *ctx, int r)
+{
+  fpu_control_t fpcr;
+  fpu_fpsr_t fpsr;
+  int round;
+
+  _FPU_GETCW (fpcr);
+  _FPU_GETFPSR (fpsr);
+  ctx->env.__fpcr = fpcr;
+  ctx->env.__fpsr = fpsr;
+
+  /* Check whether rounding modes are different.  */
+  round = (fpcr ^ r) & _FPU_FPCR_RM_MASK;
+  ctx->updated_status = round != 0;
+
+  /* Set the rounding mode if changed.  */
+  if (__glibc_unlikely (round != 0))
+    _FPU_SETCW (fpcr ^ round);
+}
+
+#define libc_feholdsetround_noex_ctx	libc_feholdsetround_noex_aarch64_ctx
+#define libc_feholdsetround_noexf_ctx	libc_feholdsetround_noex_aarch64_ctx
+#define libc_feholdsetround_noexl_ctx	libc_feholdsetround_noex_aarch64_ctx
+
+static __always_inline void
+libc_feresetround_noex_aarch64_ctx (struct rm_ctx *ctx)
+{
+  /* Restore the rounding mode if updated.  */
+  if (__glibc_unlikely (ctx->updated_status))
+    _FPU_SETCW (ctx->env.__fpcr);
+
+  /* Write new FPSR to restore exception flags.  */
+  _FPU_SETFPSR (ctx->env.__fpsr);
+}
+
+#define libc_feresetround_noex_ctx	libc_feresetround_noex_aarch64_ctx
+#define libc_feresetround_noexf_ctx	libc_feresetround_noex_aarch64_ctx
+#define libc_feresetround_noexl_ctx	libc_feresetround_noex_aarch64_ctx
+
+#include_next <fenv_private.h>
+
+#endif
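
For context on how the HAVE_RM_CTX helpers above are consumed: the
generic fenv_private.h builds the SET_RESTORE_ROUND* macros on top of
libc_feholdsetround*_ctx and libc_feresetround*_ctx, letting a libm
function switch the rounding mode for one scope and have the previous
mode restored automatically on scope exit.  A usage sketch
(example_to_nearest and some_computation are hypothetical names used
only for illustration):

	#include <fenv.h>
	#include <math_private.h>	/* currently also pulls in fenv_private.h */

	double
	example_to_nearest (double x)
	{
	  double r;
	  /* Save the current FPCR and switch to round-to-nearest; the saved
	     rounding mode is restored when this scope is left.  */
	  SET_RESTORE_ROUND (FE_TONEAREST);
	  r = some_computation (x);
	  return r;
	}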