about summary refs log tree commit diff
path: root/sysdeps/aarch64/fpu/sve_utils.h
diff options
context:
space:
mode:
authorJoe Ramsay <Joe.Ramsay@arm.com>2023-06-28 12:19:36 +0100
committerSzabolcs Nagy <szabolcs.nagy@arm.com>2023-06-30 09:04:10 +0100
commitaed39a3aa3ea68b14dce3395fb14b1416541e6c6 (patch)
tree38f866205e31b1bef745122636dbaa61922cb1cc /sysdeps/aarch64/fpu/sve_utils.h
parent84e93afc734a3c30e35ed2d21466a44259ac577e (diff)
downloadglibc-aed39a3aa3ea68b14dce3395fb14b1416541e6c6.tar.gz
glibc-aed39a3aa3ea68b14dce3395fb14b1416541e6c6.tar.xz
glibc-aed39a3aa3ea68b14dce3395fb14b1416541e6c6.zip
aarch64: Add vector implementations of cos routines
Replace the loop-over-scalar placeholder routines with optimised
implementations from Arm Optimized Routines (AOR).

Also add some headers containing utilities for aarch64 libmvec
routines, and update libm-test-ulps.

Data tables for new routines are used via a pointer with a
barrier on it, in order to prevent overly aggressive constant
inlining in GCC. This allows a single adrp, combined with offset
loads, to be used for every constant in the table.

Special-case handlers are marked NOINLINE in order to confine the
save/restore overhead of switching from vector to normal calling
standard. This way we only incur the extra memory access in the
exceptional cases. NOINLINE definitions have been moved to
math_private.h in order to reduce duplication.

AOR exposes a config option, WANT_SIMD_EXCEPT, to enable
selective masking (and later fixing up) of invalid lanes, in
order to trigger fp exceptions correctly (AdvSIMD only). This is
tested and maintained in AOR, however it is configured off at
source level here for performance reasons. We keep the
WANT_SIMD_EXCEPT blocks in routine sources to greatly simplify
the upstreaming process from AOR to glibc.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
Diffstat (limited to 'sysdeps/aarch64/fpu/sve_utils.h')
-rw-r--r--sysdeps/aarch64/fpu/sve_utils.h55
1 file changed, 0 insertions(+), 55 deletions(-)
diff --git a/sysdeps/aarch64/fpu/sve_utils.h b/sysdeps/aarch64/fpu/sve_utils.h
deleted file mode 100644
index 5ce3d2e8d6..0000000000
--- a/sysdeps/aarch64/fpu/sve_utils.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Helpers for SVE vector math functions.
-
-   Copyright (C) 2023 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <https://www.gnu.org/licenses/>.  */
-
#include <arm_sve.h>

/* Build the mangled public name of an SVE vector math routine from the
   scalar routine name: F/D selects the float/double variant, 1/2 the
   number of vector arguments.  The _ZGVsMxv prefix follows the AArch64
   vector function naming used throughout these libmvec sources.  */
#define SV_NAME_F1(fun) _ZGVsMxv_##fun##f
#define SV_NAME_D1(fun) _ZGVsMxv_##fun
#define SV_NAME_F2(fun) _ZGVsMxvv_##fun##f
#define SV_NAME_D2(fun) _ZGVsMxvv_##fun
-
/* Scalar fallback: apply the scalar routine F to each lane of X that is
   active in CMP, one lane at a time, merging each result into the
   corresponding lane of Y.  Lanes inactive in CMP keep their value from
   Y.  Used by special-case handlers where vector code cannot produce
   the correct result directly.  */
static __always_inline svfloat32_t
sv_call_f32 (float (*f) (float), svfloat32_t x, svfloat32_t y, svbool_t cmp)
{
  /* Predicate with only the first active lane of CMP set.  */
  svbool_t lane = svpfirst (cmp, svpfalse ());
  while (svptest_any (cmp, lane))
    {
      /* Extract the single active lane, run the scalar routine on it,
	 then broadcast the result and merge it back into that lane.  */
      float res = (*f) (svclastb_n_f32 (lane, 0, x));
      y = svsel_f32 (lane, svdup_n_f32 (res), y);
      /* Advance to the next active lane of CMP, if any.  */
      lane = svpnext_b32 (cmp, lane);
    }
  return y;
}
-
/* Double-precision counterpart of sv_call_f32: apply the scalar routine
   F to each lane of X active in CMP, merging results into Y; inactive
   lanes keep their value from Y.  */
static __always_inline svfloat64_t
sv_call_f64 (double (*f) (double), svfloat64_t x, svfloat64_t y, svbool_t cmp)
{
  /* Predicate with only the first active lane of CMP set.  */
  svbool_t lane = svpfirst (cmp, svpfalse ());
  while (svptest_any (cmp, lane))
    {
      /* Extract the single active lane, run the scalar routine on it,
	 then broadcast the result and merge it back into that lane.  */
      double res = (*f) (svclastb_n_f64 (lane, 0, x));
      y = svsel_f64 (lane, svdup_n_f64 (res), y);
      /* Advance to the next active lane of CMP, if any.  */
      lane = svpnext_b64 (cmp, lane);
    }
  return y;
}