author    Naohiro Tamura <naohirot@jp.fujitsu.com>	2021-05-27 07:42:35 +0000
committer Szabolcs Nagy <szabolcs.nagy@arm.com>	2021-05-27 09:47:53 +0100
commit    fa527f345cbbe852ec085932fbea979956c195b5
tree      6d84a3c41c4ed9ac4ffce5c33e7448dd70075e87 /sysdeps/aarch64/multiarch/memcpy.c
parent    f12ec02f5389a443d892241c486d87b3c5940ff6
aarch64: Added optimized memcpy and memmove for A64FX
This patch optimizes the performance of memcpy/memmove for A64FX [1],
which implements ARMv8-A SVE and has a 64KB L1 cache per core and an
8MB L2 cache per NUMA node.

The optimization makes use of the Scalable Vector Registers together
with several techniques such as loop unrolling, memory-access
alignment, cache zero fill, and software pipelining.
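
One of these techniques, cache zero fill, pre-zeroes destination cache
lines with the DC ZVA instruction so that large copies avoid a
read-for-ownership on every store miss. The patch does this in
hand-written assembly; the following is only a minimal C sketch of the
instruction (the zero_fill_block name is hypothetical, and real code
must size and align blocks per DCZID_EL0 and check that DC ZVA is
permitted):

    static inline void
    zero_fill_block (void *p)
    {
      /* DC ZVA zeroes one ZVA block at p without first reading it,
         so the subsequent vector stores hit already-allocated lines.  */
      __asm__ volatile ("dc zva, %0" : : "r" (p) : "memory");
    }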

The SVE assembler code for memcpy/memmove is implemented as Vector
Length Agnostic (VLA) code, so in principle it can run on any SoC that
supports the ARMv8-A SVE standard.
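
To illustrate the VLA style, here is a minimal sketch in C with ACLE
SVE intrinsics (not the patch's assembly; copy_vla is a hypothetical
name, forward copy only, without the unrolling, alignment, and
pipelining described above):

    #include <arm_sve.h>
    #include <stddef.h>
    #include <stdint.h>

    static void
    copy_vla (uint8_t *dst, const uint8_t *src, size_t n)
    {
      /* svcntb () is the vector length in bytes, discovered at run
         time, so the same binary adapts to any SVE implementation.  */
      for (uint64_t i = 0; i < n; i += svcntb ())
        {
          /* The predicate masks off lanes past n, so no scalar tail
             loop is needed.  */
          svbool_t pg = svwhilelt_b8_u64 (i, (uint64_t) n);
          svst1_u8 (pg, dst + i, svld1_u8 (pg, src + i));
        }
    }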

We confirmed that all test cases pass by running 'make check' and
'make xcheck', not only on A64FX but also on ThunderX2.

We also confirmed with 'make bench' that the 512-bit SVE vector
implementation is roughly 4 times faster than the 128-bit Advanced
SIMD one and 8 times faster than the 64-bit scalar one, consistent
with the 4x and 8x register-width ratios.

[1] https://github.com/fujitsu/A64FX

Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
Reviewed-by: Szabolcs Nagy <Szabolcs.Nagy@arm.com>
Diffstat (limited to 'sysdeps/aarch64/multiarch/memcpy.c')
 sysdeps/aarch64/multiarch/memcpy.c | 18 +++++++++++------
 1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/sysdeps/aarch64/multiarch/memcpy.c b/sysdeps/aarch64/multiarch/memcpy.c
index 0e0a5cbcfb..25e0081eeb 100644
--- a/sysdeps/aarch64/multiarch/memcpy.c
+++ b/sysdeps/aarch64/multiarch/memcpy.c
@@ -33,6 +33,9 @@ extern __typeof (__redirect_memcpy) __memcpy_simd attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_thunderx attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_thunderx2 attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_falkor attribute_hidden;
+# if HAVE_AARCH64_SVE_ASM
+extern __typeof (__redirect_memcpy) __memcpy_a64fx attribute_hidden;
+# endif
 
 libc_ifunc (__libc_memcpy,
             (IS_THUNDERX (midr)
@@ -40,12 +43,17 @@ libc_ifunc (__libc_memcpy,
 	     : (IS_FALKOR (midr) || IS_PHECDA (midr)
 		? __memcpy_falkor
 		: (IS_THUNDERX2 (midr) || IS_THUNDERX2PA (midr)
-		  ? __memcpy_thunderx2
-		  : (IS_NEOVERSE_N1 (midr) || IS_NEOVERSE_N2 (midr)
-		     || IS_NEOVERSE_V1 (midr)
-		     ? __memcpy_simd
+		   ? __memcpy_thunderx2
+		   : (IS_NEOVERSE_N1 (midr) || IS_NEOVERSE_N2 (midr)
+		      || IS_NEOVERSE_V1 (midr)
+		      ? __memcpy_simd
+# if HAVE_AARCH64_SVE_ASM
+		     : (IS_A64FX (midr)
+			? __memcpy_a64fx
+			: __memcpy_generic))))));
+# else
 		     : __memcpy_generic)))));
-
+# endif
 # undef memcpy
 strong_alias (__libc_memcpy, memcpy);
 #endif
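
For context on the dispatch above: libc_ifunc selects an implementation
once, at symbol-resolution time, based on the MIDR_EL1 id register that
the kernel exposes to glibc. A rough sketch of what an A64FX check
looks like, assuming the architectural MIDR field layout (implementer
in bits [31:24], part number in bits [15:4]); the macro names follow
glibc's cpu-features.h but are reproduced here from memory:

    #include <stdint.h>

    #define MIDR_IMPLEMENTOR(midr) (((midr) >> 24) & 0xff)
    #define MIDR_PARTNUM(midr)     (((midr) >> 4) & 0xfff)

    /* Fujitsu's implementer code is 0x46 ('F'); the A64FX part
       number is 0x001.  */
    static inline int
    is_a64fx (uint64_t midr)
    {
      return MIDR_IMPLEMENTOR (midr) == 'F'
             && MIDR_PARTNUM (midr) == 0x001;
    }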