about summary refs log tree commit diff
path: root/sysdeps/aarch64/multiarch/memset.c
diff options
context:
space:
mode:
authorNaohiro Tamura <naohirot@jp.fujitsu.com>2021-05-27 07:44:12 +0000
committerSzabolcs Nagy <szabolcs.nagy@arm.com>2021-05-27 09:47:53 +0100
commit4f26956d5ba394eb3ade6c1c20b5c16864a00766 (patch)
tree8857e5f8fceb8555922d2fde86a38e8b6b486f4e /sysdeps/aarch64/multiarch/memset.c
parentfa527f345cbbe852ec085932fbea979956c195b5 (diff)
downloadglibc-4f26956d5ba394eb3ade6c1c20b5c16864a00766.tar.gz
glibc-4f26956d5ba394eb3ade6c1c20b5c16864a00766.tar.xz
glibc-4f26956d5ba394eb3ade6c1c20b5c16864a00766.zip
aarch64: Added optimized memset for A64FX
This patch optimizes the performance of memset for A64FX [1] which
implements ARMv8-A SVE and has L1 64KB cache per core and L2 8MB cache
per NUMA node.

The performance optimization makes use of Scalable Vector Register
with several techniques such as loop unrolling, memory access
alignment, cache zero fill and prefetch.

SVE assembler code for memset is implemented as Vector Length Agnostic
code, so theoretically it can run on any SoC which supports the ARMv8-A
SVE standard.

We confirmed that all testcases pass by running 'make check' and
'make xcheck', not only on A64FX but also on ThunderX2.

And also we confirmed that the SVE 512 bit vector register performance
is roughly 4 times better than Advanced SIMD 128 bit register and 8
times better than scalar 64 bit register by running 'make bench'.

[1] https://github.com/fujitsu/A64FX

Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
Reviewed-by: Szabolcs Nagy <Szabolcs.Nagy@arm.com>
Diffstat (limited to 'sysdeps/aarch64/multiarch/memset.c')
-rw-r--r--sysdeps/aarch64/multiarch/memset.c17
1 file changed, 13 insertions, 4 deletions
diff --git a/sysdeps/aarch64/multiarch/memset.c b/sysdeps/aarch64/multiarch/memset.c
index 28d3926bc2..d7d9bbbda0 100644
--- a/sysdeps/aarch64/multiarch/memset.c
+++ b/sysdeps/aarch64/multiarch/memset.c
@@ -31,16 +31,25 @@ extern __typeof (__redirect_memset) __libc_memset;
 extern __typeof (__redirect_memset) __memset_falkor attribute_hidden;
 extern __typeof (__redirect_memset) __memset_emag attribute_hidden;
 extern __typeof (__redirect_memset) __memset_kunpeng attribute_hidden;
+# if HAVE_AARCH64_SVE_ASM
+extern __typeof (__redirect_memset) __memset_a64fx attribute_hidden;
+# endif
 extern __typeof (__redirect_memset) __memset_generic attribute_hidden;
 
 libc_ifunc (__libc_memset,
 	    IS_KUNPENG920 (midr)
 	    ?__memset_kunpeng
 	    : ((IS_FALKOR (midr) || IS_PHECDA (midr)) && zva_size == 64
-	     ? __memset_falkor
-	     : (IS_EMAG (midr) && zva_size == 64
-	       ? __memset_emag
-	       : __memset_generic)));
+	      ? __memset_falkor
+	      : (IS_EMAG (midr) && zva_size == 64
+		? __memset_emag
+# if HAVE_AARCH64_SVE_ASM
+		: (IS_A64FX (midr)
+		  ? __memset_a64fx
+		  : __memset_generic))));
+# else
+		  : __memset_generic)));
+# endif
 
 # undef memset
 strong_alias (__libc_memset, memset);