author     Feng Xue <fxue@os.amperecomputing.com>  2018-07-30 02:21:42 -0400
committer  Feng Xue <fxue@os.amperecomputing.com>  2019-02-01 07:59:18 -0500
commit     c7d3890ff51bceb38fac0947ce1f2bb0c34f6b15
tree       2888ed321d26b3c0df7347cf25e0772a41fadf12 /sysdeps/aarch64
parent     07c3d1ec03ee3633918afb59213cd1bac2ab276e
aarch64: Optimized memset specific to AmpereComputing emag
This version uses general-register-based memory stores instead of
vector-register-based stores, because the former are faster than the
latter on emag.

The fact that the DC ZVA size on emag is 64 bytes is used by the IFUNC
dispatch to select this memset, so the cost of a runtime check on the
DC ZVA size can be saved.
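
For reference, the runtime check that this dispatch avoids amounts to
reading DCZID_EL0 and decoding its BS field.  A minimal C sketch follows;
the function name is illustrative and not part of this patch:

/* Query the DC ZVA block size at run time.  DCZID_EL0 bits [3:0] (BS)
   encode log2 of the block size in 4-byte words, so the size in bytes is
   4 << BS; bit 4 (DZP) set means DC ZVA is prohibited.  Dispatching
   __memset_emag only when zva_size is already known to be 64 makes this
   per-call check unnecessary.  */
static inline unsigned long
dc_zva_block_size (void)
{
  unsigned long dczid;
  asm ("mrs %0, dczid_el0" : "=r" (dczid));
  if (dczid & 16)		/* DZP: DC ZVA not permitted.  */
    return 0;
  return 4UL << (dczid & 15);
}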

    * sysdeps/aarch64/multiarch/Makefile (sysdep_routines):
    Add memset_emag.
    * sysdeps/aarch64/multiarch/ifunc-impl-list.c
    (__libc_ifunc_impl_list): Add __memset_emag to memset ifunc.
    * sysdeps/aarch64/multiarch/memset.c (libc_ifunc):
    Add IS_EMAG check for ifunc dispatch.
    * sysdeps/aarch64/multiarch/memset_base64.S: New file.
    * sysdeps/aarch64/multiarch/memset_emag.S: New file.
Diffstat (limited to 'sysdeps/aarch64')
-rw-r--r--  sysdeps/aarch64/multiarch/Makefile           |   3
-rw-r--r--  sysdeps/aarch64/multiarch/ifunc-impl-list.c  |   1
-rw-r--r--  sysdeps/aarch64/multiarch/memset.c           |   5
-rw-r--r--  sysdeps/aarch64/multiarch/memset_base64.S    | 178
-rw-r--r--  sysdeps/aarch64/multiarch/memset_emag.S      |  32
5 files changed, 217 insertions(+), 2 deletions(-)
diff --git a/sysdeps/aarch64/multiarch/Makefile b/sysdeps/aarch64/multiarch/Makefile
index b1a5f59fcd..3c6c879997 100644
--- a/sysdeps/aarch64/multiarch/Makefile
+++ b/sysdeps/aarch64/multiarch/Makefile
@@ -1,5 +1,6 @@
 ifeq ($(subdir),string)
 sysdep_routines += memcpy_generic memcpy_thunderx memcpy_thunderx2 \
-		   memcpy_falkor memmove_falkor memset_generic memset_falkor \
+		   memcpy_falkor memmove_falkor \
+		   memset_generic memset_falkor memset_emag \
 		   strlen_generic strlen_asimd
 endif
diff --git a/sysdeps/aarch64/multiarch/ifunc-impl-list.c b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
index a00c329bca..d1f7df61d7 100644
--- a/sysdeps/aarch64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
@@ -51,6 +51,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      /* Enable this on non-falkor processors too so that other cores
 		 can do a comparative analysis with __memset_generic.  */
 	      IFUNC_IMPL_ADD (array, i, memset, (zva_size == 64), __memset_falkor)
+	      IFUNC_IMPL_ADD (array, i, memset, (zva_size == 64), __memset_emag)
 	      IFUNC_IMPL_ADD (array, i, memset, 1, __memset_generic))
 
   IFUNC_IMPL (i, name, strlen,
diff --git a/sysdeps/aarch64/multiarch/memset.c b/sysdeps/aarch64/multiarch/memset.c
index 6ba8630a18..481758702f 100644
--- a/sysdeps/aarch64/multiarch/memset.c
+++ b/sysdeps/aarch64/multiarch/memset.c
@@ -29,12 +29,15 @@
 extern __typeof (__redirect_memset) __libc_memset;
 
 extern __typeof (__redirect_memset) __memset_falkor attribute_hidden;
+extern __typeof (__redirect_memset) __memset_emag attribute_hidden;
 extern __typeof (__redirect_memset) __memset_generic attribute_hidden;
 
 libc_ifunc (__libc_memset,
 	    ((IS_FALKOR (midr) || IS_PHECDA (midr)) && zva_size == 64
 	     ? __memset_falkor
-	     : __memset_generic));
+	     : (IS_EMAG (midr) && zva_size == 64
+	       ? __memset_emag
+	       : __memset_generic)));
 
 # undef memset
 strong_alias (__libc_memset, memset);
diff --git a/sysdeps/aarch64/multiarch/memset_base64.S b/sysdeps/aarch64/multiarch/memset_base64.S
new file mode 100644
index 0000000000..9a623259b9
--- /dev/null
+++ b/sysdeps/aarch64/multiarch/memset_base64.S
@@ -0,0 +1,178 @@
+/* Copyright (C) 2018 Free Software Foundation, Inc.
+
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include "memset-reg.h"
+
+#ifndef MEMSET
+# define MEMSET __memset_base64
+#endif
+
+#ifndef DC_ZVA_THRESHOLD
+# define DC_ZVA_THRESHOLD 512
+#endif
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses
+ *
+ */
+
+ENTRY_ALIGN (MEMSET, 6)
+
+	DELOUSE (0)
+	DELOUSE (2)
+
+	bfi	valw, valw, 8, 8
+	bfi	valw, valw, 16, 16
+	bfi	val, val, 32, 32
+
+	add	dstend, dstin, count
+
+	cmp	count, 96
+	b.hi	L(set_long)
+	cmp	count, 16
+	b.hs	L(set_medium)
+
+	/* Set 0..15 bytes.  */
+	tbz	count, 3, 1f
+	str	val, [dstin]
+	str	val, [dstend, -8]
+	ret
+
+	.p2align 3
+1:	tbz	count, 2, 2f
+	str	valw, [dstin]
+	str	valw, [dstend, -4]
+	ret
+2:	cbz	count, 3f
+	strb	valw, [dstin]
+	tbz	count, 1, 3f
+	strh	valw, [dstend, -2]
+3:	ret
+
+	.p2align 3
+	/* Set 16..96 bytes.  */
+L(set_medium):
+	stp	val, val, [dstin]
+	tbnz	count, 6, L(set96)
+	stp	val, val, [dstend, -16]
+	tbz	count, 5, 1f
+	stp	val, val, [dstin, 16]
+	stp	val, val, [dstend, -32]
+1:	ret
+
+	.p2align 4
+	/* Set 64..96 bytes.  Write 64 bytes from the start and
+	   32 bytes from the end.  */
+L(set96):
+	stp	val, val, [dstin, 16]
+	stp	val, val, [dstin, 32]
+	stp	val, val, [dstin, 48]
+	stp	val, val, [dstend, -32]
+	stp	val, val, [dstend, -16]
+	ret
+
+	.p2align 4
+L(set_long):
+	stp	val, val, [dstin]
+	cmp	count, DC_ZVA_THRESHOLD
+	ccmp	val, 0, 0, cs
+	bic	dst, dstin, 15
+	b.eq	L(zva_64)
+
+	/* Small-size or non-zero memset does not use DC ZVA. */
+	sub	count, dstend, dst
+
+	/*
+	 * Adjust count and bias for the loop.  Subtracting an extra 1 from
+	 * count makes it easy to use tbz to check whether the count left after
+	 * the loop is less than 33 bytes, so as to bypass 2 unnecessary stps.
+	 */
+	sub	count, count, 64+16+1
+	nop
+
+1:	stp	val, val, [dst, 16]
+	stp	val, val, [dst, 32]
+	stp	val, val, [dst, 48]
+	stp	val, val, [dst, 64]!
+	subs	count, count, 64
+	b.hs	1b
+
+	tbz	count, 5, 1f	/* Remaining count is less than 33 bytes? */
+	stp	val, val, [dst, 16]
+	stp	val, val, [dst, 32]
+1:	stp	val, val, [dstend, -32]
+	stp	val, val, [dstend, -16]
+	ret
+
+	.p2align 3
+L(zva_64):
+	stp	val, val, [dst, 16]
+	stp	val, val, [dst, 32]
+	stp	val, val, [dst, 48]
+	bic	dst, dst, 63
+
+	/*
+	 * The preceding memory writes might cross a cache line boundary and
+	 * leave a cache line partially dirty.  Zeroing such a cache line
+	 * with DC ZVA would incur extra cost, because it requires loading
+	 * the untouched part of the line from memory before zeroing.
+	 *
+	 * So, write the first 64-byte aligned block using stp to force a
+	 * fully dirty cache line.
+	 */
+	stp	val, val, [dst, 64]
+	stp	val, val, [dst, 80]
+	stp	val, val, [dst, 96]
+	stp	val, val, [dst, 112]
+
+	sub	count, dstend, dst
+	/*
+	 * Adjust count and bias for the loop.  Subtracting an extra 1 from
+	 * count makes it easy to use tbz to check whether the count left after
+	 * the loop is less than 33 bytes, so as to bypass 2 unnecessary stps.
+	 */
+	sub	count, count, 128+64+64+1
+	add	dst, dst, 128
+	nop
+
+	/* DC ZVA zeroes 64 bytes at a time. */
+1:	dc	zva, dst
+	add	dst, dst, 64
+	subs	count, count, 64
+	b.hs	1b
+
+	/*
+	 * Write the last 64-byte aligned block using stp to force a fully
+	 * dirty cache line.
+	 */
+	stp	val, val, [dst, 0]
+	stp	val, val, [dst, 16]
+	stp	val, val, [dst, 32]
+	stp	val, val, [dst, 48]
+
+	tbz	count, 5, 1f	/* Remaining count is less than 33 bytes? */
+	stp	val, val, [dst, 64]
+	stp	val, val, [dst, 80]
+1:	stp	val, val, [dstend, -32]
+	stp	val, val, [dstend, -16]
+	ret
+
+END (MEMSET)
+libc_hidden_builtin_def (MEMSET)
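
The L(zva_64) path above can be summarized in C.  This is an illustrative
sketch only, assuming a zero fill value, a 64-byte ZVA block and a count
above DC_ZVA_THRESHOLD; the helper name is made up, and plain memset calls
stand in for the stp head/tail stores:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch of the zva_64 strategy: the first and last blocks are written
   with ordinary stores so that DC ZVA never touches a cache line that is
   not fully covered by the request, and every whole 64-byte line in
   between is zeroed with a single DC ZVA operation.  */
static void
zero_with_dc_zva (char *dstin, size_t count)
{
  char *dstend = dstin + count;
  char *dst = (char *) ((uintptr_t) dstin & ~(uintptr_t) 63);

  memset (dstin, 0, dst + 128 - dstin);	/* head: plain stores */
  for (dst += 128; dstend - dst >= 128; dst += 64)
    asm volatile ("dc zva, %0" : : "r" (dst) : "memory");
  memset (dst, 0, dstend - dst);	/* tail: plain stores */
}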
diff --git a/sysdeps/aarch64/multiarch/memset_emag.S b/sysdeps/aarch64/multiarch/memset_emag.S
new file mode 100644
index 0000000000..1c1fabc624
--- /dev/null
+++ b/sysdeps/aarch64/multiarch/memset_emag.S
@@ -0,0 +1,32 @@
+/* Optimized memset for AmpereComputing emag processor.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# define MEMSET __memset_emag
+
+/*
+ * Using dc zva to zero memory does not produce better performance when
+ * the memory size is not very large, especially when multiple
+ * processes/threads are contending for memory/cache.  Here we use a
+ * somewhat large threshold to trigger the use of dc zva.
+ */
+# define DC_ZVA_THRESHOLD 1024
+
+# include "./memset_base64.S"
+#endif
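
As a closing usage note, a small smoke test can exercise the size classes
the new code branches on: 0..15 bytes, 16..96 bytes, sizes below the DC ZVA
threshold, and large zero fills that take the DC ZVA path.  The particular
sizes and the guard-byte scheme below are choices of this sketch, not part
of the patch:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  static const size_t sizes[] = { 0, 1, 15, 16, 96, 97, 511, 512,
				  1023, 1024, 4096 };

  for (size_t i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    {
      size_t n = sizes[i];
      unsigned char *buf = malloc (n + 2);
      if (buf == NULL)
	abort ();
      buf[0] = buf[n + 1] = 0xAA;	/* guard bytes around the target */

      memset (buf + 1, 0, n);		/* zero fill: may use DC ZVA */
      for (size_t j = 1; j <= n; j++)
	assert (buf[j] == 0);
      assert (buf[0] == 0xAA && buf[n + 1] == 0xAA);

      free (buf);
    }
  return 0;
}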