about summary refs log tree commit diff
diff options
context:
space:
mode:
author	Wilco Dijkstra <wilco.dijkstra@arm.com>	2023-01-11 13:50:59 +0000
committer	Wilco Dijkstra <wilco.dijkstra@arm.com>	2024-04-09 18:37:21 +0100
commit	50ebfdfe8a9e6f0695941d023277d4dd8b95be9a (patch)
tree	f373eb91e32a489d0245772d88e86d0dda67a6b4
parent	fa9cd4fd3c45fec2211a4603777fec2e0501f32b (diff)
download	glibc-50ebfdfe8a9e6f0695941d023277d4dd8b95be9a.tar.gz
glibc-50ebfdfe8a9e6f0695941d023277d4dd8b95be9a.tar.xz
glibc-50ebfdfe8a9e6f0695941d023277d4dd8b95be9a.zip
AArch64: Optimize memchr
Optimize the main loop - large strings are 40% faster on modern CPUs.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
(cherry picked from commit ce758d4f063820c2bc743e12797d7454c66be718)
-rw-r--r--	sysdeps/aarch64/memchr.S	27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/sysdeps/aarch64/memchr.S b/sysdeps/aarch64/memchr.S
index d6c588dba7..76f801b6c0 100644
--- a/sysdeps/aarch64/memchr.S
+++ b/sysdeps/aarch64/memchr.S
@@ -30,7 +30,6 @@
 # define MEMCHR __memchr
 #endif
 
-/* Arguments and results.  */
 #define srcin		x0
 #define chrin		w1
 #define cntin		x2
@@ -73,42 +72,44 @@ ENTRY (MEMCHR)
 
 	rbit	synd, synd
 	clz	synd, synd
-	add	result, srcin, synd, lsr 2
 	cmp	cntin, synd, lsr 2
+	add	result, srcin, synd, lsr 2
 	csel	result, result, xzr, hi
 	ret
 
+	.p2align 3
 L(start_loop):
 	sub	tmp, src, srcin
-	add	tmp, tmp, 16
+	add	tmp, tmp, 17
 	subs	cntrem, cntin, tmp
-	b.ls	L(nomatch)
+	b.lo	L(nomatch)
 
 	/* Make sure that it won't overread by a 16-byte chunk */
-	add	tmp, cntrem, 15
-	tbnz	tmp, 4, L(loop32_2)
-
+	tbz	cntrem, 4, L(loop32_2)
+	sub	src, src, 16
 	.p2align 4
 L(loop32):
-	ldr	qdata, [src, 16]!
+	ldr	qdata, [src, 32]!
 	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
 	fmov	synd, dend
 	cbnz	synd, L(end)
 
 L(loop32_2):
-	ldr	qdata, [src, 16]!
-	subs	cntrem, cntrem, 32
+	ldr	qdata, [src, 16]
 	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
-	b.ls	L(end)
+	subs	cntrem, cntrem, 32
+	b.lo	L(end_2)
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
 	fmov	synd, dend
 	cbz	synd, L(loop32)
+L(end_2):
+	add	src, src, 16
 L(end):
 	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
+	sub	cntrem, src, srcin
 	fmov	synd, dend
-	add	tmp, srcin, cntin
-	sub	cntrem, tmp, src
+	sub	cntrem, cntin, cntrem
 #ifndef __AARCH64EB__
 	rbit	synd, synd
 #endif