author    Wilco Dijkstra <wilco.dijkstra@arm.com>    2023-02-01 18:45:19 +0000
committer Wilco Dijkstra <wilco.dijkstra@arm.com>    2024-04-09 17:38:05 +0100
commit    49f1bd625603ece209dd3cb67035f6664221c666 (patch)
tree      e2e7738f82b1347a5e1319b6f9978bbdffee51c0
parent    3972a306be01e02f8db61c60617f8421371658e0 (diff)
AArch64: Improve SVE memcpy and memmove
Improve SVE memcpy by copying two vectors with predicated loads and
stores whenever the size fits within two vector lengths.  This improves
performance of random memcpy by ~9% on Neoverse V1, and 33-64 byte
copies are ~16% faster.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
(cherry picked from commit d2d3f3720ce627a4fe154d8dd14db716a32bcc6e)
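
For illustration, a minimal C sketch of the two-vector trick using the SVE
ACLE intrinsics (<arm_sve.h>); the function name and scaffolding here are
hypothetical, and the actual glibc routine is the hand-written assembly below:

#include <arm_sve.h>
#include <stddef.h>
#include <stdint.h>

/* Copy `count' bytes, assuming count <= 2 * svcntb() (two SVE vector
   lengths).  svwhilelt with unsigned operands compiles to WHILELO, the
   instruction the patch uses: p0 covers lanes 0..count-1 of the first
   vector, p1 covers whatever spills into the second.  Inactive lanes are
   neither loaded nor stored, so short copies never overread or
   overwrite.  */
static void
copy_up_to_two_vectors (uint8_t *dst, const uint8_t *src, size_t count)
{
  uint64_t vlen = svcntb ();                     /* cntb    vlen */
  svbool_t p0 = svwhilelt_b8_u64 (0, count);     /* whilelo p0.b, xzr, count */
  svbool_t p1 = svwhilelt_b8_u64 (vlen, count);  /* whilelo p1.b, vlen, count */
  svuint8_t z0 = svld1_u8 (p0, src);             /* ld1b z0.b, [src, 0, mul vl] */
  svuint8_t z1 = svld1_vnum_u8 (p1, src, 1);     /* ld1b z1.b, [src, 1, mul vl] */
  svst1_u8 (p0, dst, z0);                        /* st1b z0.b, [dst, 0, mul vl] */
  svst1_vnum_u8 (p1, dst, 1, z1);                /* st1b z1.b, [dst, 1, mul vl] */
}

Because the predicates mask off all lanes past `count', the same two
load/store pairs handle every size up to two vector lengths, so the separate
L(vlen128) path for 128-bit vectors can be deleted.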
 sysdeps/aarch64/multiarch/memcpy_sve.S | 34 ++++++++++++++--------------------
 1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/sysdeps/aarch64/multiarch/memcpy_sve.S b/sysdeps/aarch64/multiarch/memcpy_sve.S
index a70907ec55..6bc8390fe8 100644
--- a/sysdeps/aarch64/multiarch/memcpy_sve.S
+++ b/sysdeps/aarch64/multiarch/memcpy_sve.S
@@ -67,14 +67,15 @@ ENTRY (__memcpy_sve)
 
 	cmp	count, 128
 	b.hi	L(copy_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	/* Medium copies: 33..128 bytes.  */
@@ -102,14 +103,6 @@ L(copy96):
 	stp	C_q, D_q, [dstend, -32]
 	ret
 
-L(vlen128):
-	whilelo p1.b, vlen, count
-	ld1b	z0.b, p0/z, [src, 0, mul vl]
-	ld1b	z1.b, p1/z, [src, 1, mul vl]
-	st1b	z0.b, p0, [dstin, 0, mul vl]
-	st1b	z1.b, p1, [dstin, 1, mul vl]
-	ret
-
 	.p2align 4
 	/* Copy more than 128 bytes.  */
 L(copy_long):
@@ -158,14 +151,15 @@ ENTRY (__memmove_sve)
 
 	cmp	count, 128
 	b.hi	L(move_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	.p2align 4