author    Noah Goldstein <goldstein.w.n@gmail.com>  2022-06-15 10:41:28 -0700
committer Noah Goldstein <goldstein.w.n@gmail.com>  2022-06-15 14:25:55 -0700
commit    89a25c6f64746732b87eaf433af0964b564d4a92 (patch)
tree      e1abe35fdb54d1a7358955964ae022b86a7de775 /sysdeps/x86_64
parent    b446822b6ae4e8149902a78cdd4a886634ad6321 (diff)
x86: Cleanup bounds checking in large memcpy case
1. Fix incorrect lower-bound threshold in L(large_memcpy_2x).
   Previously it was using `__x86_rep_movsb_threshold` when it
   should have been using `__x86_shared_non_temporal_threshold`.

2. Avoid reloading __x86_shared_non_temporal_threshold before
   the L(large_memcpy_4x) bounds check.

3. Document the second bounds check for L(large_memcpy_4x)
   more clearly.
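
To make the resulting control flow concrete, here is a minimal C sketch of the
bounds checks as they stand after this patch (not part of the patch itself):
copy_2x_pages, copy_4x_pages and more_8x_vec are hypothetical stand-ins for the
assembly labels, the threshold value is a placeholder, and the page-aliasing
test that can also select the 4x path is omitted.

/* Sketch of the post-patch dispatch; the helpers and the threshold value
   below are hypothetical stand-ins, not glibc code.  */
#include <stddef.h>
#include <stdio.h>

#define LOG_4X_MEMCPY_THRESH 4

/* Placeholder value; the real tunable is derived from cache sizes.  */
static size_t non_temporal_threshold = 1UL << 20;

static void more_8x_vec (size_t len)   { printf ("8x-vec path,  len=%zu\n", len); }
static void copy_2x_pages (size_t len) { printf ("2x-page path, len=%zu\n", len); }
static void copy_4x_pages (size_t len) { printf ("4x-page path, len=%zu\n", len); }

static void large_memcpy_dispatch (size_t len)
{
  /* Load the threshold once; the assembly keeps it in %r11.  */
  size_t thresh = non_temporal_threshold;

  /* Item 1: the lower bound is the non-temporal threshold, not
     __x86_rep_movsb_threshold.  */
  if (len < thresh)
    {
      more_8x_vec (len);
      return;
    }

  /* Items 2 and 3: reuse the already-loaded threshold, shifted left by
     LOG_4X_MEMCPY_THRESH, as the L(large_memcpy_4x) bound.  */
  if (len >= (thresh << LOG_4X_MEMCPY_THRESH))
    copy_4x_pages (len);
  else
    copy_2x_pages (len);
}

int main (void)
{
  large_memcpy_dispatch (1UL << 19);  /* below threshold */
  large_memcpy_dispatch (1UL << 22);  /* 2x-page path */
  large_memcpy_dispatch (1UL << 26);  /* 4x-page path */
  return 0;
}
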
Diffstat (limited to 'sysdeps/x86_64')
-rw-r--r--  sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S | 29
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index af51177d5d..d1518b8bab 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -118,7 +118,13 @@
 # define LARGE_LOAD_SIZE (VEC_SIZE * 4)
 #endif
 
-/* Amount to shift rdx by to compare for memcpy_large_4x.  */
+/* Amount to shift __x86_shared_non_temporal_threshold by to get
+   the bound for memcpy_large_4x. This is essentially used to
+   indicate that the copy is far beyond the scope of L3
+   (assuming no user config of x86_non_temporal_threshold) and to
+   use a more aggressively unrolled loop.  NB: before increasing
+   the value also update the initialization of
+   x86_non_temporal_threshold.  */
 #ifndef LOG_4X_MEMCPY_THRESH
 # define LOG_4X_MEMCPY_THRESH 4
 #endif
@@ -724,9 +730,14 @@ L(skip_short_movsb_check):
 	.p2align 4,, 10
 #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
 L(large_memcpy_2x_check):
-	cmp	__x86_rep_movsb_threshold(%rip), %RDX_LP
-	jb	L(more_8x_vec_check)
+	/* Entry from L(large_memcpy_2x) has a redundant load of
+	   __x86_shared_non_temporal_threshold(%rip). L(large_memcpy_2x)
+	   is only used for the non-erms memmove, which is generally less
+	   common.  */
 L(large_memcpy_2x):
+	mov	__x86_shared_non_temporal_threshold(%rip), %R11_LP
+	cmp	%R11_LP, %RDX_LP
+	jb	L(more_8x_vec_check)
 	/* To reach this point it is impossible for dst > src and
 	   overlap. Remaining to check is src > dst and overlap. rcx
 	   already contains dst - src. Negate rcx to get src - dst. If
@@ -774,18 +785,21 @@ L(large_memcpy_2x):
 	/* ecx contains -(dst - src). not ecx will return dst - src - 1
 	   which works for testing aliasing.  */
 	notl	%ecx
+	movq	%rdx, %r10
 	testl	$(PAGE_SIZE - VEC_SIZE * 8), %ecx
 	jz	L(large_memcpy_4x)
 
-	movq	%rdx, %r10
-	shrq	$LOG_4X_MEMCPY_THRESH, %r10
-	cmp	__x86_shared_non_temporal_threshold(%rip), %r10
+	/* r11 has __x86_shared_non_temporal_threshold.  Shift it left
+	   by LOG_4X_MEMCPY_THRESH to get the L(large_memcpy_4x)
+	   threshold.  */
+	shlq	$LOG_4X_MEMCPY_THRESH, %r11
+	cmp	%r11, %rdx
 	jae	L(large_memcpy_4x)
 
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 2 - 1), %edx
 	/* r10 stores outer loop counter.  */
-	shrq	$((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10
+	shrq	$(LOG_PAGE_SIZE + 1), %r10
 	/* Copy 4x VEC at a time from 2 pages.  */
 	.p2align 4
 L(loop_large_memcpy_2x_outer):
@@ -850,7 +864,6 @@ L(large_memcpy_2x_end):
 
 	.p2align 4
 L(large_memcpy_4x):
-	movq	%rdx, %r10
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 4 - 1), %edx
 	/* r10 stores outer loop counter.  */
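
One detail worth spelling out about the last two hunks: before the patch, %r10
was first shifted right by LOG_4X_MEMCPY_THRESH (to form the 4x comparison) and
then by (LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH; now the 4x comparison shifts
the threshold left instead, so %r10 keeps the raw length and gets a single shift
by LOG_PAGE_SIZE + 1. A small standalone check of that equivalence (not from the
patch; LOG_PAGE_SIZE = 12 is assumed, matching a 4096-byte PAGE_SIZE):

/* Check that the new single shift reproduces the old two-step shift for
   the outer loop counter.  LOG_PAGE_SIZE = 12 is an assumption here.  */
#include <assert.h>
#include <stdio.h>

#define LOG_PAGE_SIZE 12
#define LOG_4X_MEMCPY_THRESH 4

int main (void)
{
  for (unsigned long long len = 1; len < (1ULL << 40); len = len * 3 + 7)
    {
      unsigned long long old_r10 =
	(len >> LOG_4X_MEMCPY_THRESH)
	>> ((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH);
      unsigned long long new_r10 = len >> (LOG_PAGE_SIZE + 1);
      assert (old_r10 == new_r10);
    }
  puts ("outer loop counter unchanged");
  return 0;
}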