path: root/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
author    Paul Pluzhnikov <ppluzhnikov@google.com>  2023-05-23 03:57:01 +0000
committer Paul Pluzhnikov <ppluzhnikov@google.com>  2023-05-23 10:25:11 +0000
commit    1e9d5987fd94b88bdf4ebfb9f13d4a472d529cdd
tree      687d8966ab7a4d94d6d3f684a0410ba4fa3b1cd4
parent    ec9a66cd01a73c185bb42cdc032f88b472598feb
Fix misspellings in sysdeps/x86_64 -- BZ 25337.
Applying this commit results in a bit-identical rebuild of libc.so.6
math/libm.so.6 elf/ld-linux-x86-64.so.2 and mathvec/libmvec.so.1.

Reviewed-by: Florian Weimer <fweimer@redhat.com>
Diffstat (limited to 'sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S')
-rw-r--r--  sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index d1b92785b0..51eb622bc8 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -445,7 +445,7 @@ L(more_8x_vec_check):
 	shrq	$63, %r8
 	/* Get 4k difference dst - src.  */
 	andl	$(PAGE_SIZE - 256), %ecx
-	/* If r8 is non-zero must do foward for correctness. Otherwise
+	/* If r8 is non-zero must do forward for correctness. Otherwise
 	   if ecx is non-zero there is 4k False Alaising so do backward
 	   copy.  */
 	addl	%r8d, %ecx
@@ -460,7 +460,7 @@ L(more_8x_vec_forward):
 	/* First vec was already loaded into VEC(0).  */
 	VMOVU	-VEC_SIZE(%rsi, %rdx), %VMM(5)
 	VMOVU	-(VEC_SIZE * 2)(%rsi, %rdx), %VMM(6)
-	/* Save begining of dst.  */
+	/* Save beginning of dst.  */
 	movq	%rdi, %rcx
 	/* Align dst to VEC_SIZE - 1.  */
 	orq	$(VEC_SIZE - 1), %rdi
@@ -517,7 +517,7 @@ L(more_8x_vec_backward):
 	/* First vec was also loaded into VEC(0).  */
 	VMOVU	VEC_SIZE(%rsi), %VMM(5)
 	VMOVU	(VEC_SIZE * 2)(%rsi), %VMM(6)
-	/* Begining of region for 4x backward copy stored in rcx.  */
+	/* Beginning of region for 4x backward copy stored in rcx.  */
 	leaq	(VEC_SIZE * -4 + -1)(%rdi, %rdx), %rcx
 	VMOVU	(VEC_SIZE * 3)(%rsi), %VMM(7)
 	VMOVU	-VEC_SIZE(%rsi, %rdx), %VMM(8)
@@ -611,7 +611,7 @@ L(movsb):
 	movq	%rdi, %r8
 # endif
 	/* If above __x86_rep_movsb_stop_threshold most likely is
-	   candidate for NT moves aswell.  */
+	   candidate for NT moves as well.  */
 	cmp	__x86_rep_movsb_stop_threshold(%rip), %RDX_LP
 	jae	L(large_memcpy_2x_check)
 # if AVOID_SHORT_DISTANCE_REP_MOVSB || ALIGN_MOVSB
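
The comment corrected in the last hunk describes the size tiering on this path: copies at or
above __x86_rep_movsb_stop_threshold skip rep movsb and are likely candidates for non-temporal
(NT) moves, which is why control branches to L(large_memcpy_2x_check). Below is a minimal C
sketch of that size-based dispatch, offered only as an illustration: the threshold variables
are stand-ins for the __x86_rep_movsb_* tunables, their values are placeholders, and the three
copy helpers are hypothetical names rather than glibc functions.

    #include <stddef.h>
    #include <string.h>

    /* Stand-ins for the __x86_rep_movsb_threshold and
       __x86_rep_movsb_stop_threshold tunables referenced in the hunk above;
       the values are placeholders, not real defaults.  */
    static const size_t rep_movsb_threshold = 2048;
    static const size_t rep_movsb_stop_threshold = 1024 * 1024;

    /* Placeholder strategies -- in this sketch they all defer to memmove.
       A real implementation would use a vector loop, rep movsb, and
       non-temporal stores respectively.  */
    static void copy_with_vectors (void *dst, const void *src, size_t n)   { memmove (dst, src, n); }
    static void copy_with_rep_movsb (void *dst, const void *src, size_t n) { memmove (dst, src, n); }
    static void copy_with_nt_stores (void *dst, const void *src, size_t n) { memmove (dst, src, n); }

    /* Size-based dispatch as the corrected comment describes it: mid-size
       copies use rep movsb (ERMS); anything at or above the stop threshold
       is also a candidate for the non-temporal (NT) path.  */
    void
    dispatch_copy (void *dst, const void *src, size_t n)
    {
      if (n < rep_movsb_threshold)
        copy_with_vectors (dst, src, n);
      else if (n < rep_movsb_stop_threshold)
        copy_with_rep_movsb (dst, src, n);
      else
        copy_with_nt_stores (dst, src, n);
    }

In the real assembly the same size comparison is the cmp against
__x86_rep_movsb_stop_threshold(%rip) followed by jae L(large_memcpy_2x_check) shown in the
hunk above.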