Diffstat (limited to 'sysdeps/x86_64/multiarch/memcmpeq-evex.S')
-rw-r--r--  sysdeps/x86_64/multiarch/memcmpeq-evex.S | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/memcmpeq-evex.S b/sysdeps/x86_64/multiarch/memcmpeq-evex.S
index 7ae3e3c8c9..3666f649cd 100644
--- a/sysdeps/x86_64/multiarch/memcmpeq-evex.S
+++ b/sysdeps/x86_64/multiarch/memcmpeq-evex.S
@@ -26,7 +26,7 @@
       and loading from either s1 or s2 would cause a page cross.
    2. Use xmm vector compare when size >= 8 bytes.
    3. Optimistically compare up to first 4 * VEC_SIZE one at a
-      to check for early mismatches. Only do this if its guranteed the
+      to check for early mismatches. Only do this if its guaranteed the
       work is not wasted.
    4. If size is 8 * VEC_SIZE or less, unroll the loop.
    5. Compare 4 * VEC_SIZE at a time with the aligned first memory
@@ -97,7 +97,7 @@ ENTRY_P2ALIGN (MEMCMPEQ, 6)
 	/* Fall through for [0, VEC_SIZE] as its the hottest.  */
 	ja	L(more_1x_vec)
 
-	/* Create mask of bytes that are guranteed to be valid because
+	/* Create mask of bytes that are guaranteed to be valid because
 	   of length (edx). Using masked movs allows us to skip checks
 	   for page crosses/zero size.  */
 	mov	$-1, %VRAX
@@ -253,7 +253,7 @@ L(loop_4x_vec):
 	   oring with VEC(4). Result is stored in VEC(4).  */
 	vpternlogd $0xf6, (VEC_SIZE * 2)(%rdx), %VMM(3), %VMM(4)
 
-	/* Seperate logic as we can only use testb for VEC_SIZE == 64.
+	/* Separate logic as we can only use testb for VEC_SIZE == 64.
 	 */
 # if VEC_SIZE == 64
 	testb	%dil, %dil
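
The comment in the last hunk describes how the main loop folds four vector compares into a single test: each pair of chunks from s1 and s2 is XORed, and the differences are ORed into one accumulator vector (vpternlogd with immediate 0xf6 performs that xor-then-or in a single instruction, leaving the result in VEC(4)). The following is a minimal scalar C sketch of the same idea, for illustration only; the helper name is invented, and uint64_t chunks stand in for the 32/64-byte EVEX vector registers the real routine uses.

/* Illustrative sketch only, not the glibc implementation: accumulate
   mismatches from four chunk pairs and decide equality with one test,
   mirroring the vpxord/vpternlogd accumulation into VEC(4).  */
#include <stdint.h>
#include <string.h>

static int
memcmpeq_4chunk_sketch (const unsigned char *s1, const unsigned char *s2)
{
    uint64_t a[4], b[4], acc = 0;

    /* Load four chunks from each buffer (memcpy sidesteps alignment).  */
    memcpy (a, s1, sizeof a);
    memcpy (b, s2, sizeof b);

    /* XOR each pair and OR the difference bits into the accumulator.  */
    for (int i = 0; i < 4; i++)
        acc |= a[i] ^ b[i];

    /* A zero accumulator means all four chunks matched.  */
    return acc == 0;
}

The assembly then tests the accumulated result once per iteration, which is why the hunk above needs VEC_SIZE-dependent logic (testb only covers the VEC_SIZE == 64 case).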