author		H.J. Lu <hjl.tools@gmail.com>	2019-01-21 11:32:24 -0800
committer	H.J. Lu <hjl.tools@gmail.com>	2019-01-21 11:32:37 -0800
commit		82d0b4a4d76db554eb6757acb790fcea30b19965 (patch)
tree		a3cb4225e0d8370936489c41be107825ba20a301 /sysdeps/x86_64/multiarch
parent		ecd8b842cf37ea112e59cd9085ff1f1b6e208ae0 (diff)
x86-64 memset/wmemset: Properly handle the length parameter [BZ #24097]
On x32, the size_t parameter may be passed in the lower 32 bits of a
64-bit register with non-zero upper 32 bits.  The string/memory
functions written in assembly must use only the lower 32 bits of a
64-bit register as the length, or must clear the upper 32 bits before
using the full 64-bit register as the length.

This patch fixes memset/wmemset for x32.  Tested on x86-64 and x32.  On
x86-64, libc.so is the same with and without the fix.
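
As a rough illustration of how a caller can legitimately hand memset a
length register whose upper 32 bits are non-zero on x32, here is a
minimal, self-contained sketch in the spirit of the new x32 tests (the
names parameter_t and do_memset are illustrative only, not the actual
contents of tst-size_t-memset.c).  On x32 both size_t and void * are 4
bytes, so the 8-byte struct below is passed in a single 64-bit register
and the pointer member ends up in the bits above the 32-bit length.  A
second sketch, after the ChangeLog entries below, shows why a single
32-bit self-move is enough to sanitize such a register.

#include <string.h>
#include <stdio.h>

typedef struct
{
  size_t len;	/* Low 32 bits of the register on x32.  */
  void *p;	/* Upper 32 bits of the same register: non-zero.  */
} parameter_t;

__attribute__ ((noinline, noclone))
static int
do_memset (parameter_t a, parameter_t b)
{
  /* a.len arrives in the low half of a register whose upper half holds
     a.p.  The ABI does not require the caller to clear those bits, so a
     memset written in assembly must not use the full 64-bit register as
     the length.  Whether the latent bug actually triggers here depends
     on the compiler leaving the upper bits dirty at the call site.  */
  memset (b.p, 0xa5, a.len);
  return 0;
}

int
main (void)
{
  static char buf[256];
  parameter_t a = { sizeof buf, buf };
  parameter_t b = { sizeof buf, buf };

  do_memset (a, b);
  for (size_t i = 0; i < sizeof buf; i++)
    if (buf[i] != (char) 0xa5)
      {
	printf ("byte %zu not filled\n", i);
	return 1;
      }
  puts ("ok");
  return 0;
}

Built as an x32 binary (gcc -mx32), a memset that reads the whole
64-bit length register would see an enormous length and fault or
overrun; the fixed implementation fills exactly 256 bytes.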

	[BZ #24097]
	CVE-2019-6488
	* sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S: Use
	RDX_LP for length.  Clear the upper 32 bits of RDX register.
	* sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S: Likewise.
	* sysdeps/x86_64/x32/Makefile (tests): Add tst-size_t-memset and
	tst-size_t-wmemset.
	* sysdeps/x86_64/x32/tst-size_t-memset.c: New file.
	* sysdeps/x86_64/x32/tst-size_t-wmemset.c: Likewise.
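
For context on the mechanism of the fix: on x86-64, any write to a
32-bit register zero-extends into the full 64-bit register, which is
why a single "mov %edx, %edx" is enough to sanitize a dirty x32 length
in %rdx.  A minimal sketch of that behaviour, using a hypothetical
helper and GCC extended asm (not part of this patch):

#include <stdio.h>

/* Force a value through a 32-bit self-move, the same idiom the patch
   inserts under "# ifdef __ILP32__".  The 'k' operand modifier names
   the 32-bit form of whatever register the compiler picked.  */
static unsigned long long
clear_upper32 (unsigned long long x)
{
  asm ("mov %k0, %k0"	/* A 32-bit write zero-extends to 64 bits.  */
       : "+r" (x));
  return x;
}

int
main (void)
{
  unsigned long long dirty = 0xdeadbeef00000100ULL;
  printf ("before: %#llx\n", dirty);
  printf ("after:  %#llx\n", clear_upper32 (dirty));
  /* Prints 0xdeadbeef00000100, then 0x100: only the low 32 bits
     survive the self-move.  */
  return 0;
}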
Diffstat (limited to 'sysdeps/x86_64/multiarch')
-rw-r--r--	sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S	6
-rw-r--r--	sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S	34
2 files changed, 26 insertions, 14 deletions
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
index 6506ce6252..cd7a32c7c2 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
@@ -29,12 +29,16 @@
 	.section .text.avx512,"ax",@progbits
 #if defined PIC
 ENTRY (MEMSET_CHK)
-	cmpq	%rdx, %rcx
+	cmp	%RDX_LP, %RCX_LP
 	jb	HIDDEN_JUMPTARGET (__chk_fail)
 END (MEMSET_CHK)
 #endif
 
 ENTRY (MEMSET)
+# ifdef __ILP32__
+	/* Clear the upper 32 bits.  */
+	mov	%edx, %edx
+# endif
 	vpxor	%xmm0, %xmm0, %xmm0
 	vmovd	%esi, %xmm1
 	lea	(%rdi, %rdx), %rsi
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 71a0887c68..5e0d307d85 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -75,8 +75,8 @@
 	.section SECTION(.text),"ax",@progbits
 #if VEC_SIZE == 16 && IS_IN (libc)
 ENTRY (__bzero)
-	movq	%rdi, %rax /* Set return value.  */
-	movq	%rsi, %rdx /* Set n.  */
+	mov	%RDI_LP, %RAX_LP /* Set return value.  */
+	mov	%RSI_LP, %RDX_LP /* Set n.  */
 	pxor	%xmm0, %xmm0
 	jmp	L(entry_from_bzero)
 END (__bzero)
@@ -86,13 +86,13 @@ weak_alias (__bzero, bzero)
 #if IS_IN (libc)
 # if defined SHARED
 ENTRY_CHK (WMEMSET_CHK_SYMBOL (__wmemset_chk, unaligned))
-	cmpq	%rdx, %rcx
+	cmp	%RDX_LP, %RCX_LP
 	jb	HIDDEN_JUMPTARGET (__chk_fail)
 END_CHK (WMEMSET_CHK_SYMBOL (__wmemset_chk, unaligned))
 # endif
 
 ENTRY (WMEMSET_SYMBOL (__wmemset, unaligned))
-	shlq	$2, %rdx
+	shl	$2, %RDX_LP
 	WMEMSET_VDUP_TO_VEC0_AND_SET_RETURN (%esi, %rdi)
 	jmp	L(entry_from_bzero)
 END (WMEMSET_SYMBOL (__wmemset, unaligned))
@@ -100,13 +100,17 @@ END (WMEMSET_SYMBOL (__wmemset, unaligned))
 
 #if defined SHARED && IS_IN (libc)
 ENTRY_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned))
-	cmpq	%rdx, %rcx
+	cmp	%RDX_LP, %RCX_LP
 	jb	HIDDEN_JUMPTARGET (__chk_fail)
 END_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned))
 #endif
 
 ENTRY (MEMSET_SYMBOL (__memset, unaligned))
 	MEMSET_VDUP_TO_VEC0_AND_SET_RETURN (%esi, %rdi)
+# ifdef __ILP32__
+	/* Clear the upper 32 bits.  */
+	mov	%edx, %edx
+# endif
 L(entry_from_bzero):
 	cmpq	$VEC_SIZE, %rdx
 	jb	L(less_vec)
@@ -122,14 +126,14 @@ END (MEMSET_SYMBOL (__memset, unaligned))
 
 # if VEC_SIZE == 16
 ENTRY (__memset_chk_erms)
-	cmpq	%rdx, %rcx
+	cmp	%RDX_LP, %RCX_LP
 	jb	HIDDEN_JUMPTARGET (__chk_fail)
 END (__memset_chk_erms)
 
 /* Only used to measure performance of REP STOSB.  */
 ENTRY (__memset_erms)
 	/* Skip zero length.  */
-	testq	%rdx, %rdx
+	test	%RDX_LP, %RDX_LP
 	jnz	 L(stosb)
 	movq	%rdi, %rax
 	ret
@@ -141,11 +145,11 @@ ENTRY (MEMSET_SYMBOL (__memset, erms))
 L(stosb):
 	/* Issue vzeroupper before rep stosb.  */
 	VZEROUPPER
-	movq	%rdx, %rcx
+	mov	%RDX_LP, %RCX_LP
 	movzbl	%sil, %eax
-	movq	%rdi, %rdx
+	mov	%RDI_LP, %RDX_LP
 	rep stosb
-	movq	%rdx, %rax
+	mov	%RDX_LP, %RAX_LP
 	ret
 # if VEC_SIZE == 16
 END (__memset_erms)
@@ -155,16 +159,20 @@ END (MEMSET_SYMBOL (__memset, erms))
 
 # if defined SHARED && IS_IN (libc)
 ENTRY_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned_erms))
-	cmpq	%rdx, %rcx
+	cmp	%RDX_LP, %RCX_LP
 	jb	HIDDEN_JUMPTARGET (__chk_fail)
 END_CHK (MEMSET_CHK_SYMBOL (__memset_chk, unaligned_erms))
 # endif
 
 ENTRY (MEMSET_SYMBOL (__memset, unaligned_erms))
 	MEMSET_VDUP_TO_VEC0_AND_SET_RETURN (%esi, %rdi)
-	cmpq	$VEC_SIZE, %rdx
+# ifdef __ILP32__
+	/* Clear the upper 32 bits.  */
+	mov	%edx, %edx
+# endif
+	cmp	$VEC_SIZE, %RDX_LP
 	jb	L(less_vec)
-	cmpq	$(VEC_SIZE * 2), %rdx
+	cmp	$(VEC_SIZE * 2), %RDX_LP
 	ja	L(stosb_more_2x_vec)
 	/* From VEC and to 2 * VEC.  No branch when size == VEC_SIZE.  */
 	VMOVU	%VEC(0), -VEC_SIZE(%rdi,%rdx)