author	H.J. Lu <hjl.tools@gmail.com>	2019-01-21 11:27:25 -0800
committer	H.J. Lu <hjl.tools@gmail.com>	2019-01-21 11:27:36 -0800
commit	231c56760c1e2ded21ad96bbb860b1f08c556c7a (patch)
tree	4aca6b1947a0188731cbf37e27b307cc8603b1ef /sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
parent	b304fc201d2f6baf52ea790df8643e99772243cd (diff)
x86-64 memcpy: Properly handle the length parameter [BZ #24097]
On x32, the size_t parameter may be passed in the lower 32 bits of a
64-bit register with non-zero upper 32 bits.  String/memory functions
written in assembly must therefore either use only the lower 32 bits of
a 64-bit register as the length, or clear the upper 32 bits before using
the full 64-bit register as the length.

This patch fixes memcpy for x32.  Tested on x86-64 and x32.  On x86-64,
libc.so is identical with and without the fix.
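
To make the failure mode concrete, here is a minimal before/after sketch
of the mempcpy entry point as changed by this patch.  The *_LP register
macros (presumably defined in sysdeps/x86_64/sysdep.h) expand to the
32-bit registers under __ILP32__ and to the full 64-bit registers
otherwise:

	/* Before: the full 64-bit %rdx is used directly, so stray upper
	   bits in an x32 length corrupt the computed return value.  */
	movq	%rdi, %rax
	addq	%rdx, %rax	/* rax = dst + len; len may carry garbage bits.  */

	/* After: the *_LP macros touch only the low 32 bits on x32, and
	   an explicit `mov %edx, %edx' zero-extends the length before the
	   full 64-bit register is used for address arithmetic.  */
	mov	%RDI_LP, %RAX_LP
	add	%RDX_LP, %RAX_LP
# ifdef __ILP32__
	mov	%edx, %edx	/* Clear the upper 32 bits of the length.  */
# endif
	lea	(%rsi, %rdx), %rcx	/* Safe: %rdx is now zero-extended.  */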

	[BZ #24097]
	CVE-2019-6488
	* sysdeps/x86_64/multiarch/memcpy-ssse3-back.S: Use RDX_LP for
	length.  Clear the upper 32 bits of RDX register.
	* sysdeps/x86_64/multiarch/memcpy-ssse3.S: Likewise.
	* sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S:
	Likewise.
	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:
	Likewise.
	* sysdeps/x86_64/x32/Makefile (tests): Add tst-size_t-memcpy.
	* sysdeps/x86_64/x32/tst-size_t-memcpy.c: New file.
Diffstat (limited to 'sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S')
-rw-r--r--	sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S	| 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
index b39324df0a..27df2baab5 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
@@ -24,27 +24,31 @@
 
 	.section .text.avx512,"ax",@progbits
 ENTRY (__mempcpy_chk_avx512_no_vzeroupper)
-	cmpq	%rdx, %rcx
+	cmp	%RDX_LP, %RCX_LP
 	jb	HIDDEN_JUMPTARGET (__chk_fail)
 END (__mempcpy_chk_avx512_no_vzeroupper)
 
 ENTRY (__mempcpy_avx512_no_vzeroupper)
-	movq	%rdi, %rax
-	addq	%rdx, %rax
+	mov	%RDI_LP, %RAX_LP
+	add	%RDX_LP, %RAX_LP
 	jmp	L(start)
 END (__mempcpy_avx512_no_vzeroupper)
 
 ENTRY (__memmove_chk_avx512_no_vzeroupper)
-	cmpq	%rdx, %rcx
+	cmp	%RDX_LP, %RCX_LP
 	jb	HIDDEN_JUMPTARGET (__chk_fail)
 END (__memmove_chk_avx512_no_vzeroupper)
 
 ENTRY (__memmove_avx512_no_vzeroupper)
-	mov	%rdi, %rax
+	mov	%RDI_LP, %RAX_LP
 # ifdef USE_AS_MEMPCPY
-	add	%rdx, %rax
+	add	%RDX_LP, %RAX_LP
 # endif
 L(start):
+# ifdef __ILP32__
+	/* Clear the upper 32 bits.  */
+	mov	%edx, %edx
+# endif
 	lea	(%rsi, %rdx), %rcx
 	lea	(%rdi, %rdx), %r9
 	cmp	$512, %rdx
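
For reference, the *_LP names used above are not introduced by this
patch; a sketch of the usual glibc convention (per
sysdeps/x86_64/sysdep.h, reproduced here from memory rather than from
this commit) is:

	/* Pick the register width matching the ABI's long/pointer size.  */
# ifdef __ILP32__
#  define RAX_LP	eax
#  define RCX_LP	ecx
#  define RDX_LP	edx
#  define RDI_LP	edi
# else
#  define RAX_LP	rax
#  define RCX_LP	rcx
#  define RDX_LP	rdx
#  define RDI_LP	rdi
# endif

With these definitions, `mov %RDI_LP, %RAX_LP' assembles to
`mov %edi, %eax' on x32, which zero-extends into the full 64-bit
register, while the explicit `mov %edx, %edx' added at L(start) covers
the one place where the untruncated %rdx would otherwise feed the lea
address arithmetic.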