author     Adhemerval Zanella <adhemerval.zanella@linaro.org>  2017-01-02 12:20:21 -0200
committer  Adhemerval Zanella <adhemerval.zanella@linaro.org>  2017-01-02 17:52:51 -0200
commit     23d27709a423aec32821e9a5198a10267107bae2 (patch)
tree       70f3e22be28951df11af9a80c07ba2ff487ed103 /sysdeps/i386
parent     62210e7eb1b270c72c2ee61a14015285cd817262 (diff)
Fix i686 memchr for large input sizes
As with BZ#19387 and BZ#20971, both optimized i686 memchr assembly
implementations (memchr-sse2-bsf and memchr-sse2) do not correctly
handle a size argument large enough to overflow the end-address
computation.

This is shown by the new tests added in commit 3daef2c8ee4df29, where
both implementations fail when the size is SIZE_MAX.
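
As a rough illustration (a hypothetical test call, not the actual test
change), a call of this shape exercises the overflow path on 32-bit,
where the buggy versions returned NULL even though the byte is present:

  #include <assert.h>
  #include <stdint.h>
  #include <string.h>

  int
  main (void)
  {
    /* Illustrative only: with a length of SIZE_MAX the old code computed
       buf + SIZE_MAX, which wraps around on 32-bit i686, so the bounds
       check fired and NULL was returned prematurely.  */
    char buf[32] = "abc";
    assert (memchr (buf, 'c', SIZE_MAX) == buf + 2);
    return 0;
  }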

This patch uses a strategy similar to the one in 3daef2c8ee4df2, where
saturated math is used to handle the overflow case.
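
For reference, a minimal C sketch of the saturated end-address
computation that the new assembly performs (the function name and types
are illustrative, not part of the patch):

  #include <stddef.h>
  #include <stdint.h>

  /* Illustrative only: compute the last address to scan without letting
     BUF + LEN wrap around the 32-bit address space; on overflow, clamp
     the result to UINTPTR_MAX.  */
  static inline uintptr_t
  saturated_end (const void *buf, size_t len)
  {
    uintptr_t start = (uintptr_t) buf;
    uintptr_t end = start + len;
    end |= -(uintptr_t) (end < start);  /* all-ones mask if it wrapped */
    return end;
  }

In the assembly below, the carry flag set by the add provides that mask:
sbb %eax, %eax yields 0 or -1 depending on the carry, and or %eax, %edx
saturates the sum to 0xffffffff when the addition wrapped.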

Checked on i686-linux-gnu.

	[BZ #21014]
	* sysdeps/i386/i686/multiarch/memchr-sse2-bsf.S (MEMCHR): Avoid overflow
	in pointer addition.
	* sysdeps/i386/i686/multiarch/memchr-sse2.S (MEMCHR): Likewise.
Diffstat (limited to 'sysdeps/i386')
-rw-r--r--  sysdeps/i386/i686/multiarch/memchr-sse2-bsf.S | 10 ++++++++--
-rw-r--r--  sysdeps/i386/i686/multiarch/memchr-sse2.S     |  8 +++++++-
2 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/sysdeps/i386/i686/multiarch/memchr-sse2-bsf.S b/sysdeps/i386/i686/multiarch/memchr-sse2-bsf.S
index c035329ece..dd316486e6 100644
--- a/sysdeps/i386/i686/multiarch/memchr-sse2-bsf.S
+++ b/sysdeps/i386/i686/multiarch/memchr-sse2-bsf.S
@@ -149,9 +149,15 @@ L(crosscache):
 	.p2align 4
 L(unaligned_no_match):
 # ifndef USE_AS_RAWMEMCHR
-	sub	$16, %edx
+        /* Calculate the last acceptable address and check for possible
+           addition overflow by using saturated math:
+           edx = ecx + edx
+           edx |= -(edx < ecx)  */
 	add	%ecx, %edx
-	jle	L(return_null)
+	sbb	%eax, %eax
+	or	%eax, %edx
+	sub	$16, %edx
+	jbe	L(return_null)
 	add	$16, %edi
 # else
 	add	$16, %edx
diff --git a/sysdeps/i386/i686/multiarch/memchr-sse2.S b/sysdeps/i386/i686/multiarch/memchr-sse2.S
index f1a11b5c67..910679cfc0 100644
--- a/sysdeps/i386/i686/multiarch/memchr-sse2.S
+++ b/sysdeps/i386/i686/multiarch/memchr-sse2.S
@@ -118,8 +118,14 @@ L(crosscache):
 # ifndef USE_AS_RAWMEMCHR
 	jnz	L(match_case2_prolog1)
 	lea	-16(%edx), %edx
+        /* Calculate the last acceptable address and check for possible
+           addition overflow by using saturated math:
+           edx = ecx + edx
+           edx |= -(edx < ecx)  */
 	add	%ecx, %edx
-	jle	L(return_null)
+	sbb	%eax, %eax
+	or	%eax, %edx
+	jbe	L(return_null)
 	lea	16(%edi), %edi
 # else
 	jnz	L(match_case1_prolog1)