author | H.J. Lu <hjl.tools@gmail.com> | 2019-02-04 06:31:01 -0800
---|---|---
committer | H.J. Lu <hjl.tools@gmail.com> | 2019-02-04 06:31:13 -0800
commit | 3f635fb43389b54f682fc9ed2acc0b2aaf4a923d (patch) |
tree | b157161b8ab3dd20e7304706c6cf8ecd6a91164b /sysdeps/x86_64/memcmp.S |
parent | 2ab5741b8a96d02eb89e455b1971a19e7aef67bc (diff) |
x86-64 memcmp: Use unsigned Jcc instructions on size [BZ #24155]
Since the size argument is unsigned, we should use unsigned Jcc
instructions, instead of signed, to check size.

Tested on x86-64 and x32, with and without --disable-multi-arch.

    [BZ #24155]
    CVE-2019-7309
    * NEWS: Updated for CVE-2019-7309.
    * sysdeps/x86_64/memcmp.S: Use RDX_LP for size.  Clear the
    upper 32 bits of RDX register for x32.  Use unsigned Jcc
    instructions, instead of signed.
    * sysdeps/x86_64/x32/Makefile (tests): Add tst-size_t-memcmp-2.
    * sysdeps/x86_64/x32/tst-size_t-memcmp-2.c: New test.
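The root cause is easy to see in C. `jle` and `jge` read RDX as a signed 64-bit value, so a size whose bit 63 is set (which stale upper bits in RDX can produce on x32, where only EDX carries the 32-bit size_t) looks negative and takes the `cmpq $1, %rdx; jle` branch. A minimal sketch, not part of the commit and with an illustrative value, contrasting the signed and unsigned views:

```c
/* Minimal sketch (not part of the commit): the same bit pattern
   passes a signed <= 1 check but fails the unsigned one.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  /* A size with bit 63 set, e.g. garbage in the upper bits of RDX.  */
  uint64_t size = 0x8000000000000001ULL;

  /* Signed view: "cmpq $1, %rdx; jle" takes the branch, because the
     value is negative when interpreted as int64_t.  */
  printf ("signed   size <= 1: %d\n", (int64_t) size <= 1);  /* 1 */

  /* Unsigned view: "cmpq $1, %rdx; jbe" falls through, because the
     value is huge when interpreted as uint64_t.  */
  printf ("unsigned size <= 1: %d\n", size <= 1);            /* 0 */

  return 0;
}
```

Compiled on x86-64 this prints 1 for the signed view and 0 for the unsigned one, which is exactly what the `jle`→`jbe` and `jge`→`jae` changes in the diff below correct.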
Diffstat (limited to 'sysdeps/x86_64/memcmp.S')
-rw-r--r-- | sysdeps/x86_64/memcmp.S | 20
1 file changed, 12 insertions, 8 deletions
diff --git a/sysdeps/x86_64/memcmp.S b/sysdeps/x86_64/memcmp.S
index 1fc487caa5..1322bb3b92 100644
--- a/sysdeps/x86_64/memcmp.S
+++ b/sysdeps/x86_64/memcmp.S
@@ -21,14 +21,18 @@
 
 	.text
 ENTRY (memcmp)
-	test	%rdx, %rdx
+#ifdef __ILP32__
+	/* Clear the upper 32 bits.  */
+	movl	%edx, %edx
+#endif
+	test	%RDX_LP, %RDX_LP
 	jz	L(finz)
 	cmpq	$1, %rdx
-	jle	L(finr1b)
+	jbe	L(finr1b)
 	subq	%rdi, %rsi
 	movq	%rdx, %r10
 	cmpq	$32, %r10
-	jge	L(gt32)
+	jae	L(gt32)
 	/* Handle small chunks and last block of less than 32 bytes.  */
 L(small):
 	testq	$1, %r10
@@ -156,7 +160,7 @@ L(A32):
 	movq	%r11, %r10
 	andq	$-32, %r10
 	cmpq	%r10, %rdi
-	jge	L(mt16)
+	jae	L(mt16)
 	/* Pre-unroll to be ready for unrolled 64B loop.  */
 	testq	$32, %rdi
 	jz	L(A64)
@@ -178,7 +182,7 @@ L(A64):
 	movq	%r11, %r10
 	andq	$-64, %r10
 	cmpq	%r10, %rdi
-	jge	L(mt32)
+	jae	L(mt32)
 
 L(A64main):
 	movdqu	(%rdi,%rsi), %xmm0
@@ -216,7 +220,7 @@ L(mt32):
 	movq	%r11, %r10
 	andq	$-32, %r10
 	cmpq	%r10, %rdi
-	jge	L(mt16)
+	jae	L(mt16)
 
 L(A32main):
 	movdqu	(%rdi,%rsi), %xmm0
@@ -254,7 +258,7 @@ L(ATR):
 	movq	%r11, %r10
 	andq	$-32, %r10
 	cmpq	%r10, %rdi
-	jge	L(mt16)
+	jae	L(mt16)
 	testq	$16, %rdi
 	jz	L(ATR32)
 
@@ -325,7 +329,7 @@ L(ATR64main):
 	movq	%r11, %r10
 	andq	$-32, %r10
 	cmpq	%r10, %rdi
-	jge	L(mt16)
+	jae	L(mt16)
 
 L(ATR32res):
 	movdqa	(%rdi,%rsi), %xmm0
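The new sysdeps/x86_64/x32/tst-size_t-memcmp-2.c itself is not shown on this page. As a hypothetical illustration of the failure mode it guards against (not the actual test), the sketch below, meaningful on x32 only, smuggles a 64-bit value into RDX by calling memcmp through a deliberately mismatched function-pointer type; the wrapper type, buffers, and size constant are all assumptions:

```c
/* Hypothetical x32-only reproducer sketch; NOT the actual
   tst-size_t-memcmp-2.c.  Calling memcmp through a mismatched
   function-pointer type is undefined behavior, used here only to
   leave garbage in the upper half of RDX the way a sloppy x32
   caller could.  */
#include <stdio.h>
#include <string.h>

typedef int (*memcmp_u64_t) (const void *, const void *,
                             unsigned long long);

int
main (void)
{
  char a[16] = "aaaaaaaaaaaaaaa";
  char b[16] = "aaaaaaabaaaaaaa";  /* differs from a at byte 7 */
  memcmp_u64_t fn = (memcmp_u64_t) memcmp;

  /* The real size (16) is in the low 32 bits; bit 63 models stale
     register contents.  The buggy memcmp saw a negative size via
     "jle", compared at most one byte, and returned 0.  The fixed
     version truncates the size with "movl %edx, %edx" and reports
     the mismatch at byte 7.  */
  int r = fn (a, b, (1ULL << 63) | 16);
  printf ("memcmp returned %d (nonzero expected)\n", r);
  return r == 0;
}
```

On LP64 x86-64 the same call is a genuinely huge size request rather than a truncation bug, so this sketch only demonstrates the CVE on x32, where size_t is 32 bits wide.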