| author | Andrew Senkevich <andrew.n.senkevich@gmail.com> | 2018-03-23 16:19:45 +0100 |
|---|---|---|
| committer | Andrew Senkevich <andrew.n.senkevich@gmail.com> | 2018-03-23 16:19:45 +0100 |
| commit | cd66c0e584c6d692bc8347b5e72723d02b8a8ada (patch) | |
| tree | 29f535d9674e6e6dac23236338e6a2f30ce29ad3 /sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S | |
| parent | a44061398c3b531b37e134a6a97accb2251fa28a (diff) | |
Fix i386 memmove issue (bug 22644).
[BZ #22644]
* sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed branch conditions.
* string/test-memmove.c (do_test2): New testcase.
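The fix is easier to follow with the signed/unsigned distinction spelled out: `jg`/`jle` branch on a signed comparison, while `ja`/`jbe` branch on an unsigned one. The values being compared here are 32-bit pointers and lengths, which are unsigned, so once an address or a length reaches 2 GiB it looks negative to a signed compare and memmove can take the wrong copy path. A minimal C sketch of the effect (illustrative only, not part of the patch; the addresses are made-up examples):

```c
/* Illustrative only, not part of the glibc patch: why a signed compare
   (jg/jle) misorders 32-bit addresses once one of them is >= 2 GiB,
   while an unsigned compare (ja/jbe) matches the real address order.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint32_t src = 0x7f000000;   /* made-up source address, below 2 GiB  */
  uint32_t dst = 0x81000000;   /* made-up destination, above 2 GiB     */

  /* Unsigned view (what `ja` tests): dst really is above src.  */
  printf ("unsigned: dst > src -> %d\n", dst > src);                      /* 1 */

  /* Signed view (what `jg` tests): 0x81000000 wraps to a negative value
     on two's-complement targets, so dst appears to be below src.  */
  printf ("signed:   dst > src -> %d\n", (int32_t) dst > (int32_t) src);  /* 0 */

  return 0;
}
```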
Diffstat (limited to 'sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S')
-rw-r--r-- | sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S | 12 |
1 file changed, 6 insertions(+), 6 deletions(-)
```diff
diff --git a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
index 9c3bbe7e17..9aa17de99c 100644
--- a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+++ b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
@@ -72,7 +72,7 @@ ENTRY (MEMCPY)
 	cmp	%edx, %eax
 
 # ifdef USE_AS_MEMMOVE
-	jg	L(check_forward)
+	ja	L(check_forward)
 
 L(mm_len_0_or_more_backward):
 /* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
@@ -81,7 +81,7 @@ L(mm_len_0_or_more_backward):
 	jbe	L(mm_len_0_16_bytes_backward)
 
 	cmpl	$32, %ecx
-	jg	L(mm_len_32_or_more_backward)
+	ja	L(mm_len_32_or_more_backward)
 
 /* Copy [0..32] and return. */
 	movdqu	(%eax), %xmm0
@@ -92,7 +92,7 @@ L(mm_len_0_or_more_backward):
 
 L(mm_len_32_or_more_backward):
 	cmpl	$64, %ecx
-	jg	L(mm_len_64_or_more_backward)
+	ja	L(mm_len_64_or_more_backward)
 
 /* Copy [0..64] and return. */
 	movdqu	(%eax), %xmm0
@@ -107,7 +107,7 @@ L(mm_len_32_or_more_backward):
 
 L(mm_len_64_or_more_backward):
 	cmpl	$128, %ecx
-	jg	L(mm_len_128_or_more_backward)
+	ja	L(mm_len_128_or_more_backward)
 
 /* Copy [0..128] and return. */
 	movdqu	(%eax), %xmm0
@@ -132,7 +132,7 @@ L(mm_len_128_or_more_backward):
 	add	%ecx, %eax
 	cmp	%edx, %eax
 	movl	SRC(%esp), %eax
-	jle	L(forward)
+	jbe	L(forward)
 	PUSH (%esi)
 	PUSH (%edi)
 	PUSH (%ebx)
@@ -269,7 +269,7 @@ L(check_forward):
 	add	%edx, %ecx
 	cmp	%eax, %ecx
 	movl	LEN(%esp), %ecx
-	jle	L(forward)
+	jbe	L(forward)
 
 /* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
    separately. */
```
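The commit message also mentions a new do_test2 case in string/test-memmove.c. The sketch below is not that test; it is a hedged, stand-alone approximation of the scenario the patch guards against, with an assumed mmap address hint, region size, and offsets chosen so that an overlapping memmove straddles the 2 GiB boundary in a 32-bit process. With the old signed branches such a call can fall through to a plain forward copy and corrupt the overlap; with the unsigned branches it copies backward as it should.

```c
/* Hedged sketch only: NOT the actual string/test-memmove.c do_test2.
   Attempts to reproduce the bug scenario on a 32-bit build: an overlapping
   memmove whose source lies below the 2 GiB line and whose destination lies
   above it.  The address hint, size and offsets are assumptions.  */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

int
main (void)
{
  size_t size = 0x20000000;                     /* 512 MiB region */
  unsigned char *buf = mmap ((void *) 0x70000000, size,
                             PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  /* The kernel may ignore the hint; only continue if the mapping
     actually straddles the 2 GiB boundary.  */
  if (buf == MAP_FAILED
      || (uintptr_t) buf >= 0x80000000u
      || (uintptr_t) buf + size <= 0x80000000u)
    {
      puts ("no mapping across 2 GiB; nothing to test");
      return 0;
    }

  unsigned char *src = buf + 0x0f000000;        /* with the assumed hint: just below 2 GiB */
  unsigned char *dst = buf + 0x11000000;        /* with the assumed hint: just above 2 GiB */
  size_t len = 0x03000000;                      /* source overlaps destination by 16 MiB */

  for (size_t i = 0; i < len; i++)
    src[i] = (unsigned char) i;

  memmove (dst, src, len);                      /* dst > src and the regions overlap */

  for (size_t i = 0; i < len; i++)
    if (dst[i] != (unsigned char) i)
      {
        printf ("FAIL: mismatch at offset 0x%zx\n", i);
        return 1;
      }

  puts ("PASS");
  munmap (buf, size);
  return 0;
}
```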