| field | value | date |
|---|---|---|
| author | Andrew Senkevich <andrew.n.senkevich@gmail.com> | 2018-03-23 16:19:45 +0100 |
| committer | Andrew Senkevich <andrew.n.senkevich@gmail.com> | 2018-03-23 16:19:45 +0100 |
| commit | cd66c0e584c6d692bc8347b5e72723d02b8a8ada (patch) | |
| tree | 29f535d9674e6e6dac23236338e6a2f30ce29ad3 | |
| parent | a44061398c3b531b37e134a6a97accb2251fa28a (diff) | |
Fix i386 memmove issue (bug 22644).
[BZ #22644]
* sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed branch conditions.
* string/test-memmove.c (do_test2): New testcase.
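For context, bug 22644 comes down to signed versus unsigned comparisons: on i386, an address at or above 0x80000000 looks negative to the signed branches (jg/jle) that followed the cmp instructions, so memmove's overlap checks could pick the wrong copy direction. The program below is not part of the commit; it is a minimal sketch with hypothetical addresses that shows the mis-ordering the switch to unsigned branches (ja/jbe) avoids.

```c
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* Hypothetical 32-bit addresses straddling the 2 GiB boundary.  */
  uint32_t src = 0x7ffffff0;
  uint32_t dst = 0x80000010;

  /* Unsigned comparison (what ja/jbe test): dst is the higher address.  */
  printf ("unsigned: dst > src -> %d\n", dst > src);                     /* prints 1 */

  /* Signed comparison (what jg/jle test): dst appears negative, so it
     compares as "smaller" -- the mis-ordering behind bug 22644.  */
  printf ("signed:   dst > src -> %d\n", (int32_t) dst > (int32_t) src); /* prints 0 */

  return 0;
}
```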
-rw-r--r-- | ChangeLog | 8
-rw-r--r-- | string/test-memmove.c | 58
-rw-r--r-- | sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S | 12
3 files changed, 72 insertions, 6 deletions
diff --git a/ChangeLog b/ChangeLog
index 18ed09e5cb..afdb7663dd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2018-03-23  Andrew Senkevich  <andrew.senkevich@intel.com>
+	    Max Horn  <max@quendi.de>
+
+	[BZ #22644]
+	* sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S: Fixed
+	branch conditions.
+	* string/test-memmove.c (do_test2): New testcase.
+
 2018-03-22  Joseph Myers  <joseph@codesourcery.com>
 
 	* sysdeps/generic/frame.h: Remove file.
diff --git a/string/test-memmove.c b/string/test-memmove.c
index edc7a4c3bf..64e3651ba4 100644
--- a/string/test-memmove.c
+++ b/string/test-memmove.c
@@ -24,6 +24,7 @@
 # define TEST_NAME "memmove"
 #endif
 #include "test-string.h"
+#include <support/test-driver.h>
 
 char *simple_memmove (char *, const char *, size_t);
 
@@ -245,6 +246,60 @@ do_random_tests (void)
     }
 }
 
+static void
+do_test2 (void)
+{
+  size_t size = 0x20000000;
+  uint32_t * large_buf;
+
+  large_buf = mmap ((void*) 0x70000000, size, PROT_READ | PROT_WRITE,
+		    MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (large_buf == MAP_FAILED)
+    error (EXIT_UNSUPPORTED, errno, "Large mmap failed");
+
+  if ((uintptr_t) large_buf > 0x80000000 - 128
+      || 0x80000000 - (uintptr_t) large_buf > 0x20000000)
+    {
+      error (0, 0, "Large mmap allocated improperly");
+      ret = EXIT_UNSUPPORTED;
+      munmap ((void *) large_buf, size);
+      return;
+    }
+
+  size_t bytes_move = 0x80000000 - (uintptr_t) large_buf;
+  size_t arr_size = bytes_move / sizeof (uint32_t);
+  size_t i;
+
+  FOR_EACH_IMPL (impl, 0)
+    {
+      for (i = 0; i < arr_size; i++)
+	large_buf[i] = (uint32_t) i;
+
+      uint32_t * dst = &large_buf[33];
+
+#ifdef TEST_BCOPY
+      CALL (impl, (char *) large_buf, (char *) dst, bytes_move);
+#else
+      CALL (impl, (char *) dst, (char *) large_buf, bytes_move);
+#endif
+
+      for (i = 0; i < arr_size; i++)
+	{
+	  if (dst[i] != (uint32_t) i)
+	    {
+	      error (0, 0,
+		     "Wrong result in function %s dst \"%p\" src \"%p\" offset \"%zd\"",
+		     impl->name, dst, large_buf, i);
+	      ret = 1;
+	      break;
+	    }
+	}
+    }
+
+  munmap ((void *) large_buf, size);
+}
+
 int
 test_main (void)
 {
@@ -284,6 +339,9 @@ test_main (void)
     }
 
   do_random_tests ();
+
+  do_test2 ();
+
   return ret;
 }
 
diff --git a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
index 9c3bbe7e17..9aa17de99c 100644
--- a/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
+++ b/sysdeps/i386/i686/multiarch/memcpy-sse2-unaligned.S
@@ -72,7 +72,7 @@ ENTRY (MEMCPY)
 	cmp	%edx, %eax
 
 # ifdef USE_AS_MEMMOVE
-	jg	L(check_forward)
+	ja	L(check_forward)
 
 L(mm_len_0_or_more_backward):
 /* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
@@ -81,7 +81,7 @@ L(mm_len_0_or_more_backward):
 	jbe	L(mm_len_0_16_bytes_backward)
 
 	cmpl	$32, %ecx
-	jg	L(mm_len_32_or_more_backward)
+	ja	L(mm_len_32_or_more_backward)
 
 /* Copy [0..32] and return.  */
 	movdqu	(%eax), %xmm0
@@ -92,7 +92,7 @@ L(mm_len_32_or_more_backward):
 	cmpl	$64, %ecx
-	jg	L(mm_len_64_or_more_backward)
+	ja	L(mm_len_64_or_more_backward)
 
 /* Copy [0..64] and return.  */
 	movdqu	(%eax), %xmm0
@@ -107,7 +107,7 @@ L(mm_len_32_or_more_backward):
 
 L(mm_len_64_or_more_backward):
 	cmpl	$128, %ecx
-	jg	L(mm_len_128_or_more_backward)
+	ja	L(mm_len_128_or_more_backward)
 
 /* Copy [0..128] and return.  */
 	movdqu	(%eax), %xmm0
@@ -132,7 +132,7 @@ L(mm_len_128_or_more_backward):
 	add	%ecx, %eax
 	cmp	%edx, %eax
 	movl	SRC(%esp), %eax
-	jle	L(forward)
+	jbe	L(forward)
 	PUSH (%esi)
 	PUSH (%edi)
 	PUSH (%ebx)
@@ -269,7 +269,7 @@ L(check_forward):
 	add	%edx, %ecx
 	cmp	%eax, %ecx
 	movl	LEN(%esp), %ecx
-	jle	L(forward)
+	jbe	L(forward)
 
 /* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
 	separately.  */
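The corrected ja/jbe branches implement memmove's forward-or-backward decision as an unsigned address comparison. As an illustration only (this is not glibc's code), the same decision written in C with a hypothetical helper my_memmove might look like this:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical, simplified memmove: the overlap check compares addresses
   as unsigned values (uintptr_t), mirroring what the fixed ja/jbe branches
   do after cmp in the assembly.  */
static void *
my_memmove (void *dst, const void *src, size_t n)
{
  unsigned char *d = dst;
  const unsigned char *s = src;

  if ((uintptr_t) d <= (uintptr_t) s || (uintptr_t) d >= (uintptr_t) s + n)
    {
      /* Destination does not overlap the source's tail: copy forward.  */
      for (size_t i = 0; i < n; i++)
        d[i] = s[i];
    }
  else
    {
      /* Destination starts inside the source: copy backward.  */
      for (size_t i = n; i > 0; i--)
        d[i - 1] = s[i - 1];
    }
  return dst;
}

int
main (void)
{
  char buf[16] = "abcdefghijklmno";

  /* Overlapping move: dst = buf + 2 lies inside the 8-byte source.  */
  my_memmove (buf + 2, buf, 8);

  /* The 8 source bytes "abcdefgh" should arrive intact at buf + 2,
     which only happens if the overlap check chose a backward copy.  */
  return memcmp (buf + 2, "abcdefgh", 8) == 0 ? 0 : 1;
}
```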