From 049816c3be14e47e5fe10f8cd5a38bb611d34ce5 Mon Sep 17 00:00:00 2001
From: "H.J. Lu"
Date: Fri, 23 Jun 2017 12:45:57 -0700
Subject: x86-64: Optimize L(between_2_3) in memcmp-avx2-movbe.S

Turn

	movzbl	-1(%rdi, %rdx), %edi
	movzbl	-1(%rsi, %rdx), %esi
	orl	%edi, %eax
	orl	%esi, %ecx

into

	movb	-1(%rdi, %rdx), %al
	movb	-1(%rsi, %rdx), %cl

	* sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S (between_2_3): Replace
	movzbl and orl with movb.
---
 sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S b/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S
index 2cd2f71b85..abcc61c381 100644
--- a/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S
+++ b/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S
@@ -144,10 +144,8 @@ L(between_2_3):
 	shll	$8, %ecx
 	bswap	%eax
 	bswap	%ecx
-	movzbl	-1(%rdi, %rdx), %edi
-	movzbl	-1(%rsi, %rdx), %esi
-	orl	%edi, %eax
-	orl	%esi, %ecx
+	movb	-1(%rdi, %rdx), %al
+	movb	-1(%rsi, %rdx), %cl
 	/* Subtraction is okay because the upper 8 bits are zero.  */
 	subl	%ecx, %eax
 	ret
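For readers less familiar with the trick: after the prologue that builds a
big-endian value in %eax and %ecx (only the shll/bswap lines appear as
context in the hunk; the initial zero-extending word load is assumed from
the surrounding code), the upper 8 bits and the low 8 bits of both
registers are zero.  Writing the final byte with movb is therefore
equivalent to the movzbl + orl pair it replaces, while using fewer
instructions and no scratch registers.  A minimal C sketch of the same
computation, using GCC's __builtin_bswap32 in place of bswap (the helper
name cmp_2_3 is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of the L(between_2_3) logic for 2 <= n <= 3.  */
	static int
	cmp_2_3 (const unsigned char *p1, const unsigned char *p2, size_t n)
	{
	  /* movzwl: load the first two bytes, zero-extended to 32 bits.  */
	  uint32_t a = p1[0] | ((uint32_t) p1[1] << 8);
	  uint32_t b = p2[0] | ((uint32_t) p2[1] << 8);

	  /* shll $8 + bswap: a = p1[0] << 16 | p1[1] << 8, a big-endian
	     value whose upper 8 bits and low 8 bits are zero.  */
	  a = __builtin_bswap32 (a << 8);
	  b = __builtin_bswap32 (b << 8);

	  /* movb -1(%rdi, %rdx), %al: drop the last byte into the low 8
	     bits.  Because those bits are already zero, a plain byte store
	     is equivalent to the old movzbl + orl pair.  For n == 2 the
	     middle and last byte coincide in both operands, which cannot
	     change the result.  */
	  a |= p1[n - 1];
	  b |= p2[n - 1];

	  /* subl: safe because the upper 8 bits of both values are zero,
	     so the signed difference cannot overflow.  */
	  return (int) a - (int) b;
	}

	int
	main (void)
	{
	  printf ("%d\n", cmp_2_3 ((const unsigned char *) "abc",
				   (const unsigned char *) "abd", 3)); /* < 0 */
	  printf ("%d\n", cmp_2_3 ((const unsigned char *) "ba",
				   (const unsigned char *) "ab", 2));  /* > 0 */
	  return 0;
	}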