author     H.J. Lu <hongjiu.lu@intel.com>         2009-07-29 08:40:54 -0700
committer  Ulrich Drepper <drepper@redhat.com>    2009-07-29 08:40:54 -0700
commit     09e0389eb12491d3e9ef74b299b66efdd67adb1c (patch)
tree       22b5562ed2f15dfcf93ddb46a23f863455d28129 /sysdeps/x86_64/dl-trampoline.S
parent     b48a267b8fbb885191a04cffdb4050a4d4c8a20b (diff)
Properly restore AVX registers on x86-64.
tst-audit4 and tst-audit5 fail under the AVX emulator because the restore
checks use je where jne is needed: a saved 256-bit register should be
reloaded only when the comparison shows the audit hook left its low
128 bits unchanged. This patch fixes them.
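Every hunk below follows the same pattern: compare the low 128 bits of a
vector register against the copy saved before the audit hook ran, and reload
the full 256-bit register from the save area only if the hook did not change
it. The C sketch below mirrors that logic with AVX intrinsics; it is
illustrative only, not glibc code, and the names maybe_restore_ymm, saved_xmm,
saved_ymm and reg are hypothetical stand-ins for the La_x86_64_regs /
La_x86_64_retval save areas the real assembly works on.

/* Minimal sketch of the restore check (compile with -mavx).  */
#include <immintrin.h>

void
maybe_restore_ymm (__m128i saved_xmm, const __m256i *saved_ymm,
                   __m256i *reg)
{
  /* vpcmpeqq: compare the live low 128 bits with the copy saved before
     the audit hook was called.  */
  __m128i eq = _mm_cmpeq_epi64 (_mm256_castsi256_si128 (*reg), saved_xmm);

  /* vpmovmskb/cmpl $0xffff: all bytes equal means the hook left the
     register alone, so it is safe to restore the full 256-bit value
     saved on entry.  The buggy `je' branched away in exactly this case,
     so unchanged registers lost their upper halves while modified ones
     were clobbered with the stale saved value.  */
  if (_mm_movemask_epi8 (eq) == 0xffff)
    *reg = _mm256_loadu_si256 (saved_ymm);
}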
Diffstat (limited to 'sysdeps/x86_64/dl-trampoline.S')
 sysdeps/x86_64/dl-trampoline.S | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/sysdeps/x86_64/dl-trampoline.S b/sysdeps/x86_64/dl-trampoline.S
index 7ecf1b0c64..5a4c6ddecd 100644
--- a/sysdeps/x86_64/dl-trampoline.S
+++ b/sysdeps/x86_64/dl-trampoline.S
@@ -203,49 +203,49 @@ L(no_avx1):
 	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu			(LR_VECTOR_OFFSET)(%rsp), %ymm0
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu	  (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6
 
 1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
 	vpmovmskb %xmm8, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7
 
 L(no_avx2):
@@ -361,13 +361,13 @@ L(no_avx3):
 	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
 	vpmovmskb %xmm2, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0
 
 1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
 	vpmovmskb %xmm2, %esi
 	cmpl $0xffff, %esi
-	je 1f
+	jne 1f
 	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1
 
 L(no_avx4):