path: root/sysdeps/x86_64/dl-trampoline.S
author    H.J. Lu <hongjiu.lu@intel.com>	2009-08-08 10:54:42 -0700
committer Ulrich Drepper <drepper@redhat.com>	2009-08-08 10:54:42 -0700
commit    4e1e2f42472744569f1540dd8410d23180e24bf9 (patch)
tree      420047379cb0d341d37510158d4ca1a88ec57606 /sysdeps/x86_64/dl-trampoline.S
parent    fc1870e6a484ad3211648c9ae51bc076913518aa (diff)
Support mixed SSE/AVX audit and check AVX only once.
This patch fixes mixed SSE/AVX auditing and checks for AVX only once in
_dl_runtime_profile. When an AVX or SSE register value is modified in
pltenter, we have to make sure that the SSE part of the value is the same
in both the lr_xmm and lr_vector fields, so that pltexit will get the
correct value from either field. An AVX-enabled pltenter should therefore
update both the lr_xmm and lr_vector fields to support stacked AVX/SSE
pltenter functions.
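
As a rough illustration of the rule described above, the sketch below shows an
AVX-aware pltenter audit hook that rewrites the first vector argument while
keeping lr_xmm and lr_vector consistent. It is only a sketch, not part of this
patch: the hook name and prototype are the standard rtld-audit ones, the
replacement value (new_ymm0) is hypothetical, and the exact La_x86_64_regs
layout is the one defined in sysdeps/x86_64/bits/link.h.

#define _GNU_SOURCE
#include <link.h>
#include <string.h>

unsigned int
la_version (unsigned int version)
{
  return version;
}

/* Sketch of an AVX-enabled pltenter hook.  When it changes the value
   passed in ymm0, it stores the full 256-bit value into lr_vector[0]
   *and* the low 128 bits into lr_xmm[0], so that pltexit (or an
   SSE-only audit module stacked after this one) sees the same SSE
   part in either field.  */
Elf64_Addr
la_x86_64_gnu_pltenter (Elf64_Sym *sym, unsigned int ndx,
			uintptr_t *refcook, uintptr_t *defcook,
			La_x86_64_regs *regs, unsigned int *flags,
			const char *symname, long int *framesizep)
{
  /* Hypothetical replacement for the first vector argument.  */
  float new_ymm0[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

  memcpy (&regs->lr_vector[0], new_ymm0, sizeof new_ymm0);      /* full AVX value */
  memcpy (&regs->lr_xmm[0], new_ymm0, sizeof regs->lr_xmm[0]);  /* matching SSE part */

  return sym->st_value;
}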
Diffstat (limited to 'sysdeps/x86_64/dl-trampoline.S')
-rw-r--r--	sysdeps/x86_64/dl-trampoline.S	244
1 file changed, 7 insertions, 237 deletions
diff --git a/sysdeps/x86_64/dl-trampoline.S b/sysdeps/x86_64/dl-trampoline.S
index 20da6956f1..f9c60ad5cf 100644
--- a/sysdeps/x86_64/dl-trampoline.S
+++ b/sysdeps/x86_64/dl-trampoline.S
@@ -146,247 +146,17 @@ L(have_avx):
 2:	movl	%eax, L(have_avx)(%rip)
 	cmpl	$0, %eax
 
-1:	js	L(no_avx1)
+1:	js	L(no_avx)
 
-	/* This is to support AVX audit modules.  */
-	vmovdqu %ymm0,		      (LR_VECTOR_OFFSET)(%rsp)
-	vmovdqu %ymm1, (LR_VECTOR_OFFSET +   VECTOR_SIZE)(%rsp)
-	vmovdqu %ymm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
-	vmovdqu %ymm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
-	vmovdqu %ymm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
-	vmovdqu %ymm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
-	vmovdqu %ymm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
-	vmovdqu %ymm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
+#  define RESTORE_AVX
+#  include "dl-trampoline.h"
 
-	/* Save xmm0-xmm7 registers to detect if any of them are
-	   changed by audit module.  */
-	vmovdqa %xmm0,		    (LR_SIZE)(%rsp)
-	vmovdqa %xmm1, (LR_SIZE +   XMM_SIZE)(%rsp)
-	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
-	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
-	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
-	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
-	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
-	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
-
-L(no_avx1):
-# endif
-
-	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
-	movq 48(%rbx), %rdx	# Load return address if needed.
-	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
-	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
-	leaq 16(%rbx), %r8
-	call _dl_profile_fixup	# Call resolver.
-
-	movq %rax, %r11		# Save return value.
-
-	movq 8(%rbx), %rax	# Get back register content.
-	movq LR_RDX_OFFSET(%rsp), %rdx
-	movq  LR_R8_OFFSET(%rsp), %r8
-	movq  LR_R9_OFFSET(%rsp), %r9
-
-	movaps		    (LR_XMM_OFFSET)(%rsp), %xmm0
-	movaps	 (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
-	movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
-	movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
-	movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
-	movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
-	movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
-	movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
-
-# ifdef HAVE_AVX_SUPPORT
-	cmpl	$0, L(have_avx)(%rip)
-	js	L(no_avx2)
-
-	/* Check if any xmm0-xmm7 registers are changed by audit
-	   module.  */
-	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu			(LR_VECTOR_OFFSET)(%rsp), %ymm0
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu	  (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6
-
-1:	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
-	vpmovmskb %xmm8, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7
-
-L(no_avx2):
-1:
-# endif
-	movq 16(%rbx), %r10	# Anything in framesize?
-	testq %r10, %r10
-	jns 3f
-
-	/* There's nothing in the frame size, so there
-	   will be no call to the _dl_call_pltexit. */
-
-	/* Get back registers content.  */
-	movq LR_RCX_OFFSET(%rsp), %rcx
-	movq LR_RSI_OFFSET(%rsp), %rsi
-	movq LR_RDI_OFFSET(%rsp), %rdi
-
-	movq %rbx, %rsp
-	movq (%rsp), %rbx
-	cfi_restore(rbx)
-	cfi_def_cfa_register(%rsp)
-
-	addq $48, %rsp		# Adjust the stack to the return value
-				# (eats the reloc index and link_map)
-	cfi_adjust_cfa_offset(-48)
-	jmp *%r11		# Jump to function address.
-
-3:
-	cfi_adjust_cfa_offset(48)
-	cfi_rel_offset(%rbx, 0)
-	cfi_def_cfa_register(%rbx)
-
-	/* At this point we need to prepare new stack for the function
-	   which has to be called.  We copy the original stack to a
-	   temporary buffer of the size specified by the 'framesize'
-	   returned from _dl_profile_fixup */
-
-	leaq LR_RSP_OFFSET(%rbx), %rsi	# stack
-	addq $8, %r10
-	andq $0xfffffffffffffff0, %r10
-	movq %r10, %rcx
-	subq %r10, %rsp
-	movq %rsp, %rdi
-	shrq $3, %rcx
-	rep
-	movsq
-
-	movq 24(%rdi), %rcx	# Get back register content.
-	movq 32(%rdi), %rsi
-	movq 40(%rdi), %rdi
-
-	call *%r11
-
-	mov 24(%rbx), %rsp	# Drop the copied stack content
-
-	/* Now we have to prepare the La_x86_64_retval structure for the
-	   _dl_call_pltexit.  The La_x86_64_regs is being pointed by rsp now,
-	   so we just need to allocate the sizeof(La_x86_64_retval) space on
-	   the stack, since the alignment has already been taken care of. */
-# ifdef HAVE_AVX_SUPPORT
-	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
-	   registers to detect if xmm0/xmm1 registers are changed
-	   by audit module.  */
-	subq $(LRV_SIZE + XMM_SIZE*2), %rsp
-# else
-	subq $LRV_SIZE, %rsp	# sizeof(La_x86_64_retval)
-# endif
-	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.
-
-	/* Fill in the La_x86_64_retval structure.  */
-	movq %rax, LRV_RAX_OFFSET(%rcx)
-	movq %rdx, LRV_RDX_OFFSET(%rcx)
-
-	movaps %xmm0, LRV_XMM0_OFFSET(%rcx)
-	movaps %xmm1, LRV_XMM1_OFFSET(%rcx)
-
-# ifdef HAVE_AVX_SUPPORT
-	cmpl	$0, L(have_avx)(%rip)
-	js	L(no_avx3)
-
-	/* This is to support AVX audit modules.  */
-	vmovdqu %ymm0, LRV_VECTOR0_OFFSET(%rcx)
-	vmovdqu %ymm1, LRV_VECTOR1_OFFSET(%rcx)
-
-	/* Save xmm0/xmm1 registers to detect if they are changed
-	   by audit module.  */
-	vmovdqa %xmm0,		  (LRV_SIZE)(%rcx)
-	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE)(%rcx)
-
-L(no_avx3):
-# endif
-
-	fstpt LRV_ST0_OFFSET(%rcx)
-	fstpt LRV_ST1_OFFSET(%rcx)
-
-	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
-	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
-        movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
-	call _dl_call_pltexit
-
-	/* Restore return registers.  */
-	movq LRV_RAX_OFFSET(%rsp), %rax
-	movq LRV_RDX_OFFSET(%rsp), %rdx
-
-	movaps LRV_XMM0_OFFSET(%rsp), %xmm0
-	movaps LRV_XMM1_OFFSET(%rsp), %xmm1
-
-# ifdef HAVE_AVX_SUPPORT
-	cmpl	$0, L(have_avx)(%rip)
-	js	L(no_avx4)
-
-	/* Check if xmm0/xmm1 registers are changed by audit module.  */
-	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
-	vpmovmskb %xmm2, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0
-
-1:	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
-	vpmovmskb %xmm2, %esi
-	cmpl $0xffff, %esi
-	jne 1f
-	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1
-
-L(no_avx4):
-1:
+	.align 16
+L(no_avx):
 # endif
 
-	fldt LRV_ST1_OFFSET(%rsp)
-	fldt LRV_ST0_OFFSET(%rsp)
-
-	movq %rbx, %rsp
-	movq (%rsp), %rbx
-	cfi_restore(rbx)
-	cfi_def_cfa_register(%rsp)
-
-	addq $48, %rsp		# Adjust the stack to the return value
-				# (eats the reloc index and link_map)
-	cfi_adjust_cfa_offset(-48)
-	retq
+#  undef RESTORE_AVX
+#  include "dl-trampoline.h"
 
 	cfi_endproc
 	.size _dl_runtime_profile, .-_dl_runtime_profile
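
For reference, the per-register check that the removed code performed with
vpcmpeqq/vpmovmskb (and that the shared dl-trampoline.h code still performs)
compares the live xmm value against the copy saved before calling the audit
module; only when the register is unchanged is the saved ymm value reloaded.
A C-level sketch of that test, written with SSE4.1 intrinsics rather than
taken from glibc, looks roughly like this:

#include <immintrin.h>
#include <stdbool.h>

/* Sketch of the unchanged-register test: compare the saved copy with
   the current value 64 bits at a time (vpcmpeqq) and collapse the
   result into a byte mask (vpmovmskb).  A mask of 0xffff means the
   audit module did not touch the register, so the full ymm copy may
   be restored.  */
static bool
xmm_unchanged (__m128i saved, __m128i current)
{
  __m128i eq = _mm_cmpeq_epi64 (saved, current);
  return _mm_movemask_epi8 (eq) == 0xffff;
}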