Diffstat (limited to 'sysdeps/x86_64')
-rw-r--r--  sysdeps/x86_64/Makefile            1
-rw-r--r--  sysdeps/x86_64/bits/link.h        12
-rw-r--r--  sysdeps/x86_64/dl-trampoline.S   179
-rw-r--r--  sysdeps/x86_64/dl-trampoline.h   291
-rwxr-xr-x  sysdeps/x86_64/elf/configure      25
-rw-r--r--  sysdeps/x86_64/elf/configure.in   11
-rw-r--r--  sysdeps/x86_64/link-defines.sym   28
7 files changed, 423 insertions, 124 deletions
diff --git a/sysdeps/x86_64/Makefile b/sysdeps/x86_64/Makefile
index da82093381..78fdb04fcb 100644
--- a/sysdeps/x86_64/Makefile
+++ b/sysdeps/x86_64/Makefile
@@ -4,6 +4,7 @@ long-double-fcts = yes
 ifeq ($(subdir),csu)
 sysdep_routines += hp-timing
 elide-routines.os += hp-timing
+gen-as-const-headers += link-defines.sym
 endif
 
 ifeq ($(subdir),gmon)
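
The new gen-as-const-headers entry makes the build run link-defines.sym (added at the end of this patch) through glibc's gen-as-const machinery, producing a link-defines.h of assembler-usable constants for dl-trampoline.S. As a hedged sketch only, the generated header has the shape below; the values are computed from the sizeof/offsetof expressions in the .sym file at build time, and only these two are safe to state from the structure layout itself:

/* Illustrative excerpt of the generated link-defines.h (assumed shape;
   real values come from the .sym expressions at build time).  */
#define LR_RDX_OFFSET 0   /* lr_rdx is the first member of La_x86_64_regs.  */
#define XMM_SIZE 16       /* sizeof (La_x86_64_xmm): one 16-byte SSE register.  */
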
diff --git a/sysdeps/x86_64/bits/link.h b/sysdeps/x86_64/bits/link.h
index 5676b78753..643a293bb0 100644
--- a/sysdeps/x86_64/bits/link.h
+++ b/sysdeps/x86_64/bits/link.h
@@ -65,10 +65,19 @@ __END_DECLS
 /* Registers for entry into PLT on x86-64.  */
 # if __GNUC_PREREQ (4,0)
 typedef float La_x86_64_xmm __attribute__ ((__vector_size__ (16)));
+typedef float La_x86_64_ymm __attribute__ ((__vector_size__ (32)));
 # else
 typedef float La_x86_64_xmm __attribute__ ((__mode__ (__V4SF__)));
 # endif
 
+typedef union
+{
+# if __GNUC_PREREQ (4,0)
+  La_x86_64_ymm ymm[2];
+# endif
+  La_x86_64_xmm xmm[4];
+} La_x86_64_vector __attribute__ ((aligned(16)));
+
 typedef struct La_x86_64_regs
 {
   uint64_t lr_rdx;
@@ -80,6 +89,7 @@ typedef struct La_x86_64_regs
   uint64_t lr_rbp;
   uint64_t lr_rsp;
   La_x86_64_xmm lr_xmm[8];
+  La_x86_64_vector lr_vector[8];
 } La_x86_64_regs;
 
 /* Return values for calls from PLT on x86-64.  */
@@ -91,6 +101,8 @@ typedef struct La_x86_64_retval
   La_x86_64_xmm lrv_xmm1;
   long double lrv_st0;
   long double lrv_st1;
+  La_x86_64_vector lrv_vector0;
+  La_x86_64_vector lrv_vector1;
 } La_x86_64_retval;
 
 
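
The new La_x86_64_vector union and the lr_vector/lrv_vector* members are what an LD_AUDIT module sees when it inspects argument and return registers. Below is a minimal sketch of such a module, assuming the patched <link.h>; the output format and the decision not to redirect the call are illustrative only, not part of this patch:

#define _GNU_SOURCE
#include <link.h>
#include <stdint.h>
#include <stdio.h>

unsigned int
la_version (unsigned int version)
{
  return version;               /* Accept the version ld.so offers.  */
}

Elf64_Addr
la_x86_64_gnu_pltenter (Elf64_Sym *sym, unsigned int ndx,
                        uintptr_t *refcook, uintptr_t *defcook,
                        La_x86_64_regs *regs, unsigned int *flags,
                        const char *symname, long int *framesize)
{
  /* lr_xmm keeps the legacy 16-byte view; lr_vector (added above) can
     additionally carry the full 32-byte YMM contents on AVX hardware.  */
  const float *x = (const float *) &regs->lr_xmm[0];
  fprintf (stderr, "%s: xmm0 = { %f, %f, %f, %f }\n",
           symname, x[0], x[1], x[2], x[3]);
  return sym->st_value;         /* Do not redirect the call.  */
}

Built with something like cc -shared -fPIC -o audit.so audit.c and loaded via LD_AUDIT=./audit.so, the module only gets meaningful lr_vector contents once the trampoline changes below actually save the YMM registers.
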
diff --git a/sysdeps/x86_64/dl-trampoline.S b/sysdeps/x86_64/dl-trampoline.S
index 33e6115f7b..f605351f30 100644
--- a/sysdeps/x86_64/dl-trampoline.S
+++ b/sysdeps/x86_64/dl-trampoline.S
@@ -17,7 +17,9 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+#include <config.h>
 #include <sysdep.h>
+#include <link-defines.h>
 
 	.text
 	.globl _dl_runtime_resolve
@@ -89,135 +91,64 @@ _dl_runtime_profile:
 
 	/* Actively align the La_x86_64_regs structure.  */
 	andq $0xfffffffffffffff0, %rsp
-	subq $192, %rsp		# sizeof(La_x86_64_regs)
+# ifdef HAVE_AVX_SUPPORT
+	/* sizeof(La_x86_64_regs).  Need extra space for 8 SSE registers
+	   to detect if any of the xmm0-xmm7 registers are changed by the
+	   audit module.  */
+	subq $(LR_SIZE + XMM_SIZE*8), %rsp
+# else
+	subq $LR_SIZE, %rsp		# sizeof(La_x86_64_regs)
+# endif
 	movq %rsp, 24(%rbx)
 
-	movq %rdx,   (%rsp)	# Fill the La_x86_64_regs structure.
-	movq %r8,   8(%rsp)
-	movq %r9,  16(%rsp)
-	movq %rcx, 24(%rsp)
-	movq %rsi, 32(%rsp)
-	movq %rdi, 40(%rsp)
-	movq %rbp, 48(%rsp)
-	leaq 48(%rbx), %rax
-	movq %rax, 56(%rsp)
-	movaps %xmm0,  64(%rsp)
-	movaps %xmm1,  80(%rsp)
-	movaps %xmm2,  96(%rsp)
-	movaps %xmm3, 112(%rsp)
-	movaps %xmm4, 128(%rsp)
-	movaps %xmm5, 144(%rsp)
-	movaps %xmm6, 160(%rsp)
-	movaps %xmm7, 176(%rsp)
-
-	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
-	movq 48(%rbx), %rdx	# Load return address if needed.
-	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
-	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
-	leaq 16(%rbx), %r8
-	call _dl_profile_fixup	# Call resolver.
-
-	movq %rax, %r11		# Save return value.
-
-	movq 8(%rbx), %rax	# Get back register content.
-	movq      (%rsp), %rdx
-	movq     8(%rsp), %r8
-	movq    16(%rsp), %r9
-	movaps  64(%rsp), %xmm0
-	movaps  80(%rsp), %xmm1
-	movaps  96(%rsp), %xmm2
-	movaps 112(%rsp), %xmm3
-	movaps 128(%rsp), %xmm4
-	movaps 144(%rsp), %xmm5
-	movaps 160(%rsp), %xmm6
-	movaps 176(%rsp), %xmm7
-
-	movq 16(%rbx), %r10	# Anything in framesize?
-	testq %r10, %r10
-	jns 1f
-
-	/* There's nothing in the frame size, so there
-	   will be no call to the _dl_call_pltexit. */
-
-	movq 24(%rsp), %rcx	# Get back registers content.
-	movq 32(%rsp), %rsi
-	movq 40(%rsp), %rdi
-
-	movq %rbx, %rsp
-	movq (%rsp), %rbx
-	cfi_restore(rbx)
-	cfi_def_cfa_register(%rsp)
-
-	addq $48, %rsp		# Adjust the stack to the return value
-				# (eats the reloc index and link_map)
-	cfi_adjust_cfa_offset(-48)
-	jmp *%r11		# Jump to function address.
+	/* Fill the La_x86_64_regs structure.  */
+	movq %rdx, LR_RDX_OFFSET(%rsp)
+	movq %r8,  LR_R8_OFFSET(%rsp)
+	movq %r9,  LR_R9_OFFSET(%rsp)
+	movq %rcx, LR_RCX_OFFSET(%rsp)
+	movq %rsi, LR_RSI_OFFSET(%rsp)
+	movq %rdi, LR_RDI_OFFSET(%rsp)
+	movq %rbp, LR_RBP_OFFSET(%rsp)
 
-1:
-	cfi_adjust_cfa_offset(48)
-	cfi_rel_offset(%rbx, 0)
-	cfi_def_cfa_register(%rbx)
+# ifdef HAVE_AVX_SUPPORT
+	jmp *L(save_and_restore_vector)(%rip)
 
-	/* At this point we need to prepare new stack for the function
-	   which has to be called.  We copy the original stack to a
-	   temporary buffer of the size specified by the 'framesize'
-	   returned from _dl_profile_fixup */
-
-	leaq 56(%rbx), %rsi	# stack
-	addq $8, %r10
-	andq $0xfffffffffffffff0, %r10
-	movq %r10, %rcx
-	subq %r10, %rsp
-	movq %rsp, %rdi
-	shrq $3, %rcx
-	rep
-	movsq
-
-	movq 24(%rdi), %rcx	# Get back register content.
-	movq 32(%rdi), %rsi
-	movq 40(%rdi), %rdi
-
-	call *%r11
-
-	mov 24(%rbx), %rsp	# Drop the copied stack content
-
-	/* Now we have to prepare the La_x86_64_retval structure for the
-	   _dl_call_pltexit.  The La_x86_64_regs is being pointed by rsp now,
-	   so we just need to allocate the sizeof(La_x86_64_retval) space on
-	   the stack, since the alignment has already been taken care of. */
-
-	subq $80, %rsp		# sizeof(La_x86_64_retval)
-	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.
-
-	movq %rax, (%rcx)	# Fill in the La_x86_64_retval structure.
-	movq %rdx, 8(%rcx)
-	movaps %xmm0, 16(%rcx)
-	movaps %xmm1, 32(%rcx)
-	fstpt 48(%rcx)
-	fstpt 64(%rcx)
-
-	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
-	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
-        movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
-	call _dl_call_pltexit
-
-	movq  (%rsp), %rax	# Restore return registers.
-	movq 8(%rsp), %rdx
-	movaps 16(%rsp), %xmm0
-	movaps 32(%rsp), %xmm1
-	fldt 64(%rsp)
-	fldt 48(%rsp)
-
-	movq %rbx, %rsp
-	movq  (%rsp), %rbx
-	cfi_restore(rbx)
-	cfi_def_cfa_register(%rsp)
-
-	addq $48, %rsp		# Adjust the stack to the return value
-				# (eats the reloc index and link_map)
-	cfi_adjust_cfa_offset(-48)
-	retq
+	.align 16
+L(save_and_restore_vector_sse):
+# endif
+
+# define MOVXMM movaps
+# include "dl-trampoline.h"
+
+# ifdef HAVE_AVX_SUPPORT
+#  undef  MOVXMM
+#  define MOVXMM vmovdqa
+#  define RESTORE_AVX
+	.align 16
+L(save_and_restore_vector_avx):
+#  include "dl-trampoline.h"
+# endif
 
 	cfi_endproc
 	.size _dl_runtime_profile, .-_dl_runtime_profile
+
+# ifdef HAVE_AVX_SUPPORT
+L(check_avx):
+	mov	%rbx,%r11		# Save rbx
+	movl	$1, %eax
+	cpuid
+	mov	%r11,%rbx		# Restore rbx
+	leaq    L(save_and_restore_vector_sse)(%rip), %rax
+	andl	$(1 << 28), %ecx	# Check if AVX is available.
+	jz	L(ret)
+	leaq    L(save_and_restore_vector_avx)(%rip), %rax
+L(ret):
+	movq	%rax,L(save_and_restore_vector)(%rip)
+	jmp	*%rax
+
+	.section .data.rel.local,"aw",@progbits
+	.align	8
+L(save_and_restore_vector):
+	.quad L(check_avx)
+# endif
 #endif
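
L(check_avx) runs once, on the first profiled PLT call: it queries CPUID leaf 1, tests ECX bit 28, and then overwrites L(save_and_restore_vector) with the address of the SSE or AVX variant so later calls jump straight to the right sequence. A rough user-space equivalent of that test, assuming GCC's <cpuid.h> (the has_avx name is made up for illustration):

#include <cpuid.h>

/* Return nonzero if the CPU advertises AVX (CPUID leaf 1, ECX bit 28),
   mirroring the check done in L(check_avx) above.  */
static int
has_avx (void)
{
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
    return 0;                   /* CPUID leaf 1 unavailable.  */
  return (ecx & (1u << 28)) != 0;
}

Caching the answer in the .data.rel.local slot keeps CPUID out of the hot path; every later _dl_runtime_profile entry takes the indirect jmp directly to the chosen save/restore code.
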
diff --git a/sysdeps/x86_64/dl-trampoline.h b/sysdeps/x86_64/dl-trampoline.h
new file mode 100644
index 0000000000..d63b7d03c4
--- /dev/null
+++ b/sysdeps/x86_64/dl-trampoline.h
@@ -0,0 +1,291 @@
+/* Partial PLT profile trampoline to save and restore x86-64 vector
+   registers.
+   Copyright (C) 2009 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+	leaq 48(%rbx), %rax
+	movq %rax, LR_RSP_OFFSET(%rsp)
+
+	/* This is to provide backward binary compatibility for existing
+	   audit modules.  */
+	MOVXMM %xmm0,		   (LR_XMM_OFFSET)(%rsp)
+	MOVXMM %xmm1, (LR_XMM_OFFSET +   XMM_SIZE)(%rsp)
+	MOVXMM %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
+	MOVXMM %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
+	MOVXMM %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
+	MOVXMM %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
+	MOVXMM %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
+	MOVXMM %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)
+
+#ifdef RESTORE_AVX
+	/* This is to support AVX audit modules.  */
+	vmovdqu %ymm0,		      (LR_VECTOR_OFFSET)(%rsp)
+	vmovdqu %ymm1, (LR_VECTOR_OFFSET +   VECTOR_SIZE)(%rsp)
+	vmovdqu %ymm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
+	vmovdqu %ymm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
+	vmovdqu %ymm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
+	vmovdqu %ymm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
+	vmovdqu %ymm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
+	vmovdqu %ymm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
+
+	/* Save xmm0-xmm7 registers to detect if any of them are
+	   changed by the audit module.  */
+	vmovdqa %xmm0,		    (LR_SIZE)(%rsp)
+	vmovdqa %xmm1, (LR_SIZE +   XMM_SIZE)(%rsp)
+	vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
+	vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
+	vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
+	vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
+	vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
+	vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
+#endif
+
+	movq %rsp, %rcx		# La_x86_64_regs pointer to %rcx.
+	movq 48(%rbx), %rdx	# Load return address if needed.
+	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
+	movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
+	leaq 16(%rbx), %r8
+	call _dl_profile_fixup	# Call resolver.
+
+	movq %rax, %r11		# Save return value.
+
+	movq 8(%rbx), %rax	# Get back register content.
+	movq LR_RDX_OFFSET(%rsp), %rdx
+	movq  LR_R8_OFFSET(%rsp), %r8
+	movq  LR_R9_OFFSET(%rsp), %r9
+
+#ifdef RESTORE_AVX
+	/* Check if any of the xmm0-xmm7 registers are changed by the
+	   audit module.  */
+	vmovdqa (LR_XMM_OFFSET)(%rsp), %xmm0
+	vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm0
+	vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm0
+	vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm0
+	vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm0
+	vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm0
+	vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm0
+	vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm0
+	vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 1f
+
+	/* We restore the AVX registers only if the xmm0-xmm7 registers
+	   are unchanged.  */
+	vmovdqu			(LR_VECTOR_OFFSET)(%rsp), %ymm0
+	vmovdqu	  (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %ymm1
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %ymm2
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %ymm3
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %ymm4
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %ymm5
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %ymm6
+	vmovdqu (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %ymm7
+	jmp 2f
+
+1:
+	vmovdqa		     (LR_XMM_OFFSET)(%rsp), %xmm0
+	vmovdqa   (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
+	vmovdqa (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
+
+2:
+#else
+	movaps		    (LR_XMM_OFFSET)(%rsp), %xmm0
+	movaps	 (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
+	movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
+	movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
+	movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
+	movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
+	movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
+	movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
+#endif
+
+	movq 16(%rbx), %r10	# Anything in framesize?
+	testq %r10, %r10
+	jns 3f
+
+	/* There's nothing in the frame size, so there
+	   will be no call to _dl_call_pltexit.  */
+
+	/* Get back registers content.  */
+	movq LR_RCX_OFFSET(%rsp), %rcx
+	movq LR_RSI_OFFSET(%rsp), %rsi
+	movq LR_RDI_OFFSET(%rsp), %rdi
+
+	movq %rbx, %rsp
+	movq (%rsp), %rbx
+	cfi_restore(rbx)
+	cfi_def_cfa_register(%rsp)
+
+	addq $48, %rsp		# Adjust the stack to the return value
+				# (eats the reloc index and link_map)
+	cfi_adjust_cfa_offset(-48)
+	jmp *%r11		# Jump to function address.
+
+3:
+	cfi_adjust_cfa_offset(48)
+	cfi_rel_offset(%rbx, 0)
+	cfi_def_cfa_register(%rbx)
+
+	/* At this point we need to prepare a new stack for the function
+	   which has to be called.  We copy the original stack to a
+	   temporary buffer of the size specified by the 'framesize'
+	   returned from _dl_profile_fixup.  */
+
+	leaq LR_RSP_OFFSET(%rbx), %rsi	# stack
+	addq $8, %r10
+	andq $0xfffffffffffffff0, %r10
+	movq %r10, %rcx
+	subq %r10, %rsp
+	movq %rsp, %rdi
+	shrq $3, %rcx
+	rep
+	movsq
+
+	movq 24(%rdi), %rcx	# Get back register content.
+	movq 32(%rdi), %rsi
+	movq 40(%rdi), %rdi
+
+	call *%r11
+
+	mov 24(%rbx), %rsp	# Drop the copied stack content
+
+	/* Now we have to prepare the La_x86_64_retval structure for
+	   _dl_call_pltexit.  The La_x86_64_regs is now pointed to by %rsp,
+	   so we just need to allocate sizeof(La_x86_64_retval) space on
+	   the stack, since the alignment has already been taken care of.  */
+#ifdef RESTORE_AVX
+	/* sizeof(La_x86_64_retval).  Need extra space for 2 SSE
+	   registers to detect if the xmm0/xmm1 registers are changed
+	   by the audit module.  */
+	subq $(LRV_SIZE + XMM_SIZE*2), %rsp
+#else
+	subq $LRV_SIZE, %rsp	# sizeof(La_x86_64_retval)
+#endif
+	movq %rsp, %rcx		# La_x86_64_retval argument to %rcx.
+
+	/* Fill in the La_x86_64_retval structure.  */
+	movq %rax, LRV_RAX_OFFSET(%rcx)
+	movq %rdx, LRV_RDX_OFFSET(%rcx)
+
+	MOVXMM %xmm0, LRV_XMM0_OFFSET(%rcx)
+	MOVXMM %xmm1, LRV_XMM1_OFFSET(%rcx)
+
+#ifdef RESTORE_AVX
+	/* This is to support AVX audit modules.  */
+	vmovdqu %ymm0, LRV_VECTOR0_OFFSET(%rcx)
+	vmovdqu %ymm1, LRV_VECTOR1_OFFSET(%rcx)
+
+	/* Save xmm0/xmm1 registers to detect if they are changed
+	   by the audit module.  */
+	vmovdqa %xmm0,		  (LRV_SIZE)(%rcx)
+	vmovdqa %xmm1, (LRV_SIZE + XMM_SIZE)(%rcx)
+#endif
+
+	fstpt LRV_ST0_OFFSET(%rcx)
+	fstpt LRV_ST1_OFFSET(%rcx)
+
+	movq 24(%rbx), %rdx	# La_x86_64_regs argument to %rdx.
+	movq 40(%rbx), %rsi	# Copy args pushed by PLT in register.
+        movq 32(%rbx), %rdi	# %rdi: link_map, %rsi: reloc_index
+	call _dl_call_pltexit
+
+	/* Restore return registers.  */
+	movq LRV_RAX_OFFSET(%rsp), %rax
+	movq LRV_RDX_OFFSET(%rsp), %rdx
+
+#ifdef RESTORE_AVX
+	/* Check if the xmm0/xmm1 registers are changed by the audit module.  */
+	vmovdqa LRV_XMM0_OFFSET(%rsp), %xmm0
+	vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 4f
+
+	/* We restore the AVX registers only if the xmm0/xmm1 registers
+	   are unchanged.  */
+	vmovdqa LRV_XMM1_OFFSET(%rsp), %xmm0
+	vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm0, %xmm1
+	vpmovmskb %xmm1, %esi
+	cmpl $0xffff, %esi
+	jne 4f
+
+	vmovdqu LRV_VECTOR0_OFFSET(%rsp), %ymm0
+	vmovdqu LRV_VECTOR1_OFFSET(%rsp), %ymm1
+	jmp 5f
+
+4:
+	vmovdqa LRV_XMM0_OFFSET(%rsp), %xmm0
+	vmovdqa LRV_XMM1_OFFSET(%rsp), %xmm1
+5:
+#else
+	movaps LRV_XMM0_OFFSET(%rsp), %xmm0
+	movaps LRV_XMM1_OFFSET(%rsp), %xmm1
+#endif
+
+	fldt LRV_ST1_OFFSET(%rsp)
+	fldt LRV_ST0_OFFSET(%rsp)
+
+	movq %rbx, %rsp
+	movq (%rsp), %rbx
+	cfi_restore(rbx)
+	cfi_def_cfa_register(%rsp)
+
+	addq $48, %rsp		# Adjust the stack to the return value
+				# (eats the reloc index and link_map)
+	cfi_adjust_cfa_offset(-48)
+	retq
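
The RESTORE_AVX blocks above implement a compatibility dance: the full YMM registers are reloaded only when the audit module left every saved XMM copy bit-for-bit untouched; otherwise the (possibly modified) XMM values take precedence. A rough C rendering of that decision, with assumed array parameters and purely for illustration:

#include <string.h>

#define NXMM      8             /* xmm0-xmm7 saved around _dl_profile_fixup.  */
#define XMM_BYTES 16            /* One SSE register.  */

/* regs_xmm:  the lr_xmm fields the audit module may have modified.
   saved_xmm: private copies stored past LR_SIZE before the call.
   Returns 1 when the wider YMM copies may be restored safely.  */
static int
xmm_unchanged (const unsigned char regs_xmm[NXMM][XMM_BYTES],
               const unsigned char saved_xmm[NXMM][XMM_BYTES])
{
  for (int i = 0; i < NXMM; ++i)
    if (memcmp (regs_xmm[i], saved_xmm[i], XMM_BYTES) != 0)
      return 0;                 /* Audit module changed this xmm register.  */
  return 1;
}
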
diff --git a/sysdeps/x86_64/elf/configure b/sysdeps/x86_64/elf/configure
index 774654997d..221e74c2b8 100755
--- a/sysdeps/x86_64/elf/configure
+++ b/sysdeps/x86_64/elf/configure
@@ -79,3 +79,28 @@ cat >>confdefs.h <<\_ACEOF
 #define PI_STATIC_AND_HIDDEN 1
 _ACEOF
 
+
+{ $as_echo "$as_me:$LINENO: checking for AVX support" >&5
+$as_echo_n "checking for AVX support... " >&6; }
+if test "${libc_cv_cc_avx+set}" = set; then
+  $as_echo_n "(cached) " >&6
+else
+  if { ac_try='${CC-cc} -mavx -xc /dev/null -S -o /dev/null'
+  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+  (eval $ac_try) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); }; }; then
+  libc_cv_cc_avx=yes
+else
+  libc_cv_cc_avx=no
+fi
+fi
+{ $as_echo "$as_me:$LINENO: result: $libc_cv_cc_avx" >&5
+$as_echo "$libc_cv_cc_avx" >&6; }
+if test $libc_cv_cc_avx = yes; then
+  cat >>confdefs.h <<\_ACEOF
+#define HAVE_AVX_SUPPORT 1
+_ACEOF
+
+fi
diff --git a/sysdeps/x86_64/elf/configure.in b/sysdeps/x86_64/elf/configure.in
index 9cb59d009c..14d1875302 100644
--- a/sysdeps/x86_64/elf/configure.in
+++ b/sysdeps/x86_64/elf/configure.in
@@ -32,3 +32,14 @@ fi
 dnl It is always possible to access static and hidden symbols in an
 dnl position independent way.
 AC_DEFINE(PI_STATIC_AND_HIDDEN)
+
+dnl Check if -mavx works.
+AC_CACHE_CHECK(for AVX support, libc_cv_cc_avx, [dnl
+if AC_TRY_COMMAND([${CC-cc} -mavx -xc /dev/null -S -o /dev/null]); then
+  libc_cv_cc_avx=yes
+else
+  libc_cv_cc_avx=no
+fi])
+if test $libc_cv_cc_avx = yes; then
+  AC_DEFINE(HAVE_AVX_SUPPORT)
+fi
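
The probe only asks whether the compiler accepts -mavx at all (it compiles /dev/null), which is enough for HAVE_AVX_SUPPORT to gate the vmovdqa/vmovdqu code above. For reference, a tiny translation unit of the sort that genuinely needs that flag; the function name is arbitrary:

#include <immintrin.h>

/* Eight packed single-precision adds; typically rejected without -mavx.  */
__m256
add8f (__m256 a, __m256 b)
{
  return _mm256_add_ps (a, b);
}
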
diff --git a/sysdeps/x86_64/link-defines.sym b/sysdeps/x86_64/link-defines.sym
new file mode 100644
index 0000000000..1694d883ad
--- /dev/null
+++ b/sysdeps/x86_64/link-defines.sym
@@ -0,0 +1,28 @@
+#include "link.h"
+#include <stddef.h>
+
+--
+VECTOR_SIZE		sizeof (La_x86_64_vector)
+XMM_SIZE		sizeof (La_x86_64_xmm)
+
+LR_SIZE			sizeof (struct La_x86_64_regs)
+LR_RDX_OFFSET		offsetof (struct La_x86_64_regs, lr_rdx)
+LR_R8_OFFSET		offsetof (struct La_x86_64_regs, lr_r8)
+LR_R9_OFFSET		offsetof (struct La_x86_64_regs, lr_r9)
+LR_RCX_OFFSET		offsetof (struct La_x86_64_regs, lr_rcx)
+LR_RSI_OFFSET		offsetof (struct La_x86_64_regs, lr_rsi)
+LR_RDI_OFFSET		offsetof (struct La_x86_64_regs, lr_rdi)
+LR_RBP_OFFSET		offsetof (struct La_x86_64_regs, lr_rbp)
+LR_RSP_OFFSET		offsetof (struct La_x86_64_regs, lr_rsp)
+LR_XMM_OFFSET		offsetof (struct La_x86_64_regs, lr_xmm)
+LR_VECTOR_OFFSET	offsetof (struct La_x86_64_regs, lr_vector)
+
+LRV_SIZE		sizeof (struct La_x86_64_retval)
+LRV_RAX_OFFSET		offsetof (struct La_x86_64_retval, lrv_rax)
+LRV_RDX_OFFSET		offsetof (struct La_x86_64_retval, lrv_rdx)
+LRV_XMM0_OFFSET		offsetof (struct La_x86_64_retval, lrv_xmm0)
+LRV_XMM1_OFFSET		offsetof (struct La_x86_64_retval, lrv_xmm1)
+LRV_ST0_OFFSET		offsetof (struct La_x86_64_retval, lrv_st0)
+LRV_ST1_OFFSET		offsetof (struct La_x86_64_retval, lrv_st1)
+LRV_VECTOR0_OFFSET	offsetof (struct La_x86_64_retval, lrv_vector0)
+LRV_VECTOR1_OFFSET	offsetof (struct La_x86_64_retval, lrv_vector1)
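
Everything in this file is a plain sizeof/offsetof expression, so the constants the build bakes into link-defines.h can be double-checked from C. A throwaway check along these lines (it assumes the patched <link.h>, since lr_vector does not exist before this change):

#define _GNU_SOURCE
#include <link.h>
#include <stddef.h>
#include <stdio.h>

int
main (void)
{
  printf ("LR_SIZE          = %zu\n", sizeof (struct La_x86_64_regs));
  printf ("LR_XMM_OFFSET    = %zu\n",
          offsetof (struct La_x86_64_regs, lr_xmm));
  printf ("LR_VECTOR_OFFSET = %zu\n",
          offsetof (struct La_x86_64_regs, lr_vector));
  printf ("LRV_SIZE         = %zu\n", sizeof (struct La_x86_64_retval));
  return 0;
}
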