author     H.J. Lu <hjl.tools@gmail.com>    2016-02-19 15:43:45 -0800
committer  H.J. Lu <hjl.tools@gmail.com>    2016-02-19 15:45:09 -0800
commit     8d9c92017d85f23ba6a2b3614b2f2bcf1820d6f0
tree       d1e153725deb1ed9714f982bd94585a45a6ca8a2
parent     eab5028860f8e20c543bccb2d7cc100d167ead34
[x86_64] Set DL_RUNTIME_UNALIGNED_VEC_SIZE to 8
Due to GCC bug:

   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066

__tls_get_addr may be called with only 8-byte stack alignment.  Although
this bug has been fixed in GCC 4.9.4, 5.3 and 6, we can't assume that the
stack will always be aligned to 16 bytes.  Since SSE-optimized
memory/string functions with aligned SSE register loads and stores are
used in the dynamic linker, we must set DL_RUNTIME_UNALIGNED_VEC_SIZE
to 8 so that _dl_runtime_resolve_sse will align the stack before
calling _dl_fixup:

Dump of assembler code for function _dl_runtime_resolve_sse:
   0x00007ffff7deea90 <+0>:	push   %rbx
   0x00007ffff7deea91 <+1>:	mov    %rsp,%rbx
   0x00007ffff7deea94 <+4>:	and    $0xfffffffffffffff0,%rsp
                                ^^^^^^^^^^^ Align stack to 16 bytes
   0x00007ffff7deea98 <+8>:	sub    $0x100,%rsp
   0x00007ffff7deea9f <+15>:	mov    %rax,0xc0(%rsp)
   0x00007ffff7deeaa7 <+23>:	mov    %rcx,0xc8(%rsp)
   0x00007ffff7deeaaf <+31>:	mov    %rdx,0xd0(%rsp)
   0x00007ffff7deeab7 <+39>:	mov    %rsi,0xd8(%rsp)
   0x00007ffff7deeabf <+47>:	mov    %rdi,0xe0(%rsp)
   0x00007ffff7deeac7 <+55>:	mov    %r8,0xe8(%rsp)
   0x00007ffff7deeacf <+63>:	mov    %r9,0xf0(%rsp)
   0x00007ffff7deead7 <+71>:	movaps %xmm0,(%rsp)
   0x00007ffff7deeadb <+75>:	movaps %xmm1,0x10(%rsp)
   0x00007ffff7deeae0 <+80>:	movaps %xmm2,0x20(%rsp)
   0x00007ffff7deeae5 <+85>:	movaps %xmm3,0x30(%rsp)
   0x00007ffff7deeaea <+90>:	movaps %xmm4,0x40(%rsp)
   0x00007ffff7deeaef <+95>:	movaps %xmm5,0x50(%rsp)
   0x00007ffff7deeaf4 <+100>:	movaps %xmm6,0x60(%rsp)
   0x00007ffff7deeaf9 <+105>:	movaps %xmm7,0x70(%rsp)
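
As an aside (not part of the commit), the effect of the new value can be
seen by evaluating the DL_RUNTIME_RESOLVE_REALIGN_STACK condition from
sysdeps/x86_64/dl-trampoline.S for each trampoline variant.  The sketch
below is a stand-alone C program; the helper realign_stack and the
OLD_/NEW_UNALIGNED_VEC_SIZE names are made up for illustration.  It shows
that with the old value of 16 the SSE variant (VEC_SIZE 16) never
realigned the stack, while the new value of 8 makes it realign:

   #include <stdbool.h>
   #include <stdio.h>

   /* Values from sysdeps/x86_64/dl-trampoline.S.  */
   #define DL_STACK_ALIGNMENT        8   /* GCC may keep only 8 bytes.  */
   #define OLD_UNALIGNED_VEC_SIZE   16   /* before this patch */
   #define NEW_UNALIGNED_VEC_SIZE    8   /* after this patch */

   /* Mirrors the DL_RUNTIME_RESOLVE_REALIGN_STACK condition.  */
   static bool
   realign_stack (int vec_size, int unaligned_vec_size)
   {
     return vec_size > DL_STACK_ALIGNMENT && vec_size > unaligned_vec_size;
   }

   int
   main (void)
   {
     static const int vec_sizes[] = { 16 /* SSE */, 32 /* AVX */, 64 /* AVX-512 */ };
     for (unsigned int i = 0; i < sizeof vec_sizes / sizeof vec_sizes[0]; i++)
       printf ("VEC_SIZE %2d: realign before=%d after=%d\n", vec_sizes[i],
               realign_stack (vec_sizes[i], OLD_UNALIGNED_VEC_SIZE),
               realign_stack (vec_sizes[i], NEW_UNALIGNED_VEC_SIZE));
     return 0;
   }

With DL_STACK_ALIGNMENT at 8, the SSE row flips from "realign 0" to
"realign 1", while the AVX and AVX-512 variants realign in both cases.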

	[BZ #19679]
	* sysdeps/x86_64/dl-trampoline.S (DL_RUNIME_UNALIGNED_VEC_SIZE):
	Renamed to ...
	(DL_RUNTIME_UNALIGNED_VEC_SIZE): This.  Set to 8.
	(DL_RUNIME_RESOLVE_REALIGN_STACK): Renamed to ...
	(DL_RUNTIME_RESOLVE_REALIGN_STACK): This.  Updated.
	* sysdeps/x86_64/dl-trampoline.h
	(DL_RUNIME_RESOLVE_REALIGN_STACK): Renamed to ...
	(DL_RUNTIME_RESOLVE_REALIGN_STACK): This.
Diffstat (limited to 'sysdeps')
 sysdeps/x86_64/dl-trampoline.S | 20 ++++++++++++--------
 sysdeps/x86_64/dl-trampoline.h |  6 +++---
 2 files changed, 15 insertions(+), 11 deletions(-)
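
As a further aside before the patch itself: the realignment is the
rounding shown at offset <+4> in the disassembly above.  The same
arithmetic can be written in C as follows (not part of the commit; the
sample %rsp value is made up):

   #include <stdint.h>
   #include <stdio.h>

   int
   main (void)
   {
     /* Same effect as "and $0xfffffffffffffff0,%rsp": round the stack
        pointer down to the next 16-byte boundary.  */
     uintptr_t rsp = 0x7ffff7deeb98;                /* made-up, 8-byte aligned */
     uintptr_t aligned = rsp & ~(uintptr_t) 0xf;
     printf ("%#lx -> %#lx\n", (unsigned long) rsp, (unsigned long) aligned);
     return 0;
   }
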
diff --git a/sysdeps/x86_64/dl-trampoline.S b/sysdeps/x86_64/dl-trampoline.S
index 9fb6b13983..39b8771aa7 100644
--- a/sysdeps/x86_64/dl-trampoline.S
+++ b/sysdeps/x86_64/dl-trampoline.S
@@ -33,15 +33,19 @@
 # define DL_STACK_ALIGNMENT 8
 #endif
 
-#ifndef DL_RUNIME_UNALIGNED_VEC_SIZE
-/* The maximum size of unaligned vector load and store.  */
-# define DL_RUNIME_UNALIGNED_VEC_SIZE 16
+#ifndef DL_RUNTIME_UNALIGNED_VEC_SIZE
+/* The maximum size in bytes of unaligned vector load and store in the
+   dynamic linker.  Since SSE optimized memory/string functions with
+   aligned SSE register load and store are used in the dynamic linker,
+   we must set this to 8 so that _dl_runtime_resolve_sse will align the
+   stack before calling _dl_fixup.  */
+# define DL_RUNTIME_UNALIGNED_VEC_SIZE 8
 #endif
 
 /* True if _dl_runtime_resolve should align stack to VEC_SIZE bytes.  */
-#define DL_RUNIME_RESOLVE_REALIGN_STACK \
+#define DL_RUNTIME_RESOLVE_REALIGN_STACK \
   (VEC_SIZE > DL_STACK_ALIGNMENT \
-   && VEC_SIZE > DL_RUNIME_UNALIGNED_VEC_SIZE)
+   && VEC_SIZE > DL_RUNTIME_UNALIGNED_VEC_SIZE)
 
 /* Align vector register save area to 16 bytes.  */
 #define REGISTER_SAVE_VEC_OFF	0
@@ -76,7 +80,7 @@
 #ifdef HAVE_AVX512_ASM_SUPPORT
 # define VEC_SIZE		64
 # define VMOVA			vmovdqa64
-# if DL_RUNIME_RESOLVE_REALIGN_STACK || VEC_SIZE <= DL_STACK_ALIGNMENT
+# if DL_RUNTIME_RESOLVE_REALIGN_STACK || VEC_SIZE <= DL_STACK_ALIGNMENT
 #  define VMOV			vmovdqa64
 # else
 #  define VMOV			vmovdqu64
@@ -100,7 +104,7 @@ strong_alias (_dl_runtime_profile_avx, _dl_runtime_profile_avx512)
 
 #define VEC_SIZE		32
 #define VMOVA			vmovdqa
-#if DL_RUNIME_RESOLVE_REALIGN_STACK || VEC_SIZE <= DL_STACK_ALIGNMENT
+#if DL_RUNTIME_RESOLVE_REALIGN_STACK || VEC_SIZE <= DL_STACK_ALIGNMENT
 # define VMOV			vmovdqa
 #else
 # define VMOV			vmovdqu
@@ -119,7 +123,7 @@ strong_alias (_dl_runtime_profile_avx, _dl_runtime_profile_avx512)
 /* movaps/movups is 1-byte shorter.  */
 #define VEC_SIZE		16
 #define VMOVA			movaps
-#if DL_RUNIME_RESOLVE_REALIGN_STACK || VEC_SIZE <= DL_STACK_ALIGNMENT
+#if DL_RUNTIME_RESOLVE_REALIGN_STACK || VEC_SIZE <= DL_STACK_ALIGNMENT
 # define VMOV			movaps
 #else
 # define VMOV			movups
diff --git a/sysdeps/x86_64/dl-trampoline.h b/sysdeps/x86_64/dl-trampoline.h
index f4191833ab..b90836ab13 100644
--- a/sysdeps/x86_64/dl-trampoline.h
+++ b/sysdeps/x86_64/dl-trampoline.h
@@ -30,7 +30,7 @@
 #undef REGISTER_SAVE_AREA
 #undef LOCAL_STORAGE_AREA
 #undef BASE
-#if DL_RUNIME_RESOLVE_REALIGN_STACK
+#if DL_RUNTIME_RESOLVE_REALIGN_STACK
 # define REGISTER_SAVE_AREA	(REGISTER_SAVE_AREA_RAW + 8)
 /* Local stack area before jumping to function address: RBX.  */
 # define LOCAL_STORAGE_AREA	8
@@ -57,7 +57,7 @@
 	cfi_startproc
 _dl_runtime_resolve:
 	cfi_adjust_cfa_offset(16) # Incorporate PLT
-#if DL_RUNIME_RESOLVE_REALIGN_STACK
+#if DL_RUNTIME_RESOLVE_REALIGN_STACK
 # if LOCAL_STORAGE_AREA != 8
 #  error LOCAL_STORAGE_AREA must be 8
 # endif
@@ -146,7 +146,7 @@ _dl_runtime_resolve:
 	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 5)(%rsp), %VEC(5)
 	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 6)(%rsp), %VEC(6)
 	VMOV (REGISTER_SAVE_VEC_OFF + VEC_SIZE * 7)(%rsp), %VEC(7)
-#if DL_RUNIME_RESOLVE_REALIGN_STACK
+#if DL_RUNTIME_RESOLVE_REALIGN_STACK
 	mov %RBX_LP, %RSP_LP
 	cfi_def_cfa_register(%rsp)
 	movq (%rsp), %rbx
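
Finally, a hedged illustration (hypothetical, not from this commit) of
why _dl_runtime_resolve_sse has to realign before its movaps saves: an
aligned SSE store (movaps, or the _mm_store_si128 intrinsic) requires a
16-byte-aligned address and faults otherwise, while the unaligned form
(movups / _mm_storeu_si128) does not.  Build with gcc -msse2.

   #include <emmintrin.h>
   #include <stdint.h>
   #include <stdio.h>

   int
   main (void)
   {
     /* 16-byte-aligned buffer with room for an 8-byte-misaligned slot.  */
     char buf[32] __attribute__ ((aligned (16)));
     __m128i zero = _mm_setzero_si128 ();

     _mm_store_si128 ((__m128i *) buf, zero);         /* aligned store: OK   */
     _mm_storeu_si128 ((__m128i *) (buf + 8), zero);  /* unaligned store: OK */
     /* _mm_store_si128 ((__m128i *) (buf + 8), zero) would fault, just like
        movaps %xmm0,(%rsp) does when %rsp is only 8-byte aligned.  */

     printf ("buf 16-byte aligned: %d\n", (int) (((uintptr_t) buf & 0xf) == 0));
     return 0;
   }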