author    Szabolcs Nagy <szabolcs.nagy@arm.com>  2021-09-15 15:16:19 +0100
committer Szabolcs Nagy <szabolcs.nagy@arm.com>  2021-10-04 15:07:05 +0100
commit    83b5323261bb72313bffcf37476c1b8f0847c736 (patch)
tree      2359c0540ec0009d3b010f35fd4cec6a2ca89253 /elf/dl-tls.c
parent    958309cba24caf58ea5e118b20eccadbb3638f2d (diff)
elf: Avoid deadlock between pthread_create and ctors [BZ #28357]
The fix for bug 19329 caused a regression such that pthread_create can
deadlock when concurrent ctors from dlopen are waiting for it to finish.
Use a new GL(dl_load_tls_lock) in pthread_create that is not taken
around ctors in dlopen.
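
For illustration, a minimal reproducer sketch of the deadlock (my own, not
the commit's testcase; the names mod.so, creator_thread, noop and
thread_done are hypothetical).  Build it as a shared object; the host
program starts creator_thread and then calls dlopen ("mod.so", RTLD_NOW)
from another thread:

/* Before the fix: dlopen holds GL(dl_load_lock) while the ctor runs;
   the concurrent pthread_create needs the same lock inside
   _dl_allocate_tls_init; the ctor waits for that pthread_create to
   finish, so neither side can make progress.  */

#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

static atomic_int thread_done;

static void *noop (void *arg) { return arg; }

/* Started by the host program before it calls dlopen.  */
void *creator_thread (void *arg)
{
  pthread_t t;
  /* Blocked on GL(dl_load_lock) before the fix, because the dlopen in
     the other thread holds that lock across the ctor below.  */
  pthread_create (&t, NULL, noop, NULL);
  pthread_join (t, NULL);
  atomic_store (&thread_done, 1);
  return NULL;
}

/* Runs in the dlopen'ing thread with dlopen's lock held.  */
__attribute__ ((constructor))
static void ctor (void)
{
  /* Wait for the concurrent pthread_create to finish: before the fix
     this spun forever.  */
  while (atomic_load (&thread_done) == 0)
    usleep (1000);
}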

The new lock is also used in __tls_get_addr instead of GL(dl_load_lock).

The new lock is held in _dl_open_worker and _dl_close_worker around
most of the logic before/after the init/fini routines.  When init/fini
routines are running then TLS is in a consistent, usable state.
In _dl_open_worker the new lock requires catching and reraising dlopen
failures that happen in the critical section.
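
The pattern in dl-open.c (outside this dl-tls.c diff) is roughly: run the
locked part under _dl_catch_exception, drop the lock, then reraise any
captured error with _dl_signal_exception, so the lock is never left held
on a failure path.  A sketch using glibc's internal helpers; the wrapper
and split-function names here are illustrative, not the commit's:

#include <ldsodefs.h>

/* The part of _dl_open_worker that must run under the new lock
   (illustrative split; the commit names it differently).  */
static void
open_worker_locked (void *args)
{
  /* ... map/relocation/TLS setup that must not race with
     _dl_allocate_tls_init ... */
}

static void
open_worker (void *args)
{
  struct dl_exception ex;

  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
  int err = _dl_catch_exception (&ex, open_worker_locked, args);
  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

  /* Reraise a failure only after the lock has been released.  */
  if (ex.errstring != NULL)
    _dl_signal_exception (err, &ex, NULL);
}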

The new lock is reinitialized in a fork child to keep the existing
behaviour, and it is kept recursive in case malloc interposition or TLS
access from signal handlers needs to retake it.  It is not obvious
whether this is necessary or helps, but it avoids changing the
preexisting behaviour.
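
The fork-child side (its hunk lives in the fork path, not in this
dl-tls.c diff) follows the pattern glibc already uses for
GL(dl_load_lock): only the forking thread survives in the child, so a
lock another thread held at fork time would stay stuck forever, and it
is simply reset to its initial unlocked, recursive state instead.  A
minimal sketch, assuming glibc's internal __rtld_lock_initialize helper;
the function name is hypothetical:

#include <ldsodefs.h>

/* Child-side lock reclamation after fork (illustrative name).  */
static void
reclaim_dl_locks_in_child (void)
{
  __rtld_lock_initialize (GL(dl_load_lock));
  /* The commit applies the same treatment to the new lock.  */
  __rtld_lock_initialize (GL(dl_load_tls_lock));
}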

The new lock may also be more appropriate than GL(dl_load_write_lock)
for dl_iterate_phdr, since the TLS state of an incompletely loaded
module may be accessed there.  If the new lock can replace the old one,
that can be a separate change.

Fixes bug 28357.

Reviewed-by: Adhemerval Zanella  <adhemerval.zanella@linaro.org>
Diffstat (limited to 'elf/dl-tls.c')
-rw-r--r--  elf/dl-tls.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index d554ae4497..9260d2d696 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -532,7 +532,7 @@ _dl_allocate_tls_init (void *result)
   size_t maxgen = 0;
 
   /* Protects global dynamic TLS related state.  */
-  __rtld_lock_lock_recursive (GL(dl_load_lock));
+  __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
 
   /* Check if the current dtv is big enough.   */
   if (dtv[-1].counter < GL(dl_tls_max_dtv_idx))
@@ -606,7 +606,7 @@ _dl_allocate_tls_init (void *result)
       listp = listp->next;
       assert (listp != NULL);
     }
-  __rtld_lock_unlock_recursive (GL(dl_load_lock));
+  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
 
   /* The DTV version is up-to-date now.  */
   dtv[0].counter = maxgen;
@@ -745,7 +745,7 @@ _dl_update_slotinfo (unsigned long int req_modid)
 
 	 Here the dtv needs to be updated to new_gen generation count.
 
-	 This code may be called during TLS access when GL(dl_load_lock)
+	 This code may be called during TLS access when GL(dl_load_tls_lock)
 	 is not held.  In that case the user code has to synchronize with
 	 dlopen and dlclose calls of relevant modules.  A module m is
 	 relevant if the generation of m <= new_gen and dlclose of m is
@@ -867,11 +867,11 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
   if (__glibc_unlikely (the_map->l_tls_offset
 			!= FORCED_DYNAMIC_TLS_OFFSET))
     {
-      __rtld_lock_lock_recursive (GL(dl_load_lock));
+      __rtld_lock_lock_recursive (GL(dl_load_tls_lock));
       if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
 	{
 	  the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
-	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
+	  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
 	}
       else if (__glibc_likely (the_map->l_tls_offset
 			       != FORCED_DYNAMIC_TLS_OFFSET))
@@ -883,7 +883,7 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
 #else
 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
 #endif
-	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
+	  __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
 
 	  dtv[GET_ADDR_MODULE].pointer.to_free = NULL;
 	  dtv[GET_ADDR_MODULE].pointer.val = p;
@@ -891,7 +891,7 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
 	  return (char *) p + GET_ADDR_OFFSET;
 	}
       else
-	__rtld_lock_unlock_recursive (GL(dl_load_lock));
+	__rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
     }
   struct dtv_pointer result = allocate_and_init (the_map);
   dtv[GET_ADDR_MODULE].pointer = result;
@@ -962,7 +962,7 @@ _dl_tls_get_addr_soft (struct link_map *l)
     return NULL;
 
   dtv_t *dtv = THREAD_DTV ();
-  /* This may be called without holding the GL(dl_load_lock).  Reading
+  /* This may be called without holding the GL(dl_load_tls_lock).  Reading
      arbitrary gen value is fine since this is best effort code.  */
   size_t gen = atomic_load_relaxed (&GL(dl_tls_generation));
   if (__glibc_unlikely (dtv[0].counter != gen))