author     Alexandre Oliva <aoliva@redhat.com>	2015-03-17 01:14:11 -0300
committer  Alexandre Oliva <aoliva@redhat.com>	2015-03-17 00:31:49 -0300
commit     f8aeae347377f3dfa8cbadde057adf1827fb1d44 (patch)
tree       cee16d94d0f5b7061455667057da4f141f98b9ae /elf
parent     b97eb2bdb1ed72982a7821c3078be591051cef59 (diff)
Fix DTV race, assert, DTV_SURPLUS Static TLS limit, and nptl_db garbage
for ChangeLog

	[BZ #17090]
	[BZ #17620]
	[BZ #17621]
	[BZ #17628]
	* NEWS: Update.
	* elf/dl-tls.c (_dl_update_slotinfo): Clean up outdated DTV
	entries with Static TLS too.  Skip entries past the end of the
	allocated DTV, from Alan Modra.
	(tls_get_addr_tail): Update to glibc_likely/unlikely.  Move
	Static TLS DTV entry set up from...
	(_dl_allocate_tls_init): ... here (fix modid assertion), ...
	* elf/dl-reloc.c (_dl_nothread_init_static_tls): ... here...
	* nptl/allocatestack.c (init_one_static_tls): ... and here...
	* elf/dl-open.c (dl_open_worker): Drop l_tls_modid upper bound
	for Static TLS.
	* elf/tlsdeschtab.h (map_generation): Return size_t.  Check
	that the slot we find is associated with the given map before
	using its generation count.
	* nptl_db/db_info.c: Include ldsodefs.h.
	(rtld_global, dtv_slotinfo_list, dtv_slotinfo): New typedefs.
	* nptl_db/structs.def (DB_RTLD_VARIABLE): New macro.
	(DB_MAIN_VARIABLE, DB_RTLD_GLOBAL_FIELD): Likewise.
	(link_map::l_tls_offset): New struct field.
	(dtv_t::counter): Likewise.
	(rtld_global): New struct.
	(_rtld_global): New rtld variable.
	(dl_tls_dtv_slotinfo_list): New rtld global field.
	(dtv_slotinfo_list): New struct.
	(dtv_slotinfo): Likewise.
	* nptl_db/td_symbol_list.c: Drop gnu/lib-names.h include.
	(td_lookup): Rename to...
	(td_mod_lookup): ... this.  Use new mod parameter instead of
	LIBPTHREAD_SO.
	* nptl_db/td_thr_tlsbase.c: Include link.h.
	(dtv_slotinfo_list, dtv_slotinfo): New functions.
	(td_thr_tlsbase): Check DTV generation.  Compute Static TLS
	addresses even if the DTV is out of date or missing them.
	* nptl_db/fetch-value.c (_td_locate_field): Do not refuse to
	index zero-length arrays.
	* nptl_db/thread_dbP.h: Include gnu/lib-names.h.
	(td_lookup): Make it a macro implemented in terms of...
	(td_mod_lookup): ... this declaration.
	* nptl_db/db-symbols.awk (DB_RTLD_VARIABLE): Override.
	(DB_MAIN_VARIABLE): Likewise.
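
The user-visible effect of the elf/dl-open.c hunk below is that dlopen no longer refuses a module that needs static TLS merely because its module ID exceeds the DTV surplus.  A hedged sketch of the affected call pattern; the module name libtlsmod.so and the symbol get_tls_var are hypothetical (link with -ldl; the module itself is sketched after the dl-open.c hunk):

#include <dlfcn.h>
#include <stdio.h>

int
main (void)
{
  /* Hypothetical DSO built so that its TLS lands in static storage
     (e.g. with -ftls-model=initial-exec).  */
  void *handle = dlopen ("libtlsmod.so", RTLD_NOW);
  if (handle == NULL)
    {
      /* Before this fix, a multi-threaded process could fail here with
	 "cannot load any more object with static TLS".  */
      fprintf (stderr, "dlopen: %s\n", dlerror ());
      return 1;
    }

  int *(*get_tls_var) (void)
    = (int *(*) (void)) dlsym (handle, "get_tls_var");
  if (get_tls_var != NULL)
    printf ("thread-local variable at %p\n", (void *) get_tls_var ());

  dlclose (handle);
  return 0;
}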
Diffstat (limited to 'elf')
-rw-r--r--  elf/dl-open.c      | 12
-rw-r--r--  elf/dl-reloc.c     |  6
-rw-r--r--  elf/dl-tls.c       | 63
-rw-r--r--  elf/tlsdeschtab.h  |  4
4 files changed, 35 insertions(+), 50 deletions(-)
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 47b4cb500a..0dbe07fb68 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -533,17 +533,7 @@ TLS generation counter wrapped!  Please report this."));
 	  && imap->l_tls_blocksize > 0)
 	{
 	  /* For static TLS we have to allocate the memory here and
-	     now.  This includes allocating memory in the DTV.  But we
-	     cannot change any DTV other than our own.  So, if we
-	     cannot guarantee that there is room in the DTV we don't
-	     even try it and fail the load.
-
-	     XXX We could track the minimum DTV slots allocated in
-	     all threads.  */
-	  if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
-	    _dl_signal_error (0, "dlopen", NULL, N_("\
-cannot load any more object with static TLS"));
-
+	     now, but we can delay updating the DTV.  */
 	  imap->l_need_tls_init = 0;
 #ifdef SHARED
 	  /* Update the slot information data for at least the
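
For completeness, a hedged sketch of the kind of module the removed check used to limit; the file name, symbol, and build line are illustrative only:

/* tlsmod.c -- hypothetical module; build e.g. with
     gcc -shared -fPIC -ftls-model=initial-exec -o libtlsmod.so tlsmod.c
   The initial-exec model forces the variable into static TLS, the case
   the removed DTV_SURPLUS bound used to reject at dlopen time.  */
static __thread int counter;

int *
get_tls_var (void)
{
  ++counter;
  return &counter;
}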
diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index b72287d984..0872636f16 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -137,12 +137,6 @@ _dl_nothread_init_static_tls (struct link_map *map)
 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
 #endif
 
-  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
-  dtv_t *dtv = THREAD_DTV ();
-  assert (map->l_tls_modid <= dtv[-1].counter);
-  dtv[map->l_tls_modid].pointer.val = dest;
-  dtv[map->l_tls_modid].pointer.is_static = true;
-
   /* Initialize the memory.  */
   memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
 	  '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
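
The copy-and-clear idiom left in _dl_nothread_init_static_tls copies the TLS initialization image and zeroes the remainder of the block (the TLS counterpart of .bss) in a single expression.  A standalone sketch with made-up data; mempcpy is a GNU extension declared in <string.h>:

#define _GNU_SOURCE
#include <string.h>

/* Stand-ins for map->l_tls_initimage and the related sizes.  */
static const char init_image[] = { 1, 2, 3, 4 };

static void
init_tls_block (void *dest, size_t blocksize)
{
  /* mempcpy returns the byte just past the copied image, so the memset
     clears exactly the uninitialized tail of the block.  */
  memset (mempcpy (dest, init_image, sizeof init_image),
	  '\0', blocksize - sizeof init_image);
}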
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 9d36d96aa7..20c7e33c41 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -493,17 +493,14 @@ _dl_allocate_tls_init (void *result)
 	  assert (listp->slotinfo[cnt].gen <= GL(dl_tls_generation));
 	  maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);
 
+	  dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
+	  dtv[map->l_tls_modid].pointer.is_static = false;
+
 	  if (map->l_tls_offset == NO_TLS_OFFSET
 	      || map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET)
-	    {
-	      /* For dynamically loaded modules we simply store
-		 the value indicating deferred allocation.  */
-	      dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
-	      dtv[map->l_tls_modid].pointer.is_static = false;
-	      continue;
-	    }
+	    continue;
 
-	  assert (map->l_tls_modid == cnt);
+	  assert (map->l_tls_modid == total + cnt);
 	  assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
 #if TLS_TCB_AT_TP
 	  assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
@@ -515,8 +512,6 @@ _dl_allocate_tls_init (void *result)
 #endif
 
 	  /* Copy the initialization image and clear the BSS part.  */
-	  dtv[map->l_tls_modid].pointer.val = dest;
-	  dtv[map->l_tls_modid].pointer.is_static = true;
 	  memset (__mempcpy (dest, map->l_tls_initimage,
 			     map->l_tls_initimage_size), '\0',
 		  map->l_tls_blocksize - map->l_tls_initimage_size);
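
The corrected assertion reflects how module IDs map onto the chunked slotinfo list: each list element covers listp->len modules, and entry cnt of an element corresponds to module ID total + cnt, where total counts the slots of all earlier elements.  A schematic walk over such a list, using simplified stand-in types rather than the real glibc structures:

#include <stddef.h>

/* Simplified stand-ins; the field names follow the ones visible in the
   diff, but these are not the glibc internals.  */
struct link_map_stub { size_t l_tls_modid; };
struct slotinfo_stub { size_t gen; struct link_map_stub *map; };
struct slotinfo_list_stub
{
  size_t len;
  struct slotinfo_list_stub *next;
  struct slotinfo_stub slotinfo[];
};

/* Find the slot for MODID, mirroring the total + cnt arithmetic used by
   _dl_allocate_tls_init and _dl_update_slotinfo.  */
static struct slotinfo_stub *
find_slot (struct slotinfo_list_stub *listp, size_t modid)
{
  size_t total = 0;
  while (listp != NULL)
    {
      if (modid < total + listp->len)
	return &listp->slotinfo[modid - total];
      total += listp->len;
      listp = listp->next;
    }
  return NULL;
}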
@@ -679,13 +674,16 @@ _dl_update_slotinfo (unsigned long int req_modid)
 	      struct link_map *map = listp->slotinfo[cnt].map;
 	      if (map == NULL)
 		{
-		  /* If this modid was used at some point the memory
-		     might still be allocated.  */
-		  if (! dtv[total + cnt].pointer.is_static
-		      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
+		  if (dtv[-1].counter >= total + cnt)
 		    {
-		      free (dtv[total + cnt].pointer.val);
+		      /* If this modid was used at some point the memory
+			 might still be allocated.  */
+		      if (! dtv[total + cnt].pointer.is_static
+			  && (dtv[total + cnt].pointer.val
+			      != TLS_DTV_UNALLOCATED))
+			free (dtv[total + cnt].pointer.val);
 		      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
+		      dtv[total + cnt].pointer.is_static = false;
 		    }
 
 		  continue;
@@ -718,10 +716,8 @@ _dl_update_slotinfo (unsigned long int req_modid)
 		   memalign and not malloc.  */
 		free (dtv[modid].pointer.val);
 
-	      /* This module is loaded dynamically- We defer memory
-		 allocation.  */
-	      dtv[modid].pointer.is_static = false;
 	      dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
+	      dtv[modid].pointer.is_static = false;
 
 	      if (modid == req_modid)
 		the_map = map;
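
Both _dl_update_slotinfo hunks rely on the DTV recording its own length: dtv[-1].counter holds the number of allocated module slots, so slots beyond that bound are now skipped rather than dereferenced.  A self-contained stub of that convention; the union mirrors the fields used in the diff but is not the real glibc dtv_t:

#include <stdbool.h>
#include <stddef.h>

/* Entry -1 of the DTV keeps the allocated length in .counter; the
   remaining entries keep per-module pointers.  The DTV pointer is
   expected to point one past the length entry, as in the loader.  */
typedef union
{
  size_t counter;
  struct
  {
    void *val;
    bool is_static;
  } pointer;
} dtv_stub_t;

/* The guard added in _dl_update_slotinfo: only touch slot MODID when
   the DTV actually extends that far.  */
static bool
dtv_has_slot (const dtv_stub_t *dtv, size_t modid)
{
  return dtv[-1].counter >= modid;
}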
@@ -759,13 +755,12 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
       the_map = listp->slotinfo[idx].map;
     }
 
- again:
   /* Make sure that, if a dlopen running in parallel forces the
      variable into static storage, we'll wait until the address in the
      static TLS block is set up, and use that.  If we're undecided
      yet, make sure we make the decision holding the lock as well.  */
-  if (__builtin_expect (the_map->l_tls_offset
-			!= FORCED_DYNAMIC_TLS_OFFSET, 0))
+  if (__glibc_unlikely (the_map->l_tls_offset
+			!= FORCED_DYNAMIC_TLS_OFFSET))
     {
       __rtld_lock_lock_recursive (GL(dl_load_lock));
       if (__glibc_likely (the_map->l_tls_offset == NO_TLS_OFFSET))
@@ -773,22 +768,28 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
 	  the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
 	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
 	}
-      else
+      else if (__glibc_likely (the_map->l_tls_offset
+			       != FORCED_DYNAMIC_TLS_OFFSET))
 	{
+#if TLS_TCB_AT_TP
+	  void *p = (char *) THREAD_SELF - the_map->l_tls_offset;
+#elif TLS_DTV_AT_TP
+	  void *p = (char *) THREAD_SELF + the_map->l_tls_offset + TLS_PRE_TCB_SIZE;
+#else
+# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
+#endif
 	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
-	  if (__builtin_expect (the_map->l_tls_offset
-				!= FORCED_DYNAMIC_TLS_OFFSET, 1))
-	    {
-	      void *p = dtv[GET_ADDR_MODULE].pointer.val;
-	      if (__glibc_unlikely (p == TLS_DTV_UNALLOCATED))
-		goto again;
 
-	      return (char *) p + GET_ADDR_OFFSET;
-	    }
+	  dtv[GET_ADDR_MODULE].pointer.is_static = true;
+	  dtv[GET_ADDR_MODULE].pointer.val = p;
+
+	  return (char *) p + GET_ADDR_OFFSET;
 	}
+      else
+	__rtld_lock_unlock_recursive (GL(dl_load_lock));
     }
   void *p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
-  dtv[GET_ADDR_MODULE].pointer.is_static = false;
+  assert (!dtv[GET_ADDR_MODULE].pointer.is_static);
 
   return (char *) p + GET_ADDR_OFFSET;
 }
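
The new static-TLS branch in tls_get_addr_tail computes the block's address directly from the thread pointer; whether the offset is subtracted or added depends on the TLS layout selected at build time.  The two formulas from the hunk, restated as a standalone sketch with illustrative parameter names:

#include <stddef.h>

/* TP stands in for THREAD_SELF; offsets and sizes are illustrative.  */

static void *
static_tls_addr_tcb_at_tp (char *tp, size_t l_tls_offset)
{
  /* TLS_TCB_AT_TP: the TLS blocks live below the thread pointer.  */
  return tp - l_tls_offset;
}

static void *
static_tls_addr_dtv_at_tp (char *tp, size_t l_tls_offset,
			   size_t tls_pre_tcb_size)
{
  /* TLS_DTV_AT_TP: the TLS blocks live above the thread pointer,
     past the pre-TCB area.  */
  return tp + l_tls_offset + tls_pre_tcb_size;
}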
diff --git a/elf/tlsdeschtab.h b/elf/tlsdeschtab.h
index d7e7955db2..d13b4e57c7 100644
--- a/elf/tlsdeschtab.h
+++ b/elf/tlsdeschtab.h
@@ -42,7 +42,7 @@ eq_tlsdesc (void *p, void *q)
   return tdp->tlsinfo.ti_offset == tdq->tlsinfo.ti_offset;
 }
 
-inline static int
+inline static size_t
 map_generation (struct link_map *map)
 {
   size_t idx = map->l_tls_modid;
@@ -58,7 +58,7 @@ map_generation (struct link_map *map)
 	     we can assume that, if the generation count is zero, we
 	     still haven't determined the generation count for this
 	     module.  */
-	  if (listp->slotinfo[idx].gen)
+	  if (listp->slotinfo[idx].map == map && listp->slotinfo[idx].gen)
 	    return listp->slotinfo[idx].gen;
 	  else
 	    break;
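
The added map check closes a small hole: module IDs can be reused after dlclose, so the generation cached at this index may belong to a different module.  A minimal restatement of the corrected rule, using stand-in types:

#include <stddef.h>

/* Stand-ins for the slotinfo entry and link map used by map_generation;
   not the real glibc types.  In the real code the slot is selected by
   map->l_tls_modid.  */
struct map_stub { size_t l_tls_modid; };
struct slot_stub { size_t gen; struct map_stub *map; };

/* Only trust a cached, nonzero generation if the slot still refers to
   this map; otherwise report 0, i.e. the generation has not been
   determined for this module yet.  */
static size_t
slot_generation (const struct slot_stub *slot, const struct map_stub *map)
{
  if (slot->map == map && slot->gen != 0)
    return slot->gen;
  return 0;
}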