author     Szabolcs Nagy <szabolcs.nagy@arm.com>    2021-02-11 13:24:47 +0000
committer  Szabolcs Nagy <szabolcs.nagy@arm.com>    2021-02-15 12:05:21 +0000
commit     191f5ae0f316e967e7e230fd9a919dce89d49b9f (patch)
tree       cc6867d1faf1794a533372d50cc959dacad49e57
parent     14e2476505d784bed608bd585c845d0d3043b5ca (diff)
elf: Remove lazy tlsdesc relocation related code (branch: nsz/bug19329)
Remove generic tlsdesc code related to lazy tlsdesc processing since
lazy tlsdesc relocation is no longer supported.  This includes removing
GL(dl_load_lock) from _dl_make_tlsdesc_dynamic which is only called at
load time when that lock is already held.

Added a documentation comment too.
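For context, _dl_make_tlsdesc_dynamic is a memoizing allocator: it keeps a
per-link-map hash table of dynamic TLS descriptor arguments and creates
entries on demand.  The sketch below is a simplified, self-contained
illustration of that shape after this patch, with no internal locking
because the caller is assumed to already hold GL(dl_load_lock).  The toy_*
types and the fixed-size table are stand-ins invented for this sketch, not
glibc's link_map or htab_* API.

#include <stddef.h>
#include <stdlib.h>

struct toy_tlsinfo { size_t ti_module; size_t ti_offset; };
struct toy_tlsdesc_dynamic_arg { size_t gen_count; struct toy_tlsinfo tlsinfo; };

/* Stand-in for struct link_map: a lazily filled per-map table.  */
#define TOY_TABLE_SIZE 64
struct toy_link_map
{
  struct toy_tlsdesc_dynamic_arg *tlsdesc_table[TOY_TABLE_SIZE];
  size_t l_tls_modid;
};

/* The caller is assumed to hold the loader lock already, so nothing here
   synchronizes.  Returns NULL on allocation failure.  */
static void *
toy_make_tlsdesc_dynamic (struct toy_link_map *map, size_t ti_offset)
{
  size_t slot = ti_offset % TOY_TABLE_SIZE;  /* stand-in for htab_find_slot */
  struct toy_tlsdesc_dynamic_arg *td = map->tlsdesc_table[slot];

  /* Reuse an existing descriptor for this offset if one was created
     earlier.  (This toy table ignores hash collisions; the real one
     does not.)  */
  if (td != NULL && td->tlsinfo.ti_offset == ti_offset)
    return td;

  td = malloc (sizeof *td);
  if (td == NULL)
    return NULL;
  td->gen_count = 0;                /* the real code uses map_generation ()  */
  td->tlsinfo.ti_module = map->l_tls_modid;
  td->tlsinfo.ti_offset = ti_offset;
  map->tlsdesc_table[slot] = td;
  return td;
}
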
-rw-r--r--  elf/tlsdeschtab.h          | 53
-rw-r--r--  sysdeps/aarch64/tlsdesc.c  |  1
-rw-r--r--  sysdeps/arm/tlsdesc.c      |  1
-rw-r--r--  sysdeps/i386/tlsdesc.c     |  1
-rw-r--r--  sysdeps/x86_64/tlsdesc.c   |  1
5 files changed, 6 insertions(+), 51 deletions(-)
diff --git a/elf/tlsdeschtab.h b/elf/tlsdeschtab.h
index 21695fd1e9..85bd0415e1 100644
--- a/elf/tlsdeschtab.h
+++ b/elf/tlsdeschtab.h
@@ -78,6 +78,10 @@ map_generation (struct link_map *map)
   return GL(dl_tls_generation) + 1;
 }
 
+/* Returns the data pointer for a given map and tls offset that is used
+   to fill in one of the GOT entries referenced by a TLSDESC relocation
+   when using dynamic TLS.  This requires allocation, returns NULL on
+   allocation failure.  */
 void *
 _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
 {
@@ -85,18 +89,12 @@ _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
   void **entry;
   struct tlsdesc_dynamic_arg *td, test;
 
-  /* FIXME: We could use a per-map lock here, but is it worth it?  */
-  __rtld_lock_lock_recursive (GL(dl_load_lock));
-
   ht = map->l_mach.tlsdesc_table;
   if (! ht)
     {
       ht = htab_create ();
       if (! ht)
-	{
-	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
-	  return 0;
-	}
+	return 0;
       map->l_mach.tlsdesc_table = ht;
     }
 
@@ -104,15 +102,11 @@ _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
   test.tlsinfo.ti_offset = ti_offset;
   entry = htab_find_slot (ht, &test, 1, hash_tlsdesc, eq_tlsdesc);
   if (! entry)
-    {
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
-      return 0;
-    }
+    return 0;
 
   if (*entry)
     {
       td = *entry;
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
       return td;
     }
 
@@ -122,44 +116,9 @@ _dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
      thread.  */
   td->gen_count = map_generation (map);
   td->tlsinfo = test.tlsinfo;
-
-  __rtld_lock_unlock_recursive (GL(dl_load_lock));
   return td;
 }
 
 # endif /* SHARED */
 
-/* The idea of the following two functions is to stop multiple threads
-   from attempting to resolve the same TLS descriptor without busy
-   waiting.  Ideally, we should be able to release the lock right
-   after changing td->entry, and then using say a condition variable
-   or a futex wake to wake up any waiting threads, but let's try to
-   avoid introducing such dependencies.  */
-
-static int
-__attribute__ ((unused))
-_dl_tlsdesc_resolve_early_return_p (struct tlsdesc volatile *td, void *caller)
-{
-  if (caller != atomic_load_relaxed (&td->entry))
-    return 1;
-
-  __rtld_lock_lock_recursive (GL(dl_load_lock));
-  if (caller != atomic_load_relaxed (&td->entry))
-    {
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
-      return 1;
-    }
-
-  atomic_store_relaxed (&td->entry, _dl_tlsdesc_resolve_hold);
-
-  return 0;
-}
-
-static void
-__attribute__ ((unused))
-_dl_tlsdesc_wake_up_held_fixups (void)
-{
-  __rtld_lock_unlock_recursive (GL(dl_load_lock));
-}
-
 #endif
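
The two helpers deleted at the end of this header implemented the old
lazy-resolution handshake: the first thread to reach an unresolved
descriptor took GL(dl_load_lock), re-checked td->entry, and installed the
_dl_tlsdesc_resolve_hold trampoline so that racing threads blocked on the
lock instead of busy waiting; releasing the lock later "woke" them.  The
sketch below is illustrative only: C11 atomics and a plain pthread mutex
stand in for glibc's relaxed atomics and the recursive loader lock.  With
lazy tlsdesc relocation gone, none of this protocol is needed, which is
why the helpers can go.

#include <stdatomic.h>
#include <pthread.h>

typedef void (*toy_entry_fn) (void);
struct toy_tlsdesc { _Atomic toy_entry_fn entry; };

static pthread_mutex_t toy_load_lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder hold trampoline: a thread landing here simply waits for
   the resolving thread to release the lock, then retries.  */
static void
toy_resolve_hold (void)
{
  pthread_mutex_lock (&toy_load_lock);
  pthread_mutex_unlock (&toy_load_lock);
}

/* Shape of the deleted _dl_tlsdesc_resolve_early_return_p: returns nonzero
   if another thread already resolved (or is resolving) the descriptor;
   otherwise keeps the lock held and parks later arrivals on the hold
   trampoline.  */
static int
toy_resolve_early_return_p (struct toy_tlsdesc *td, toy_entry_fn caller)
{
  if (atomic_load_explicit (&td->entry, memory_order_relaxed) != caller)
    return 1;

  pthread_mutex_lock (&toy_load_lock);
  if (atomic_load_explicit (&td->entry, memory_order_relaxed) != caller)
    {
      pthread_mutex_unlock (&toy_load_lock);
      return 1;
    }

  atomic_store_explicit (&td->entry, toy_resolve_hold, memory_order_relaxed);
  return 0;
}

/* Shape of the deleted _dl_tlsdesc_wake_up_held_fixups: dropping the lock
   is what lets threads parked in the hold trampoline proceed.  */
static void
toy_wake_up_held_fixups (void)
{
  pthread_mutex_unlock (&toy_load_lock);
}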
diff --git a/sysdeps/aarch64/tlsdesc.c b/sysdeps/aarch64/tlsdesc.c
index 9b6a8b0ec4..3c1bcd8fe6 100644
--- a/sysdeps/aarch64/tlsdesc.c
+++ b/sysdeps/aarch64/tlsdesc.c
@@ -22,7 +22,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table
diff --git a/sysdeps/arm/tlsdesc.c b/sysdeps/arm/tlsdesc.c
index 2f70adef5c..b3d1735af4 100644
--- a/sysdeps/arm/tlsdesc.c
+++ b/sysdeps/arm/tlsdesc.c
@@ -20,7 +20,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table
diff --git a/sysdeps/i386/tlsdesc.c b/sysdeps/i386/tlsdesc.c
index 436a21f66b..364c7769ce 100644
--- a/sysdeps/i386/tlsdesc.c
+++ b/sysdeps/i386/tlsdesc.c
@@ -20,7 +20,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table
diff --git a/sysdeps/x86_64/tlsdesc.c b/sysdeps/x86_64/tlsdesc.c
index ecf864d6ee..3bb80517a3 100644
--- a/sysdeps/x86_64/tlsdesc.c
+++ b/sysdeps/x86_64/tlsdesc.c
@@ -20,7 +20,6 @@
 #include <tls.h>
 #include <dl-tlsdesc.h>
 #include <dl-unmap-segments.h>
-#define _dl_tlsdesc_resolve_hold 0
 #include <tlsdeschtab.h>
 
 /* Unmap the dynamic object, but also release its TLS descriptor table