author     Florian Weimer <fweimer@redhat.com>   2023-09-08 12:32:14 +0200
committer  Florian Weimer <fweimer@redhat.com>   2023-09-08 12:34:27 +0200
commit     6985865bc3ad5b23147ee73466583dd7fdf65892
tree       5e058aad7a342ef64266c4decc089c4d81b6bcca /elf/dl-close.c
parent     434bf72a94de68f0cc7fbf3c44bf38c1911b70cb
elf: Always call destructors in reverse constructor order (bug 30785)
The current implementation of dlclose (and process exit) re-sorts the
link maps before calling ELF destructors.  Destructor order is not the
reverse of the constructor order as a result: The second sort takes
relocation dependencies into account, and other differences can result
from ambiguous inputs, such as cycles.  (The force_first handling in
_dl_sort_maps is not effective for dlclose.)  After the changes in
this commit, there is still a required difference due to
dlopen/dlclose ordering by the application, but the previous
discrepancies went beyond that.
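
To make the guarantee concrete, consider a hypothetical test (not
part of this patch; all file names are made up): b.so is linked
against a.so, and each prints "ctor a"/"ctor b" from an ELF
constructor and "dtor a"/"dtor b" from an ELF destructor.

/* main.c -- illustration only.  Build:
     gcc -fPIC -shared -o a.so a.c
     gcc -fPIC -shared -o b.so b.c ./a.so
     gcc -o main main.c -ldl  */
#include <dlfcn.h>

int
main (void)
{
  /* Loading b.so pulls in a.so; constructors run in dependency
     order: "ctor a", then "ctor b".  */
  void *b = dlopen ("./b.so", RTLD_NOW);

  /* After this commit, unloading runs the destructors strictly in
     reverse constructor order: "dtor b", then "dtor a".  The old
     re-sorting in dl_close_worker could deviate from that when
     relocation dependencies or cycles were involved.  */
  dlclose (b);
  return 0;
}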

A new global (namespace-spanning) list of link maps,
_dl_init_called_list, is updated right before ELF constructors are
called from _dl_init.
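
The list discipline: a link map is pushed onto the head of
_dl_init_called_list immediately before its ELF constructors run, so
a head-first traversal visits objects in reverse constructor order.
A standalone model of that discipline (illustrative code, not
glibc's; only the field names follow the patch):

#include <stdio.h>

struct link_map
{
  const char *l_name;
  int l_init_called;
  int l_map_used;
  struct link_map *l_init_called_next;
};

static struct link_map *_dl_init_called_list;

static void
call_init (struct link_map *l)
{
  if (l->l_init_called)
    /* Constructors already ran.  */
    return;
  l->l_init_called = 1;
  /* Protect the just-loaded object against a dlclose pass that is
     already in progress (see below).  */
  l->l_map_used = 1;
  /* Push before running the constructors.  */
  l->l_init_called_next = _dl_init_called_list;
  _dl_init_called_list = l;
  printf ("ctor %s\n", l->l_name);
}

int
main (void)
{
  struct link_map a = { .l_name = "a" }, b = { .l_name = "b" };
  call_init (&a);
  call_init (&b);
  /* Head-first traversal yields reverse constructor order: b, a.  */
  for (struct link_map *l = _dl_init_called_list; l != NULL;
       l = l->l_init_called_next)
    printf ("dtor %s\n", l->l_name);
  return 0;
}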

In dl_close_worker, the maps variable, an on-stack variable length
array, is eliminated.  (VLAs are problematic, and dlclose should not
call malloc because it cannot readily deal with malloc failure.)
Marking still-used objects uses the namespace list directly, with
next and next_idx replacing the done_index variable.
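
The back-up mechanism is the subtle part: when a dependency that sits
earlier in the namespace list is newly marked, next/next_idx are
reset so the walk resumes from that dependency instead of continuing
forward.  The hunks below show the real loop; the following is a
standalone model of just that mechanism (illustrative code, with
roots pre-marked via l_map_used, while the real loop also checks the
reference count and TLS destructor count):

#include <stddef.h>
#include <stdio.h>

struct link_map
{
  const char *l_name;
  int l_idx;
  int l_map_used, l_map_done;
  struct link_map *l_next;
  struct link_map **l_deps;     /* NULL-terminated dependency list.  */
};

static void
mark (struct link_map *ns_loaded)
{
  for (struct link_map *l = ns_loaded; l != NULL; )
    {
      /* next is reset to an earlier link map for remarking.  */
      struct link_map *next = l->l_next;
      int next_idx = l->l_idx + 1;  /* next->l_idx, but covers NULL.  */

      if (l->l_map_done || !l->l_map_used)
        {
          l = next;
          continue;
        }
      /* Marked, and its dependencies are processed below.  */
      l->l_map_done = 1;

      if (l->l_deps != NULL)
        for (struct link_map **lp = l->l_deps; *lp != NULL; ++lp)
          if (!(*lp)->l_map_used)
            {
              (*lp)->l_map_used = 1;
              /* The dependency sits earlier in the list: back up.  */
              if ((*lp)->l_idx < next_idx)
                {
                  next = *lp;
                  next_idx = next->l_idx;
                }
            }
      l = next;
    }
}

int
main (void)
{
  /* c depends on a, which sits earlier in the list; marking c forces
     the walk to back up and process a as well.  */
  struct link_map a = { .l_name = "a", .l_idx = 0 };
  struct link_map b = { .l_name = "b", .l_idx = 1 };
  struct link_map c = { .l_name = "c", .l_idx = 2 };
  struct link_map *c_deps[] = { &a, NULL };
  a.l_next = &b;
  b.l_next = &c;
  c.l_deps = c_deps;
  c.l_map_used = 1;             /* Root: still referenced.  */
  mark (&a);
  for (struct link_map *l = &a; l != NULL; l = l->l_next)
    printf ("%s: used=%d done=%d\n", l->l_name, l->l_map_used,
            l->l_map_done);
  return 0;
}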

After marking, _dl_init_called_list is used to call the destructors
of now-unused maps in reverse destructor order.  These destructors
can call dlopen.  Previously, new objects did not have l_map_used set.
This had to change: There is no copy of the link map list anymore,
so processing would cover newly opened (and unmarked) mappings,
unloading them.  Now, _dl_init (indirectly) sets l_map_used, too.
(dlclose is handled by the existing reentrancy guard.)

After _dl_init_called_list traversal, two more loops follow.  The
processing order changes to the original link map order in the
namespace.  Previously, dependency order was used.  The difference
should not matter because relocation dependencies could already
reorder link maps in the old code.

The changes to _dl_fini remove the sorting step and replace it with
a traversal of _dl_init_called_list.  The l_direct_opencount
decrement outside the loader lock is removed because it appears
incorrect: the counter manipulation could race with other dynamic
loader operations.
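
A sketch of the resulting _dl_fini shape (reconstructed from this
description; the actual change is in elf/dl-fini.c, outside this
diffstat, and details such as dropping the lock around the actual
destructor call are elided here):

void
_dl_fini (void)
{
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  /* The list head is the most recently constructed object, so
     popping from the head runs destructors in reverse constructor
     order; no sorting pass is needed.  */
  while (_dl_init_called_list != NULL)
    {
      struct link_map *l = _dl_init_called_list;
      _dl_init_called_list = l->l_init_called_next;
      _dl_call_fini (l);
    }

  __rtld_lock_unlock_recursive (GL(dl_load_lock));
}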

tst-audit23 needs adjusting for the changes in LA_ACT_DELETE
notifications.  The new approach for checking la_activity should
make it clearer that la_activity calls come in pairs around namespace
updates.
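
For reference, a minimal LD_AUDIT module that logs these events
(hypothetical; this is not the actual tst-audit23 harness):

/* audit.c -- build with: gcc -fPIC -shared -o audit.so audit.c
   Run a test program with LD_AUDIT=./audit.so to observe the pairs.  */
#include <link.h>
#include <stdio.h>

unsigned int
la_version (unsigned int version)
{
  return LAV_CURRENT;
}

void
la_activity (uintptr_t *cookie, unsigned int flag)
{
  /* Every LA_ACT_ADD or LA_ACT_DELETE is closed by a matching
     LA_ACT_CONSISTENT once the namespace update is complete.  */
  switch (flag)
    {
    case LA_ACT_ADD:
      puts ("la_activity: add");
      break;
    case LA_ACT_DELETE:
      puts ("la_activity: delete");
      break;
    case LA_ACT_CONSISTENT:
      puts ("la_activity: consistent");
      break;
    }
}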

The dependency sorting test cases need updates because the destructor
order is now always the reverse of the constructor order, even when
relocation dependencies or cycles are present.

There is a future cleanup opportunity to remove the now-constant
force_first and for_fini arguments from the _dl_sort_maps function.

Fixes commit 1df71d32fe5f5905ffd5d100e5e9ca8ad62 ("elf: Implement
force_first handling in _dl_sort_maps_dfs (bug 28937)").

Reviewed-by: DJ Delorie <dj@redhat.com>
Diffstat (limited to 'elf/dl-close.c')
-rw-r--r--  elf/dl-close.c  113
1 file changed, 72 insertions(+), 41 deletions(-)
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 1c7a861db1..c9a7d06577 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -138,30 +138,31 @@ _dl_close_worker (struct link_map *map, bool force)
 
   bool any_tls = false;
   const unsigned int nloaded = ns->_ns_nloaded;
-  struct link_map *maps[nloaded];
 
-  /* Run over the list and assign indexes to the link maps and enter
-     them into the MAPS array.  */
+  /* Run over the list and assign indexes to the link maps.  */
   int idx = 0;
   for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
     {
       l->l_map_used = 0;
       l->l_map_done = 0;
       l->l_idx = idx;
-      maps[idx] = l;
       ++idx;
     }
   assert (idx == nloaded);
 
-  /* Keep track of the lowest index link map we have covered already.  */
-  int done_index = -1;
-  while (++done_index < nloaded)
+  /* Keep marking link maps until no new link maps are found.  */
+  for (struct link_map *l = ns->_ns_loaded; l != NULL; )
     {
-      struct link_map *l = maps[done_index];
+      /* next is reset to earlier link maps for remarking.  */
+      struct link_map *next = l->l_next;
+      int next_idx = l->l_idx + 1; /* next->l_idx, but covers next == NULL.  */
 
       if (l->l_map_done)
-	/* Already handled.  */
-	continue;
+	{
+	  /* Already handled.  */
+	  l = next;
+	  continue;
+	}
 
       /* Check whether this object is still used.  */
       if (l->l_type == lt_loaded
@@ -171,7 +172,10 @@ _dl_close_worker (struct link_map *map, bool force)
 	     acquire is sufficient and correct.  */
 	  && atomic_load_acquire (&l->l_tls_dtor_count) == 0
 	  && !l->l_map_used)
-	continue;
+	{
+	  l = next;
+	  continue;
+	}
 
       /* We need this object and we handle it now.  */
       l->l_map_used = 1;
@@ -198,8 +202,11 @@ _dl_close_worker (struct link_map *map, bool force)
 			 already processed it, then we need to go back
 			 and process again from that point forward to
 			 ensure we keep all of its dependencies also.  */
-		      if ((*lp)->l_idx - 1 < done_index)
-			done_index = (*lp)->l_idx - 1;
+		      if ((*lp)->l_idx < next_idx)
+			{
+			  next = *lp;
+			  next_idx = next->l_idx;
+			}
 		    }
 		}
 
@@ -219,44 +226,65 @@ _dl_close_worker (struct link_map *map, bool force)
 		if (!jmap->l_map_used)
 		  {
 		    jmap->l_map_used = 1;
-		    if (jmap->l_idx - 1 < done_index)
-		      done_index = jmap->l_idx - 1;
+		    if (jmap->l_idx < next_idx)
+		      {
+			  next = jmap;
+			  next_idx = next->l_idx;
+		      }
 		  }
 	      }
 	  }
-    }
 
-  /* Sort the entries.  We can skip looking for the binary itself which is
-     at the front of the search list for the main namespace.  */
-  _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
+      l = next;
+    }
 
-  /* Call all termination functions at once.  */
-  bool unload_any = false;
-  bool scope_mem_left = false;
-  unsigned int unload_global = 0;
-  unsigned int first_loaded = ~0;
-  for (unsigned int i = 0; i < nloaded; ++i)
+  /* Call the destructors in reverse constructor order, and remove the
+     closed link maps from the list.  */
+  for (struct link_map **init_called_head = &_dl_init_called_list;
+       *init_called_head != NULL; )
     {
-      struct link_map *imap = maps[i];
+      struct link_map *imap = *init_called_head;
 
-      /* All elements must be in the same namespace.  */
-      assert (imap->l_ns == nsid);
-
-      if (!imap->l_map_used)
+      /* _dl_init_called_list is global, to produce a global ordering.
+	 Ignore the other namespaces (and link maps that are still used).  */
+      if (imap->l_ns != nsid || imap->l_map_used)
+	init_called_head = &imap->l_init_called_next;
+      else
 	{
 	  assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
 
-	  /* Call its termination function.  Do not do it for
-	     half-cooked objects.  Temporarily disable exception
-	     handling, so that errors are fatal.  */
-	  if (imap->l_init_called)
+	  /* _dl_init_called_list is updated at the same time as
+	     l_init_called.  */
+	  assert (imap->l_init_called);
+
+	  if (imap->l_info[DT_FINI_ARRAY] != NULL
+	      || imap->l_info[DT_FINI] != NULL)
 	    _dl_catch_exception (NULL, _dl_call_fini, imap);
 
 #ifdef SHARED
 	  /* Auditing checkpoint: we remove an object.  */
 	  _dl_audit_objclose (imap);
 #endif
+	  /* Unlink this link map.  */
+	  *init_called_head = imap->l_init_called_next;
+	}
+    }
+
+
+  bool unload_any = false;
+  bool scope_mem_left = false;
+  unsigned int unload_global = 0;
+
+  /* For skipping un-unloadable link maps in the second loop.  */
+  struct link_map *first_loaded = ns->_ns_loaded;
 
+  /* Iterate over the namespace to find objects to unload.  Some
+     unloadable objects may not be on _dl_init_called_list due to
+     dlopen failure.  */
+  for (struct link_map *imap = first_loaded; imap != NULL; imap = imap->l_next)
+    {
+      if (!imap->l_map_used)
+	{
 	  /* This object must not be used anymore.  */
 	  imap->l_removed = 1;
 
@@ -267,8 +295,8 @@ _dl_close_worker (struct link_map *map, bool force)
 	    ++unload_global;
 
 	  /* Remember where the first dynamically loaded object is.  */
-	  if (i < first_loaded)
-	    first_loaded = i;
+	  if (first_loaded == NULL)
+	      first_loaded = imap;
 	}
       /* Else imap->l_map_used.  */
       else if (imap->l_type == lt_loaded)
@@ -404,8 +432,8 @@ _dl_close_worker (struct link_map *map, bool force)
 	    imap->l_loader = NULL;
 
 	  /* Remember where the first dynamically loaded object is.  */
-	  if (i < first_loaded)
-	    first_loaded = i;
+	  if (first_loaded == NULL)
+	      first_loaded = imap;
 	}
     }
 
@@ -476,10 +504,11 @@ _dl_close_worker (struct link_map *map, bool force)
 
   /* Check each element of the search list to see if all references to
      it are gone.  */
-  for (unsigned int i = first_loaded; i < nloaded; ++i)
+  for (struct link_map *imap = first_loaded; imap != NULL; )
     {
-      struct link_map *imap = maps[i];
-      if (!imap->l_map_used)
+      if (imap->l_map_used)
+	imap = imap->l_next;
+      else
 	{
 	  assert (imap->l_type == lt_loaded);
 
@@ -690,7 +719,9 @@ _dl_close_worker (struct link_map *map, bool force)
 	  if (imap == GL(dl_initfirst))
 	    GL(dl_initfirst) = NULL;
 
+	  struct link_map *next = imap->l_next;
 	  free (imap);
+	  imap = next;
 	}
     }