path: root/elf/dl-fini.c
author     Florian Weimer <fweimer@redhat.com>    2023-10-18 11:30:38 +0200
committer  Florian Weimer <fweimer@redhat.com>    2023-10-18 11:30:38 +0200
commit     dd32e1db386c77c61850a7cbd0c126b7b3c63ece (patch)
tree       9f0c9c3ead1a6be1c63b6841c85ec5597bf10f6a /elf/dl-fini.c
parent     2ad9b674cf6cd6ba59c064427cb7aeb43a66d8a9 (diff)
Revert "elf: Always call destructors in reverse constructor order (bug 30785)"
This reverts commit 6985865bc3ad5b23147ee73466583dd7fdf65892.

Reason for revert:

The commit changes the order of ELF destructor calls too much relative
to what applications expect or can handle.  In particular, during
process exit and _dl_fini, with the reverted commit in place, we no
longer call the destructors of the main program first; they only run
after some dlopen'ed objects have been destructed.  This robs
applications
of an opportunity to influence destructor order by calling dlclose
explicitly from the main program's ELF destructors.  A couple of
different approaches involving reverse constructor order were tried,
and none of them worked really well.  It seems we need to keep the
dependency sorting in _dl_fini.
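
A minimal sketch of that pattern, assuming a hypothetical plugin
"plugin.so" loaded by the main program (the file name and function
names are illustrative, not taken from the commit):

    #include <dlfcn.h>
    #include <stddef.h>

    /* Handle to a plugin the main program loaded at startup.  */
    static void *plugin_handle;

    __attribute__ ((constructor))
    static void
    load_plugin (void)
    {
      plugin_handle = dlopen ("./plugin.so", RTLD_NOW);
    }

    /* With the dependency sorting in _dl_fini restored, the main
       program's ELF destructors run first, so the application can
       force the plugin's destructors to run here, at a point it
       controls.  */
    __attribute__ ((destructor))
    static void
    unload_plugin (void)
    {
      if (plugin_handle != NULL)
        dlclose (plugin_handle);
    }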

There is also an ambiguity regarding nested dlopen calls from ELF
constructors: Should those destructors run before or after the object
that called dlopen?  Commit 6985865bc3ad5b2314 ran destructors in the
reverse order in which ELF constructor calls started, but arguably the
reverse order of constructor completion is more correct.  However, that
alone
is not sufficient to address application compatibility issues (it
does not change _dl_fini ordering at all).
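
A minimal sketch of the ambiguous nested case, with illustrative file
names: liba.so's constructor dlopens libb.so, so libb.so's constructor
starts after liba.so's but completes before it.

    /* liba.c, built as liba.so.  */
    #include <dlfcn.h>

    __attribute__ ((constructor))
    static void
    init_a (void)
    {
      /* libb.so is a hypothetical second object.  Reverse order of
         constructor starts runs libb.so's destructor before
         liba.so's; reverse order of constructor completion runs
         liba.so's destructor first, so it can still use libb.so.  */
      dlopen ("libb.so", RTLD_NOW);
    }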
Diffstat (limited to 'elf/dl-fini.c')
 elf/dl-fini.c | 152
 1 file changed, 100 insertions(+), 52 deletions(-)
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index e201d36651..9acb64f47c 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -24,68 +24,116 @@
 void
 _dl_fini (void)
 {
-  /* Call destructors strictly in the reverse order of constructors.
-     This causes fewer surprises than some arbitrary reordering based
-     on new (relocation) dependencies.  None of the objects are
-     unmapped, so applications can deal with this if their DSOs remain
-     in a consistent state after destructors have run.  */
-
-  /* Protect against concurrent loads and unloads.  */
-  __rtld_lock_lock_recursive (GL(dl_load_lock));
-
-  /* Ignore objects which are opened during shutdown.  */
-  struct link_map *local_init_called_list = _dl_init_called_list;
-
-  for (struct link_map *l = local_init_called_list; l != NULL;
-       l = l->l_init_called_next)
-      /* Bump l_direct_opencount of all objects so that they
-	 are not dlclose()ed from underneath us.  */
-      ++l->l_direct_opencount;
-
-  /* After this point, everything linked from local_init_called_list
-     cannot be unloaded because of the reference counter update.  */
-  __rtld_lock_unlock_recursive (GL(dl_load_lock));
-
-  /* Perform two passes: One for non-audit modules, one for audit
-     modules.  This way, audit modules receive unload notifications
-     for non-audit objects, and the destructors for audit modules
-     still run.  */
+  /* Lots of fun ahead.  We have to call the destructors for all still
+     loaded objects, in all namespaces.  The problem is that the ELF
+     specification now demands that dependencies between the modules
+     are taken into account.  I.e., the destructor for a module is
+     called before the ones for any of its dependencies.
+
+     To make things more complicated, we cannot simply use the reverse
+     order of the constructors.  Since the user might have loaded objects
+     using `dlopen', there are possibly several other modules whose
+     dependencies have to be taken into account.  Therefore we have to
+     determine the order of the modules once again from the beginning.  */
+
+  /* We run the destructors of the main namespace last.  As for the
+     other namespaces, we run the destructors in them in reverse
+     order of the namespace ID.  */
+#ifdef SHARED
+  int do_audit = 0;
+ again:
+#endif
+  for (Lmid_t ns = GL(dl_nns) - 1; ns >= 0; --ns)
+    {
+      /* Protect against concurrent loads and unloads.  */
+      __rtld_lock_lock_recursive (GL(dl_load_lock));
+
+      unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
+      /* No need to do anything for empty namespaces or those used for
+	 auditing DSOs.  */
+      if (nloaded == 0
+#ifdef SHARED
+	  || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
+#endif
+	  )
+	__rtld_lock_unlock_recursive (GL(dl_load_lock));
+      else
+	{
 #ifdef SHARED
-  int last_pass = GLRO(dl_naudit) > 0;
-  Lmid_t last_ns = -1;
-  for (int do_audit = 0; do_audit <= last_pass; ++do_audit)
+	  _dl_audit_activity_nsid (ns, LA_ACT_DELETE);
 #endif
-    for (struct link_map *l = local_init_called_list; l != NULL;
-	 l = l->l_init_called_next)
-      {
+
+	  /* Now we can allocate an array to hold all the pointers and
+	     copy the pointers in.  */
+	  struct link_map *maps[nloaded];
+
+	  unsigned int i;
+	  struct link_map *l;
+	  assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
+	  for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
+	    /* Do not handle ld.so in secondary namespaces.  */
+	    if (l == l->l_real)
+	      {
+		assert (i < nloaded);
+
+		maps[i] = l;
+		l->l_idx = i;
+		++i;
+
+		/* Bump l_direct_opencount of all objects so that they
+		   are not dlclose()ed from underneath us.  */
+		++l->l_direct_opencount;
+	      }
+	  assert (ns != LM_ID_BASE || i == nloaded);
+	  assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
+	  unsigned int nmaps = i;
+
+	  /* Now we have to do the sorting.  We can skip looking for the
+	     binary itself which is at the front of the search list for
+	     the main namespace.  */
+	  _dl_sort_maps (maps, nmaps, (ns == LM_ID_BASE), true);
+
+	  /* We do not rely on the linked list of loaded objects anymore
+	     from this point on.  We have our own list here (maps).  The
+	     various members of this list cannot vanish since the open
+	     count is too high and will be decremented in this loop.  So
+	     we release the lock so that some code which might be called
+	     from a destructor can directly or indirectly access the
+	     lock.  */
+	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
+
+	  /* 'maps' now contains the objects in the right order.  Now
+	     call the destructors.  We have to process this array from
+	     the front.  */
+	  for (i = 0; i < nmaps; ++i)
+	    {
+	      struct link_map *l = maps[i];
+
+	      if (l->l_init_called)
+		{
+		  _dl_call_fini (l);
 #ifdef SHARED
-	if (GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing != do_audit)
-	  continue;
-
-	/* Avoid back-to-back calls of _dl_audit_activity_nsid for the
-	   same namespace.  */
-	if (last_ns != l->l_ns)
-	  {
-	    if (last_ns >= 0)
-	      _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT);
-	    _dl_audit_activity_nsid (l->l_ns, LA_ACT_DELETE);
-	    last_ns = l->l_ns;
-	  }
+		  /* Auditing checkpoint: another object closed.  */
+		  _dl_audit_objclose (l);
 #endif
+		}
 
-	/* There is no need to re-enable exceptions because _dl_fini
-	   is not called from a context where exceptions are caught.  */
-	_dl_call_fini (l);
+	      /* Correct the previous increment.  */
+	      --l->l_direct_opencount;
+	    }
 
 #ifdef SHARED
-	/* Auditing checkpoint: another object closed.  */
-	_dl_audit_objclose (l);
+	  _dl_audit_activity_nsid (ns, LA_ACT_CONSISTENT);
 #endif
-      }
+	}
+    }
 
 #ifdef SHARED
-  if (last_ns >= 0)
-    _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT);
+  if (! do_audit && GLRO(dl_naudit) > 0)
+    {
+      do_audit = 1;
+      goto again;
+    }
 
   if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS))
     _dl_debug_printf ("\nruntime linker statistics:\n"