author    Ulrich Drepper <drepper@redhat.com>  2005-04-27 01:39:11 +0000
committer Ulrich Drepper <drepper@redhat.com>  2005-04-27 01:39:11 +0000
commit    bfc832ccf15e467b6271e8b237e467a30efd3d12 (patch)
tree      6f96e6258327c795b2cf93643b64b950e2f9d239 /elf/dl-close.c
parent    462be6908c551edd67cba1bfe93187f6e7aae6e9 (diff)
* elf/dl-close.c: Include stddef.h.
	(_dl_close): If called recursively, just remember GC needs to be rerun
	and decrease l_direct_opencount.  Avoid GC if l_direct_opencount
	decreased to 1.  Rerun GC at the end if any destructor unloaded some
	additional libraries.
	* elf/Makefile: Add rules to build and run unload6 test.
	* elf/unload6.c: New test.
	* elf/unload6mod1.c: New file.
	* elf/unload6mod2.c: New file.
	* elf/unload6mod3.c: New file.

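	For illustration, a minimal sketch (hypothetical, not the actual
	unload6 test; the other_handle variable and its setup are assumed)
	of the situation this entry describes: a module destructor that
	calls dlclose while the outer _dl_close is still running.  With
	this patch, the inner call only records that the GC must be rerun;
	the outermost _dl_close then loops back and unloads everything.

	#include <dlfcn.h>

	/* Assumed to hold a module opened earlier with dlopen.  */
	static void *other_handle;

	__attribute__ ((destructor))
	static void
	fini (void)
	{
	  if (other_handle != NULL)
	    /* Recursive _dl_close: just marks the GC for rerun.  */
	    dlclose (other_handle);
	}
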
	* malloc/hooks.c (mem2chunk_check): Add magic_p argument, set *magic_p
	if magic_p is not NULL.
	(top_check): Invoke MALLOC_FAILURE_ACTION if MORECORE failed.
	(malloc_check): Fail if sz == -1.
	(free_check): Adjust mem2chunk_check caller.
	(realloc_check): Likewise.  Fail if bytes == -1.  If bytes == 0 and
	oldmem != NULL, call free_check and return NULL.  If reallocating
	and returning NULL, invert the magic byte again to make oldmem a
	valid region for further checking.
	(memalign_check): Fail if bytes == -1.
	* malloc/Makefile: Add rules to build and run tst-mcheck.
	* malloc/tst-mcheck.c: New test.
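
	As an illustration of the checking semantics described above, a
	minimal sketch in the spirit of tst-mcheck (hypothetical, not the
	actual test; it assumes the MALLOC_CHECK_ environment variable is
	set so the *_check wrappers are installed):

	#include <stdio.h>
	#include <stdlib.h>

	int
	main (void)
	{
	  /* malloc_check is expected to fail for sz == (size_t) -1.  */
	  if (malloc ((size_t) -1) != NULL)
	    puts ("malloc ((size_t) -1) unexpectedly succeeded");

	  char *p = malloc (16);
	  if (p != NULL)
	    {
	      /* With this patch, realloc_check with bytes == 0 and
		 oldmem != NULL frees oldmem and returns NULL.  */
	      if (realloc (p, 0) != NULL)
		puts ("realloc (p, 0) did not return NULL");
	    }

	  return 0;
	}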
Diffstat (limited to 'elf/dl-close.c')
-rw-r--r--  elf/dl-close.c | 55
1 file changed, 41 insertions(+), 14 deletions(-)
diff --git a/elf/dl-close.c b/elf/dl-close.c
index cd4fa7cfbe..754dd678fe 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -20,6 +20,7 @@
 #include <assert.h>
 #include <dlfcn.h>
 #include <libintl.h>
+#include <stddef.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -105,10 +106,6 @@ _dl_close (void *_map)
   struct link_map *map = _map;
   Lmid_t ns = map->l_ns;
   unsigned int i;
-#ifdef USE_TLS
-  bool any_tls = false;
-#endif
-
   /* First see whether we can remove the object at all.  */
   if (__builtin_expect (map->l_flags_1 & DF_1_NODELETE, 0)
       && map->l_init_called)
@@ -124,9 +121,17 @@ _dl_close (void *_map)
   /* One less direct use.  */
   --map->l_direct_opencount;
 
-  /* Decrement the reference count.  */
-  if (map->l_direct_opencount > 1 || map->l_type != lt_loaded)
+  /* If _dl_close is called recursively (some destructor calls dlclose),
+     just record that the parent _dl_close will need to do garbage collection
+     again and return.  */
+  static enum { not_pending, pending, rerun } dl_close_state;
+
+  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
+      || dl_close_state != not_pending)
     {
+      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
+	dl_close_state = rerun;
+
       /* There are still references to this object.  Do nothing more.  */
       if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
 	_dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
@@ -136,12 +141,18 @@ _dl_close (void *_map)
       return;
     }
 
+ retry:
+  dl_close_state = pending;
+
+#ifdef USE_TLS
+  bool any_tls = false;
+#endif
   const unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
   char used[nloaded];
   char done[nloaded];
   struct link_map *maps[nloaded];
 
-  /* Run over the list and assign indeces to the link maps and enter
+  /* Run over the list and assign indexes to the link maps and enter
      them into the MAPS array.  */
   int idx = 0;
   for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
@@ -302,7 +313,7 @@ _dl_close (void *_map)
 	  if (imap->l_searchlist.r_list == NULL
 	      && imap->l_initfini != NULL)
 	    {
-	      /* The object is still used.  But the object we are
+	      /* The object is still used.  But one of the objects we are
 		 unloading right now is responsible for loading it.  If
 		 the current object does not have its own scope yet we
 		 have to create one.  This has to be done before running
@@ -318,15 +329,27 @@ _dl_close (void *_map)
 	      imap->l_searchlist.r_nlist = cnt;
 
 	      for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt)
-		if (imap->l_scope[cnt] == &map->l_searchlist)
+		/* This relies on l_scope[] entries always being set either
+		   to the map's own l_symbolic_searchlist address or to some
+		   other map's l_searchlist address.  */
+		if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist)
 		  {
-		    imap->l_scope[cnt] = &imap->l_searchlist;
-		    break;
+		    struct link_map *tmap;
+
+		    tmap = (struct link_map *) ((char *) imap->l_scope[cnt]
+						- offsetof (struct link_map,
+							    l_searchlist));
+		    assert (tmap->l_ns == ns);
+		    if (tmap->l_idx != -1)
+		      {
+			imap->l_scope[cnt] = &imap->l_searchlist;
+			break;
+		      }
 		  }
 	    }
 
 	  /* The loader is gone, so mark the object as not having one.
-	     Note: l_idx == -1 -> object will be removed.  */
+	     Note: l_idx != -1 -> object will be removed.  */
 	  if (imap->l_loader != NULL && imap->l_loader->l_idx != -1)
 	    imap->l_loader = NULL;
 
@@ -583,8 +606,12 @@ _dl_close (void *_map)
   r->r_state = RT_CONSISTENT;
   _dl_debug_state ();
 
-  /* Release the lock.  */
+  /* Recheck whether the GC needs to be rerun; if not, release the lock.  */
  out:
+  if (dl_close_state == rerun)
+    goto retry;
+
+  dl_close_state = not_pending;
   __rtld_lock_unlock_recursive (GL(dl_load_lock));
 }
 
@@ -654,7 +681,7 @@ libc_freeres_fn (free_mem)
 	free_slotinfo (&GL(dl_tls_dtv_slotinfo_list));
       else
 # endif
-        /* The first element of the list does not have to be deallocated.
+	/* The first element of the list does not have to be deallocated.
 	   It was allocated in the dynamic linker (i.e., with a different
 	   malloc), and in the static library it's in .bss space.  */
 	free_slotinfo (&GL(dl_tls_dtv_slotinfo_list)->next);
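
The pointer arithmetic in the l_scope walk above is the common
"container of" idiom: stepping back from the address of an embedded
member to the enclosing structure.  A self-contained sketch with
invented structure names:

	#include <assert.h>
	#include <stddef.h>

	struct searchlist { int r_nlist; };
	struct map
	{
	  int l_idx;
	  struct searchlist l_searchlist;  /* embedded member */
	};

	int
	main (void)
	{
	  struct map m = { .l_idx = 7 };
	  struct searchlist *sl = &m.l_searchlist;

	  /* Step back from the member to the containing struct map.  */
	  struct map *owner
	    = (struct map *) ((char *) sl
			      - offsetof (struct map, l_searchlist));
	  assert (owner == &m && owner->l_idx == 7);
	  return 0;
	}

In the patch the embedded member is link_map's l_searchlist, which
lets _dl_close recover the map that owns a scope entry and check its
l_idx to see whether that map is itself being unloaded.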