author     Florian Weimer <fweimer@redhat.com>   2015-10-29 09:26:03 +0100
committer  Florian Weimer <fweimer@redhat.com>   2015-10-29 09:26:03 +0100
commit     58acfe6fa7b1e28ec15ce59c5a90fe36a9c11a85 (patch)
tree       bf53ad2bf8249a03702408bcaca07b91ace4f6b7 /elf
parent     2145f97cee01140d5369a8c67dc92eacfe8e4417 (diff)
_dl_fini: Rewrite to use VLA instead of extend_alloca
In this case, extend_alloca is used to work around the lack of
deallocation on scope exit.  A VLA is automatically deallocated on
scope exit, so it is the more fitting approach.
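
As a minimal, self-contained sketch of that property (hypothetical
names, not the glibc code): a VLA declared inside a block is released
automatically when the block is left, so each pass through a loop gets
a fresh, right-sized array with no explicit cleanup, which is what
extend_alloca had to emulate.

#include <stddef.h>

static void
process (int *item, size_t idx)
{
  (void) item;
  (void) idx;
}

void
run_passes (size_t counts[], size_t npasses)
{
  for (size_t pass = 0; pass < npasses; ++pass)
    {
      size_t n = counts[pass];
      if (n == 0)
        continue;           /* a zero-length VLA would be undefined */

      int work[n];          /* VLA: released when this block is left */
      for (size_t i = 0; i < n; ++i)
        work[i] = (int) i;
      for (size_t i = 0; i < n; ++i)
        process (&work[i], i);
    }                       /* work[] is gone here, on every pass */
}

The n == 0 guard mirrors the nloaded == 0 check in the patch below,
which keeps the VLA declaration out of the empty-namespace case.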

To implement this, it is necessary to eliminate the goto.  In addition,
this change eliminates the trivially-true assert; the goto already skips
the assert whenever nloaded == 0, so its check that nloaded > 0 can
never fail.

	* elf/dl-fini.c (_dl_fini): Rewrite to use variable-length array
	instead of extend_alloca.  Change control flow to avoid a goto.
	Remove assert which is trivially always true.
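
The control-flow change can be sketched the same way (again with
hypothetical names, not the actual _dl_fini code): the early-exit case
unlocks the load lock directly instead of jumping to a shared label,
and the VLA lives inside the else branch, so its storage is released
when that branch ends.

#include <stddef.h>

/* Stand-ins for the real lock and worker routines (hypothetical).  */
static void lock_ns (void) { }
static void unlock_ns (void) { }
static size_t count_objects (size_t ns) { return ns; }
static void run_destructor (size_t ns, size_t idx) { (void) ns; (void) idx; }

void
fini_namespace (size_t ns)
{
  lock_ns ();
  size_t nloaded = count_objects (ns);

  if (nloaded == 0)
    /* Early-exit case: unlock here instead of jumping to a shared
       'out:' label with a goto.  */
    unlock_ns ();
  else
    {
      /* The VLA is scoped to this branch and released when the branch
         is left, so no goto and no manual deallocation are needed.  */
      size_t order[nloaded];
      for (size_t i = 0; i < nloaded; ++i)
        order[i] = i;

      /* Drop the lock before running destructors, as the real code does;
         the array stays valid because it belongs to this block, not to
         the lock.  */
      unlock_ns ();

      for (size_t i = 0; i < nloaded; ++i)
        run_destructor (ns, order[i]);
    }
}
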
Diffstat (limited to 'elf')
-rw-r--r--  elf/dl-fini.c  |  193
1 file changed, 89 insertions(+), 104 deletions(-)
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index 6cfe6519c4..0790b04d86 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -16,7 +16,6 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
-#include <alloca.h>
 #include <assert.h>
 #include <string.h>
 #include <ldsodefs.h>
@@ -140,8 +139,6 @@ _dl_fini (void)
      using `dlopen' there are possibly several other modules with its
      dependencies to be taken into account.  Therefore we have to start
      determining the order of the modules once again from the beginning.  */
-  struct link_map **maps = NULL;
-  size_t maps_size = 0;
 
   /* We run the destructors of the main namespaces last.  As for the
      other namespaces, we pick run the destructors in them in reverse
@@ -155,7 +152,6 @@ _dl_fini (void)
       /* Protect against concurrent loads and unloads.  */
       __rtld_lock_lock_recursive (GL(dl_load_lock));
 
-      unsigned int nmaps = 0;
       unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
       /* No need to do anything for empty namespaces or those used for
 	 auditing DSOs.  */
@@ -164,118 +160,107 @@ _dl_fini (void)
 	  || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
 #endif
 	  )
-	goto out;
-
-      /* XXX Could it be (in static binaries) that there is no object
-	 loaded?  */
-      assert (ns != LM_ID_BASE || nloaded > 0);
-
-      /* Now we can allocate an array to hold all the pointers and copy
-	 the pointers in.  */
-      if (maps_size < nloaded * sizeof (struct link_map *))
+	__rtld_lock_unlock_recursive (GL(dl_load_lock));
+      else
 	{
-	  if (maps_size == 0)
+	  /* Now we can allocate an array to hold all the pointers and
+	     copy the pointers in.  */
+	  struct link_map *maps[nloaded];
+
+	  unsigned int i;
+	  struct link_map *l;
+	  assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
+	  for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
+	    /* Do not handle ld.so in secondary namespaces.  */
+	    if (l == l->l_real)
+	      {
+		assert (i < nloaded);
+
+		maps[i] = l;
+		l->l_idx = i;
+		++i;
+
+		/* Bump l_direct_opencount of all objects so that they
+		   are not dlclose()ed from underneath us.  */
+		++l->l_direct_opencount;
+	      }
+	  assert (ns != LM_ID_BASE || i == nloaded);
+	  assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
+	  unsigned int nmaps = i;
+
+	  /* Now we have to do the sorting.  */
+	  _dl_sort_fini (maps, nmaps, NULL, ns);
+
+	  /* We do not rely on the linked list of loaded object anymore
+	     from this point on.  We have our own list here (maps).  The
+	     various members of this list cannot vanish since the open
+	     count is too high and will be decremented in this loop.  So
+	     we release the lock so that some code which might be called
+	     from a destructor can directly or indirectly access the
+	     lock.  */
+	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
+
+	  /* 'maps' now contains the objects in the right order.  Now
+	     call the destructors.  We have to process this array from
+	     the front.  */
+	  for (i = 0; i < nmaps; ++i)
 	    {
-	      maps_size = nloaded * sizeof (struct link_map *);
-	      maps = (struct link_map **) alloca (maps_size);
-	    }
-	  else
-	    maps = (struct link_map **)
-	      extend_alloca (maps, maps_size,
-			     nloaded * sizeof (struct link_map *));
-	}
+	      struct link_map *l = maps[i];
 
-      unsigned int i;
-      struct link_map *l;
-      assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
-      for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
-	/* Do not handle ld.so in secondary namespaces.  */
-	if (l == l->l_real)
-	  {
-	    assert (i < nloaded);
-
-	    maps[i] = l;
-	    l->l_idx = i;
-	    ++i;
-
-	    /* Bump l_direct_opencount of all objects so that they are
-	       not dlclose()ed from underneath us.  */
-	    ++l->l_direct_opencount;
-	  }
-      assert (ns != LM_ID_BASE || i == nloaded);
-      assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
-      nmaps = i;
-
-      /* Now we have to do the sorting.  */
-      _dl_sort_fini (maps, nmaps, NULL, ns);
-
-      /* We do not rely on the linked list of loaded object anymore from
-	 this point on.  We have our own list here (maps).  The various
-	 members of this list cannot vanish since the open count is too
-	 high and will be decremented in this loop.  So we release the
-	 lock so that some code which might be called from a destructor
-	 can directly or indirectly access the lock.  */
-    out:
-      __rtld_lock_unlock_recursive (GL(dl_load_lock));
-
-      /* 'maps' now contains the objects in the right order.  Now call the
-	 destructors.  We have to process this array from the front.  */
-      for (i = 0; i < nmaps; ++i)
-	{
-	  l = maps[i];
-
-	  if (l->l_init_called)
-	    {
-	      /* Make sure nothing happens if we are called twice.  */
-	      l->l_init_called = 0;
-
-	      /* Is there a destructor function?  */
-	      if (l->l_info[DT_FINI_ARRAY] != NULL
-		  || l->l_info[DT_FINI] != NULL)
+	      if (l->l_init_called)
 		{
-		  /* When debugging print a message first.  */
-		  if (__builtin_expect (GLRO(dl_debug_mask)
-					& DL_DEBUG_IMPCALLS, 0))
-		    _dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
-				      DSO_FILENAME (l->l_name),
-				      ns);
-
-		  /* First see whether an array is given.  */
-		  if (l->l_info[DT_FINI_ARRAY] != NULL)
+		  /* Make sure nothing happens if we are called twice.  */
+		  l->l_init_called = 0;
+
+		  /* Is there a destructor function?  */
+		  if (l->l_info[DT_FINI_ARRAY] != NULL
+		      || l->l_info[DT_FINI] != NULL)
 		    {
-		      ElfW(Addr) *array =
-			(ElfW(Addr) *) (l->l_addr
-					+ l->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
-		      unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
-					/ sizeof (ElfW(Addr)));
-		      while (i-- > 0)
-			((fini_t) array[i]) ();
+		      /* When debugging print a message first.  */
+		      if (__builtin_expect (GLRO(dl_debug_mask)
+					    & DL_DEBUG_IMPCALLS, 0))
+			_dl_debug_printf ("\ncalling fini: %s [%lu]\n\n",
+					  DSO_FILENAME (l->l_name),
+					  ns);
+
+		      /* First see whether an array is given.  */
+		      if (l->l_info[DT_FINI_ARRAY] != NULL)
+			{
+			  ElfW(Addr) *array =
+			    (ElfW(Addr) *) (l->l_addr
+					    + l->l_info[DT_FINI_ARRAY]->d_un.d_ptr);
+			  unsigned int i = (l->l_info[DT_FINI_ARRAYSZ]->d_un.d_val
+					    / sizeof (ElfW(Addr)));
+			  while (i-- > 0)
+			    ((fini_t) array[i]) ();
+			}
+
+		      /* Next try the old-style destructor.  */
+		      if (l->l_info[DT_FINI] != NULL)
+			DL_CALL_DT_FINI
+			  (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr);
 		    }
 
-		  /* Next try the old-style destructor.  */
-		  if (l->l_info[DT_FINI] != NULL)
-		     DL_CALL_DT_FINI(l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr);
-		}
-
 #ifdef SHARED
-	      /* Auditing checkpoint: another object closed.  */
-	      if (!do_audit && __builtin_expect (GLRO(dl_naudit) > 0, 0))
-		{
-		  struct audit_ifaces *afct = GLRO(dl_audit);
-		  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
+		  /* Auditing checkpoint: another object closed.  */
+		  if (!do_audit && __builtin_expect (GLRO(dl_naudit) > 0, 0))
 		    {
-		      if (afct->objclose != NULL)
-			/* Return value is ignored.  */
-			(void) afct->objclose (&l->l_audit[cnt].cookie);
-
-		      afct = afct->next;
+		      struct audit_ifaces *afct = GLRO(dl_audit);
+		      for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
+			{
+			  if (afct->objclose != NULL)
+			    /* Return value is ignored.  */
+			    (void) afct->objclose (&l->l_audit[cnt].cookie);
+
+			  afct = afct->next;
+			}
 		    }
-		}
 #endif
-	    }
+		}
 
-	  /* Correct the previous increment.  */
-	  --l->l_direct_opencount;
+	      /* Correct the previous increment.  */
+	      --l->l_direct_opencount;
+	    }
 	}
     }