path: root/elf/dl-misc.c
author		Paul Pluzhnikov <ppluzhnikov@google.com>	2013-12-18 16:46:18 -0800
committer	Paul Pluzhnikov <ppluzhnikov@google.com>	2013-12-18 16:46:18 -0800
commit		1f33d36a8a9e78c81bed59b47f260723f56bb7e6 (patch)
tree		ff72018a2e0021f7a1110289681ed8ab4516df37 /elf/dl-misc.c
parent		35e8f7ab94c910659de9d507aa0f3e1f8973d914 (diff)
download	glibc-1f33d36a8a9e78c81bed59b47f260723f56bb7e6.tar.gz
		glibc-1f33d36a8a9e78c81bed59b47f260723f56bb7e6.tar.xz
		glibc-1f33d36a8a9e78c81bed59b47f260723f56bb7e6.zip
Patch 2/4 of the effort to make TLS access async-signal-safe.
Add a signal-safe malloc replacement.

2013-12-18  Andrew Hunter  <ahh@google.com>

	* sysdeps/generic/ldsodefs.h (__signal_safe_memalign): New prototype.
	(__signal_safe_malloc, __signal_safe_free): Likewise.
	(__signal_safe_realloc, __signal_safe_calloc): Likewise.
	* elf/dl-misc.c (__signal_safe_allocator_header): New struct.
	(__signal_safe_memalign, __signal_safe_malloc): New functions.
	(__signal_safe_free, __signal_safe_realloc): Likewise.
	(__signal_safe_calloc): Likewise.
	* elf/dl-tls.c (allocate_dtv, _dl_clear_dtv): Call signal-safe
	functions.
	(_dl_deallocate_tls, _dl_update_slotinfo): Likewise.
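
The elf/dl-tls.c half of this change is not shown on this page (the diff below is limited to elf/dl-misc.c), but the ChangeLog entries above describe its shape: allocation sites that previously used malloc, calloc, and free switch to the signal-safe replacements. The sketch below is purely illustrative and is not the glibc code; the wrapper function names and the simplified dtv_t are assumptions made for the example.

/* Hypothetical illustration of the substitution described in the
   ChangeLog for elf/dl-tls.c: DTV storage that used to come from the
   ordinary allocator now comes from the signal-safe one, so a TLS
   access from a signal handler never re-enters malloc.  */

#include <stddef.h>

/* Stand-ins for the real declarations added to sysdeps/generic/ldsodefs.h.  */
extern void *__signal_safe_calloc (size_t nmemb, size_t size);
extern void __signal_safe_free (void *ptr);

/* Simplified stand-in for the real dtv_t of the TLS implementation.  */
typedef struct { void *pointer; } dtv_t;

static dtv_t *
allocate_dtv_storage (size_t dtv_length)
{
  /* Previously something like: calloc (dtv_length + 2, sizeof (dtv_t)).  */
  return __signal_safe_calloc (dtv_length + 2, sizeof (dtv_t));
}

static void
free_dtv_storage (dtv_t *dtv)
{
  /* Previously: free (dtv).  */
  __signal_safe_free (dtv);
}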
Diffstat (limited to 'elf/dl-misc.c')
-rw-r--r--	elf/dl-misc.c	| 132
1 file changed, 132 insertions(+), 0 deletions(-)
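
One note before the diff itself: the subtlest part of the new code is where __signal_safe_memalign places the allocation header when the requested boundary is larger than a page. The worked example below mirrors that arithmetic on made-up numbers (a 4K page size, a 64K boundary, and an arbitrary mmap address chosen for the example); it is an illustration written for this page, not code from the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  const intptr_t pg = 4096;		/* assumed page size */
  const intptr_t boundary = 65536;	/* 64K alignment, i.e. 16 pages */
  const intptr_t size = 8192;		/* caller asks for 2 pages */

  /* padded_size = roundup (size, pg) + boundary + pg, i.e. K + J + 1
     pages (size is already page-aligned in this example).  */
  intptr_t padded_size = size + boundary + pg;

  /* Pretend mmap returned this page-aligned, but not 64K-aligned, address.  */
  intptr_t actual = 0x70003000;
  intptr_t actual_pg = actual / pg;
  intptr_t boundary_pg = boundary / pg;

  /* First boundary-aligned page strictly after the start of the mapping,
     computed the same way as in the new __signal_safe_memalign.  */
  intptr_t start_pg = actual_pg + boundary_pg;
  start_pg -= start_pg % boundary_pg;

  intptr_t start = start_pg * pg;
  assert (start % boundary == 0);		  /* the block is 64K-aligned */
  assert (start_pg > actual_pg);		  /* a page is left for the header */
  assert (start + size <= actual + padded_size);  /* the block fits the mapping */

  printf ("mapping at %#lx, aligned block at %#lx, header just below it\n",
	  (long) actual, (long) start);
  return 0;
}

The leading pages, apart from the one that holds the header, are then returned with munmap; the trailing slack stays mapped, which is part of why the comment in the patch calls the scheme wasteful.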
diff --git a/elf/dl-misc.c b/elf/dl-misc.c
index 5fc13a44a4..cec65d083a 100644
--- a/elf/dl-misc.c
+++ b/elf/dl-misc.c
@@ -19,6 +19,7 @@
 #include <assert.h>
 #include <fcntl.h>
 #include <ldsodefs.h>
+#include <libc-symbols.h>
 #include <limits.h>
 #include <link.h>
 #include <stdarg.h>
@@ -364,3 +365,134 @@ _dl_higher_prime_number (unsigned long int n)
 
   return *low;
 }
+
+/* To support accessing TLS variables from signal handlers, we need an
+   async-signal-safe memory allocator.  These routines are never
+   themselves invoked reentrantly (all calls to them are surrounded by
+   signal masks) but may be invoked concurrently from many threads.
+   The current implementation is neither particularly performant nor
+   space-efficient, but it will be used rarely (and only in binaries
+   that use dlopen).  The API matches that of malloc() and friends.  */
+
+struct __signal_safe_allocator_header
+{
+  size_t size;
+  void *start;
+};
+
+void *weak_function
+__signal_safe_memalign (size_t boundary, size_t size)
+{
+  struct __signal_safe_allocator_header *header;
+  if (boundary < sizeof (*header))
+    boundary = sizeof (*header);
+
+  /* Boundary must be a power of two.  */
+  if ((boundary & (boundary - 1)) != 0)
+    return NULL;
+
+  size_t pg = GLRO (dl_pagesize);
+  size_t padded_size;
+  if (boundary <= pg)
+    {
+      /* We'll get a pointer certainly aligned to boundary, so just
+	 add one more boundary-sized chunk to hold the header.  */
+      padded_size = roundup (size, boundary) + boundary;
+    }
+  else
+    {
+      /* If we want K pages aligned to a J-page boundary, K+J+1 pages
+	 contains at least one such region that isn't directly at the start
+	 (so we can place the header.)	This is wasteful, but you're the one
+	 who wanted 64K-aligned TLS.  */
+      padded_size = roundup (size, pg) + boundary + pg;
+    }
+
+
+  size_t actual_size = roundup (padded_size, pg);
+  void *actual = mmap (NULL, actual_size, PROT_READ | PROT_WRITE,
+		       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  if (actual == MAP_FAILED)
+    return NULL;
+
+  if (boundary <= pg)
+    {
+      header = actual + boundary - sizeof (*header);
+    }
+  else
+    {
+      intptr_t actual_pg = ((intptr_t) actual) / pg;
+      intptr_t boundary_pg = boundary / pg;
+      intptr_t start_pg = actual_pg + boundary_pg;
+      start_pg -= start_pg % boundary_pg;
+      if (start_pg > (actual_pg + 1))
+	{
+	  int ret = munmap (actual, (start_pg - actual_pg - 1) * pg);
+	  assert (ret == 0);
+	  actual = (void *) ((start_pg - 1) * pg);
+	}
+      char *start = (void *) (start_pg * pg);
+      header = (void *) (start - sizeof (*header));
+
+    }
+  header->size = actual_size;
+  header->start = actual;
+  void *ptr = header;
+  ptr += sizeof (*header);
+  if (((intptr_t) ptr) % boundary != 0)
+    _dl_fatal_printf ("__signal_safe_memalign produced incorrect alignment\n");
+  return ptr;
+}
+
+void * weak_function
+__signal_safe_malloc (size_t size)
+{
+  return __signal_safe_memalign (1, size);
+}
+
+void weak_function
+__signal_safe_free (void *ptr)
+{
+  if (ptr == NULL)
+    return;
+
+  struct __signal_safe_allocator_header *header = ptr - sizeof (*header);
+  int ret = munmap (header->start, header->size);
+
+  assert (ret == 0);
+}
+
+void * weak_function
+__signal_safe_realloc (void *ptr, size_t size)
+{
+  if (size == 0)
+    {
+      __signal_safe_free (ptr);
+      return NULL;
+    }
+  if (ptr == NULL)
+    return __signal_safe_malloc (size);
+
+  struct __signal_safe_allocator_header *header = ptr - sizeof (*header);
+  size_t old_size = header->size;
+  if (old_size - sizeof (*header) >= size)
+    return ptr;
+
+  void *new_ptr = __signal_safe_malloc (size);
+  if (new_ptr == NULL)
+    return NULL;
+
+  memcpy (new_ptr, ptr, old_size - sizeof (*header));
+  __signal_safe_free (ptr);
+
+  return new_ptr;
+}
+
+void * weak_function
+__signal_safe_calloc (size_t nmemb, size_t size)
+{
+  void *ptr = __signal_safe_malloc (nmemb * size);
+  if (ptr == NULL)
+    return NULL;
+  return memset (ptr, 0, nmemb * size);
+}
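
Finally, a note on how these functions are meant to be called. The comment in the new code says the routines are never entered reentrantly because every call is surrounded by signal masks; the callers, not the allocator, do the blocking. The sketch below only illustrates that discipline: the wrapper function and the extern declarations are assumptions made for the example, and the real callers inside the dynamic linker use glibc-internal interfaces rather than the public sigprocmask (in an ordinary multithreaded program pthread_sigmask would be the portable spelling).

#include <signal.h>
#include <stddef.h>

extern void *__signal_safe_memalign (size_t boundary, size_t size);
extern void __signal_safe_free (void *ptr);

static void *
alloc_with_signals_blocked (size_t boundary, size_t size)
{
  sigset_t all, old;
  void *p;

  /* Block every signal so a handler cannot interrupt the allocator and
     then call back into it on the same thread.  */
  sigfillset (&all);
  sigprocmask (SIG_BLOCK, &all, &old);

  p = __signal_safe_memalign (boundary, size);

  sigprocmask (SIG_SETMASK, &old, NULL);
  return p;
}

Because each allocation is its own mmap'ed region described by the header stored just below the returned pointer, __signal_safe_free can simply munmap it without taking any lock, which is what keeps concurrent use from many threads safe even though the allocator has no internal synchronization.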