author    Siddhesh Poyarekar <siddhesh@sourceware.org>  2021-07-22 18:38:10 +0530
committer Siddhesh Poyarekar <siddhesh@sourceware.org>  2021-07-22 18:38:10 +0530
commit    0552fd2c7d4e8a570cb4fe4dfe65e96f6d24b0cd (patch)
tree      baf150b3e1b728c6c13b95116241a61bff5f21ab /malloc
parent    b5bd5bfe88f496463ec9fab680a8edf64d7c2a42 (diff)
Move malloc_{g,s}et_state to libc_malloc_debug
These deprecated functions are only safe to call from
__malloc_initialize_hook and, as a result, are not useful in the
general case.  Move the implementations to libc_malloc_debug so that
existing binaries that need them now have to preload the debug DSO to
work correctly.

This also allows simplification of the core malloc implementation by
dropping all the undumping support code that was added to make
malloc_set_state work.

One known breakage is that of ancient Emacs binaries that depend on
these functions; they will now crash when run with this libc.  With
LD_BIND_NOW=1 they terminate immediately because malloc_set_state
cannot be found, but with lazy binding they crash in unpredictable
ways.  Such binaries need a preloaded libc_malloc_debug.so so that
the initialization hook is executed and their malloc implementation
works properly.
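For illustration, the calling pattern these binaries rely on looks
roughly like the sketch below.  This is a minimal reconstruction, not
code from any actual Emacs release: dumped_heap and restore_heap are
hypothetical names, and the deprecated declarations are written out
by hand because modern <malloc.h> no longer provides them.

#include <stddef.h>

/* Deprecated interface; declared here by hand (see above).  */
extern int malloc_set_state (void *ptr);

/* Hypothetical pointer to a heap image saved by malloc_get_state in
   an earlier run (the saving side is no longer implemented).  */
extern void *dumped_heap;

static void
restore_heap (void)
{
  /* malloc_set_state must run before the first call into malloc,
     which is why __malloc_initialize_hook is the only safe place to
     call it from.  */
  if (dumped_heap != NULL)
    malloc_set_state (dumped_heap);
}

/* Defining this weak variable makes the initialization code call
   restore_heap before the first allocation.  */
void (*__malloc_initialize_hook) (void) = restore_heap;

After this commit, such a binary only works when started with
something like LD_PRELOAD=libc_malloc_debug.so, so that the hook runs
and malloc_set_state resolves.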

Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Tested-by: Carlos O'Donell <carlos@redhat.com>
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/Makefile       |   5
-rw-r--r--  malloc/Versions       |   4
-rw-r--r--  malloc/hooks.c        | 114
-rw-r--r--  malloc/malloc-debug.c | 182
-rw-r--r--  malloc/malloc.c       |  55
5 files changed, 193 insertions(+), 167 deletions(-)
diff --git a/malloc/Makefile b/malloc/Makefile
index b89af21d19..96328da247 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -331,3 +331,8 @@ tst-compathooks-on-malloc-check-ENV = \
 	LD_PRELOAD=$(objpfx)libc_malloc_debug.so
 tst-mallocstate-ENV = LD_PRELOAD=$(objpfx)libc_malloc_debug.so
 tst-mallocstate-malloc-check-ENV = LD_PRELOAD=$(objpfx)libc_malloc_debug.so
+
+# These tests need malloc_get_state/malloc_set_state, which are in
+# libc_malloc_debug.so.
+$(objpfx)tst-mallocstate: $(objpfx)libc_malloc_debug.so
+$(objpfx)tst-mallocstate-malloc-check: $(objpfx)libc_malloc_debug.so
diff --git a/malloc/Versions b/malloc/Versions
index cbb73d18c1..0a0bcf4bb5 100644
--- a/malloc/Versions
+++ b/malloc/Versions
@@ -25,7 +25,7 @@ libc {
     free;
 
     # m*
-    mallinfo; malloc; malloc_get_state; malloc_set_state; malloc_stats;
+    mallinfo; malloc; malloc_stats;
     malloc_trim; malloc_usable_size; mallopt; mcheck; memalign; mprobe; mtrace;
     muntrace;
 
@@ -121,6 +121,8 @@ libc_malloc_debug {
     muntrace;
 
     mallinfo;
+    malloc_get_state;
+    malloc_set_state;
     malloc_stats;
     malloc_trim;
     malloc_usable_size;
diff --git a/malloc/hooks.c b/malloc/hooks.c
index 6c212fbc21..8e1afe55e5 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -39,120 +39,6 @@ void *weak_variable (*__malloc_hook) (size_t, const void *) = NULL;
 void *weak_variable (*__realloc_hook) (void *, size_t, const void *) = NULL;
 void *weak_variable (*__memalign_hook) (size_t, size_t, const void *) = NULL;
 
-#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)
-
-/* Support for restoring dumped heaps contained in historic Emacs
-   executables.  The heap saving feature (malloc_get_state) is no
-   longer implemented in this version of glibc, but we have a heap
-   rewriter in malloc_set_state which transforms the heap into a
-   version compatible with current malloc.  */
-
-#define MALLOC_STATE_MAGIC   0x444c4541l
-#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
-
-struct malloc_save_state
-{
-  long magic;
-  long version;
-  mbinptr av[NBINS * 2 + 2];
-  char *sbrk_base;
-  int sbrked_mem_bytes;
-  unsigned long trim_threshold;
-  unsigned long top_pad;
-  unsigned int n_mmaps_max;
-  unsigned long mmap_threshold;
-  int check_action;
-  unsigned long max_sbrked_mem;
-  unsigned long max_total_mem;	/* Always 0, for backwards compatibility.  */
-  unsigned int n_mmaps;
-  unsigned int max_n_mmaps;
-  unsigned long mmapped_mem;
-  unsigned long max_mmapped_mem;
-  int using_malloc_checking;
-  unsigned long max_fast;
-  unsigned long arena_test;
-  unsigned long arena_max;
-  unsigned long narenas;
-};
-
-/* Dummy implementation which always fails.  We need to provide this
-   symbol so that existing Emacs binaries continue to work with
-   BIND_NOW.  */
-void *
-attribute_compat_text_section
-malloc_get_state (void)
-{
-  __set_errno (ENOSYS);
-  return NULL;
-}
-compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
-
-int
-attribute_compat_text_section
-malloc_set_state (void *msptr)
-{
-  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
-
-  if (ms->magic != MALLOC_STATE_MAGIC)
-    return -1;
-
-  /* Must fail if the major version is too high. */
-  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
-    return -2;
-
-  /* We do not need to perform locking here because malloc_set_state
-     must be called before the first call into the malloc subsytem
-     (usually via __malloc_initialize_hook).  pthread_create always
-     calls calloc and thus must be called only afterwards, so there
-     cannot be more than one thread when we reach this point.  */
-
-  /* Patch the dumped heap.  We no longer try to integrate into the
-     existing heap.  Instead, we mark the existing chunks as mmapped.
-     Together with the update to dumped_main_arena_start and
-     dumped_main_arena_end, realloc and free will recognize these
-     chunks as dumped fake mmapped chunks and never free them.  */
-
-  /* Find the chunk with the lowest address with the heap.  */
-  mchunkptr chunk = NULL;
-  {
-    size_t *candidate = (size_t *) ms->sbrk_base;
-    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
-    while (candidate < end)
-      if (*candidate != 0)
-	{
-	  chunk = mem2chunk ((void *) (candidate + 1));
-	  break;
-	}
-      else
-	++candidate;
-  }
-  if (chunk == NULL)
-    return 0;
-
-  /* Iterate over the dumped heap and patch the chunks so that they
-     are treated as fake mmapped chunks.  */
-  mchunkptr top = ms->av[2];
-  while (chunk < top)
-    {
-      if (inuse (chunk))
-	{
-	  /* Mark chunk as mmapped, to trigger the fallback path.  */
-	  size_t size = chunksize (chunk);
-	  set_head (chunk, size | IS_MMAPPED);
-	}
-      chunk = next_chunk (chunk);
-    }
-
-  /* The dumped fake mmapped chunks all lie in this address range.  */
-  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
-  dumped_main_arena_end = top;
-
-  return 0;
-}
-compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
-
-#endif	/* SHLIB_COMPAT */
-
 /*
  * Local variables:
  * c-basic-offset: 2
diff --git a/malloc/malloc-debug.c b/malloc/malloc-debug.c
index f5290aaa6d..b7744460e9 100644
--- a/malloc/malloc-debug.c
+++ b/malloc/malloc-debug.c
@@ -145,6 +145,19 @@ memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
 
 static size_t pagesize;
 
+/* These variables are used for undumping support.  Chunks are marked
+   as using mmap, but we leave them alone if they fall into this
+   range.  NB: The chunk size for these chunks only includes the
+   initial size field (of SIZE_SZ bytes), there is no trailing size
+   field (unlike with regular mmapped chunks).  */
+static mchunkptr dumped_main_arena_start; /* Inclusive.  */
+static mchunkptr dumped_main_arena_end;   /* Exclusive.  */
+
+/* True if the pointer falls into the dumped arena.  Use this after
+   chunk_is_mmapped indicates a chunk is mmapped.  */
+#define DUMPED_MAIN_ARENA_CHUNK(p) \
+  ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
+
 /* The allocator functions.  */
 
 static void *
@@ -184,7 +197,9 @@ __debug_free (void *mem)
   if (__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK))
     mem = free_mcheck (mem);
 
-  if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
+  if (DUMPED_MAIN_ARENA_CHUNK (mem2chunk (mem)))
+    /* Do nothing.  */;
+  else if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
     free_check (mem);
   else
     __libc_free (mem);
@@ -207,7 +222,32 @@ __debug_realloc (void *oldmem, size_t bytes)
   if ((!__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK)
        || !realloc_mcheck_before (&oldmem, &bytes, &oldsize, &victim)))
     {
-      if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
+      mchunkptr oldp = mem2chunk (oldmem);
+
+      /* If this is a faked mmapped chunk from the dumped main arena,
+	 always make a copy (and do not free the old chunk).  */
+      if (DUMPED_MAIN_ARENA_CHUNK (oldp))
+	{
+	  if (bytes == 0 && oldmem != NULL)
+	    victim = NULL;
+	  else
+	    {
+	      const INTERNAL_SIZE_T osize = chunksize (oldp);
+	      /* Must alloc, copy, free. */
+	      victim = __debug_malloc (bytes);
+	      /* Copy as many bytes as are available from the old chunk
+		 and fit into the new size.  NB: The overhead for faked
+		 mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for
+		 regular mmapped chunks.  */
+	      if (victim != NULL)
+		{
+		  if (bytes > osize - SIZE_SZ)
+		    bytes = osize - SIZE_SZ;
+		  memcpy (victim, oldmem, bytes);
+		}
+	    }
+	}
+      else if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
 	victim =  realloc_check (oldmem, bytes);
       else
 	victim = __libc_realloc (oldmem, bytes);
@@ -357,6 +397,13 @@ malloc_usable_size (void *mem)
   if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
     return malloc_check_get_size (mem);
 
+  if (mem != NULL)
+    {
+      mchunkptr p = mem2chunk (mem);
+      if (DUMPED_MAIN_ARENA_CHUNK (p))
+        return chunksize (p) - SIZE_SZ;
+    }
+
   return musable (mem);
 }
 
@@ -453,3 +500,134 @@ malloc_trim (size_t s)
 
   return LIBC_SYMBOL (malloc_trim) (s);
 }
+
+#if SHLIB_COMPAT (libc_malloc_debug, GLIBC_2_0, GLIBC_2_25)
+
+/* Support for restoring dumped heaps contained in historic Emacs
+   executables.  The heap saving feature (malloc_get_state) is no
+   longer implemented in this version of glibc, but we have a heap
+   rewriter in malloc_set_state which transforms the heap into a
+   version compatible with current malloc.  */
+
+#define MALLOC_STATE_MAGIC   0x444c4541l
+#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
+
+struct malloc_save_state
+{
+  long magic;
+  long version;
+  mbinptr av[NBINS * 2 + 2];
+  char *sbrk_base;
+  int sbrked_mem_bytes;
+  unsigned long trim_threshold;
+  unsigned long top_pad;
+  unsigned int n_mmaps_max;
+  unsigned long mmap_threshold;
+  int check_action;
+  unsigned long max_sbrked_mem;
+  unsigned long max_total_mem;	/* Always 0, for backwards compatibility.  */
+  unsigned int n_mmaps;
+  unsigned int max_n_mmaps;
+  unsigned long mmapped_mem;
+  unsigned long max_mmapped_mem;
+  int using_malloc_checking;
+  unsigned long max_fast;
+  unsigned long arena_test;
+  unsigned long arena_max;
+  unsigned long narenas;
+};
+
+/* Dummy implementation which always fails.  We need to provide this
+   symbol so that existing Emacs binaries continue to work with
+   BIND_NOW.  */
+void *
+malloc_get_state (void)
+{
+  __set_errno (ENOSYS);
+  return NULL;
+}
+compat_symbol (libc_malloc_debug, malloc_get_state, malloc_get_state,
+	       GLIBC_2_0);
+
+int
+malloc_set_state (void *msptr)
+{
+  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
+
+  if (ms->magic != MALLOC_STATE_MAGIC)
+    return -1;
+
+  /* Must fail if the major version is too high. */
+  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
+    return -2;
+
+  if (debug_initialized == 1)
+    return -1;
+
+  bool check_was_enabled = __is_malloc_debug_enabled (MALLOC_CHECK_HOOK);
+
+  /* It's not too late, so disable MALLOC_CHECK_ and all of the hooks.  */
+  __malloc_hook = NULL;
+  __realloc_hook = NULL;
+  __free_hook = NULL;
+  __memalign_hook = NULL;
+  __malloc_debug_disable (MALLOC_CHECK_HOOK);
+
+  /* We do not need to perform locking here because malloc_set_state
+     must be called before the first call into the malloc subsystem (usually
+     via __malloc_initialize_hook).  pthread_create always calls calloc and
+     thus must be called only afterwards, so there cannot be more than one
+     thread when we reach this point.  Also handle initialization if we
+     ended up being called either before the first malloc or through the
+     hook when malloc-check was enabled.  */
+  if (debug_initialized < 0)
+    generic_hook_ini ();
+  else if (check_was_enabled)
+    __libc_free (__libc_malloc (0));
+
+  /* Patch the dumped heap.  We no longer try to integrate into the
+     existing heap.  Instead, we mark the existing chunks as mmapped.
+     Together with the update to dumped_main_arena_start and
+     dumped_main_arena_end, realloc and free will recognize these
+     chunks as dumped fake mmapped chunks and never free them.  */
+
+  /* Find the chunk with the lowest address within the heap.  */
+  mchunkptr chunk = NULL;
+  {
+    size_t *candidate = (size_t *) ms->sbrk_base;
+    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
+    while (candidate < end)
+      if (*candidate != 0)
+	{
+	  chunk = mem2chunk ((void *) (candidate + 1));
+	  break;
+	}
+      else
+	++candidate;
+  }
+  if (chunk == NULL)
+    return 0;
+
+  /* Iterate over the dumped heap and patch the chunks so that they
+     are treated as fake mmapped chunks.  */
+  mchunkptr top = ms->av[2];
+  while (chunk < top)
+    {
+      if (inuse (chunk))
+	{
+	  /* Mark chunk as mmapped, to trigger the fallback path.  */
+	  size_t size = chunksize (chunk);
+	  set_head (chunk, size | IS_MMAPPED);
+	}
+      chunk = next_chunk (chunk);
+    }
+
+  /* The dumped fake mmapped chunks all lie in this address range.  */
+  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
+  dumped_main_arena_end = top;
+
+  return 0;
+}
+compat_symbol (libc_malloc_debug, malloc_set_state, malloc_set_state,
+	       GLIBC_2_0);
+#endif
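
To make the rewriting pass in malloc_set_state above concrete, here
is a self-contained toy model.  It is deliberately not glibc's real
chunk layout (in real malloc the in-use bit lives in the following
chunk's header and the walk advances by chunk size); each array
element here is a single size-plus-flags word so that only the
marking step remains:

#include <stddef.h>
#include <stdio.h>

#define INUSE      0x1   /* toy flag; see the caveat above */
#define IS_MMAPPED 0x2
#define CHUNKSIZE(w) ((w) & ~(size_t) (INUSE | IS_MMAPPED))

int
main (void)
{
  /* Toy dumped heap: one header word per chunk, payloads omitted.  */
  size_t heap[] = { 32 | INUSE, 48, 16 | INUSE };
  size_t n = sizeof heap / sizeof heap[0];

  /* The undumping pass: mark every in-use chunk as mmapped so that
     free and realloc take the fallback path and never try to return
     dumped memory to the system.  */
  for (size_t i = 0; i < n; i++)
    if (heap[i] & INUSE)
      heap[i] = CHUNKSIZE (heap[i]) | IS_MMAPPED;

  for (size_t i = 0; i < n; i++)
    printf ("chunk %zu: size %zu%s\n", i, CHUNKSIZE (heap[i]),
            (heap[i] & IS_MMAPPED) ? " [fake mmapped]" : "");
  return 0;
}

The [dumped_main_arena_start, dumped_main_arena_end) range recorded
at the end of the real malloc_set_state is then what the
DUMPED_MAIN_ARENA_CHUNK macro checks, so that __debug_free,
__debug_realloc and malloc_usable_size treat these chunks specially.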
diff --git a/malloc/malloc.c b/malloc/malloc.c
index b8fcb2f2d3..38b649fcba 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1921,19 +1921,6 @@ static struct malloc_state main_arena =
   .attached_threads = 1
 };
 
-/* These variables are used for undumping support.  Chunked are marked
-   as using mmap, but we leave them alone if they fall into this
-   range.  NB: The chunk size for these chunks only includes the
-   initial size field (of SIZE_SZ bytes), there is no trailing size
-   field (unlike with regular mmapped chunks).  */
-static mchunkptr dumped_main_arena_start; /* Inclusive.  */
-static mchunkptr dumped_main_arena_end;   /* Exclusive.  */
-
-/* True if the pointer falls into the dumped arena.  Use this after
-   chunk_is_mmapped indicates a chunk is mmapped.  */
-#define DUMPED_MAIN_ARENA_CHUNK(p) \
-  ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
-
 /* There is only one instance of the malloc parameters.  */
 
 static struct malloc_par mp_ =
@@ -2083,7 +2070,7 @@ do_check_chunk (mstate av, mchunkptr p)
           assert (prev_inuse (p));
         }
     }
-  else if (!DUMPED_MAIN_ARENA_CHUNK (p))
+  else
     {
       /* address is outside main heap  */
       if (contiguous (av) && av->top != initial_top (av))
@@ -2948,11 +2935,6 @@ munmap_chunk (mchunkptr p)
 
   assert (chunk_is_mmapped (p));
 
-  /* Do nothing if the chunk is a faked mmapped chunk in the dumped
-     main arena.  We never free this memory.  */
-  if (DUMPED_MAIN_ARENA_CHUNK (p))
-    return;
-
   uintptr_t mem = (uintptr_t) chunk2mem (p);
   uintptr_t block = (uintptr_t) p - prev_size (p);
   size_t total_size = prev_size (p) + size;
@@ -3275,8 +3257,7 @@ __libc_free (void *mem)
 	 Dumped fake mmapped chunks do not affect the threshold.  */
       if (!mp_.no_dyn_threshold
           && chunksize_nomask (p) > mp_.mmap_threshold
-          && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
-	  && !DUMPED_MAIN_ARENA_CHUNK (p))
+          && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
         {
           mp_.mmap_threshold = chunksize (p);
           mp_.trim_threshold = 2 * mp_.mmap_threshold;
@@ -3343,12 +3324,9 @@ __libc_realloc (void *oldmem, size_t bytes)
   /* Little security check which won't hurt performance: the allocator
      never wrapps around at the end of the address space.  Therefore
      we can exclude some size values which might appear here by
-     accident or by "design" from some intruder.  We need to bypass
-     this check for dumped fake mmap chunks from the old main arena
-     because the new malloc may provide additional alignment.  */
+     accident or by "design" from some intruder.  */
   if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
-       || __builtin_expect (misaligned_chunk (oldp), 0))
-      && !DUMPED_MAIN_ARENA_CHUNK (oldp))
+       || __builtin_expect (misaligned_chunk (oldp), 0)))
       malloc_printerr ("realloc(): invalid pointer");
 
   if (!checked_request2size (bytes, &nb))
@@ -3359,24 +3337,6 @@ __libc_realloc (void *oldmem, size_t bytes)
 
   if (chunk_is_mmapped (oldp))
     {
-      /* If this is a faked mmapped chunk from the dumped main arena,
-	 always make a copy (and do not free the old chunk).  */
-      if (DUMPED_MAIN_ARENA_CHUNK (oldp))
-	{
-	  /* Must alloc, copy, free. */
-	  void *newmem = __libc_malloc (bytes);
-	  if (newmem == 0)
-	    return NULL;
-	  /* Copy as many bytes as are available from the old chunk
-	     and fit into the new size.  NB: The overhead for faked
-	     mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for
-	     regular mmapped chunks.  */
-	  if (bytes > oldsize - SIZE_SZ)
-	    bytes = oldsize - SIZE_SZ;
-	  memcpy (newmem, oldmem, bytes);
-	  return newmem;
-	}
-
       void *newmem;
 
 #if HAVE_MREMAP
@@ -5056,12 +5016,7 @@ musable (void *mem)
       p = mem2chunk (mem);
 
       if (chunk_is_mmapped (p))
-	{
-	  if (DUMPED_MAIN_ARENA_CHUNK (p))
-	    result = chunksize (p) - SIZE_SZ;
-	  else
-	    result = chunksize (p) - CHUNK_HDR_SZ;
-	}
+	result = chunksize (p) - CHUNK_HDR_SZ;
       else if (inuse (p))
 	result = memsize (p);