author    Florian Weimer <fweimer@redhat.com>  2016-05-13 14:16:39 +0200
committer Florian Weimer <fweimer@redhat.com>  2016-05-13 14:16:39 +0200
commit    4cf6c72fd2a482e7499c29162349810029632c3f (patch)
tree      4c6a45d1e76bb4eb1afdfb39cf96a2b2c5751753 /malloc/malloc.c
parent    567c710bcad634bc94a21f00b9e1c24ca9f9f57c (diff)
malloc: Rewrite dumped heap for compatibility in __malloc_set_state
This will allow us to change many aspects of the malloc implementation
while preserving compatibility with existing Emacs binaries.

As a result, existing Emacs binaries will have a larger RSS, and Emacs
needs a few more milliseconds to start.  This overhead is specific
to Emacs (and will go away once Emacs switches to its internal malloc).
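
For context, the dumped heap comes from glibc's legacy malloc_get_state / malloc_set_state interface, which unexec-based Emacs dumping relies on.  A rough sketch of that pattern (dump_heap and undump_heap are illustrative helpers, not glibc or Emacs code, and this assumes a glibc old enough to still declare these functions in <malloc.h>):

#include <malloc.h>
#include <stdio.h>

static void *saved_malloc_state;

/* At dump time: snapshot the allocator's internal state; the heap
   pages and this opaque blob are written into the dumped executable
   (via unexec in real Emacs).  */
static void
dump_heap (void)
{
  saved_malloc_state = malloc_get_state ();
}

/* At startup of the dumped binary: hand the old heap back to malloc.
   After this commit, __malloc_set_state marks the dumped chunks as
   fake mmapped chunks and records their address range instead of
   adopting the old heap layout directly.  */
static void
undump_heap (void)
{
  if (malloc_set_state (saved_malloc_state) != 0)
    fputs ("malloc_set_state failed\n", stderr);
}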

The new checks to make free and realloc compatible with the dumped heap
are confined to the mmap paths, which are already quite slow due to the
munmap overhead.

This commit weakens some security checks, but only for heap pointers
in the dumped main arena.  By default, this area is empty, so those
checks are as effective as before.
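
The effect on the deallocation paths is simply that anything inside the recorded dumped range is treated as permanent.  A minimal standalone sketch of that range-check approach (not glibc's actual code; mark_dumped_region and release_block are hypothetical helpers):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static uintptr_t dumped_start;  /* Inclusive.  */
static uintptr_t dumped_end;    /* Exclusive.  */

/* True if P lies inside the recorded dumped region.  */
#define IN_DUMPED_REGION(p) \
  ((uintptr_t) (p) >= dumped_start && (uintptr_t) (p) < dumped_end)

/* Record the address range occupied by the dumped heap.  */
static void
mark_dumped_region (void *start, size_t length)
{
  dumped_start = (uintptr_t) start;
  dumped_end = dumped_start + length;
}

/* Release an mmapped block, unless it is one of the fake mmapped
   chunks in the dumped region, which are never unmapped.  */
static void
release_block (void *block, size_t length)
{
  if (IN_DUMPED_REGION (block))
    return;
  munmap (block, length);
}
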
Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--  malloc/malloc.c  55
1 file changed, 46 insertions(+), 9 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index ea97df2cb4..44524ff984 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1758,6 +1758,17 @@ static struct malloc_state main_arena =
   .attached_threads = 1
 };
 
+/* These variables are used for undumping support.  Chunks are marked
+   as using mmap, but we leave them alone if they fall into this
+   range.  */
+static mchunkptr dumped_main_arena_start; /* Inclusive.  */
+static mchunkptr dumped_main_arena_end;   /* Exclusive.  */
+
+/* True if the pointer falls into the dumped arena.  Use this after
+   chunk_is_mmapped indicates a chunk is mmapped.  */
+#define DUMPED_MAIN_ARENA_CHUNK(p) \
+  ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
+
 /* There is only one instance of the malloc parameters.  */
 
 static struct malloc_par mp_ =
@@ -1947,7 +1958,7 @@ do_check_chunk (mstate av, mchunkptr p)
           assert (prev_inuse (p));
         }
     }
-  else
+  else if (!DUMPED_MAIN_ARENA_CHUNK (p))
     {
       /* address is outside main heap  */
       if (contiguous (av) && av->top != initial_top (av))
@@ -2822,6 +2833,11 @@ munmap_chunk (mchunkptr p)
 
   assert (chunk_is_mmapped (p));
 
+  /* Do nothing if the chunk is a faked mmapped chunk in the dumped
+     main arena.  We never free this memory.  */
+  if (DUMPED_MAIN_ARENA_CHUNK (p))
+    return;
+
   uintptr_t block = (uintptr_t) p - p->prev_size;
   size_t total_size = p->prev_size + size;
   /* Unfortunately we have to do the compilers job by hand here.  Normally
@@ -2942,10 +2958,12 @@ __libc_free (void *mem)
 
   if (chunk_is_mmapped (p))                       /* release mmapped memory. */
     {
-      /* see if the dynamic brk/mmap threshold needs adjusting */
+      /* See if the dynamic brk/mmap threshold needs adjusting.
+	 Dumped fake mmapped chunks do not affect the threshold.  */
       if (!mp_.no_dyn_threshold
           && p->size > mp_.mmap_threshold
-          && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
+          && p->size <= DEFAULT_MMAP_THRESHOLD_MAX
+	  && !DUMPED_MAIN_ARENA_CHUNK (p))
         {
           mp_.mmap_threshold = chunksize (p);
           mp_.trim_threshold = 2 * mp_.mmap_threshold;
@@ -2995,12 +3013,15 @@ __libc_realloc (void *oldmem, size_t bytes)
   else
     ar_ptr = arena_for_chunk (oldp);
 
-  /* Little security check which won't hurt performance: the
-     allocator never wraps around at the end of the address space.
-     Therefore we can exclude some size values which might appear
-     here by accident or by "design" from some intruder.  */
-  if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
-      || __builtin_expect (misaligned_chunk (oldp), 0))
+  /* Little security check which won't hurt performance: the allocator
+     never wraps around at the end of the address space.  Therefore
+     we can exclude some size values which might appear here by
+     accident or by "design" from some intruder.  We need to bypass
+     this check for dumped fake mmap chunks from the old main arena
+     because the new malloc may provide additional alignment.  */
+  if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
+       || __builtin_expect (misaligned_chunk (oldp), 0))
+      && !DUMPED_MAIN_ARENA_CHUNK (oldp))
     {
       malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
 		       ar_ptr);
@@ -3011,6 +3032,22 @@ __libc_realloc (void *oldmem, size_t bytes)
 
   if (chunk_is_mmapped (oldp))
     {
+      /* If this is a faked mmapped chunk from the dumped main arena,
+	 always make a copy (and do not free the old chunk).  */
+      if (DUMPED_MAIN_ARENA_CHUNK (oldp))
+	{
+	  /* Must alloc, copy, free. */
+	  void *newmem = __libc_malloc (bytes);
+	  if (newmem == 0)
+	    return NULL;
+	  /* Copy as many bytes as are available from the old chunk
+	     and fit into the new size.  */
+	  if (bytes > oldsize - 2 * SIZE_SZ)
+	    bytes = oldsize - 2 * SIZE_SZ;
+	  memcpy (newmem, oldmem, bytes);
+	  return newmem;
+	}
+
       void *newmem;
 
 #if HAVE_MREMAP