author    Adhemerval Zanella <adhemerval.zanella@linaro.org>  2021-11-03 11:20:50 -0300
committer Adhemerval Zanella <adhemerval.zanella@linaro.org>  2021-11-09 14:11:25 -0300
commit    b05fae4d8e34604a72ee36d2d3164391b76fcf0b (patch)
tree      a66b3f943d6b70b27326261fcb60063378e4c67f /elf/dl-minimal-malloc.c
parent    db6c4935fae6005d46af413b32aa92f4f6059dce (diff)
elf: Use the minimal malloc on tunables_strdup
The rtld_malloc functions are moved to their own file so they can be
used in csu code.  Also, the functions are renamed to __minimal_*
(since they are now used not only in loader code).

Using __minimal_malloc in tunables_strdup() avoids potential issues
with sbrk() calls while processing the tunables (I saw sporadic
elf/tst-dso-ordering9 failures on powerpc64le, with different tests
failing due to ASLR).
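
The tunables_strdup change itself is outside this filtered diff.  As a
rough sketch only (not the verbatim code from elf/dl-tunables.c; the
real error handling may differ, and _dl_fatal_printf is an
rtld-internal helper), the duplication on top of the minimal allocator
could look like:

/* Hypothetical sketch of tunables_strdup built on __minimal_malloc.
   String routines are avoided because IFUNCs may not be resolved this
   early in startup.  */
static char *
tunables_strdup (const char *in)
{
  size_t i = 0;
  while (in[i++] != '\0')
    ;
  char *out = __minimal_malloc (i);
  /* Running out of memory while parsing tunables at startup is a
     system error, so report it rather than silently ignore it.  */
  if (out == NULL)
    _dl_fatal_printf ("failed to allocate memory to process tunables\n");
  while (i-- > 0)
    out[i] = in[i];
  return out;
}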

Also, using __minimal_malloc instead of plain mmap optimizes memory
allocation in both the static and dynamic cases, since it will use any
unused space either in the last page of the data segment (avoiding an
mmap() call altogether) or left over from the previous mmap() call.
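
For concreteness, a small standalone illustration of the page-rounding
idiom used below (the _end address and the 4 KiB page size are made up
for the example; this is not part of the commit):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uintptr_t end = 0x601234;     /* made-up address of _end */
  uintptr_t pagesize = 0x1000;  /* assumed 4 KiB page size */
  /* Round up to the next page boundary, as __minimal_malloc does.  */
  uintptr_t page_end = (end + pagesize - 1) & ~(pagesize - 1);
  /* 0x602000 - 0x601234 = 0xdcc: 3532 bytes can be handed out before
     the first mmap call is needed.  */
  printf ("%lu bytes free before first mmap\n",
          (unsigned long) (page_end - end));
  return 0;
}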

Checked on x86_64-linux-gnu, i686-linux-gnu, and powerpc64le-linux-gnu.

Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
Diffstat (limited to 'elf/dl-minimal-malloc.c')
-rw-r--r--  elf/dl-minimal-malloc.c | 112
1 file changed, 112 insertions, 0 deletions
diff --git a/elf/dl-minimal-malloc.c b/elf/dl-minimal-malloc.c
new file mode 100644
index 0000000000..939b5271ca
--- /dev/null
+++ b/elf/dl-minimal-malloc.c
@@ -0,0 +1,112 @@
+/* Minimal malloc implementation for dynamic linker and static
+   initialization.
+   Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <assert.h>
+#include <string.h>
+#include <ldsodefs.h>
+#include <malloc/malloc-internal.h>
+
+static void *alloc_ptr, *alloc_end, *alloc_last_block;
+
+/* Allocate an aligned memory block.  */
+void *
+__minimal_malloc (size_t n)
+{
+  if (alloc_end == 0)
+    {
+      /* Consume any unused space in the last page of our data segment.  */
+      extern int _end attribute_hidden;
+      alloc_ptr = &_end;
+      alloc_end = (void *) 0 + (((alloc_ptr - (void *) 0)
+				 + GLRO(dl_pagesize) - 1)
+				& ~(GLRO(dl_pagesize) - 1));
+    }
+
+  /* Make sure the allocation pointer is ideally aligned.  */
+  alloc_ptr = (void *) 0 + (((alloc_ptr - (void *) 0) + MALLOC_ALIGNMENT - 1)
+			    & ~(MALLOC_ALIGNMENT - 1));
+
+  if (alloc_ptr + n >= alloc_end || n >= -(uintptr_t) alloc_ptr)
+    {
+      /* Insufficient space left; allocate another page plus one extra
+	 page to reduce number of mmap calls.  */
+      caddr_t page;
+      size_t nup = (n + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1);
+      if (__glibc_unlikely (nup == 0 && n != 0))
+	return NULL;
+      nup += GLRO(dl_pagesize);
+      page = __mmap (0, nup, PROT_READ|PROT_WRITE,
+		     MAP_ANON|MAP_PRIVATE, -1, 0);
+      if (page == MAP_FAILED)
+	return NULL;
+      if (page != alloc_end)
+	alloc_ptr = page;
+      alloc_end = page + nup;
+    }
+
+  alloc_last_block = (void *) alloc_ptr;
+  alloc_ptr += n;
+  return alloc_last_block;
+}
+
+/* We use this function occasionally since the real implementation may
+   be optimized when it can assume the memory it returns already is
+   set to NUL.  */
+void *
+__minimal_calloc (size_t nmemb, size_t size)
+{
+  /* New memory from the trivial malloc above is always already cleared.
+     (We make sure that's true in the rare occasion it might not be,
+     by clearing memory in free, below.)  */
+  size_t bytes = nmemb * size;
+
+#define HALF_SIZE_T (((size_t) 1) << (8 * sizeof (size_t) / 2))
+  if (__builtin_expect ((nmemb | size) >= HALF_SIZE_T, 0)
+      && size != 0 && bytes / size != nmemb)
+    return NULL;
+
+  return malloc (bytes);
+}
+
+/* This will rarely be called.  */
+void
+__minimal_free (void *ptr)
+{
+  /* We can free only the last block allocated.  */
+  if (ptr == alloc_last_block)
+    {
+      /* Since this is rare, we clear the freed block here
+	 so that calloc can presume malloc returns cleared memory.  */
+      memset (alloc_last_block, '\0', alloc_ptr - alloc_last_block);
+      alloc_ptr = alloc_last_block;
+    }
+}
+
+/* This is only called with the most recent block returned by malloc.  */
+void *
+__minimal_realloc (void *ptr, size_t n)
+{
+  if (ptr == NULL)
+    return malloc (n);
+  assert (ptr == alloc_last_block);
+  size_t old_size = alloc_ptr - alloc_last_block;
+  alloc_ptr = alloc_last_block;
+  void *new = malloc (n);
+  return new != ptr ? memcpy (new, ptr, old_size) : new;
+}
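
To make the allocator's contract concrete, here is a hypothetical
usage sketch (these functions are glibc-internal, so this illustrates
the semantics rather than anything callable from application code):

void *a = __minimal_malloc (16);    /* may come from the data segment */
void *b = __minimal_malloc (32);    /* bumps alloc_ptr past a */
__minimal_free (a);                 /* no-op: a is not the last block */
__minimal_free (b);                 /* reclaims b and clears its bytes */
void *c = __minimal_calloc (4, 8);  /* 32 bytes, already zeroed */
c = __minimal_realloc (c, 64);      /* valid only for the last block */

Because free only reclaims the most recently returned block and clears
it, calloc can rely on fresh memory being zero, which keeps the whole
implementation trivial.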