Diffstat (limited to 'REORG.TODO/malloc')
-rw-r--r--  REORG.TODO/malloc/Depend | 1
-rw-r--r--  REORG.TODO/malloc/Makefile | 233
-rw-r--r--  REORG.TODO/malloc/Versions | 90
-rw-r--r--  REORG.TODO/malloc/arena.c | 992
-rw-r--r--  REORG.TODO/malloc/dynarray-skeleton.c | 499
-rw-r--r--  REORG.TODO/malloc/dynarray.h | 176
-rw-r--r--  REORG.TODO/malloc/dynarray_at_failure.c | 31
-rw-r--r--  REORG.TODO/malloc/dynarray_emplace_enlarge.c | 69
-rw-r--r--  REORG.TODO/malloc/dynarray_finalize.c | 62
-rw-r--r--  REORG.TODO/malloc/dynarray_resize.c | 59
-rw-r--r--  REORG.TODO/malloc/dynarray_resize_clear.c | 35
-rw-r--r--  REORG.TODO/malloc/hooks.c | 585
-rw-r--r--  REORG.TODO/malloc/malloc-hooks.h | 24
-rw-r--r--  REORG.TODO/malloc/malloc-internal.h | 104
-rw-r--r--  REORG.TODO/malloc/malloc.c | 5321
-rw-r--r--  REORG.TODO/malloc/malloc.h | 164
-rw-r--r--  REORG.TODO/malloc/mallocbug.c | 70
-rw-r--r--  REORG.TODO/malloc/mcheck-init.c | 30
-rw-r--r--  REORG.TODO/malloc/mcheck.c | 416
-rw-r--r--  REORG.TODO/malloc/mcheck.h | 60
-rw-r--r--  REORG.TODO/malloc/memusage.c | 936
-rwxr-xr-x  REORG.TODO/malloc/memusage.sh | 274
-rw-r--r--  REORG.TODO/malloc/memusagestat.c | 587
-rw-r--r--  REORG.TODO/malloc/morecore.c | 53
-rw-r--r--  REORG.TODO/malloc/mtrace.c | 348
-rw-r--r--  REORG.TODO/malloc/mtrace.pl | 237
-rw-r--r--  REORG.TODO/malloc/obstack.c | 423
-rw-r--r--  REORG.TODO/malloc/obstack.h | 515
-rw-r--r--  REORG.TODO/malloc/reallocarray.c | 37
-rw-r--r--  REORG.TODO/malloc/scratch_buffer_grow.c | 52
-rw-r--r--  REORG.TODO/malloc/scratch_buffer_grow_preserve.c | 63
-rw-r--r--  REORG.TODO/malloc/scratch_buffer_set_array_size.c | 60
-rw-r--r--  REORG.TODO/malloc/set-freeres.c | 49
-rw-r--r--  REORG.TODO/malloc/thread-freeres.c | 31
-rw-r--r--  REORG.TODO/malloc/tst-calloc.c | 128
-rw-r--r--  REORG.TODO/malloc/tst-dynarray-at-fail.c | 125
-rw-r--r--  REORG.TODO/malloc/tst-dynarray-fail.c | 418
-rw-r--r--  REORG.TODO/malloc/tst-dynarray-shared.h | 77
-rw-r--r--  REORG.TODO/malloc/tst-dynarray.c | 517
-rw-r--r--  REORG.TODO/malloc/tst-interpose-aux-nothread.c | 20
-rw-r--r--  REORG.TODO/malloc/tst-interpose-aux-thread.c | 20
-rw-r--r--  REORG.TODO/malloc/tst-interpose-aux.c | 271
-rw-r--r--  REORG.TODO/malloc/tst-interpose-aux.h | 30
-rw-r--r--  REORG.TODO/malloc/tst-interpose-nothread.c | 20
-rw-r--r--  REORG.TODO/malloc/tst-interpose-skeleton.c | 204
-rw-r--r--  REORG.TODO/malloc/tst-interpose-static-nothread.c | 19
-rw-r--r--  REORG.TODO/malloc/tst-interpose-static-thread.c | 19
-rw-r--r--  REORG.TODO/malloc/tst-interpose-thread.c | 20
-rw-r--r--  REORG.TODO/malloc/tst-malloc-backtrace.c | 53
-rw-r--r--  REORG.TODO/malloc/tst-malloc-fork-deadlock.c | 206
-rw-r--r--  REORG.TODO/malloc/tst-malloc-thread-exit.c | 137
-rw-r--r--  REORG.TODO/malloc/tst-malloc-thread-fail.c | 442
-rw-r--r--  REORG.TODO/malloc/tst-malloc-usable-static-tunables.c | 1
-rw-r--r--  REORG.TODO/malloc/tst-malloc-usable-static.c | 1
-rw-r--r--  REORG.TODO/malloc/tst-malloc-usable-tunables.c | 1
-rw-r--r--  REORG.TODO/malloc/tst-malloc-usable.c | 49
-rw-r--r--  REORG.TODO/malloc/tst-malloc.c | 95
-rw-r--r--  REORG.TODO/malloc/tst-mallocfork.c | 51
-rw-r--r--  REORG.TODO/malloc/tst-mallocfork2.c | 211
-rw-r--r--  REORG.TODO/malloc/tst-mallocstate.c | 505
-rw-r--r--  REORG.TODO/malloc/tst-mallopt.c | 75
-rw-r--r--  REORG.TODO/malloc/tst-mcheck.c | 115
-rw-r--r--  REORG.TODO/malloc/tst-memalign.c | 114
-rw-r--r--  REORG.TODO/malloc/tst-mtrace.c | 105
-rwxr-xr-x  REORG.TODO/malloc/tst-mtrace.sh | 43
-rw-r--r--  REORG.TODO/malloc/tst-obstack.c | 67
-rw-r--r--  REORG.TODO/malloc/tst-posix_memalign.c | 118
-rw-r--r--  REORG.TODO/malloc/tst-pvalloc.c | 99
-rw-r--r--  REORG.TODO/malloc/tst-realloc.c | 161
-rw-r--r--  REORG.TODO/malloc/tst-reallocarray.c | 118
-rw-r--r--  REORG.TODO/malloc/tst-scratch_buffer.c | 155
-rw-r--r--  REORG.TODO/malloc/tst-trim1.c | 56
-rw-r--r--  REORG.TODO/malloc/tst-valloc.c | 99
73 files changed, 17651 insertions, 0 deletions
diff --git a/REORG.TODO/malloc/Depend b/REORG.TODO/malloc/Depend
new file mode 100644
index 0000000000..910c6d9152
--- /dev/null
+++ b/REORG.TODO/malloc/Depend
@@ -0,0 +1 @@
+dlfcn
diff --git a/REORG.TODO/malloc/Makefile b/REORG.TODO/malloc/Makefile
new file mode 100644
index 0000000000..af025cbb24
--- /dev/null
+++ b/REORG.TODO/malloc/Makefile
@@ -0,0 +1,233 @@
+# Copyright (C) 1991-2017 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+
+#
+#	Makefile for malloc routines
+#
+subdir	:= malloc
+
+include ../Makeconfig
+
+dist-headers := malloc.h
+headers := $(dist-headers) obstack.h mcheck.h
+tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
+	 tst-mcheck tst-mallocfork tst-trim1 \
+	 tst-malloc-usable tst-realloc tst-reallocarray tst-posix_memalign \
+	 tst-pvalloc tst-memalign tst-mallopt \
+	 tst-malloc-backtrace tst-malloc-thread-exit \
+	 tst-malloc-thread-fail tst-malloc-fork-deadlock \
+	 tst-mallocfork2 \
+	 tst-interpose-nothread \
+	 tst-interpose-thread \
+
+tests-static := \
+	 tst-interpose-static-nothread \
+	 tst-interpose-static-thread \
+	 tst-malloc-usable-static \
+
+tests-internal := tst-mallocstate tst-scratch_buffer
+
+# The dynarray framework is only available inside glibc.
+tests-internal += \
+	 tst-dynarray \
+	 tst-dynarray-fail \
+	 tst-dynarray-at-fail \
+
+ifneq (no,$(have-tunables))
+tests += tst-malloc-usable-tunables
+tests-static += tst-malloc-usable-static-tunables
+endif
+
+tests += $(tests-static)
+test-srcs = tst-mtrace tst-dynarray tst-dynarray-fail
+
+routines = malloc morecore mcheck mtrace obstack reallocarray \
+  scratch_buffer_grow scratch_buffer_grow_preserve \
+  scratch_buffer_set_array_size \
+  dynarray_at_failure \
+  dynarray_emplace_enlarge \
+  dynarray_finalize \
+  dynarray_resize \
+  dynarray_resize_clear \
+
+install-lib := libmcheck.a
+non-lib.a := libmcheck.a
+
+# Additional library.
+extra-libs = libmemusage
+extra-libs-others = $(extra-libs)
+
+# Helper objects for some tests.
+extra-tests-objs += \
+  tst-interpose-aux-nothread.o \
+  tst-interpose-aux-thread.o \
+
+test-extras = \
+  tst-interpose-aux-nothread \
+  tst-interpose-aux-thread \
+
+libmemusage-routines = memusage
+libmemusage-inhibit-o = $(filter-out .os,$(object-suffixes))
+
+$(objpfx)tst-malloc-backtrace: $(shared-thread-library)
+$(objpfx)tst-malloc-thread-exit: $(shared-thread-library)
+$(objpfx)tst-malloc-thread-fail: $(shared-thread-library)
+$(objpfx)tst-malloc-fork-deadlock: $(shared-thread-library)
+
+# Export the __malloc_initialize_hook variable to libc.so.
+LDFLAGS-tst-mallocstate = -rdynamic
+
+# These should be removed by `make clean'.
+extra-objs = mcheck-init.o libmcheck.a
+
+# Include the cleanup handler.
+aux := set-freeres thread-freeres
+
+# The Perl script to analyze the output of the mtrace functions.
+ifneq ($(PERL),no)
+install-bin-script = mtrace
+generated += mtrace
+
+# The Perl script will print addresses, and to do this nicely we must know
+# whether we are on a 32 or 64 bit machine.
+ifneq ($(findstring wordsize-32,$(config-sysdirs)),)
+address-width=10
+else
+address-width=18
+endif
+endif
+
+# Unless we get a test for the availability of libgd which also works
+# for cross-compiling, we disable the memusagestat generation in this
+# situation.
+ifneq ($(cross-compiling),yes)
+# If the gd library is available we build the `memusagestat' program.
+ifneq ($(LIBGD),no)
+others: $(objpfx)memusage
+install-bin = memusagestat
+install-bin-script += memusage
+generated += memusagestat memusage
+extra-objs += memusagestat.o
+
+# The configure.ac check for libgd and its headers did not use $SYSINCLUDES.
+# The directory specified by --with-headers usually contains only the basic
+# kernel interface headers, not something like libgd.  So the simplest thing
+# is to presume that the standard system headers will be ok for this file.
+$(objpfx)memusagestat.o: sysincludes = # nothing
+endif
+endif
+
+# Another goal which can be used to override the configure decision.
+.PHONY: do-memusagestat
+do-memusagestat: $(objpfx)memusagestat
+
+memusagestat-modules = memusagestat
+
+cpp-srcs-left := $(memusagestat-modules)
+lib := memusagestat
+include $(patsubst %,$(..)libof-iterator.mk,$(cpp-srcs-left))
+
+$(objpfx)memusagestat: $(memusagestat-modules:%=$(objpfx)%.o)
+	$(LINK.o) -o $@ $^ $(libgd-LDFLAGS) -lgd -lpng -lz -lm
+
+ifeq ($(run-built-tests),yes)
+ifeq (yes,$(build-shared))
+ifneq ($(PERL),no)
+tests-special += $(objpfx)tst-mtrace.out
+tests-special += $(objpfx)tst-dynarray-mem.out
+tests-special += $(objpfx)tst-dynarray-fail-mem.out
+endif
+endif
+endif
+
+include ../Rules
+
+CFLAGS-mcheck-init.c = $(PIC-ccflag)
+CFLAGS-obstack.c = $(uses-callbacks)
+
+$(objpfx)libmcheck.a: $(objpfx)mcheck-init.o
+	-rm -f $@
+	$(patsubst %/,cd % &&,$(objpfx)) \
+	$(LN_S) $(<F) $(@F)
+
+lib: $(objpfx)libmcheck.a
+
+ifeq ($(run-built-tests),yes)
+ifeq (yes,$(build-shared))
+ifneq ($(PERL),no)
+$(objpfx)tst-mtrace.out: tst-mtrace.sh $(objpfx)tst-mtrace
+	$(SHELL) $< $(common-objpfx) '$(test-program-prefix-before-env)' \
+		 '$(run-program-env)' '$(test-program-prefix-after-env)' ; \
+	$(evaluate-test)
+endif
+endif
+endif
+
+tst-mcheck-ENV = MALLOC_CHECK_=3
+tst-malloc-usable-ENV = MALLOC_CHECK_=3
+tst-malloc-usable-static-ENV = $(tst-malloc-usable-ENV)
+tst-malloc-usable-tunables-ENV = GLIBC_TUNABLES=glibc.malloc.check=3
+tst-malloc-usable-static-tunables-ENV = $(tst-malloc-usable-tunables-ENV)
+
+# Uncomment this for test releases.  For public releases it is too expensive.
+#CPPFLAGS-malloc.o += -DMALLOC_DEBUG=1
+
+sLIBdir := $(shell echo $(slibdir) | sed 's,lib\(\|64\)$$,\\\\$$LIB,')
+
+$(objpfx)mtrace: mtrace.pl
+	rm -f $@.new
+	sed -e 's|@PERL@|$(PERL)|' -e 's|@XXX@|$(address-width)|' \
+	    -e 's|@VERSION@|$(version)|' \
+	    -e 's|@PKGVERSION@|$(PKGVERSION)|' \
+	    -e 's|@REPORT_BUGS_TO@|$(REPORT_BUGS_TO)|' $^ > $@.new \
+	&& rm -f $@ && mv $@.new $@ && chmod +x $@
+
+$(objpfx)memusage: memusage.sh
+	rm -f $@.new
+	sed -e 's|@BASH@|$(BASH)|' -e 's|@VERSION@|$(version)|' \
+	    -e 's|@SLIBDIR@|$(sLIBdir)|' -e 's|@BINDIR@|$(bindir)|' \
+	    -e 's|@PKGVERSION@|$(PKGVERSION)|' \
+	    -e 's|@REPORT_BUGS_TO@|$(REPORT_BUGS_TO)|' $^ > $@.new \
+	&& rm -f $@ && mv $@.new $@ && chmod +x $@
+
+
+# The implementation uses `dlsym'
+$(objpfx)libmemusage.so: $(libdl)
+
+# Extra dependencies
+$(foreach o,$(all-object-suffixes),$(objpfx)malloc$(o)): arena.c hooks.c
+
+# Compile the tests with a flag which suppresses the mallopt call in
+# the test skeleton.
+$(tests:%=$(objpfx)%.o): CPPFLAGS += -DTEST_NO_MALLOPT
+
+$(objpfx)tst-interpose-nothread: $(objpfx)tst-interpose-aux-nothread.o
+$(objpfx)tst-interpose-thread: \
+  $(objpfx)tst-interpose-aux-thread.o $(shared-thread-library)
+$(objpfx)tst-interpose-static-nothread: $(objpfx)tst-interpose-aux-nothread.o
+$(objpfx)tst-interpose-static-thread: \
+  $(objpfx)tst-interpose-aux-thread.o $(static-thread-library)
+
+tst-dynarray-ENV = MALLOC_TRACE=$(objpfx)tst-dynarray.mtrace
+$(objpfx)tst-dynarray-mem.out: $(objpfx)tst-dynarray.out
+	$(common-objpfx)malloc/mtrace $(objpfx)tst-dynarray.mtrace > $@; \
+	$(evaluate-test)
+
+tst-dynarray-fail-ENV = MALLOC_TRACE=$(objpfx)tst-dynarray-fail.mtrace
+$(objpfx)tst-dynarray-fail-mem.out: $(objpfx)tst-dynarray-fail.out
+	$(common-objpfx)malloc/mtrace $(objpfx)tst-dynarray-fail.mtrace > $@; \
+	$(evaluate-test)
diff --git a/REORG.TODO/malloc/Versions b/REORG.TODO/malloc/Versions
new file mode 100644
index 0000000000..5b543069b3
--- /dev/null
+++ b/REORG.TODO/malloc/Versions
@@ -0,0 +1,90 @@
+libc {
+  GLIBC_2.0 {
+    # global variables
+    _obstack;
+
+    # interface of malloc functions
+    __libc_calloc; __libc_free; __libc_mallinfo; __libc_malloc;
+    __libc_mallopt; __libc_memalign; __libc_pvalloc; __libc_realloc;
+    __libc_valloc;
+    __malloc_initialize_hook; __free_hook; __malloc_hook; __realloc_hook;
+    __memalign_hook; __after_morecore_hook;
+    __malloc_initialized; __default_morecore; __morecore;
+
+    # functions used in inline functions or macros
+    _obstack_allocated_p; _obstack_begin; _obstack_begin_1;
+    _obstack_free; _obstack_memory_used; _obstack_newchunk;
+
+    # variables in normal name space
+    mallwatch; obstack_alloc_failed_handler; obstack_exit_failure;
+
+    # c*
+    calloc; cfree;
+
+    # f*
+    free;
+
+    # m*
+    mallinfo; malloc; malloc_get_state; malloc_set_state; malloc_stats;
+    malloc_trim; malloc_usable_size; mallopt; memalign; mprobe; mtrace;
+    muntrace;
+
+    # o*
+    obstack_free;
+
+    # p*
+    pvalloc;
+
+    # r*
+    realloc;
+
+    # t*
+    tr_break;
+
+    # v*
+    valloc;
+  }
+  GLIBC_2.1 {
+    # Special functions.
+    __libc_freeres;
+  }
+  GLIBC_2.2 {
+    # m*
+    mcheck_check_all; mcheck_pedantic;
+
+    # p*
+    posix_memalign;
+  }
+  GLIBC_2.10 {
+    malloc_info;
+  }
+  GLIBC_2.16 {
+    aligned_alloc;
+  }
+  GLIBC_2.26 {
+    reallocarray;
+  }
+  GLIBC_PRIVATE {
+    # Internal startup hook for libpthread.
+    __libc_malloc_pthread_startup;
+
+    # Internal destructor hook for libpthread.
+    __libc_thread_freeres;
+
+    # struct scratch_buffer support
+    __libc_scratch_buffer_grow;
+    __libc_scratch_buffer_grow_preserve;
+    __libc_scratch_buffer_set_array_size;
+
+
+    # Internal name for reallocarray
+    __libc_reallocarray;
+
+    # dynarray support
+    __libc_dynarray_at_failure;
+    __libc_dynarray_emplace_enlarge;
+    __libc_dynarray_finalize;
+    __libc_dynarray_resize;
+    __libc_dynarray_resize_clear;
+  }
+}
diff --git a/REORG.TODO/malloc/arena.c b/REORG.TODO/malloc/arena.c
new file mode 100644
index 0000000000..660d638c93
--- /dev/null
+++ b/REORG.TODO/malloc/arena.c
@@ -0,0 +1,992 @@
+/* Malloc implementation for multiple threads without lock contention.
+   Copyright (C) 2001-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <stdbool.h>
+
+#if HAVE_TUNABLES
+# define TUNABLE_NAMESPACE malloc
+#endif
+#include <elf/dl-tunables.h>
+
+/* Compile-time constants.  */
+
+#define HEAP_MIN_SIZE (32 * 1024)
+#ifndef HEAP_MAX_SIZE
+# ifdef DEFAULT_MMAP_THRESHOLD_MAX
+#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
+# else
+#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
+# endif
+#endif
+
+/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
+   that are dynamically created for multi-threaded programs.  The
+   maximum size must be a power of two, for fast determination of
+   which heap belongs to a chunk.  It should be much larger than the
+   mmap threshold, so that requests with a size just below that
+   threshold can be fulfilled without creating too many heaps.  */
+
+/***************************************************************************/
+
+#define top(ar_ptr) ((ar_ptr)->top)
+
+/* A heap is a single contiguous memory region holding (coalesceable)
+   malloc_chunks.  It is allocated with mmap() and always starts at an
+   address aligned to HEAP_MAX_SIZE.  */
+
+typedef struct _heap_info
+{
+  mstate ar_ptr; /* Arena for this heap. */
+  struct _heap_info *prev; /* Previous heap. */
+  size_t size;   /* Current size in bytes. */
+  size_t mprotect_size; /* Size in bytes that has been mprotected
+                           PROT_READ|PROT_WRITE.  */
+  /* Make sure the following data is properly aligned, particularly
+     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
+     MALLOC_ALIGNMENT. */
+  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
+} heap_info;
+
+/* Get a compile-time error if the heap_info padding is not correct
+   to make alignment work as expected in sYSMALLOc.  */
+extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
+                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
+                                            ? -1 : 1];
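/* Editorial note (not part of the patch): a worked example of the pad
   expression above, assuming a typical 64-bit configuration with
   SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and MALLOC_ALIGN_MASK == 0xf.
   The four pointer/size_t members take 4 * 8 == 32 bytes; -6 * SIZE_SZ
   is -48 and (-48 & 0xf) == 0, so no padding is added and
   sizeof (heap_info) + 2 * SIZE_SZ == 32 + 16 == 48, a multiple of 16.
   On a hypothetical target with SIZE_SZ == 4 but MALLOC_ALIGNMENT == 16,
   (-24 & 0xf) == 8, so sizeof (heap_info) becomes 16 + 8 == 24 and
   24 + 2 * 4 == 32, again a multiple of 16, which is exactly the
   property the sanity_check_heap_info_alignment declaration enforces.  */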
+
+/* Thread specific data.  */
+
+static __thread mstate thread_arena attribute_tls_model_ie;
+
+/* Arena free list.  free_list_lock synchronizes access to the
+   free_list variable below, and the next_free and attached_threads
+   members of struct malloc_state objects.  No other locks must be
+   acquired after free_list_lock has been acquired.  */
+
+__libc_lock_define_initialized (static, free_list_lock);
+static size_t narenas = 1;
+static mstate free_list;
+
+/* list_lock prevents concurrent writes to the next member of struct
+   malloc_state objects.
+
+   Read access to the next member is supposed to synchronize with the
+   atomic_write_barrier and the write to the next member in
+   _int_new_arena.  This suffers from data races; see the FIXME
+   comments in _int_new_arena and reused_arena.
+
+   list_lock also prevents concurrent forks.  At the time list_lock is
+   acquired, no arena lock must have been acquired, but it is
+   permitted to acquire arena locks subsequently, while list_lock is
+   acquired.  */
+__libc_lock_define_initialized (static, list_lock);
+
+/* Already initialized? */
+int __malloc_initialized = -1;
+
+/**************************************************************************/
+
+
+/* arena_get() acquires an arena and locks the corresponding mutex.
+   First, try the one last locked successfully by this thread.  (This
+   is the common case and handled with a macro for speed.)  Then, loop
+   once over the circularly linked list of arenas.  If no arena is
+   readily available, create a new one.  In this latter case, `size'
+   is just a hint as to how much memory will be required immediately
+   in the new arena. */
+
+#define arena_get(ptr, size) do { \
+      ptr = thread_arena;						      \
+      arena_lock (ptr, size);						      \
+  } while (0)
+
+#define arena_lock(ptr, size) do {					      \
+      if (ptr && !arena_is_corrupt (ptr))				      \
+        __libc_lock_lock (ptr->mutex);					      \
+      else								      \
+        ptr = arena_get2 ((size), NULL);				      \
+  } while (0)
+
+/* find the heap and corresponding arena for a given ptr */
+
+#define heap_for_ptr(ptr) \
+  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
+#define arena_for_chunk(ptr) \
+  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
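/* Editorial sketch (not part of the patch): because every mmap'ed heap
   starts at an address aligned to HEAP_MAX_SIZE, masking off the low
   bits of any chunk address inside it recovers the owning heap_info.
   Assuming HEAP_MAX_SIZE == 0x4000000 (64 MiB) and a hypothetical
   chunk pointer:

     mchunkptr p = (mchunkptr) 0x7f3a45123450;
     heap_info *h = heap_for_ptr (p);   // == (heap_info *) 0x7f3a44000000
     mstate a = arena_for_chunk (p);    // h->ar_ptr for non-main-arena chunks
*/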
+
+
+/**************************************************************************/
+
+/* atfork support.  */
+
+/* The following three functions are called around fork from a
+   multi-threaded process.  We do not use the general fork handler
+   mechanism to make sure that our handlers are the last ones being
+   called, so that other fork handlers can use the malloc
+   subsystem.  */
+
+void
+internal_function
+__malloc_fork_lock_parent (void)
+{
+  if (__malloc_initialized < 1)
+    return;
+
+  /* We do not acquire free_list_lock here because we completely
+     reconstruct free_list in __malloc_fork_unlock_child.  */
+
+  __libc_lock_lock (list_lock);
+
+  for (mstate ar_ptr = &main_arena;; )
+    {
+      __libc_lock_lock (ar_ptr->mutex);
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
+    }
+}
+
+void
+internal_function
+__malloc_fork_unlock_parent (void)
+{
+  if (__malloc_initialized < 1)
+    return;
+
+  for (mstate ar_ptr = &main_arena;; )
+    {
+      __libc_lock_unlock (ar_ptr->mutex);
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
+    }
+  __libc_lock_unlock (list_lock);
+}
+
+void
+internal_function
+__malloc_fork_unlock_child (void)
+{
+  if (__malloc_initialized < 1)
+    return;
+
+  /* Push all arenas to the free list, except thread_arena, which is
+     attached to the current thread.  */
+  __libc_lock_init (free_list_lock);
+  if (thread_arena != NULL)
+    thread_arena->attached_threads = 1;
+  free_list = NULL;
+  for (mstate ar_ptr = &main_arena;; )
+    {
+      __libc_lock_init (ar_ptr->mutex);
+      if (ar_ptr != thread_arena)
+        {
+	  /* This arena is no longer attached to any thread.  */
+	  ar_ptr->attached_threads = 0;
+          ar_ptr->next_free = free_list;
+          free_list = ar_ptr;
+        }
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
+    }
+
+  __libc_lock_init (list_lock);
+}
+
+#if HAVE_TUNABLES
+static inline int do_set_mallopt_check (int32_t value);
+void
+TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
+{
+  int32_t value = (int32_t) valp->numval;
+  do_set_mallopt_check (value);
+  if (check_action != 0)
+    __malloc_check_init ();
+}
+
+# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
+static inline int do_ ## __name (__type value);				      \
+void									      \
+TUNABLE_CALLBACK (__name) (tunable_val_t *valp)				      \
+{									      \
+  __type value = (__type) (valp)->numval;				      \
+  do_ ## __name (value);						      \
+}
+
+TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
+TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
+TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
+TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
+TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
+TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
+TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
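/* Editorial illustration (not part of the patch): taking one of the
   instances above, TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
   expands to roughly the following, forwarding the tunable value to
   the corresponding do_set_top_pad helper:

     static inline int do_set_top_pad (size_t value);
     void
     TUNABLE_CALLBACK (set_top_pad) (tunable_val_t *valp)
     {
       size_t value = (size_t) (valp)->numval;
       do_set_top_pad (value);
     }
*/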
+#else
+/* Initialization routine. */
+#include <string.h>
+extern char **_environ;
+
+static char *
+internal_function
+next_env_entry (char ***position)
+{
+  char **current = *position;
+  char *result = NULL;
+
+  while (*current != NULL)
+    {
+      if (__builtin_expect ((*current)[0] == 'M', 0)
+          && (*current)[1] == 'A'
+          && (*current)[2] == 'L'
+          && (*current)[3] == 'L'
+          && (*current)[4] == 'O'
+          && (*current)[5] == 'C'
+          && (*current)[6] == '_')
+        {
+          result = &(*current)[7];
+
+          /* Save current position for next visit.  */
+          *position = ++current;
+
+          break;
+        }
+
+      ++current;
+    }
+
+  return result;
+}
+#endif
+
+
+#ifdef SHARED
+static void *
+__failing_morecore (ptrdiff_t d)
+{
+  return (void *) MORECORE_FAILURE;
+}
+
+extern struct dl_open_hook *_dl_open_hook;
+libc_hidden_proto (_dl_open_hook);
+#endif
+
+static void
+ptmalloc_init (void)
+{
+  if (__malloc_initialized >= 0)
+    return;
+
+  __malloc_initialized = 0;
+
+#ifdef SHARED
+  /* In case this libc copy is in a non-default namespace, never use brk.
+     Likewise if dlopened from statically linked program.  */
+  Dl_info di;
+  struct link_map *l;
+
+  if (_dl_open_hook != NULL
+      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
+          && l->l_ns != LM_ID_BASE))
+    __morecore = __failing_morecore;
+#endif
+
+  thread_arena = &main_arena;
+
+#if HAVE_TUNABLES
+  /* Ensure initialization/consolidation and do it under a lock so that a
+     thread attempting to use the arena in parallel waits on us till we
+     finish.  */
+  __libc_lock_lock (main_arena.mutex);
+  malloc_consolidate (&main_arena);
+
+  TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
+  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
+  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
+  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
+  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
+  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
+  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
+  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
+  __libc_lock_unlock (main_arena.mutex);
+#else
+  const char *s = NULL;
+  if (__glibc_likely (_environ != NULL))
+    {
+      char **runp = _environ;
+      char *envline;
+
+      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
+                               0))
+        {
+          size_t len = strcspn (envline, "=");
+
+          if (envline[len] != '=')
+            /* This is a "MALLOC_" variable at the end of the string
+               without a '=' character.  Ignore it since otherwise we
+               will access invalid memory below.  */
+            continue;
+
+          switch (len)
+            {
+            case 6:
+              if (memcmp (envline, "CHECK_", 6) == 0)
+                s = &envline[7];
+              break;
+            case 8:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
+                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
+                  else if (memcmp (envline, "PERTURB_", 8) == 0)
+                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
+                }
+              break;
+            case 9:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
+                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
+                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
+                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
+                }
+              break;
+            case 10:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
+                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
+                }
+              break;
+            case 15:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
+                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
+                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
+                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
+                }
+              break;
+            default:
+              break;
+            }
+        }
+    }
+  if (s && s[0])
+    {
+      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
+      if (check_action != 0)
+        __malloc_check_init ();
+    }
+#endif
+
+#if HAVE_MALLOC_INIT_HOOK
+  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
+  if (hook != NULL)
+    (*hook)();
+#endif
+  __malloc_initialized = 1;
+}
+
+/* Managing heaps and arenas (for concurrent threads) */
+
+#if MALLOC_DEBUG > 1
+
+/* Print the complete contents of a single heap to stderr. */
+
+static void
+dump_heap (heap_info *heap)
+{
+  char *ptr;
+  mchunkptr p;
+
+  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
+  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
+        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
+  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
+                   ~MALLOC_ALIGN_MASK);
+  for (;; )
+    {
+      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
+      if (p == top (heap->ar_ptr))
+        {
+          fprintf (stderr, " (top)\n");
+          break;
+        }
+      else if (p->size == (0 | PREV_INUSE))
+        {
+          fprintf (stderr, " (fence)\n");
+          break;
+        }
+      fprintf (stderr, "\n");
+      p = next_chunk (p);
+    }
+}
+#endif /* MALLOC_DEBUG > 1 */
+
+/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
+   addresses as opposed to increasing, new_heap would badly fragment the
+   address space.  In that case remember the second HEAP_MAX_SIZE part
+   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
+   call (if it is already aligned) and try to reuse it next time.  We need
+   no locking for it, as kernel ensures the atomicity for us - worst case
+   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
+   multiple threads, but only one will succeed.  */
+static char *aligned_heap_area;
+
+/* Create a new heap.  size is automatically rounded up to a multiple
+   of the page size. */
+
+static heap_info *
+internal_function
+new_heap (size_t size, size_t top_pad)
+{
+  size_t pagesize = GLRO (dl_pagesize);
+  char *p1, *p2;
+  unsigned long ul;
+  heap_info *h;
+
+  if (size + top_pad < HEAP_MIN_SIZE)
+    size = HEAP_MIN_SIZE;
+  else if (size + top_pad <= HEAP_MAX_SIZE)
+    size += top_pad;
+  else if (size > HEAP_MAX_SIZE)
+    return 0;
+  else
+    size = HEAP_MAX_SIZE;
+  size = ALIGN_UP (size, pagesize);
+
+  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
+     No swap space needs to be reserved for the following large
+     mapping (on Linux, this is the case for all non-writable mappings
+     anyway). */
+  p2 = MAP_FAILED;
+  if (aligned_heap_area)
+    {
+      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+                          MAP_NORESERVE);
+      aligned_heap_area = NULL;
+      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
+        {
+          __munmap (p2, HEAP_MAX_SIZE);
+          p2 = MAP_FAILED;
+        }
+    }
+  if (p2 == MAP_FAILED)
+    {
+      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
+      if (p1 != MAP_FAILED)
+        {
+          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
+                         & ~(HEAP_MAX_SIZE - 1));
+          ul = p2 - p1;
+          if (ul)
+            __munmap (p1, ul);
+          else
+            aligned_heap_area = p2 + HEAP_MAX_SIZE;
+          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+        }
+      else
+        {
+          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+             is already aligned. */
+          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
+          if (p2 == MAP_FAILED)
+            return 0;
+
+          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
+            {
+              __munmap (p2, HEAP_MAX_SIZE);
+              return 0;
+            }
+        }
+    }
+  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
+    {
+      __munmap (p2, HEAP_MAX_SIZE);
+      return 0;
+    }
+  h = (heap_info *) p2;
+  h->size = size;
+  h->mprotect_size = size;
+  LIBC_PROBE (memory_heap_new, 2, h, h->size);
+  return h;
+}
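/* Editorial sketch (not part of the patch): the alignment dance above in
   concrete numbers, assuming HEAP_MAX_SIZE == 0x4000000 (64 MiB) and a
   hypothetical first mapping p1 == 0x7f3a42345000 of size
   2 * HEAP_MAX_SIZE:

     p2 = (p1 + HEAP_MAX_SIZE - 1) & ~(HEAP_MAX_SIZE - 1);  // 0x7f3a44000000
     ul = p2 - p1;                                          // 0x01cbb000

   The ul bytes before p2 and the HEAP_MAX_SIZE - ul bytes after
   p2 + HEAP_MAX_SIZE are unmapped again, leaving exactly one
   HEAP_MAX_SIZE-aligned region.  If p1 is already aligned (ul == 0),
   the second half is unmapped too, but its address is remembered in
   aligned_heap_area so the next call can try to map it directly.  */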
+
+/* Grow a heap.  size is automatically rounded up to a
+   multiple of the page size. */
+
+static int
+grow_heap (heap_info *h, long diff)
+{
+  size_t pagesize = GLRO (dl_pagesize);
+  long new_size;
+
+  diff = ALIGN_UP (diff, pagesize);
+  new_size = (long) h->size + diff;
+  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
+    return -1;
+
+  if ((unsigned long) new_size > h->mprotect_size)
+    {
+      if (__mprotect ((char *) h + h->mprotect_size,
+                      (unsigned long) new_size - h->mprotect_size,
+                      PROT_READ | PROT_WRITE) != 0)
+        return -2;
+
+      h->mprotect_size = new_size;
+    }
+
+  h->size = new_size;
+  LIBC_PROBE (memory_heap_more, 2, h, h->size);
+  return 0;
+}
+
+/* Shrink a heap.  */
+
+static int
+shrink_heap (heap_info *h, long diff)
+{
+  long new_size;
+
+  new_size = (long) h->size - diff;
+  if (new_size < (long) sizeof (*h))
+    return -1;
+
+  /* Try to re-map the extra heap space freshly to save memory, and make it
+     inaccessible.  See malloc-sysdep.h to know when this is true.  */
+  if (__glibc_unlikely (check_may_shrink_heap ()))
+    {
+      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
+                         MAP_FIXED) == (char *) MAP_FAILED)
+        return -2;
+
+      h->mprotect_size = new_size;
+    }
+  else
+    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
+  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
+
+  h->size = new_size;
+  LIBC_PROBE (memory_heap_less, 2, h, h->size);
+  return 0;
+}
+
+/* Delete a heap. */
+
+#define delete_heap(heap) \
+  do {									      \
+      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)		      \
+        aligned_heap_area = NULL;					      \
+      __munmap ((char *) (heap), HEAP_MAX_SIZE);			      \
+    } while (0)
+
+static int
+internal_function
+heap_trim (heap_info *heap, size_t pad)
+{
+  mstate ar_ptr = heap->ar_ptr;
+  unsigned long pagesz = GLRO (dl_pagesize);
+  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
+  heap_info *prev_heap;
+  long new_size, top_size, top_area, extra, prev_size, misalign;
+
+  /* Can this heap go away completely? */
+  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
+    {
+      prev_heap = heap->prev;
+      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
+      p = chunk_at_offset (prev_heap, prev_size);
+      /* fencepost must be properly aligned.  */
+      misalign = ((long) p) & MALLOC_ALIGN_MASK;
+      p = chunk_at_offset (prev_heap, prev_size - misalign);
+      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
+      p = prev_chunk (p);
+      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
+      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
+      if (!prev_inuse (p))
+        new_size += prev_size (p);
+      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
+      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
+        break;
+      ar_ptr->system_mem -= heap->size;
+      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
+      delete_heap (heap);
+      heap = prev_heap;
+      if (!prev_inuse (p)) /* consolidate backward */
+        {
+          p = prev_chunk (p);
+          unlink (ar_ptr, p, bck, fwd);
+        }
+      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
+      assert (((char *) p + new_size) == ((char *) heap + heap->size));
+      top (ar_ptr) = top_chunk = p;
+      set_head (top_chunk, new_size | PREV_INUSE);
+      /*check_chunk(ar_ptr, top_chunk);*/
+    }
+
+  /* Uses similar logic for per-thread arenas as the main arena with systrim
+     and _int_free by preserving the top pad and rounding down to the nearest
+     page.  */
+  top_size = chunksize (top_chunk);
+  if ((unsigned long)(top_size) <
+      (unsigned long)(mp_.trim_threshold))
+    return 0;
+
+  top_area = top_size - MINSIZE - 1;
+  if (top_area < 0 || (size_t) top_area <= pad)
+    return 0;
+
+  /* Release in pagesize units and round down to the nearest page.  */
+  extra = ALIGN_DOWN(top_area - pad, pagesz);
+  if (extra == 0)
+    return 0;
+
+  /* Try to shrink. */
+  if (shrink_heap (heap, extra) != 0)
+    return 0;
+
+  ar_ptr->system_mem -= extra;
+
+  /* Success. Adjust top accordingly. */
+  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
+  /*check_chunk(ar_ptr, top_chunk);*/
+  return 1;
+}
+
+/* Create a new arena with initial size "size".  */
+
+/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
+   called while free_list_lock is held.  */
+static void
+detach_arena (mstate replaced_arena)
+{
+  if (replaced_arena != NULL)
+    {
+      assert (replaced_arena->attached_threads > 0);
+      /* The current implementation only detaches from main_arena in
+	 case of allocation failure.  This means that it is likely not
+	 beneficial to put the arena on free_list even if the
+	 reference count reaches zero.  */
+      --replaced_arena->attached_threads;
+    }
+}
+
+static mstate
+_int_new_arena (size_t size)
+{
+  mstate a;
+  heap_info *h;
+  char *ptr;
+  unsigned long misalign;
+
+  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
+                mp_.top_pad);
+  if (!h)
+    {
+      /* Maybe size is too large to fit in a single heap.  So, just try
+         to create a minimally-sized arena and let _int_malloc() attempt
+         to deal with the large request via mmap_chunk().  */
+      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
+      if (!h)
+        return 0;
+    }
+  a = h->ar_ptr = (mstate) (h + 1);
+  malloc_init_state (a);
+  a->attached_threads = 1;
+  /*a->next = NULL;*/
+  a->system_mem = a->max_system_mem = h->size;
+
+  /* Set up the top chunk, with proper alignment. */
+  ptr = (char *) (a + 1);
+  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
+  if (misalign > 0)
+    ptr += MALLOC_ALIGNMENT - misalign;
+  top (a) = (mchunkptr) ptr;
+  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
+
+  LIBC_PROBE (memory_arena_new, 2, a, size);
+  mstate replaced_arena = thread_arena;
+  thread_arena = a;
+  __libc_lock_init (a->mutex);
+
+  __libc_lock_lock (list_lock);
+
+  /* Add the new arena to the global list.  */
+  a->next = main_arena.next;
+  /* FIXME: The barrier is an attempt to synchronize with read access
+     in reused_arena, which does not acquire list_lock while
+     traversing the list.  */
+  atomic_write_barrier ();
+  main_arena.next = a;
+
+  __libc_lock_unlock (list_lock);
+
+  __libc_lock_lock (free_list_lock);
+  detach_arena (replaced_arena);
+  __libc_lock_unlock (free_list_lock);
+
+  /* Lock this arena.  NB: Another thread may have been attached to
+     this arena because the arena is now accessible from the
+     main_arena.next list and could have been picked by reused_arena.
+     This can only happen for the last arena created (before the arena
+     limit is reached).  At this point, some arena has to be attached
+     to two threads.  We could acquire the arena lock before list_lock
+     to make it less likely that reused_arena picks this new arena,
+     but this could result in a deadlock with
+     __malloc_fork_lock_parent.  */
+
+  __libc_lock_lock (a->mutex);
+
+  return a;
+}
+
+
+/* Remove an arena from free_list.  */
+static mstate
+get_free_list (void)
+{
+  mstate replaced_arena = thread_arena;
+  mstate result = free_list;
+  if (result != NULL)
+    {
+      __libc_lock_lock (free_list_lock);
+      result = free_list;
+      if (result != NULL)
+	{
+	  free_list = result->next_free;
+
+	  /* The arena will be attached to this thread.  */
+	  assert (result->attached_threads == 0);
+	  result->attached_threads = 1;
+
+	  detach_arena (replaced_arena);
+	}
+      __libc_lock_unlock (free_list_lock);
+
+      if (result != NULL)
+        {
+          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
+          __libc_lock_lock (result->mutex);
+	  thread_arena = result;
+        }
+    }
+
+  return result;
+}
+
+/* Remove the arena from the free list (if it is present).
+   free_list_lock must have been acquired by the caller.  */
+static void
+remove_from_free_list (mstate arena)
+{
+  mstate *previous = &free_list;
+  for (mstate p = free_list; p != NULL; p = p->next_free)
+    {
+      assert (p->attached_threads == 0);
+      if (p == arena)
+	{
+	  /* Remove the requested arena from the list.  */
+	  *previous = p->next_free;
+	  break;
+	}
+      else
+	previous = &p->next_free;
+    }
+}
+
+/* Lock and return an arena that can be reused for memory allocation.
+   Avoid AVOID_ARENA as we have already failed to allocate memory in
+   it and it is currently locked.  */
+static mstate
+reused_arena (mstate avoid_arena)
+{
+  mstate result;
+  /* FIXME: Access to next_to_use suffers from data races.  */
+  static mstate next_to_use;
+  if (next_to_use == NULL)
+    next_to_use = &main_arena;
+
+  /* Iterate over all arenas (including those linked from
+     free_list).  */
+  result = next_to_use;
+  do
+    {
+      if (!arena_is_corrupt (result) && !__libc_lock_trylock (result->mutex))
+        goto out;
+
+      /* FIXME: This is a data race, see _int_new_arena.  */
+      result = result->next;
+    }
+  while (result != next_to_use);
+
+  /* Avoid AVOID_ARENA as we have already failed to allocate memory
+     in that arena and it is currently locked.   */
+  if (result == avoid_arena)
+    result = result->next;
+
+  /* Make sure that the arena we get is not corrupted.  */
+  mstate begin = result;
+  while (arena_is_corrupt (result) || result == avoid_arena)
+    {
+      result = result->next;
+      if (result == begin)
+	/* We looped around the arena list.  We could not find any
+	   arena that was either not corrupted or not the one we
+	   wanted to avoid.  */
+	return NULL;
+    }
+
+  /* No arena available without contention.  Wait for the next in line.  */
+  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
+  __libc_lock_lock (result->mutex);
+
+out:
+  /* Attach the arena to the current thread.  */
+  {
+    /* Update the arena thread attachment counters.   */
+    mstate replaced_arena = thread_arena;
+    __libc_lock_lock (free_list_lock);
+    detach_arena (replaced_arena);
+
+    /* We may have picked up an arena on the free list.  We need to
+       preserve the invariant that no arena on the free list has a
+       positive attached_threads counter (otherwise,
+       arena_thread_freeres cannot use the counter to determine if the
+       arena needs to be put on the free list).  We unconditionally
+       remove the selected arena from the free list.  The caller of
+       reused_arena checked the free list and observed it to be empty,
+       so the list is very short.  */
+    remove_from_free_list (result);
+
+    ++result->attached_threads;
+
+    __libc_lock_unlock (free_list_lock);
+  }
+
+  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
+  thread_arena = result;
+  next_to_use = result->next;
+
+  return result;
+}
+
+static mstate
+internal_function
+arena_get2 (size_t size, mstate avoid_arena)
+{
+  mstate a;
+
+  static size_t narenas_limit;
+
+  a = get_free_list ();
+  if (a == NULL)
+    {
+      /* Nothing immediately available, so generate a new arena.  */
+      if (narenas_limit == 0)
+        {
+          if (mp_.arena_max != 0)
+            narenas_limit = mp_.arena_max;
+          else if (narenas > mp_.arena_test)
+            {
+              int n = __get_nprocs ();
+
+              if (n >= 1)
+                narenas_limit = NARENAS_FROM_NCORES (n);
+              else
+                /* We have no information about the system.  Assume two
+                   cores.  */
+                narenas_limit = NARENAS_FROM_NCORES (2);
+            }
+        }
+    repeat:;
+      size_t n = narenas;
+      /* NB: the following depends on the fact that (size_t)0 - 1 is a
+         very large number and that the underflow is OK.  If arena_max
+         is set the value of arena_test is irrelevant.  If arena_test
+         is set but narenas is not yet larger or equal to arena_test
+         narenas_limit is 0.  There is no possibility for narenas to
+         be too big for the test to always fail since there is not
+         enough address space to create that many arenas.  */
+      if (__glibc_unlikely (n <= narenas_limit - 1))
+        {
+          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
+            goto repeat;
+          a = _int_new_arena (size);
+	  if (__glibc_unlikely (a == NULL))
+            catomic_decrement (&narenas);
+        }
+      else
+        a = reused_arena (avoid_arena);
+    }
+  return a;
+}
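/* Editorial note (not part of the patch): the underflow relied upon by
   the comment in arena_get2, spelled out.  While narenas_limit is still
   0, narenas_limit - 1 wraps around to SIZE_MAX, so
   n <= narenas_limit - 1 always holds and a new arena is created.  Once
   a real limit has been computed (from mp_.arena_max or from
   NARENAS_FROM_NCORES, which malloc.c derives from the core count), the
   same expression becomes an ordinary upper-bound check.  */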
+
+/* If we don't have the main arena, then maybe the failure is due to running
+   out of mmapped areas, so we can try allocating on the main arena.
+   Otherwise, it is likely that sbrk() has failed and there is still a chance
+   to mmap(), so try one of the other arenas.  */
+static mstate
+arena_get_retry (mstate ar_ptr, size_t bytes)
+{
+  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
+  if (ar_ptr != &main_arena)
+    {
+      __libc_lock_unlock (ar_ptr->mutex);
+      /* Don't touch the main arena if it is corrupt.  */
+      if (arena_is_corrupt (&main_arena))
+	return NULL;
+
+      ar_ptr = &main_arena;
+      __libc_lock_lock (ar_ptr->mutex);
+    }
+  else
+    {
+      __libc_lock_unlock (ar_ptr->mutex);
+      ar_ptr = arena_get2 (bytes, ar_ptr);
+    }
+
+  return ar_ptr;
+}
+
+static void __attribute__ ((section ("__libc_thread_freeres_fn")))
+arena_thread_freeres (void)
+{
+  mstate a = thread_arena;
+  thread_arena = NULL;
+
+  if (a != NULL)
+    {
+      __libc_lock_lock (free_list_lock);
+      /* If this was the last attached thread for this arena, put the
+	 arena on the free list.  */
+      assert (a->attached_threads > 0);
+      if (--a->attached_threads == 0)
+	{
+	  a->next_free = free_list;
+	  free_list = a;
+	}
+      __libc_lock_unlock (free_list_lock);
+    }
+}
+text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
+
+/*
+ * Local variables:
+ * c-basic-offset: 2
+ * End:
+ */
diff --git a/REORG.TODO/malloc/dynarray-skeleton.c b/REORG.TODO/malloc/dynarray-skeleton.c
new file mode 100644
index 0000000000..7a10e083f4
--- /dev/null
+++ b/REORG.TODO/malloc/dynarray-skeleton.c
@@ -0,0 +1,499 @@
+/* Type-safe arrays which grow dynamically.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Pre-processor macros which act as parameters:
+
+   DYNARRAY_STRUCT
+      The struct tag of dynamic array to be defined.
+   DYNARRAY_ELEMENT
+      The type name of the element type.  Elements are copied
+      as if by memcpy, and can change address as the dynamic
+      array grows.
+   DYNARRAY_PREFIX
+      The prefix of the functions which are defined.
+
+   The following parameters are optional:
+
+   DYNARRAY_ELEMENT_FREE
+      DYNARRAY_ELEMENT_FREE (E) is evaluated to deallocate the
+      contents of elements. E is of type  DYNARRAY_ELEMENT *.
+   DYNARRAY_ELEMENT_INIT
+      DYNARRAY_ELEMENT_INIT (E) is evaluated to initialize a new
+      element.  E is of type  DYNARRAY_ELEMENT *.
+      If DYNARRAY_ELEMENT_FREE but not DYNARRAY_ELEMENT_INIT is
+      defined, new elements are automatically zero-initialized.
+      Otherwise, new elements have undefined contents.
+   DYNARRAY_INITIAL_SIZE
+      The size of the statically allocated array (default:
+      at least 2, more elements if they fit into 128 bytes).
+      Must be a preprocessor constant.  If DYNARRAY_INITIAL_SIZE is 0,
+      there is no statically allocated array at all, and all non-empty
+      arrays are heap-allocated.
+   DYNARRAY_FINAL_TYPE
+      The name of the type which holds the final array.  If not
+      defined, DYNARRAY_PREFIX##finalize is not provided.  DYNARRAY_FINAL_TYPE
+      must be a struct type, with members of type DYNARRAY_ELEMENT and
+      size_t at the start (in this order).
+
+   These macros are undefined after this header file has been
+   included.
+
+   The following types are provided (their members are private to the
+   dynarray implementation):
+
+     struct DYNARRAY_STRUCT
+
+   The following functions are provided:
+
+     void DYNARRAY_PREFIX##init (struct DYNARRAY_STRUCT *);
+     void DYNARRAY_PREFIX##free (struct DYNARRAY_STRUCT *);
+     bool DYNARRAY_PREFIX##has_failed (const struct DYNARRAY_STRUCT *);
+     void DYNARRAY_PREFIX##mark_failed (struct DYNARRAY_STRUCT *);
+     size_t DYNARRAY_PREFIX##size (const struct DYNARRAY_STRUCT *);
+     DYNARRAY_ELEMENT *DYNARRAY_PREFIX##at (struct DYNARRAY_STRUCT *, size_t);
+     void DYNARRAY_PREFIX##add (struct DYNARRAY_STRUCT *, DYNARRAY_ELEMENT);
+     DYNARRAY_ELEMENT *DYNARRAY_PREFIX##emplace (struct DYNARRAY_STRUCT *);
+     bool DYNARRAY_PREFIX##resize (struct DYNARRAY_STRUCT *, size_t);
+     void DYNARRAY_PREFIX##remove_last (struct DYNARRAY_STRUCT *);
+     void DYNARRAY_PREFIX##clear (struct DYNARRAY_STRUCT *);
+
+   The following functions are provided if the
+   prerequisites are met:
+
+     bool DYNARRAY_PREFIX##finalize (struct DYNARRAY_STRUCT *,
+                                     DYNARRAY_FINAL_TYPE *);
+       (if DYNARRAY_FINAL_TYPE is defined)
+     DYNARRAY_ELEMENT *DYNARRAY_PREFIX##finalize (struct DYNARRAY_STRUCT *,
+                                                  size_t *);
+       (if DYNARRAY_FINAL_TYPE is not defined)
+*/
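/* Editorial sketch (not part of the patch): a minimal, hypothetical
   instantiation of this skeleton.  The tag "int_seq", the prefix
   "int_seq_" and the int element type are illustrative only.

     #define DYNARRAY_STRUCT  int_seq
     #define DYNARRAY_ELEMENT int
     #define DYNARRAY_PREFIX  int_seq_
     #include <malloc/dynarray-skeleton.c>

     static int
     sum_first_ten (void)
     {
       struct int_seq seq;
       int_seq_init (&seq);
       for (int i = 0; i < 10; ++i)
         int_seq_add (&seq, i);
       int sum = 0;
       if (!int_seq_has_failed (&seq))
         for (size_t i = 0; i < int_seq_size (&seq); ++i)
           sum += *int_seq_at (&seq, i);
       int_seq_free (&seq);
       return sum;
     }
*/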
+
+#include <malloc/dynarray.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef DYNARRAY_STRUCT
+# error "DYNARRAY_STRUCT must be defined"
+#endif
+
+#ifndef DYNARRAY_ELEMENT
+# error "DYNARRAY_ELEMENT must be defined"
+#endif
+
+#ifndef DYNARRAY_PREFIX
+# error "DYNARRAY_PREFIX must be defined"
+#endif
+
+#ifdef DYNARRAY_INITIAL_SIZE
+# if DYNARRAY_INITIAL_SIZE < 0
+#  error "DYNARRAY_INITIAL_SIZE must be non-negative"
+# endif
+# if DYNARRAY_INITIAL_SIZE > 0
+#  define DYNARRAY_HAVE_SCRATCH 1
+# else
+#  define DYNARRAY_HAVE_SCRATCH 0
+# endif
+#else
+/* Provide a reasonable default which limits the size of
+   DYNARRAY_STRUCT.  */
+# define DYNARRAY_INITIAL_SIZE \
+  (sizeof (DYNARRAY_ELEMENT) > 64 ? 2 : 128 / sizeof (DYNARRAY_ELEMENT))
+# define DYNARRAY_HAVE_SCRATCH 1
+#endif
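/* Editorial note (not part of the patch): with the default above, a
   4-byte element type (int on common targets) gets a 32-element scratch
   array (128 / 4), while any element wider than 64 bytes falls back to
   the minimum of 2.  */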
+
+/* Public type definitions.  */
+
+/* All fields of this struct are private to the implementation.  */
+struct DYNARRAY_STRUCT
+{
+  union
+  {
+    struct dynarray_header dynarray_abstract;
+    struct
+    {
+      /* These fields must match struct dynarray_header.  */
+      size_t used;
+      size_t allocated;
+      DYNARRAY_ELEMENT *array;
+    } dynarray_header;
+  };
+
+#if DYNARRAY_HAVE_SCRATCH
+  /* Initial inline allocation.  */
+  DYNARRAY_ELEMENT scratch[DYNARRAY_INITIAL_SIZE];
+#endif
+};
+
+/* Internal use only: Helper macros.  */
+
+/* Ensure macro-expansion of DYNARRAY_PREFIX.  */
+#define DYNARRAY_CONCAT0(prefix, name) prefix##name
+#define DYNARRAY_CONCAT1(prefix, name) DYNARRAY_CONCAT0(prefix, name)
+#define DYNARRAY_NAME(name) DYNARRAY_CONCAT1(DYNARRAY_PREFIX, name)
+
+/* Address of the scratch buffer if any.  */
+#if DYNARRAY_HAVE_SCRATCH
+# define DYNARRAY_SCRATCH(list) (list)->scratch
+#else
+# define DYNARRAY_SCRATCH(list) NULL
+#endif
+
+/* Internal use only: Helper functions.  */
+
+/* Internal function.  Call DYNARRAY_ELEMENT_FREE with the array
+   elements.  Name mangling needed due to the DYNARRAY_ELEMENT_FREE
+   macro expansion.  */
+static inline void
+DYNARRAY_NAME (free__elements__) (DYNARRAY_ELEMENT *__dynarray_array,
+                                  size_t __dynarray_used)
+{
+#ifdef DYNARRAY_ELEMENT_FREE
+  for (size_t __dynarray_i = 0; __dynarray_i < __dynarray_used; ++__dynarray_i)
+    DYNARRAY_ELEMENT_FREE (&__dynarray_array[__dynarray_i]);
+#endif /* DYNARRAY_ELEMENT_FREE */
+}
+
+/* Internal function.  Free the non-scratch array allocation.  */
+static inline void
+DYNARRAY_NAME (free__array__) (struct DYNARRAY_STRUCT *list)
+{
+#if DYNARRAY_HAVE_SCRATCH
+  if (list->dynarray_header.array != list->scratch)
+    free (list->dynarray_header.array);
+#else
+  free (list->dynarray_header.array);
+#endif
+}
+
+/* Public functions.  */
+
+/* Initialize a dynamic array object.  This must be called before any
+   use of the object.  */
+__attribute__ ((nonnull (1)))
+static void
+DYNARRAY_NAME (init) (struct DYNARRAY_STRUCT *list)
+{
+  list->dynarray_header.used = 0;
+  list->dynarray_header.allocated = DYNARRAY_INITIAL_SIZE;
+  list->dynarray_header.array = DYNARRAY_SCRATCH (list);
+}
+
+/* Deallocate the dynamic array and its elements.  */
+__attribute__ ((unused, nonnull (1)))
+static void
+DYNARRAY_NAME (free) (struct DYNARRAY_STRUCT *list)
+{
+  DYNARRAY_NAME (free__elements__)
+    (list->dynarray_header.array, list->dynarray_header.used);
+  DYNARRAY_NAME (free__array__) (list);
+  DYNARRAY_NAME (init) (list);
+}
+
+/* Return true if the dynamic array is in an error state.  */
+__attribute__ ((nonnull (1)))
+static inline bool
+DYNARRAY_NAME (has_failed) (const struct DYNARRAY_STRUCT *list)
+{
+  return list->dynarray_header.allocated == __dynarray_error_marker ();
+}
+
+/* Mark the dynamic array as failed.  All elements are deallocated as
+   a side effect.  */
+__attribute__ ((nonnull (1)))
+static void
+DYNARRAY_NAME (mark_failed) (struct DYNARRAY_STRUCT *list)
+{
+  DYNARRAY_NAME (free__elements__)
+    (list->dynarray_header.array, list->dynarray_header.used);
+  DYNARRAY_NAME (free__array__) (list);
+  list->dynarray_header.array = DYNARRAY_SCRATCH (list);
+  list->dynarray_header.used = 0;
+  list->dynarray_header.allocated = __dynarray_error_marker ();
+}
+
+/* Return the number of elements which have been added to the dynamic
+   array.  */
+__attribute__ ((nonnull (1)))
+static inline size_t
+DYNARRAY_NAME (size) (const struct DYNARRAY_STRUCT *list)
+{
+  return list->dynarray_header.used;
+}
+
+/* Return a pointer to the array element at INDEX.  Terminate the
+   process if INDEX is out of bounds.  */
+__attribute__ ((nonnull (1)))
+static inline DYNARRAY_ELEMENT *
+DYNARRAY_NAME (at) (struct DYNARRAY_STRUCT *list, size_t index)
+{
+  if (__glibc_unlikely (index >= DYNARRAY_NAME (size) (list)))
+    __libc_dynarray_at_failure (DYNARRAY_NAME (size) (list), index);
+  return list->dynarray_header.array + index;
+}
+
+/* Internal function.  Slow path for the add function below.  */
+static void
+DYNARRAY_NAME (add__) (struct DYNARRAY_STRUCT *list, DYNARRAY_ELEMENT item)
+{
+  if (__glibc_unlikely
+      (!__libc_dynarray_emplace_enlarge (&list->dynarray_abstract,
+                                         DYNARRAY_SCRATCH (list),
+                                         sizeof (DYNARRAY_ELEMENT))))
+    {
+      DYNARRAY_NAME (mark_failed) (list);
+      return;
+    }
+
+  /* Copy the new element and increase the array length.  */
+  list->dynarray_header.array[list->dynarray_header.used++] = item;
+}
+
+/* Add ITEM at the end of the array, enlarging it by one element.
+   Mark *LIST as failed if the dynamic array allocation size cannot be
+   increased.  */
+__attribute__ ((unused, nonnull (1)))
+static inline void
+DYNARRAY_NAME (add) (struct DYNARRAY_STRUCT *list, DYNARRAY_ELEMENT item)
+{
+  /* Do nothing in case of previous error.  */
+  if (DYNARRAY_NAME (has_failed) (list))
+    return;
+
+  /* Enlarge the array if necessary.  */
+  if (__glibc_unlikely (list->dynarray_header.used
+                        == list->dynarray_header.allocated))
+    {
+      DYNARRAY_NAME (add__) (list, item);
+      return;
+    }
+
+  /* Copy the new element and increase the array length.  */
+  list->dynarray_header.array[list->dynarray_header.used++] = item;
+}
+
+/* Internal function.  Building block for the emplace functions below.
+   Assumes space for one more element in *LIST.  */
+static inline DYNARRAY_ELEMENT *
+DYNARRAY_NAME (emplace__tail__) (struct DYNARRAY_STRUCT *list)
+{
+  DYNARRAY_ELEMENT *result
+    = &list->dynarray_header.array[list->dynarray_header.used];
+  ++list->dynarray_header.used;
+#if defined (DYNARRAY_ELEMENT_INIT)
+  DYNARRAY_ELEMENT_INIT (result);
+#elif defined (DYNARRAY_ELEMENT_FREE)
+  memset (result, 0, sizeof (*result));
+#endif
+  return result;
+}
+
+/* Internal function.  Slow path for the emplace function below.  */
+static DYNARRAY_ELEMENT *
+DYNARRAY_NAME (emplace__) (struct DYNARRAY_STRUCT *list)
+{
+  if (__glibc_unlikely
+      (!__libc_dynarray_emplace_enlarge (&list->dynarray_abstract,
+                                         DYNARRAY_SCRATCH (list),
+                                         sizeof (DYNARRAY_ELEMENT))))
+    {
+      DYNARRAY_NAME (mark_failed) (list);
+      return NULL;
+    }
+  return DYNARRAY_NAME (emplace__tail__) (list);
+}
+
+/* Allocate a place for a new element in *LIST and return a pointer to
+   it.  The pointer can be NULL if the dynamic array cannot be
+   enlarged due to a memory allocation failure.  */
+__attribute__ ((unused, warn_unused_result, nonnull (1)))
+static
+/* Avoid inlining with the larger initialization code.  */
+#if !(defined (DYNARRAY_ELEMENT_INIT) || defined (DYNARRAY_ELEMENT_FREE))
+inline
+#endif
+DYNARRAY_ELEMENT *
+DYNARRAY_NAME (emplace) (struct DYNARRAY_STRUCT *list)
+{
+  /* Do nothing in case of previous error.  */
+  if (DYNARRAY_NAME (has_failed) (list))
+    return NULL;
+
+  /* Enlarge the array if necessary.  */
+  if (__glibc_unlikely (list->dynarray_header.used
+                        == list->dynarray_header.allocated))
+    return (DYNARRAY_NAME (emplace__) (list));
+  return DYNARRAY_NAME (emplace__tail__) (list);
+}
+
+/* Change the size of *LIST to SIZE.  If SIZE is larger than the
+   existing size, new elements are added (which can be initialized).
+   Otherwise, the list is truncated, and elements are freed.  Return
+   false on memory allocation failure (and mark *LIST as failed).  */
+__attribute__ ((unused, nonnull (1)))
+static bool
+DYNARRAY_NAME (resize) (struct DYNARRAY_STRUCT *list, size_t size)
+{
+  if (size > list->dynarray_header.used)
+    {
+      bool ok;
+#if defined (DYNARRAY_ELEMENT_INIT)
+      /* The new elements have to be initialized.  */
+      size_t old_size = list->dynarray_header.used;
+      ok = __libc_dynarray_resize (&list->dynarray_abstract,
+                                   size, DYNARRAY_SCRATCH (list),
+                                   sizeof (DYNARRAY_ELEMENT));
+      if (ok)
+        for (size_t i = old_size; i < size; ++i)
+          {
+            DYNARRAY_ELEMENT_INIT (&list->dynarray_header.array[i]);
+          }
+#elif defined (DYNARRAY_ELEMENT_FREE)
+      /* Zero initialization is needed so that the elements can be
+         safely freed.  */
+      ok = __libc_dynarray_resize_clear
+        (&list->dynarray_abstract, size,
+         DYNARRAY_SCRATCH (list), sizeof (DYNARRAY_ELEMENT));
+#else
+      ok = __libc_dynarray_resize (&list->dynarray_abstract,
+                                   size, DYNARRAY_SCRATCH (list),
+                                   sizeof (DYNARRAY_ELEMENT));
+#endif
+      if (__glibc_unlikely (!ok))
+        DYNARRAY_NAME (mark_failed) (list);
+      return ok;
+    }
+  else
+    {
+      /* The list has shrunk in size.  Free the removed elements.  */
+      DYNARRAY_NAME (free__elements__)
+        (list->dynarray_header.array + size,
+         list->dynarray_header.used - size);
+      list->dynarray_header.used = size;
+      return true;
+    }
+}
+
+/* Remove the last element of LIST if it is present.  */
+__attribute__ ((unused, nonnull (1)))
+static void
+DYNARRAY_NAME (remove_last) (struct DYNARRAY_STRUCT *list)
+{
+  /* used > 0 implies that the array is in the non-failed state.  */
+  if (list->dynarray_header.used > 0)
+    {
+      size_t new_length = list->dynarray_header.used - 1;
+#ifdef DYNARRAY_ELEMENT_FREE
+      DYNARRAY_ELEMENT_FREE (&list->dynarray_header.array[new_length]);
+#endif
+      list->dynarray_header.used = new_length;
+    }
+}
+
+/* Remove all elements from the list.  The elements are freed, but the
+   list itself is not.  */
+__attribute__ ((unused, nonnull (1)))
+static void
+DYNARRAY_NAME (clear) (struct DYNARRAY_STRUCT *list)
+{
+  /* free__elements__ does nothing if the list is in the failed
+     state.  */
+  DYNARRAY_NAME (free__elements__)
+    (list->dynarray_header.array, list->dynarray_header.used);
+  list->dynarray_header.used = 0;
+}
+
+#ifdef DYNARRAY_FINAL_TYPE
+/* Transfer the dynamic array to a permanent location at *RESULT.
+   Returns true on success and false on allocation failure.  In either
+   case, *LIST is re-initialized and can be reused.  A NULL pointer is
+   stored in *RESULT if LIST refers to an empty list.  On success, the
+   pointer in *RESULT is heap-allocated and must be deallocated using
+   free.  */
+__attribute__ ((unused, warn_unused_result, nonnull (1, 2)))
+static bool
+DYNARRAY_NAME (finalize) (struct DYNARRAY_STRUCT *list,
+                          DYNARRAY_FINAL_TYPE *result)
+{
+  struct dynarray_finalize_result res;
+  if (__libc_dynarray_finalize (&list->dynarray_abstract,
+                                DYNARRAY_SCRATCH (list),
+                                sizeof (DYNARRAY_ELEMENT), &res))
+    {
+      /* On success, the result owns all the data.  */
+      DYNARRAY_NAME (init) (list);
+      *result = (DYNARRAY_FINAL_TYPE) { res.array, res.length };
+      return true;
+    }
+  else
+    {
+      /* On error, we need to free all data.  */
+      DYNARRAY_NAME (free) (list);
+      errno = ENOMEM;
+      return false;
+    }
+}
+#else /* !DYNARRAY_FINAL_TYPE */
+/* Transfer the dynamic array to a heap-allocated array and return a
+   pointer to it.  The pointer is NULL if memory allocation fails, or
+   if the array is empty, so this function should be used only for
+   arrays which are known not to be empty (usually because they always
+   have a sentinel at the end).  If LENGTHP is not NULL, the array
+   length is written to *LENGTHP.  *LIST is re-initialized and can be
+   reused.  */
+__attribute__ ((unused, warn_unused_result, nonnull (1)))
+static DYNARRAY_ELEMENT *
+DYNARRAY_NAME (finalize) (struct DYNARRAY_STRUCT *list, size_t *lengthp)
+{
+  struct dynarray_finalize_result res;
+  if (__libc_dynarray_finalize (&list->dynarray_abstract,
+                                DYNARRAY_SCRATCH (list),
+                                sizeof (DYNARRAY_ELEMENT), &res))
+    {
+      /* On success, the result owns all the data.  */
+      DYNARRAY_NAME (init) (list);
+      if (lengthp != NULL)
+        *lengthp = res.length;
+      return res.array;
+    }
+  else
+    {
+      /* On error, we need to free all data.  */
+      DYNARRAY_NAME (free) (list);
+      errno = ENOMEM;
+      return NULL;
+    }
+}
+#endif /* !DYNARRAY_FINAL_TYPE */
+
+/* Undo macro definitions.  */
+
+#undef DYNARRAY_CONCAT0
+#undef DYNARRAY_CONCAT1
+#undef DYNARRAY_NAME
+#undef DYNARRAY_SCRATCH
+#undef DYNARRAY_HAVE_SCRATCH
+
+#undef DYNARRAY_STRUCT
+#undef DYNARRAY_ELEMENT
+#undef DYNARRAY_PREFIX
+#undef DYNARRAY_ELEMENT_FREE
+#undef DYNARRAY_ELEMENT_INIT
+#undef DYNARRAY_INITIAL_SIZE
+#undef DYNARRAY_FINAL_TYPE
diff --git a/REORG.TODO/malloc/dynarray.h b/REORG.TODO/malloc/dynarray.h
new file mode 100644
index 0000000000..c73e08b6cf
--- /dev/null
+++ b/REORG.TODO/malloc/dynarray.h
@@ -0,0 +1,176 @@
+/* Type-safe arrays which grow dynamically.  Shared definitions.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* To use the dynarray facility, you need to include
+   <malloc/dynarray-skeleton.c> and define the parameter macros
+   documented in that file.
+
+   A minimal example which provides a growing list of integers can be
+   defined like this:
+
+     struct int_array
+     {
+       // Pointer to result array followed by its length,
+       // as required by DYNARRAY_FINAL_TYPE.
+       int *array;
+       size_t length;
+     };
+
+     #define DYNARRAY_STRUCT dynarray_int
+     #define DYNARRAY_ELEMENT int
+     #define DYNARRAY_PREFIX dynarray_int_
+     #define DYNARRAY_FINAL_TYPE struct int_array
+     #include <malloc/dynarray-skeleton.c>
+
+   To create a three-element array with elements 1, 2, 3, use this
+   code:
+
+     struct dynarray_int dyn;
+     dynarray_int_init (&dyn);
+     for (int i = 1; i <= 3; ++i)
+       {
+         int *place = dynarray_int_emplace (&dyn);
+         assert (place != NULL);
+         *place = i;
+       }
+     struct int_array result;
+     bool ok = dynarray_int_finalize (&dyn, &result);
+     assert (ok);
+     assert (result.length == 3);
+     assert (result.array[0] == 1);
+     assert (result.array[1] == 2);
+     assert (result.array[2] == 3);
+     free (result.array);
+
+   If the elements contain resources which must be freed, define
+   DYNARRAY_ELEMENT_FREE appropriately, like this:
+
+     struct str_array
+     {
+       char **array;
+       size_t length;
+     };
+
+     #define DYNARRAY_STRUCT dynarray_str
+     #define DYNARRAY_ELEMENT char *
+     #define DYNARRAY_ELEMENT_FREE(ptr) free (*ptr)
+     #define DYNARRAY_PREFIX dynarray_str_
+     #define DYNARRAY_FINAL_TYPE struct str_array
+     #include <malloc/dynarray-skeleton.c>
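+
+   By analogy with the int_array example above, usage might look like
+   this (a hypothetical sketch: the dynarray_str_* names follow from
+   DYNARRAY_PREFIX above, and error handling is abbreviated):
+
+     struct dynarray_str dyn;
+     dynarray_str_init (&dyn);
+     char **place = dynarray_str_emplace (&dyn);
+     assert (place != NULL);
+     *place = strdup ("example");
+     assert (*place != NULL);
+     struct str_array result;
+     bool ok = dynarray_str_finalize (&dyn, &result);
+     assert (ok);
+     // On success, result owns the strings; free each element and
+     // then result.array when done.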
+
+   Compared to scratch buffers, dynamic arrays have the following
+   features:
+
+   - They have an element type, and are not just an untyped buffer of
+     bytes.
+
+   - When growing, previously stored elements are preserved.  (It is
+     expected that scratch_buffer_grow_preserve and
+     scratch_buffer_set_array_size will eventually go away once all
+     current users have been moved to dynamic arrays.)
+
+   - Scratch buffers have a more aggressive growth policy because
+     growing them typically means a retry of an operation (across an
+     NSS service module boundary), which is expensive.
+
+   - For the same reason, scratch buffers have a much larger initial
+     stack allocation.  */
+
+#ifndef _DYNARRAY_H
+#define _DYNARRAY_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+
+struct dynarray_header
+{
+  size_t used;
+  size_t allocated;
+  void *array;
+};
+
+/* Marker used in the allocated member to indicate that an error was
+   encountered.  */
+static inline size_t
+__dynarray_error_marker (void)
+{
+  return -1;
+}
+
+/* Internal function.  See the has_failed function in
+   dynarray-skeleton.c.  */
+static inline bool
+__dynarray_error (struct dynarray_header *list)
+{
+  return list->allocated == __dynarray_error_marker ();
+}
+
+/* Internal function.  Enlarge the dynamically allocated area of the
+   array to make room for one more element.  SCRATCH is a pointer to
+   the scratch area (which is not heap-allocated and must not be
+   freed).  ELEMENT_SIZE is the size, in bytes, of one element.
+   Return false on failure, true on success.  */
+bool __libc_dynarray_emplace_enlarge (struct dynarray_header *,
+                                      void *scratch, size_t element_size);
+libc_hidden_proto (__libc_dynarray_emplace_enlarge)
+
+/* Internal function.  Enlarge the dynamically allocated area of the
+   array to make room for at least SIZE elements (which must be larger
+   than the existing used part of the dynamic array).  SCRATCH is a
+   pointer to the scratch area (which is not heap-allocated and must
+   not be freed).  ELEMENT_SIZE is the size, in bytes, of one element.
+   Return false on failure, true on success.  */
+bool __libc_dynarray_resize (struct dynarray_header *, size_t size,
+                             void *scratch, size_t element_size);
+libc_hidden_proto (__libc_dynarray_resize)
+
+/* Internal function.  Like __libc_dynarray_resize, but clear the new
+   part of the dynamic array.  */
+bool __libc_dynarray_resize_clear (struct dynarray_header *, size_t size,
+                                   void *scratch, size_t element_size);
+libc_hidden_proto (__libc_dynarray_resize_clear)
+
+/* Internal type.  */
+struct dynarray_finalize_result
+{
+  void *array;
+  size_t length;
+};
+
+/* Internal function.  Copy the dynamically-allocated area to an
+   explicitly-sized heap allocation.  SCRATCH is a pointer to the
+   embedded scratch space.  ELEMENT_SIZE is the size, in bytes, of the
+   element type.  On success, true is returned, and pointer and length
+   are written to *RESULT.  On failure, false is returned.  The caller
+   has to take care of some of the memory management; this function is
+   expected to be called from dynarray-skeleton.c.  */
+bool __libc_dynarray_finalize (struct dynarray_header *list, void *scratch,
+                               size_t element_size,
+                               struct dynarray_finalize_result *result);
+libc_hidden_proto (__libc_dynarray_finalize)
+
+
+/* Internal function.  Terminate the process after an index error.
+   SIZE is the number of elements of the dynamic array.  INDEX is the
+   lookup index which triggered the failure.  */
+void __libc_dynarray_at_failure (size_t size, size_t index)
+  __attribute__ ((noreturn));
+libc_hidden_proto (__libc_dynarray_at_failure)
+
+#endif /* _DYNARRAY_H */
diff --git a/REORG.TODO/malloc/dynarray_at_failure.c b/REORG.TODO/malloc/dynarray_at_failure.c
new file mode 100644
index 0000000000..fcc06f030b
--- /dev/null
+++ b/REORG.TODO/malloc/dynarray_at_failure.c
@@ -0,0 +1,31 @@
+/* Report a dynamic array index out of bounds condition.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <dynarray.h>
+#include <stdio.h>
+
+void
+__libc_dynarray_at_failure (size_t size, size_t index)
+{
+  char buf[200];
+  __snprintf (buf, sizeof (buf), "Fatal glibc error: "
+              "array index %zu not less than array length %zu\n",
+              index, size);
+  __libc_fatal (buf);
+}
+libc_hidden_def (__libc_dynarray_at_failure)
diff --git a/REORG.TODO/malloc/dynarray_emplace_enlarge.c b/REORG.TODO/malloc/dynarray_emplace_enlarge.c
new file mode 100644
index 0000000000..dfc70017ce
--- /dev/null
+++ b/REORG.TODO/malloc/dynarray_emplace_enlarge.c
@@ -0,0 +1,69 @@
+/* Increase the size of a dynamic array in preparation of an emplace operation.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <dynarray.h>
+#include <malloc-internal.h>
+#include <stdlib.h>
+#include <string.h>
+
+bool
+__libc_dynarray_emplace_enlarge (struct dynarray_header *list,
+                                 void *scratch, size_t element_size)
+{
+  size_t new_allocated;
+  if (list->allocated == 0)
+    {
+      /* No scratch buffer provided.  Choose a reasonable default
+         size.  */
+      if (element_size < 4)
+        new_allocated = 16;
+      else if (element_size < 8)
+        new_allocated = 8;
+      else
+        new_allocated = 4;
+    }
+  else
+    /* Increase the allocated size, using an exponential growth
+       policy.  */
+    {
+      new_allocated = list->allocated + list->allocated / 2 + 1;
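+      /* For example, an allocation of 4 elements grows to 7, then
+         11, 17, 26, ... on successive enlargements.  */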
+      if (new_allocated <= list->allocated)
+        /* Overflow.  */
+        return false;
+    }
+
+  size_t new_size;
+  if (check_mul_overflow_size_t (new_allocated, element_size, &new_size))
+    return false;
+  void *new_array;
+  if (list->array == scratch)
+    {
+      /* The previous array was not heap-allocated.  */
+      new_array = malloc (new_size);
+      if (new_array != NULL && list->array != NULL)
+        memcpy (new_array, list->array, list->used * element_size);
+    }
+  else
+    new_array = realloc (list->array, new_size);
+  if (new_array == NULL)
+    return false;
+  list->array = new_array;
+  list->allocated = new_allocated;
+  return true;
+}
+libc_hidden_def (__libc_dynarray_emplace_enlarge)
diff --git a/REORG.TODO/malloc/dynarray_finalize.c b/REORG.TODO/malloc/dynarray_finalize.c
new file mode 100644
index 0000000000..6dd8705382
--- /dev/null
+++ b/REORG.TODO/malloc/dynarray_finalize.c
@@ -0,0 +1,62 @@
+/* Copy the dynamically-allocated area to an explicitly-sized heap allocation.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <dynarray.h>
+#include <stdlib.h>
+#include <string.h>
+
+bool
+__libc_dynarray_finalize (struct dynarray_header *list,
+                          void *scratch, size_t element_size,
+                          struct dynarray_finalize_result *result)
+{
+  if (__dynarray_error (list))
+    /* The caller will report the deferred error.  */
+    return false;
+
+  size_t used = list->used;
+
+  /* Empty list.  */
+  if (used == 0)
+    {
+      /* An empty list could still be backed by a heap-allocated
+         array.  Free it if necessary.  */
+      if (list->array != scratch)
+        free (list->array);
+      *result = (struct dynarray_finalize_result) { NULL, 0 };
+      return true;
+    }
+
+  size_t allocation_size = used * element_size;
+  void *heap_array = malloc (allocation_size);
+  if (heap_array != NULL)
+    {
+      /* The new array takes ownership of the elements.  */
+      if (list->array != NULL)
+        memcpy (heap_array, list->array, allocation_size);
+      if (list->array != scratch)
+        free (list->array);
+      *result = (struct dynarray_finalize_result)
+        { .array = heap_array, .length = used };
+      return true;
+    }
+  else
+    /* The caller will perform the freeing operation.  */
+    return false;
+}
+libc_hidden_def (__libc_dynarray_finalize)
diff --git a/REORG.TODO/malloc/dynarray_resize.c b/REORG.TODO/malloc/dynarray_resize.c
new file mode 100644
index 0000000000..e6dc9fbc68
--- /dev/null
+++ b/REORG.TODO/malloc/dynarray_resize.c
@@ -0,0 +1,59 @@
+/* Increase the size of a dynamic array.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <dynarray.h>
+#include <malloc-internal.h>
+#include <stdlib.h>
+#include <string.h>
+
+bool
+__libc_dynarray_resize (struct dynarray_header *list, size_t size,
+                        void *scratch, size_t element_size)
+{
+  /* The existing allocation provides sufficient room.  */
+  if (size <= list->allocated)
+    {
+      list->used = size;
+      return true;
+    }
+
+  /* Otherwise, use size as the new allocation size.  The caller is
+     expected to provide the final size of the array, so there is no
+     over-allocation here.  */
+
+  size_t new_size_bytes;
+  if (check_mul_overflow_size_t (size, element_size, &new_size_bytes))
+    return false;
+  void *new_array;
+  if (list->array == scratch)
+    {
+      /* The previous array was not heap-allocated.  */
+      new_array = malloc (new_size_bytes);
+      if (new_array != NULL && list->array != NULL)
+        memcpy (new_array, list->array, list->used * element_size);
+    }
+  else
+    new_array = realloc (list->array, new_size_bytes);
+  if (new_array == NULL)
+    return false;
+  list->array = new_array;
+  list->allocated = size;
+  list->used = size;
+  return true;
+}
+libc_hidden_def (__libc_dynarray_resize)
diff --git a/REORG.TODO/malloc/dynarray_resize_clear.c b/REORG.TODO/malloc/dynarray_resize_clear.c
new file mode 100644
index 0000000000..0c4ced1d38
--- /dev/null
+++ b/REORG.TODO/malloc/dynarray_resize_clear.c
@@ -0,0 +1,35 @@
+/* Increase the size of a dynamic array and clear the new part.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <dynarray.h>
+#include <stdlib.h>
+#include <string.h>
+
+bool
+__libc_dynarray_resize_clear (struct dynarray_header *list, size_t size,
+                              void *scratch, size_t element_size)
+{
+  size_t old_size = list->used;
+  if (!__libc_dynarray_resize (list, size, scratch, element_size))
+    return false;
+  /* __libc_dynarray_resize already checked for overflow.  */
+  memset (list->array + (old_size * element_size), 0,
+          (size - old_size) * element_size);
+  return true;
+}
+libc_hidden_def (__libc_dynarray_resize_clear)
diff --git a/REORG.TODO/malloc/hooks.c b/REORG.TODO/malloc/hooks.c
new file mode 100644
index 0000000000..1d80be20d2
--- /dev/null
+++ b/REORG.TODO/malloc/hooks.c
@@ -0,0 +1,585 @@
+/* Malloc implementation for multiple threads without lock contention.
+   Copyright (C) 2001-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+/* What to do if the standard debugging hooks are in place and a
+   corrupt pointer is detected: do nothing (0), print an error message
+   (1), or call abort() (2). */
+
+/* Hooks for debugging versions.  The initial hooks just call the
+   initialization routine, then do the normal work. */
+
+static void *
+malloc_hook_ini (size_t sz, const void *caller)
+{
+  __malloc_hook = NULL;
+  ptmalloc_init ();
+  return __libc_malloc (sz);
+}
+
+static void *
+realloc_hook_ini (void *ptr, size_t sz, const void *caller)
+{
+  __malloc_hook = NULL;
+  __realloc_hook = NULL;
+  ptmalloc_init ();
+  return __libc_realloc (ptr, sz);
+}
+
+static void *
+memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
+{
+  __memalign_hook = NULL;
+  ptmalloc_init ();
+  return __libc_memalign (alignment, sz);
+}
+
+/* Whether we are using malloc checking.  */
+static int using_malloc_checking;
+
+/* A flag that is set by malloc_set_state, to signal that malloc checking
+   must not be enabled on the request from the user (via the MALLOC_CHECK_
+   environment variable).  It is reset by __malloc_check_init to tell
+   malloc_set_state that the user has requested malloc checking.
+
+   The purpose of this flag is to make sure that malloc checking is not
+   enabled when the heap to be restored was constructed without malloc
+   checking, and thus does not contain the required magic bytes.
+   Otherwise the heap would be corrupted by calls to free and realloc.  If
+   it turns out that the heap was created with malloc checking and the
+   user has requested it, malloc_set_state just calls __malloc_check_init
+   again to enable it.  On the other hand, reusing such a heap without
+   further malloc checking is safe.  */
+static int disallow_malloc_check;
+
+/* Activate a standard set of debugging hooks. */
+void
+__malloc_check_init (void)
+{
+  if (disallow_malloc_check)
+    {
+      disallow_malloc_check = 0;
+      return;
+    }
+  using_malloc_checking = 1;
+  __malloc_hook = malloc_check;
+  __free_hook = free_check;
+  __realloc_hook = realloc_check;
+  __memalign_hook = memalign_check;
+}
+
+/* A simple, standard set of debugging hooks.  Overhead is `only' one
+   byte per chunk; still this will catch most cases of double frees or
+   overruns.  The goal here is to avoid obscure crashes due to invalid
+   usage, unlike in the MALLOC_DEBUG code. */
+
+static unsigned char
+magicbyte (const void *p)
+{
+  unsigned char magic;
+
+  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
+  /* Do not return 1.  See the comment in mem2mem_check().  */
+  if (magic == 1)
+    ++magic;
+  return magic;
+}
+
+
+/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
+   highest address of the chunk, downwards.  The end of each block tells
+   us the size of that block, up to the actual size of the requested
+   memory.  Our magic byte is right at the end of the requested size, so we
+   must reach it with this iteration, otherwise we have witnessed a memory
+   corruption.  */
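+/* A hypothetical example: with 20 usable bytes and a 5-byte request,
+   mem2mem_check stores the magic byte at user offset 5 and a single
+   length byte of 14 at user offset 19.  The loop below starts at the
+   top, reads 14, steps down to offset 5, finds the magic byte there,
+   and thus recovers the requested size of 5.  */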
+static size_t
+malloc_check_get_size (mchunkptr p)
+{
+  size_t size;
+  unsigned char c;
+  unsigned char magic = magicbyte (p);
+
+  assert (using_malloc_checking == 1);
+
+  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
+       (c = ((unsigned char *) p)[size]) != magic;
+       size -= c)
+    {
+      if (c <= 0 || size < (c + 2 * SIZE_SZ))
+        {
+          malloc_printerr (check_action, "malloc_check_get_size: memory corruption",
+                           chunk2mem (p),
+			   chunk_is_mmapped (p) ? NULL : arena_for_chunk (p));
+          return 0;
+        }
+    }
+
+  /* chunk2mem size.  */
+  return size - 2 * SIZE_SZ;
+}
+
+/* Instrument a chunk with overrun detector byte(s) and convert it
+   into a user pointer with requested size req_sz. */
+
+static void *
+internal_function
+mem2mem_check (void *ptr, size_t req_sz)
+{
+  mchunkptr p;
+  unsigned char *m_ptr = ptr;
+  size_t max_sz, block_sz, i;
+  unsigned char magic;
+
+  if (!ptr)
+    return ptr;
+
+  p = mem2chunk (ptr);
+  magic = magicbyte (p);
+  max_sz = chunksize (p) - 2 * SIZE_SZ;
+  if (!chunk_is_mmapped (p))
+    max_sz += SIZE_SZ;
+  for (i = max_sz - 1; i > req_sz; i -= block_sz)
+    {
+      block_sz = MIN (i - req_sz, 0xff);
+      /* Don't allow the magic byte to appear in the chain of length bytes.
+         For the following to work, magicbyte cannot return 0x01.  */
+      if (block_sz == magic)
+        --block_sz;
+
+      m_ptr[i] = block_sz;
+    }
+  m_ptr[req_sz] = magic;
+  return (void *) m_ptr;
+}
+
+/* Convert a pointer to be free()d or realloc()ed to a valid chunk
+   pointer.  If the provided pointer is not valid, return NULL. */
+
+static mchunkptr
+internal_function
+mem2chunk_check (void *mem, unsigned char **magic_p)
+{
+  mchunkptr p;
+  INTERNAL_SIZE_T sz, c;
+  unsigned char magic;
+
+  if (!aligned_OK (mem))
+    return NULL;
+
+  p = mem2chunk (mem);
+  sz = chunksize (p);
+  magic = magicbyte (p);
+  if (!chunk_is_mmapped (p))
+    {
+      /* Must be a chunk in conventional heap memory. */
+      int contig = contiguous (&main_arena);
+      if ((contig &&
+           ((char *) p < mp_.sbrk_base ||
+            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
+          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
+          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
+                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
+                               next_chunk (prev_chunk (p)) != p)))
+        return NULL;
+
+      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
+        {
+          if (c == 0 || sz < (c + 2 * SIZE_SZ))
+            return NULL;
+        }
+    }
+  else
+    {
+      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;
+
+      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
+         alignment relative to the beginning of a page.  Check this
+         first. */
+      offset = (unsigned long) mem & page_mask;
+      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
+           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
+           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
+           offset < 0x2000) ||
+          !chunk_is_mmapped (p) || prev_inuse (p) ||
+          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
+          ((prev_size (p) + sz) & page_mask) != 0)
+        return NULL;
+
+      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
+        {
+          if (c == 0 || sz < (c + 2 * SIZE_SZ))
+            return NULL;
+        }
+    }
+  ((unsigned char *) p)[sz] ^= 0xFF;
+  if (magic_p)
+    *magic_p = (unsigned char *) p + sz;
+  return p;
+}
+
+/* Check for corruption of the top chunk, and try to recover if
+   necessary. */
+
+static int
+internal_function
+top_check (void)
+{
+  mchunkptr t = top (&main_arena);
+  char *brk, *new_brk;
+  INTERNAL_SIZE_T front_misalign, sbrk_size;
+  unsigned long pagesz = GLRO (dl_pagesize);
+
+  if (t == initial_top (&main_arena) ||
+      (!chunk_is_mmapped (t) &&
+       chunksize (t) >= MINSIZE &&
+       prev_inuse (t) &&
+       (!contiguous (&main_arena) ||
+        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
+    return 0;
+
+  malloc_printerr (check_action, "malloc: top chunk is corrupt", t,
+		   &main_arena);
+
+  /* Try to set up a new top chunk. */
+  brk = MORECORE (0);
+  front_misalign = (unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK;
+  if (front_misalign > 0)
+    front_misalign = MALLOC_ALIGNMENT - front_misalign;
+  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
+  sbrk_size += pagesz - ((unsigned long) (brk + sbrk_size) & (pagesz - 1));
+  new_brk = (char *) (MORECORE (sbrk_size));
+  if (new_brk == (char *) (MORECORE_FAILURE))
+    {
+      __set_errno (ENOMEM);
+      return -1;
+    }
+  /* Call the `morecore' hook if necessary.  */
+  void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
+  if (hook)
+    (*hook)();
+  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
+
+  top (&main_arena) = (mchunkptr) (brk + front_misalign);
+  set_head (top (&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
+
+  return 0;
+}
+
+static void *
+malloc_check (size_t sz, const void *caller)
+{
+  void *victim;
+
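+  /* sz + 1 == 0 means sz == SIZE_MAX; the extra byte requested below
+     for the magic byte would wrap the request around to zero.  */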
+  if (sz + 1 == 0)
+    {
+      __set_errno (ENOMEM);
+      return NULL;
+    }
+
+  __libc_lock_lock (main_arena.mutex);
+  victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
+  __libc_lock_unlock (main_arena.mutex);
+  return mem2mem_check (victim, sz);
+}
+
+static void
+free_check (void *mem, const void *caller)
+{
+  mchunkptr p;
+
+  if (!mem)
+    return;
+
+  __libc_lock_lock (main_arena.mutex);
+  p = mem2chunk_check (mem, NULL);
+  if (!p)
+    {
+      __libc_lock_unlock (main_arena.mutex);
+
+      malloc_printerr (check_action, "free(): invalid pointer", mem,
+		       &main_arena);
+      return;
+    }
+  if (chunk_is_mmapped (p))
+    {
+      __libc_lock_unlock (main_arena.mutex);
+      munmap_chunk (p);
+      return;
+    }
+  _int_free (&main_arena, p, 1);
+  __libc_lock_unlock (main_arena.mutex);
+}
+
+static void *
+realloc_check (void *oldmem, size_t bytes, const void *caller)
+{
+  INTERNAL_SIZE_T nb;
+  void *newmem = 0;
+  unsigned char *magic_p;
+
+  if (bytes + 1 == 0)
+    {
+      __set_errno (ENOMEM);
+      return NULL;
+    }
+  if (oldmem == 0)
+    return malloc_check (bytes, NULL);
+
+  if (bytes == 0)
+    {
+      free_check (oldmem, NULL);
+      return NULL;
+    }
+  __libc_lock_lock (main_arena.mutex);
+  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
+  __libc_lock_unlock (main_arena.mutex);
+  if (!oldp)
+    {
+      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
+		       &main_arena);
+      return malloc_check (bytes, NULL);
+    }
+  const INTERNAL_SIZE_T oldsize = chunksize (oldp);
+
+  checked_request2size (bytes + 1, nb);
+  __libc_lock_lock (main_arena.mutex);
+
+  if (chunk_is_mmapped (oldp))
+    {
+#if HAVE_MREMAP
+      mchunkptr newp = mremap_chunk (oldp, nb);
+      if (newp)
+        newmem = chunk2mem (newp);
+      else
+#endif
+      {
+        /* Note the extra SIZE_SZ overhead. */
+        if (oldsize - SIZE_SZ >= nb)
+          newmem = oldmem; /* do nothing */
+        else
+          {
+            /* Must alloc, copy, free. */
+            if (top_check () >= 0)
+              newmem = _int_malloc (&main_arena, bytes + 1);
+            if (newmem)
+              {
+                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
+                munmap_chunk (oldp);
+              }
+          }
+      }
+    }
+  else
+    {
+      if (top_check () >= 0)
+        {
+          INTERNAL_SIZE_T nb;
+          checked_request2size (bytes + 1, nb);
+          newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
+        }
+    }
+
+  /* mem2chunk_check changed the magic byte in the old chunk.
+     If newmem is NULL, then the old chunk will still be used though,
+     so we need to invert that change here.  */
+  if (newmem == NULL)
+    *magic_p ^= 0xFF;
+
+  __libc_lock_unlock (main_arena.mutex);
+
+  return mem2mem_check (newmem, bytes);
+}
+
+static void *
+memalign_check (size_t alignment, size_t bytes, const void *caller)
+{
+  void *mem;
+
+  if (alignment <= MALLOC_ALIGNMENT)
+    return malloc_check (bytes, NULL);
+
+  if (alignment < MINSIZE)
+    alignment = MINSIZE;
+
+  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
+     power of 2 and will cause overflow in the check below.  */
+  if (alignment > SIZE_MAX / 2 + 1)
+    {
+      __set_errno (EINVAL);
+      return 0;
+    }
+
+  /* Check for overflow.  */
+  if (bytes > SIZE_MAX - alignment - MINSIZE)
+    {
+      __set_errno (ENOMEM);
+      return 0;
+    }
+
+  /* Make sure alignment is power of 2.  */
+  if (!powerof2 (alignment))
+    {
+      size_t a = MALLOC_ALIGNMENT * 2;
+      while (a < alignment)
+        a <<= 1;
+      alignment = a;
+    }
+
+  __libc_lock_lock (main_arena.mutex);
+  mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
+        NULL;
+  __libc_lock_unlock (main_arena.mutex);
+  return mem2mem_check (mem, bytes);
+}
+
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)
+
+/* Get/set state: malloc_get_state() records the current state of all
+   malloc variables (_except_ for the actual heap contents and `hook'
+   function pointers) in a system dependent, opaque data structure.
+   This data structure is dynamically allocated and can be free()d
+   after use.  malloc_set_state() restores the state of all malloc
+   variables to the previously obtained state.  This is especially
+   useful when using this malloc as part of a shared library, and when
+   the heap contents are saved/restored via some other method.  The
+   primary example for this is GNU Emacs with its `dumping' procedure.
+   `Hook' function pointers are never saved or restored by these
+   functions, with two exceptions: If malloc checking was in use when
+   malloc_get_state() was called, then malloc_set_state() calls
+   __malloc_check_init() if possible; if malloc checking was not in
+   use in the recorded state but the user requested malloc checking,
+   then the hooks are reset to 0.  */
+
+#define MALLOC_STATE_MAGIC   0x444c4541l
+#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
+
+struct malloc_save_state
+{
+  long magic;
+  long version;
+  mbinptr av[NBINS * 2 + 2];
+  char *sbrk_base;
+  int sbrked_mem_bytes;
+  unsigned long trim_threshold;
+  unsigned long top_pad;
+  unsigned int n_mmaps_max;
+  unsigned long mmap_threshold;
+  int check_action;
+  unsigned long max_sbrked_mem;
+  unsigned long max_total_mem;	/* Always 0, for backwards compatibility.  */
+  unsigned int n_mmaps;
+  unsigned int max_n_mmaps;
+  unsigned long mmapped_mem;
+  unsigned long max_mmapped_mem;
+  int using_malloc_checking;
+  unsigned long max_fast;
+  unsigned long arena_test;
+  unsigned long arena_max;
+  unsigned long narenas;
+};
+
+/* Dummy implementation which always fails.  We need to provide this
+   symbol so that existing Emacs binaries continue to work with
+   BIND_NOW.  */
+void *
+attribute_compat_text_section
+malloc_get_state (void)
+{
+  __set_errno (ENOSYS);
+  return NULL;
+}
+compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
+
+int
+attribute_compat_text_section
+malloc_set_state (void *msptr)
+{
+  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
+
+  if (ms->magic != MALLOC_STATE_MAGIC)
+    return -1;
+
+  /* Must fail if the major version is too high. */
+  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
+    return -2;
+
+  /* We do not need to perform locking here because __malloc_set_state
+     must be called before the first call into the malloc subsystem
+     (usually via __malloc_initialize_hook).  pthread_create always
+     calls calloc and thus must be called only afterwards, so there
+     cannot be more than one thread when we reach this point.  */
+
+  /* Disable the malloc hooks (and malloc checking).  */
+  __malloc_hook = NULL;
+  __realloc_hook = NULL;
+  __free_hook = NULL;
+  __memalign_hook = NULL;
+  using_malloc_checking = 0;
+
+  /* Patch the dumped heap.  We no longer try to integrate into the
+     existing heap.  Instead, we mark the existing chunks as mmapped.
+     Together with the update to dumped_main_arena_start and
+     dumped_main_arena_end, realloc and free will recognize these
+     chunks as dumped fake mmapped chunks and never free them.  */
+
+  /* Find the chunk with the lowest address within the heap.  */
+  mchunkptr chunk = NULL;
+  {
+    size_t *candidate = (size_t *) ms->sbrk_base;
+    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
+    while (candidate < end)
+      if (*candidate != 0)
+	{
+	  chunk = mem2chunk ((void *) (candidate + 1));
+	  break;
+	}
+      else
+	++candidate;
+  }
+  if (chunk == NULL)
+    return 0;
+
+  /* Iterate over the dumped heap and patch the chunks so that they
+     are treated as fake mmapped chunks.  */
+  mchunkptr top = ms->av[2];
+  while (chunk < top)
+    {
+      if (inuse (chunk))
+	{
+	  /* Mark chunk as mmapped, to trigger the fallback path.  */
+	  size_t size = chunksize (chunk);
+	  set_head (chunk, size | IS_MMAPPED);
+	}
+      chunk = next_chunk (chunk);
+    }
+
+  /* The dumped fake mmapped chunks all lie in this address range.  */
+  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
+  dumped_main_arena_end = top;
+
+  return 0;
+}
+compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
+
+#endif	/* SHLIB_COMPAT */
+
+/*
+ * Local variables:
+ * c-basic-offset: 2
+ * End:
+ */
diff --git a/REORG.TODO/malloc/malloc-hooks.h b/REORG.TODO/malloc/malloc-hooks.h
new file mode 100644
index 0000000000..6b1d192bcd
--- /dev/null
+++ b/REORG.TODO/malloc/malloc-hooks.h
@@ -0,0 +1,24 @@
+/* Internal declarations of malloc hooks no longer in the public API.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#ifndef _MALLOC_HOOKS_H
+#define _MALLOC_HOOKS_H
+
+void (*__malloc_initialize_hook) (void);
+
+#endif  /* _MALLOC_HOOKS_H */
diff --git a/REORG.TODO/malloc/malloc-internal.h b/REORG.TODO/malloc/malloc-internal.h
new file mode 100644
index 0000000000..dbd801a58e
--- /dev/null
+++ b/REORG.TODO/malloc/malloc-internal.h
@@ -0,0 +1,104 @@
+/* Internal declarations for malloc, for use within libc.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#ifndef _MALLOC_INTERNAL_H
+#define _MALLOC_INTERNAL_H
+
+#include <malloc-machine.h>
+#include <malloc-sysdep.h>
+
+/* INTERNAL_SIZE_T is the word-size used for internal bookkeeping of
+   chunk sizes.
+
+   The default version is the same as size_t.
+
+   While not strictly necessary, it is best to define this as an
+   unsigned type, even if size_t is a signed type. This may avoid some
+   artificial size limitations on some systems.
+
+   On a 64-bit machine, you may be able to reduce malloc overhead by
+   defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
+   expense of not being able to handle more than 2^32 of malloced
+   space. If this limitation is acceptable, you are encouraged to set
+   this unless you are on a platform requiring 16byte alignments. In
+   this case the alignment requirements turn out to negate any
+   potential advantages of decreasing size_t word size.
+
+   Implementors: Beware of the possible combinations of:
+     - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
+       and might be the same width as int or as long
+     - size_t might have a different width and signedness than INTERNAL_SIZE_T
+     - int and long might be 32 or 64 bits, and might be the same width
+
+   To deal with this, most comparisons and difference computations
+   among INTERNAL_SIZE_Ts should cast them to unsigned long, being
+   aware of the fact that casting an unsigned int to a wider long does
+   not sign-extend. (This also makes checking for negative numbers
+   awkward.) Some of these casts result in harmless compiler warnings
+   on some systems.  */
+#ifndef INTERNAL_SIZE_T
+# define INTERNAL_SIZE_T size_t
+#endif
+
+/* The corresponding word size.  */
+#define SIZE_SZ (sizeof (INTERNAL_SIZE_T))
+
+/* MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.  It
+   must be a power of two at least 2 * SIZE_SZ, even on machines for
+   which smaller alignments would suffice. It may be defined as larger
+   than this though. Note however that code and data structures are
+   optimized for the case of 8-byte alignment.  */
+#ifndef MALLOC_ALIGNMENT
+# define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
+                           ? __alignof__ (long double) : 2 * SIZE_SZ)
+#endif
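+/* For example, with an 8-byte size_t and a 16-byte long double
+   alignment this evaluates to 16; with a 4-byte size_t and no wider
+   long double alignment it is 8.  */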
+
+/* The corresponding bit mask value.  */
+#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
+
+
+/* Called in the parent process before a fork.  */
+void __malloc_fork_lock_parent (void) internal_function attribute_hidden;
+
+/* Called in the parent process after a fork.  */
+void __malloc_fork_unlock_parent (void) internal_function attribute_hidden;
+
+/* Called in the child process after a fork.  */
+void __malloc_fork_unlock_child (void) internal_function attribute_hidden;
+
+/* Set *RESULT to LEFT * RIGHT.  Return true if the multiplication
+   overflowed.  */
+static inline bool
+check_mul_overflow_size_t (size_t left, size_t right, size_t *result)
+{
+#if __GNUC__ >= 5
+  return __builtin_mul_overflow (left, right, result);
+#else
+  /* size_t is unsigned so the behavior on overflow is defined.  */
+  *result = left * right;
+  size_t half_size_t = ((size_t) 1) << (8 * sizeof (size_t) / 2);
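+  /* If both factors are below 2^(bits/2), the product cannot
+     overflow, so the division check is needed only in the unlikely
+     case that at least one factor is large.  */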
+  if (__glibc_unlikely ((left | right) >= half_size_t))
+    {
+      if (__glibc_unlikely (right != 0 && *result / right != left))
+        return true;
+    }
+  return false;
+#endif
+}
+
+#endif /* _MALLOC_INTERNAL_H */
diff --git a/REORG.TODO/malloc/malloc.c b/REORG.TODO/malloc/malloc.c
new file mode 100644
index 0000000000..aa45626093
--- /dev/null
+++ b/REORG.TODO/malloc/malloc.c
@@ -0,0 +1,5321 @@
+/* Malloc implementation for multiple threads without lock contention.
+   Copyright (C) 1996-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Wolfram Gloger <wg@malloc.de>
+   and Doug Lea <dl@cs.oswego.edu>, 2001.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+/*
+  This is a version (aka ptmalloc2) of malloc/free/realloc written by
+  Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
+
+  There have been substantial changes made after the integration into
+  glibc in all parts of the code.  Do not look for much commonality
+  with the ptmalloc2 version.
+
+* Version ptmalloc2-20011215
+  based on:
+  VERSION 2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
+
+* Quickstart
+
+  In order to compile this implementation, a Makefile is provided with
+  the ptmalloc2 distribution, which has pre-defined targets for some
+  popular systems (e.g. "make posix" for Posix threads).  All that is
+  typically required with regard to compiler flags is the selection of
+  the thread package via defining one out of USE_PTHREADS, USE_THR or
+  USE_SPROC.  Check the thread-m.h file for what effects this has.
+  Many/most systems will additionally require USE_TSD_DATA_HACK to be
+  defined, so this is the default for "make posix".
+
+* Why use this malloc?
+
+  This is not the fastest, most space-conserving, most portable, or
+  most tunable malloc ever written. However it is among the fastest
+  while also being among the most space-conserving, portable and tunable.
+  Consistent balance across these factors results in a good general-purpose
+  allocator for malloc-intensive programs.
+
+  The main properties of the algorithms are:
+  * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
+    with ties normally decided via FIFO (i.e. least recently used).
+  * For small (<= 64 bytes by default) requests, it is a caching
+    allocator, that maintains pools of quickly recycled chunks.
+  * In between, and for combinations of large and small requests, it does
+    the best it can trying to meet both goals at once.
+  * For very large requests (>= 128KB by default), it relies on system
+    memory mapping facilities, if supported.
+
+  For a longer but slightly out of date high-level description, see
+     http://gee.cs.oswego.edu/dl/html/malloc.html
+
+  You may already be using, by default, a C library containing a malloc
+  that is based on some version of this malloc (for example on
+  Linux). You might still want to use the one in this file in order to
+  customize settings or to avoid overheads associated with library
+  versions.
+
+* Contents, described in more detail in "description of public routines" below.
+
+  Standard (ANSI/SVID/...)  functions:
+    malloc(size_t n);
+    calloc(size_t n_elements, size_t element_size);
+    free(void* p);
+    realloc(void* p, size_t n);
+    memalign(size_t alignment, size_t n);
+    valloc(size_t n);
+    mallinfo()
+    mallopt(int parameter_number, int parameter_value)
+
+  Additional functions:
+    independent_calloc(size_t n_elements, size_t size, void* chunks[]);
+    independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+    pvalloc(size_t n);
+    malloc_trim(size_t pad);
+    malloc_usable_size(void* p);
+    malloc_stats();
+
+* Vital statistics:
+
+  Supported pointer representation:       4 or 8 bytes
+  Supported size_t  representation:       4 or 8 bytes
+       Note that size_t is allowed to be 4 bytes even if pointers are 8.
+       You can adjust this by defining INTERNAL_SIZE_T
+
+  Alignment:                              2 * sizeof(size_t) (default)
+       (i.e., 8 byte alignment with 4byte size_t). This suffices for
+       nearly all current machines and C compilers. However, you can
+       define MALLOC_ALIGNMENT to be wider than this if necessary.
+
+  Minimum overhead per allocated chunk:   4 or 8 bytes
+       Each malloced chunk has a hidden word of overhead holding size
+       and status information.
+
+  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
+			  8-byte ptrs:  24/32 bytes (including 4/8 overhead)
+
+       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
+       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
+       needed; 4 (8) for a trailing size field and 8 (16) bytes for
+       free list pointers. Thus, the minimum allocatable size is
+       16/24/32 bytes.
+
+       Even a request for zero bytes (i.e., malloc(0)) returns a
+       pointer to something of the minimum allocatable size.
+
+       The maximum overhead wastage (i.e., the number of extra bytes
+       allocated beyond what was requested in malloc) is less than or equal
+       to the minimum size, except for requests >= mmap_threshold that
+       are serviced via mmap(), where the worst case wastage is 2 *
+       sizeof(size_t) bytes plus the remainder from a system page (the
+       minimal mmap unit); typically 4096 or 8192 bytes.
+
+  Maximum allocated size:  4-byte size_t: 2^32 minus about two pages
+			   8-byte size_t: 2^64 minus about two pages
+
+       It is assumed that (possibly signed) size_t values suffice to
+       represent chunk sizes. `Possibly signed' is due to the fact
+       that `size_t' may be defined on a system as either a signed or
+       an unsigned type. The ISO C standard says that it must be
+       unsigned, but a few systems are known not to adhere to this.
+       Additionally, even when size_t is unsigned, sbrk (which is by
+       default used to obtain memory from system) accepts signed
+       arguments, and may not be able to handle size_t-wide arguments
+       with negative sign bit.  Generally, values that would
+       appear as negative after accounting for overhead and alignment
+       are supported only via mmap(), which does not have this
+       limitation.
+
+       Requests for sizes outside the allowed range will perform an optional
+       failure action and then return null. (Requests may also
+       fail because a system is out of memory.)
+
+  Thread-safety: thread-safe
+
+  Compliance: I believe it is compliant with the 1997 Single Unix Specification
+       Also SVID/XPG, ANSI C, and probably others as well.
+
+* Synopsis of compile-time options:
+
+    People have reported using previous versions of this malloc on all
+    versions of Unix, sometimes by tweaking some of the defines
+    below. It has been tested most extensively on Solaris and Linux.
+    People also report using it in stand-alone embedded systems.
+
+    The implementation is in straight, hand-tuned ANSI C.  It is not
+    at all modular. (Sorry!)  It uses a lot of macros.  To be at all
+    usable, this code should be compiled using an optimizing compiler
+    (for example gcc -O3) that can simplify expressions and control
+    paths. (FAQ: some macros import variables as arguments rather than
+    declare locals because people reported that some debuggers
+    otherwise get confused.)
+
+    OPTION                     DEFAULT VALUE
+
+    Compilation Environment options:
+
+    HAVE_MREMAP                0
+
+    Changing default word sizes:
+
+    INTERNAL_SIZE_T            size_t
+
+    Configuration and functionality options:
+
+    USE_PUBLIC_MALLOC_WRAPPERS NOT defined
+    USE_MALLOC_LOCK            NOT defined
+    MALLOC_DEBUG               NOT defined
+    REALLOC_ZERO_BYTES_FREES   1
+    TRIM_FASTBINS              0
+
+    Options for customizing MORECORE:
+
+    MORECORE                   sbrk
+    MORECORE_FAILURE           -1
+    MORECORE_CONTIGUOUS        1
+    MORECORE_CANNOT_TRIM       NOT defined
+    MORECORE_CLEARS            1
+    MMAP_AS_MORECORE_SIZE      (1024 * 1024)
+
+    Tuning options that are also dynamically changeable via mallopt:
+
+    DEFAULT_MXFAST             64 (for 32bit), 128 (for 64bit)
+    DEFAULT_TRIM_THRESHOLD     128 * 1024
+    DEFAULT_TOP_PAD            0
+    DEFAULT_MMAP_THRESHOLD     128 * 1024
+    DEFAULT_MMAP_MAX           65536
+
+    There are several other #defined constants and macros that you
+    probably don't want to touch unless you are extending or adapting malloc.  */
+
+/*
+  void* is the pointer type that malloc should say it returns
+*/
+
+#ifndef void
+#define void      void
+#endif /*void*/
+
+#include <stddef.h>   /* for size_t */
+#include <stdlib.h>   /* for getenv(), abort() */
+#include <unistd.h>   /* for __libc_enable_secure */
+
+#include <atomic.h>
+#include <_itoa.h>
+#include <bits/wordsize.h>
+#include <sys/sysinfo.h>
+
+#include <ldsodefs.h>
+
+#include <unistd.h>
+#include <stdio.h>    /* needed for malloc_stats */
+#include <errno.h>
+
+#include <shlib-compat.h>
+
+/* For uintptr_t.  */
+#include <stdint.h>
+
+/* For va_arg, va_start, va_end.  */
+#include <stdarg.h>
+
+/* For MIN, MAX, powerof2.  */
+#include <sys/param.h>
+
+/* For ALIGN_UP et. al.  */
+#include <libc-pointer-arith.h>
+
+#include <malloc/malloc-internal.h>
+
+/*
+  Debugging:
+
+  Because freed chunks may be overwritten with bookkeeping fields, this
+  malloc will often die when freed memory is overwritten by user
+  programs.  This can be very effective (albeit in an annoying way)
+  in helping track down dangling pointers.
+
+  If you compile with -DMALLOC_DEBUG, a number of assertion checks are
+  enabled that will catch more memory errors. You probably won't be
+  able to make much sense of the actual assertion errors, but they
+  should help you locate incorrectly overwritten memory.  The checking
+  is fairly extensive, and will slow down execution
+  noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
+  will attempt to check every non-mmapped allocated and free chunk in
+  the course of computing the summaries. (By nature, mmapped regions
+  cannot be checked very much automatically.)
+
+  Setting MALLOC_DEBUG may also be helpful if you are trying to modify
+  this code. The assertions in the check routines spell out in more
+  detail the assumptions and invariants underlying the algorithms.
+
+  Setting MALLOC_DEBUG does NOT provide an automated mechanism for
+  checking that all accesses to malloced memory stay within their
+  bounds. However, there are several add-ons and adaptations of this
+  or other mallocs available that do this.
+*/
+
+#ifndef MALLOC_DEBUG
+#define MALLOC_DEBUG 0
+#endif
+
+#ifdef NDEBUG
+# define assert(expr) ((void) 0)
+#else
+# define assert(expr) \
+  ((expr)								      \
+   ? ((void) 0)								      \
+   : __malloc_assert (#expr, __FILE__, __LINE__, __func__))
+
+extern const char *__progname;
+
+static void
+__malloc_assert (const char *assertion, const char *file, unsigned int line,
+		 const char *function)
+{
+  (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
+		     __progname, __progname[0] ? ": " : "",
+		     file, line,
+		     function ? function : "", function ? ": " : "",
+		     assertion);
+  fflush (stderr);
+  abort ();
+}
+#endif
+
+
+/*
+  REALLOC_ZERO_BYTES_FREES should be set if a call to
+  realloc with zero bytes should be the same as a call to free.
+  This is required by the C standard. Otherwise, because this malloc
+  returns a unique pointer for malloc(0), realloc(p, 0) does too.
+*/
+
+#ifndef REALLOC_ZERO_BYTES_FREES
+#define REALLOC_ZERO_BYTES_FREES 1
+#endif
+
+/*
+  TRIM_FASTBINS controls whether free() of a very small chunk can
+  immediately lead to trimming. Setting to true (1) can reduce memory
+  footprint, but will almost always slow down programs that use a lot
+  of small chunks.
+
+  Define this only if you are willing to give up some speed to more
+  aggressively reduce system-level memory footprint when releasing
+  memory in programs that use many small chunks.  You can get
+  essentially the same effect by setting MXFAST to 0, but this can
+  lead to even greater slowdowns in programs using many small chunks.
+  TRIM_FASTBINS is an in-between compile-time option that prevents
+  only those chunks bordering topmost memory from being placed in
+  fastbins.
+*/
+
+#ifndef TRIM_FASTBINS
+#define TRIM_FASTBINS  0
+#endif
+
+
+/* Definition for getting more memory from the OS.  */
+#define MORECORE         (*__morecore)
+#define MORECORE_FAILURE 0
+void * __default_morecore (ptrdiff_t);
+void *(*__morecore)(ptrdiff_t) = __default_morecore;
+
+
+#include <string.h>
+
+/*
+  MORECORE-related declarations. By default, rely on sbrk
+*/
+
+
+/*
+  MORECORE is the name of the routine to call to obtain more memory
+  from the system.  See below for general guidance on writing
+  alternative MORECORE functions, as well as a version for WIN32 and a
+  sample version for pre-OSX macos.
+*/
+
+#ifndef MORECORE
+#define MORECORE sbrk
+#endif
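+
+/*
+  Illustration only (a hedged sketch, not part of this file's logic): an
+  alternative MORECORE that carves space out of a static reserve.  The
+  names my_morecore, reserve and reserve_used are hypothetical.  Such a
+  replacement cannot release memory, so MORECORE_CANNOT_TRIM should be
+  defined, and the MORECORE_CONTIGUOUS and MORECORE_CLEARS settings
+  below should be reviewed against its actual behavior.
+
+  static char reserve[16 * 1024 * 1024];
+  static size_t reserve_used;
+
+  void *
+  my_morecore (ptrdiff_t increment)
+  {
+    if (increment < 0
+        || (size_t) increment > sizeof (reserve) - reserve_used)
+      return (void *) MORECORE_FAILURE;
+    void *result = reserve + reserve_used;
+    reserve_used += increment;
+    return result;
+  }
+*/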
+
+/*
+  MORECORE_FAILURE is the value returned upon failure of MORECORE
+  as well as mmap. Since it cannot be an otherwise valid memory address,
+  and must reflect values of standard sys calls, you probably ought not
+  try to redefine it.
+*/
+
+#ifndef MORECORE_FAILURE
+#define MORECORE_FAILURE (-1)
+#endif
+
+/*
+  If MORECORE_CONTIGUOUS is true, take advantage of fact that
+  consecutive calls to MORECORE with positive arguments always return
+  contiguous increasing addresses.  This is true of unix sbrk.  Even
+  if not defined, when regions happen to be contiguous, malloc will
+  permit allocations spanning regions obtained from different
+  calls. But defining this when applicable enables some stronger
+  consistency checks and space efficiencies.
+*/
+
+#ifndef MORECORE_CONTIGUOUS
+#define MORECORE_CONTIGUOUS 1
+#endif
+
+/*
+  Define MORECORE_CANNOT_TRIM if your version of MORECORE
+  cannot release space back to the system when given negative
+  arguments. This is generally necessary only if you are using
+  a hand-crafted MORECORE function that cannot handle negative arguments.
+*/
+
+/* #define MORECORE_CANNOT_TRIM */
+
+/*  MORECORE_CLEARS           (default 1)
+     The degree to which the routine mapped to MORECORE zeroes out
+     memory: never (0), only for newly allocated space (1) or always
+     (2).  The distinction between (1) and (2) is necessary because on
+     some systems, if the application first decrements and then
+     increments the break value, the contents of the reallocated space
+     are unspecified.
+ */
+
+#ifndef MORECORE_CLEARS
+# define MORECORE_CLEARS 1
+#endif
+
+
+/*
+   MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
+   sbrk fails, and mmap is used as a backup.  The value must be a
+   multiple of page size.  This backup strategy generally applies only
+   when systems have "holes" in address space, so sbrk cannot perform
+   contiguous expansion, but there is still space available on system.
+   On systems for which this is known to be useful (i.e. most linux
+   kernels), this occurs only when programs allocate huge amounts of
+   memory.  Between this, and the fact that mmap regions tend to be
+   limited, the size should be large, to avoid too many mmap calls and
+   thus avoid running out of kernel resources.  */
+
+#ifndef MMAP_AS_MORECORE_SIZE
+#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
+#endif
+
+/*
+  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
+  large blocks.
+*/
+
+#ifndef HAVE_MREMAP
+#define HAVE_MREMAP 0
+#endif
+
+/* We may need to support __malloc_initialize_hook for backwards
+   compatibility.  */
+
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
+# define HAVE_MALLOC_INIT_HOOK 1
+#else
+# define HAVE_MALLOC_INIT_HOOK 0
+#endif
+
+
+/*
+  This version of malloc supports the standard SVID/XPG mallinfo
+  routine that returns a struct containing usage properties and
+  statistics. It should work on any SVID/XPG compliant system that has
+  a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
+  install such a thing yourself, cut out the preliminary declarations
+  as described above and below and save them in a malloc.h file. But
+  there's no compelling reason to bother to do this.)
+
+  The main declaration needed is the mallinfo struct that is returned
+  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
+  bunch of fields that are not even meaningful in this version of
+  malloc.  These fields are instead filled by mallinfo() with
+  other numbers that might be of interest.
+*/
+
+
+/* ---------- description of public routines ------------ */
+
+/*
+  malloc(size_t n)
+  Returns a pointer to a newly allocated chunk of at least n bytes, or null
+  if no space is available. Additionally, on failure, errno is
+  set to ENOMEM on ANSI C systems.
+
+  If n is zero, malloc returns a minimum-sized chunk. (The minimum
+  size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
+  systems.)  On most systems, size_t is an unsigned type, so calls
+  with negative arguments are interpreted as requests for huge amounts
+  of space, which will often fail. The maximum supported value of n
+  differs across systems, but is in all cases less than the maximum
+  representable value of a size_t.
+*/
+void*  __libc_malloc(size_t);
+libc_hidden_proto (__libc_malloc)
+
+/*
+  free(void* p)
+  Releases the chunk of memory pointed to by p, that had been previously
+  allocated using malloc or a related routine such as realloc.
+  It has no effect if p is null. It can have arbitrary (i.e., bad!)
+  effects if p has already been freed.
+
+  Unless disabled (using mallopt), freeing very large spaces will,
+  when possible, automatically trigger operations that give
+  back unused memory to the system, thus reducing program footprint.
+*/
+void     __libc_free(void*);
+libc_hidden_proto (__libc_free)
+
+/*
+  calloc(size_t n_elements, size_t element_size);
+  Returns a pointer to n_elements * element_size bytes, with all locations
+  set to zero.
+*/
+void*  __libc_calloc(size_t, size_t);
+
+/*
+  realloc(void* p, size_t n)
+  Returns a pointer to a chunk of size n that contains the same data
+  as does chunk p up to the minimum of (n, p's size) bytes, or null
+  if no space is available.
+
+  The returned pointer may or may not be the same as p. The algorithm
+  prefers extending p when possible, otherwise it employs the
+  equivalent of a malloc-copy-free sequence.
+
+  If p is null, realloc is equivalent to malloc.
+
+  If space is not available, realloc returns null, errno is set (if on
+  ANSI) and p is NOT freed.
+
+  If n is for fewer bytes than already held by p, the newly unused
+  space is lopped off and freed if possible.  Unless the #define
+  REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
+  zero (re)allocates a minimum-sized chunk.
+
+  Large chunks that were internally obtained via mmap will always be
+  grown using malloc-copy-free sequences unless the system supports
+  MREMAP (currently only linux).
+
+  The old unix realloc convention of allowing the last-free'd chunk
+  to be used as an argument to realloc is not supported.
+*/
+void*  __libc_realloc(void*, size_t);
+libc_hidden_proto (__libc_realloc)
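+
+/*
+  Illustration only (a generic, hedged usage sketch): because a failed
+  realloc returns null and leaves p allocated, assign the result to a
+  temporary instead of overwriting p directly:
+
+  void *tmp = realloc (p, new_size);
+  if (tmp != NULL)
+    p = tmp;
+  else
+    handle_allocation_failure ();    (a hypothetical error path)
+*/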
+
+/*
+  memalign(size_t alignment, size_t n);
+  Returns a pointer to a newly allocated chunk of n bytes, aligned
+  in accord with the alignment argument.
+
+  The alignment argument should be a power of two. If the argument is
+  not a power of two, the nearest greater power is used.
+  8-byte alignment is guaranteed by normal malloc calls, so don't
+  bother calling memalign with an argument of 8 or less.
+
+  Overreliance on memalign is a sure way to fragment space.
+*/
+void*  __libc_memalign(size_t, size_t);
+libc_hidden_proto (__libc_memalign)
+
+/*
+  valloc(size_t n);
+  Equivalent to memalign(pagesize, n), where pagesize is the page
+  size of the system. If the pagesize is unknown, 4096 is used.
+*/
+void*  __libc_valloc(size_t);
+
+
+
+/*
+  mallopt(int parameter_number, int parameter_value)
+  Sets tunable parameters. The format is to provide a
+  (parameter-number, parameter-value) pair.  mallopt then sets the
+  corresponding parameter to the argument value if it can (i.e., so
+  long as the value is meaningful), and returns 1 if successful else
+  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
+  normally defined in malloc.h.  Only one of these (M_MXFAST) is used
+  in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
+  so setting them has no effect. But this malloc also supports four
+  other options in mallopt. See below for details.  Briefly, supported
+  parameters are as follows (listed defaults are for "typical"
+  configurations).
+
+  Symbol            param #   default    allowed param values
+  M_MXFAST          1         64         0-80  (0 disables fastbins)
+  M_TRIM_THRESHOLD -1         128*1024   any   (-1U disables trimming)
+  M_TOP_PAD        -2         0          any
+  M_MMAP_THRESHOLD -3         128*1024   any   (or 0 if no MMAP support)
+  M_MMAP_MAX       -4         65536      any   (0 disables use of mmap)
+*/
+int      __libc_mallopt(int, int);
+libc_hidden_proto (__libc_mallopt)
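+
+/*
+  Illustration only (a hedged usage sketch): mallopt takes a symbolic
+  parameter number and the new value, and returns nonzero on success.
+  For example, a program willing to trade speed for a smaller footprint
+  could disable fastbins and mmap use entirely:
+
+  if (mallopt (M_MXFAST, 0) == 0 || mallopt (M_MMAP_MAX, 0) == 0)
+    fputs ("mallopt failed\n", stderr);
+*/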
+
+
+/*
+  mallinfo()
+  Returns (by copy) a struct containing various summary statistics:
+
+  arena:     current total non-mmapped bytes allocated from system
+  ordblks:   the number of free chunks
+  smblks:    the number of fastbin blocks (i.e., small chunks that
+	       have been freed but not yet reused or consolidated)
+  hblks:     current number of mmapped regions
+  hblkhd:    total bytes held in mmapped regions
+  usmblks:   always 0
+  fsmblks:   total bytes held in fastbin blocks
+  uordblks:  current total allocated space (normal or mmapped)
+  fordblks:  total free space
+  keepcost:  the maximum number of bytes that could ideally be released
+	       back to system via malloc_trim. ("ideally" means that
+	       it ignores page restrictions etc.)
+
+  Because these fields are ints, but internal bookkeeping may
+  be kept as longs, the reported values may wrap around zero and
+  thus be inaccurate.
+*/
+struct mallinfo __libc_mallinfo(void);
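+
+/*
+  Illustration only (a hedged usage sketch) of reading these statistics
+  from a program; all fields are plain ints as noted above:
+
+  struct mallinfo mi = mallinfo ();
+  printf ("arena: %d  mmapped: %d  free: %d\n",
+          mi.arena, mi.hblkhd, mi.fordblks);
+*/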
+
+
+/*
+  pvalloc(size_t n);
+  Equivalent to valloc(minimum-page-that-holds(n)), that is,
+  round up n to nearest pagesize.
+ */
+void*  __libc_pvalloc(size_t);
+
+/*
+  malloc_trim(size_t pad);
+
+  If possible, gives memory back to the system (via negative
+  arguments to sbrk) if there is unused memory at the `high' end of
+  the malloc pool. You can call this after freeing large blocks of
+  memory to potentially reduce the system-level memory requirements
+  of a program. However, it cannot guarantee to reduce memory. Under
+  some allocation patterns, some large free blocks of memory will be
+  locked between two used chunks, so they cannot be given back to
+  the system.
+
+  The `pad' argument to malloc_trim represents the amount of free
+  trailing space to leave untrimmed. If this argument is zero,
+  only the minimum amount of memory to maintain internal data
+  structures will be left (one page or less). Non-zero arguments
+  can be supplied to maintain enough trailing space to service
+  future expected allocations without having to re-obtain memory
+  from the system.
+
+  Malloc_trim returns 1 if it actually released any memory, else 0.
+  On systems that do not support "negative sbrks", it will always
+  return 0.
+*/
+int      __malloc_trim(size_t);
+
+/*
+  malloc_usable_size(void* p);
+
+  Returns the number of bytes you can actually use in
+  an allocated chunk, which may be more than you requested (although
+  often not) due to alignment and minimum size constraints.
+  You can use this many bytes without worrying about
+  overwriting other allocated objects. This is not a particularly great
+  programming practice. malloc_usable_size can be more useful in
+  debugging and assertions, for example:
+
+  p = malloc(n);
+  assert(malloc_usable_size(p) >= 256);
+
+*/
+size_t   __malloc_usable_size(void*);
+
+/*
+  malloc_stats();
+  Prints on stderr the amount of space obtained from the system (both
+  via sbrk and mmap), the maximum amount (which may be more than
+  current if malloc_trim and/or munmap got called), and the current
+  number of bytes allocated via malloc (or realloc, etc) but not yet
+  freed. Note that this is the number of bytes allocated, not the
+  number requested. It will be larger than the number requested
+  because of alignment and bookkeeping overhead. Because it includes
+  alignment wastage as being in use, this figure may be greater than
+  zero even when no user-level chunks are allocated.
+
+  The reported current and maximum system memory can be inaccurate if
+  a program makes other calls to system memory allocation functions
+  (normally sbrk) outside of malloc.
+
+  malloc_stats prints only the most commonly interesting statistics.
+  More information can be obtained by calling mallinfo.
+
+*/
+void     __malloc_stats(void);
+
+/*
+  malloc_get_state(void);
+
+  Returns the state of all malloc variables in an opaque data
+  structure.
+*/
+void*  __malloc_get_state(void);
+
+/*
+  malloc_set_state(void* state);
+
+  Restore the state of all malloc variables from data obtained with
+  malloc_get_state().
+*/
+int      __malloc_set_state(void*);
+
+/*
+  posix_memalign(void **memptr, size_t alignment, size_t size);
+
+  POSIX wrapper like memalign(), checking for validity of size.
+*/
+int      __posix_memalign(void **, size_t, size_t);
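+
+/*
+  Illustration only (a hedged usage sketch): unlike memalign, the error
+  code is returned rather than stored in errno, and the alignment must
+  be a power of two that is a multiple of sizeof (void *):
+
+  void *buf;
+  if (posix_memalign (&buf, 64, 4096) != 0)
+    buf = NULL;
+*/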
+
+/* mallopt tuning options */
+
+/*
+  M_MXFAST is the maximum request size used for "fastbins", special bins
+  that hold returned chunks without consolidating their spaces. This
+  enables future requests for chunks of the same size to be handled
+  very quickly, but can increase fragmentation, and thus increase the
+  overall memory footprint of a program.
+
+  This malloc manages fastbins very conservatively yet still
+  efficiently, so fragmentation is rarely a problem for values less
+  than or equal to the default.  The maximum supported value of MXFAST
+  is 80. You wouldn't want it any higher than this anyway.  Fastbins
+  are designed especially for use with many small structs, objects or
+  strings -- the default handles structs/objects/arrays with sizes up
+  to 8 4byte fields, or small strings representing words, tokens,
+  etc. Using fastbins for larger objects normally worsens
+  fragmentation without improving speed.
+
+  M_MXFAST is set in REQUEST size units. It is internally used in
+  chunksize units, which adds padding and alignment.  You can reduce
+  M_MXFAST to 0 to disable all use of fastbins.  This causes the malloc
+  algorithm to be a closer approximation of fifo-best-fit in all cases,
+  not just for larger requests, but will generally cause it to be
+  slower.
+*/
+
+
+/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
+#ifndef M_MXFAST
+#define M_MXFAST            1
+#endif
+
+#ifndef DEFAULT_MXFAST
+#define DEFAULT_MXFAST     (64 * SIZE_SZ / 4)
+#endif
+
+
+/*
+  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
+  to keep before releasing via malloc_trim in free().
+
+  Automatic trimming is mainly useful in long-lived programs.
+  Because trimming via sbrk can be slow on some systems, and can
+  sometimes be wasteful (in cases where programs immediately
+  afterward allocate more large chunks) the value should be high
+  enough so that your overall system performance would improve by
+  releasing this much memory.
+
+  The trim threshold and the mmap control parameters (see below)
+  can be traded off with one another. Trimming and mmapping are
+  two different ways of releasing unused memory back to the
+  system. Between these two, it is often possible to keep
+  system-level demands of a long-lived program down to a bare
+  minimum. For example, in one test suite of sessions measuring
+  the XF86 X server on Linux, using a trim threshold of 128K and a
+  mmap threshold of 192K led to near-minimal long term resource
+  consumption.
+
+  If you are using this malloc in a long-lived program, it should
+  pay to experiment with these values.  As a rough guide, you
+  might set to a value close to the average size of a process
+  (program) running on your system.  Releasing this much memory
+  would allow such a process to run in memory.  Generally, it's
+  worth it to tune for trimming rather than memory mapping when a
+  program undergoes phases where several large chunks are
+  allocated and released in ways that can reuse each other's
+  storage, perhaps mixed with phases where there are no such
+  chunks at all.  And in well-behaved long-lived programs,
+  controlling release of large blocks via trimming versus mapping
+  is usually faster.
+
+  However, in most programs, these parameters serve mainly as
+  protection against the system-level effects of carrying around
+  massive amounts of unneeded memory. Since frequent calls to
+  sbrk, mmap, and munmap otherwise degrade performance, the default
+  parameters are set to relatively high values that serve only as
+  safeguards.
+
+  The trim value must be greater than the page size to have any useful
+  effect.  To disable trimming completely, you can set it to
+  (unsigned long)(-1).
+
+  Trim settings interact with fastbin (MXFAST) settings: Unless
+  TRIM_FASTBINS is defined, automatic trimming never takes place upon
+  freeing a chunk with size less than or equal to MXFAST. Trimming is
+  instead delayed until subsequent freeing of larger chunks. However,
+  you can still force an attempted trim by calling malloc_trim.
+
+  Also, trimming is not generally possible in cases where
+  the main arena is obtained via mmap.
+
+  Note that the trick some people use of mallocing a huge space and
+  then freeing it at program startup, in an attempt to reserve system
+  memory, doesn't have the intended effect under automatic trimming,
+  since that memory will immediately be returned to the system.
+*/
+
+#define M_TRIM_THRESHOLD       -1
+
+#ifndef DEFAULT_TRIM_THRESHOLD
+#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
+#endif
+
+/*
+  M_TOP_PAD is the amount of extra `padding' space to allocate or
+  retain whenever sbrk is called. It is used in two ways internally:
+
+  * When sbrk is called to extend the top of the arena to satisfy
+  a new malloc request, this much padding is added to the sbrk
+  request.
+
+  * When malloc_trim is called automatically from free(),
+  it is used as the `pad' argument.
+
+  In both cases, the actual amount of padding is rounded
+  so that the end of the arena is always a system page boundary.
+
+  The main reason for using padding is to avoid calling sbrk so
+  often. Having even a small pad greatly reduces the likelihood
+  that nearly every malloc request during program start-up (or
+  after trimming) will invoke sbrk, which needlessly wastes
+  time.
+
+  Automatic rounding-up to page-size units is normally sufficient
+  to avoid measurable overhead, so the default is 0.  However, in
+  systems where sbrk is relatively slow, it can pay to increase
+  this value, at the expense of carrying around more memory than
+  the program needs.
+*/
+
+#define M_TOP_PAD              -2
+
+#ifndef DEFAULT_TOP_PAD
+#define DEFAULT_TOP_PAD        (0)
+#endif
+
+/*
+  MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
+  adjusted MMAP_THRESHOLD.
+*/
+
+#ifndef DEFAULT_MMAP_THRESHOLD_MIN
+#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
+#endif
+
+#ifndef DEFAULT_MMAP_THRESHOLD_MAX
+  /* For 32-bit platforms we cannot increase the maximum mmap
+     threshold much because it is also the minimum value for the
+     maximum heap size and its alignment.  Going above 512k (i.e., 1M
+     for new heaps) wastes too much address space.  */
+# if __WORDSIZE == 32
+#  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
+# else
+#  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
+# endif
+#endif
+
+/*
+  M_MMAP_THRESHOLD is the request size threshold for using mmap()
+  to service a request. Requests of at least this size that cannot
+  be allocated using already-existing space will be serviced via mmap.
+  (If enough normal freed space already exists it is used instead.)
+
+  Using mmap segregates relatively large chunks of memory so that
+  they can be individually obtained and released from the host
+  system. A request serviced through mmap is never reused by any
+  other request (at least not directly; the system may just so
+  happen to remap successive requests to the same locations).
+
+  Segregating space in this way has the benefits that:
+
+   1. Mmapped space can ALWAYS be individually released back
+      to the system, which helps keep the system level memory
+      demands of a long-lived program low.
+   2. Mapped memory can never become `locked' between
+      other chunks, as can happen with normally allocated chunks, which
+      means that even trimming via malloc_trim would not release them.
+   3. On some systems with "holes" in address spaces, mmap can obtain
+      memory that sbrk cannot.
+
+  However, it has the disadvantages that:
+
+   1. The space cannot be reclaimed, consolidated, and then
+      used to service later requests, as happens with normal chunks.
+   2. It can lead to more wastage because of mmap page alignment
+      requirements.
+   3. It causes malloc performance to be more dependent on host
+      system memory management support routines which may vary in
+      implementation quality and may impose arbitrary
+      limitations. Generally, servicing a request via normal
+      malloc steps is faster than going through a system's mmap.
+
+  The advantages of mmap nearly always outweigh disadvantages for
+  "large" chunks, but the value of "large" varies across systems.  The
+  default is an empirically derived value that works well in most
+  systems.
+
+
+  Update in 2006:
+  The above was written in 2001. Since then the world has changed a lot.
+  Memory got bigger. Applications got bigger. The virtual address space
+  layout in 32 bit linux changed.
+
+  In the new situation, brk() and mmap space is shared and there are no
+  artificial limits on brk size imposed by the kernel. What is more,
+  applications have started using transient allocations larger than the
+  128Kb that was imagined in 2001.
+
+  The price for mmap is also high now; each time glibc mmaps from the
+  kernel, the kernel is forced to zero out the memory it gives to the
+  application. Zeroing memory is expensive and eats a lot of cache and
+  memory bandwidth. This has nothing to do with the efficiency of the
+  virtual memory system; by doing mmap the kernel just has no choice but
+  to zero.
+
+  In 2001, the kernel had a maximum size for brk() which was about 800
+  megabytes on 32 bit x86, at that point brk() would hit the first
+  mmapped shared libraries and couldn't expand anymore. With current 2.6
+  kernels, the VA space layout is different and brk() and mmap
+  both can span the entire heap at will.
+
+  Rather than using a static threshold for the brk/mmap tradeoff,
+  we are now using a simple dynamic one. The goal is still to avoid
+  fragmentation. The old goals we kept are
+  1) try to get the long lived large allocations to use mmap()
+  2) really large allocations should always use mmap()
+  and we're adding now:
+  3) transient allocations should use brk() to avoid forcing the kernel
+     to zero memory over and over again
+
+  The implementation works with a sliding threshold, which is by default
+  limited to go between 128Kb and 32Mb (64Mb for 64-bit machines) and starts
+  out at 128Kb as per the 2001 default.
+
+  This allows us to satisfy requirement 1) under the assumption that long
+  lived allocations are made early in the process' lifespan, before it has
+  started doing dynamic allocations of the same size (which will
+  increase the threshold).
+
+  The upper bound on the threshold satisfies requirement 2).
+
+  The threshold goes up in value when the application frees memory that was
+  allocated with the mmap allocator. The idea is that once the application
+  starts freeing memory of a certain size, it's highly probable that this is
+  a size the application uses for transient allocations. This estimator
+  is there to satisfy the new third requirement.
+
+*/
+
+#define M_MMAP_THRESHOLD      -3
+
+#ifndef DEFAULT_MMAP_THRESHOLD
+#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
+#endif
+
+/*
+  M_MMAP_MAX is the maximum number of requests to simultaneously
+  service using mmap. This parameter exists because
+  some systems have a limited number of internal tables for
+  use by mmap, and using more than a few of them may degrade
+  performance.
+
+  The default is set to a value that serves only as a safeguard.
+  Setting to 0 disables use of mmap for servicing large requests.
+*/
+
+#define M_MMAP_MAX             -4
+
+#ifndef DEFAULT_MMAP_MAX
+#define DEFAULT_MMAP_MAX       (65536)
+#endif
+
+#include <malloc.h>
+
+#ifndef RETURN_ADDRESS
+#define RETURN_ADDRESS(X_) (NULL)
+#endif
+
+/* On some platforms we can compile internal, not exported functions better.
+   Let the environment provide a macro and define it to be empty if it
+   is not available.  */
+#ifndef internal_function
+# define internal_function
+#endif
+
+/* Forward declarations.  */
+struct malloc_chunk;
+typedef struct malloc_chunk* mchunkptr;
+
+/* Internal routines.  */
+
+static void*  _int_malloc(mstate, size_t);
+static void     _int_free(mstate, mchunkptr, int);
+static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
+			   INTERNAL_SIZE_T);
+static void*  _int_memalign(mstate, size_t, size_t);
+static void*  _mid_memalign(size_t, size_t, void *);
+
+static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
+
+static void* internal_function mem2mem_check(void *p, size_t sz);
+static int internal_function top_check(void);
+static void internal_function munmap_chunk(mchunkptr p);
+#if HAVE_MREMAP
+static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
+#endif
+
+static void*   malloc_check(size_t sz, const void *caller);
+static void      free_check(void* mem, const void *caller);
+static void*   realloc_check(void* oldmem, size_t bytes,
+			       const void *caller);
+static void*   memalign_check(size_t alignment, size_t bytes,
+				const void *caller);
+
+/* ------------------ MMAP support ------------------  */
+
+
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+# define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifndef MAP_NORESERVE
+# define MAP_NORESERVE 0
+#endif
+
+#define MMAP(addr, size, prot, flags) \
+ __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
+
+
+/*
+  -----------------------  Chunk representations -----------------------
+*/
+
+
+/*
+  This struct declaration is misleading (but accurate and necessary).
+  It declares a "view" into memory allowing access to necessary
+  fields at known offsets from a given base. See explanation below.
+*/
+
+struct malloc_chunk {
+
+  INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
+  INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */
+
+  struct malloc_chunk* fd;         /* double links -- used only if free. */
+  struct malloc_chunk* bk;
+
+  /* Only used for large blocks: pointer to next larger size.  */
+  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
+  struct malloc_chunk* bk_nextsize;
+};
+
+
+/*
+   malloc_chunk details:
+
+    (The following includes lightly edited explanations by Colin Plumb.)
+
+    Chunks of memory are maintained using a `boundary tag' method as
+    described in e.g., Knuth or Standish.  (See the paper by Paul
+    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
+    survey of such techniques.)  Sizes of free chunks are stored both
+    in the front of each chunk and at the end.  This makes
+    consolidating fragmented chunks into bigger chunks very fast.  The
+    size fields also hold bits representing whether chunks are free or
+    in use.
+
+    An allocated chunk looks like this:
+
+
+    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             Size of previous chunk, if unallocated (P clear)  |
+	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             Size of chunk, in bytes                     |A|M|P|
+      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             User data starts here...                          .
+	    .                                                               .
+	    .             (malloc_usable_size() bytes)                      .
+	    .                                                               |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             (size of chunk, but used for application data)    |
+	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             Size of next chunk, in bytes                |A|0|1|
+	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+    Where "chunk" is the front of the chunk for the purpose of most of
+    the malloc code, but "mem" is the pointer that is returned to the
+    user.  "Nextchunk" is the beginning of the next contiguous chunk.
+
+    Chunks always begin on even word boundaries, so the mem portion
+    (which is returned to the user) is also on an even word boundary, and
+    thus at least double-word aligned.
+
+    Free chunks are stored in circular doubly-linked lists, and look like this:
+
+    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             Size of previous chunk, if unallocated (P clear)  |
+	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `head:' |             Size of chunk, in bytes                     |A|0|P|
+      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             Forward pointer to next chunk in list             |
+	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             Back pointer to previous chunk in list            |
+	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             Unused space (may be 0 bytes long)                .
+	    .                                                               .
+	    .                                                               |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `foot:' |             Size of chunk, in bytes                           |
+	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	    |             Size of next chunk, in bytes                |A|0|0|
+	    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
+    chunk size (which is always a multiple of two words), is an in-use
+    bit for the *previous* chunk.  If that bit is *clear*, then the
+    word before the current chunk size contains the previous chunk
+    size, and can be used to find the front of the previous chunk.
+    The very first chunk allocated always has this bit set,
+    preventing access to non-existent (or non-owned) memory. If
+    prev_inuse is set for any given chunk, then you CANNOT determine
+    the size of the previous chunk, and might even get a memory
+    addressing fault when trying to do so.
+
+    The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
+    main arena, described by the main_arena variable.  When additional
+    threads are spawned, each thread receives its own arena (up to a
+    configurable limit, after which arenas are reused for multiple
+    threads), and the chunks in these arenas have the A bit set.  To
+    find the arena for a chunk on such a non-main arena, heap_for_ptr
+    performs a bit mask operation and indirection through the ar_ptr
+    member of the per-heap header heap_info (see arena.c).
+
+    Note that the `foot' of the current chunk is actually represented
+    as the prev_size of the NEXT chunk. This makes it easier to
+    deal with alignments etc but can be very confusing when trying
+    to extend or adapt this code.
+
+    The three exceptions to all this are:
+
+     1. The special chunk `top' doesn't bother using the
+	trailing size field since there is no next contiguous chunk
+	that would have to index off it. After initialization, `top'
+	is forced to always exist.  If it would become less than
+	MINSIZE bytes long, it is replenished.
+
+     2. Chunks allocated via mmap, which have the second-lowest-order
+	bit M (IS_MMAPPED) set in their size fields.  Because they are
+	allocated one-by-one, each must contain its own trailing size
+	field.  If the M bit is set, the other bits are ignored
+	(because mmapped chunks are neither in an arena, nor adjacent
+	to a freed chunk).  The M bit is also used for chunks which
+	originally came from a dumped heap via malloc_set_state in
+	hooks.c.
+
+     3. Chunks in fastbins are treated as allocated chunks from the
+	point of view of the chunk allocator.  They are consolidated
+	with their neighbors only in bulk, in malloc_consolidate.
+*/
+
+/*
+  ---------- Size and alignment checks and conversions ----------
+*/
+
+/* conversion from malloc headers to user pointers, and back */
+
+#define chunk2mem(p)   ((void*)((char*)(p) + 2*SIZE_SZ))
+#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
+
+/* The smallest possible chunk */
+#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+
+#define MINSIZE  \
+  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
+
+/* Check if m has acceptable alignment */
+
+#define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
+
+#define misaligned_chunk(p) \
+  ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
+   & MALLOC_ALIGN_MASK)
+
+
+/*
+   Check if a request is so large that it would wrap around zero when
+   padded and aligned. To simplify some other code, the bound is made
+   low enough so that adding MINSIZE will also not wrap around zero.
+ */
+
+#define REQUEST_OUT_OF_RANGE(req)                                 \
+  ((unsigned long) (req) >=						      \
+   (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
+
+/* pad request bytes into a usable size -- internal version */
+
+#define request2size(req)                                         \
+  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
+   MINSIZE :                                                      \
+   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
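+
+/*
+  Worked example, assuming a common 64-bit configuration (SIZE_SZ == 8,
+  MALLOC_ALIGNMENT == 16, hence MALLOC_ALIGN_MASK == 15, MINSIZE == 32):
+
+    request2size (1)   -> 1 + 8 + 15 = 24, below MINSIZE, so 32
+    request2size (24)  -> (24 + 8 + 15) & ~15 = 32
+    request2size (100) -> (100 + 8 + 15) & ~15 = 112
+
+  Only one size word of overhead is added because an in-use chunk may
+  borrow the prev_size field of the following chunk for user data (see
+  the chunk diagrams above).
+*/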
+
+/*  Same, except also perform argument check */
+
+#define checked_request2size(req, sz)                             \
+  if (REQUEST_OUT_OF_RANGE (req)) {					      \
+      __set_errno (ENOMEM);						      \
+      return 0;								      \
+    }									      \
+  (sz) = request2size (req);
+
+/*
+   --------------- Physical chunk operations ---------------
+ */
+
+
+/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
+#define PREV_INUSE 0x1
+
+/* extract inuse bit of previous chunk */
+#define prev_inuse(p)       ((p)->mchunk_size & PREV_INUSE)
+
+
+/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
+#define IS_MMAPPED 0x2
+
+/* check for mmap()'ed chunk */
+#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
+
+
+/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
+   from a non-main arena.  This is only set immediately before handing
+   the chunk to the user, if necessary.  */
+#define NON_MAIN_ARENA 0x4
+
+/* Check for chunk from main arena.  */
+#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
+
+/* Mark a chunk as not being on the main arena.  */
+#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
+
+
+/*
+   Bits to mask off when extracting size
+
+   Note: IS_MMAPPED is intentionally not masked off from size field in
+   macros for which mmapped chunks should never be seen. This should
+   cause helpful core dumps to occur if it is tried by accident by
+   people extending or adapting this malloc.
+ */
+#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
+
+/* Get size, ignoring use bits */
+#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
+
+/* Like chunksize, but do not mask SIZE_BITS.  */
+#define chunksize_nomask(p)         ((p)->mchunk_size)
+
+/* Ptr to next physical malloc_chunk. */
+#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
+
+/* Size of the chunk below P.  Only valid if prev_inuse (P).  */
+#define prev_size(p) ((p)->mchunk_prev_size)
+
+/* Set the size of the chunk below P.  Only valid if prev_inuse (P).  */
+#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
+
+/* Ptr to previous physical malloc_chunk.  Only valid if prev_inuse (P).  */
+#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
+
+/* Treat space at ptr + offset as a chunk */
+#define chunk_at_offset(p, s)  ((mchunkptr) (((char *) (p)) + (s)))
+
+/* extract p's inuse bit */
+#define inuse(p)							      \
+  ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
+
+/* set/clear chunk as being inuse without otherwise disturbing */
+#define set_inuse(p)							      \
+  ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
+
+#define clear_inuse(p)							      \
+  ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
+
+
+/* check/set/clear inuse bits in known places */
+#define inuse_bit_at_offset(p, s)					      \
+  (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
+
+#define set_inuse_bit_at_offset(p, s)					      \
+  (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
+
+#define clear_inuse_bit_at_offset(p, s)					      \
+  (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
+
+
+/* Set size at head, without disturbing its use bit */
+#define set_head_size(p, s)  ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
+
+/* Set size/use field */
+#define set_head(p, s)       ((p)->mchunk_size = (s))
+
+/* Set size at footer (only when chunk is not in use) */
+#define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
+
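+/*
+  Illustration only (a hedged sketch): with the macros above, code in
+  this file can walk physically contiguous chunks and test their flag
+  bits.  For a chunk p that is not mmapped:
+
+  INTERNAL_SIZE_T sz = chunksize (p);
+  mchunkptr next = next_chunk (p);
+  mchunkptr prev = prev_inuse (p) ? NULL : prev_chunk (p);
+
+  chunksize strips the flag bits, and prev_chunk is meaningful only
+  while the PREV_INUSE bit of p is clear.
+*/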
+
+#pragma GCC poison mchunk_size
+#pragma GCC poison mchunk_prev_size
+
+/*
+   -------------------- Internal data structures --------------------
+
+   All internal state is held in an instance of malloc_state defined
+   below. There are no other static variables, except in two optional
+   cases:
+ * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
+ * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
+     for mmap.
+
+   Beware of lots of tricks that minimize the total bookkeeping space
+   requirements. The result is a little over 1K bytes (for 4byte
+   pointers and size_t.)
+ */
+
+/*
+   Bins
+
+    An array of bin headers for free chunks. Each bin is doubly
+    linked.  The bins are approximately proportionally (log) spaced.
+    There are a lot of these bins (128). This may look excessive, but
+    works very well in practice.  Most bins hold sizes that are
+    unusual as malloc request sizes, but are more usual for fragments
+    and consolidated sets of chunks, which is what these bins hold, so
+    they can be found quickly.  All procedures maintain the invariant
+    that no consolidated chunk physically borders another one, so each
+    chunk in a list is known to be preceded and followed by either
+    inuse chunks or the ends of memory.
+
+    Chunks in bins are kept in size order, with ties going to the
+    approximately least recently used chunk. Ordering isn't needed
+    for the small bins, which all contain the same-sized chunks, but
+    facilitates best-fit allocation for larger chunks. These lists
+    are just sequential. Keeping them in order almost never requires
+    enough traversal to warrant using fancier ordered data
+    structures.
+
+    Chunks of the same size are linked with the most
+    recently freed at the front, and allocations are taken from the
+    back.  This results in LRU (FIFO) allocation order, which tends
+    to give each chunk an equal opportunity to be consolidated with
+    adjacent freed chunks, resulting in larger free chunks and less
+    fragmentation.
+
+    To simplify use in double-linked lists, each bin header acts
+    as a malloc_chunk. This avoids special-casing for headers.
+    But to conserve space and improve locality, we allocate
+    only the fd/bk pointers of bins, and then use repositioning tricks
+    to treat these as the fields of a malloc_chunk*.
+ */
+
+typedef struct malloc_chunk *mbinptr;
+
+/* addressing -- note that bin_at(0) does not exist */
+#define bin_at(m, i) \
+  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))			      \
+             - offsetof (struct malloc_chunk, fd))
+
+/* analog of ++bin */
+#define next_bin(b)  ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
+
+/* Reminders about list directionality within bins */
+#define first(b)     ((b)->fd)
+#define last(b)      ((b)->bk)
+
+/* Take a chunk off a bin list */
+#define unlink(AV, P, BK, FD) {                                            \
+    if (__builtin_expect (chunksize(P) != prev_size (next_chunk(P)), 0))      \
+      malloc_printerr (check_action, "corrupted size vs. prev_size", P, AV);  \
+    FD = P->fd;								      \
+    BK = P->bk;								      \
+    if (__builtin_expect (FD->bk != P || BK->fd != P, 0))		      \
+      malloc_printerr (check_action, "corrupted double-linked list", P, AV);  \
+    else {								      \
+        FD->bk = BK;							      \
+        BK->fd = FD;							      \
+        if (!in_smallbin_range (chunksize_nomask (P))			      \
+            && __builtin_expect (P->fd_nextsize != NULL, 0)) {		      \
+	    if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0)	      \
+		|| __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0))    \
+	      malloc_printerr (check_action,				      \
+			       "corrupted double-linked list (not small)",    \
+			       P, AV);					      \
+            if (FD->fd_nextsize == NULL) {				      \
+                if (P->fd_nextsize == P)				      \
+                  FD->fd_nextsize = FD->bk_nextsize = FD;		      \
+                else {							      \
+                    FD->fd_nextsize = P->fd_nextsize;			      \
+                    FD->bk_nextsize = P->bk_nextsize;			      \
+                    P->fd_nextsize->bk_nextsize = FD;			      \
+                    P->bk_nextsize->fd_nextsize = FD;			      \
+                  }							      \
+              } else {							      \
+                P->fd_nextsize->bk_nextsize = P->bk_nextsize;		      \
+                P->bk_nextsize->fd_nextsize = P->fd_nextsize;		      \
+              }								      \
+          }								      \
+      }									      \
+}
+
+/*
+   Indexing
+
+    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
+    8 bytes apart. Larger bins are approximately logarithmically spaced:
+
+    64 bins of size       8
+    32 bins of size      64
+    16 bins of size     512
+     8 bins of size    4096
+     4 bins of size   32768
+     2 bins of size  262144
+     1 bin  of size what's left
+
+    There is actually a little bit of slop in the numbers in bin_index
+    for the sake of speed. This makes no difference elsewhere.
+
+    The bins top out around 1MB because we expect to service large
+    requests via mmap.
+
+    Bin 0 does not exist.  Bin 1 is the unordered list; if that would be
+    a valid chunk size the small bins are bumped up one.
+ */
+
+#define NBINS             128
+#define NSMALLBINS         64
+#define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
+#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
+#define MIN_LARGE_SIZE    ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
+
+#define in_smallbin_range(sz)  \
+  ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
+
+#define smallbin_index(sz) \
+  ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
+   + SMALLBIN_CORRECTION)
+
+#define largebin_index_32(sz)                                                \
+  (((((unsigned long) (sz)) >> 6) <= 38) ?  56 + (((unsigned long) (sz)) >> 6) :\
+   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
+   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
+   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
+   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
+   126)
+
+#define largebin_index_32_big(sz)                                            \
+  (((((unsigned long) (sz)) >> 6) <= 45) ?  49 + (((unsigned long) (sz)) >> 6) :\
+   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
+   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
+   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
+   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
+   126)
+
+// XXX It remains to be seen whether it is good to keep the widths of
+// XXX the buckets the same or whether it should be scaled by a factor
+// XXX of two as well.
+#define largebin_index_64(sz)                                                \
+  (((((unsigned long) (sz)) >> 6) <= 48) ?  48 + (((unsigned long) (sz)) >> 6) :\
+   ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
+   ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
+   ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
+   ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
+   126)
+
+#define largebin_index(sz) \
+  (SIZE_SZ == 8 ? largebin_index_64 (sz)                                     \
+   : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz)                     \
+   : largebin_index_32 (sz))
+
+#define bin_index(sz) \
+  ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
+
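+/*
+  Worked example, assuming a typical 64-bit configuration (SIZE_SZ == 8,
+  SMALLBIN_WIDTH == 16, MIN_LARGE_SIZE == 1024):
+
+    bin_index (32)   == smallbin_index (32)   ==  2
+    bin_index (512)  == smallbin_index (512)  == 32
+    bin_index (1024) == largebin_index (1024) == 64
+    bin_index (2048) == largebin_index (2048) == 80
+*/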
+
+/*
+   Unsorted chunks
+
+    All remainders from chunk splits, as well as all returned chunks,
+    are first placed in the "unsorted" bin. They are then placed
+    in regular bins after malloc gives them ONE chance to be used before
+    binning. So, basically, the unsorted_chunks list acts as a queue,
+    with chunks being placed on it in free (and malloc_consolidate),
+    and taken off (to be either used or placed in bins) in malloc.
+
+    The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
+    does not have to be taken into account in size comparisons.
+ */
+
+/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
+#define unsorted_chunks(M)          (bin_at (M, 1))
+
+/*
+   Top
+
+    The top-most available chunk (i.e., the one bordering the end of
+    available memory) is treated specially. It is never included in
+    any bin, is used only if no other chunk is available, and is
+    released back to the system if it is very large (see
+    M_TRIM_THRESHOLD).  Because top initially
+    points to its own bin with initial zero size, thus forcing
+    extension on the first malloc request, we avoid having any special
+    code in malloc to check whether it even exists yet. But we still
+    need to do so when getting memory from system, so we make
+    initial_top treat the bin as a legal but unusable chunk during the
+    interval between initialization and the first call to
+    sysmalloc. (This is somewhat delicate, since it relies on
+    the 2 preceding words to be zero during this interval as well.)
+ */
+
+/* Conveniently, the unsorted bin can be used as dummy top on first call */
+#define initial_top(M)              (unsorted_chunks (M))
+
+/*
+   Binmap
+
+    To help compensate for the large number of bins, a one-level index
+    structure is used for bin-by-bin searching.  `binmap' is a
+    bitvector recording whether bins are definitely empty so they can
+    be skipped over during traversals.  The bits are NOT always
+    cleared as soon as bins are empty, but instead only
+    when they are noticed to be empty during traversal in malloc.
+ */
+
+/* Conservatively use 32 bits per map word, even if on 64bit system */
+#define BINMAPSHIFT      5
+#define BITSPERMAP       (1U << BINMAPSHIFT)
+#define BINMAPSIZE       (NBINS / BITSPERMAP)
+
+#define idx2block(i)     ((i) >> BINMAPSHIFT)
+#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
+
+#define mark_bin(m, i)    ((m)->binmap[idx2block (i)] |= idx2bit (i))
+#define unmark_bin(m, i)  ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
+#define get_binmap(m, i)  ((m)->binmap[idx2block (i)] & idx2bit (i))
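+
+/*
+  Worked example: bin 70 lives in binmap word idx2block (70) == 70 >> 5 == 2,
+  and its bit within that word is idx2bit (70) == 1U << (70 & 31) == 1U << 6.
+*/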
+
+/*
+   Fastbins
+
+    An array of lists holding recently freed small chunks.  Fastbins
+    are not doubly linked.  It is faster to single-link them, and
+    since chunks are never removed from the middles of these lists,
+    double linking is not necessary. Also, unlike regular bins, they
+    are not even processed in FIFO order (they use faster LIFO) since
+    ordering doesn't much matter in the transient contexts in which
+    fastbins are normally used.
+
+    Chunks in fastbins keep their inuse bit set, so they cannot
+    be consolidated with other free chunks. malloc_consolidate
+    releases all chunks in fastbins and consolidates them with
+    other free chunks.
+ */
+
+typedef struct malloc_chunk *mfastbinptr;
+#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
+
+/* offset 2 to use otherwise unindexable first 2 bins */
+#define fastbin_index(sz) \
+  ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
+
+
+/* The maximum fastbin request size we support */
+#define MAX_FAST_SIZE     (80 * SIZE_SZ / 4)
+
+#define NFASTBINS  (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
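+
+/*
+  Worked example, assuming a typical 64-bit configuration (SIZE_SZ == 8):
+  chunk sizes in fastbins step by 16 bytes, so fastbin_index (32) == 0,
+  fastbin_index (48) == 1, and so on.  MAX_FAST_SIZE is 160, and
+  request2size (160) == 176, giving fastbin_index (176) == 9 and
+  NFASTBINS == 10.
+*/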
+
+/*
+   FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
+   that triggers automatic consolidation of possibly-surrounding
+   fastbin chunks. This is a heuristic, so the exact value should not
+   matter too much. It is defined at half the default trim threshold as a
+   compromise heuristic to only attempt consolidation if it is likely
+   to lead to trimming. However, it is not dynamically tunable, since
+   consolidation reduces fragmentation surrounding large chunks even
+   if trimming is not used.
+ */
+
+#define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
+
+/*
+   Since the lowest 2 bits in max_fast don't matter in size comparisons,
+   they are used as flags.
+ */
+
+/*
+   FASTCHUNKS_BIT held in max_fast indicates that there are probably
+   some fastbin chunks. It is set true on entering a chunk into any
+   fastbin, and cleared only in malloc_consolidate.
+
+   The truth value is inverted so that have_fastchunks will be true
+   upon startup (since statics are zero-filled), simplifying
+   initialization checks.
+ */
+
+#define FASTCHUNKS_BIT        (1U)
+
+#define have_fastchunks(M)     (((M)->flags & FASTCHUNKS_BIT) == 0)
+#define clear_fastchunks(M)    catomic_or (&(M)->flags, FASTCHUNKS_BIT)
+#define set_fastchunks(M)      catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
+
+/*
+   NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
+   regions.  Otherwise, contiguity is exploited in merging together,
+   when possible, results from consecutive MORECORE calls.
+
+   The initial value comes from MORECORE_CONTIGUOUS, but is
+   changed dynamically if mmap is ever used as an sbrk substitute.
+ */
+
+#define NONCONTIGUOUS_BIT     (2U)
+
+#define contiguous(M)          (((M)->flags & NONCONTIGUOUS_BIT) == 0)
+#define noncontiguous(M)       (((M)->flags & NONCONTIGUOUS_BIT) != 0)
+#define set_noncontiguous(M)   ((M)->flags |= NONCONTIGUOUS_BIT)
+#define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
+
+/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
+   arena.  Such an arena is no longer used to allocate chunks.  Chunks
+   allocated in that arena before detecting corruption are not freed.  */
+
+#define ARENA_CORRUPTION_BIT (4U)
+
+#define arena_is_corrupt(A)	(((A)->flags & ARENA_CORRUPTION_BIT))
+#define set_arena_corrupt(A)	((A)->flags |= ARENA_CORRUPTION_BIT)
+
+/*
+   Set value of max_fast.
+   Use impossibly small value if 0.
+   Precondition: there are no existing fastbin chunks.
+   Setting the value clears fastchunk bit but preserves noncontiguous bit.
+ */
+
+#define set_max_fast(s) \
+  global_max_fast = (((s) == 0)						      \
+                     ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
+#define get_max_fast() global_max_fast
+
+
+/*
+   ----------- Internal state representation and initialization -----------
+ */
+
+struct malloc_state
+{
+  /* Serialize access.  */
+  __libc_lock_define (, mutex);
+
+  /* Flags (formerly in max_fast).  */
+  int flags;
+
+  /* Fastbins */
+  mfastbinptr fastbinsY[NFASTBINS];
+
+  /* Base of the topmost chunk -- not otherwise kept in a bin */
+  mchunkptr top;
+
+  /* The remainder from the most recent split of a small request */
+  mchunkptr last_remainder;
+
+  /* Normal bins packed as described above */
+  mchunkptr bins[NBINS * 2 - 2];
+
+  /* Bitmap of bins */
+  unsigned int binmap[BINMAPSIZE];
+
+  /* Linked list */
+  struct malloc_state *next;
+
+  /* Linked list for free arenas.  Access to this field is serialized
+     by free_list_lock in arena.c.  */
+  struct malloc_state *next_free;
+
+  /* Number of threads attached to this arena.  0 if the arena is on
+     the free list.  Access to this field is serialized by
+     free_list_lock in arena.c.  */
+  INTERNAL_SIZE_T attached_threads;
+
+  /* Memory allocated from the system in this arena.  */
+  INTERNAL_SIZE_T system_mem;
+  INTERNAL_SIZE_T max_system_mem;
+};
+
+struct malloc_par
+{
+  /* Tunable parameters */
+  unsigned long trim_threshold;
+  INTERNAL_SIZE_T top_pad;
+  INTERNAL_SIZE_T mmap_threshold;
+  INTERNAL_SIZE_T arena_test;
+  INTERNAL_SIZE_T arena_max;
+
+  /* Memory map support */
+  int n_mmaps;
+  int n_mmaps_max;
+  int max_n_mmaps;
+  /* the mmap_threshold is dynamic, until the user sets
+     it manually, at which point we need to disable any
+     dynamic behavior. */
+  int no_dyn_threshold;
+
+  /* Statistics */
+  INTERNAL_SIZE_T mmapped_mem;
+  INTERNAL_SIZE_T max_mmapped_mem;
+
+  /* First address handed out by MORECORE/sbrk.  */
+  char *sbrk_base;
+};
+
+/* There are several instances of this struct ("arenas") in this
+   malloc.  If you are adapting this malloc in a way that does NOT use
+   a static or mmapped malloc_state, you MUST explicitly zero-fill it
+   before using. This malloc relies on the property that malloc_state
+   is initialized to all zeroes (as is true of C statics).  */
+
+static struct malloc_state main_arena =
+{
+  .mutex = _LIBC_LOCK_INITIALIZER,
+  .next = &main_arena,
+  .attached_threads = 1
+};
+
+/* These variables are used for undumping support.  Chunks are marked
+   as using mmap, but we leave them alone if they fall into this
+   range.  NB: The chunk size for these chunks only includes the
+   initial size field (of SIZE_SZ bytes), there is no trailing size
+   field (unlike with regular mmapped chunks).  */
+static mchunkptr dumped_main_arena_start; /* Inclusive.  */
+static mchunkptr dumped_main_arena_end;   /* Exclusive.  */
+
+/* True if the pointer falls into the dumped arena.  Use this after
+   chunk_is_mmapped indicates a chunk is mmapped.  */
+#define DUMPED_MAIN_ARENA_CHUNK(p) \
+  ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
+
+/* There is only one instance of the malloc parameters.  */
+
+static struct malloc_par mp_ =
+{
+  .top_pad = DEFAULT_TOP_PAD,
+  .n_mmaps_max = DEFAULT_MMAP_MAX,
+  .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
+  .trim_threshold = DEFAULT_TRIM_THRESHOLD,
+#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
+  .arena_test = NARENAS_FROM_NCORES (1)
+};
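+
+/* For illustration: on an LP64 system (sizeof (long) == 8) the initializer
+   above sets arena_test to NARENAS_FROM_NCORES (1) == 8; on a 32-bit system
+   it is 2.  arena_max is left at 0 until set explicitly.  */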
+
+/* Maximum size of memory handled in fastbins.  */
+static INTERNAL_SIZE_T global_max_fast;
+
+/*
+   Initialize a malloc_state struct.
+
+   This is called only from within malloc_consolidate, which needs
+   to be called in the same contexts anyway.  It is never called directly
+   outside of malloc_consolidate because some optimizing compilers try
+   to inline it at all call points, which turns out not to be an
+   optimization at all. (Inlining it in malloc_consolidate is fine though.)
+ */
+
+static void
+malloc_init_state (mstate av)
+{
+  int i;
+  mbinptr bin;
+
+  /* Establish circular links for normal bins */
+  for (i = 1; i < NBINS; ++i)
+    {
+      bin = bin_at (av, i);
+      bin->fd = bin->bk = bin;
+    }
+
+#if MORECORE_CONTIGUOUS
+  if (av != &main_arena)
+#endif
+  set_noncontiguous (av);
+  if (av == &main_arena)
+    set_max_fast (DEFAULT_MXFAST);
+  av->flags |= FASTCHUNKS_BIT;
+
+  av->top = initial_top (av);
+}
+
+/*
+   Other internal utilities operating on mstates
+ */
+
+static void *sysmalloc (INTERNAL_SIZE_T, mstate);
+static int      systrim (size_t, mstate);
+static void     malloc_consolidate (mstate);
+
+
+/* -------------- Early definitions for debugging hooks ---------------- */
+
+/* Define and initialize the hook variables.  These weak definitions must
+   appear before any use of the variables in a function (arena.c uses one).  */
+#ifndef weak_variable
+/* In GNU libc we want the hook variables to be weak definitions to
+   avoid a problem with Emacs.  */
+# define weak_variable weak_function
+#endif
+
+/* Forward declarations.  */
+static void *malloc_hook_ini (size_t sz,
+                              const void *caller) __THROW;
+static void *realloc_hook_ini (void *ptr, size_t sz,
+                               const void *caller) __THROW;
+static void *memalign_hook_ini (size_t alignment, size_t sz,
+                                const void *caller) __THROW;
+
+#if HAVE_MALLOC_INIT_HOOK
+void weak_variable (*__malloc_initialize_hook) (void) = NULL;
+compat_symbol (libc, __malloc_initialize_hook,
+	       __malloc_initialize_hook, GLIBC_2_0);
+#endif
+
+void weak_variable (*__free_hook) (void *__ptr,
+                                   const void *) = NULL;
+void *weak_variable (*__malloc_hook)
+  (size_t __size, const void *) = malloc_hook_ini;
+void *weak_variable (*__realloc_hook)
+  (void *__ptr, size_t __size, const void *)
+  = realloc_hook_ini;
+void *weak_variable (*__memalign_hook)
+  (size_t __alignment, size_t __size, const void *)
+  = memalign_hook_ini;
+void weak_variable (*__after_morecore_hook) (void) = NULL;
+
+
+/* ---------------- Error behavior ------------------------------------ */
+
+#ifndef DEFAULT_CHECK_ACTION
+# define DEFAULT_CHECK_ACTION 3
+#endif
+
+static int check_action = DEFAULT_CHECK_ACTION;
+
+
+/* ------------------ Testing support ----------------------------------*/
+
+static int perturb_byte;
+
+static void
+alloc_perturb (char *p, size_t n)
+{
+  if (__glibc_unlikely (perturb_byte))
+    memset (p, perturb_byte ^ 0xff, n);
+}
+
+static void
+free_perturb (char *p, size_t n)
+{
+  if (__glibc_unlikely (perturb_byte))
+    memset (p, perturb_byte, n);
+}
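+
+/* Example: with perturb_byte == 0xaa (settable via mallopt (M_PERTURB, 0xaa)
+   or the MALLOC_PERTURB_ environment variable), freshly allocated memory is
+   filled with 0x55 (0xaa ^ 0xff) and freed memory with 0xaa, which helps
+   catch use of uninitialized or freed data.  */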
+
+
+
+#include <stap-probe.h>
+
+/* ------------------- Support for multiple arenas -------------------- */
+#include "arena.c"
+
+/*
+   Debugging support
+
+   These routines make a number of assertions about the states
+   of data structures that should be true at all times. If any
+   are not true, it's very likely that a user program has somehow
+   trashed memory. (It's also possible that there is a coding error
+   in malloc. In which case, please report it!)
+ */
+
+#if !MALLOC_DEBUG
+
+# define check_chunk(A, P)
+# define check_free_chunk(A, P)
+# define check_inuse_chunk(A, P)
+# define check_remalloced_chunk(A, P, N)
+# define check_malloced_chunk(A, P, N)
+# define check_malloc_state(A)
+
+#else
+
+# define check_chunk(A, P)              do_check_chunk (A, P)
+# define check_free_chunk(A, P)         do_check_free_chunk (A, P)
+# define check_inuse_chunk(A, P)        do_check_inuse_chunk (A, P)
+# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
+# define check_malloced_chunk(A, P, N)   do_check_malloced_chunk (A, P, N)
+# define check_malloc_state(A)         do_check_malloc_state (A)
+
+/*
+   Properties of all chunks
+ */
+
+static void
+do_check_chunk (mstate av, mchunkptr p)
+{
+  unsigned long sz = chunksize (p);
+  /* min and max possible addresses assuming contiguous allocation */
+  char *max_address = (char *) (av->top) + chunksize (av->top);
+  char *min_address = max_address - av->system_mem;
+
+  if (!chunk_is_mmapped (p))
+    {
+      /* Has legal address ... */
+      if (p != av->top)
+        {
+          if (contiguous (av))
+            {
+              assert (((char *) p) >= min_address);
+              assert (((char *) p + sz) <= ((char *) (av->top)));
+            }
+        }
+      else
+        {
+          /* top size is always at least MINSIZE */
+          assert ((unsigned long) (sz) >= MINSIZE);
+          /* top predecessor always marked inuse */
+          assert (prev_inuse (p));
+        }
+    }
+  else if (!DUMPED_MAIN_ARENA_CHUNK (p))
+    {
+      /* address is outside main heap  */
+      if (contiguous (av) && av->top != initial_top (av))
+        {
+          assert (((char *) p) < min_address || ((char *) p) >= max_address);
+        }
+      /* chunk is page-aligned */
+      assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
+      /* mem is aligned */
+      assert (aligned_OK (chunk2mem (p)));
+    }
+}
+
+/*
+   Properties of free chunks
+ */
+
+static void
+do_check_free_chunk (mstate av, mchunkptr p)
+{
+  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
+  mchunkptr next = chunk_at_offset (p, sz);
+
+  do_check_chunk (av, p);
+
+  /* Chunk must claim to be free ... */
+  assert (!inuse (p));
+  assert (!chunk_is_mmapped (p));
+
+  /* Unless a special marker, must have OK fields */
+  if ((unsigned long) (sz) >= MINSIZE)
+    {
+      assert ((sz & MALLOC_ALIGN_MASK) == 0);
+      assert (aligned_OK (chunk2mem (p)));
+      /* ... matching footer field */
+      assert (prev_size (p) == sz);
+      /* ... and is fully consolidated */
+      assert (prev_inuse (p));
+      assert (next == av->top || inuse (next));
+
+      /* ... and has minimally sane links */
+      assert (p->fd->bk == p);
+      assert (p->bk->fd == p);
+    }
+  else /* markers are always of size SIZE_SZ */
+    assert (sz == SIZE_SZ);
+}
+
+/*
+   Properties of inuse chunks
+ */
+
+static void
+do_check_inuse_chunk (mstate av, mchunkptr p)
+{
+  mchunkptr next;
+
+  do_check_chunk (av, p);
+
+  if (chunk_is_mmapped (p))
+    return; /* mmapped chunks have no next/prev */
+
+  /* Check whether it claims to be in use ... */
+  assert (inuse (p));
+
+  next = next_chunk (p);
+
+  /* ... and is surrounded by OK chunks.
+     Since more things can be checked with free chunks than inuse ones,
+     if an inuse chunk borders them and debugging is on, it's worth checking them.
+   */
+  if (!prev_inuse (p))
+    {
+      /* Note that we cannot even look at prev unless it is not inuse */
+      mchunkptr prv = prev_chunk (p);
+      assert (next_chunk (prv) == p);
+      do_check_free_chunk (av, prv);
+    }
+
+  if (next == av->top)
+    {
+      assert (prev_inuse (next));
+      assert (chunksize (next) >= MINSIZE);
+    }
+  else if (!inuse (next))
+    do_check_free_chunk (av, next);
+}
+
+/*
+   Properties of chunks recycled from fastbins
+ */
+
+static void
+do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
+{
+  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
+
+  if (!chunk_is_mmapped (p))
+    {
+      assert (av == arena_for_chunk (p));
+      if (chunk_main_arena (p))
+        assert (av == &main_arena);
+      else
+        assert (av != &main_arena);
+    }
+
+  do_check_inuse_chunk (av, p);
+
+  /* Legal size ... */
+  assert ((sz & MALLOC_ALIGN_MASK) == 0);
+  assert ((unsigned long) (sz) >= MINSIZE);
+  /* ... and alignment */
+  assert (aligned_OK (chunk2mem (p)));
+  /* chunk is less than MINSIZE more than request */
+  assert ((long) (sz) - (long) (s) >= 0);
+  assert ((long) (sz) - (long) (s + MINSIZE) < 0);
+}
+
+/*
+   Properties of nonrecycled chunks at the point they are malloced
+ */
+
+static void
+do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
+{
+  /* same as recycled case ... */
+  do_check_remalloced_chunk (av, p, s);
+
+  /*
+     ... plus,  must obey implementation invariant that prev_inuse is
+     always true of any allocated chunk; i.e., that each allocated
+     chunk borders either a previously allocated and still in-use
+     chunk, or the base of its memory arena. This is ensured
+     by making all allocations from the `lowest' part of any found
+     chunk.  This does not necessarily hold however for chunks
+     recycled via fastbins.
+   */
+
+  assert (prev_inuse (p));
+}
+
+
+/*
+   Properties of malloc_state.
+
+   This may be useful for debugging malloc, as well as detecting user
+   programming errors that somehow write into malloc_state.
+
+   If you are extending or experimenting with this malloc, you can
+   probably figure out how to hack this routine to print out or
+   display chunk addresses, sizes, bins, and other instrumentation.
+ */
+
+static void
+do_check_malloc_state (mstate av)
+{
+  int i;
+  mchunkptr p;
+  mchunkptr q;
+  mbinptr b;
+  unsigned int idx;
+  INTERNAL_SIZE_T size;
+  unsigned long total = 0;
+  int max_fast_bin;
+
+  /* internal size_t must be no wider than pointer type */
+  assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
+
+  /* alignment is a power of 2 */
+  assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
+
+  /* cannot run remaining checks until fully initialized */
+  if (av->top == 0 || av->top == initial_top (av))
+    return;
+
+  /* pagesize is a power of 2 */
+  assert (powerof2(GLRO (dl_pagesize)));
+
+  /* A contiguous main_arena is consistent with sbrk_base.  */
+  if (av == &main_arena && contiguous (av))
+    assert ((char *) mp_.sbrk_base + av->system_mem ==
+            (char *) av->top + chunksize (av->top));
+
+  /* properties of fastbins */
+
+  /* max_fast is in allowed range */
+  assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
+
+  max_fast_bin = fastbin_index (get_max_fast ());
+
+  for (i = 0; i < NFASTBINS; ++i)
+    {
+      p = fastbin (av, i);
+
+      /* The following test can only be performed for the main arena.
+         While mallopt calls malloc_consolidate to get rid of all fast
+         bins (especially those larger than the new maximum) this only
+         happens for the main arena.  Trying to do this for any
+         other arena would mean those arenas have to be locked and
+         malloc_consolidate be called for them.  This is excessive.  And
+         even if this is acceptable to somebody it still cannot solve
+         the problem completely since if the arena is locked a
+         concurrent malloc call might create a new arena which then
+         could use the newly invalid fast bins.  */
+
+      /* all bins past max_fast are empty */
+      if (av == &main_arena && i > max_fast_bin)
+        assert (p == 0);
+
+      while (p != 0)
+        {
+          /* each chunk claims to be inuse */
+          do_check_inuse_chunk (av, p);
+          total += chunksize (p);
+          /* chunk belongs in this bin */
+          assert (fastbin_index (chunksize (p)) == i);
+          p = p->fd;
+        }
+    }
+
+  if (total != 0)
+    assert (have_fastchunks (av));
+  else if (!have_fastchunks (av))
+    assert (total == 0);
+
+  /* check normal bins */
+  for (i = 1; i < NBINS; ++i)
+    {
+      b = bin_at (av, i);
+
+      /* binmap is accurate (except for bin 1 == unsorted_chunks) */
+      if (i >= 2)
+        {
+          unsigned int binbit = get_binmap (av, i);
+          int empty = last (b) == b;
+          if (!binbit)
+            assert (empty);
+          else if (!empty)
+            assert (binbit);
+        }
+
+      for (p = last (b); p != b; p = p->bk)
+        {
+          /* each chunk claims to be free */
+          do_check_free_chunk (av, p);
+          size = chunksize (p);
+          total += size;
+          if (i >= 2)
+            {
+              /* chunk belongs in bin */
+              idx = bin_index (size);
+              assert (idx == i);
+              /* lists are sorted */
+              assert (p->bk == b ||
+                      (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
+
+              if (!in_smallbin_range (size))
+                {
+                  if (p->fd_nextsize != NULL)
+                    {
+                      if (p->fd_nextsize == p)
+                        assert (p->bk_nextsize == p);
+                      else
+                        {
+                          if (p->fd_nextsize == first (b))
+                            assert (chunksize (p) < chunksize (p->fd_nextsize));
+                          else
+                            assert (chunksize (p) > chunksize (p->fd_nextsize));
+
+                          if (p == first (b))
+                            assert (chunksize (p) > chunksize (p->bk_nextsize));
+                          else
+                            assert (chunksize (p) < chunksize (p->bk_nextsize));
+                        }
+                    }
+                  else
+                    assert (p->bk_nextsize == NULL);
+                }
+            }
+          else if (!in_smallbin_range (size))
+            assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
+          /* chunk is followed by a legal chain of inuse chunks */
+          for (q = next_chunk (p);
+               (q != av->top && inuse (q) &&
+                (unsigned long) (chunksize (q)) >= MINSIZE);
+               q = next_chunk (q))
+            do_check_inuse_chunk (av, q);
+        }
+    }
+
+  /* top chunk is OK */
+  check_chunk (av, av->top);
+}
+#endif
+
+
+/* ----------------- Support for debugging hooks -------------------- */
+#include "hooks.c"
+
+
+/* ----------- Routines dealing with system allocation -------------- */
+
+/*
+   sysmalloc handles malloc cases requiring more memory from the system.
+   On entry, it is assumed that av->top does not have enough
+   space to service request for nb bytes, thus requiring that av->top
+   be extended or replaced.
+ */
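+
+/* In outline, sysmalloc tries, in order: (1) a direct mmap when the request
+   is at or above mp_.mmap_threshold (or when no usable arena is available),
+   (2) for a non-main arena, growing the current sub-heap or allocating a new
+   one, and (3) for the main arena, extending top via MORECORE/sbrk, with
+   mmap as a fallback.  */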
+
+static void *
+sysmalloc (INTERNAL_SIZE_T nb, mstate av)
+{
+  mchunkptr old_top;              /* incoming value of av->top */
+  INTERNAL_SIZE_T old_size;       /* its size */
+  char *old_end;                  /* its end address */
+
+  long size;                      /* arg to first MORECORE or mmap call */
+  char *brk;                      /* return value from MORECORE */
+
+  long correction;                /* arg to 2nd MORECORE call */
+  char *snd_brk;                  /* 2nd return val */
+
+  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
+  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
+  char *aligned_brk;              /* aligned offset into brk */
+
+  mchunkptr p;                    /* the allocated/returned chunk */
+  mchunkptr remainder;            /* remainder from allocation */
+  unsigned long remainder_size;   /* its size */
+
+
+  size_t pagesize = GLRO (dl_pagesize);
+  bool tried_mmap = false;
+
+
+  /*
+     If have mmap, and the request size meets the mmap threshold, and
+     the system supports mmap, and there are few enough currently
+     allocated mmapped regions, try to directly map this request
+     rather than expanding top.
+   */
+
+  if (av == NULL
+      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
+	  && (mp_.n_mmaps < mp_.n_mmaps_max)))
+    {
+      char *mm;           /* return value from mmap call*/
+
+    try_mmap:
+      /*
+         Round up size to nearest page.  For mmapped chunks, the overhead
+         is one SIZE_SZ unit larger than for normal chunks, because there
+         is no following chunk whose prev_size field could be used.
+
+         See the front_misalign handling below; for glibc there is no
+         need for further alignments unless we have high alignment.
+       */
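+      /* Example (assuming SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and 4 KiB
+         pages): for nb == 200000 the first branch computes
+         ALIGN_UP (200008, 4096) == 200704, i.e. 49 whole pages.  */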
+      if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+        size = ALIGN_UP (nb + SIZE_SZ, pagesize);
+      else
+        size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
+      tried_mmap = true;
+
+      /* Don't try if size wraps around 0 */
+      if ((unsigned long) (size) > (unsigned long) (nb))
+        {
+          mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
+
+          if (mm != MAP_FAILED)
+            {
+              /*
+                 The offset to the start of the mmapped region is stored
+                 in the prev_size field of the chunk. This allows us to adjust
+                 returned start address to meet alignment requirements here
+                 and in memalign(), and still be able to compute proper
+                 address argument for later munmap in free() and realloc().
+               */
+
+              if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+                {
+                  /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
+                     MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
+                     aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
+                  assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
+                  front_misalign = 0;
+                }
+              else
+                front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
+              if (front_misalign > 0)
+                {
+                  correction = MALLOC_ALIGNMENT - front_misalign;
+                  p = (mchunkptr) (mm + correction);
+		  set_prev_size (p, correction);
+                  set_head (p, (size - correction) | IS_MMAPPED);
+                }
+              else
+                {
+                  p = (mchunkptr) mm;
+		  set_prev_size (p, 0);
+                  set_head (p, size | IS_MMAPPED);
+                }
+
+              /* update statistics */
+
+              int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
+              atomic_max (&mp_.max_n_mmaps, new);
+
+              unsigned long sum;
+              sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
+              atomic_max (&mp_.max_mmapped_mem, sum);
+
+              check_chunk (av, p);
+
+              return chunk2mem (p);
+            }
+        }
+    }
+
+  /* There are no usable arenas and mmap also failed.  */
+  if (av == NULL)
+    return 0;
+
+  /* Record incoming configuration of top */
+
+  old_top = av->top;
+  old_size = chunksize (old_top);
+  old_end = (char *) (chunk_at_offset (old_top, old_size));
+
+  brk = snd_brk = (char *) (MORECORE_FAILURE);
+
+  /*
+     If not the first time through, we require old_size to be
+     at least MINSIZE and to have prev_inuse set.
+   */
+
+  assert ((old_top == initial_top (av) && old_size == 0) ||
+          ((unsigned long) (old_size) >= MINSIZE &&
+           prev_inuse (old_top) &&
+           ((unsigned long) old_end & (pagesize - 1)) == 0));
+
+  /* Precondition: not enough current space to satisfy nb request */
+  assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
+
+
+  if (av != &main_arena)
+    {
+      heap_info *old_heap, *heap;
+      size_t old_heap_size;
+
+      /* First try to extend the current heap. */
+      old_heap = heap_for_ptr (old_top);
+      old_heap_size = old_heap->size;
+      if ((long) (MINSIZE + nb - old_size) > 0
+          && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
+        {
+          av->system_mem += old_heap->size - old_heap_size;
+          set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
+                    | PREV_INUSE);
+        }
+      else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
+        {
+          /* Use a newly allocated heap.  */
+          heap->ar_ptr = av;
+          heap->prev = old_heap;
+          av->system_mem += heap->size;
+          /* Set up the new top.  */
+          top (av) = chunk_at_offset (heap, sizeof (*heap));
+          set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
+
+          /* Setup fencepost and free the old top chunk with a multiple of
+             MALLOC_ALIGNMENT in size. */
+          /* The fencepost takes at least MINSIZE bytes, because it might
+             become the top chunk again later.  Note that a footer is set
+             up, too, although the chunk is marked in use. */
+          old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
+          set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
+          if (old_size >= MINSIZE)
+            {
+              set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
+              set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
+              set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
+              _int_free (av, old_top, 1);
+            }
+          else
+            {
+              set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
+              set_foot (old_top, (old_size + 2 * SIZE_SZ));
+            }
+        }
+      else if (!tried_mmap)
+        /* We can at least try to use mmap memory.  */
+        goto try_mmap;
+    }
+  else     /* av == main_arena */
+
+
+    { /* Request enough space for nb + pad + overhead */
+      size = nb + mp_.top_pad + MINSIZE;
+
+      /*
+         If contiguous, we can subtract out existing space that we hope to
+         combine with new space. We add it back later only if
+         we don't actually get contiguous space.
+       */
+
+      if (contiguous (av))
+        size -= old_size;
+
+      /*
+         Round to a multiple of page size.
+         If MORECORE is not contiguous, this ensures that we only call it
+         with whole-page arguments.  And if MORECORE is contiguous and
+         this is not first time through, this preserves page-alignment of
+         previous calls. Otherwise, we correct to page-align below.
+       */
+
+      size = ALIGN_UP (size, pagesize);
+
+      /*
+         Don't try to call MORECORE if argument is so big as to appear
+         negative. Note that since mmap takes size_t arg, it may succeed
+         below even if we cannot call MORECORE.
+       */
+
+      if (size > 0)
+        {
+          brk = (char *) (MORECORE (size));
+          LIBC_PROBE (memory_sbrk_more, 2, brk, size);
+        }
+
+      if (brk != (char *) (MORECORE_FAILURE))
+        {
+          /* Call the `morecore' hook if necessary.  */
+          void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
+          if (__builtin_expect (hook != NULL, 0))
+            (*hook)();
+        }
+      else
+        {
+          /*
+             If have mmap, try using it as a backup when MORECORE fails or
+             cannot be used. This is worth doing on systems that have "holes" in
+             address space, so sbrk cannot extend to give contiguous space, but
+             space is available elsewhere.  Note that we ignore mmap max count
+             and threshold limits, since the space will not be used as a
+             segregated mmap region.
+           */
+
+          /* Cannot merge with old top, so add its size back in */
+          if (contiguous (av))
+            size = ALIGN_UP (size + old_size, pagesize);
+
+          /* If we are relying on mmap as backup, then use larger units */
+          if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
+            size = MMAP_AS_MORECORE_SIZE;
+
+          /* Don't try if size wraps around 0 */
+          if ((unsigned long) (size) > (unsigned long) (nb))
+            {
+              char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
+
+              if (mbrk != MAP_FAILED)
+                {
+                  /* We do not need, and cannot use, another sbrk call to find end */
+                  brk = mbrk;
+                  snd_brk = brk + size;
+
+                  /*
+                     Record that we no longer have a contiguous sbrk region.
+                     After the first time mmap is used as backup, we do not
+                     ever rely on contiguous space since this could incorrectly
+                     bridge regions.
+                   */
+                  set_noncontiguous (av);
+                }
+            }
+        }
+
+      if (brk != (char *) (MORECORE_FAILURE))
+        {
+          if (mp_.sbrk_base == 0)
+            mp_.sbrk_base = brk;
+          av->system_mem += size;
+
+          /*
+             If MORECORE extends previous space, we can likewise extend top size.
+           */
+
+          if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
+            set_head (old_top, (size + old_size) | PREV_INUSE);
+
+          else if (contiguous (av) && old_size && brk < old_end)
+            {
+              /* Oops!  Someone else killed our space..  Can't touch anything.  */
+              malloc_printerr (3, "break adjusted to free malloc space", brk,
+			       av);
+            }
+
+          /*
+             Otherwise, make adjustments:
+
+           * If the first time through or noncontiguous, we need to call sbrk
+              just to find out where the end of memory lies.
+
+           * We need to ensure that all returned chunks from malloc will meet
+              MALLOC_ALIGNMENT
+
+           * If there was an intervening foreign sbrk, we need to adjust sbrk
+              request size to account for fact that we will not be able to
+              combine new space with existing space in old_top.
+
+           * Almost all systems internally allocate whole pages at a time, in
+              which case we might as well use the whole last page of the request.
+              So we allocate enough more memory to hit a page boundary now,
+              which in turn causes future contiguous calls to page-align.
+           */
+
+          else
+            {
+              front_misalign = 0;
+              end_misalign = 0;
+              correction = 0;
+              aligned_brk = brk;
+
+              /* handle contiguous cases */
+              if (contiguous (av))
+                {
+                  /* Count foreign sbrk as system_mem.  */
+                  if (old_size)
+                    av->system_mem += brk - old_end;
+
+                  /* Guarantee alignment of first new chunk made from this space */
+
+                  front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
+                  if (front_misalign > 0)
+                    {
+                      /*
+                         Skip over some bytes to arrive at an aligned position.
+                         We don't need to specially mark these wasted front bytes.
+                         They will never be accessed anyway because
+                         prev_inuse of av->top (and any chunk created from its start)
+                         is always true after initialization.
+                       */
+
+                      correction = MALLOC_ALIGNMENT - front_misalign;
+                      aligned_brk += correction;
+                    }
+
+                  /*
+                     If this isn't adjacent to existing space, then we will not
+                     be able to merge with old_top space, so must add to 2nd request.
+                   */
+
+                  correction += old_size;
+
+                  /* Extend the end address to hit a page boundary */
+                  end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
+                  correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
+
+                  assert (correction >= 0);
+                  snd_brk = (char *) (MORECORE (correction));
+
+                  /*
+                     If can't allocate correction, try to at least find out current
+                     brk.  It might be enough to proceed without failing.
+
+                     Note that if second sbrk did NOT fail, we assume that space
+                     is contiguous with first sbrk. This is a safe assumption unless
+                     program is multithreaded but doesn't use locks and a foreign sbrk
+                     occurred between our first and second calls.
+                   */
+
+                  if (snd_brk == (char *) (MORECORE_FAILURE))
+                    {
+                      correction = 0;
+                      snd_brk = (char *) (MORECORE (0));
+                    }
+                  else
+                    {
+                      /* Call the `morecore' hook if necessary.  */
+                      void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
+                      if (__builtin_expect (hook != NULL, 0))
+                        (*hook)();
+                    }
+                }
+
+              /* handle non-contiguous cases */
+              else
+                {
+                  if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+                    /* MORECORE/mmap must correctly align */
+                    assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
+                  else
+                    {
+                      front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
+                      if (front_misalign > 0)
+                        {
+                          /*
+                             Skip over some bytes to arrive at an aligned position.
+                             We don't need to specially mark these wasted front bytes.
+                             They will never be accessed anyway because
+                             prev_inuse of av->top (and any chunk created from its start)
+                             is always true after initialization.
+                           */
+
+                          aligned_brk += MALLOC_ALIGNMENT - front_misalign;
+                        }
+                    }
+
+                  /* Find out current end of memory */
+                  if (snd_brk == (char *) (MORECORE_FAILURE))
+                    {
+                      snd_brk = (char *) (MORECORE (0));
+                    }
+                }
+
+              /* Adjust top based on results of second sbrk */
+              if (snd_brk != (char *) (MORECORE_FAILURE))
+                {
+                  av->top = (mchunkptr) aligned_brk;
+                  set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
+                  av->system_mem += correction;
+
+                  /*
+                     If not the first time through, we either have a
+                     gap due to foreign sbrk or a non-contiguous region.  Insert a
+                     double fencepost at old_top to prevent consolidation with space
+                     we don't own. These fenceposts are artificial chunks that are
+                     marked as inuse and are in any case too small to use.  We need
+                     two to make sizes and alignments work out.
+                   */
+
+                  if (old_size != 0)
+                    {
+                      /*
+                         Shrink old_top to insert fenceposts, keeping size a
+                         multiple of MALLOC_ALIGNMENT. We know there is at least
+                         enough space in old_top to do this.
+                       */
+                      old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
+                      set_head (old_top, old_size | PREV_INUSE);
+
+                      /*
+                         Note that the following assignments completely overwrite
+                         old_top when old_size was previously MINSIZE.  This is
+                         intentional. We need the fencepost, even if old_top otherwise gets
+                         lost.
+                       */
+		      set_head (chunk_at_offset (old_top, old_size),
+				(2 * SIZE_SZ) | PREV_INUSE);
+		      set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
+				(2 * SIZE_SZ) | PREV_INUSE);
+
+                      /* If possible, release the rest. */
+                      if (old_size >= MINSIZE)
+                        {
+                          _int_free (av, old_top, 1);
+                        }
+                    }
+                }
+            }
+        }
+    } /* if (av !=  &main_arena) */
+
+  if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
+    av->max_system_mem = av->system_mem;
+  check_malloc_state (av);
+
+  /* finally, do the allocation */
+  p = av->top;
+  size = chunksize (p);
+
+  /* check that one of the above allocation paths succeeded */
+  if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
+    {
+      remainder_size = size - nb;
+      remainder = chunk_at_offset (p, nb);
+      av->top = remainder;
+      set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
+      set_head (remainder, remainder_size | PREV_INUSE);
+      check_malloced_chunk (av, p, nb);
+      return chunk2mem (p);
+    }
+
+  /* catch all failure paths */
+  __set_errno (ENOMEM);
+  return 0;
+}
+
+
+/*
+   systrim is an inverse of sorts to sysmalloc.  It gives memory back
+   to the system (via negative arguments to sbrk) if there is unused
+   memory at the `high' end of the malloc pool. It is called
+   automatically by free() when top space exceeds the trim
+   threshold. It is also called by the public malloc_trim routine.  It
+   returns 1 if it actually released any memory, else 0.
+ */
+
+static int
+systrim (size_t pad, mstate av)
+{
+  long top_size;         /* Amount of top-most memory */
+  long extra;            /* Amount to release */
+  long released;         /* Amount actually released */
+  char *current_brk;     /* address returned by pre-check sbrk call */
+  char *new_brk;         /* address returned by post-check sbrk call */
+  size_t pagesize;
+  long top_area;
+
+  pagesize = GLRO (dl_pagesize);
+  top_size = chunksize (av->top);
+
+  top_area = top_size - MINSIZE - 1;
+  if (top_area <= pad)
+    return 0;
+
+  /* Release in pagesize units and round down to the nearest page.  */
+  extra = ALIGN_DOWN(top_area - pad, pagesize);
+
+  if (extra == 0)
+    return 0;
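+
+  /* Worked example (64-bit, 4 KiB pages): with top_size == 200000 and
+     pad == 131072 (e.g. the default trim threshold), top_area is 199967 and
+     extra becomes ALIGN_DOWN (68895, 4096) == 65536, i.e. 16 pages are
+     offered back to the system.  */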
+
+  /*
+     Only proceed if end of memory is where we last set it.
+     This avoids problems if there were foreign sbrk calls.
+   */
+  current_brk = (char *) (MORECORE (0));
+  if (current_brk == (char *) (av->top) + top_size)
+    {
+      /*
+         Attempt to release memory. We ignore MORECORE return value,
+         and instead call again to find out where new end of memory is.
+         This avoids problems if first call releases less than we asked,
+         or if failure somehow altered the brk value. (We could still
+         encounter problems if it altered brk in some very bad way,
+         but the only thing we can do is adjust anyway, which will cause
+         some downstream failure.)
+       */
+
+      MORECORE (-extra);
+      /* Call the `morecore' hook if necessary.  */
+      void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
+      if (__builtin_expect (hook != NULL, 0))
+        (*hook)();
+      new_brk = (char *) (MORECORE (0));
+
+      LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
+
+      if (new_brk != (char *) MORECORE_FAILURE)
+        {
+          released = (long) (current_brk - new_brk);
+
+          if (released != 0)
+            {
+              /* Success. Adjust top. */
+              av->system_mem -= released;
+              set_head (av->top, (top_size - released) | PREV_INUSE);
+              check_malloc_state (av);
+              return 1;
+            }
+        }
+    }
+  return 0;
+}
+
+static void
+internal_function
+munmap_chunk (mchunkptr p)
+{
+  INTERNAL_SIZE_T size = chunksize (p);
+
+  assert (chunk_is_mmapped (p));
+
+  /* Do nothing if the chunk is a faked mmapped chunk in the dumped
+     main arena.  We never free this memory.  */
+  if (DUMPED_MAIN_ARENA_CHUNK (p))
+    return;
+
+  uintptr_t block = (uintptr_t) p - prev_size (p);
+  size_t total_size = prev_size (p) + size;
+  /* Unfortunately we have to do the compiler's job by hand here.  Normally
+     we would test BLOCK and TOTAL-SIZE separately for compliance with the
+     page size.  But gcc does not recognize the optimization possibility
+     (at the moment, at least) so we combine the two values into one before
+     the bit test.  */
+  if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
+    {
+      malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
+                       chunk2mem (p), NULL);
+      return;
+    }
+
+  atomic_decrement (&mp_.n_mmaps);
+  atomic_add (&mp_.mmapped_mem, -total_size);
+
+  /* If munmap failed the process virtual memory address space is in a
+     bad shape.  Just leave the block hanging around, the process will
+     terminate shortly anyway since not much can be done.  */
+  __munmap ((char *) block, total_size);
+}
+
+#if HAVE_MREMAP
+
+static mchunkptr
+internal_function
+mremap_chunk (mchunkptr p, size_t new_size)
+{
+  size_t pagesize = GLRO (dl_pagesize);
+  INTERNAL_SIZE_T offset = prev_size (p);
+  INTERNAL_SIZE_T size = chunksize (p);
+  char *cp;
+
+  assert (chunk_is_mmapped (p));
+  assert (((size + offset) & (GLRO (dl_pagesize) - 1)) == 0);
+
+  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
+  new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
+
+  /* No need to remap if the number of pages does not change.  */
+  if (size + offset == new_size)
+    return p;
+
+  cp = (char *) __mremap ((char *) p - offset, size + offset, new_size,
+                          MREMAP_MAYMOVE);
+
+  if (cp == MAP_FAILED)
+    return 0;
+
+  p = (mchunkptr) (cp + offset);
+
+  assert (aligned_OK (chunk2mem (p)));
+
+  assert (prev_size (p) == offset);
+  set_head (p, (new_size - offset) | IS_MMAPPED);
+
+  INTERNAL_SIZE_T new;
+  new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
+        + new_size - size - offset;
+  atomic_max (&mp_.max_mmapped_mem, new);
+  return p;
+}
+#endif /* HAVE_MREMAP */
+
+/*------------------------ Public wrappers. --------------------------------*/
+
+void *
+__libc_malloc (size_t bytes)
+{
+  mstate ar_ptr;
+  void *victim;
+
+  void *(*hook) (size_t, const void *)
+    = atomic_forced_read (__malloc_hook);
+  if (__builtin_expect (hook != NULL, 0))
+    return (*hook)(bytes, RETURN_ADDRESS (0));
+
+  arena_get (ar_ptr, bytes);
+
+  victim = _int_malloc (ar_ptr, bytes);
+  /* Retry with another arena only if we were able to find a usable arena
+     before.  */
+  if (!victim && ar_ptr != NULL)
+    {
+      LIBC_PROBE (memory_malloc_retry, 1, bytes);
+      ar_ptr = arena_get_retry (ar_ptr, bytes);
+      victim = _int_malloc (ar_ptr, bytes);
+    }
+
+  if (ar_ptr != NULL)
+    __libc_lock_unlock (ar_ptr->mutex);
+
+  assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
+          ar_ptr == arena_for_chunk (mem2chunk (victim)));
+  return victim;
+}
+libc_hidden_def (__libc_malloc)
+
+void
+__libc_free (void *mem)
+{
+  mstate ar_ptr;
+  mchunkptr p;                          /* chunk corresponding to mem */
+
+  void (*hook) (void *, const void *)
+    = atomic_forced_read (__free_hook);
+  if (__builtin_expect (hook != NULL, 0))
+    {
+      (*hook)(mem, RETURN_ADDRESS (0));
+      return;
+    }
+
+  if (mem == 0)                              /* free(0) has no effect */
+    return;
+
+  p = mem2chunk (mem);
+
+  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
+    {
+      /* See if the dynamic brk/mmap threshold needs adjusting.
+	 Dumped fake mmapped chunks do not affect the threshold.  */
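+      /* For example, freeing a ~600 KiB mmapped chunk while the threshold is
+         still at its 128 KiB default raises mmap_threshold to that chunk's
+         size and trim_threshold to twice that, so similar future requests
+         are served from the heap instead of a fresh mmap.  */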
+      if (!mp_.no_dyn_threshold
+          && chunksize_nomask (p) > mp_.mmap_threshold
+          && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
+	  && !DUMPED_MAIN_ARENA_CHUNK (p))
+        {
+          mp_.mmap_threshold = chunksize (p);
+          mp_.trim_threshold = 2 * mp_.mmap_threshold;
+          LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
+                      mp_.mmap_threshold, mp_.trim_threshold);
+        }
+      munmap_chunk (p);
+      return;
+    }
+
+  ar_ptr = arena_for_chunk (p);
+  _int_free (ar_ptr, p, 0);
+}
+libc_hidden_def (__libc_free)
+
+void *
+__libc_realloc (void *oldmem, size_t bytes)
+{
+  mstate ar_ptr;
+  INTERNAL_SIZE_T nb;         /* padded request size */
+
+  void *newp;             /* chunk to return */
+
+  void *(*hook) (void *, size_t, const void *) =
+    atomic_forced_read (__realloc_hook);
+  if (__builtin_expect (hook != NULL, 0))
+    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
+
+#if REALLOC_ZERO_BYTES_FREES
+  if (bytes == 0 && oldmem != NULL)
+    {
+      __libc_free (oldmem); return 0;
+    }
+#endif
+
+  /* realloc of null is supposed to be the same as malloc */
+  if (oldmem == 0)
+    return __libc_malloc (bytes);
+
+  /* chunk corresponding to oldmem */
+  const mchunkptr oldp = mem2chunk (oldmem);
+  /* its size */
+  const INTERNAL_SIZE_T oldsize = chunksize (oldp);
+
+  if (chunk_is_mmapped (oldp))
+    ar_ptr = NULL;
+  else
+    ar_ptr = arena_for_chunk (oldp);
+
+  /* Little security check which won't hurt performance: the allocator
+     never wraps around at the end of the address space.  Therefore
+     we can exclude some size values which might appear here by
+     accident or by "design" from some intruder.  We need to bypass
+     this check for dumped fake mmap chunks from the old main arena
+     because the new malloc may provide additional alignment.  */
+  if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
+       || __builtin_expect (misaligned_chunk (oldp), 0))
+      && !DUMPED_MAIN_ARENA_CHUNK (oldp))
+    {
+      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
+		       ar_ptr);
+      return NULL;
+    }
+
+  checked_request2size (bytes, nb);
+
+  if (chunk_is_mmapped (oldp))
+    {
+      /* If this is a faked mmapped chunk from the dumped main arena,
+	 always make a copy (and do not free the old chunk).  */
+      if (DUMPED_MAIN_ARENA_CHUNK (oldp))
+	{
+	  /* Must alloc, copy, free. */
+	  void *newmem = __libc_malloc (bytes);
+	  if (newmem == 0)
+	    return NULL;
+	  /* Copy as many bytes as are available from the old chunk
+	     and fit into the new size.  NB: The overhead for faked
+	     mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
+	     regular mmapped chunks.  */
+	  if (bytes > oldsize - SIZE_SZ)
+	    bytes = oldsize - SIZE_SZ;
+	  memcpy (newmem, oldmem, bytes);
+	  return newmem;
+	}
+
+      void *newmem;
+
+#if HAVE_MREMAP
+      newp = mremap_chunk (oldp, nb);
+      if (newp)
+        return chunk2mem (newp);
+#endif
+      /* Note the extra SIZE_SZ overhead. */
+      if (oldsize - SIZE_SZ >= nb)
+        return oldmem;                         /* do nothing */
+
+      /* Must alloc, copy, free. */
+      newmem = __libc_malloc (bytes);
+      if (newmem == 0)
+        return 0;              /* propagate failure */
+
+      memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
+      munmap_chunk (oldp);
+      return newmem;
+    }
+
+  __libc_lock_lock (ar_ptr->mutex);
+
+  newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
+
+  __libc_lock_unlock (ar_ptr->mutex);
+  assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
+          ar_ptr == arena_for_chunk (mem2chunk (newp)));
+
+  if (newp == NULL)
+    {
+      /* Try harder to allocate memory in other arenas.  */
+      LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
+      newp = __libc_malloc (bytes);
+      if (newp != NULL)
+        {
+          memcpy (newp, oldmem, oldsize - SIZE_SZ);
+          _int_free (ar_ptr, oldp, 0);
+        }
+    }
+
+  return newp;
+}
+libc_hidden_def (__libc_realloc)
+
+void *
+__libc_memalign (size_t alignment, size_t bytes)
+{
+  void *address = RETURN_ADDRESS (0);
+  return _mid_memalign (alignment, bytes, address);
+}
+
+static void *
+_mid_memalign (size_t alignment, size_t bytes, void *address)
+{
+  mstate ar_ptr;
+  void *p;
+
+  void *(*hook) (size_t, size_t, const void *) =
+    atomic_forced_read (__memalign_hook);
+  if (__builtin_expect (hook != NULL, 0))
+    return (*hook)(alignment, bytes, address);
+
+  /* If we need less alignment than we give anyway, just relay to malloc.  */
+  if (alignment <= MALLOC_ALIGNMENT)
+    return __libc_malloc (bytes);
+
+  /* Otherwise, ensure that it is at least a minimum chunk size */
+  if (alignment < MINSIZE)
+    alignment = MINSIZE;
+
+  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
+     power of 2 and will cause overflow in the check below.  */
+  if (alignment > SIZE_MAX / 2 + 1)
+    {
+      __set_errno (EINVAL);
+      return 0;
+    }
+
+  /* Check for overflow.  */
+  if (bytes > SIZE_MAX - alignment - MINSIZE)
+    {
+      __set_errno (ENOMEM);
+      return 0;
+    }
+
+
+  /* Make sure alignment is power of 2.  */
+  if (!powerof2 (alignment))
+    {
+      size_t a = MALLOC_ALIGNMENT * 2;
+      while (a < alignment)
+        a <<= 1;
+      alignment = a;
+    }
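+
+  /* Example (with MALLOC_ALIGNMENT == 16): a request for alignment 48, which
+     is not a power of two, is rounded up here to 64 -- a starts at 32 and
+     doubles until it is at least the requested alignment.  */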
+
+  arena_get (ar_ptr, bytes + alignment + MINSIZE);
+
+  p = _int_memalign (ar_ptr, alignment, bytes);
+  if (!p && ar_ptr != NULL)
+    {
+      LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
+      ar_ptr = arena_get_retry (ar_ptr, bytes);
+      p = _int_memalign (ar_ptr, alignment, bytes);
+    }
+
+  if (ar_ptr != NULL)
+    __libc_lock_unlock (ar_ptr->mutex);
+
+  assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
+          ar_ptr == arena_for_chunk (mem2chunk (p)));
+  return p;
+}
+/* For ISO C11.  */
+weak_alias (__libc_memalign, aligned_alloc)
+libc_hidden_def (__libc_memalign)
+
+void *
+__libc_valloc (size_t bytes)
+{
+  if (__malloc_initialized < 0)
+    ptmalloc_init ();
+
+  void *address = RETURN_ADDRESS (0);
+  size_t pagesize = GLRO (dl_pagesize);
+  return _mid_memalign (pagesize, bytes, address);
+}
+
+void *
+__libc_pvalloc (size_t bytes)
+{
+  if (__malloc_initialized < 0)
+    ptmalloc_init ();
+
+  void *address = RETURN_ADDRESS (0);
+  size_t pagesize = GLRO (dl_pagesize);
+  size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
+
+  /* Check for overflow.  */
+  if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
+    {
+      __set_errno (ENOMEM);
+      return 0;
+    }
+
+  return _mid_memalign (pagesize, rounded_bytes, address);
+}
+
+void *
+__libc_calloc (size_t n, size_t elem_size)
+{
+  mstate av;
+  mchunkptr oldtop, p;
+  INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
+  void *mem;
+  unsigned long clearsize;
+  unsigned long nclears;
+  INTERNAL_SIZE_T *d;
+
+  /* size_t is unsigned so the behavior on overflow is defined.  */
+  bytes = n * elem_size;
+#define HALF_INTERNAL_SIZE_T \
+  (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
+  if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
+    {
+      if (elem_size != 0 && bytes / elem_size != n)
+        {
+          __set_errno (ENOMEM);
+          return 0;
+        }
+    }
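+
+  /* Example (64-bit): HALF_INTERNAL_SIZE_T is 2^32, so the division above is
+     only performed when n or elem_size is at least 2^32; a call such as
+     calloc ((size_t) 1 << 33, (size_t) 1 << 33) wraps bytes around to 0,
+     fails the bytes / elem_size != n test and is rejected with ENOMEM.  */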
+
+  void *(*hook) (size_t, const void *) =
+    atomic_forced_read (__malloc_hook);
+  if (__builtin_expect (hook != NULL, 0))
+    {
+      sz = bytes;
+      mem = (*hook)(sz, RETURN_ADDRESS (0));
+      if (mem == 0)
+        return 0;
+
+      return memset (mem, 0, sz);
+    }
+
+  sz = bytes;
+
+  arena_get (av, sz);
+  if (av)
+    {
+      /* Check if we hand out the top chunk, in which case there may be no
+	 need to clear. */
+#if MORECORE_CLEARS
+      oldtop = top (av);
+      oldtopsize = chunksize (top (av));
+# if MORECORE_CLEARS < 2
+      /* Only newly allocated memory is guaranteed to be cleared.  */
+      if (av == &main_arena &&
+	  oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
+	oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
+# endif
+      if (av != &main_arena)
+	{
+	  heap_info *heap = heap_for_ptr (oldtop);
+	  if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
+	    oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
+	}
+#endif
+    }
+  else
+    {
+      /* No usable arenas.  */
+      oldtop = 0;
+      oldtopsize = 0;
+    }
+  mem = _int_malloc (av, sz);
+
+
+  assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
+          av == arena_for_chunk (mem2chunk (mem)));
+
+  if (mem == 0 && av != NULL)
+    {
+      LIBC_PROBE (memory_calloc_retry, 1, sz);
+      av = arena_get_retry (av, sz);
+      mem = _int_malloc (av, sz);
+    }
+
+  if (av != NULL)
+    __libc_lock_unlock (av->mutex);
+
+  /* Allocation failed even after a retry.  */
+  if (mem == 0)
+    return 0;
+
+  p = mem2chunk (mem);
+
+  /* Two optional cases in which clearing not necessary */
+  if (chunk_is_mmapped (p))
+    {
+      if (__builtin_expect (perturb_byte, 0))
+        return memset (mem, 0, sz);
+
+      return mem;
+    }
+
+  csz = chunksize (p);
+
+#if MORECORE_CLEARS
+  if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
+    {
+      /* clear only the bytes from non-freshly-sbrked memory */
+      csz = oldtopsize;
+    }
+#endif
+
+  /* Unroll clear of <= 36 bytes (72 if 8-byte sizes).  We know that
+     contents have an odd number of INTERNAL_SIZE_T-sized words;
+     minimally 3.  */
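+  /* Worked example (64-bit): for a 32-byte request served by a 48-byte chunk,
+     clearsize is 40 and nclears is 5, so the unrolled path below zeroes
+     d[0] .. d[4].  */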
+  d = (INTERNAL_SIZE_T *) mem;
+  clearsize = csz - SIZE_SZ;
+  nclears = clearsize / sizeof (INTERNAL_SIZE_T);
+  assert (nclears >= 3);
+
+  if (nclears > 9)
+    return memset (d, 0, clearsize);
+
+  else
+    {
+      *(d + 0) = 0;
+      *(d + 1) = 0;
+      *(d + 2) = 0;
+      if (nclears > 4)
+        {
+          *(d + 3) = 0;
+          *(d + 4) = 0;
+          if (nclears > 6)
+            {
+              *(d + 5) = 0;
+              *(d + 6) = 0;
+              if (nclears > 8)
+                {
+                  *(d + 7) = 0;
+                  *(d + 8) = 0;
+                }
+            }
+        }
+    }
+
+  return mem;
+}
+
+/*
+   ------------------------------ malloc ------------------------------
+ */
+
+static void *
+_int_malloc (mstate av, size_t bytes)
+{
+  INTERNAL_SIZE_T nb;               /* normalized request size */
+  unsigned int idx;                 /* associated bin index */
+  mbinptr bin;                      /* associated bin */
+
+  mchunkptr victim;                 /* inspected/selected chunk */
+  INTERNAL_SIZE_T size;             /* its size */
+  int victim_index;                 /* its bin index */
+
+  mchunkptr remainder;              /* remainder from a split */
+  unsigned long remainder_size;     /* its size */
+
+  unsigned int block;               /* bit map traverser */
+  unsigned int bit;                 /* bit map traverser */
+  unsigned int map;                 /* current word of binmap */
+
+  mchunkptr fwd;                    /* misc temp for linking */
+  mchunkptr bck;                    /* misc temp for linking */
+
+  const char *errstr = NULL;
+
+  /*
+     Convert request size to internal form by adding SIZE_SZ bytes
+     overhead plus possibly more to obtain necessary alignment and/or
+     to obtain a size of at least MINSIZE, the smallest allocatable
+     size. Also, checked_request2size traps (returning 0) request sizes
+     that are so large that they wrap around zero when padded and
+     aligned.
+   */
+
+  checked_request2size (bytes, nb);
+
+  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
+     mmap.  */
+  if (__glibc_unlikely (av == NULL))
+    {
+      void *p = sysmalloc (nb, av);
+      if (p != NULL)
+	alloc_perturb (p, bytes);
+      return p;
+    }
+
+  /*
+     If the size qualifies as a fastbin, first check corresponding bin.
+     This code is safe to execute even if av is not yet initialized, so we
+     can try it without checking, which saves some time on this fast path.
+   */
+
+  if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
+    {
+      idx = fastbin_index (nb);
+      mfastbinptr *fb = &fastbin (av, idx);
+      mchunkptr pp = *fb;
+      do
+        {
+          victim = pp;
+          if (victim == NULL)
+            break;
+        }
+      while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
+             != victim);
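+      /* The loop above is a lock-free LIFO pop: if another thread removed or
+         added a chunk concurrently, the compare-and-exchange fails and the
+         pop is retried with the freshly observed list head.  */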
+      if (victim != 0)
+        {
+          if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
+            {
+              errstr = "malloc(): memory corruption (fast)";
+            errout:
+              malloc_printerr (check_action, errstr, chunk2mem (victim), av);
+              return NULL;
+            }
+          check_remalloced_chunk (av, victim, nb);
+          void *p = chunk2mem (victim);
+          alloc_perturb (p, bytes);
+          return p;
+        }
+    }
+
+  /*
+     If a small request, check regular bin.  Since these "smallbins"
+     hold one size each, no searching within bins is necessary.
+     (For a large request, we need to wait until unsorted chunks are
+     processed to find best fit. But for small ones, fits are exact
+     anyway, so we can check now, which is faster.)
+   */
+
+  if (in_smallbin_range (nb))
+    {
+      idx = smallbin_index (nb);
+      bin = bin_at (av, idx);
+
+      if ((victim = last (bin)) != bin)
+        {
+          if (victim == 0) /* initialization check */
+            malloc_consolidate (av);
+          else
+            {
+              bck = victim->bk;
+              if (__glibc_unlikely (bck->fd != victim))
+                {
+                  errstr = "malloc(): smallbin double linked list corrupted";
+                  goto errout;
+                }
+              set_inuse_bit_at_offset (victim, nb);
+              bin->bk = bck;
+              bck->fd = bin;
+
+              if (av != &main_arena)
+		set_non_main_arena (victim);
+              check_malloced_chunk (av, victim, nb);
+              void *p = chunk2mem (victim);
+              alloc_perturb (p, bytes);
+              return p;
+            }
+        }
+    }
+
+  /*
+     If this is a large request, consolidate fastbins before continuing.
+     While it might look excessive to kill all fastbins before
+     even seeing if there is space available, this avoids
+     fragmentation problems normally associated with fastbins.
+     Also, in practice, programs tend to have runs of either small or
+     large requests, but less often mixtures, so consolidation is not
+     invoked all that often in most programs.  And the programs in which
+     it is called frequently tend to fragment otherwise.
+   */
+
+  else
+    {
+      idx = largebin_index (nb);
+      if (have_fastchunks (av))
+        malloc_consolidate (av);
+    }
+
+  /*
+     Process recently freed or remaindered chunks, taking one only if
+     it is an exact fit, or, if this is a small request, if the chunk is
+     the remainder from the most recent non-exact fit.  Place other
+     traversed chunks in bins.  Note that this step is the only place in
+     any routine where chunks are placed in bins.
+
+     The outer loop here is needed because we might not realize until
+     near the end of malloc that we should have consolidated, so must
+     do so and retry. This happens at most once, and only when we would
+     otherwise need to expand memory to service a "small" request.
+   */
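+  /* For example, if the only unsorted chunk is a 160-byte last
+     remainder and a small request needs nb == 32 (assuming
+     MINSIZE == 32), it is split: the caller receives a 32-byte chunk
+     and a new 128-byte last remainder is left behind.  */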
+
+  for (;; )
+    {
+      int iters = 0;
+      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
+        {
+          bck = victim->bk;
+          if (__builtin_expect (chunksize_nomask (victim) <= 2 * SIZE_SZ, 0)
+              || __builtin_expect (chunksize_nomask (victim)
+				   > av->system_mem, 0))
+            malloc_printerr (check_action, "malloc(): memory corruption",
+                             chunk2mem (victim), av);
+          size = chunksize (victim);
+
+          /*
+             If a small request, try to use last remainder if it is the
+             only chunk in unsorted bin.  This helps promote locality for
+             runs of consecutive small requests. This is the only
+             exception to best-fit, and applies only when there is
+             no exact fit for a small chunk.
+           */
+
+          if (in_smallbin_range (nb) &&
+              bck == unsorted_chunks (av) &&
+              victim == av->last_remainder &&
+              (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
+            {
+              /* split and reattach remainder */
+              remainder_size = size - nb;
+              remainder = chunk_at_offset (victim, nb);
+              unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
+              av->last_remainder = remainder;
+              remainder->bk = remainder->fd = unsorted_chunks (av);
+              if (!in_smallbin_range (remainder_size))
+                {
+                  remainder->fd_nextsize = NULL;
+                  remainder->bk_nextsize = NULL;
+                }
+
+              set_head (victim, nb | PREV_INUSE |
+                        (av != &main_arena ? NON_MAIN_ARENA : 0));
+              set_head (remainder, remainder_size | PREV_INUSE);
+              set_foot (remainder, remainder_size);
+
+              check_malloced_chunk (av, victim, nb);
+              void *p = chunk2mem (victim);
+              alloc_perturb (p, bytes);
+              return p;
+            }
+
+          /* remove from unsorted list */
+          unsorted_chunks (av)->bk = bck;
+          bck->fd = unsorted_chunks (av);
+
+          /* Take now instead of binning if exact fit */
+
+          if (size == nb)
+            {
+              set_inuse_bit_at_offset (victim, size);
+              if (av != &main_arena)
+		set_non_main_arena (victim);
+              check_malloced_chunk (av, victim, nb);
+              void *p = chunk2mem (victim);
+              alloc_perturb (p, bytes);
+              return p;
+            }
+
+          /* place chunk in bin */
+
+          if (in_smallbin_range (size))
+            {
+              victim_index = smallbin_index (size);
+              bck = bin_at (av, victim_index);
+              fwd = bck->fd;
+            }
+          else
+            {
+              victim_index = largebin_index (size);
+              bck = bin_at (av, victim_index);
+              fwd = bck->fd;
+
+              /* maintain large bins in sorted order */
+              if (fwd != bck)
+                {
+                  /* Or with inuse bit to speed comparisons */
+                  size |= PREV_INUSE;
+                  /* if smaller than smallest, bypass loop below */
+                  assert (chunk_main_arena (bck->bk));
+                  if ((unsigned long) (size)
+		      < (unsigned long) chunksize_nomask (bck->bk))
+                    {
+                      fwd = bck;
+                      bck = bck->bk;
+
+                      victim->fd_nextsize = fwd->fd;
+                      victim->bk_nextsize = fwd->fd->bk_nextsize;
+                      fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
+                    }
+                  else
+                    {
+                      assert (chunk_main_arena (fwd));
+                      while ((unsigned long) size < chunksize_nomask (fwd))
+                        {
+                          fwd = fwd->fd_nextsize;
+			  assert (chunk_main_arena (fwd));
+                        }
+
+                      if ((unsigned long) size
+			  == (unsigned long) chunksize_nomask (fwd))
+                        /* Always insert in the second position.  */
+                        fwd = fwd->fd;
+                      else
+                        {
+                          victim->fd_nextsize = fwd;
+                          victim->bk_nextsize = fwd->bk_nextsize;
+                          fwd->bk_nextsize = victim;
+                          victim->bk_nextsize->fd_nextsize = victim;
+                        }
+                      bck = fwd->bk;
+                    }
+                }
+              else
+                victim->fd_nextsize = victim->bk_nextsize = victim;
+            }
+
+          mark_bin (av, victim_index);
+          victim->bk = bck;
+          victim->fd = fwd;
+          fwd->bk = victim;
+          bck->fd = victim;
+
+#define MAX_ITERS       10000
+          if (++iters >= MAX_ITERS)
+            break;
+        }
+
+      /*
+         If a large request, scan through the chunks of current bin in
+         sorted order to find smallest that fits.  Use the skip list for this.
+       */
+
+      if (!in_smallbin_range (nb))
+        {
+          bin = bin_at (av, idx);
+
+          /* skip scan if empty or largest chunk is too small */
+          if ((victim = first (bin)) != bin
+	      && (unsigned long) chunksize_nomask (victim)
+	        >= (unsigned long) (nb))
+            {
+              victim = victim->bk_nextsize;
+              while (((unsigned long) (size = chunksize (victim)) <
+                      (unsigned long) (nb)))
+                victim = victim->bk_nextsize;
+
+              /* Avoid removing the first entry for a size so that the skip
+                 list does not have to be rerouted.  */
+              if (victim != last (bin)
+		  && chunksize_nomask (victim)
+		    == chunksize_nomask (victim->fd))
+                victim = victim->fd;
+
+              remainder_size = size - nb;
+              unlink (av, victim, bck, fwd);
+
+              /* Exhaust */
+              if (remainder_size < MINSIZE)
+                {
+                  set_inuse_bit_at_offset (victim, size);
+                  if (av != &main_arena)
+		    set_non_main_arena (victim);
+                }
+              /* Split */
+              else
+                {
+                  remainder = chunk_at_offset (victim, nb);
+                  /* We cannot assume the unsorted list is empty and therefore
+                     have to perform a complete insert here.  */
+                  bck = unsorted_chunks (av);
+                  fwd = bck->fd;
+                  if (__glibc_unlikely (fwd->bk != bck))
+                    {
+                      errstr = "malloc(): corrupted unsorted chunks";
+                      goto errout;
+                    }
+                  remainder->bk = bck;
+                  remainder->fd = fwd;
+                  bck->fd = remainder;
+                  fwd->bk = remainder;
+                  if (!in_smallbin_range (remainder_size))
+                    {
+                      remainder->fd_nextsize = NULL;
+                      remainder->bk_nextsize = NULL;
+                    }
+                  set_head (victim, nb | PREV_INUSE |
+                            (av != &main_arena ? NON_MAIN_ARENA : 0));
+                  set_head (remainder, remainder_size | PREV_INUSE);
+                  set_foot (remainder, remainder_size);
+                }
+              check_malloced_chunk (av, victim, nb);
+              void *p = chunk2mem (victim);
+              alloc_perturb (p, bytes);
+              return p;
+            }
+        }
+
+      /*
+         Search for a chunk by scanning bins, starting with next largest
+         bin. This search is strictly by best-fit; i.e., the smallest
+         (with ties going to approximately the least recently used) chunk
+         that fits is selected.
+
+         The bitmap avoids needing to check that most blocks are nonempty.
+         The particular case of skipping all bins during warm-up phases
+         when no chunks have been returned yet is faster than it might look.
+       */
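+      /* Concretely, with BINMAPSHIFT == 5 each binmap word covers 32
+         bins, so bin index 68, for example, lives in block 68 >> 5 == 2
+         with bit 1 << (68 & 31) == 1 << 4; a zero word lets the scan
+         skip 32 bins at once.  */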
+
+      ++idx;
+      bin = bin_at (av, idx);
+      block = idx2block (idx);
+      map = av->binmap[block];
+      bit = idx2bit (idx);
+
+      for (;; )
+        {
+          /* Skip rest of block if there are no more set bits in this block.  */
+          if (bit > map || bit == 0)
+            {
+              do
+                {
+                  if (++block >= BINMAPSIZE) /* out of bins */
+                    goto use_top;
+                }
+              while ((map = av->binmap[block]) == 0);
+
+              bin = bin_at (av, (block << BINMAPSHIFT));
+              bit = 1;
+            }
+
+          /* Advance to bin with set bit. There must be one. */
+          while ((bit & map) == 0)
+            {
+              bin = next_bin (bin);
+              bit <<= 1;
+              assert (bit != 0);
+            }
+
+          /* Inspect the bin. It is likely to be non-empty */
+          victim = last (bin);
+
+          /*  If a false alarm (empty bin), clear the bit. */
+          if (victim == bin)
+            {
+              av->binmap[block] = map &= ~bit; /* Write through */
+              bin = next_bin (bin);
+              bit <<= 1;
+            }
+
+          else
+            {
+              size = chunksize (victim);
+
+              /*  We know the first chunk in this bin is big enough to use. */
+              assert ((unsigned long) (size) >= (unsigned long) (nb));
+
+              remainder_size = size - nb;
+
+              /* unlink */
+              unlink (av, victim, bck, fwd);
+
+              /* Exhaust */
+              if (remainder_size < MINSIZE)
+                {
+                  set_inuse_bit_at_offset (victim, size);
+                  if (av != &main_arena)
+		    set_non_main_arena (victim);
+                }
+
+              /* Split */
+              else
+                {
+                  remainder = chunk_at_offset (victim, nb);
+
+                  /* We cannot assume the unsorted list is empty and therefore
+                     have to perform a complete insert here.  */
+                  bck = unsorted_chunks (av);
+                  fwd = bck->fd;
+                  if (__glibc_unlikely (fwd->bk != bck))
+                    {
+                      errstr = "malloc(): corrupted unsorted chunks 2";
+                      goto errout;
+                    }
+                  remainder->bk = bck;
+                  remainder->fd = fwd;
+                  bck->fd = remainder;
+                  fwd->bk = remainder;
+
+                  /* advertise as last remainder */
+                  if (in_smallbin_range (nb))
+                    av->last_remainder = remainder;
+                  if (!in_smallbin_range (remainder_size))
+                    {
+                      remainder->fd_nextsize = NULL;
+                      remainder->bk_nextsize = NULL;
+                    }
+                  set_head (victim, nb | PREV_INUSE |
+                            (av != &main_arena ? NON_MAIN_ARENA : 0));
+                  set_head (remainder, remainder_size | PREV_INUSE);
+                  set_foot (remainder, remainder_size);
+                }
+              check_malloced_chunk (av, victim, nb);
+              void *p = chunk2mem (victim);
+              alloc_perturb (p, bytes);
+              return p;
+            }
+        }
+
+    use_top:
+      /*
+         If large enough, split off the chunk bordering the end of memory
+         (held in av->top). Note that this is in accord with the best-fit
+         search rule.  In effect, av->top is treated as larger (and thus
+         less well fitting) than any other available chunk since it can
+         be extended to be as large as necessary (up to system
+         limitations).
+
+         We require that av->top always exists (i.e., has size >=
+         MINSIZE) after initialization, so if it would otherwise be
+         exhausted by current request, it is replenished. (The main
+         reason for ensuring it exists is that we may need MINSIZE space
+         to put in fenceposts in sysmalloc.)
+       */
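+      /* For example, assuming MINSIZE == 32, a 4096-byte top chunk can
+         serve nb == 48: the caller gets the leading 48 bytes and the
+         remaining 4048 bytes become the new top.  */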
+
+      victim = av->top;
+      size = chunksize (victim);
+
+      if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
+        {
+          remainder_size = size - nb;
+          remainder = chunk_at_offset (victim, nb);
+          av->top = remainder;
+          set_head (victim, nb | PREV_INUSE |
+                    (av != &main_arena ? NON_MAIN_ARENA : 0));
+          set_head (remainder, remainder_size | PREV_INUSE);
+
+          check_malloced_chunk (av, victim, nb);
+          void *p = chunk2mem (victim);
+          alloc_perturb (p, bytes);
+          return p;
+        }
+
+      /* When we are using atomic ops to free fast chunks we can get
+         here for all block sizes.  */
+      else if (have_fastchunks (av))
+        {
+          malloc_consolidate (av);
+          /* restore original bin index */
+          if (in_smallbin_range (nb))
+            idx = smallbin_index (nb);
+          else
+            idx = largebin_index (nb);
+        }
+
+      /*
+         Otherwise, relay to handle system-dependent cases
+       */
+      else
+        {
+          void *p = sysmalloc (nb, av);
+          if (p != NULL)
+            alloc_perturb (p, bytes);
+          return p;
+        }
+    }
+}
+
+/*
+   ------------------------------ free ------------------------------
+ */
+
+static void
+_int_free (mstate av, mchunkptr p, int have_lock)
+{
+  INTERNAL_SIZE_T size;        /* its size */
+  mfastbinptr *fb;             /* associated fastbin */
+  mchunkptr nextchunk;         /* next contiguous chunk */
+  INTERNAL_SIZE_T nextsize;    /* its size */
+  int nextinuse;               /* true if nextchunk is used */
+  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
+  mchunkptr bck;               /* misc temp for linking */
+  mchunkptr fwd;               /* misc temp for linking */
+
+  const char *errstr = NULL;
+  int locked = 0;
+
+  size = chunksize (p);
+
+  /* Little security check which won't hurt performance: the
+     allocator never wraps around at the end of the address space.
+     Therefore we can exclude some size values which might appear
+     here by accident or by "design" from some intruder.  */
+  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
+      || __builtin_expect (misaligned_chunk (p), 0))
+    {
+      errstr = "free(): invalid pointer";
+    errout:
+      if (!have_lock && locked)
+        __libc_lock_unlock (av->mutex);
+      malloc_printerr (check_action, errstr, chunk2mem (p), av);
+      return;
+    }
+  /* We know that each chunk is at least MINSIZE bytes in size and a
+     multiple of MALLOC_ALIGNMENT.  */
+  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
+    {
+      errstr = "free(): invalid size";
+      goto errout;
+    }
+
+  check_inuse_chunk(av, p);
+
+  /*
+    If eligible, place chunk on a fastbin so it can be found
+    and used quickly in malloc.
+  */
+
+  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
+
+#if TRIM_FASTBINS
+      /*
+	If TRIM_FASTBINS is set, don't place chunks
+	bordering top into fastbins.
+      */
+      && (chunk_at_offset(p, size) != av->top)
+#endif
+      ) {
+
+    if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
+			  <= 2 * SIZE_SZ, 0)
+	|| __builtin_expect (chunksize (chunk_at_offset (p, size))
+			     >= av->system_mem, 0))
+      {
+	/* We might not have a lock at this point and concurrent modifications
+	   of system_mem might have led to a false positive.  Redo the test
+	   after getting the lock.  */
+	if (have_lock
+	    || ({ assert (locked == 0);
+		  __libc_lock_lock (av->mutex);
+		  locked = 1;
+		  chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
+		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
+	      }))
+	  {
+	    errstr = "free(): invalid next size (fast)";
+	    goto errout;
+	  }
+	if (! have_lock)
+	  {
+	    __libc_lock_unlock (av->mutex);
+	    locked = 0;
+	  }
+      }
+
+    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
+
+    set_fastchunks(av);
+    unsigned int idx = fastbin_index(size);
+    fb = &fastbin (av, idx);
+
+    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
+    mchunkptr old = *fb, old2;
+    unsigned int old_idx = ~0u;
+    do
+      {
+	/* Check that the top of the bin is not the record we are going to add
+	   (i.e., double free).  */
+	if (__builtin_expect (old == p, 0))
+	  {
+	    errstr = "double free or corruption (fasttop)";
+	    goto errout;
+	  }
+	/* Check that size of fastbin chunk at the top is the same as
+	   size of the chunk that we are adding.  We can dereference OLD
+	   only if we have the lock, otherwise it might have already been
+	   deallocated.  See use of OLD_IDX below for the actual check.  */
+	if (have_lock && old != NULL)
+	  old_idx = fastbin_index(chunksize(old));
+	p->fd = old2 = old;
+      }
+    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
+
+    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
+      {
+	errstr = "invalid fastbin entry (free)";
+	goto errout;
+      }
+  }
+
+  /*
+    Consolidate other non-mmapped chunks as they arrive.
+  */
+
+  else if (!chunk_is_mmapped(p)) {
+    if (! have_lock) {
+      __libc_lock_lock (av->mutex);
+      locked = 1;
+    }
+
+    nextchunk = chunk_at_offset(p, size);
+
+    /* Lightweight tests: check whether the block is already the
+       top block.  */
+    if (__glibc_unlikely (p == av->top))
+      {
+	errstr = "double free or corruption (top)";
+	goto errout;
+      }
+    /* Or whether the next chunk is beyond the boundaries of the arena.  */
+    if (__builtin_expect (contiguous (av)
+			  && (char *) nextchunk
+			  >= ((char *) av->top + chunksize(av->top)), 0))
+      {
+	errstr = "double free or corruption (out)";
+	goto errout;
+      }
+    /* Or whether the block is actually not marked used.  */
+    if (__glibc_unlikely (!prev_inuse(nextchunk)))
+      {
+	errstr = "double free or corruption (!prev)";
+	goto errout;
+      }
+
+    nextsize = chunksize(nextchunk);
+    if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
+	|| __builtin_expect (nextsize >= av->system_mem, 0))
+      {
+	errstr = "free(): invalid next size (normal)";
+	goto errout;
+      }
+
+    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
+
+    /* consolidate backward */
+    if (!prev_inuse(p)) {
+      prevsize = prev_size (p);
+      size += prevsize;
+      p = chunk_at_offset(p, -((long) prevsize));
+      unlink(av, p, bck, fwd);
+    }
+
+    if (nextchunk != av->top) {
+      /* get and clear inuse bit */
+      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+
+      /* consolidate forward */
+      if (!nextinuse) {
+	unlink(av, nextchunk, bck, fwd);
+	size += nextsize;
+      } else
+	clear_inuse_bit_at_offset(nextchunk, 0);
+
+      /*
+	Place the chunk in unsorted chunk list. Chunks are
+	not placed into regular bins until after they have
+	been given one chance to be used in malloc.
+      */
+
+      bck = unsorted_chunks(av);
+      fwd = bck->fd;
+      if (__glibc_unlikely (fwd->bk != bck))
+	{
+	  errstr = "free(): corrupted unsorted chunks";
+	  goto errout;
+	}
+      p->fd = fwd;
+      p->bk = bck;
+      if (!in_smallbin_range(size))
+	{
+	  p->fd_nextsize = NULL;
+	  p->bk_nextsize = NULL;
+	}
+      bck->fd = p;
+      fwd->bk = p;
+
+      set_head(p, size | PREV_INUSE);
+      set_foot(p, size);
+
+      check_free_chunk(av, p);
+    }
+
+    /*
+      If the chunk borders the current high end of memory,
+      consolidate into top
+    */
+
+    else {
+      size += nextsize;
+      set_head(p, size | PREV_INUSE);
+      av->top = p;
+      check_chunk(av, p);
+    }
+
+    /*
+      If freeing a large space, consolidate possibly-surrounding
+      chunks. Then, if the total unused topmost memory exceeds trim
+      threshold, ask malloc_trim to reduce top.
+
+      Unless max_fast is 0, we don't know if there are fastbins
+      bordering top, so we cannot tell for sure whether threshold
+      has been reached unless fastbins are consolidated.  But we
+      don't want to consolidate on each free.  As a compromise,
+      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
+      is reached.
+    */
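+    /* For instance, with FASTBIN_CONSOLIDATION_THRESHOLD at its usual
+       64K value, a free that coalesces into a 70000-byte block triggers
+       a consolidation pass and, in the main arena, possibly a systrim()
+       back toward the trim threshold.  */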
+
+    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
+      if (have_fastchunks(av))
+	malloc_consolidate(av);
+
+      if (av == &main_arena) {
+#ifndef MORECORE_CANNOT_TRIM
+	if ((unsigned long)(chunksize(av->top)) >=
+	    (unsigned long)(mp_.trim_threshold))
+	  systrim(mp_.top_pad, av);
+#endif
+      } else {
+	/* Always try heap_trim(), even if the top chunk is not
+	   large, because the corresponding heap might go away.  */
+	heap_info *heap = heap_for_ptr(top(av));
+
+	assert(heap->ar_ptr == av);
+	heap_trim(heap, mp_.top_pad);
+      }
+    }
+
+    if (! have_lock) {
+      assert (locked);
+      __libc_lock_unlock (av->mutex);
+    }
+  }
+  /*
+    If the chunk was allocated via mmap, release via munmap().
+  */
+
+  else {
+    munmap_chunk (p);
+  }
+}
+
+/*
+  ------------------------- malloc_consolidate -------------------------
+
+  malloc_consolidate is a specialized version of free() that tears
+  down chunks held in fastbins.  free() itself cannot be used for this
+  purpose since, among other things, it might place chunks back onto
+  fastbins.  So, instead, we need to use a minor variant of the same
+  code.
+
+  Also, because this routine needs to be called the first time through
+  malloc anyway, it turns out to be the perfect place to trigger
+  initialization code.
+*/
+
+static void malloc_consolidate(mstate av)
+{
+  mfastbinptr*    fb;                 /* current fastbin being consolidated */
+  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
+  mchunkptr       p;                  /* current chunk being consolidated */
+  mchunkptr       nextp;              /* next chunk to consolidate */
+  mchunkptr       unsorted_bin;       /* bin header */
+  mchunkptr       first_unsorted;     /* chunk to link to */
+
+  /* These have same use as in free() */
+  mchunkptr       nextchunk;
+  INTERNAL_SIZE_T size;
+  INTERNAL_SIZE_T nextsize;
+  INTERNAL_SIZE_T prevsize;
+  int             nextinuse;
+  mchunkptr       bck;
+  mchunkptr       fwd;
+
+  /*
+    If max_fast is 0, we know that av hasn't
+    yet been initialized, in which case do so below
+  */
+
+  if (get_max_fast () != 0) {
+    clear_fastchunks(av);
+
+    unsorted_bin = unsorted_chunks(av);
+
+    /*
+      Remove each chunk from its fast bin and consolidate it, then place
+      it in the unsorted bin.  Among other reasons for doing this,
+      placing chunks in the unsorted bin avoids needing to calculate
+      actual bins until malloc is sure that chunks aren't immediately
+      going to be reused anyway.
+    */
+
+    maxfb = &fastbin (av, NFASTBINS - 1);
+    fb = &fastbin (av, 0);
+    do {
+      p = atomic_exchange_acq (fb, NULL);
+      if (p != 0) {
+	do {
+	  check_inuse_chunk(av, p);
+	  nextp = p->fd;
+
+	  /* Slightly streamlined version of consolidation code in free() */
+	  size = chunksize (p);
+	  nextchunk = chunk_at_offset(p, size);
+	  nextsize = chunksize(nextchunk);
+
+	  if (!prev_inuse(p)) {
+	    prevsize = prev_size (p);
+	    size += prevsize;
+	    p = chunk_at_offset(p, -((long) prevsize));
+	    unlink(av, p, bck, fwd);
+	  }
+
+	  if (nextchunk != av->top) {
+	    nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+
+	    if (!nextinuse) {
+	      size += nextsize;
+	      unlink(av, nextchunk, bck, fwd);
+	    } else
+	      clear_inuse_bit_at_offset(nextchunk, 0);
+
+	    first_unsorted = unsorted_bin->fd;
+	    unsorted_bin->fd = p;
+	    first_unsorted->bk = p;
+
+	    if (!in_smallbin_range (size)) {
+	      p->fd_nextsize = NULL;
+	      p->bk_nextsize = NULL;
+	    }
+
+	    set_head(p, size | PREV_INUSE);
+	    p->bk = unsorted_bin;
+	    p->fd = first_unsorted;
+	    set_foot(p, size);
+	  }
+
+	  else {
+	    size += nextsize;
+	    set_head(p, size | PREV_INUSE);
+	    av->top = p;
+	  }
+
+	} while ( (p = nextp) != 0);
+
+      }
+    } while (fb++ != maxfb);
+  }
+  else {
+    malloc_init_state(av);
+    check_malloc_state(av);
+  }
+}
+
+/*
+  ------------------------------ realloc ------------------------------
+*/
+
+void*
+_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
+	     INTERNAL_SIZE_T nb)
+{
+  mchunkptr        newp;            /* chunk to return */
+  INTERNAL_SIZE_T  newsize;         /* its size */
+  void*          newmem;          /* corresponding user mem */
+
+  mchunkptr        next;            /* next contiguous chunk after oldp */
+
+  mchunkptr        remainder;       /* extra space at end of newp */
+  unsigned long    remainder_size;  /* its size */
+
+  mchunkptr        bck;             /* misc temp for linking */
+  mchunkptr        fwd;             /* misc temp for linking */
+
+  unsigned long    copysize;        /* bytes to copy */
+  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
+  INTERNAL_SIZE_T* s;               /* copy source */
+  INTERNAL_SIZE_T* d;               /* copy destination */
+
+  const char *errstr = NULL;
+
+  /* oldmem size */
+  if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
+      || __builtin_expect (oldsize >= av->system_mem, 0))
+    {
+      errstr = "realloc(): invalid old size";
+    errout:
+      malloc_printerr (check_action, errstr, chunk2mem (oldp), av);
+      return NULL;
+    }
+
+  check_inuse_chunk (av, oldp);
+
+  /* All callers already filter out mmap'ed chunks.  */
+  assert (!chunk_is_mmapped (oldp));
+
+  next = chunk_at_offset (oldp, oldsize);
+  INTERNAL_SIZE_T nextsize = chunksize (next);
+  if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
+      || __builtin_expect (nextsize >= av->system_mem, 0))
+    {
+      errstr = "realloc(): invalid next size";
+      goto errout;
+    }
+
+  if ((unsigned long) (oldsize) >= (unsigned long) (nb))
+    {
+      /* already big enough; split below */
+      newp = oldp;
+      newsize = oldsize;
+    }
+
+  else
+    {
+      /* Try to expand forward into top */
+      if (next == av->top &&
+          (unsigned long) (newsize = oldsize + nextsize) >=
+          (unsigned long) (nb + MINSIZE))
+        {
+          set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
+          av->top = chunk_at_offset (oldp, nb);
+          set_head (av->top, (newsize - nb) | PREV_INUSE);
+          check_inuse_chunk (av, oldp);
+          return chunk2mem (oldp);
+        }
+
+      /* Try to expand forward into next chunk;  split off remainder below */
+      else if (next != av->top &&
+               !inuse (next) &&
+               (unsigned long) (newsize = oldsize + nextsize) >=
+               (unsigned long) (nb))
+        {
+          newp = oldp;
+          unlink (av, next, bck, fwd);
+        }
+
+      /* allocate, copy, free */
+      else
+        {
+          newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
+          if (newmem == 0)
+            return 0; /* propagate failure */
+
+          newp = mem2chunk (newmem);
+          newsize = chunksize (newp);
+
+          /*
+             Avoid copy if newp is next chunk after oldp.
+           */
+          if (newp == next)
+            {
+              newsize += oldsize;
+              newp = oldp;
+            }
+          else
+            {
+              /*
+                 Unroll copy of <= 36 bytes (72 if 8-byte sizes).
+                 We know that contents have an odd number of
+                 INTERNAL_SIZE_T-sized words; minimally 3.
+               */
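+              /* For example, with SIZE_SZ == 8 (an assumption), an old
+                 chunk of oldsize == 48 yields copysize == 40 and
+                 ncopies == 5, which the unrolled branch below handles
+                 word by word.  */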
+
+              copysize = oldsize - SIZE_SZ;
+              s = (INTERNAL_SIZE_T *) (chunk2mem (oldp));
+              d = (INTERNAL_SIZE_T *) (newmem);
+              ncopies = copysize / sizeof (INTERNAL_SIZE_T);
+              assert (ncopies >= 3);
+
+              if (ncopies > 9)
+                memcpy (d, s, copysize);
+
+              else
+                {
+                  *(d + 0) = *(s + 0);
+                  *(d + 1) = *(s + 1);
+                  *(d + 2) = *(s + 2);
+                  if (ncopies > 4)
+                    {
+                      *(d + 3) = *(s + 3);
+                      *(d + 4) = *(s + 4);
+                      if (ncopies > 6)
+                        {
+                          *(d + 5) = *(s + 5);
+                          *(d + 6) = *(s + 6);
+                          if (ncopies > 8)
+                            {
+                              *(d + 7) = *(s + 7);
+                              *(d + 8) = *(s + 8);
+                            }
+                        }
+                    }
+                }
+
+              _int_free (av, oldp, 1);
+              check_inuse_chunk (av, newp);
+              return chunk2mem (newp);
+            }
+        }
+    }
+
+  /* If possible, free extra space in old or extended chunk */
+
+  assert ((unsigned long) (newsize) >= (unsigned long) (nb));
+
+  remainder_size = newsize - nb;
+
+  if (remainder_size < MINSIZE)   /* not enough extra to split off */
+    {
+      set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
+      set_inuse_bit_at_offset (newp, newsize);
+    }
+  else   /* split remainder */
+    {
+      remainder = chunk_at_offset (newp, nb);
+      set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
+      set_head (remainder, remainder_size | PREV_INUSE |
+                (av != &main_arena ? NON_MAIN_ARENA : 0));
+      /* Mark remainder as inuse so free() won't complain */
+      set_inuse_bit_at_offset (remainder, remainder_size);
+      _int_free (av, remainder, 1);
+    }
+
+  check_inuse_chunk (av, newp);
+  return chunk2mem (newp);
+}
+
+/*
+   ------------------------------ memalign ------------------------------
+ */
+
+static void *
+_int_memalign (mstate av, size_t alignment, size_t bytes)
+{
+  INTERNAL_SIZE_T nb;             /* padded  request size */
+  char *m;                        /* memory returned by malloc call */
+  mchunkptr p;                    /* corresponding chunk */
+  char *brk;                      /* alignment point within p */
+  mchunkptr newp;                 /* chunk to return */
+  INTERNAL_SIZE_T newsize;        /* its size */
+  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
+  mchunkptr remainder;            /* spare room at end to split off */
+  unsigned long remainder_size;   /* its size */
+  INTERNAL_SIZE_T size;
+
+
+
+  checked_request2size (bytes, nb);
+
+  /*
+     Strategy: find a spot within the chunk allocated below that meets
+     the alignment request, and then possibly free the leading and
+     trailing space.
+   */
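+  /* As a sketch of why the padding suffices (assuming MINSIZE == 32):
+     asking malloc below for nb + alignment + MINSIZE bytes, e.g.
+     48 + 64 + 32 == 144 for nb == 48 and alignment == 64, guarantees an
+     aligned spot with at least MINSIZE leading bytes to give back and
+     at least nb bytes remaining after it.  */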
+
+
+  /* Call malloc with worst case padding to hit alignment. */
+
+  m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
+
+  if (m == 0)
+    return 0;           /* propagate failure */
+
+  p = mem2chunk (m);
+
+  if ((((unsigned long) (m)) % alignment) != 0)   /* misaligned */
+
+    {
+      /* Find an aligned spot inside chunk.  Since we need to give back
+         leading space in a chunk of at least MINSIZE, if the first
+         calculation places us at a spot with less than MINSIZE leader,
+         we can move to the next aligned spot -- we've allocated enough
+         total room so that this is always possible.  */
+      brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
+                                - ((signed long) alignment));
+      if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
+        brk += alignment;
+
+      newp = (mchunkptr) brk;
+      leadsize = brk - (char *) (p);
+      newsize = chunksize (p) - leadsize;
+
+      /* For mmapped chunks, just adjust offset */
+      if (chunk_is_mmapped (p))
+        {
+          set_prev_size (newp, prev_size (p) + leadsize);
+          set_head (newp, newsize | IS_MMAPPED);
+          return chunk2mem (newp);
+        }
+
+      /* Otherwise, give back leader, use the rest */
+      set_head (newp, newsize | PREV_INUSE |
+                (av != &main_arena ? NON_MAIN_ARENA : 0));
+      set_inuse_bit_at_offset (newp, newsize);
+      set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
+      _int_free (av, p, 1);
+      p = newp;
+
+      assert (newsize >= nb &&
+              (((unsigned long) (chunk2mem (p))) % alignment) == 0);
+    }
+
+  /* Also give back spare room at the end */
+  if (!chunk_is_mmapped (p))
+    {
+      size = chunksize (p);
+      if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
+        {
+          remainder_size = size - nb;
+          remainder = chunk_at_offset (p, nb);
+          set_head (remainder, remainder_size | PREV_INUSE |
+                    (av != &main_arena ? NON_MAIN_ARENA : 0));
+          set_head_size (p, nb);
+          _int_free (av, remainder, 1);
+        }
+    }
+
+  check_inuse_chunk (av, p);
+  return chunk2mem (p);
+}
+
+
+/*
+   ------------------------------ malloc_trim ------------------------------
+ */
+
+static int
+mtrim (mstate av, size_t pad)
+{
+  /* Don't touch corrupt arenas.  */
+  if (arena_is_corrupt (av))
+    return 0;
+
+  /* Ensure initialization/consolidation */
+  malloc_consolidate (av);
+
+  const size_t ps = GLRO (dl_pagesize);
+  int psindex = bin_index (ps);
+  const size_t psm1 = ps - 1;
+
+  int result = 0;
+  for (int i = 1; i < NBINS; ++i)
+    if (i == 1 || i >= psindex)
+      {
+        mbinptr bin = bin_at (av, i);
+
+        for (mchunkptr p = last (bin); p != bin; p = p->bk)
+          {
+            INTERNAL_SIZE_T size = chunksize (p);
+
+            if (size > psm1 + sizeof (struct malloc_chunk))
+              {
+                /* See whether the chunk contains at least one unused page.  */
+                char *paligned_mem = (char *) (((uintptr_t) p
+                                                + sizeof (struct malloc_chunk)
+                                                + psm1) & ~psm1);
+
+                assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
+                assert ((char *) p + size > paligned_mem);
+
+                /* This is the size we could potentially free.  */
+                size -= paligned_mem - (char *) p;
+
+                if (size > psm1)
+                  {
+#if MALLOC_DEBUG
+                    /* When debugging we simulate destroying the memory
+                       content.  */
+                    memset (paligned_mem, 0x89, size & ~psm1);
+#endif
+                    __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
+
+                    result = 1;
+                  }
+              }
+          }
+      }
+
+#ifndef MORECORE_CANNOT_TRIM
+  return result | (av == &main_arena ? systrim (pad, av) : 0);
+
+#else
+  return result;
+#endif
+}
+
+
+int
+__malloc_trim (size_t s)
+{
+  int result = 0;
+
+  if (__malloc_initialized < 0)
+    ptmalloc_init ();
+
+  mstate ar_ptr = &main_arena;
+  do
+    {
+      __libc_lock_lock (ar_ptr->mutex);
+      result |= mtrim (ar_ptr, s);
+      __libc_lock_unlock (ar_ptr->mutex);
+
+      ar_ptr = ar_ptr->next;
+    }
+  while (ar_ptr != &main_arena);
+
+  return result;
+}
+
+
+/*
+   ------------------------- malloc_usable_size -------------------------
+ */
+
+static size_t
+musable (void *mem)
+{
+  mchunkptr p;
+  if (mem != 0)
+    {
+      p = mem2chunk (mem);
+
+      if (__builtin_expect (using_malloc_checking == 1, 0))
+        return malloc_check_get_size (p);
+
+      if (chunk_is_mmapped (p))
+	{
+	  if (DUMPED_MAIN_ARENA_CHUNK (p))
+	    return chunksize (p) - SIZE_SZ;
+	  else
+	    return chunksize (p) - 2 * SIZE_SZ;
+	}
+      else if (inuse (p))
+        return chunksize (p) - SIZE_SZ;
+    }
+  return 0;
+}
+
+
+size_t
+__malloc_usable_size (void *m)
+{
+  size_t result;
+
+  result = musable (m);
+  return result;
+}
+
+/*
+   ------------------------------ mallinfo ------------------------------
+   Accumulate malloc statistics for arena AV into M.
+ */
+
+static void
+int_mallinfo (mstate av, struct mallinfo *m)
+{
+  size_t i;
+  mbinptr b;
+  mchunkptr p;
+  INTERNAL_SIZE_T avail;
+  INTERNAL_SIZE_T fastavail;
+  int nblocks;
+  int nfastblocks;
+
+  /* Ensure initialization */
+  if (av->top == 0)
+    malloc_consolidate (av);
+
+  check_malloc_state (av);
+
+  /* Account for top */
+  avail = chunksize (av->top);
+  nblocks = 1;  /* top always exists */
+
+  /* traverse fastbins */
+  nfastblocks = 0;
+  fastavail = 0;
+
+  for (i = 0; i < NFASTBINS; ++i)
+    {
+      for (p = fastbin (av, i); p != 0; p = p->fd)
+        {
+          ++nfastblocks;
+          fastavail += chunksize (p);
+        }
+    }
+
+  avail += fastavail;
+
+  /* traverse regular bins */
+  for (i = 1; i < NBINS; ++i)
+    {
+      b = bin_at (av, i);
+      for (p = last (b); p != b; p = p->bk)
+        {
+          ++nblocks;
+          avail += chunksize (p);
+        }
+    }
+
+  m->smblks += nfastblocks;
+  m->ordblks += nblocks;
+  m->fordblks += avail;
+  m->uordblks += av->system_mem - avail;
+  m->arena += av->system_mem;
+  m->fsmblks += fastavail;
+  if (av == &main_arena)
+    {
+      m->hblks = mp_.n_mmaps;
+      m->hblkhd = mp_.mmapped_mem;
+      m->usmblks = 0;
+      m->keepcost = chunksize (av->top);
+    }
+}
+
+
+struct mallinfo
+__libc_mallinfo (void)
+{
+  struct mallinfo m;
+  mstate ar_ptr;
+
+  if (__malloc_initialized < 0)
+    ptmalloc_init ();
+
+  memset (&m, 0, sizeof (m));
+  ar_ptr = &main_arena;
+  do
+    {
+      __libc_lock_lock (ar_ptr->mutex);
+      int_mallinfo (ar_ptr, &m);
+      __libc_lock_unlock (ar_ptr->mutex);
+
+      ar_ptr = ar_ptr->next;
+    }
+  while (ar_ptr != &main_arena);
+
+  return m;
+}
+
+/*
+   ------------------------------ malloc_stats ------------------------------
+ */
+
+void
+__malloc_stats (void)
+{
+  int i;
+  mstate ar_ptr;
+  unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
+
+  if (__malloc_initialized < 0)
+    ptmalloc_init ();
+  _IO_flockfile (stderr);
+  int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
+  ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
+  for (i = 0, ar_ptr = &main_arena;; i++)
+    {
+      struct mallinfo mi;
+
+      memset (&mi, 0, sizeof (mi));
+      __libc_lock_lock (ar_ptr->mutex);
+      int_mallinfo (ar_ptr, &mi);
+      fprintf (stderr, "Arena %d:\n", i);
+      fprintf (stderr, "system bytes     = %10u\n", (unsigned int) mi.arena);
+      fprintf (stderr, "in use bytes     = %10u\n", (unsigned int) mi.uordblks);
+#if MALLOC_DEBUG > 1
+      if (i > 0)
+        dump_heap (heap_for_ptr (top (ar_ptr)));
+#endif
+      system_b += mi.arena;
+      in_use_b += mi.uordblks;
+      __libc_lock_unlock (ar_ptr->mutex);
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
+    }
+  fprintf (stderr, "Total (incl. mmap):\n");
+  fprintf (stderr, "system bytes     = %10u\n", system_b);
+  fprintf (stderr, "in use bytes     = %10u\n", in_use_b);
+  fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
+  fprintf (stderr, "max mmap bytes   = %10lu\n",
+           (unsigned long) mp_.max_mmapped_mem);
+  ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
+  _IO_funlockfile (stderr);
+}
+
+
+/*
+   ------------------------------ mallopt ------------------------------
+ */
+static inline int
+__always_inline
+do_set_trim_threshold (size_t value)
+{
+  LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
+	      mp_.no_dyn_threshold);
+  mp_.trim_threshold = value;
+  mp_.no_dyn_threshold = 1;
+  return 1;
+}
+
+static inline int
+__always_inline
+do_set_top_pad (size_t value)
+{
+  LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
+	      mp_.no_dyn_threshold);
+  mp_.top_pad = value;
+  mp_.no_dyn_threshold = 1;
+  return 1;
+}
+
+static inline int
+__always_inline
+do_set_mmap_threshold (size_t value)
+{
+  /* Forbid setting the threshold too high.  */
+  if (value <= HEAP_MAX_SIZE / 2)
+    {
+      LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
+		  mp_.no_dyn_threshold);
+      mp_.mmap_threshold = value;
+      mp_.no_dyn_threshold = 1;
+      return 1;
+    }
+  return 0;
+}
+
+static inline int
+__always_inline
+do_set_mmaps_max (int32_t value)
+{
+  LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
+	      mp_.no_dyn_threshold);
+  mp_.n_mmaps_max = value;
+  mp_.no_dyn_threshold = 1;
+  return 1;
+}
+
+static inline int
+__always_inline
+do_set_mallopt_check (int32_t value)
+{
+  LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
+  check_action = value;
+  return 1;
+}
+
+static inline int
+__always_inline
+do_set_perturb_byte (int32_t value)
+{
+  LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
+  perturb_byte = value;
+  return 1;
+}
+
+static inline int
+__always_inline
+do_set_arena_test (size_t value)
+{
+  LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
+  mp_.arena_test = value;
+  return 1;
+}
+
+static inline int
+__always_inline
+do_set_arena_max (size_t value)
+{
+  LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
+  mp_.arena_max = value;
+  return 1;
+}
+
+
+int
+__libc_mallopt (int param_number, int value)
+{
+  mstate av = &main_arena;
+  int res = 1;
+
+  if (__malloc_initialized < 0)
+    ptmalloc_init ();
+  __libc_lock_lock (av->mutex);
+  /* Ensure initialization/consolidation */
+  malloc_consolidate (av);
+
+  LIBC_PROBE (memory_mallopt, 2, param_number, value);
+
+  switch (param_number)
+    {
+    case M_MXFAST:
+      if (value >= 0 && value <= MAX_FAST_SIZE)
+        {
+          LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
+          set_max_fast (value);
+        }
+      else
+        res = 0;
+      break;
+
+    case M_TRIM_THRESHOLD:
+      do_set_trim_threshold (value);
+      break;
+
+    case M_TOP_PAD:
+      do_set_top_pad (value);
+      break;
+
+    case M_MMAP_THRESHOLD:
+      res = do_set_mmap_threshold (value);
+      break;
+
+    case M_MMAP_MAX:
+      do_set_mmaps_max (value);
+      break;
+
+    case M_CHECK_ACTION:
+      do_set_mallopt_check (value);
+      break;
+
+    case M_PERTURB:
+      do_set_perturb_byte (value);
+      break;
+
+    case M_ARENA_TEST:
+      if (value > 0)
+	do_set_arena_test (value);
+      break;
+
+    case M_ARENA_MAX:
+      if (value > 0)
+	do_set_arena_max (value);
+      break;
+    }
+  __libc_lock_unlock (av->mutex);
+  return res;
+}
+libc_hidden_def (__libc_mallopt)
+
+
+/*
+   -------------------- Alternative MORECORE functions --------------------
+ */
+
+
+/*
+   General Requirements for MORECORE.
+
+   The MORECORE function must have the following properties:
+
+   If MORECORE_CONTIGUOUS is false:
+
+ * MORECORE must allocate in multiples of pagesize. It will
+      only be called with arguments that are multiples of pagesize.
+
+ * MORECORE(0) must return an address that is at least
+      MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
+
+   else (i.e. If MORECORE_CONTIGUOUS is true):
+
+ * Consecutive calls to MORECORE with positive arguments
+      return increasing addresses, indicating that space has been
+      contiguously extended.
+
+ * MORECORE need not allocate in multiples of pagesize.
+      Calls to MORECORE need not have args of multiples of pagesize.
+
+ * MORECORE need not page-align.
+
+   In either case:
+
+ * MORECORE may allocate more memory than requested. (Or even less,
+      but this will generally result in a malloc failure.)
+
+ * MORECORE must not allocate memory when given argument zero, but
+      instead return one past the end address of memory from previous
+      nonzero call. This malloc does NOT call MORECORE(0)
+      until at least one call with positive arguments is made, so
+      the initial value returned is not important.
+
+ * Even though consecutive calls to MORECORE need not return contiguous
+      addresses, it must be OK for malloc'ed chunks to span multiple
+      regions in those cases where they do happen to be contiguous.
+
+ * MORECORE need not handle negative arguments -- it may instead
+      just return MORECORE_FAILURE when given negative arguments.
+      Negative arguments are always multiples of pagesize. MORECORE
+      must not misinterpret negative args as large positive unsigned
+      args.  You can suppress all such calls from ever occurring by defining
+      MORECORE_CANNOT_TRIM.
+
+   There is some variation across systems about the type of the
+   argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
+   actually be size_t, because sbrk supports negative args, so it is
+   normally the signed type of the same width as size_t (sometimes
+   declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
+   matter though. Internally, we use "long" as arguments, which should
+   work across all reasonable possibilities.
+
+   Additionally, if MORECORE ever returns failure for a positive
+   request, then mmap is used as a noncontiguous system allocator. This
+   is a useful backup strategy for systems with holes in address spaces
+   -- in this case sbrk cannot contiguously expand the heap, but mmap
+   may be able to map noncontiguous space.
+
+   If you'd like mmap to ALWAYS be used, you can define MORECORE to be
+   a function that always returns MORECORE_FAILURE.
+
+   If you are using this malloc with something other than sbrk (or its
+   emulation) to supply memory regions, you probably want to set
+   MORECORE_CONTIGUOUS as false.  As an example, here is a custom
+   allocator kindly contributed for pre-OSX macOS.  It uses virtually
+   but not necessarily physically contiguous non-paged memory (locked
+   in, present and won't get swapped out).  You can use it by
+   uncommenting this section, adding some #includes, and setting up the
+   appropriate defines above:
+
+ *#define MORECORE osMoreCore
+ *#define MORECORE_CONTIGUOUS 0
+
+   There is also a shutdown routine that should somehow be called for
+   cleanup upon program exit.
+
+ *#define MAX_POOL_ENTRIES 100
+ *#define MINIMUM_MORECORE_SIZE  (64 * 1024)
+   static int next_os_pool;
+   void *our_os_pools[MAX_POOL_ENTRIES];
+
+   void *osMoreCore(int size)
+   {
+    void *ptr = 0;
+    static void *sbrk_top = 0;
+
+    if (size > 0)
+    {
+      if (size < MINIMUM_MORECORE_SIZE)
+         size = MINIMUM_MORECORE_SIZE;
+      if (CurrentExecutionLevel() == kTaskLevel)
+         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
+      if (ptr == 0)
+      {
+        return (void *) MORECORE_FAILURE;
+      }
+      // save ptrs so they can be freed during cleanup
+      our_os_pools[next_os_pool] = ptr;
+      next_os_pool++;
+      ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
+      sbrk_top = (char *) ptr + size;
+      return ptr;
+    }
+    else if (size < 0)
+    {
+      // we don't currently support shrink behavior
+      return (void *) MORECORE_FAILURE;
+    }
+    else
+    {
+      return sbrk_top;
+    }
+   }
+
+   // cleanup any allocated memory pools
+   // called as last thing before shutting down driver
+
+   void osCleanupMem(void)
+   {
+    void **ptr;
+
+    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
+      if (*ptr)
+      {
+         PoolDeallocate(*ptr);
+         *ptr = 0;
+      }
+   }
+
+ */
+
+
+/* Helper code.  */
+
+extern char **__libc_argv attribute_hidden;
+
+static void
+malloc_printerr (int action, const char *str, void *ptr, mstate ar_ptr)
+{
+  /* Avoid using this arena in future.  We do not attempt to synchronize this
+     with anything else because we minimally want to ensure that __libc_message
+     gets its resources safely without stumbling on the current corruption.  */
+  if (ar_ptr)
+    set_arena_corrupt (ar_ptr);
+
+  if ((action & 5) == 5)
+    __libc_message (action & 2, "%s\n", str);
+  else if (action & 1)
+    {
+      char buf[2 * sizeof (uintptr_t) + 1];
+
+      buf[sizeof (buf) - 1] = '\0';
+      char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
+      while (cp > buf)
+        *--cp = '0';
+
+      __libc_message (action & 2, "*** Error in `%s': %s: 0x%s ***\n",
+                      __libc_argv[0] ? : "<unknown>", str, cp);
+    }
+  else if (action & 2)
+    abort ();
+}
+
+/* We need a wrapper function for one of the additions of POSIX.  */
+int
+__posix_memalign (void **memptr, size_t alignment, size_t size)
+{
+  void *mem;
+
+  /* Test whether the ALIGNMENT argument is valid.  It must be a power
+     of two multiple of sizeof (void *).  */
+  if (alignment % sizeof (void *) != 0
+      || !powerof2 (alignment / sizeof (void *))
+      || alignment == 0)
+    return EINVAL;
+
+
+  void *address = RETURN_ADDRESS (0);
+  mem = _mid_memalign (alignment, size, address);
+
+  if (mem != NULL)
+    {
+      *memptr = mem;
+      return 0;
+    }
+
+  return ENOMEM;
+}
+weak_alias (__posix_memalign, posix_memalign)
+
+
+int
+__malloc_info (int options, FILE *fp)
+{
+  /* For now, at least.  */
+  if (options != 0)
+    return EINVAL;
+
+  int n = 0;
+  size_t total_nblocks = 0;
+  size_t total_nfastblocks = 0;
+  size_t total_avail = 0;
+  size_t total_fastavail = 0;
+  size_t total_system = 0;
+  size_t total_max_system = 0;
+  size_t total_aspace = 0;
+  size_t total_aspace_mprotect = 0;
+
+
+
+  if (__malloc_initialized < 0)
+    ptmalloc_init ();
+
+  fputs ("<malloc version=\"1\">\n", fp);
+
+  /* Iterate over all arenas currently in use.  */
+  mstate ar_ptr = &main_arena;
+  do
+    {
+      fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
+
+      size_t nblocks = 0;
+      size_t nfastblocks = 0;
+      size_t avail = 0;
+      size_t fastavail = 0;
+      struct
+      {
+	size_t from;
+	size_t to;
+	size_t total;
+	size_t count;
+      } sizes[NFASTBINS + NBINS - 1];
+#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
+
+      __libc_lock_lock (ar_ptr->mutex);
+
+      for (size_t i = 0; i < NFASTBINS; ++i)
+	{
+	  mchunkptr p = fastbin (ar_ptr, i);
+	  if (p != NULL)
+	    {
+	      size_t nthissize = 0;
+	      size_t thissize = chunksize (p);
+
+	      while (p != NULL)
+		{
+		  ++nthissize;
+		  p = p->fd;
+		}
+
+	      fastavail += nthissize * thissize;
+	      nfastblocks += nthissize;
+	      sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
+	      sizes[i].to = thissize;
+	      sizes[i].count = nthissize;
+	    }
+	  else
+	    sizes[i].from = sizes[i].to = sizes[i].count = 0;
+
+	  sizes[i].total = sizes[i].count * sizes[i].to;
+	}
+
+
+      mbinptr bin;
+      struct malloc_chunk *r;
+
+      for (size_t i = 1; i < NBINS; ++i)
+	{
+	  bin = bin_at (ar_ptr, i);
+	  r = bin->fd;
+	  sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
+	  sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
+					  = sizes[NFASTBINS - 1 + i].count = 0;
+
+	  if (r != NULL)
+	    while (r != bin)
+	      {
+		size_t r_size = chunksize_nomask (r);
+		++sizes[NFASTBINS - 1 + i].count;
+		sizes[NFASTBINS - 1 + i].total += r_size;
+		sizes[NFASTBINS - 1 + i].from
+		  = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
+		sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
+						   r_size);
+
+		r = r->fd;
+	      }
+
+	  if (sizes[NFASTBINS - 1 + i].count == 0)
+	    sizes[NFASTBINS - 1 + i].from = 0;
+	  nblocks += sizes[NFASTBINS - 1 + i].count;
+	  avail += sizes[NFASTBINS - 1 + i].total;
+	}
+
+      __libc_lock_unlock (ar_ptr->mutex);
+
+      total_nfastblocks += nfastblocks;
+      total_fastavail += fastavail;
+
+      total_nblocks += nblocks;
+      total_avail += avail;
+
+      for (size_t i = 0; i < nsizes; ++i)
+	if (sizes[i].count != 0 && i != NFASTBINS)
+	  fprintf (fp, "							      \
+  <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
+		   sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
+
+      if (sizes[NFASTBINS].count != 0)
+	fprintf (fp, "\
+  <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
+		 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
+		 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
+
+      total_system += ar_ptr->system_mem;
+      total_max_system += ar_ptr->max_system_mem;
+
+      fprintf (fp,
+	       "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
+	       "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
+	       "<system type=\"current\" size=\"%zu\"/>\n"
+	       "<system type=\"max\" size=\"%zu\"/>\n",
+	       nfastblocks, fastavail, nblocks, avail,
+	       ar_ptr->system_mem, ar_ptr->max_system_mem);
+
+      if (ar_ptr != &main_arena)
+	{
+	  heap_info *heap = heap_for_ptr (top (ar_ptr));
+	  fprintf (fp,
+		   "<aspace type=\"total\" size=\"%zu\"/>\n"
+		   "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
+		   heap->size, heap->mprotect_size);
+	  total_aspace += heap->size;
+	  total_aspace_mprotect += heap->mprotect_size;
+	}
+      else
+	{
+	  fprintf (fp,
+		   "<aspace type=\"total\" size=\"%zu\"/>\n"
+		   "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
+		   ar_ptr->system_mem, ar_ptr->system_mem);
+	  total_aspace += ar_ptr->system_mem;
+	  total_aspace_mprotect += ar_ptr->system_mem;
+	}
+
+      fputs ("</heap>\n", fp);
+      ar_ptr = ar_ptr->next;
+    }
+  while (ar_ptr != &main_arena);
+
+  fprintf (fp,
+	   "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
+	   "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
+	   "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
+	   "<system type=\"current\" size=\"%zu\"/>\n"
+	   "<system type=\"max\" size=\"%zu\"/>\n"
+	   "<aspace type=\"total\" size=\"%zu\"/>\n"
+	   "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
+	   "</malloc>\n",
+	   total_nfastblocks, total_fastavail, total_nblocks, total_avail,
+	   mp_.n_mmaps, mp_.mmapped_mem,
+	   total_system, total_max_system,
+	   total_aspace, total_aspace_mprotect);
+
+  return 0;
+}
+weak_alias (__malloc_info, malloc_info)
+
+
+strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
+strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
+strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
+strong_alias (__libc_memalign, __memalign)
+weak_alias (__libc_memalign, memalign)
+strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
+strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
+strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
+strong_alias (__libc_mallinfo, __mallinfo)
+weak_alias (__libc_mallinfo, mallinfo)
+strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
+
+weak_alias (__malloc_stats, malloc_stats)
+weak_alias (__malloc_usable_size, malloc_usable_size)
+weak_alias (__malloc_trim, malloc_trim)
+
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
+compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
+#endif
+
+/* ------------------------------------------------------------
+   History:
+
+   [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
+
+ */
+/*
+ * Local variables:
+ * c-basic-offset: 2
+ * End:
+ */
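
A minimal sketch of driving the code above through its public entry point: the XML emitted by __malloc_info is reachable via malloc_info (declared in malloc.h below), whose options argument currently must be 0.

    /* Sketch only: dump the allocator state produced by __malloc_info.  */
    #include <malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main (void)
    {
      void *blocks[32];
      for (int i = 0; i < 32; ++i)
        blocks[i] = malloc ((i + 1) * 64);      /* populate several bins */
      for (int i = 0; i < 32; i += 2)
        free (blocks[i]);                       /* leave free chunks behind */

      /* Writes the <malloc>/<heap>/<sizes>/<total>/<system>/<aspace>
         elements constructed above.  */
      malloc_info (0, stdout);
      return 0;
    }
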
diff --git a/REORG.TODO/malloc/malloc.h b/REORG.TODO/malloc/malloc.h
new file mode 100644
index 0000000000..339ab64c7d
--- /dev/null
+++ b/REORG.TODO/malloc/malloc.h
@@ -0,0 +1,164 @@
+/* Prototypes and definition for malloc implementation.
+   Copyright (C) 1996-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _MALLOC_H
+#define _MALLOC_H 1
+
+#include <features.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#ifdef _LIBC
+# define __MALLOC_HOOK_VOLATILE
+# define __MALLOC_DEPRECATED
+#else
+# define __MALLOC_HOOK_VOLATILE volatile
+# define __MALLOC_DEPRECATED __attribute_deprecated__
+#endif
+
+
+__BEGIN_DECLS
+
+/* Allocate SIZE bytes of memory.  */
+extern void *malloc (size_t __size) __THROW __attribute_malloc__ __wur;
+
+/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
+extern void *calloc (size_t __nmemb, size_t __size)
+__THROW __attribute_malloc__ __wur;
+
+/* Re-allocate the previously allocated block in __ptr, making the new
+   block SIZE bytes long.  */
+/* __attribute_malloc__ is not used, because if realloc returns
+   the same pointer that was passed to it, aliasing needs to be allowed
+   between objects pointed to by the old and new pointers.  */
+extern void *realloc (void *__ptr, size_t __size)
+__THROW __attribute_warn_unused_result__;
+
+/* Re-allocate the previously allocated block in PTR, making the new
+   block large enough for NMEMB elements of SIZE bytes each.  */
+/* __attribute_malloc__ is not used, because if reallocarray returns
+   the same pointer that was passed to it, aliasing needs to be allowed
+   between objects pointed to by the old and new pointers.  */
+extern void *reallocarray (void *__ptr, size_t __nmemb, size_t __size)
+__THROW __attribute_warn_unused_result__;
+
+/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
+extern void free (void *__ptr) __THROW;
+
+/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
+extern void *memalign (size_t __alignment, size_t __size)
+__THROW __attribute_malloc__ __wur;
+
+/* Allocate SIZE bytes on a page boundary.  */
+extern void *valloc (size_t __size) __THROW __attribute_malloc__ __wur;
+
+/* Equivalent to valloc(minimum-page-that-holds(n)), that is, round up
+   __size to the nearest pagesize. */
+extern void *pvalloc (size_t __size) __THROW __attribute_malloc__ __wur;
+
+/* Underlying allocation function; successive calls should return
+   contiguous pieces of memory.  */
+extern void *(*__morecore) (ptrdiff_t __size);
+
+/* Default value of `__morecore'.  */
+extern void *__default_morecore (ptrdiff_t __size)
+__THROW __attribute_malloc__;
+
+/* SVID2/XPG mallinfo structure */
+
+struct mallinfo
+{
+  int arena;    /* non-mmapped space allocated from system */
+  int ordblks;  /* number of free chunks */
+  int smblks;   /* number of fastbin blocks */
+  int hblks;    /* number of mmapped regions */
+  int hblkhd;   /* space in mmapped regions */
+  int usmblks;  /* always 0, preserved for backwards compatibility */
+  int fsmblks;  /* space available in freed fastbin blocks */
+  int uordblks; /* total allocated space */
+  int fordblks; /* total free space */
+  int keepcost; /* top-most, releasable (via malloc_trim) space */
+};
+
+/* Returns a copy of the updated current mallinfo. */
+extern struct mallinfo mallinfo (void) __THROW;
+
+/* SVID2/XPG mallopt options */
+#ifndef M_MXFAST
+# define M_MXFAST  1    /* maximum request size for "fastbins" */
+#endif
+#ifndef M_NLBLKS
+# define M_NLBLKS  2    /* UNUSED in this malloc */
+#endif
+#ifndef M_GRAIN
+# define M_GRAIN   3    /* UNUSED in this malloc */
+#endif
+#ifndef M_KEEP
+# define M_KEEP    4    /* UNUSED in this malloc */
+#endif
+
+/* mallopt options that actually do something */
+#define M_TRIM_THRESHOLD    -1
+#define M_TOP_PAD           -2
+#define M_MMAP_THRESHOLD    -3
+#define M_MMAP_MAX          -4
+#define M_CHECK_ACTION      -5
+#define M_PERTURB           -6
+#define M_ARENA_TEST        -7
+#define M_ARENA_MAX         -8
+
+/* General SVID/XPG interface to tunable parameters. */
+extern int mallopt (int __param, int __val) __THROW;
+
+/* Release all but __pad bytes of freed top-most memory back to the
+   system. Return 1 if successful, else 0. */
+extern int malloc_trim (size_t __pad) __THROW;
+
+/* Report the number of usable allocated bytes associated with allocated
+   chunk __ptr. */
+extern size_t malloc_usable_size (void *__ptr) __THROW;
+
+/* Prints brief summary statistics on stderr. */
+extern void malloc_stats (void) __THROW;
+
+/* Output information about state of allocator to stream FP.  */
+extern int malloc_info (int __options, FILE *__fp) __THROW;
+
+/* Hooks for debugging and user-defined versions. */
+extern void (*__MALLOC_HOOK_VOLATILE __free_hook) (void *__ptr,
+                                                   const void *)
+__MALLOC_DEPRECATED;
+extern void *(*__MALLOC_HOOK_VOLATILE __malloc_hook)(size_t __size,
+                                                     const void *)
+__MALLOC_DEPRECATED;
+extern void *(*__MALLOC_HOOK_VOLATILE __realloc_hook)(void *__ptr,
+                                                      size_t __size,
+                                                      const void *)
+__MALLOC_DEPRECATED;
+extern void *(*__MALLOC_HOOK_VOLATILE __memalign_hook)(size_t __alignment,
+                                                       size_t __size,
+                                                       const void *)
+__MALLOC_DEPRECATED;
+extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
+
+/* Activate a standard set of debugging hooks. */
+extern void __malloc_check_init (void) __THROW __MALLOC_DEPRECATED;
+
+
+__END_DECLS
+#endif /* malloc.h */
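
As a rough usage sketch of the interfaces declared in this header (the parameter values below are purely illustrative):

    #include <malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main (void)
    {
      /* Tune two of the mallopt parameters defined above; mallopt
         returns 1 on success.  */
      mallopt (M_MMAP_THRESHOLD, 256 * 1024);
      mallopt (M_TRIM_THRESHOLD, 128 * 1024);

      char *p = malloc (1000);
      printf ("usable size: %zu\n", malloc_usable_size (p));

      struct mallinfo mi = mallinfo ();
      printf ("arena=%d uordblks=%d fordblks=%d\n",
              mi.arena, mi.uordblks, mi.fordblks);

      free (p);
      malloc_trim (0);          /* return freed top-of-heap pages */
      return 0;
    }
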
diff --git a/REORG.TODO/malloc/mallocbug.c b/REORG.TODO/malloc/mallocbug.c
new file mode 100644
index 0000000000..7d19b6fc65
--- /dev/null
+++ b/REORG.TODO/malloc/mallocbug.c
@@ -0,0 +1,70 @@
+/* Reproduce a GNU malloc bug.  */
+#include <malloc.h>
+#include <stdio.h>
+#include <string.h>
+
+#define size_t unsigned int
+
+/* Defined as global variables to avoid warnings about unused variables.  */
+char *dummy0;
+char *dummy1;
+char *fill_info_table1;
+
+
+int
+main (int argc, char *argv[])
+{
+  char *over_top;
+  size_t over_top_size = 0x3000;
+  char *over_top_dup;
+  size_t over_top_dup_size = 0x7000;
+  char *x;
+  size_t i;
+
+  /* Here's what memory is supposed to look like (hex):
+        size  contents
+        3000  original_info_table, later fill_info_table1
+      3fa000  dummy0
+      3fa000  dummy1
+        6000  info_table_2
+        3000  over_top
+
+   */
+  /* mem: original_info_table */
+  dummy0 = malloc (0x3fa000);
+  /* mem: original_info_table, dummy0 */
+  dummy1 = malloc (0x3fa000);
+  /* mem: free, dummy0, dummy1, info_table_2 */
+  fill_info_table1 = malloc (0x3000);
+  /* mem: fill_info_table1, dummy0, dummy1, info_table_2 */
+
+  x = malloc (0x1000);
+  free (x);
+  /* mem: fill_info_table1, dummy0, dummy1, info_table_2, freexx */
+
+  /* This is what loses; info_table_2 and freexx get combined unbeknownst
+     to mmalloc, and mmalloc puts over_top in a section of memory which
+     is on the free list as part of another block (where info_table_2 had
+     been).  */
+  over_top = malloc (over_top_size);
+  over_top_dup = malloc (over_top_dup_size);
+  memset (over_top, 0, over_top_size);
+  memset (over_top_dup, 1, over_top_dup_size);
+
+  for (i = 0; i < over_top_size; ++i)
+    if (over_top[i] != 0)
+      {
+        printf ("FAIL: malloc expands info table\n");
+        return 0;
+      }
+
+  for (i = 0; i < over_top_dup_size; ++i)
+    if (over_top_dup[i] != 1)
+      {
+        printf ("FAIL: malloc expands info table\n");
+        return 0;
+      }
+
+  printf ("PASS: malloc expands info table\n");
+  return 0;
+}
diff --git a/REORG.TODO/malloc/mcheck-init.c b/REORG.TODO/malloc/mcheck-init.c
new file mode 100644
index 0000000000..6d2492ef7e
--- /dev/null
+++ b/REORG.TODO/malloc/mcheck-init.c
@@ -0,0 +1,30 @@
+/* Copyright (C) 1991-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* The object of this file should be installed as libmcheck.a,
+   so one can do -lmcheck to turn on mcheck.  */
+
+#include <malloc.h>
+#include <mcheck.h>
+
+static void
+turn_on_mcheck (void)
+{
+  mcheck (NULL);
+}
+
+void (*__malloc_initialize_hook) (void) = turn_on_mcheck;
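
Per the comment above, this object is installed as libmcheck.a and linked in with -lmcheck so that mcheck is active before the program's first allocation.  A sketch of a program whose error the resulting hooks would report (the compile line in the comment is an assumed, typical invocation):

    /* Assumed build: gcc bug.c -lmcheck -o bug
       With mcheck-init linked in, mcheck (NULL) runs before the first
       malloc, so the double free below is reported as "block freed twice"
       and the process aborts.  */
    #include <stdlib.h>

    int
    main (void)
    {
      char *p = malloc (16);
      free (p);
      free (p);         /* caught by the free hook installed by mcheck */
      return 0;
    }
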
diff --git a/REORG.TODO/malloc/mcheck.c b/REORG.TODO/malloc/mcheck.c
new file mode 100644
index 0000000000..5128d687fa
--- /dev/null
+++ b/REORG.TODO/malloc/mcheck.c
@@ -0,0 +1,416 @@
+/* Standard debugging hooks for `malloc'.
+   Copyright (C) 1990-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Written May 1989 by Mike Haertel.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _MALLOC_INTERNAL
+# define _MALLOC_INTERNAL
+# include <malloc.h>
+# include <mcheck.h>
+# include <stdint.h>
+# include <stdio.h>
+# include <libintl.h>
+# include <errno.h>
+#endif
+
+/* Old hook values.  */
+static void (*old_free_hook)(__ptr_t ptr, const __ptr_t);
+static __ptr_t (*old_malloc_hook) (size_t size, const __ptr_t);
+static __ptr_t (*old_memalign_hook) (size_t alignment, size_t size,
+                                     const __ptr_t);
+static __ptr_t (*old_realloc_hook) (__ptr_t ptr, size_t size,
+                                    const __ptr_t);
+
+/* Function to call when something awful happens.  */
+static void (*abortfunc) (enum mcheck_status);
+
+/* Arbitrary magical numbers.  */
+#define MAGICWORD       0xfedabeeb
+#define MAGICFREE       0xd8675309
+#define MAGICBYTE       ((char) 0xd7)
+#define MALLOCFLOOD     ((char) 0x93)
+#define FREEFLOOD       ((char) 0x95)
+
+struct hdr
+{
+  size_t size;                  /* Exact size requested by user.  */
+  unsigned long int magic;      /* Magic number to check header integrity.  */
+  struct hdr *prev;
+  struct hdr *next;
+  __ptr_t block;                /* Real block allocated, for memalign.  */
+  unsigned long int magic2;     /* Extra, keeps us doubleword aligned.  */
+};
+
+/* This is the beginning of the list of all memory blocks allocated.
+   It is only constructed if the pedantic testing is requested.  */
+static struct hdr *root;
+
+static int mcheck_used;
+
+/* Nonzero if pedantic checking of all blocks is requested.  */
+static int pedantic;
+
+#if defined _LIBC || defined STDC_HEADERS || defined USG
+# include <string.h>
+# define flood memset
+#else
+static void flood (__ptr_t, int, size_t);
+static void
+flood (__ptr_t ptr, int val, size_t size)
+{
+  char *cp = ptr;
+  while (size--)
+    *cp++ = val;
+}
+#endif
+
+static enum mcheck_status
+checkhdr (const struct hdr *hdr)
+{
+  enum mcheck_status status;
+
+  if (!mcheck_used)
+    /* mcheck may be temporarily disabled; this happens while we are
+       reporting an error that was found.  */
+    return MCHECK_OK;
+
+  switch (hdr->magic ^ ((uintptr_t) hdr->prev + (uintptr_t) hdr->next))
+    {
+    default:
+      status = MCHECK_HEAD;
+      break;
+    case MAGICFREE:
+      status = MCHECK_FREE;
+      break;
+    case MAGICWORD:
+      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
+        status = MCHECK_TAIL;
+      else if ((hdr->magic2 ^ (uintptr_t) hdr->block) != MAGICWORD)
+        status = MCHECK_HEAD;
+      else
+        status = MCHECK_OK;
+      break;
+    }
+  if (status != MCHECK_OK)
+    {
+      mcheck_used = 0;
+      (*abortfunc) (status);
+      mcheck_used = 1;
+    }
+  return status;
+}
+
+void
+mcheck_check_all (void)
+{
+  /* Walk through all the active blocks and test whether they were tampered
+     with.  */
+  struct hdr *runp = root;
+
+  /* Temporarily turn off the checks.  */
+  pedantic = 0;
+
+  while (runp != NULL)
+    {
+      (void) checkhdr (runp);
+
+      runp = runp->next;
+    }
+
+  /* Turn checks on again.  */
+  pedantic = 1;
+}
+#ifdef _LIBC
+libc_hidden_def (mcheck_check_all)
+#endif
+
+static void
+unlink_blk (struct hdr *ptr)
+{
+  if (ptr->next != NULL)
+    {
+      ptr->next->prev = ptr->prev;
+      ptr->next->magic = MAGICWORD ^ ((uintptr_t) ptr->next->prev
+                                      + (uintptr_t) ptr->next->next);
+    }
+  if (ptr->prev != NULL)
+    {
+      ptr->prev->next = ptr->next;
+      ptr->prev->magic = MAGICWORD ^ ((uintptr_t) ptr->prev->prev
+                                      + (uintptr_t) ptr->prev->next);
+    }
+  else
+    root = ptr->next;
+}
+
+static void
+link_blk (struct hdr *hdr)
+{
+  hdr->prev = NULL;
+  hdr->next = root;
+  root = hdr;
+  hdr->magic = MAGICWORD ^ (uintptr_t) hdr->next;
+
+  /* And the next block.  */
+  if (hdr->next != NULL)
+    {
+      hdr->next->prev = hdr;
+      hdr->next->magic = MAGICWORD ^ ((uintptr_t) hdr
+                                      + (uintptr_t) hdr->next->next);
+    }
+}
+static void
+freehook (__ptr_t ptr, const __ptr_t caller)
+{
+  if (pedantic)
+    mcheck_check_all ();
+  if (ptr)
+    {
+      struct hdr *hdr = ((struct hdr *) ptr) - 1;
+      checkhdr (hdr);
+      hdr->magic = MAGICFREE;
+      hdr->magic2 = MAGICFREE;
+      unlink_blk (hdr);
+      hdr->prev = hdr->next = NULL;
+      flood (ptr, FREEFLOOD, hdr->size);
+      ptr = hdr->block;
+    }
+  __free_hook = old_free_hook;
+  if (old_free_hook != NULL)
+    (*old_free_hook)(ptr, caller);
+  else
+    free (ptr);
+  __free_hook = freehook;
+}
+
+static __ptr_t
+mallochook (size_t size, const __ptr_t caller)
+{
+  struct hdr *hdr;
+
+  if (pedantic)
+    mcheck_check_all ();
+
+  if (size > ~((size_t) 0) - (sizeof (struct hdr) + 1))
+    {
+      __set_errno (ENOMEM);
+      return NULL;
+    }
+
+  __malloc_hook = old_malloc_hook;
+  if (old_malloc_hook != NULL)
+    hdr = (struct hdr *) (*old_malloc_hook)(sizeof (struct hdr) + size + 1,
+                                            caller);
+  else
+    hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
+  __malloc_hook = mallochook;
+  if (hdr == NULL)
+    return NULL;
+
+  hdr->size = size;
+  link_blk (hdr);
+  hdr->block = hdr;
+  hdr->magic2 = (uintptr_t) hdr ^ MAGICWORD;
+  ((char *) &hdr[1])[size] = MAGICBYTE;
+  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
+  return (__ptr_t) (hdr + 1);
+}
+
+static __ptr_t
+memalignhook (size_t alignment, size_t size,
+              const __ptr_t caller)
+{
+  struct hdr *hdr;
+  size_t slop;
+  char *block;
+
+  if (pedantic)
+    mcheck_check_all ();
+
+  slop = (sizeof *hdr + alignment - 1) & - alignment;
+
+  if (size > ~((size_t) 0) - (slop + 1))
+    {
+      __set_errno (ENOMEM);
+      return NULL;
+    }
+
+  __memalign_hook = old_memalign_hook;
+  if (old_memalign_hook != NULL)
+    block = (*old_memalign_hook)(alignment, slop + size + 1, caller);
+  else
+    block = memalign (alignment, slop + size + 1);
+  __memalign_hook = memalignhook;
+  if (block == NULL)
+    return NULL;
+
+  hdr = ((struct hdr *) (block + slop)) - 1;
+
+  hdr->size = size;
+  link_blk (hdr);
+  hdr->block = (__ptr_t) block;
+  hdr->magic2 = (uintptr_t) block ^ MAGICWORD;
+  ((char *) &hdr[1])[size] = MAGICBYTE;
+  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
+  return (__ptr_t) (hdr + 1);
+}
+
+static __ptr_t
+reallochook (__ptr_t ptr, size_t size, const __ptr_t caller)
+{
+  if (size == 0)
+    {
+      freehook (ptr, caller);
+      return NULL;
+    }
+
+  struct hdr *hdr;
+  size_t osize;
+
+  if (pedantic)
+    mcheck_check_all ();
+
+  if (size > ~((size_t) 0) - (sizeof (struct hdr) + 1))
+    {
+      __set_errno (ENOMEM);
+      return NULL;
+    }
+
+  if (ptr)
+    {
+      hdr = ((struct hdr *) ptr) - 1;
+      osize = hdr->size;
+
+      checkhdr (hdr);
+      unlink_blk (hdr);
+      if (size < osize)
+        flood ((char *) ptr + size, FREEFLOOD, osize - size);
+    }
+  else
+    {
+      osize = 0;
+      hdr = NULL;
+    }
+  __free_hook = old_free_hook;
+  __malloc_hook = old_malloc_hook;
+  __memalign_hook = old_memalign_hook;
+  __realloc_hook = old_realloc_hook;
+  if (old_realloc_hook != NULL)
+    hdr = (struct hdr *) (*old_realloc_hook)((__ptr_t) hdr,
+                                             sizeof (struct hdr) + size + 1,
+                                             caller);
+  else
+    hdr = (struct hdr *) realloc ((__ptr_t) hdr,
+                                  sizeof (struct hdr) + size + 1);
+  __free_hook = freehook;
+  __malloc_hook = mallochook;
+  __memalign_hook = memalignhook;
+  __realloc_hook = reallochook;
+  if (hdr == NULL)
+    return NULL;
+
+  hdr->size = size;
+  link_blk (hdr);
+  hdr->block = hdr;
+  hdr->magic2 = (uintptr_t) hdr ^ MAGICWORD;
+  ((char *) &hdr[1])[size] = MAGICBYTE;
+  if (size > osize)
+    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
+  return (__ptr_t) (hdr + 1);
+}
+
+__attribute__ ((noreturn))
+static void
+mabort (enum mcheck_status status)
+{
+  const char *msg;
+  switch (status)
+    {
+    case MCHECK_OK:
+      msg = _ ("memory is consistent, library is buggy\n");
+      break;
+    case MCHECK_HEAD:
+      msg = _ ("memory clobbered before allocated block\n");
+      break;
+    case MCHECK_TAIL:
+      msg = _ ("memory clobbered past end of allocated block\n");
+      break;
+    case MCHECK_FREE:
+      msg = _ ("block freed twice\n");
+      break;
+    default:
+      msg = _ ("bogus mcheck_status, library is buggy\n");
+      break;
+    }
+#ifdef _LIBC
+  __libc_fatal (msg);
+#else
+  fprintf (stderr, "mcheck: %s", msg);
+  fflush (stderr);
+  abort ();
+#endif
+}
+
+/* Memory barrier so that GCC does not optimize out the argument.  */
+#define malloc_opt_barrier(x) \
+  ({ __typeof (x) __x = x; __asm ("" : "+m" (__x)); __x; })
+
+int
+mcheck (void (*func) (enum mcheck_status))
+{
+  abortfunc = (func != NULL) ? func : &mabort;
+
+  /* These hooks may not be safely inserted if malloc is already in use.  */
+  if (__malloc_initialized <= 0 && !mcheck_used)
+    {
+      /* We call malloc() once here to ensure it is initialized.  */
+      void *p = malloc (0);
+      /* GCC might optimize out the malloc/free pair without a barrier.  */
+      p = malloc_opt_barrier (p);
+      free (p);
+
+      old_free_hook = __free_hook;
+      __free_hook = freehook;
+      old_malloc_hook = __malloc_hook;
+      __malloc_hook = mallochook;
+      old_memalign_hook = __memalign_hook;
+      __memalign_hook = memalignhook;
+      old_realloc_hook = __realloc_hook;
+      __realloc_hook = reallochook;
+      mcheck_used = 1;
+    }
+
+  return mcheck_used ? 0 : -1;
+}
+#ifdef _LIBC
+libc_hidden_def (mcheck)
+#endif
+
+int
+mcheck_pedantic (void (*func) (enum mcheck_status))
+{
+  int res = mcheck (func);
+  if (res == 0)
+    pedantic = 1;
+  return res;
+}
+
+enum mcheck_status
+mprobe (__ptr_t ptr)
+{
+  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
+}
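
Since checkhdr hands any inconsistency to abortfunc, a caller can pass its own handler to mcheck and observe the status codes without terminating.  A sketch along those lines (the handler name is made up; mcheck must still run before the first allocation, hence the return-value check):

    #include <mcheck.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void
    report (enum mcheck_status status)
    {
      fprintf (stderr, "mcheck status: %d\n", (int) status);
    }

    int
    main (void)
    {
      if (mcheck (report) != 0)     /* too late if malloc was already used */
        return 1;

      char *p = malloc (8);
      p[8] = 'X';                   /* clobber the MAGICBYTE trailer */
      if (mprobe (p) == MCHECK_TAIL)
        puts ("tail clobber detected");
      free (p);
      return 0;
    }
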
diff --git a/REORG.TODO/malloc/mcheck.h b/REORG.TODO/malloc/mcheck.h
new file mode 100644
index 0000000000..3d56bef238
--- /dev/null
+++ b/REORG.TODO/malloc/mcheck.h
@@ -0,0 +1,60 @@
+/* Copyright (C) 1996-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _MCHECK_H
+#define _MCHECK_H       1
+
+#include <features.h>
+
+__BEGIN_DECLS
+
+/* Return values for `mprobe': these are the kinds of inconsistencies that
+   `mcheck' enables detection of.  */
+enum mcheck_status
+{
+  MCHECK_DISABLED = -1,         /* Consistency checking is not turned on.  */
+  MCHECK_OK,                    /* Block is fine.  */
+  MCHECK_FREE,                  /* Block freed twice.  */
+  MCHECK_HEAD,                  /* Memory before the block was clobbered.  */
+  MCHECK_TAIL                   /* Memory after the block was clobbered.  */
+};
+
+
+/* Activate a standard collection of debugging hooks.  This must be called
+   before `malloc' is ever called.  ABORTFUNC is called with an error code
+   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
+   null, the standard function prints on stderr and then calls `abort'.  */
+extern int mcheck (void (*__abortfunc)(enum mcheck_status)) __THROW;
+
+/* Similar to `mcheck' but performs checks for all blocks whenever one of
+   the memory handling functions is called.  This can be very slow.  */
+extern int mcheck_pedantic (void (*__abortfunc)(enum mcheck_status)) __THROW;
+
+/* Force check of all blocks now.  */
+extern void mcheck_check_all (void);
+
+/* Check for aberrations in a particular malloc'd block.  You must have
+   called `mcheck' already.  These are the same checks that `mcheck' does
+   when you free or reallocate a block.  */
+extern enum mcheck_status mprobe (void *__ptr) __THROW;
+
+/* Activate a standard collection of tracing hooks.  */
+extern void mtrace (void) __THROW;
+extern void muntrace (void) __THROW;
+
+__END_DECLS
+#endif /* mcheck.h */
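
The mtrace/muntrace pair declared at the end of this header logs allocations to the file named by the MALLOC_TRACE environment variable; the log can then be post-processed with the mtrace script.  A sketch (file name and command line are only examples):

    /* Example run (paths illustrative):
         MALLOC_TRACE=/tmp/trace.log ./a.out
         mtrace ./a.out /tmp/trace.log   */
    #include <mcheck.h>
    #include <stdlib.h>

    int
    main (void)
    {
      mtrace ();                    /* start logging if MALLOC_TRACE is set */

      void *leaked = malloc (32);   /* deliberately never freed */
      void *freed = malloc (64);
      free (freed);

      muntrace ();                  /* stop logging */
      (void) leaked;
      return 0;
    }
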
diff --git a/REORG.TODO/malloc/memusage.c b/REORG.TODO/malloc/memusage.c
new file mode 100644
index 0000000000..3deca2a406
--- /dev/null
+++ b/REORG.TODO/malloc/memusage.c
@@ -0,0 +1,936 @@
+/* Profile heap and stack memory usage of running program.
+   Copyright (C) 1998-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <assert.h>
+#include <atomic.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+
+#include <memusage.h>
+
+/* Pointers to the real functions.  These are determined using `dlsym'
+   when really needed.  */
+static void *(*mallocp)(size_t);
+static void *(*reallocp) (void *, size_t);
+static void *(*callocp) (size_t, size_t);
+static void (*freep) (void *);
+
+static void *(*mmapp) (void *, size_t, int, int, int, off_t);
+static void *(*mmap64p) (void *, size_t, int, int, int, off64_t);
+static int (*munmapp) (void *, size_t);
+static void *(*mremapp) (void *, size_t, size_t, int, void *);
+
+enum
+{
+  idx_malloc = 0,
+  idx_realloc,
+  idx_calloc,
+  idx_free,
+  idx_mmap_r,
+  idx_mmap_w,
+  idx_mmap_a,
+  idx_mremap,
+  idx_munmap,
+  idx_last
+};
+
+
+struct header
+{
+  size_t length;
+  size_t magic;
+};
+
+#define MAGIC 0xfeedbeaf
+
+
+static memusage_cntr_t calls[idx_last];
+static memusage_cntr_t failed[idx_last];
+static memusage_size_t total[idx_last];
+static memusage_size_t grand_total;
+static memusage_cntr_t histogram[65536 / 16];
+static memusage_cntr_t large;
+static memusage_cntr_t calls_total;
+static memusage_cntr_t inplace;
+static memusage_cntr_t decreasing;
+static memusage_cntr_t realloc_free;
+static memusage_cntr_t inplace_mremap;
+static memusage_cntr_t decreasing_mremap;
+static memusage_size_t current_heap;
+static memusage_size_t peak_use[3];
+static __thread uintptr_t start_sp;
+
+/* A few macros to make the source more readable.  */
+#define peak_heap       peak_use[0]
+#define peak_stack      peak_use[1]
+#define peak_total      peak_use[2]
+
+#define DEFAULT_BUFFER_SIZE     32768
+static size_t buffer_size;
+
+static int fd = -1;
+
+static bool not_me;
+static int initialized;
+static bool trace_mmap;
+extern const char *__progname;
+
+struct entry
+{
+  uint64_t heap;
+  uint64_t stack;
+  uint32_t time_low;
+  uint32_t time_high;
+};
+
+static struct entry buffer[2 * DEFAULT_BUFFER_SIZE];
+static uatomic32_t buffer_cnt;
+static struct entry first;
+
+
+/* Update the global data after a successful function call.  */
+static void
+update_data (struct header *result, size_t len, size_t old_len)
+{
+  if (result != NULL)
+    {
+      /* Record the information we need and mark the block using a
+         magic number.  */
+      result->length = len;
+      result->magic = MAGIC;
+    }
+
+  /* Compute current heap usage and compare it with the maximum value.  */
+  memusage_size_t heap
+    = catomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
+  catomic_max (&peak_heap, heap);
+
+  /* Compute current stack usage and compare it with the maximum
+     value.  The base stack pointer might not be set if this is not
+     the main thread and it is the first call to any of these
+     functions.  */
+  if (__glibc_unlikely (!start_sp))
+    start_sp = GETSP ();
+
+  uintptr_t sp = GETSP ();
+#ifdef STACK_GROWS_UPWARD
+  /* This can happen in threads where we didn't catch the thread's
+     stack early enough.  */
+  if (__glibc_unlikely (sp < start_sp))
+    start_sp = sp;
+  size_t current_stack = sp - start_sp;
+#else
+  /* This can happen in threads where we didn't catch the thread's
+     stack early enough.  */
+  if (__glibc_unlikely (sp > start_sp))
+    start_sp = sp;
+  size_t current_stack = start_sp - sp;
+#endif
+  catomic_max (&peak_stack, current_stack);
+
+  /* Add up heap and stack usage and compare it with the maximum value.  */
+  catomic_max (&peak_total, heap + current_stack);
+
+  /* Store the value only if we are writing to a file.  */
+  if (fd != -1)
+    {
+      uatomic32_t idx = catomic_exchange_and_add (&buffer_cnt, 1);
+      if (idx + 1 >= 2 * buffer_size)
+        {
+          /* We try to reset the counter to the correct range.  If
+             this fails because of another thread increasing the
+             counter it does not matter since that thread will take
+             care of the correction.  */
+          uatomic32_t reset = (idx + 1) % (2 * buffer_size);
+          catomic_compare_and_exchange_val_acq (&buffer_cnt, reset, idx + 1);
+          if (idx >= 2 * buffer_size)
+            idx = reset - 1;
+        }
+      assert (idx < 2 * DEFAULT_BUFFER_SIZE);
+
+      buffer[idx].heap = current_heap;
+      buffer[idx].stack = current_stack;
+      GETTIME (buffer[idx].time_low, buffer[idx].time_high);
+
+      /* Write out buffer if it is full.  */
+      if (idx + 1 == buffer_size)
+        write (fd, buffer, buffer_size * sizeof (struct entry));
+      else if (idx + 1 == 2 * buffer_size)
+        write (fd, &buffer[buffer_size], buffer_size * sizeof (struct entry));
+    }
+}
+
+
+/* Interrupt handler.  */
+static void
+int_handler (int signo)
+{
+  /* Nothing gets allocated.  Just record the stack pointer position.  */
+  update_data (NULL, 0, 0);
+}
+
+
+/* Find out whether this is the program we are supposed to profile.
+   For this the name in the variable `__progname' must match the one
+   given in the environment variable MEMUSAGE_PROG_NAME.  If the variable
+   is not present, every program assumes it should be profiled.
+
+   If this is the program open a file descriptor to the output file.
+   We will write to it whenever the buffer overflows.  The name of the
+   output file is determined by the environment variable MEMUSAGE_OUTPUT.
+
+   If the environment variable MEMUSAGE_BUFFER_SIZE is set its numerical
+   value determines the size of the internal buffer.  The number gives
+   the number of elements in the buffer.  Setting the number to one
+   effectively selects unbuffered operation.
+
+   If MEMUSAGE_NO_TIMER is not present, a profiling-timer handler is
+   installed which records the stack pointer at the highest possible
+   frequency.  */
+static void
+me (void)
+{
+  const char *env = getenv ("MEMUSAGE_PROG_NAME");
+  size_t prog_len = strlen (__progname);
+
+  initialized = -1;
+  mallocp = (void *(*)(size_t))dlsym (RTLD_NEXT, "malloc");
+  reallocp = (void *(*)(void *, size_t))dlsym (RTLD_NEXT, "realloc");
+  callocp = (void *(*)(size_t, size_t))dlsym (RTLD_NEXT, "calloc");
+  freep = (void (*)(void *))dlsym (RTLD_NEXT, "free");
+
+  mmapp = (void *(*)(void *, size_t, int, int, int, off_t))dlsym (RTLD_NEXT,
+                                                                  "mmap");
+  mmap64p =
+    (void *(*)(void *, size_t, int, int, int, off64_t))dlsym (RTLD_NEXT,
+                                                              "mmap64");
+  mremapp = (void *(*)(void *, size_t, size_t, int, void *))dlsym (RTLD_NEXT,
+                                                                   "mremap");
+  munmapp = (int (*)(void *, size_t))dlsym (RTLD_NEXT, "munmap");
+  initialized = 1;
+
+  if (env != NULL)
+    {
+      /* Check for program name.  */
+      size_t len = strlen (env);
+      if (len > prog_len || strcmp (env, &__progname[prog_len - len]) != 0
+          || (prog_len != len && __progname[prog_len - len - 1] != '/'))
+        not_me = true;
+    }
+
+  /* Only open the file if it's really us.  */
+  if (!not_me && fd == -1)
+    {
+      const char *outname;
+
+      if (!start_sp)
+        start_sp = GETSP ();
+
+      outname = getenv ("MEMUSAGE_OUTPUT");
+      if (outname != NULL && outname[0] != '\0'
+          && (access (outname, R_OK | W_OK) == 0 || errno == ENOENT))
+        {
+          fd = creat64 (outname, 0666);
+
+          if (fd == -1)
+            /* Don't do anything in future calls if we cannot write to
+               the output file.  */
+            not_me = true;
+          else
+            {
+              /* Write the first entry.  */
+              first.heap = 0;
+              first.stack = 0;
+              GETTIME (first.time_low, first.time_high);
+              /* Write it twice since we need the start and the end time.  */
+              write (fd, &first, sizeof (first));
+              write (fd, &first, sizeof (first));
+
+              /* Determine the buffer size.  We use the default if the
+                 environment variable is not present.  */
+              buffer_size = DEFAULT_BUFFER_SIZE;
+              const char *str_buffer_size = getenv ("MEMUSAGE_BUFFER_SIZE");
+              if (str_buffer_size != NULL)
+                {
+                  buffer_size = atoi (str_buffer_size);
+                  if (buffer_size == 0 || buffer_size > DEFAULT_BUFFER_SIZE)
+                    buffer_size = DEFAULT_BUFFER_SIZE;
+                }
+
+              /* Possibly enable timer-based stack pointer retrieval.  */
+              if (getenv ("MEMUSAGE_NO_TIMER") == NULL)
+                {
+                  struct sigaction act;
+
+                  act.sa_handler = (sighandler_t) &int_handler;
+                  act.sa_flags = SA_RESTART;
+                  sigfillset (&act.sa_mask);
+
+                  if (sigaction (SIGPROF, &act, NULL) >= 0)
+                    {
+                      struct itimerval timer;
+
+                      timer.it_value.tv_sec = 0;
+                      timer.it_value.tv_usec = 1;
+                      timer.it_interval = timer.it_value;
+                      setitimer (ITIMER_PROF, &timer, NULL);
+                    }
+                }
+            }
+        }
+
+      if (!not_me && getenv ("MEMUSAGE_TRACE_MMAP") != NULL)
+        trace_mmap = true;
+    }
+}
+
+
+/* Record the initial stack position.  */
+static void
+__attribute__ ((constructor))
+init (void)
+{
+  start_sp = GETSP ();
+  if (!initialized)
+    me ();
+}
+
+
+/* `malloc' replacement.  We keep track of the memory usage if this is the
+   correct program.  */
+void *
+malloc (size_t len)
+{
+  struct header *result = NULL;
+
+  /* Determine the real implementation if that has not already happened.  */
+  if (__glibc_unlikely (initialized <= 0))
+    {
+      if (initialized == -1)
+        return NULL;
+
+      me ();
+    }
+
+  /* If this is not the correct program just use the normal function.  */
+  if (not_me)
+    return (*mallocp)(len);
+
+  /* Keep track of number of calls.  */
+  catomic_increment (&calls[idx_malloc]);
+  /* Keep track of total memory consumption for `malloc'.  */
+  catomic_add (&total[idx_malloc], len);
+  /* Keep track of total memory requirement.  */
+  catomic_add (&grand_total, len);
+  /* Remember the size of the request.  */
+  if (len < 65536)
+    catomic_increment (&histogram[len / 16]);
+  else
+    catomic_increment (&large);
+  /* Total number of calls of any of the functions.  */
+  catomic_increment (&calls_total);
+
+  /* Do the real work.  */
+  result = (struct header *) (*mallocp)(len + sizeof (struct header));
+  if (result == NULL)
+    {
+      catomic_increment (&failed[idx_malloc]);
+      return NULL;
+    }
+
+  /* Update the allocation data and write out the records if necessary.  */
+  update_data (result, len, 0);
+
+  /* Return the pointer to the user buffer.  */
+  return (void *) (result + 1);
+}
+
+
+/* `realloc' replacement.  We keep track of the memory usage if this is the
+   correct program.  */
+void *
+realloc (void *old, size_t len)
+{
+  struct header *result = NULL;
+  struct header *real;
+  size_t old_len;
+
+  /* Determine the real implementation if that has not already happened.  */
+  if (__glibc_unlikely (initialized <= 0))
+    {
+      if (initialized == -1)
+        return NULL;
+
+      me ();
+    }
+
+  /* If this is not the correct program just use the normal function.  */
+  if (not_me)
+    return (*reallocp)(old, len);
+
+  if (old == NULL)
+    {
+      /* This is really a `malloc' call.  */
+      real = NULL;
+      old_len = 0;
+    }
+  else
+    {
+      real = ((struct header *) old) - 1;
+      if (real->magic != MAGIC)
+        /* This memory was not allocated here.  */
+        return (*reallocp)(old, len);
+
+      old_len = real->length;
+    }
+
+  /* Keep track of number of calls.  */
+  catomic_increment (&calls[idx_realloc]);
+  if (len > old_len)
+    {
+      /* Keep track of total memory consumption for `realloc'.  */
+      catomic_add (&total[idx_realloc], len - old_len);
+      /* Keep track of total memory requirement.  */
+      catomic_add (&grand_total, len - old_len);
+    }
+
+  if (len == 0 && old != NULL)
+    {
+      /* Special case.  */
+      catomic_increment (&realloc_free);
+      /* Keep track of total memory freed using `free'.  */
+      catomic_add (&total[idx_free], real->length);
+
+      /* Update the allocation data and write out the records if necessary.  */
+      update_data (NULL, 0, old_len);
+
+      /* Do the real work.  */
+      (*freep) (real);
+
+      return NULL;
+    }
+
+  /* Remember the size of the request.  */
+  if (len < 65536)
+    catomic_increment (&histogram[len / 16]);
+  else
+    catomic_increment (&large);
+  /* Total number of calls of any of the functions.  */
+  catomic_increment (&calls_total);
+
+  /* Do the real work.  */
+  result = (struct header *) (*reallocp)(real, len + sizeof (struct header));
+  if (result == NULL)
+    {
+      catomic_increment (&failed[idx_realloc]);
+      return NULL;
+    }
+
+  /* Record whether the reduction/increase happened in place.  */
+  if (real == result)
+    catomic_increment (&inplace);
+  /* Was the buffer decreased?  */
+  if (old_len > len)
+    catomic_increment (&decreasing);
+
+  /* Update the allocation data and write out the records if necessary.  */
+  update_data (result, len, old_len);
+
+  /* Return the pointer to the user buffer.  */
+  return (void *) (result + 1);
+}
+
+
+/* `calloc' replacement.  We keep track of the memory usage if this is the
+   correct program.  */
+void *
+calloc (size_t n, size_t len)
+{
+  struct header *result;
+  size_t size = n * len;
+
+  /* Determine the real implementation if that has not already happened.  */
+  if (__glibc_unlikely (initialized <= 0))
+    {
+      if (initialized == -1)
+        return NULL;
+
+      me ();
+    }
+
+  /* If this is not the correct program just use the normal function.  */
+  if (not_me)
+    return (*callocp)(n, len);
+
+  /* Keep track of number of calls.  */
+  catomic_increment (&calls[idx_calloc]);
+  /* Keep track of total memory consumption for `calloc'.  */
+  catomic_add (&total[idx_calloc], size);
+  /* Keep track of total memory requirement.  */
+  catomic_add (&grand_total, size);
+  /* Remember the size of the request.  */
+  if (size < 65536)
+    catomic_increment (&histogram[size / 16]);
+  else
+    catomic_increment (&large);
+  /* Total number of calls of any of the functions.  */
+  ++calls_total;
+
+  /* Do the real work.  */
+  result = (struct header *) (*mallocp)(size + sizeof (struct header));
+  if (result == NULL)
+    {
+      catomic_increment (&failed[idx_calloc]);
+      return NULL;
+    }
+
+  /* Update the allocation data and write out the records if necessary.  */
+  update_data (result, size, 0);
+
+  /* Do what `calloc' would have done and return the buffer to the caller.  */
+  return memset (result + 1, '\0', size);
+}
+
+
+/* `free' replacement.  We keep track of the memory usage if this is the
+   correct program.  */
+void
+free (void *ptr)
+{
+  struct header *real;
+
+  /* Determine the real implementation if that has not already happened.  */
+  if (__glibc_unlikely (initialized <= 0))
+    {
+      if (initialized == -1)
+        return;
+
+      me ();
+    }
+
+  /* If this is not the correct program just use the normal function.  */
+  if (not_me)
+    {
+      (*freep) (ptr);
+      return;
+    }
+
+  /* `free (NULL)' has no effect.  */
+  if (ptr == NULL)
+    {
+      catomic_increment (&calls[idx_free]);
+      return;
+    }
+
+  /* Determine the pointer to the header.  */
+  real = ((struct header *) ptr) - 1;
+  if (real->magic != MAGIC)
+    {
+      /* This block wasn't allocated here.  */
+      (*freep) (ptr);
+      return;
+    }
+
+  /* Keep track of number of calls.  */
+  catomic_increment (&calls[idx_free]);
+  /* Keep track of total memory freed using `free'.  */
+  catomic_add (&total[idx_free], real->length);
+
+  /* Update the allocation data and write out the records if necessary.  */
+  update_data (NULL, 0, real->length);
+
+  /* Do the real work.  */
+  (*freep) (real);
+}
+
+
+/* `mmap' replacement.  We do not have to keep track of the size since
+   `munmap' will get it as a parameter.  */
+void *
+mmap (void *start, size_t len, int prot, int flags, int fd, off_t offset)
+{
+  void *result = NULL;
+
+  /* Determine the real implementation if that has not already happened.  */
+  if (__glibc_unlikely (initialized <= 0))
+    {
+      if (initialized == -1)
+        return NULL;
+
+      me ();
+    }
+
+  /* Always get a block.  We don't need extra memory.  */
+  result = (*mmapp)(start, len, prot, flags, fd, offset);
+
+  if (!not_me && trace_mmap)
+    {
+      int idx = (flags & MAP_ANON
+                 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
+
+      /* Keep track of number of calls.  */
+      catomic_increment (&calls[idx]);
+      /* Keep track of total memory consumption for `mmap'.  */
+      catomic_add (&total[idx], len);
+      /* Keep track of total memory requirement.  */
+      catomic_add (&grand_total, len);
+      /* Remember the size of the request.  */
+      if (len < 65536)
+        catomic_increment (&histogram[len / 16]);
+      else
+        catomic_increment (&large);
+      /* Total number of calls of any of the functions.  */
+      catomic_increment (&calls_total);
+
+      /* Check for failures.  */
+      if (result == NULL)
+        catomic_increment (&failed[idx]);
+      else if (idx == idx_mmap_w)
+        /* Update the allocation data and write out the records if
+           necessary.  Note the first parameter is NULL which means
+           the size is not tracked.  */
+        update_data (NULL, len, 0);
+    }
+
+  /* Return the pointer to the user buffer.  */
+  return result;
+}
+
+
+/* `mmap64' replacement.  We do not have to keep track of the size since
+   `munmap' will get it as a parameter.  */
+void *
+mmap64 (void *start, size_t len, int prot, int flags, int fd, off64_t offset)
+{
+  void *result = NULL;
+
+  /* Determine the real implementation if that has not already happened.  */
+  if (__glibc_unlikely (initialized <= 0))
+    {
+      if (initialized == -1)
+        return NULL;
+
+      me ();
+    }
+
+  /* Always get a block.  We don't need extra memory.  */
+  result = (*mmap64p)(start, len, prot, flags, fd, offset);
+
+  if (!not_me && trace_mmap)
+    {
+      int idx = (flags & MAP_ANON
+                 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
+
+      /* Keep track of number of calls.  */
+      catomic_increment (&calls[idx]);
+      /* Keep track of total memory consumption for `mmap64'.  */
+      catomic_add (&total[idx], len);
+      /* Keep track of total memory requirement.  */
+      catomic_add (&grand_total, len);
+      /* Remember the size of the request.  */
+      if (len < 65536)
+        catomic_increment (&histogram[len / 16]);
+      else
+        catomic_increment (&large);
+      /* Total number of calls of any of the functions.  */
+      catomic_increment (&calls_total);
+
+      /* Check for failures.  */
+      if (result == NULL)
+        catomic_increment (&failed[idx]);
+      else if (idx == idx_mmap_w)
+        /* Update the allocation data and write out the records if
+           necessary.  Note the first parameter is NULL which means
+           the size is not tracked.  */
+        update_data (NULL, len, 0);
+    }
+
+  /* Return the pointer to the user buffer.  */
+  return result;
+}
+
+
+/* `mremap' replacement.  We do not have to keep track of the size since
+   `munmap' will get it as a parameter.  */
+void *
+mremap (void *start, size_t old_len, size_t len, int flags, ...)
+{
+  void *result = NULL;
+  va_list ap;
+
+  va_start (ap, flags);
+  void *newaddr = (flags & MREMAP_FIXED) ? va_arg (ap, void *) : NULL;
+  va_end (ap);
+
+  /* Determine the real implementation if that has not already happened.  */
+  if (__glibc_unlikely (initialized <= 0))
+    {
+      if (initialized == -1)
+        return NULL;
+
+      me ();
+    }
+
+  /* Always get a block.  We don't need extra memory.  */
+  result = (*mremapp)(start, old_len, len, flags, newaddr);
+
+  if (!not_me && trace_mmap)
+    {
+      /* Keep track of number of calls.  */
+      catomic_increment (&calls[idx_mremap]);
+      if (len > old_len)
+        {
+          /* Keep track of total memory consumption for `mremap'.  */
+          catomic_add (&total[idx_mremap], len - old_len);
+          /* Keep track of total memory requirement.  */
+          catomic_add (&grand_total, len - old_len);
+        }
+      /* Remember the size of the request.  */
+      if (len < 65536)
+        catomic_increment (&histogram[len / 16]);
+      else
+        catomic_increment (&large);
+      /* Total number of calls of any of the functions.  */
+      catomic_increment (&calls_total);
+
+      /* Check for failures.  */
+      if (result == NULL)
+        catomic_increment (&failed[idx_mremap]);
+      else
+        {
+          /* Record whether the reduction/increase happened in place.  */
+          if (start == result)
+            catomic_increment (&inplace_mremap);
+          /* Was the buffer decreased?  */
+          if (old_len > len)
+            catomic_increment (&decreasing_mremap);
+
+          /* Update the allocation data and write out the records if
+             necessary.  Note the first parameter is NULL which means
+             the size is not tracked.  */
+          update_data (NULL, len, old_len);
+        }
+    }
+
+  /* Return the pointer to the user buffer.  */
+  return result;
+}
+
+
+/* `munmap' replacement.  */
+int
+munmap (void *start, size_t len)
+{
+  int result;
+
+  /* Determine the real implementation if that has not already happened.  */
+  if (__glibc_unlikely (initialized <= 0))
+    {
+      if (initialized == -1)
+        return -1;
+
+      me ();
+    }
+
+  /* Do the real work.  */
+  result = (*munmapp)(start, len);
+
+  if (!not_me && trace_mmap)
+    {
+      /* Keep track of number of calls.  */
+      catomic_increment (&calls[idx_munmap]);
+
+      if (__glibc_likely (result == 0))
+        {
+          /* Keep track of total memory freed using `munmap'.  */
+          catomic_add (&total[idx_munmap], len);
+
+          /* Update the allocation data and write out the records if
+             necessary.  */
+          update_data (NULL, 0, len);
+        }
+      else
+        catomic_increment (&failed[idx_munmap]);
+    }
+
+  return result;
+}
+
+
+/* Write some statistics to standard error.  */
+static void
+__attribute__ ((destructor))
+dest (void)
+{
+  int percent, cnt;
+  unsigned long int maxcalls;
+
+  /* If we haven't done anything here just return.  */
+  if (not_me)
+    return;
+
+  /* If we should call any of the memory functions don't do any profiling.  */
+  not_me = true;
+
+  /* Finish the output file.  */
+  if (fd != -1)
+    {
+      /* Write the partially filled buffer.  */
+      if (buffer_cnt > buffer_size)
+        write (fd, buffer + buffer_size,
+               (buffer_cnt - buffer_size) * sizeof (struct entry));
+      else
+        write (fd, buffer, buffer_cnt * sizeof (struct entry));
+
+      /* Go back to the beginning of the file.  We allocated two records
+         here when we opened the file.  */
+      lseek (fd, 0, SEEK_SET);
+      /* Write out a record containing the total size.  */
+      first.stack = peak_total;
+      write (fd, &first, sizeof (struct entry));
+      /* Write out another record containing the maximum for heap and
+         stack.  */
+      first.heap = peak_heap;
+      first.stack = peak_stack;
+      GETTIME (first.time_low, first.time_high);
+      write (fd, &first, sizeof (struct entry));
+
+      /* Close the file.  */
+      close (fd);
+      fd = -1;
+    }
+
+  /* Write a colorful statistic.  */
+  fprintf (stderr, "\n\
+\e[01;32mMemory usage summary:\e[0;0m heap total: %llu, heap peak: %lu, stack peak: %lu\n\
+\e[04;34m         total calls   total memory   failed calls\e[0m\n\
+\e[00;34m malloc|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
+\e[00;34mrealloc|\e[0m %10lu   %12llu   %s%12lu\e[00;00m  (nomove:%ld, dec:%ld, free:%ld)\n\
+\e[00;34m calloc|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
+\e[00;34m   free|\e[0m %10lu   %12llu\n",
+           (unsigned long long int) grand_total, (unsigned long int) peak_heap,
+           (unsigned long int) peak_stack,
+           (unsigned long int) calls[idx_malloc],
+           (unsigned long long int) total[idx_malloc],
+           failed[idx_malloc] ? "\e[01;41m" : "",
+           (unsigned long int) failed[idx_malloc],
+           (unsigned long int) calls[idx_realloc],
+           (unsigned long long int) total[idx_realloc],
+           failed[idx_realloc] ? "\e[01;41m" : "",
+           (unsigned long int) failed[idx_realloc],
+           (unsigned long int) inplace,
+           (unsigned long int) decreasing,
+           (unsigned long int) realloc_free,
+           (unsigned long int) calls[idx_calloc],
+           (unsigned long long int) total[idx_calloc],
+           failed[idx_calloc] ? "\e[01;41m" : "",
+           (unsigned long int) failed[idx_calloc],
+           (unsigned long int) calls[idx_free],
+           (unsigned long long int) total[idx_free]);
+
+  if (trace_mmap)
+    fprintf (stderr, "\
+\e[00;34mmmap(r)|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
+\e[00;34mmmap(w)|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
+\e[00;34mmmap(a)|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n\
+\e[00;34m mremap|\e[0m %10lu   %12llu   %s%12lu\e[00;00m  (nomove: %ld, dec:%ld)\n\
+\e[00;34m munmap|\e[0m %10lu   %12llu   %s%12lu\e[00;00m\n",
+             (unsigned long int) calls[idx_mmap_r],
+             (unsigned long long int) total[idx_mmap_r],
+             failed[idx_mmap_r] ? "\e[01;41m" : "",
+             (unsigned long int) failed[idx_mmap_r],
+             (unsigned long int) calls[idx_mmap_w],
+             (unsigned long long int) total[idx_mmap_w],
+             failed[idx_mmap_w] ? "\e[01;41m" : "",
+             (unsigned long int) failed[idx_mmap_w],
+             (unsigned long int) calls[idx_mmap_a],
+             (unsigned long long int) total[idx_mmap_a],
+             failed[idx_mmap_a] ? "\e[01;41m" : "",
+             (unsigned long int) failed[idx_mmap_a],
+             (unsigned long int) calls[idx_mremap],
+             (unsigned long long int) total[idx_mremap],
+             failed[idx_mremap] ? "\e[01;41m" : "",
+             (unsigned long int) failed[idx_mremap],
+             (unsigned long int) inplace_mremap,
+             (unsigned long int) decreasing_mremap,
+             (unsigned long int) calls[idx_munmap],
+             (unsigned long long int) total[idx_munmap],
+             failed[idx_munmap] ? "\e[01;41m" : "",
+             (unsigned long int) failed[idx_munmap]);
+
+  /* Write out a histogram of the sizes of the allocations.  */
+  fprintf (stderr, "\e[01;32mHistogram for block sizes:\e[0;0m\n");
+
+  /* Determine the maximum of all calls for each size range.  */
+  maxcalls = large;
+  for (cnt = 0; cnt < 65536; cnt += 16)
+    if (histogram[cnt / 16] > maxcalls)
+      maxcalls = histogram[cnt / 16];
+
+  for (cnt = 0; cnt < 65536; cnt += 16)
+    /* Only write out the nonzero entries.  */
+    if (histogram[cnt / 16] != 0)
+      {
+        percent = (histogram[cnt / 16] * 100) / calls_total;
+        fprintf (stderr, "%5d-%-5d%12lu ", cnt, cnt + 15,
+                 (unsigned long int) histogram[cnt / 16]);
+        if (percent == 0)
+          fputs (" <1% \e[41;37m", stderr);
+        else
+          fprintf (stderr, "%3d%% \e[41;37m", percent);
+
+        /* Draw a bar with a length corresponding to the current
+           percentage.  */
+        percent = (histogram[cnt / 16] * 50) / maxcalls;
+        while (percent-- > 0)
+          fputc ('=', stderr);
+        fputs ("\e[0;0m\n", stderr);
+      }
+
+  if (large != 0)
+    {
+      percent = (large * 100) / calls_total;
+      fprintf (stderr, "   large   %12lu ", (unsigned long int) large);
+      if (percent == 0)
+        fputs (" <1% \e[41;37m", stderr);
+      else
+        fprintf (stderr, "%3d%% \e[41;37m", percent);
+      percent = (large * 50) / maxcalls;
+      while (percent-- > 0)
+        fputc ('=', stderr);
+      fputs ("\e[0;0m\n", stderr);
+    }
+
+  /* Any following malloc/free etc. calls should generate statistics again,
+     because otherwise freeing something that was malloc'ed before this
+     destructor ran (including the struct header in front of it) wouldn't
+     be properly freed.  */
+  not_me = false;
+}
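
The data file written above begins with two struct entry records (the first carries the overall peak in its stack field, the second the heap and stack peaks), followed by one record per sample.  A minimal reader sketch, assuming the on-disk layout matches struct entry as defined in this file:

    #include <stdint.h>
    #include <stdio.h>

    struct entry
    {
      uint64_t heap;
      uint64_t stack;
      uint32_t time_low;
      uint32_t time_high;
    };

    int
    main (int argc, char *argv[])
    {
      if (argc != 2)
        return 1;
      FILE *fp = fopen (argv[1], "rb");
      if (fp == NULL)
        return 1;

      struct entry header[2];
      if (fread (header, sizeof (struct entry), 2, fp) != 2)
        return 1;
      printf ("total peak: %llu\n", (unsigned long long) header[0].stack);
      printf ("heap peak: %llu, stack peak: %llu\n",
              (unsigned long long) header[1].heap,
              (unsigned long long) header[1].stack);

      struct entry e;
      size_t samples = 0;
      while (fread (&e, sizeof e, 1, fp) == 1)
        ++samples;
      printf ("%zu samples\n", samples);

      fclose (fp);
      return 0;
    }
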
diff --git a/REORG.TODO/malloc/memusage.sh b/REORG.TODO/malloc/memusage.sh
new file mode 100755
index 0000000000..93ece87a31
--- /dev/null
+++ b/REORG.TODO/malloc/memusage.sh
@@ -0,0 +1,274 @@
+#! @BASH@
+# Copyright (C) 1999-2017 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+# Contributed by Ulrich Drepper <drepper@gnu.org>, 1999.
+
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+
+memusageso='@SLIBDIR@/libmemusage.so'
+memusagestat='@BINDIR@/memusagestat'
+TEXTDOMAIN=libc
+
+# Print usage message.
+do_usage() {
+  printf >&2 $"Try \`%s --help' or \`%s --usage' for more information.\n" memusage memusage
+  exit 1
+}
+
+# Message for missing argument.
+do_missing_arg() {
+  printf >&2 $"%s: option '%s' requires an argument\n" memusage "$1"
+  do_usage
+}
+
+# Print help message
+do_help() {
+  echo $"Usage: memusage [OPTION]... PROGRAM [PROGRAMOPTION]...
+Profile memory usage of PROGRAM.
+
+   -n,--progname=NAME     Name of the program file to profile
+   -p,--png=FILE          Generate PNG graphic and store it in FILE
+   -d,--data=FILE         Generate binary data file and store it in FILE
+   -u,--unbuffered        Don't buffer output
+   -b,--buffer=SIZE       Collect SIZE entries before writing them out
+      --no-timer          Don't collect additional information through timer
+   -m,--mmap              Also trace mmap & friends
+
+   -?,--help              Print this help and exit
+      --usage             Give a short usage message
+   -V,--version           Print version information and exit
+
+ The following options only apply when generating graphical output:
+   -t,--time-based        Make graph linear in time
+   -T,--total             Also draw graph of total memory use
+      --title=STRING      Use STRING as title of the graph
+   -x,--x-size=SIZE       Make graphic SIZE pixels wide
+   -y,--y-size=SIZE       Make graphic SIZE pixels high
+
+Mandatory arguments to long options are also mandatory for any corresponding
+short options.
+
+"
+  printf $"For bug reporting instructions, please see:\\n%s.\\n" \
+    "@REPORT_BUGS_TO@"
+  exit 0
+}
+
+do_version() {
+  echo 'memusage @PKGVERSION@@VERSION@'
+  printf $"Copyright (C) %s Free Software Foundation, Inc.
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+" "2017"
+  printf $"Written by %s.
+" "Ulrich Drepper"
+  exit 0
+}
+
+# These variables are local
+buffer=
+data=
+memusagestat_args=
+notimer=
+png=
+progname=
+tracemmap=
+
+# Process arguments.  But stop as soon as the program name is found.
+while test $# -gt 0; do
+  case "$1" in
+  -V | --v | --ve | --ver | --vers | --versi | --versio | --version)
+    do_version
+    ;;
+  -\? | --h | --he | --hel | --help)
+    do_help
+    ;;
+  --us | --usa | --usag | --usage)
+    echo $"Syntax: memusage [--data=FILE] [--progname=NAME] [--png=FILE] [--unbuffered]
+	    [--buffer=SIZE] [--no-timer] [--time-based] [--total]
+	    [--title=STRING] [--x-size=SIZE] [--y-size=SIZE]
+	    PROGRAM [PROGRAMOPTION]..."
+    exit 0
+    ;;
+  -n | --pr | --pro | --prog | --progn | --progna | --prognam | --progname)
+    if test $# -eq 1; then
+      do_missing_arg $1
+    fi
+    shift
+    progname="$1"
+    ;;
+  --pr=* | --pro=* | --prog=* | --progn=* | --progna=* | --prognam=* | --progname=*)
+    progname=${1##*=}
+    ;;
+  -p | --pn | --png)
+    if test $# -eq 1; then
+      do_missing_arg $1
+    fi
+    shift
+    png="$1"
+    ;;
+  --pn=* | --png=*)
+    png=${1##*=}
+    ;;
+  -d | --d | --da | --dat | --data)
+    if test $# -eq 1; then
+      do_missing_arg $1
+    fi
+    shift
+    data="$1"
+    ;;
+  --d=* | --da=* | --dat=* | --data=*)
+    data=${1##*=}
+    ;;
+  -u | --un | --unb | --unbu | --unbuf | --unbuff | --unbuffe | --unbuffer | --unbuffere | --unbuffered)
+    buffer=1
+    ;;
+  -b | --b | --bu | --buf | --buff | --buffe | --buffer)
+    if test $# -eq 1; then
+      do_missing_arg $1
+    fi
+    shift
+    buffer="$1"
+    ;;
+  --b=* | --bu=* | --buf=* | --buff=* | --buffe=* | --buffer=*)
+    buffer=${1##*=}
+    ;;
+  --n | --no | --no- | --no-t | --no-ti | --no-tim | --no-time | --no-timer)
+    notimer=yes
+    ;;
+  -m | --m | --mm | --mma | --mmap)
+    tracemmap=yes
+    ;;
+  -t | --tim | --time | --time- | --time-b | --time-ba | --time-bas | --time-base | --time-based)
+    memusagestat_args="$memusagestat_args -t"
+    ;;
+  -T | --to | --tot | --tota | --total)
+    memusagestat_args="$memusagestat_args -T"
+    ;;
+  --tit | --titl | --title)
+    if test $# -eq 1; then
+      do_missing_arg $1
+    fi
+    shift
+    memusagestat_args="$memusagestat_args -s $1"
+    ;;
+  --tit=* | --titl=* | --title=*)
+    memusagestat_args="$memusagestat_args -s ${1##*=}"
+    ;;
+  -x | --x | --x- | --x-s | --x-si | --x-siz | --x-size)
+    if test $# -eq 1; then
+      do_missing_arg $1
+    fi
+    shift
+    memusagestat_args="$memusagestat_args -x $1"
+    ;;
+  --x=* | --x-=* | --x-s=* | --x-si=* | --x-siz=* | --x-size=*)
+    memusagestat_args="$memusagestat_args -x ${1##*=}"
+    ;;
+  -y | --y | --y- | --y-s | --y-si | --y-siz | --y-size)
+    if test $# -eq 1; then
+      do_missing_arg $1
+    fi
+    shift
+    memusagestat_args="$memusagestat_args -y $1"
+    ;;
+  --y=* | --y-=* | --y-s=* | --y-si=* | --y-siz=* | --y-size=*)
+    memusagestat_args="$memusagestat_args -y ${1##*=}"
+    ;;
+  --p | --p=* | --t | --t=* | --ti | --ti=* | --u)
+    echo >&2 $"memusage: option \`${1##*=}' is ambiguous"
+    do_usage
+    ;;
+  --)
+    # Stop processing arguments.
+    shift
+    break
+    ;;
+  --*)
+    echo >&2 $"memusage: unrecognized option \`$1'"
+    do_usage
+    ;;
+  *)
+    # Unknown option.  This means the rest is the program name and parameters.
+    break
+    ;;
+  esac
+  shift
+done
+
+# See whether any arguments are left.
+if test $# -eq 0; then
+  echo >&2 $"No program name given"
+  do_usage
+fi
+
+# This will be in the environment.
+add_env="LD_PRELOAD=$memusageso"
+
+# Generate data file name.
+datafile=
+if test -n "$data"; then
+  datafile="$data"
+elif test -n "$png"; then
+  datafile=$(mktemp -t memusage.XXXXXX) || exit
+  trap 'rm -f "$datafile"; exit 1' HUP INT QUIT TERM PIPE
+fi
+if test -n "$datafile"; then
+  add_env="$add_env MEMUSAGE_OUTPUT=$datafile"
+fi
+
+# Set program name.
+if test -n "$progname"; then
+  add_env="$add_env MEMUSAGE_PROG_NAME=$progname"
+fi
+
+# Set buffer size.
+if test -n "$buffer"; then
+  add_env="$add_env MEMUSAGE_BUFFER_SIZE=$buffer"
+fi
+
+# Disable timers.
+if test -n "$notimer"; then
+  add_env="$add_env MEMUSAGE_NO_TIMER=yes"
+fi
+
+# Trace mmap.
+if test -n "$tracemmap"; then
+  add_env="$add_env MEMUSAGE_TRACE_MMAP=yes"
+fi
+
+# Execute the program itself.
+eval $add_env '"$@"'
+result=$?
+
+# Generate the PNG file if wanted and there is something to generate
+# it from.
+if test -n "$png" -a -n "$datafile" -a -s "$datafile"; then
+  # Append extension .png if it isn't already there.
+  case $png in
+  *.png) ;;
+  *) png="$png.png" ;;
+  esac
+  $memusagestat $memusagestat_args "$datafile" "$png"
+fi
+
+if test -z "$data" -a -n "$datafile"; then
+  rm -f "$datafile"
+fi
+
+exit $result
+# Local Variables:
+#  mode:ksh
+# End:
diff --git a/REORG.TODO/malloc/memusagestat.c b/REORG.TODO/malloc/memusagestat.c
new file mode 100644
index 0000000000..0bd158bd1e
--- /dev/null
+++ b/REORG.TODO/malloc/memusagestat.c
@@ -0,0 +1,587 @@
+/* Generate graphic from memory profiling data.
+   Copyright (C) 1998-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published
+   by the Free Software Foundation; version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, see <http://www.gnu.org/licenses/>.  */
+
+#define _FILE_OFFSET_BITS 64
+
+#include <argp.h>
+#include <assert.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <inttypes.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+
+#include <gd.h>
+#include <gdfontl.h>
+#include <gdfonts.h>
+
+#include "../version.h"
+#define PACKAGE _libc_intl_domainname
+
+/* Default size of the generated image.  */
+#define XSIZE 800
+#define YSIZE 600
+
+#ifndef N_
+# define N_(Arg) Arg
+#endif
+
+
+/* Definitions of arguments for argp functions.  */
+static const struct argp_option options[] =
+{
+  { "output", 'o', N_ ("FILE"), 0, N_ ("Name output file") },
+  { "string", 's', N_ ("STRING"), 0, N_ ("Title string used in output graphic") },
+  { "time", 't', NULL, 0, N_ ("\
+Generate output linear to time (default is linear to number of function calls)\
+") },
+  { "total", 'T', NULL, 0,
+    N_ ("Also draw graph for total memory consumption") },
+  { "x-size", 'x', N_ ("VALUE"), 0,
+    N_ ("Make output graphic VALUE pixels wide") },
+  { "y-size", 'y', "VALUE", 0, N_ ("Make output graphic VALUE pixels high") },
+  { NULL, 0, NULL, 0, NULL }
+};
+
+/* Short description of program.  */
+static const char doc[] = N_ ("Generate graphic from memory profiling data");
+
+/* Strings for arguments in help texts.  */
+static const char args_doc[] = N_ ("DATAFILE [OUTFILE]");
+
+/* Prototype for option handler.  */
+static error_t parse_opt (int key, char *arg, struct argp_state *state);
+
+/* Function to print some extra text in the help message.  */
+static char *more_help (int key, const char *text, void *input);
+
+/* Name and version of program.  */
+static void print_version (FILE *stream, struct argp_state *state);
+void (*argp_program_version_hook) (FILE *, struct argp_state *) = print_version;
+
+/* Data structure to communicate with argp functions.  */
+static struct argp argp =
+{
+  options, parse_opt, args_doc, doc, NULL, more_help
+};
+
+
+struct entry
+{
+  uint64_t heap;
+  uint64_t stack;
+  uint32_t time_low;
+  uint32_t time_high;
+};
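+
+/* Layout of the input file, as read back below (it is written by the
+   libmemusage.so preload library): two administrative entries come
+   first -- entry 0 holds the start time and the overall maximum in its
+   stack field, entry 1 the heap and stack maxima and the end time --
+   followed by one entry per recorded event.  */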
+
+
+/* Size of the image.  */
+static size_t xsize;
+static size_t ysize;
+
+/* Name of the output file.  */
+static char *outname;
+
+/* Title string for the graphic.  */
+static const char *string;
+
+/* Nonzero if graph should be generated linear in time.  */
+static int time_based;
+
+/* Nonzero if graph to display total use of memory should be drawn as well.  */
+static int also_total = 0;
+
+
+int
+main (int argc, char *argv[])
+{
+  int remaining;
+  const char *inname;
+  gdImagePtr im_out;
+  int grey, blue, red, green, yellow, black;
+  int fd;
+  struct stat st;
+  size_t maxsize_heap;
+  size_t maxsize_stack;
+  size_t maxsize_total;
+  uint64_t total;
+  uint64_t cnt, cnt2;
+  FILE *outfile;
+  char buf[30];
+  size_t last_heap;
+  size_t last_stack;
+  size_t last_total;
+  struct entry headent[2];
+  uint64_t start_time;
+  uint64_t end_time;
+  uint64_t total_time;
+  const char *heap_format, *stack_format;
+  int heap_scale, stack_scale, line;
+
+  outname = NULL;
+  xsize = XSIZE;
+  ysize = YSIZE;
+  string = NULL;
+
+  /* Parse and process arguments.  */
+  argp_parse (&argp, argc, argv, 0, &remaining, NULL);
+
+  if (remaining >= argc || remaining + 2 < argc)
+    {
+      argp_help (&argp, stdout, ARGP_HELP_SEE | ARGP_HELP_EXIT_ERR,
+                 program_invocation_short_name);
+      exit (1);
+    }
+
+  inname = argv[remaining++];
+
+  if (remaining < argc)
+    outname = argv[remaining];
+  else if (outname == NULL)
+    {
+      size_t len = strlen (inname);
+      outname = alloca (len + 5);
+      stpcpy (stpcpy (outname, inname), ".png");
+    }
+
+  /* Open for read/write since we try to repair the file in case the
+     application hasn't terminated cleanly.  */
+  fd = open (inname, O_RDWR);
+  if (fd == -1)
+    error (EXIT_FAILURE, errno, "cannot open input file");
+  if (fstat (fd, &st) != 0)
+    {
+      close (fd);
+      error (EXIT_FAILURE, errno, "cannot get size of input file");
+    }
+  /* Test whether the file contains only full records.  */
+  if ((st.st_size % sizeof (struct entry)) != 0
+      /* The file must at least contain the two administrative records.  */
+      || st.st_size < 2 * sizeof (struct entry))
+    {
+      close (fd);
+      error (EXIT_FAILURE, 0, "input file has incorrect size");
+    }
+  /* Compute number of data entries.  */
+  total = st.st_size / sizeof (struct entry) - 2;
+
+  /* Read the administrative information.  */
+  read (fd, headent, sizeof (headent));
+  maxsize_heap = headent[1].heap;
+  maxsize_stack = headent[1].stack;
+  maxsize_total = headent[0].stack;
+
+  if (maxsize_heap == 0 && maxsize_stack == 0)
+    {
+      /* The program aborted before memusage was able to write the
+         information about the maximum heap and stack use.  Repair
+         the file now.  */
+      struct entry next;
+
+      while (1)
+        {
+          if (read (fd, &next, sizeof (next)) == 0)
+            break;
+          if (next.heap > maxsize_heap)
+            maxsize_heap = next.heap;
+          if (next.stack > maxsize_stack)
+            maxsize_stack = next.stack;
+          if (maxsize_heap + maxsize_stack > maxsize_total)
+            maxsize_total = maxsize_heap + maxsize_stack;
+        }
+
+      headent[0].stack = maxsize_total;
+      headent[1].heap = maxsize_heap;
+      headent[1].stack = maxsize_stack;
+      headent[1].time_low = next.time_low;
+      headent[1].time_high = next.time_high;
+
+      /* Write the computed values in the file.  */
+      lseek (fd, 0, SEEK_SET);
+      write (fd, headent, 2 * sizeof (struct entry));
+    }
+
+  if (also_total)
+    {
+      /* We use one scale and since we also draw the total amount of
+         memory used we have to adapt the maximum.  */
+      maxsize_heap = maxsize_total;
+      maxsize_stack = maxsize_total;
+    }
+
+  start_time = ((uint64_t) headent[0].time_high) << 32 | headent[0].time_low;
+  end_time = ((uint64_t) headent[1].time_high) << 32 | headent[1].time_low;
+  total_time = end_time - start_time;
+
+  if (xsize < 100)
+    xsize = 100;
+  if (ysize < 80)
+    ysize = 80;
+
+  /* Create output image with the specified size.  */
+  im_out = gdImageCreate (xsize, ysize);
+
+  /* First color allocated is background.  */
+  grey = gdImageColorAllocate (im_out, 224, 224, 224);
+
+  /* Set transparent color. */
+  gdImageColorTransparent (im_out, grey);
+
+  /* These are all the other colors we need (at the moment).  */
+  red = gdImageColorAllocate (im_out, 255, 0, 0);
+  green = gdImageColorAllocate (im_out, 0, 130, 0);
+  blue = gdImageColorAllocate (im_out, 0, 0, 255);
+  yellow = gdImageColorAllocate (im_out, 154, 205, 50);
+  black = gdImageColorAllocate (im_out, 0, 0, 0);
+
+  gdImageRectangle (im_out, 40, 20, xsize - 40, ysize - 20, blue);
+
+  if (maxsize_heap < 1024)
+    {
+      heap_format = "%Zu";
+      heap_scale = 1;
+    }
+  else if (maxsize_heap < 1024 * 1024 * 100)
+    {
+      heap_format = "%Zuk";
+      heap_scale = 1024;
+    }
+  else
+    {
+      heap_format = "%ZuM";
+      heap_scale = 1024 * 1024;
+    }
+
+  if (maxsize_stack < 1024)
+    {
+      stack_format = "%Zu";
+      stack_scale = 1;
+    }
+  else if (maxsize_stack < 1024 * 1024 * 100)
+    {
+      stack_format = "%Zuk";
+      stack_scale = 1024;
+    }
+  else
+    {
+      stack_format = "%ZuM";
+      stack_scale = 1024 * 1024;
+    }
+
+  gdImageString (im_out, gdFontSmall, 38, ysize - 14, (unsigned char *) "0",
+                 blue);
+  snprintf (buf, sizeof (buf), heap_format, 0);
+  gdImageString (im_out, gdFontSmall, maxsize_heap < 1024 ? 32 : 26,
+                 ysize - 26, (unsigned char *) buf, red);
+  snprintf (buf, sizeof (buf), stack_format, 0);
+  gdImageString (im_out, gdFontSmall, xsize - 37, ysize - 26,
+                 (unsigned char *) buf, green);
+
+  if (string != NULL)
+    gdImageString (im_out, gdFontLarge, (xsize - strlen (string) * 8) / 2,
+                   2, (unsigned char *) string, green);
+
+  gdImageStringUp (im_out, gdFontSmall, 1, ysize / 2 - 10,
+                   (unsigned char *) "allocated", red);
+  gdImageStringUp (im_out, gdFontSmall, 11, ysize / 2 - 10,
+                   (unsigned char *) "memory", red);
+
+  gdImageStringUp (im_out, gdFontSmall, xsize - 39, ysize / 2 - 10,
+                   (unsigned char *) "used", green);
+  gdImageStringUp (im_out, gdFontSmall, xsize - 27, ysize / 2 - 10,
+                   (unsigned char *) "stack", green);
+
+  snprintf (buf, sizeof (buf), heap_format, maxsize_heap / heap_scale);
+  gdImageString (im_out, gdFontSmall, 39 - strlen (buf) * 6, 14,
+                 (unsigned char *) buf, red);
+  snprintf (buf, sizeof (buf), stack_format, maxsize_stack / stack_scale);
+  gdImageString (im_out, gdFontSmall, xsize - 37, 14,
+                 (unsigned char *) buf, green);
+
+  for (line = 1; line <= 3; ++line)
+    {
+      if (maxsize_heap > 0)
+        {
+          cnt = (((ysize - 40) * (maxsize_heap / 4 * line / heap_scale))
+                 / (maxsize_heap / heap_scale));
+          gdImageDashedLine (im_out, 40, ysize - 20 - cnt, xsize - 40,
+                             ysize - 20 - cnt, red);
+          snprintf (buf, sizeof (buf), heap_format,
+                    maxsize_heap / 4 * line / heap_scale);
+          gdImageString (im_out, gdFontSmall, 39 - strlen (buf) * 6,
+                         ysize - 26 - cnt, (unsigned char *) buf, red);
+        }
+      else
+        cnt = 0;
+
+      if (maxsize_stack > 0)
+        cnt2 = (((ysize - 40) * (maxsize_stack / 4 * line / stack_scale))
+                / (maxsize_stack / stack_scale));
+      else
+        cnt2 = 0;
+
+      if (cnt != cnt2)
+        gdImageDashedLine (im_out, 40, ysize - 20 - cnt2, xsize - 40,
+                           ysize - 20 - cnt2, green);
+      snprintf (buf, sizeof (buf), stack_format, maxsize_stack / 4 * line /
+                stack_scale);
+      gdImageString (im_out, gdFontSmall, xsize - 37, ysize - 26 - cnt2,
+                     (unsigned char *) buf, green);
+    }
+
+  snprintf (buf, sizeof (buf), "%llu", (unsigned long long) total);
+  gdImageString (im_out, gdFontSmall, xsize - 50, ysize - 14,
+                 (unsigned char *) buf, blue);
+
+  if (!time_based)
+    {
+      uint64_t previously = start_time;
+
+      gdImageString (im_out, gdFontSmall, 40 + (xsize - 32 * 6 - 80) / 2,
+                     ysize - 12,
+                     (unsigned char *) "# memory handling function calls",
+                     blue);
+
+
+      last_stack = last_heap = last_total = ysize - 20;
+      for (cnt = 1; cnt <= total; ++cnt)
+        {
+          struct entry entry;
+          size_t new[2];
+          uint64_t now;
+
+          read (fd, &entry, sizeof (entry));
+
+          now = ((uint64_t) entry.time_high) << 32 | entry.time_low;
+
+          if ((((previously - start_time) * 100) / total_time) % 10 < 5)
+            gdImageFilledRectangle (im_out,
+                                    40 + ((cnt - 1) * (xsize - 80)) / total,
+                                    ysize - 19,
+                                    39 + (cnt * (xsize - 80)) / total,
+                                    ysize - 14, yellow);
+          previously = now;
+
+          if (also_total && maxsize_heap > 0)
+            {
+              size_t new3;
+
+              new3 = (ysize - 20) - ((((unsigned long long int) (ysize - 40))
+                                      * (entry.heap + entry.stack))
+                                     / maxsize_heap);
+              gdImageLine (im_out, 40 + ((xsize - 80) * (cnt - 1)) / total,
+                           last_total,
+                           40 + ((xsize - 80) * cnt) / total, new3,
+                           black);
+              last_total = new3;
+            }
+
+          if (maxsize_heap > 0)
+            {
+              new[0] = ((ysize - 20)
+                        - ((((unsigned long long int) (ysize - 40))
+                            * entry.heap) / maxsize_heap));
+              gdImageLine (im_out, 40 + ((xsize - 80) * (cnt - 1)) / total,
+                           last_heap, 40 + ((xsize - 80) * cnt) / total,
+                           new[0], red);
+              last_heap = new[0];
+            }
+
+          if (maxsize_stack > 0)
+            {
+              new[1] = ((ysize - 20)
+                        - ((((unsigned long long int) (ysize - 40))
+                            * entry.stack) / maxsize_stack));
+              gdImageLine (im_out, 40 + ((xsize - 80) * (cnt - 1)) / total,
+                           last_stack, 40 + ((xsize - 80) * cnt) / total,
+                           new[1], green);
+              last_stack = new[1];
+            }
+        }
+
+      cnt = 0;
+      while (cnt < total)
+        {
+          gdImageLine (im_out, 40 + ((xsize - 80) * cnt) / total, ysize - 20,
+                       40 + ((xsize - 80) * cnt) / total, ysize - 15, blue);
+          cnt += MAX (1, total / 20);
+        }
+      gdImageLine (im_out, xsize - 40, ysize - 20, xsize - 40, ysize - 15,
+                   blue);
+    }
+  else
+    {
+      uint64_t next_tick = MAX (1, total / 20);
+      size_t last_xpos = 40;
+
+      gdImageString (im_out, gdFontSmall, 40 + (xsize - 39 * 6 - 80) / 2,
+                     ysize - 12,
+                     (unsigned char *) "				      \
+# memory handling function calls / time", blue);
+
+      for (cnt = 0; cnt < 20; cnt += 2)
+        gdImageFilledRectangle (im_out,
+                                40 + (cnt * (xsize - 80)) / 20, ysize - 19,
+                                39 + ((cnt + 1) * (xsize - 80)) / 20,
+                                ysize - 14, yellow);
+
+      last_stack = last_heap = last_total = ysize - 20;
+      for (cnt = 1; cnt <= total; ++cnt)
+        {
+          struct entry entry;
+          size_t new[2];
+          size_t xpos;
+          uint64_t now;
+
+          read (fd, &entry, sizeof (entry));
+
+          now = ((uint64_t) entry.time_high) << 32 | entry.time_low;
+          xpos = 40 + ((xsize - 80) * (now - start_time)) / total_time;
+
+          if (cnt == next_tick)
+            {
+              gdImageLine (im_out, xpos, ysize - 20, xpos, ysize - 15, blue);
+              next_tick += MAX (1, total / 20);
+            }
+
+          if (also_total && maxsize_heap > 0)
+            {
+              size_t new3;
+
+              new3 = (ysize - 20) - ((((unsigned long long int) (ysize - 40))
+                                      * (entry.heap + entry.stack))
+                                     / maxsize_heap);
+              gdImageLine (im_out, last_xpos, last_total, xpos, new3, black);
+              last_total = new3;
+            }
+
+          if (maxsize_heap > 0)
+            {
+              new[0] = ((ysize - 20)
+                        - ((((unsigned long long int) (ysize - 40))
+                            * entry.heap) / maxsize_heap));
+              gdImageLine (im_out, last_xpos, last_heap, xpos, new[0], red);
+              last_heap = new[0];
+            }
+
+          if (maxsize_stack > 0)
+            {
+              new[1] = ((ysize - 20)
+                        - ((((unsigned long long int) (ysize - 40))
+                            * entry.stack) / maxsize_stack));
+              gdImageLine (im_out, last_xpos, last_stack, xpos, new[1],
+                           green);
+              last_stack = new[1];
+            }
+
+          last_xpos = xpos;
+        }
+    }
+
+  /* Write out the result.  */
+  outfile = fopen (outname, "w");
+  if (outfile == NULL)
+    error (EXIT_FAILURE, errno, "cannot open output file");
+
+  gdImagePng (im_out, outfile);
+
+  fclose (outfile);
+
+  gdImageDestroy (im_out);
+
+  return 0;
+}
+
+
+/* Handle program arguments.  */
+static error_t
+parse_opt (int key, char *arg, struct argp_state *state)
+{
+  switch (key)
+    {
+    case 'o':
+      outname = arg;
+      break;
+    case 's':
+      string = arg;
+      break;
+    case 't':
+      time_based = 1;
+      break;
+    case 'T':
+      also_total = 1;
+      break;
+    case 'x':
+      xsize = atoi (arg);
+      if (xsize == 0)
+        xsize = XSIZE;
+      break;
+    case 'y':
+      ysize = atoi (arg);
+      if (ysize == 0)
+        ysize = YSIZE;
+      break;
+    default:
+      return ARGP_ERR_UNKNOWN;
+    }
+  return 0;
+}
+
+
+static char *
+more_help (int key, const char *text, void *input)
+{
+  char *tp;
+
+  switch (key)
+    {
+    case ARGP_KEY_HELP_EXTRA:
+      /* We print some extra information.  */
+      if (asprintf (&tp, gettext ("\
+For bug reporting instructions, please see:\n\
+%s.\n"), REPORT_BUGS_TO) < 0)
+        return NULL;
+
+      return tp;
+
+    default:
+      break;
+    }
+  return (char *) text;
+}
+
+/* Print the version information.  */
+static void
+print_version (FILE *stream, struct argp_state *state)
+{
+  fprintf (stream, "memusagestat %s%s\n", PKGVERSION, VERSION);
+  fprintf (stream, gettext ("\
+Copyright (C) %s Free Software Foundation, Inc.\n\
+This is free software; see the source for copying conditions.  There is NO\n\
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n\
+"), "2017");
+  fprintf (stream, gettext ("Written by %s.\n"), "Ulrich Drepper");
+}
diff --git a/REORG.TODO/malloc/morecore.c b/REORG.TODO/malloc/morecore.c
new file mode 100644
index 0000000000..edfceda926
--- /dev/null
+++ b/REORG.TODO/malloc/morecore.c
@@ -0,0 +1,53 @@
+/* Copyright (C) 1991-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _MALLOC_INTERNAL
+# define _MALLOC_INTERNAL
+# include <malloc.h>
+#endif
+
+#ifndef __GNU_LIBRARY__
+# define __sbrk  sbrk
+#endif
+
+#ifdef __GNU_LIBRARY__
+/* It is best not to declare this and cast its result on foreign operating
+   systems with potentially hostile include files.  */
+
+# include <stddef.h>
+# include <stdlib.h>
+extern void *__sbrk (ptrdiff_t increment) __THROW;
+libc_hidden_proto (__sbrk)
+#endif
+
+#ifndef NULL
+# define NULL 0
+#endif
+
+/* Allocate INCREMENT more bytes of data space,
+   and return the start of data space, or NULL on errors.
+   If INCREMENT is negative, shrink data space.  */
+void *
+__default_morecore (ptrdiff_t increment)
+{
+  void *result = (void *) __sbrk (increment);
+  if (result == (void *) -1)
+    return NULL;
+
+  return result;
+}
+libc_hidden_def (__default_morecore)
diff --git a/REORG.TODO/malloc/mtrace.c b/REORG.TODO/malloc/mtrace.c
new file mode 100644
index 0000000000..02c53eb9fe
--- /dev/null
+++ b/REORG.TODO/malloc/mtrace.c
@@ -0,0 +1,348 @@
+/* More debugging hooks for `malloc'.
+   Copyright (C) 1991-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+                 Written April 2, 1991 by John Gilmore of Cygnus Support.
+                 Based on mcheck.c by Mike Haertel.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _MALLOC_INTERNAL
+# define _MALLOC_INTERNAL
+# include <malloc.h>
+# include <mcheck.h>
+# include <libc-lock.h>
+#endif
+
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <_itoa.h>
+
+#include <libc-internal.h>
+
+#include <libio/iolibio.h>
+#define setvbuf(s, b, f, l) _IO_setvbuf (s, b, f, l)
+#define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
+
+#include <kernel-features.h>
+
+#define TRACE_BUFFER_SIZE 512
+
+static FILE *mallstream;
+static const char mallenv[] = "MALLOC_TRACE";
+static char *malloc_trace_buffer;
+
+__libc_lock_define_initialized (static, lock);
+
+/* Address to breakpoint on accesses to... */
+__ptr_t mallwatch;
+
+/* Old hook values.  */
+static void (*tr_old_free_hook) (__ptr_t ptr, const __ptr_t);
+static __ptr_t (*tr_old_malloc_hook) (size_t size, const __ptr_t);
+static __ptr_t (*tr_old_realloc_hook) (__ptr_t ptr, size_t size,
+                                       const __ptr_t);
+static __ptr_t (*tr_old_memalign_hook) (size_t __alignment, size_t __size,
+                                        const __ptr_t);
+
+/* This function is called when the block being alloc'd, realloc'd, or
+   freed has an address matching the variable "mallwatch".  In a debugger,
+   set "mallwatch" to the address of interest, then put a breakpoint on
+   tr_break.  */
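+
+/* A minimal debugger session might look like this (GDB assumed; the
+   address is purely illustrative):
+     (gdb) break tr_break
+     (gdb) set var mallwatch = (void *) 0x804a008
+   Every traced malloc/realloc/free of that address then stops here.  */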
+
+extern void tr_break (void) __THROW;
+libc_hidden_proto (tr_break)
+void
+tr_break (void)
+{
+}
+libc_hidden_def (tr_break)
+
+static void internal_function
+tr_where (const __ptr_t caller, Dl_info *info)
+{
+  if (caller != NULL)
+    {
+      if (info != NULL)
+        {
+          char *buf = (char *) "";
+          if (info->dli_sname != NULL)
+            {
+              size_t len = strlen (info->dli_sname);
+              buf = alloca (len + 6 + 2 * sizeof (void *));
+
+              buf[0] = '(';
+              __stpcpy (_fitoa (caller >= (const __ptr_t) info->dli_saddr
+                                ? caller - (const __ptr_t) info->dli_saddr
+                                : (const __ptr_t) info->dli_saddr - caller,
+                                __stpcpy (__mempcpy (buf + 1, info->dli_sname,
+                                                     len),
+                                          caller >= (__ptr_t) info->dli_saddr
+                                          ? "+0x" : "-0x"),
+                                16, 0),
+                        ")");
+            }
+
+          fprintf (mallstream, "@ %s%s%s[%p] ",
+                   info->dli_fname ? : "", info->dli_fname ? ":" : "",
+                   buf, caller);
+        }
+      else
+        fprintf (mallstream, "@ [%p] ", caller);
+    }
+}
+
+static Dl_info *
+lock_and_info (const __ptr_t caller, Dl_info *mem)
+{
+  if (caller == NULL)
+    return NULL;
+
+  Dl_info *res = _dl_addr (caller, mem, NULL, NULL) ? mem : NULL;
+
+  __libc_lock_lock (lock);
+
+  return res;
+}
+
+static void
+tr_freehook (__ptr_t ptr, const __ptr_t caller)
+{
+  if (ptr == NULL)
+    return;
+
+  Dl_info mem;
+  Dl_info *info = lock_and_info (caller, &mem);
+  tr_where (caller, info);
+  /* Be sure to print it first.  */
+  fprintf (mallstream, "- %p\n", ptr);
+  if (ptr == mallwatch)
+    {
+      __libc_lock_unlock (lock);
+      tr_break ();
+      __libc_lock_lock (lock);
+    }
+  __free_hook = tr_old_free_hook;
+  if (tr_old_free_hook != NULL)
+    (*tr_old_free_hook)(ptr, caller);
+  else
+    free (ptr);
+  __free_hook = tr_freehook;
+  __libc_lock_unlock (lock);
+}
+
+static __ptr_t
+tr_mallochook (size_t size, const __ptr_t caller)
+{
+  __ptr_t hdr;
+
+  Dl_info mem;
+  Dl_info *info = lock_and_info (caller, &mem);
+
+  __malloc_hook = tr_old_malloc_hook;
+  if (tr_old_malloc_hook != NULL)
+    hdr = (__ptr_t) (*tr_old_malloc_hook)(size, caller);
+  else
+    hdr = (__ptr_t) malloc (size);
+  __malloc_hook = tr_mallochook;
+
+  tr_where (caller, info);
+  /* We could be printing a NULL here; that's OK.  */
+  fprintf (mallstream, "+ %p %#lx\n", hdr, (unsigned long int) size);
+
+  __libc_lock_unlock (lock);
+
+  if (hdr == mallwatch)
+    tr_break ();
+
+  return hdr;
+}
+
+static __ptr_t
+tr_reallochook (__ptr_t ptr, size_t size, const __ptr_t caller)
+{
+  __ptr_t hdr;
+
+  if (ptr == mallwatch)
+    tr_break ();
+
+  Dl_info mem;
+  Dl_info *info = lock_and_info (caller, &mem);
+
+  __free_hook = tr_old_free_hook;
+  __malloc_hook = tr_old_malloc_hook;
+  __realloc_hook = tr_old_realloc_hook;
+  if (tr_old_realloc_hook != NULL)
+    hdr = (__ptr_t) (*tr_old_realloc_hook)(ptr, size, caller);
+  else
+    hdr = (__ptr_t) realloc (ptr, size);
+  __free_hook = tr_freehook;
+  __malloc_hook = tr_mallochook;
+  __realloc_hook = tr_reallochook;
+
+  tr_where (caller, info);
+  if (hdr == NULL)
+    {
+      if (size != 0)
+        /* Failed realloc.  */
+        fprintf (mallstream, "! %p %#lx\n", ptr, (unsigned long int) size);
+      else
+        fprintf (mallstream, "- %p\n", ptr);
+    }
+  else if (ptr == NULL)
+    fprintf (mallstream, "+ %p %#lx\n", hdr, (unsigned long int) size);
+  else
+    {
+      fprintf (mallstream, "< %p\n", ptr);
+      tr_where (caller, info);
+      fprintf (mallstream, "> %p %#lx\n", hdr, (unsigned long int) size);
+    }
+
+  __libc_lock_unlock (lock);
+
+  if (hdr == mallwatch)
+    tr_break ();
+
+  return hdr;
+}
+
+static __ptr_t
+tr_memalignhook (size_t alignment, size_t size, const __ptr_t caller)
+{
+  __ptr_t hdr;
+
+  Dl_info mem;
+  Dl_info *info = lock_and_info (caller, &mem);
+
+  __memalign_hook = tr_old_memalign_hook;
+  __malloc_hook = tr_old_malloc_hook;
+  if (tr_old_memalign_hook != NULL)
+    hdr = (__ptr_t) (*tr_old_memalign_hook)(alignment, size, caller);
+  else
+    hdr = (__ptr_t) memalign (alignment, size);
+  __memalign_hook = tr_memalignhook;
+  __malloc_hook = tr_mallochook;
+
+  tr_where (caller, info);
+  /* We could be printing a NULL here; that's OK.  */
+  fprintf (mallstream, "+ %p %#lx\n", hdr, (unsigned long int) size);
+
+  __libc_lock_unlock (lock);
+
+  if (hdr == mallwatch)
+    tr_break ();
+
+  return hdr;
+}
+
+
+#ifdef _LIBC
+
+/* This function gets called to make sure all memory the library
+   allocates gets freed and so does not irritate the user when studying
+   the mtrace output.  */
+static void __libc_freeres_fn_section
+release_libc_mem (void)
+{
+  /* Only call the free function if we still are running in mtrace mode.  */
+  if (mallstream != NULL)
+    __libc_freeres ();
+}
+#endif
+
+
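+/* Typical use from the shell looks like this (a sketch; the trace file
+   name is an example only, and the program must call mtrace () itself):
+     $ MALLOC_TRACE=/tmp/mtrace.log ./prog
+     $ mtrace ./prog /tmp/mtrace.log
+   The second step runs the Perl post-processor over the trace file.  */
+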
+/* We enable tracing if either the environment variable MALLOC_TRACE
+   is set, or if the variable mallwatch has been patched to an address
+   that the debugging user wants us to stop on.  When patching mallwatch,
+   don't forget to set a breakpoint on tr_break!  */
+
+void
+mtrace (void)
+{
+#ifdef _LIBC
+  static int added_atexit_handler;
+#endif
+  char *mallfile;
+
+  /* Don't panic if we're called more than once.  */
+  if (mallstream != NULL)
+    return;
+
+#ifdef _LIBC
+  /* When compiling the GNU libc we use the secure getenv function
+     which prevents the misuse in case of SUID or SGID enabled
+     programs.  */
+  mallfile = __libc_secure_getenv (mallenv);
+#else
+  mallfile = getenv (mallenv);
+#endif
+  if (mallfile != NULL || mallwatch != NULL)
+    {
+      char *mtb = malloc (TRACE_BUFFER_SIZE);
+      if (mtb == NULL)
+        return;
+
+      mallstream = fopen (mallfile != NULL ? mallfile : "/dev/null", "wce");
+      if (mallstream != NULL)
+        {
+          /* Be sure it doesn't malloc its buffer!  */
+          malloc_trace_buffer = mtb;
+          setvbuf (mallstream, malloc_trace_buffer, _IOFBF, TRACE_BUFFER_SIZE);
+          fprintf (mallstream, "= Start\n");
+          tr_old_free_hook = __free_hook;
+          __free_hook = tr_freehook;
+          tr_old_malloc_hook = __malloc_hook;
+          __malloc_hook = tr_mallochook;
+          tr_old_realloc_hook = __realloc_hook;
+          __realloc_hook = tr_reallochook;
+          tr_old_memalign_hook = __memalign_hook;
+          __memalign_hook = tr_memalignhook;
+#ifdef _LIBC
+          if (!added_atexit_handler)
+            {
+              extern void *__dso_handle __attribute__ ((__weak__));
+              added_atexit_handler = 1;
+              __cxa_atexit ((void (*)(void *))release_libc_mem, NULL,
+                            &__dso_handle ? __dso_handle : NULL);
+            }
+#endif
+        }
+      else
+        free (mtb);
+    }
+}
+
+void
+muntrace (void)
+{
+  if (mallstream == NULL)
+    return;
+
+  /* Do the reverse of what was done in mtrace: first reset the hooks and
+     MALLSTREAM, and only after that write the trailer and close the
+     file.  */
+  FILE *f = mallstream;
+  mallstream = NULL;
+  __free_hook = tr_old_free_hook;
+  __malloc_hook = tr_old_malloc_hook;
+  __realloc_hook = tr_old_realloc_hook;
+  __memalign_hook = tr_old_memalign_hook;
+
+  fprintf (f, "= End\n");
+  fclose (f);
+}
diff --git a/REORG.TODO/malloc/mtrace.pl b/REORG.TODO/malloc/mtrace.pl
new file mode 100644
index 0000000000..4acbd81680
--- /dev/null
+++ b/REORG.TODO/malloc/mtrace.pl
@@ -0,0 +1,237 @@
+#! @PERL@
+eval "exec @PERL@ -S $0 $@"
+    if 0;
+# Copyright (C) 1997-2017 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+# Contributed by Ulrich Drepper <drepper@gnu.org>, 1997.
+# Based on the mtrace.awk script.
+
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+
+$VERSION = "@VERSION@";
+$PKGVERSION = "@PKGVERSION@";
+$REPORT_BUGS_TO = '@REPORT_BUGS_TO@';
+$progname = $0;
+
+sub usage {
+    print "Usage: mtrace [OPTION]... [Binary] MtraceData\n";
+    print "  --help       print this help, then exit\n";
+    print "  --version    print version number, then exit\n";
+    print "\n";
+    print "For bug reporting instructions, please see:\n";
+    print "$REPORT_BUGS_TO.\n";
+    exit 0;
+}
+
+# We expect two arguments:
+#   #1: the complete path to the binary
+#   #2: the mtrace data filename
+# The usual options are also recognized.
+
+arglist: while (@ARGV) {
+    if ($ARGV[0] eq "--v" || $ARGV[0] eq "--ve" || $ARGV[0] eq "--ver" ||
+	$ARGV[0] eq "--vers" || $ARGV[0] eq "--versi" ||
+	$ARGV[0] eq "--versio" || $ARGV[0] eq "--version") {
+	print "mtrace $PKGVERSION$VERSION\n";
+	print "Copyright (C) 2017 Free Software Foundation, Inc.\n";
+	print "This is free software; see the source for copying conditions.  There is NO\n";
+	print "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n";
+	print "Written by Ulrich Drepper <drepper\@gnu.org>\n";
+
+	exit 0;
+    } elsif ($ARGV[0] eq "--h" || $ARGV[0] eq "--he" || $ARGV[0] eq "--hel" ||
+	     $ARGV[0] eq "--help") {
+	&usage;
+    } elsif ($ARGV[0] =~ /^-/) {
+	print "$progname: unrecognized option `$ARGV[0]'\n";
+	print "Try `$progname --help' for more information.\n";
+	exit 1;
+    } else {
+	last arglist;
+    }
+}
+
+if ($#ARGV == 0) {
+    $binary="";
+    $data=$ARGV[0];
+} elsif ($#ARGV == 1) {
+    $binary=$ARGV[0];
+    $data=$ARGV[1];
+
+    if ($binary =~ /^.*[\/].*$/) {
+	$prog = $binary;
+    } else {
+	$prog = "./$binary";
+    }
+    if (open (LOCS, "env LD_TRACE_LOADED_OBJECTS=1 $prog |")) {
+	while (<LOCS>) {
+	    chop;
+	    if (/^.*=> (.*) .(0x[0123456789abcdef]*).$/) {
+		$locs{$1} = $2;
+	    }
+	}
+	close (LOCS);
+    }
+} else {
+    die "Wrong number of arguments, run $progname --help for help.";
+}
+
+sub location {
+    my $str = pop(@_);
+    return $str if ($str eq "");
+    if ($str =~ /.*[[](0x[^]]*)]:(.)*/) {
+	my $addr = $1;
+	my $fct = $2;
+	return $cache{$addr} if (exists $cache{$addr});
+	if ($binary ne "" && open (ADDR, "addr2line -e $binary $addr|")) {
+	    my $line = <ADDR>;
+	    chomp $line;
+	    close (ADDR);
+	    if ($line ne '??:0') {
+		$cache{$addr} = $line;
+		return $cache{$addr};
+	    }
+	}
+	$cache{$addr} = $str = "$fct @ $addr";
+    } elsif ($str =~ /^(.*):.*[[](0x[^]]*)]$/) {
+	my $prog = $1;
+	my $addr = $2;
+	my $searchaddr;
+	return $cache{$addr} if (exists $cache{$addr});
+	if ($locs{$prog} ne "") {
+	    $searchaddr = sprintf "%#x", $addr - $locs{$prog};
+	} else {
+	    $searchaddr = $addr;
+	    $prog = $binary;
+	}
+	if ($binary ne "" && open (ADDR, "addr2line -e $prog $searchaddr|")) {
+	    my $line = <ADDR>;
+	    chomp $line;
+	    close (ADDR);
+	    if ($line ne '??:0') {
+		$cache{$addr} = $line;
+		return $cache{$addr};
+	    }
+	}
+	$cache{$addr} = $str = $addr;
+    } elsif ($str =~ /^.*[[](0x[^]]*)]$/) {
+	my $addr = $1;
+	return $cache{$addr} if (exists $cache{$addr});
+	if ($binary ne "" && open (ADDR, "addr2line -e $binary $addr|")) {
+	    my $line = <ADDR>;
+	    chomp $line;
+	    close (ADDR);
+	    if ($line ne '??:0') {
+		$cache{$addr} = $line;
+		return $cache{$addr};
+	    }
+	}
+	$cache{$addr} = $str = $addr;
+    }
+    return $str;
+}
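+
+# The records parsed below are produced by glibc's mtrace facility and
+# look roughly like this (addresses and sizes are illustrative only):
+#   @ ./prog:(main+0x1a)[0x80484f6] + 0x804a008 0x10   16-byte allocation
+#   @ ./prog:[0x8048510] - 0x804a008                    matching free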
+
+$nr=0;
+open(DATA, "<$data") || die "Cannot open mtrace data file";
+while (<DATA>) {
+    my @cols = split (' ');
+    my ($n, $where);
+    if ($cols[0] eq "@") {
+	# We have address and/or function name.
+	$where=$cols[1];
+	$n=2;
+    } else {
+	$where="";
+	$n=0;
+    }
+
+    $allocaddr=$cols[$n + 1];
+    $howmuch=hex($cols[$n + 2]);
+
+    ++$nr;
+    SWITCH: {
+	if ($cols[$n] eq "+") {
+	    if (defined $allocated{$allocaddr}) {
+		printf ("+ %#0@XXX@x Alloc %d duplicate: %s %s\n",
+			hex($allocaddr), $nr, &location($addrwas{$allocaddr}),
+			$where);
+	    } elsif ($allocaddr =~ /^0x/) {
+		$allocated{$allocaddr}=$howmuch;
+		$addrwas{$allocaddr}=$where;
+	    }
+	    last SWITCH;
+	}
+	if ($cols[$n] eq "-") {
+	    if (defined $allocated{$allocaddr}) {
+		undef $allocated{$allocaddr};
+		undef $addrwas{$allocaddr};
+	    } else {
+		printf ("- %#0@XXX@x Free %d was never alloc'd %s\n",
+			hex($allocaddr), $nr, &location($where));
+	    }
+	    last SWITCH;
+	}
+	if ($cols[$n] eq "<") {
+	    if (defined $allocated{$allocaddr}) {
+		undef $allocated{$allocaddr};
+		undef $addrwas{$allocaddr};
+	    } else {
+		printf ("- %#0@XXX@x Realloc %d was never alloc'd %s\n",
+			hex($allocaddr), $nr, &location($where));
+	    }
+	    last SWITCH;
+	}
+	if ($cols[$n] eq ">") {
+	    if (defined $allocated{$allocaddr}) {
+		printf ("+ %#0@XXX@x Realloc %d duplicate: %#010x %s %s\n",
+			hex($allocaddr), $nr, $allocated{$allocaddr},
+			&location($addrwas{$allocaddr}), &location($where));
+	    } else {
+		$allocated{$allocaddr}=$howmuch;
+		$addrwas{$allocaddr}=$where;
+	    }
+	    last SWITCH;
+	}
+	if ($cols[$n] eq "=") {
+	    # Ignore "= Start".
+	    last SWITCH;
+	}
+	if ($cols[$n] eq "!") {
+	    # Ignore failed realloc for now.
+	    last SWITCH;
+	}
+    }
+}
+close (DATA);
+
+# Now print all remaining entries.
+@addrs= keys %allocated;
+$anything=0;
+if ($#addrs >= 0) {
+    foreach $addr (sort @addrs) {
+	if (defined $allocated{$addr}) {
+	    if ($anything == 0) {
+		print "\nMemory not freed:\n-----------------\n";
+		print ' ' x (@XXX@ - 7), "Address     Size     Caller\n";
+		$anything=1;
+	    }
+	    printf ("%#0@XXX@x %#8x  at %s\n", hex($addr), $allocated{$addr},
+		    &location($addrwas{$addr}));
+	}
+    }
+}
+print "No memory leaks.\n" if ($anything == 0);
+
+exit $anything != 0;
diff --git a/REORG.TODO/malloc/obstack.c b/REORG.TODO/malloc/obstack.c
new file mode 100644
index 0000000000..4ac8938c73
--- /dev/null
+++ b/REORG.TODO/malloc/obstack.c
@@ -0,0 +1,423 @@
+/* obstack.c - subroutines used implicitly by object stack macros
+   Copyright (C) 1988-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+
+#ifdef _LIBC
+# include <obstack.h>
+# include <shlib-compat.h>
+#else
+# include <config.h>
+# include "obstack.h"
+#endif
+
+/* NOTE BEFORE MODIFYING THIS FILE: This version number must be
+   incremented whenever callers compiled using an old obstack.h can no
+   longer properly call the functions in this obstack.c.  */
+#define OBSTACK_INTERFACE_VERSION 1
+
+/* Comment out all this code if we are using the GNU C Library, and are not
+   actually compiling the library itself, and the installed library
+   supports the same library interface we do.  This code is part of the GNU
+   C Library, but also included in many other GNU distributions.  Compiling
+   and linking in this code is a waste when using the GNU C library
+   (especially if it is a shared library).  Rather than having every GNU
+   program understand 'configure --with-gnu-libc' and omit the object
+   files, it is simpler to just do this in the source for each such file.  */
+
+#include <stdio.h>              /* Random thing to get __GNU_LIBRARY__.  */
+#if !defined _LIBC && defined __GNU_LIBRARY__ && __GNU_LIBRARY__ > 1
+# include <gnu-versions.h>
+# if _GNU_OBSTACK_INTERFACE_VERSION == OBSTACK_INTERFACE_VERSION
+#  define ELIDE_CODE
+# endif
+#endif
+
+#include <stddef.h>
+
+#ifndef ELIDE_CODE
+
+
+# include <stdint.h>
+
+/* Determine default alignment.  */
+union fooround
+{
+  uintmax_t i;
+  long double d;
+  void *p;
+};
+struct fooalign
+{
+  char c;
+  union fooround u;
+};
+/* If malloc were really smart, it would round addresses to DEFAULT_ALIGNMENT.
+   But in fact it might be less smart and round addresses to as much as
+   DEFAULT_ROUNDING.  So we prepare for it to do that.  */
+enum
+{
+  DEFAULT_ALIGNMENT = offsetof (struct fooalign, u),
+  DEFAULT_ROUNDING = sizeof (union fooround)
+};
+
+/* When we copy a long block of data, this is the unit to do it with.
+   On some machines, copying successive ints does not work;
+   in such a case, redefine COPYING_UNIT to 'long' (if that works)
+   or 'char' as a last resort.  */
+# ifndef COPYING_UNIT
+#  define COPYING_UNIT int
+# endif
+
+
+/* The functions allocating more room by calling 'obstack_chunk_alloc'
+   jump to the handler pointed to by 'obstack_alloc_failed_handler'.
+   This can be set to a user defined function which should either
+   abort gracefully or use longjump - but shouldn't return.  This
+   variable by default points to the internal function
+   'print_and_abort'.  */
+static _Noreturn void print_and_abort (void);
+void (*obstack_alloc_failed_handler) (void) = print_and_abort;
+
+/* Exit value used when 'print_and_abort' is used.  */
+# include <stdlib.h>
+# ifdef _LIBC
+int obstack_exit_failure = EXIT_FAILURE;
+# else
+#  include "exitfail.h"
+#  define obstack_exit_failure exit_failure
+# endif
+
+# ifdef _LIBC
+#  if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+/* A looong time ago (before 1994, anyway; we're not sure) this global variable
+   was used by non-GNU-C macros to avoid multiple evaluation.  The GNU C
+   library still exports it because somebody might use it.  */
+struct obstack *_obstack_compat = 0;
+compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0);
+#  endif
+# endif
+
+/* Define a macro that either calls functions with the traditional malloc/free
+   calling interface, or calls functions with the mmalloc/mfree interface
+   (that adds an extra first argument), based on the state of use_extra_arg.
+   For free, do not use ?:, since some compilers, like the MIPS compilers,
+   do not allow (expr) ? void : void.  */
+
+# define CALL_CHUNKFUN(h, size) \
+  (((h)->use_extra_arg)							      \
+   ? (*(h)->chunkfun)((h)->extra_arg, (size))				      \
+   : (*(struct _obstack_chunk *(*)(long))(h)->chunkfun)((size)))
+
+# define CALL_FREEFUN(h, old_chunk) \
+  do { \
+      if ((h)->use_extra_arg)						      \
+	(*(h)->freefun)((h)->extra_arg, (old_chunk));			      \
+      else								      \
+	(*(void (*)(void *))(h)->freefun)((old_chunk));			      \
+    } while (0)
+
+
+/* Initialize an obstack H for use.  Specify chunk size SIZE (0 means default).
+   Objects start on multiples of ALIGNMENT (0 means use default).
+   CHUNKFUN is the function to use to allocate chunks,
+   and FREEFUN the function to free them.
+
+   Return nonzero if successful, calls obstack_alloc_failed_handler if
+   allocation fails.  */
+
+int
+_obstack_begin (struct obstack *h,
+		int size, int alignment,
+		void *(*chunkfun) (long),
+		void (*freefun) (void *))
+{
+  struct _obstack_chunk *chunk; /* points to new chunk */
+
+  if (alignment == 0)
+    alignment = DEFAULT_ALIGNMENT;
+  if (size == 0)
+    /* Default size is what GNU malloc can fit in a 4096-byte block.  */
+    {
+      /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc.
+	 Use the values for range checking, because if range checking is off,
+	 the extra bytes won't be missed terribly, but if range checking is on
+	 and we used a larger request, a whole extra 4096 bytes would be
+	 allocated.
+
+	 These numbers are irrelevant to the new GNU malloc.  I suspect it is
+	 less sensitive to the size of the request.  */
+      int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
+		    + 4 + DEFAULT_ROUNDING - 1)
+		   & ~(DEFAULT_ROUNDING - 1));
+      size = 4096 - extra;
+    }
+
+  h->chunkfun = (struct _obstack_chunk * (*) (void *, long)) chunkfun;
+  h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
+  h->chunk_size = size;
+  h->alignment_mask = alignment - 1;
+  h->use_extra_arg = 0;
+
+  chunk = h->chunk = CALL_CHUNKFUN (h, h->chunk_size);
+  if (!chunk)
+    (*obstack_alloc_failed_handler) ();
+  h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
+					       alignment - 1);
+  h->chunk_limit = chunk->limit
+    = (char *) chunk + h->chunk_size;
+  chunk->prev = 0;
+  /* The initial chunk now contains no empty object.  */
+  h->maybe_empty_object = 0;
+  h->alloc_failed = 0;
+  return 1;
+}
+
+int
+_obstack_begin_1 (struct obstack *h, int size, int alignment,
+		  void *(*chunkfun) (void *, long),
+		  void (*freefun) (void *, void *),
+		  void *arg)
+{
+  struct _obstack_chunk *chunk; /* points to new chunk */
+
+  if (alignment == 0)
+    alignment = DEFAULT_ALIGNMENT;
+  if (size == 0)
+    /* Default size is what GNU malloc can fit in a 4096-byte block.  */
+    {
+      /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc.
+	 Use the values for range checking, because if range checking is off,
+	 the extra bytes won't be missed terribly, but if range checking is on
+	 and we used a larger request, a whole extra 4096 bytes would be
+	 allocated.
+
+	 These numbers are irrelevant to the new GNU malloc.  I suspect it is
+	 less sensitive to the size of the request.  */
+      int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
+		    + 4 + DEFAULT_ROUNDING - 1)
+		   & ~(DEFAULT_ROUNDING - 1));
+      size = 4096 - extra;
+    }
+
+  h->chunkfun = (struct _obstack_chunk * (*)(void *,long)) chunkfun;
+  h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
+  h->chunk_size = size;
+  h->alignment_mask = alignment - 1;
+  h->extra_arg = arg;
+  h->use_extra_arg = 1;
+
+  chunk = h->chunk = CALL_CHUNKFUN (h, h->chunk_size);
+  if (!chunk)
+    (*obstack_alloc_failed_handler) ();
+  h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
+					       alignment - 1);
+  h->chunk_limit = chunk->limit
+    = (char *) chunk + h->chunk_size;
+  chunk->prev = 0;
+  /* The initial chunk now contains no empty object.  */
+  h->maybe_empty_object = 0;
+  h->alloc_failed = 0;
+  return 1;
+}
+
+/* Allocate a new current chunk for the obstack *H
+   on the assumption that LENGTH bytes need to be added
+   to the current object, or a new object of length LENGTH allocated.
+   Copies any partial object from the end of the old chunk
+   to the beginning of the new one.  */
+
+void
+_obstack_newchunk (struct obstack *h, int length)
+{
+  struct _obstack_chunk *old_chunk = h->chunk;
+  struct _obstack_chunk *new_chunk;
+  long new_size;
+  long obj_size = h->next_free - h->object_base;
+  long i;
+  long already;
+  char *object_base;
+
+  /* Compute size for new chunk.  */
+  new_size = (obj_size + length) + (obj_size >> 3) + h->alignment_mask + 100;
+  if (new_size < h->chunk_size)
+    new_size = h->chunk_size;
+
+  /* Allocate and initialize the new chunk.  */
+  new_chunk = CALL_CHUNKFUN (h, new_size);
+  if (!new_chunk)
+    (*obstack_alloc_failed_handler)();
+  h->chunk = new_chunk;
+  new_chunk->prev = old_chunk;
+  new_chunk->limit = h->chunk_limit = (char *) new_chunk + new_size;
+
+  /* Compute an aligned object_base in the new chunk */
+  object_base =
+    __PTR_ALIGN ((char *) new_chunk, new_chunk->contents, h->alignment_mask);
+
+  /* Move the existing object to the new chunk.
+     Word at a time is fast and is safe if the object
+     is sufficiently aligned.  */
+  if (h->alignment_mask + 1 >= DEFAULT_ALIGNMENT)
+    {
+      for (i = obj_size / sizeof (COPYING_UNIT) - 1;
+	   i >= 0; i--)
+	((COPYING_UNIT *) object_base)[i]
+	  = ((COPYING_UNIT *) h->object_base)[i];
+      /* We used to copy the odd few remaining bytes as one extra COPYING_UNIT,
+	 but that can cross a page boundary on a machine
+	 which does not do strict alignment for COPYING_UNITS.  */
+      already = obj_size / sizeof (COPYING_UNIT) * sizeof (COPYING_UNIT);
+    }
+  else
+    already = 0;
+  /* Copy remaining bytes one by one.  */
+  for (i = already; i < obj_size; i++)
+    object_base[i] = h->object_base[i];
+
+  /* If the object just copied was the only data in OLD_CHUNK,
+     free that chunk and remove it from the chain.
+     But not if that chunk might contain an empty object.  */
+  if (!h->maybe_empty_object
+      && (h->object_base
+	  == __PTR_ALIGN ((char *) old_chunk, old_chunk->contents,
+			  h->alignment_mask)))
+    {
+      new_chunk->prev = old_chunk->prev;
+      CALL_FREEFUN (h, old_chunk);
+    }
+
+  h->object_base = object_base;
+  h->next_free = h->object_base + obj_size;
+  /* The new chunk certainly contains no empty object yet.  */
+  h->maybe_empty_object = 0;
+}
+# ifdef _LIBC
+libc_hidden_def (_obstack_newchunk)
+# endif
+
+/* Return nonzero if object OBJ has been allocated from obstack H.
+   This is here for debugging.
+   If you use it in a program, you are probably losing.  */
+
+/* Suppress -Wmissing-prototypes warning.  We don't want to declare this in
+   obstack.h because it is just for debugging.  */
+int _obstack_allocated_p (struct obstack *h, void *obj) __attribute_pure__;
+
+int
+_obstack_allocated_p (struct obstack *h, void *obj)
+{
+  struct _obstack_chunk *lp;    /* below addr of any objects in this chunk */
+  struct _obstack_chunk *plp;   /* point to previous chunk if any */
+
+  lp = (h)->chunk;
+  /* We use >= rather than > since the object cannot be exactly at
+     the beginning of the chunk but might be an empty object exactly
+     at the end of an adjacent chunk.  */
+  while (lp != 0 && ((void *) lp >= obj || (void *) (lp)->limit < obj))
+    {
+      plp = lp->prev;
+      lp = plp;
+    }
+  return lp != 0;
+}
+
+/* Free objects in obstack H, including OBJ and everything allocated
+   more recently than OBJ.  If OBJ is zero, free everything in H.  */
+
+# undef obstack_free
+
+void
+__obstack_free (struct obstack *h, void *obj)
+{
+  struct _obstack_chunk *lp;    /* below addr of any objects in this chunk */
+  struct _obstack_chunk *plp;   /* point to previous chunk if any */
+
+  lp = h->chunk;
+  /* We use >= because there cannot be an object at the beginning of a chunk.
+     But there can be an empty object at that address
+     at the end of another chunk.  */
+  while (lp != 0 && ((void *) lp >= obj || (void *) (lp)->limit < obj))
+    {
+      plp = lp->prev;
+      CALL_FREEFUN (h, lp);
+      lp = plp;
+      /* If we switch chunks, we can't tell whether the new current
+	 chunk contains an empty object, so assume that it may.  */
+      h->maybe_empty_object = 1;
+    }
+  if (lp)
+    {
+      h->object_base = h->next_free = (char *) (obj);
+      h->chunk_limit = lp->limit;
+      h->chunk = lp;
+    }
+  else if (obj != 0)
+    /* obj is not in any of the chunks! */
+    abort ();
+}
+
+# ifdef _LIBC
+/* Older versions of libc used a function _obstack_free intended to be
+   called by non-GCC compilers.  */
+strong_alias (obstack_free, _obstack_free)
+# endif
+
+int
+_obstack_memory_used (struct obstack *h)
+{
+  struct _obstack_chunk *lp;
+  int nbytes = 0;
+
+  for (lp = h->chunk; lp != 0; lp = lp->prev)
+    {
+      nbytes += lp->limit - (char *) lp;
+    }
+  return nbytes;
+}
+
+/* Define the error handler.  */
+# ifdef _LIBC
+#  include <libintl.h>
+# else
+#  include "gettext.h"
+# endif
+# ifndef _
+#  define _(msgid) gettext (msgid)
+# endif
+
+# ifdef _LIBC
+#  include <libio/iolibio.h>
+# endif
+
+static _Noreturn void
+print_and_abort (void)
+{
+  /* Don't change any of these strings.  Yes, it would be possible to add
+     the newline to the string and use fputs or so.  But this must not
+     happen because the "memory exhausted" message appears in other places
+     like this and the translation should be reused instead of creating
+     a very similar string which requires a separate translation.  */
+# ifdef _LIBC
+  (void) __fxprintf (NULL, "%s\n", _("memory exhausted"));
+# else
+  fprintf (stderr, "%s\n", _("memory exhausted"));
+# endif
+  exit (obstack_exit_failure);
+}
+
+#endif  /* !ELIDE_CODE */
diff --git a/REORG.TODO/malloc/obstack.h b/REORG.TODO/malloc/obstack.h
new file mode 100644
index 0000000000..538837eaa6
--- /dev/null
+++ b/REORG.TODO/malloc/obstack.h
@@ -0,0 +1,515 @@
+/* obstack.h - object stack macros
+   Copyright (C) 1988-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Summary:
+
+   All the apparent functions defined here are macros. The idea
+   is that you would use these pre-tested macros to solve a
+   very specific set of problems, and they would run fast.
+   Caution: no side-effects in arguments please!! They may be
+   evaluated MANY times!!
+
+   These macros operate a stack of objects.  Each object starts life
+   small, and may grow to maturity.  (Consider building a word syllable
+   by syllable.)  An object can move while it is growing.  Once it has
+   been "finished" it never changes address again.  So the "top of the
+   stack" is typically an immature growing object, while the rest of the
+   stack is of mature, fixed size and fixed address objects.
+
+   These routines grab large chunks of memory, using a function you
+   supply, called 'obstack_chunk_alloc'.  On occasion, they free chunks,
+   by calling 'obstack_chunk_free'.  You must define them and declare
+   them before using any obstack macros.
+
+   Each independent stack is represented by a 'struct obstack'.
+   Each of the obstack macros expects a pointer to such a structure
+   as the first argument.
+
+   One motivation for this package is the problem of growing char strings
+   in symbol tables.  Unless you are "fascist pig with a read-only mind"
+   --Gosper's immortal quote from HAKMEM item 154, out of context--you
+   would not like to put any arbitrary upper limit on the length of your
+   symbols.
+
+   In practice this often means you will build many short symbols and a
+   few long symbols.  At the time you are reading a symbol you don't know
+   how long it is.  One traditional method is to read a symbol into a
+   buffer, realloc()ating the buffer every time you try to read a symbol
+   that is longer than the buffer.  This is beaut, but you still will
+   want to copy the symbol from the buffer to a more permanent
+   symbol-table entry say about half the time.
+
+   With obstacks, you can work differently.  Use one obstack for all symbol
+   names.  As you read a symbol, grow the name in the obstack gradually.
+   When the name is complete, finalize it.  Then, if the symbol exists already,
+   free the newly read name.
+
+   The way we do this is to take a large chunk, allocating memory from
+   low addresses.  When you want to build a symbol in the chunk you just
+   add chars above the current "high water mark" in the chunk.  When you
+   have finished adding chars, because you got to the end of the symbol,
+   you know how long the chars are, and you can create a new object.
+   Mostly the chars will not burst over the highest address of the chunk,
+   because you would typically expect a chunk to be (say) 100 times as
+   long as an average object.
+
+   In case that isn't clear, when we have enough chars to make up
+   the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed)
+   so we just point to it where it lies.  No moving of chars is
+   needed and this is the second win: potentially long strings need
+   never be explicitly shuffled. Once an object is formed, it does not
+   change its address during its lifetime.
+
+   When the chars burst over a chunk boundary, we allocate a larger
+   chunk, and then copy the partly formed object from the end of the old
+   chunk to the beginning of the new larger chunk.  We then carry on
+   accreting characters to the end of the object as we normally would.
+
+   A special macro is provided to add a single char at a time to a
+   growing object.  This allows the use of register variables, which
+   break the ordinary 'growth' macro.
+
+   Summary:
+	We allocate large chunks.
+	We carve out one object at a time from the current chunk.
+	Once carved, an object never moves.
+	We are free to append data of any size to the currently
+	  growing object.
+	Exactly one object is growing in an obstack at any one time.
+	You can run one obstack per control block.
+	You may have as many control blocks as you dare.
+	Because of the way we do it, you can "unwind" an obstack
+	  back to a previous state. (You may remove objects much
+	  as you would with a stack.)
+ */
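+
+/* A minimal usage sketch (assuming the conventional choice of mapping
+   the chunk functions to malloc/free; any allocator with matching
+   signatures works the same way):
+
+       #include "obstack.h"
+       #include <stdlib.h>
+
+       #define obstack_chunk_alloc malloc
+       #define obstack_chunk_free free
+
+       struct obstack ob;
+       obstack_init (&ob);
+       obstack_grow (&ob, "hello", 5);    // grow the current object
+       obstack_1grow (&ob, '\0');         // terminate it
+       char *s = obstack_finish (&ob);    // s now has a fixed address
+       // (use s here)
+       obstack_free (&ob, NULL);          // release everything in ob;
+                                          // re-run obstack_init to reuse
+*/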
+
+
+/* Don't do the contents of this file more than once.  */
+
+#ifndef _OBSTACK_H
+#define _OBSTACK_H 1
+
+/* We need the type of a pointer subtraction.  If __PTRDIFF_TYPE__ is
+   defined, as with GNU C, use that; that way we don't pollute the
+   namespace with <stddef.h>'s symbols.  Otherwise, include <stddef.h>
+   and use ptrdiff_t.  */
+
+#ifdef __PTRDIFF_TYPE__
+# define PTR_INT_TYPE __PTRDIFF_TYPE__
+#else
+# include <stddef.h>
+# define PTR_INT_TYPE ptrdiff_t
+#endif
+
+/* If B is the base of an object addressed by P, return the result of
+   aligning P to the next multiple of A + 1.  B and P must be of type
+   char *.  A + 1 must be a power of 2.  */
+
+#define __BPTR_ALIGN(B, P, A) ((B) + (((P) - (B) + (A)) & ~(A)))
+
+/* Similar to __BPTR_ALIGN (B, P, A), except optimize the common case
+   where pointers can be converted to integers, aligned as integers,
+   and converted back again.  If PTR_INT_TYPE is narrower than a
+   pointer (e.g., the AS/400), play it safe and compute the alignment
+   relative to B.  Otherwise, use the faster strategy of computing the
+   alignment relative to 0.  */
+
+#define __PTR_ALIGN(B, P, A)						      \
+  __BPTR_ALIGN (sizeof (PTR_INT_TYPE) < sizeof (void *) ? (B) : (char *) 0, \
+		P, A)
+
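+/* A worked example of the arithmetic above (illustrative values): with
+   alignment mask A = 7 (8-byte alignment), B = 0x1000 and P = 0x1003,
+   __BPTR_ALIGN gives B + (((0x1003 - 0x1000) + 7) & ~7) = B + 8 =
+   0x1008, the next multiple of 8 at or beyond P.  If P is already
+   aligned, say P = 0x1008, the same expression returns P unchanged.  */
+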
+#include <string.h>
+
+#ifndef __attribute_pure__
+# define __attribute_pure__ _GL_ATTRIBUTE_PURE
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct _obstack_chunk           /* Lives at front of each chunk. */
+{
+  char *limit;                  /* 1 past end of this chunk */
+  struct _obstack_chunk *prev;  /* address of prior chunk or NULL */
+  char contents[4];             /* objects begin here */
+};
+
+struct obstack          /* control current object in current chunk */
+{
+  long chunk_size;              /* preferred size to allocate chunks in */
+  struct _obstack_chunk *chunk; /* address of current struct obstack_chunk */
+  char *object_base;            /* address of object we are building */
+  char *next_free;              /* where to add next char to current object */
+  char *chunk_limit;            /* address of char after current chunk */
+  union
+  {
+    PTR_INT_TYPE tempint;
+    void *tempptr;
+  } temp;                       /* Temporary for some macros.  */
+  int alignment_mask;           /* Mask of alignment for each object. */
+  /* These prototypes vary based on 'use_extra_arg', and we use
+     casts to the prototypeless function type in all assignments,
+     but having prototypes here quiets -Wstrict-prototypes.  */
+  struct _obstack_chunk *(*chunkfun) (void *, long);
+  void (*freefun) (void *, struct _obstack_chunk *);
+  void *extra_arg;              /* first arg for chunk alloc/dealloc funcs */
+  unsigned use_extra_arg : 1;     /* chunk alloc/dealloc funcs take extra arg */
+  unsigned maybe_empty_object : 1; /* There is a possibility that the current
+				      chunk contains a zero-length object.  This
+				      prevents freeing the chunk if we allocate
+				      a bigger chunk to replace it. */
+  unsigned alloc_failed : 1;      /* No longer used, as we now call the failed
+				     handler on error, but retained for binary
+				     compatibility.  */
+};
+
+/* Declare the external functions we use; they are in obstack.c.  */
+
+extern void _obstack_newchunk (struct obstack *, int);
+extern int _obstack_begin (struct obstack *, int, int,
+			   void *(*)(long), void (*)(void *));
+extern int _obstack_begin_1 (struct obstack *, int, int,
+			     void *(*)(void *, long),
+			     void (*)(void *, void *), void *);
+extern int _obstack_memory_used (struct obstack *) __attribute_pure__;
+
+/* The default name of the function for freeing a chunk is 'obstack_free',
+   but gnulib users can override this by defining '__obstack_free'.  */
+#ifndef __obstack_free
+# define __obstack_free obstack_free
+#endif
+extern void __obstack_free (struct obstack *, void *);
+
+
+/* Error handler called when 'obstack_chunk_alloc' failed to allocate
+   more memory.  This can be set to a user-defined function which
+   should either abort gracefully or use longjmp - but shouldn't
+   return.  The default action is to print a message and abort.  */
+extern void (*obstack_alloc_failed_handler) (void);
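+
+/* For example (a sketch only; the handler name is made up), a program
+   can install its own handler before touching any obstack:
+
+       static void
+       my_obstack_oom (void)
+       {
+         fputs ("virtual memory exhausted\n", stderr);
+         exit (1);
+       }
+
+       // early in main:
+       obstack_alloc_failed_handler = my_obstack_oom;
+*/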
+
+/* Exit value used when 'print_and_abort' is used.  */
+extern int obstack_exit_failure;
+
+/* Pointer to beginning of object being allocated or to be allocated next.
+   Note that this might not be the final address of the object
+   because a new chunk might be needed to hold the final size.  */
+
+#define obstack_base(h) ((void *) (h)->object_base)
+
+/* Size for allocating ordinary chunks.  */
+
+#define obstack_chunk_size(h) ((h)->chunk_size)
+
+/* Pointer to next byte not yet allocated in current chunk.  */
+
+#define obstack_next_free(h)    ((h)->next_free)
+
+/* Mask specifying low bits that should be clear in address of an object.  */
+
+#define obstack_alignment_mask(h) ((h)->alignment_mask)
+
+/* To prevent prototype warnings provide complete argument list.  */
+#define obstack_init(h)							      \
+  _obstack_begin ((h), 0, 0,						      \
+		  (void *(*)(long))obstack_chunk_alloc,			      \
+		  (void (*)(void *))obstack_chunk_free)
+
+#define obstack_begin(h, size)						      \
+  _obstack_begin ((h), (size), 0,					      \
+		  (void *(*)(long))obstack_chunk_alloc,			      \
+		  (void (*)(void *))obstack_chunk_free)
+
+#define obstack_specify_allocation(h, size, alignment, chunkfun, freefun)  \
+  _obstack_begin ((h), (size), (alignment),				      \
+		  (void *(*)(long))(chunkfun),				      \
+		  (void (*)(void *))(freefun))
+
+#define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \
+  _obstack_begin_1 ((h), (size), (alignment),				      \
+		    (void *(*)(void *, long))(chunkfun),		      \
+		    (void (*)(void *, void *))(freefun), (arg))
+
+#define obstack_chunkfun(h, newchunkfun) \
+  ((h)->chunkfun = (struct _obstack_chunk *(*)(void *, long))(newchunkfun))
+
+#define obstack_freefun(h, newfreefun) \
+  ((h)->freefun = (void (*)(void *, struct _obstack_chunk *))(newfreefun))
+
+#define obstack_1grow_fast(h, achar) (*((h)->next_free)++ = (achar))
+
+#define obstack_blank_fast(h, n) ((h)->next_free += (n))
+
+#define obstack_memory_used(h) _obstack_memory_used (h)
+
+#if defined __GNUC__
+# if ! (2 < __GNUC__ + (8 <= __GNUC_MINOR__))
+#  define __extension__
+# endif
+
+/* For GNU C, if not -traditional,
+   we can define these macros to compute all args only once
+   without using a global variable.
+   Also, we can avoid using the 'temp' slot, to make faster code.  */
+
+# define obstack_object_size(OBSTACK)					      \
+  __extension__								      \
+    ({ struct obstack const *__o = (OBSTACK);				      \
+       (unsigned) (__o->next_free - __o->object_base); })
+
+# define obstack_room(OBSTACK)						      \
+  __extension__								      \
+    ({ struct obstack const *__o = (OBSTACK);				      \
+       (unsigned) (__o->chunk_limit - __o->next_free); })
+
+# define obstack_make_room(OBSTACK, length)				      \
+  __extension__								      \
+    ({ struct obstack *__o = (OBSTACK);					      \
+       int __len = (length);						      \
+       if (__o->chunk_limit - __o->next_free < __len)			      \
+	 _obstack_newchunk (__o, __len);				      \
+       (void) 0; })
+
+# define obstack_empty_p(OBSTACK)					      \
+  __extension__								      \
+    ({ struct obstack const *__o = (OBSTACK);				      \
+       (__o->chunk->prev == 0						      \
+	&& __o->next_free == __PTR_ALIGN ((char *) __o->chunk,		      \
+					  __o->chunk->contents,		      \
+					  __o->alignment_mask)); })
+
+# define obstack_grow(OBSTACK, where, length)				      \
+  __extension__								      \
+    ({ struct obstack *__o = (OBSTACK);					      \
+       int __len = (length);						      \
+       if (__o->next_free + __len > __o->chunk_limit)			      \
+	 _obstack_newchunk (__o, __len);				      \
+       memcpy (__o->next_free, where, __len);				      \
+       __o->next_free += __len;						      \
+       (void) 0; })
+
+# define obstack_grow0(OBSTACK, where, length)				      \
+  __extension__								      \
+    ({ struct obstack *__o = (OBSTACK);					      \
+       int __len = (length);						      \
+       if (__o->next_free + __len + 1 > __o->chunk_limit)		      \
+	 _obstack_newchunk (__o, __len + 1);				      \
+       memcpy (__o->next_free, where, __len);				      \
+       __o->next_free += __len;						      \
+       *(__o->next_free)++ = 0;						      \
+       (void) 0; })
+
+# define obstack_1grow(OBSTACK, datum)					      \
+  __extension__								      \
+    ({ struct obstack *__o = (OBSTACK);					      \
+       if (__o->next_free + 1 > __o->chunk_limit)			      \
+	 _obstack_newchunk (__o, 1);					      \
+       obstack_1grow_fast (__o, datum);					      \
+       (void) 0; })
+
+/* These assume that the obstack alignment is good enough for pointers
+   or ints, and that the data added so far to the current object
+   shares that much alignment.  */
+
+# define obstack_ptr_grow(OBSTACK, datum)				      \
+  __extension__								      \
+    ({ struct obstack *__o = (OBSTACK);					      \
+       if (__o->next_free + sizeof (void *) > __o->chunk_limit)		      \
+	 _obstack_newchunk (__o, sizeof (void *));			      \
+       obstack_ptr_grow_fast (__o, datum); })
+
+# define obstack_int_grow(OBSTACK, datum)				      \
+  __extension__								      \
+    ({ struct obstack *__o = (OBSTACK);					      \
+       if (__o->next_free + sizeof (int) > __o->chunk_limit)		      \
+	 _obstack_newchunk (__o, sizeof (int));				      \
+       obstack_int_grow_fast (__o, datum); })
+
+# define obstack_ptr_grow_fast(OBSTACK, aptr)				      \
+  __extension__								      \
+    ({ struct obstack *__o1 = (OBSTACK);				      \
+       void *__p1 = __o1->next_free;					      \
+       *(const void **) __p1 = (aptr);					      \
+       __o1->next_free += sizeof (const void *);			      \
+       (void) 0; })
+
+# define obstack_int_grow_fast(OBSTACK, aint)				      \
+  __extension__								      \
+    ({ struct obstack *__o1 = (OBSTACK);				      \
+       void *__p1 = __o1->next_free;					      \
+       *(int *) __p1 = (aint);						      \
+       __o1->next_free += sizeof (int);					      \
+       (void) 0; })
+
+# define obstack_blank(OBSTACK, length)					      \
+  __extension__								      \
+    ({ struct obstack *__o = (OBSTACK);					      \
+       int __len = (length);						      \
+       if (__o->chunk_limit - __o->next_free < __len)			      \
+	 _obstack_newchunk (__o, __len);				      \
+       obstack_blank_fast (__o, __len);					      \
+       (void) 0; })
+
+# define obstack_alloc(OBSTACK, length)					      \
+  __extension__								      \
+    ({ struct obstack *__h = (OBSTACK);					      \
+       obstack_blank (__h, (length));					      \
+       obstack_finish (__h); })
+
+# define obstack_copy(OBSTACK, where, length)				      \
+  __extension__								      \
+    ({ struct obstack *__h = (OBSTACK);					      \
+       obstack_grow (__h, (where), (length));				      \
+       obstack_finish (__h); })
+
+# define obstack_copy0(OBSTACK, where, length)				      \
+  __extension__								      \
+    ({ struct obstack *__h = (OBSTACK);					      \
+       obstack_grow0 (__h, (where), (length));				      \
+       obstack_finish (__h); })
+
+/* The local variable is named __o1 to avoid a name conflict
+   when obstack_blank is called.  */
+# define obstack_finish(OBSTACK)					      \
+  __extension__								      \
+    ({ struct obstack *__o1 = (OBSTACK);				      \
+       void *__value = (void *) __o1->object_base;			      \
+       if (__o1->next_free == __value)					      \
+	 __o1->maybe_empty_object = 1;					      \
+       __o1->next_free							      \
+	 = __PTR_ALIGN (__o1->object_base, __o1->next_free,		      \
+			__o1->alignment_mask);				      \
+       if (__o1->next_free - (char *) __o1->chunk			      \
+	   > __o1->chunk_limit - (char *) __o1->chunk)			      \
+	 __o1->next_free = __o1->chunk_limit;				      \
+       __o1->object_base = __o1->next_free;				      \
+       __value; })
+
+# define obstack_free(OBSTACK, OBJ)					      \
+  __extension__								      \
+    ({ struct obstack *__o = (OBSTACK);					      \
+       void *__obj = (OBJ);						      \
+       if (__obj > (void *) __o->chunk && __obj < (void *) __o->chunk_limit)  \
+	 __o->next_free = __o->object_base = (char *) __obj;		      \
+       else (__obstack_free) (__o, __obj); })
+
+#else /* not __GNUC__ */
+
+# define obstack_object_size(h) \
+  (unsigned) ((h)->next_free - (h)->object_base)
+
+# define obstack_room(h)						      \
+  (unsigned) ((h)->chunk_limit - (h)->next_free)
+
+# define obstack_empty_p(h) \
+  ((h)->chunk->prev == 0						      \
+   && (h)->next_free == __PTR_ALIGN ((char *) (h)->chunk,		      \
+				     (h)->chunk->contents,		      \
+				     (h)->alignment_mask))
+
+/* Note that the call to _obstack_newchunk is enclosed in (..., 0)
+   so that we can avoid having void expressions
+   in the arms of the conditional expression.
+   Casting the third operand to void was tried before,
+   but some compilers won't accept it.  */
+
+# define obstack_make_room(h, length)					      \
+  ((h)->temp.tempint = (length),					      \
+   (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit)		      \
+   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0))
+
+# define obstack_grow(h, where, length)					      \
+  ((h)->temp.tempint = (length),					      \
+   (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit)		      \
+   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0),		      \
+   memcpy ((h)->next_free, where, (h)->temp.tempint),			      \
+   (h)->next_free += (h)->temp.tempint)
+
+# define obstack_grow0(h, where, length)				      \
+  ((h)->temp.tempint = (length),					      \
+   (((h)->next_free + (h)->temp.tempint + 1 > (h)->chunk_limit)		      \
+   ? (_obstack_newchunk ((h), (h)->temp.tempint + 1), 0) : 0),		      \
+   memcpy ((h)->next_free, where, (h)->temp.tempint),			      \
+   (h)->next_free += (h)->temp.tempint,					      \
+   *((h)->next_free)++ = 0)
+
+# define obstack_1grow(h, datum)					      \
+  ((((h)->next_free + 1 > (h)->chunk_limit)				      \
+    ? (_obstack_newchunk ((h), 1), 0) : 0),				      \
+   obstack_1grow_fast (h, datum))
+
+# define obstack_ptr_grow(h, datum)					      \
+  ((((h)->next_free + sizeof (char *) > (h)->chunk_limit)		      \
+    ? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0),		      \
+   obstack_ptr_grow_fast (h, datum))
+
+# define obstack_int_grow(h, datum)					      \
+  ((((h)->next_free + sizeof (int) > (h)->chunk_limit)			      \
+    ? (_obstack_newchunk ((h), sizeof (int)), 0) : 0),			      \
+   obstack_int_grow_fast (h, datum))
+
+# define obstack_ptr_grow_fast(h, aptr)					      \
+  (((const void **) ((h)->next_free += sizeof (void *)))[-1] = (aptr))
+
+# define obstack_int_grow_fast(h, aint)					      \
+  (((int *) ((h)->next_free += sizeof (int)))[-1] = (aint))
+
+# define obstack_blank(h, length)					      \
+  ((h)->temp.tempint = (length),					      \
+   (((h)->chunk_limit - (h)->next_free < (h)->temp.tempint)		      \
+   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0),		      \
+   obstack_blank_fast (h, (h)->temp.tempint))
+
+# define obstack_alloc(h, length)					      \
+  (obstack_blank ((h), (length)), obstack_finish ((h)))
+
+# define obstack_copy(h, where, length)					      \
+  (obstack_grow ((h), (where), (length)), obstack_finish ((h)))
+
+# define obstack_copy0(h, where, length)				      \
+  (obstack_grow0 ((h), (where), (length)), obstack_finish ((h)))
+
+# define obstack_finish(h)						      \
+  (((h)->next_free == (h)->object_base					      \
+    ? (((h)->maybe_empty_object = 1), 0)				      \
+    : 0),								      \
+   (h)->temp.tempptr = (h)->object_base,				      \
+   (h)->next_free							      \
+     = __PTR_ALIGN ((h)->object_base, (h)->next_free,			      \
+		    (h)->alignment_mask),				      \
+   (((h)->next_free - (char *) (h)->chunk				      \
+     > (h)->chunk_limit - (char *) (h)->chunk)				      \
+   ? ((h)->next_free = (h)->chunk_limit) : 0),				      \
+   (h)->object_base = (h)->next_free,					      \
+   (h)->temp.tempptr)
+
+# define obstack_free(h, obj)						      \
+  ((h)->temp.tempint = (char *) (obj) - (char *) (h)->chunk,		      \
+   ((((h)->temp.tempint > 0						      \
+      && (h)->temp.tempint < (h)->chunk_limit - (char *) (h)->chunk))	      \
+    ? (void) ((h)->next_free = (h)->object_base				      \
+	      = (h)->temp.tempint + (char *) (h)->chunk)		      \
+    : (__obstack_free) (h, (h)->temp.tempint + (char *) (h)->chunk)))
+
+#endif /* not __GNUC__ */
+
+#ifdef __cplusplus
+}       /* C++ */
+#endif
+
+#endif /* obstack.h */
diff --git a/REORG.TODO/malloc/reallocarray.c b/REORG.TODO/malloc/reallocarray.c
new file mode 100644
index 0000000000..07562c30c9
--- /dev/null
+++ b/REORG.TODO/malloc/reallocarray.c
@@ -0,0 +1,37 @@
+/* Change the size of an allocated block.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <malloc.h>
+#include <malloc/malloc-internal.h>
+
+void *
+__libc_reallocarray (void *optr, size_t nmemb, size_t elem_size)
+{
+  size_t bytes;
+  if (check_mul_overflow_size_t (nmemb, elem_size, &bytes))
+    {
+      __set_errno (ENOMEM);
+      return 0;
+    }
+  else
+    return realloc (optr, bytes);
+}
+libc_hidden_def (__libc_reallocarray)
+
+weak_alias (__libc_reallocarray, reallocarray)
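+
+/* A minimal usage sketch of the exported interface (struct and
+   variable names are illustrative only):
+
+       size_t count = 100;
+       struct item *vec = reallocarray (NULL, count, sizeof *vec);
+       // fill vec, then later grow it:
+       struct item *bigger = reallocarray (vec, 2 * count, sizeof *vec);
+       if (bigger == NULL)
+         {
+           // Either the allocation failed or 2 * count elements would
+           // overflow size_t; errno is ENOMEM and vec is still valid.
+         }
+       else
+         {
+           vec = bigger;
+           count *= 2;
+         }
+*/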
diff --git a/REORG.TODO/malloc/scratch_buffer_grow.c b/REORG.TODO/malloc/scratch_buffer_grow.c
new file mode 100644
index 0000000000..22bae506a1
--- /dev/null
+++ b/REORG.TODO/malloc/scratch_buffer_grow.c
@@ -0,0 +1,52 @@
+/* Variable-sized buffer with on-stack default allocation.
+   Copyright (C) 2015-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <scratch_buffer.h>
+#include <errno.h>
+
+bool
+__libc_scratch_buffer_grow (struct scratch_buffer *buffer)
+{
+  void *new_ptr;
+  size_t new_length = buffer->length * 2;
+
+  /* Discard old buffer.  */
+  scratch_buffer_free (buffer);
+
+  /* Check for overflow.  */
+  if (__glibc_likely (new_length >= buffer->length))
+    new_ptr = malloc (new_length);
+  else
+    {
+      __set_errno (ENOMEM);
+      new_ptr = NULL;
+    }
+
+  if (__glibc_unlikely (new_ptr == NULL))
+    {
+      /* Buffer must remain valid to free.  */
+      scratch_buffer_init (buffer);
+      return false;
+    }
+
+  /* Install new heap-based buffer.  */
+  buffer->data = new_ptr;
+  buffer->length = new_length;
+  return true;
+}
+libc_hidden_def (__libc_scratch_buffer_grow);
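+
+/* A sketch of the intended call pattern (get_data and its ERANGE
+   convention stand in for any interface that reports "buffer too
+   small"):
+
+       struct scratch_buffer buf;
+       scratch_buffer_init (&buf);          // starts with the on-stack space
+       while (get_data (buf.data, buf.length) == ERANGE)
+         if (!scratch_buffer_grow (&buf))   // doubles the buffer
+           return ENOMEM;                   // buf remains valid and freeable
+       // use buf.data
+       scratch_buffer_free (&buf);
+*/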
diff --git a/REORG.TODO/malloc/scratch_buffer_grow_preserve.c b/REORG.TODO/malloc/scratch_buffer_grow_preserve.c
new file mode 100644
index 0000000000..18543ef85b
--- /dev/null
+++ b/REORG.TODO/malloc/scratch_buffer_grow_preserve.c
@@ -0,0 +1,63 @@
+/* Variable-sized buffer with on-stack default allocation.
+   Copyright (C) 2015-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <scratch_buffer.h>
+#include <errno.h>
+#include <string.h>
+
+bool
+__libc_scratch_buffer_grow_preserve (struct scratch_buffer *buffer)
+{
+  size_t new_length = 2 * buffer->length;
+  void *new_ptr;
+
+  if (buffer->data == buffer->__space)
+    {
+      /* Move buffer to the heap.  No overflow is possible because
+	 buffer->length describes a small buffer on the stack.  */
+      new_ptr = malloc (new_length);
+      if (new_ptr == NULL)
+	return false;
+      memcpy (new_ptr, buffer->__space, buffer->length);
+    }
+  else
+    {
+      /* Buffer was already on the heap.  Check for overflow.  */
+      if (__glibc_likely (new_length >= buffer->length))
+	new_ptr = realloc (buffer->data, new_length);
+      else
+	{
+	  __set_errno (ENOMEM);
+	  new_ptr = NULL;
+	}
+
+      if (__glibc_unlikely (new_ptr == NULL))
+	{
+	  /* Deallocate, but buffer must remain valid to free.  */
+	  free (buffer->data);
+	  scratch_buffer_init (buffer);
+	  return false;
+	}
+    }
+
+  /* Install new heap-based buffer.  */
+  buffer->data = new_ptr;
+  buffer->length = new_length;
+  return true;
+}
+libc_hidden_def (__libc_scratch_buffer_grow_preserve);
diff --git a/REORG.TODO/malloc/scratch_buffer_set_array_size.c b/REORG.TODO/malloc/scratch_buffer_set_array_size.c
new file mode 100644
index 0000000000..8ab6d9d300
--- /dev/null
+++ b/REORG.TODO/malloc/scratch_buffer_set_array_size.c
@@ -0,0 +1,60 @@
+/* Variable-sized buffer with on-stack default allocation.
+   Copyright (C) 2015-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <scratch_buffer.h>
+#include <errno.h>
+#include <limits.h>
+
+bool
+__libc_scratch_buffer_set_array_size (struct scratch_buffer *buffer,
+				      size_t nelem, size_t size)
+{
+  size_t new_length = nelem * size;
+
+  /* Avoid overflow check if both values are small. */
+  if ((nelem | size) >> (sizeof (size_t) * CHAR_BIT / 2) != 0
+      && nelem != 0 && size != new_length / nelem)
+    {
+      /* Overflow.  Discard the old buffer, but it must remain valid
+	 to free.  */
+      scratch_buffer_free (buffer);
+      scratch_buffer_init (buffer);
+      __set_errno (ENOMEM);
+      return false;
+    }
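+
+  /* For instance, with a 64-bit size_t the shift above is by 32 bits:
+     if both NELEM and SIZE fit in 32 bits, their product cannot exceed
+     SIZE_MAX and the division is skipped.  If instead NELEM and SIZE
+     are both 1 << 33, NEW_LENGTH wraps around to 0, NEW_LENGTH / NELEM
+     differs from SIZE, and the overflow is reported.  */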
+
+  if (new_length <= buffer->length)
+    return true;
+
+  /* Discard old buffer.  */
+  scratch_buffer_free (buffer);
+
+  char *new_ptr = malloc (new_length);
+  if (new_ptr == NULL)
+    {
+      /* Buffer must remain valid to free.  */
+      scratch_buffer_init (buffer);
+      return false;
+    }
+
+  /* Install new heap-based buffer.  */
+  buffer->data = new_ptr;
+  buffer->length = new_length;
+  return true;
+}
+libc_hidden_def (__libc_scratch_buffer_set_array_size);
diff --git a/REORG.TODO/malloc/set-freeres.c b/REORG.TODO/malloc/set-freeres.c
new file mode 100644
index 0000000000..c9cdd819e7
--- /dev/null
+++ b/REORG.TODO/malloc/set-freeres.c
@@ -0,0 +1,49 @@
+/* Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <atomic.h>
+#include <stdlib.h>
+#include <set-hooks.h>
+#include <libc-internal.h>
+
+#include "../libio/libioP.h"
+
+DEFINE_HOOK (__libc_subfreeres, (void));
+
+symbol_set_define (__libc_freeres_ptrs);
+
+void __libc_freeres_fn_section
+__libc_freeres (void)
+{
+  /* This function might be called from several places.  Protect
+     against multiple executions, since running the cleanup twice
+     would be fatal.  */
+  static long int already_called;
+
+  if (!atomic_compare_and_exchange_bool_acq (&already_called, 1, 0))
+    {
+      void *const *p;
+
+      _IO_cleanup ();
+
+      RUN_HOOK (__libc_subfreeres, ());
+
+      for (p = symbol_set_first_element (__libc_freeres_ptrs);
+           !symbol_set_end_p (__libc_freeres_ptrs, p); ++p)
+        free (*p);
+    }
+}
+libc_hidden_def (__libc_freeres)
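+
+/* Elsewhere in the library, memory to be released here is typically
+   registered with helper macros roughly as follows (a sketch; see
+   libc-symbols.h for the exact definitions):
+
+       libc_freeres_ptr (static char *cached_buffer);
+       // freed by the __libc_freeres_ptrs loop above
+
+       libc_freeres_fn (free_my_cache)
+       {
+         // runs from RUN_HOOK (__libc_subfreeres, ())
+       }
+*/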
diff --git a/REORG.TODO/malloc/thread-freeres.c b/REORG.TODO/malloc/thread-freeres.c
new file mode 100644
index 0000000000..675c12dc3a
--- /dev/null
+++ b/REORG.TODO/malloc/thread-freeres.c
@@ -0,0 +1,31 @@
+/* Free resources stored in thread-local variables on thread exit.
+   Copyright (C) 2003-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <stdlib.h>
+#include <libc-internal.h>
+#include <set-hooks.h>
+
+#ifdef _LIBC_REENTRANT
+DEFINE_HOOK (__libc_thread_subfreeres, (void));
+
+void __attribute__ ((section ("__libc_thread_freeres_fn")))
+__libc_thread_freeres (void)
+{
+  RUN_HOOK (__libc_thread_subfreeres, ());
+}
+#endif
diff --git a/REORG.TODO/malloc/tst-calloc.c b/REORG.TODO/malloc/tst-calloc.c
new file mode 100644
index 0000000000..c4236809f3
--- /dev/null
+++ b/REORG.TODO/malloc/tst-calloc.c
@@ -0,0 +1,128 @@
+/* Copyright (C) 2000-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <error.h>
+#include <limits.h>
+#include <malloc.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+
+/* Number of samples per size.  */
+#define N 50000
+
+
+static void
+fixed_test (int size)
+{
+  char *ptrs[N];
+  int i;
+
+  for (i = 0; i < N; ++i)
+    {
+      int j;
+
+      ptrs[i] = (char *) calloc (1, size);
+
+      if (ptrs[i] == NULL)
+	break;
+
+      for (j = 0; j < size; ++j)
+	{
+	  if (ptrs[i][j] != '\0')
+	    error (EXIT_FAILURE, 0,
+		   "byte not cleared (size %d, element %d, byte %d)",
+		   size, i, j);
+	  ptrs[i][j] = '\xff';
+	}
+    }
+
+  while (i-- > 0)
+    free (ptrs[i]);
+}
+
+
+static void
+random_test (void)
+{
+  char *ptrs[N];
+  int i;
+
+  for (i = 0; i < N; ++i)
+    {
+      int j;
+      int n = 1 + random () % 10;
+      int elem = 1 + random () % 100;
+      int size = n * elem;
+
+      ptrs[i] = (char *) calloc (n, elem);
+
+      if (ptrs[i] == NULL)
+	break;
+
+      for (j = 0; j < size; ++j)
+	{
+	  if (ptrs[i][j] != '\0')
+	    error (EXIT_FAILURE, 0,
+		   "byte not cleared (size %d, element %d, byte %d)",
+		   size, i, j);
+	  ptrs[i][j] = '\xff';
+	}
+    }
+
+  while (i-- > 0)
+    free (ptrs[i]);
+}
+
+
+static void
+null_test (void)
+{
+  /* If the size is 0 the result is implementation defined.  Just make
+     sure the program doesn't crash.  */
+  calloc (0, 0);
+  calloc (0, UINT_MAX);
+  calloc (UINT_MAX, 0);
+  calloc (0, ~((size_t) 0));
+  calloc (~((size_t) 0), 0);
+}
+
+
+static int
+do_test (void)
+{
+  /* Allocate blocks with `calloc' and check whether every block is
+     completely cleared.  We first try this for some fixed sizes and
+     then with random sizes.  */
+  fixed_test (15);
+  fixed_test (5);
+  fixed_test (17);
+  fixed_test (6);
+  fixed_test (31);
+  fixed_test (96);
+
+  random_test ();
+
+  null_test ();
+
+  return 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-dynarray-at-fail.c b/REORG.TODO/malloc/tst-dynarray-at-fail.c
new file mode 100644
index 0000000000..bc1a48c5cb
--- /dev/null
+++ b/REORG.TODO/malloc/tst-dynarray-at-fail.c
@@ -0,0 +1,125 @@
+/* Test reporting of out-of-bounds access for dynamic arrays.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "tst-dynarray-shared.h"
+
+#include <signal.h>
+#include <stdint.h>
+#include <string.h>
+#include <support/capture_subprocess.h>
+#include <support/check.h>
+
+/* Run CALLBACK (with INDEX as its argument) in a subprocess and check
+   that the data it writes to standard error equals EXPECTED.  TEST
+   names the subtest in diagnostics.  */
+static void
+check (const char *test, void (*callback) (void *), size_t index,
+       const char *expected)
+{
+  struct support_capture_subprocess result
+    = support_capture_subprocess (callback, &index);
+  if (strcmp (result.err.buffer, expected) != 0)
+    {
+      support_record_failure ();
+      printf ("error: test %s (%zu) unexpected standard error data\n"
+              "  expected: %s\n"
+              "  actual:   %s\n",
+              test, index, expected, result.err.buffer);
+    }
+  TEST_VERIFY (strlen (result.out.buffer) == 0);
+  TEST_VERIFY (WIFSIGNALED (result.status));
+  if (WIFSIGNALED (result.status))
+    TEST_VERIFY (WTERMSIG (result.status) == SIGABRT);
+  support_capture_subprocess_free (&result);
+}
+
+/* Try indexing an empty array.  */
+static void
+test_empty (void *closure)
+{
+  size_t *pindex = closure;
+  struct dynarray_int dyn;
+  dynarray_int_init (&dyn);
+  dynarray_int_at (&dyn, *pindex);
+}
+
+/* Try indexing a one-element array.  */
+static void
+test_one (void *closure)
+{
+  size_t *pindex = closure;
+  struct dynarray_int dyn;
+  dynarray_int_init (&dyn);
+  TEST_VERIFY (dynarray_int_resize (&dyn, 1));
+  dynarray_int_at (&dyn, *pindex);
+}
+
+/* Try indexing a longer array.  */
+static void
+test_many (void *closure)
+{
+  size_t *pindex = closure;
+  struct dynarray_int dyn;
+  dynarray_int_init (&dyn);
+  TEST_VERIFY (dynarray_int_resize (&dyn, 5371));
+  dynarray_int_at (&dyn, *pindex);
+}
+
+/* (size_t) -1 for use in string literals.  */
+#if SIZE_WIDTH == 32
+# define MINUS_1 "4294967295"
+#elif SIZE_WIDTH == 64
+# define MINUS_1 "18446744073709551615"
+#else
+# error "unknown value for SIZE_WIDTH"
+#endif
+
+static int
+do_test (void)
+{
+  TEST_VERIFY (setenv ("LIBC_FATAL_STDERR_", "1", 1) == 0);
+
+  check ("test_empty", test_empty, 0,
+         "Fatal glibc error: array index 0 not less than array length 0\n");
+  check ("test_empty", test_empty, 1,
+         "Fatal glibc error: array index 1 not less than array length 0\n");
+  check ("test_empty", test_empty, -1,
+         "Fatal glibc error: array index " MINUS_1
+         " not less than array length 0\n");
+
+  check ("test_one", test_one, 1,
+         "Fatal glibc error: array index 1 not less than array length 1\n");
+  check ("test_one", test_one, 2,
+         "Fatal glibc error: array index 2 not less than array length 1\n");
+  check ("test_one", test_one, -1,
+         "Fatal glibc error: array index " MINUS_1
+         " not less than array length 1\n");
+
+  check ("test_many", test_many, 5371,
+         "Fatal glibc error: array index 5371"
+         " not less than array length 5371\n");
+  check ("test_many", test_many, 5372,
+         "Fatal glibc error: array index 5372"
+         " not less than array length 5371\n");
+  check ("test_many", test_many, -1,
+         "Fatal glibc error: array index " MINUS_1
+         " not less than array length 5371\n");
+
+  return 0;
+}
+
+#include <support/test-driver.c>
diff --git a/REORG.TODO/malloc/tst-dynarray-fail.c b/REORG.TODO/malloc/tst-dynarray-fail.c
new file mode 100644
index 0000000000..508dbae93e
--- /dev/null
+++ b/REORG.TODO/malloc/tst-dynarray-fail.c
@@ -0,0 +1,418 @@
+/* Test allocation failures with dynamic arrays.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This test is separate from tst-dynarray because it cannot run under
+   valgrind.  */
+
+#include "tst-dynarray-shared.h"
+
+#include <mcheck.h>
+#include <stdio.h>
+#include <support/check.h>
+#include <support/support.h>
+#include <support/xunistd.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <unistd.h>
+
+/* Data structure to fill up the heap.  */
+struct heap_filler
+{
+  struct heap_filler *next;
+};
+
+/* Allocate objects until the heap is full.  */
+static struct heap_filler *
+fill_heap (void)
+{
+  size_t pad = 4096;
+  struct heap_filler *head = NULL;
+  while (true)
+    {
+      struct heap_filler *new_head = malloc (sizeof (*new_head) + pad);
+      if (new_head == NULL)
+        {
+          if (pad > 0)
+            {
+              /* Try again with smaller allocations.  */
+              pad = 0;
+              continue;
+            }
+          else
+            break;
+        }
+      new_head->next = head;
+      head = new_head;
+    }
+  return head;
+}
+
+/* Free the heap-filling allocations, so that we can continue testing
+   and detect memory leaks elsewhere.  */
+static void
+free_fill_heap (struct heap_filler *head)
+{
+  while (head != NULL)
+    {
+      struct heap_filler *next = head->next;
+      free (head);
+      head = next;
+    }
+}
+
+/* Check allocation failures for int arrays (without an element free
+   function).  */
+static void
+test_int_fail (void)
+{
+  /* Exercise failure in add/emplace.
+
+     do_add: Use emplace (false) or add (true) to add elements.
+     do_finalize: Perform finalization at the end (instead of free).  */
+  for (int do_add = 0; do_add < 2; ++do_add)
+    for (int do_finalize = 0; do_finalize < 2; ++do_finalize)
+      {
+        struct dynarray_int dyn;
+        dynarray_int_init (&dyn);
+        size_t count = 0;
+        while (true)
+          {
+            if (do_add)
+              {
+                dynarray_int_add (&dyn, 0);
+                if (dynarray_int_has_failed (&dyn))
+                  break;
+              }
+            else
+              {
+                int *place = dynarray_int_emplace (&dyn);
+                if (place == NULL)
+                  break;
+                TEST_VERIFY_EXIT (!dynarray_int_has_failed (&dyn));
+                *place = 0;
+              }
+            ++count;
+          }
+        printf ("info: %s: failure after %zu elements\n", __func__, count);
+        TEST_VERIFY_EXIT (dynarray_int_has_failed (&dyn));
+        if (do_finalize)
+          {
+            struct int_array result = { (int *) (uintptr_t) -1, -1 };
+            TEST_VERIFY_EXIT (!dynarray_int_finalize (&dyn, &result));
+            TEST_VERIFY_EXIT (result.array == (int *) (uintptr_t) -1);
+            TEST_VERIFY_EXIT (result.length == (size_t) -1);
+          }
+        else
+          dynarray_int_free (&dyn);
+        CHECK_INIT_STATE (int, &dyn);
+      }
+
+  /* Exercise failure in finalize.  */
+  for (int do_add = 0; do_add < 2; ++do_add)
+    {
+      struct dynarray_int dyn;
+      dynarray_int_init (&dyn);
+      for (unsigned int i = 0; i < 10000; ++i)
+        {
+          if (do_add)
+            {
+              dynarray_int_add (&dyn, i);
+              TEST_VERIFY_EXIT (!dynarray_int_has_failed (&dyn));
+            }
+          else
+            {
+              int *place = dynarray_int_emplace (&dyn);
+              TEST_VERIFY_EXIT (place != NULL);
+              *place = i;
+            }
+        }
+      TEST_VERIFY_EXIT (!dynarray_int_has_failed (&dyn));
+      struct heap_filler *heap_filler = fill_heap ();
+      struct int_array result = { (int *) (uintptr_t) -1, -1 };
+      TEST_VERIFY_EXIT (!dynarray_int_finalize (&dyn, &result));
+      TEST_VERIFY_EXIT (result.array == (int *) (uintptr_t) -1);
+      TEST_VERIFY_EXIT (result.length == (size_t) -1);
+      CHECK_INIT_STATE (int, &dyn);
+      free_fill_heap (heap_filler);
+    }
+
+  /* Exercise failure in resize.  */
+  {
+    struct dynarray_int dyn;
+    dynarray_int_init (&dyn);
+    struct heap_filler *heap_filler = fill_heap ();
+    TEST_VERIFY (!dynarray_int_resize (&dyn, 1000));
+    TEST_VERIFY (dynarray_int_has_failed (&dyn));
+    free_fill_heap (heap_filler);
+
+    dynarray_int_init (&dyn);
+    TEST_VERIFY (dynarray_int_resize (&dyn, 1));
+    heap_filler = fill_heap ();
+    TEST_VERIFY (!dynarray_int_resize (&dyn, 1000));
+    TEST_VERIFY (dynarray_int_has_failed (&dyn));
+    free_fill_heap (heap_filler);
+
+    dynarray_int_init (&dyn);
+    TEST_VERIFY (dynarray_int_resize (&dyn, 1000));
+    heap_filler = fill_heap ();
+    TEST_VERIFY (!dynarray_int_resize (&dyn, 2000));
+    TEST_VERIFY (dynarray_int_has_failed (&dyn));
+    free_fill_heap (heap_filler);
+  }
+}
+
+/* Check allocation failures for char * arrays (which automatically
+   free the pointed-to strings).  */
+static void
+test_str_fail (void)
+{
+  /* Exercise failure in add/emplace.
+
+     do_add: Use emplace (false) or add (true) to add elements.
+     do_finalize: Perform finalization at the end (instead of free).  */
+  for (int do_add = 0; do_add < 2; ++do_add)
+    for (int do_finalize = 0; do_finalize < 2; ++do_finalize)
+      {
+        struct dynarray_str dyn;
+        dynarray_str_init (&dyn);
+        size_t count = 0;
+        while (true)
+          {
+            char **place;
+            if (do_add)
+              {
+                dynarray_str_add (&dyn, NULL);
+                if (dynarray_str_has_failed (&dyn))
+                  break;
+                else
+                  place = dynarray_str_at (&dyn, dynarray_str_size (&dyn) - 1);
+              }
+            else
+              {
+                place = dynarray_str_emplace (&dyn);
+                if (place == NULL)
+                  break;
+              }
+            TEST_VERIFY_EXIT (!dynarray_str_has_failed (&dyn));
+            TEST_VERIFY_EXIT (*place == NULL);
+            *place = strdup ("placeholder");
+            if (*place == NULL)
+              {
+                /* Second loop to wait for failure of
+                   dynarray_str_emplace.  */
+                while (true)
+                  {
+                    if (do_add)
+                      {
+                        dynarray_str_add (&dyn, NULL);
+                        if (dynarray_str_has_failed (&dyn))
+                          break;
+                      }
+                    else
+                      {
+                        char **place = dynarray_str_emplace (&dyn);
+                        if (place == NULL)
+                          break;
+                        TEST_VERIFY_EXIT (!dynarray_str_has_failed (&dyn));
+                        *place = NULL;
+                      }
+                    ++count;
+                  }
+                break;
+              }
+            ++count;
+          }
+        printf ("info: %s: failure after %zu elements\n", __func__, count);
+        TEST_VERIFY_EXIT (dynarray_str_has_failed (&dyn));
+        if (do_finalize)
+          {
+            struct str_array result = { (char **) (uintptr_t) -1, -1 };
+            TEST_VERIFY_EXIT (!dynarray_str_finalize (&dyn, &result));
+            TEST_VERIFY_EXIT (result.array == (char **) (uintptr_t) -1);
+            TEST_VERIFY_EXIT (result.length == (size_t) -1);
+          }
+        else
+          dynarray_str_free (&dyn);
+        TEST_VERIFY_EXIT (!dynarray_str_has_failed (&dyn));
+        TEST_VERIFY_EXIT (dyn.dynarray_header.array == dyn.scratch);
+        TEST_VERIFY_EXIT (dynarray_str_size (&dyn) == 0);
+        TEST_VERIFY_EXIT (dyn.dynarray_header.allocated > 0);
+      }
+
+  /* Exercise failure in finalize.  */
+  for (int do_add = 0; do_add < 2; ++do_add)
+    {
+      struct dynarray_str dyn;
+      dynarray_str_init (&dyn);
+      for (unsigned int i = 0; i < 1000; ++i)
+        {
+          if (do_add)
+            dynarray_str_add (&dyn, xstrdup ("placeholder"));
+          else
+            {
+              char **place = dynarray_str_emplace (&dyn);
+              TEST_VERIFY_EXIT (place != NULL);
+              TEST_VERIFY_EXIT (*place == NULL);
+              *place = xstrdup ("placeholder");
+            }
+        }
+      TEST_VERIFY_EXIT (!dynarray_str_has_failed (&dyn));
+      struct heap_filler *heap_filler = fill_heap ();
+      struct str_array result = { (char **) (uintptr_t) -1, -1 };
+      TEST_VERIFY_EXIT (!dynarray_str_finalize (&dyn, &result));
+      TEST_VERIFY_EXIT (result.array == (char **) (uintptr_t) -1);
+      TEST_VERIFY_EXIT (result.length == (size_t) -1);
+      TEST_VERIFY_EXIT (!dynarray_str_has_failed (&dyn));
+      TEST_VERIFY_EXIT (dyn.dynarray_header.array == dyn.scratch);
+      TEST_VERIFY_EXIT (dynarray_str_size (&dyn) == 0);
+      TEST_VERIFY_EXIT (dyn.dynarray_header.allocated > 0);
+      free_fill_heap (heap_filler);
+    }
+
+  /* Exercise failure in resize.  */
+  {
+    struct dynarray_str dyn;
+    dynarray_str_init (&dyn);
+    struct heap_filler *heap_filler = fill_heap ();
+    TEST_VERIFY (!dynarray_str_resize (&dyn, 1000));
+    TEST_VERIFY (dynarray_str_has_failed (&dyn));
+    free_fill_heap (heap_filler);
+
+    dynarray_str_init (&dyn);
+    TEST_VERIFY (dynarray_str_resize (&dyn, 1));
+    *dynarray_str_at (&dyn, 0) = xstrdup ("allocated");
+    heap_filler = fill_heap ();
+    TEST_VERIFY (!dynarray_str_resize (&dyn, 1000));
+    TEST_VERIFY (dynarray_str_has_failed (&dyn));
+    free_fill_heap (heap_filler);
+
+    dynarray_str_init (&dyn);
+    TEST_VERIFY (dynarray_str_resize (&dyn, 1000));
+    *dynarray_str_at (&dyn, 0) = xstrdup ("allocated");
+    heap_filler = fill_heap ();
+    TEST_VERIFY (!dynarray_str_resize (&dyn, 2000));
+    TEST_VERIFY (dynarray_str_has_failed (&dyn));
+    free_fill_heap (heap_filler);
+  }
+}
+
+/* Test if mmap can allocate a page.  This is necessary because
+   setrlimit does not fail even if it reduces the RLIMIT_AS limit
+   below what is currently needed by the process.  */
+static bool
+mmap_works (void)
+{
+  void *ptr =  mmap (NULL, 1, PROT_READ | PROT_WRITE,
+                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  if (ptr == MAP_FAILED)
+    return false;
+  xmunmap (ptr, 1);
+  return true;
+}
+
+/* Set the RLIMIT_AS limit to the value in *LIMIT.  */
+static void
+xsetrlimit_as (const struct rlimit *limit)
+{
+  if (setrlimit (RLIMIT_AS, limit) != 0)
+    FAIL_EXIT1 ("setrlimit (RLIMIT_AS, %lu): %m",
+                (unsigned long) limit->rlim_cur);
+}
+
+/* Approximately this many bytes can be allocated after
+   reduce_rlimit_as has run.  */
+enum { as_limit_reserve = 2 * 1024 * 1024 };
+
+/* Limit the size of the process, so that memory allocation in
+   fill_heap will eventually fail, without impacting the entire
+   system.  By default, a dynamic limit which leaves room for 2 MiB is
+   activated.  The TEST_RLIMIT_AS environment variable overrides
+   it.  */
+static void
+reduce_rlimit_as (void)
+{
+  struct rlimit limit;
+  if (getrlimit (RLIMIT_AS, &limit) != 0)
+    FAIL_EXIT1 ("getrlimit (RLIMIT_AS) failed: %m");
+
+  /* Use the TEST_RLIMIT_AS setting if available.  */
+  {
+    long target = 0;
+    const char *variable = "TEST_RLIMIT_AS";
+    const char *target_str = getenv (variable);
+    if (target_str != NULL)
+      {
+        target = atoi (target_str);
+        if (target <= 0)
+          FAIL_EXIT1 ("invalid %s value: \"%s\"", variable, target_str);
+        printf ("info: setting RLIMIT_AS to %ld MiB\n", target);
+        target *= 1024 * 1024;      /* Convert MiB to bytes.  */
+        limit.rlim_cur = target;
+        xsetrlimit_as (&limit);
+        return;
+      }
+  }
+
+  /* Otherwise, try to find the limit with a binary search.  */
+  unsigned long low = 1 << 20;
+  limit.rlim_cur = low;
+  xsetrlimit_as (&limit);
+
+  /* Find working upper limit.  */
+  unsigned long high = 1 << 30;
+  while (true)
+    {
+      limit.rlim_cur = high;
+      xsetrlimit_as (&limit);
+      if (mmap_works ())
+        break;
+      if (2 * high < high)
+        FAIL_EXIT1 ("cannot find upper AS limit");
+      high *= 2;
+    }
+
+  /* Perform binary search.  */
+  while ((high - low) > 128 * 1024)
+    {
+      unsigned long middle = (low + high) / 2;
+      limit.rlim_cur = middle;
+      xsetrlimit_as (&limit);
+      if (mmap_works ())
+        high = middle;
+      else
+        low = middle;
+    }
+
+  unsigned long target = high + as_limit_reserve;
+  limit.rlim_cur = target;
+  xsetrlimit_as (&limit);
+  printf ("info: RLIMIT_AS limit: %lu bytes\n", target);
+}
+
+static int
+do_test (void)
+{
+  mtrace ();
+  reduce_rlimit_as ();
+  test_int_fail ();
+  test_str_fail ();
+  return 0;
+}
+
+#define TIMEOUT 90
+#include <support/test-driver.c>
diff --git a/REORG.TODO/malloc/tst-dynarray-shared.h b/REORG.TODO/malloc/tst-dynarray-shared.h
new file mode 100644
index 0000000000..faba66f580
--- /dev/null
+++ b/REORG.TODO/malloc/tst-dynarray-shared.h
@@ -0,0 +1,77 @@
+/* Shared definitions for dynarray tests.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <stddef.h>
+
+struct int_array
+{
+  int *array;
+  size_t length;
+};
+
+#define DYNARRAY_STRUCT dynarray_int
+#define DYNARRAY_ELEMENT int
+#define DYNARRAY_PREFIX dynarray_int_
+#define DYNARRAY_FINAL_TYPE struct int_array
+#include <malloc/dynarray-skeleton.c>
+
+struct str_array
+{
+  char **array;
+  size_t length;
+};
+
+#define DYNARRAY_STRUCT dynarray_str
+#define DYNARRAY_ELEMENT char *
+#define DYNARRAY_ELEMENT_FREE(ptr) free (*ptr)
+#define DYNARRAY_PREFIX dynarray_str_
+#define DYNARRAY_FINAL_TYPE struct str_array
+#include <malloc/dynarray-skeleton.c>
+
+/* Check that *DYN is equivalent to its initial state.  */
+#define CHECK_INIT_STATE(type, dyn)                             \
+  ({                                                            \
+    TEST_VERIFY_EXIT (!dynarray_##type##_has_failed (dyn));     \
+    TEST_VERIFY_EXIT (dynarray_##type##_size (dyn) == 0);       \
+    TEST_VERIFY_EXIT ((dyn)->dynarray_header.array              \
+                      == (dyn)->scratch);                       \
+    TEST_VERIFY_EXIT ((dyn)->dynarray_header.allocated > 0);    \
+    (void) 0;                                                   \
+  })
+
+/* Check that *DYN behaves as if it is in its initial state.  */
+#define CHECK_EMPTY(type, dyn)                                       \
+  ({                                                                 \
+    CHECK_INIT_STATE (type, (dyn));                                  \
+    dynarray_##type##_free (dyn);                                    \
+    CHECK_INIT_STATE (type, (dyn));                                  \
+    dynarray_##type##_clear (dyn);                                   \
+    CHECK_INIT_STATE (type, (dyn));                                  \
+    dynarray_##type##_remove_last (dyn);                             \
+    CHECK_INIT_STATE (type, (dyn));                                  \
+    dynarray_##type##_mark_failed (dyn);                             \
+    TEST_VERIFY_EXIT (dynarray_##type##_has_failed (dyn));           \
+    dynarray_##type##_clear (dyn);                                   \
+    TEST_VERIFY_EXIT (dynarray_##type##_has_failed (dyn));           \
+    dynarray_##type##_remove_last (dyn);                             \
+    TEST_VERIFY_EXIT (dynarray_##type##_has_failed (dyn));           \
+    TEST_VERIFY_EXIT (dynarray_##type##_emplace (dyn) == NULL);      \
+    dynarray_##type##_free (dyn);                                    \
+    CHECK_INIT_STATE (type, (dyn));                                  \
+    (void) 0;                                                        \
+  })
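+
+/* Illustrative usage sketch (editorial note, not part of the original
+   header): a test would typically verify the empty state right after
+   initialization, e.g.
+
+     struct dynarray_int dyn;
+     dynarray_int_init (&dyn);
+     CHECK_EMPTY (int, &dyn);
+
+   CHECK_EMPTY exercises free, clear, remove_last and mark_failed, and
+   leaves *DYN back in its initial state, so the same object can be
+   reused by subsequent checks.  */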
diff --git a/REORG.TODO/malloc/tst-dynarray.c b/REORG.TODO/malloc/tst-dynarray.c
new file mode 100644
index 0000000000..7aee85aa39
--- /dev/null
+++ b/REORG.TODO/malloc/tst-dynarray.c
@@ -0,0 +1,517 @@
+/* Test for dynamic arrays.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "tst-dynarray-shared.h"
+
+#define DYNARRAY_STRUCT dynarray_long
+#define DYNARRAY_ELEMENT long
+#define DYNARRAY_PREFIX dynarray_long_
+#define DYNARRAY_ELEMENT_INIT(e) (*(e) = 17)
+#include <malloc/dynarray-skeleton.c>
+
+struct long_array
+{
+  long *array;
+  size_t length;
+};
+
+#define DYNARRAY_STRUCT dynarray_long_noscratch
+#define DYNARRAY_ELEMENT long
+#define DYNARRAY_PREFIX dynarray_long_noscratch_
+#define DYNARRAY_ELEMENT_INIT(e) (*(e) = 23)
+#define DYNARRAY_FINAL_TYPE struct long_array
+#define DYNARRAY_INITIAL_SIZE 0
+#include <malloc/dynarray-skeleton.c>
+
+#define DYNARRAY_STRUCT zstr
+#define DYNARRAY_ELEMENT char
+#define DYNARRAY_PREFIX zstr_
+#define DYNARRAY_INITIAL_SIZE 128
+#include <malloc/dynarray-skeleton.c>
+
+#include <malloc.h>
+#include <mcheck.h>
+#include <stdint.h>
+#include <support/check.h>
+#include <support/support.h>
+
+enum { max_count = 20 };
+
+/* Test dynamic arrays with int elements (no automatic deallocation
+   for elements).  */
+static void
+test_int (void)
+{
+  /* Empty array.  */
+  {
+    struct dynarray_int dyn;
+    dynarray_int_init (&dyn);
+    CHECK_EMPTY (int, &dyn);
+  }
+
+  /* Empty array with finalization.  */
+  {
+    struct dynarray_int dyn;
+    dynarray_int_init (&dyn);
+    CHECK_INIT_STATE (int, &dyn);
+    struct int_array result = { (int *) (uintptr_t) -1, -1 };
+    TEST_VERIFY_EXIT (dynarray_int_finalize (&dyn, &result));
+    CHECK_INIT_STATE (int, &dyn);
+    TEST_VERIFY_EXIT (result.array == NULL);
+    TEST_VERIFY_EXIT (result.length == 0);
+  }
+
+  /* Non-empty array tests.
+
+     do_add: Switch between emplace (false) and add (true).
+     do_finalize: Perform finalize call at the end.
+     do_clear: Perform clear call at the end.
+     do_remove_last: Perform remove_last call after adding elements.
+     count: Number of elements added to the array.  */
+  for (int do_add = 0; do_add < 2; ++do_add)
+    for (int do_finalize = 0; do_finalize < 2; ++do_finalize)
+      for (int do_clear = 0; do_clear < 2; ++do_clear)
+        for (int do_remove_last = 0; do_remove_last < 2; ++do_remove_last)
+          for (unsigned int count = 0; count < max_count; ++count)
+            {
+              if (do_remove_last && count == 0)
+                continue;
+              unsigned int base = count * count;
+              struct dynarray_int dyn;
+              dynarray_int_init (&dyn);
+              for (unsigned int i = 0; i < count; ++i)
+                {
+                  if (do_add)
+                    dynarray_int_add (&dyn, base + i);
+                  else
+                    {
+                      int *place = dynarray_int_emplace (&dyn);
+                      TEST_VERIFY_EXIT (place != NULL);
+                      *place = base + i;
+                    }
+                  TEST_VERIFY_EXIT (!dynarray_int_has_failed (&dyn));
+                  TEST_VERIFY_EXIT (dynarray_int_size (&dyn) == i + 1);
+                  TEST_VERIFY_EXIT (dynarray_int_size (&dyn)
+                                    <= dyn.dynarray_header.allocated);
+                }
+              TEST_VERIFY_EXIT (dynarray_int_size (&dyn) == count);
+              TEST_VERIFY_EXIT (count <= dyn.dynarray_header.allocated);
+              unsigned final_count;
+              bool heap_array = dyn.dynarray_header.array != dyn.scratch;
+              if (do_remove_last)
+                {
+                  dynarray_int_remove_last (&dyn);
+                  if (count == 0)
+                    final_count = 0;
+                  else
+                    final_count = count - 1;
+                }
+              else
+                final_count = count;
+              if (do_clear)
+                {
+                  dynarray_int_clear (&dyn);
+                  final_count = 0;
+                }
+              TEST_VERIFY_EXIT (!dynarray_int_has_failed (&dyn));
+              TEST_VERIFY_EXIT ((dyn.dynarray_header.array != dyn.scratch)
+                                == heap_array);
+              TEST_VERIFY_EXIT (dynarray_int_size (&dyn) == final_count);
+              TEST_VERIFY_EXIT (dyn.dynarray_header.allocated >= final_count);
+              if (!do_clear)
+                for (unsigned int i = 0; i < final_count; ++i)
+                  TEST_VERIFY_EXIT (*dynarray_int_at (&dyn, i) == base + i);
+              if (do_finalize)
+                {
+                  struct int_array result = { (int *) (uintptr_t) -1, -1 };
+                  TEST_VERIFY_EXIT (dynarray_int_finalize (&dyn, &result));
+                  CHECK_INIT_STATE (int, &dyn);
+                  TEST_VERIFY_EXIT (result.length == final_count);
+                  if (final_count == 0)
+                    TEST_VERIFY_EXIT (result.array == NULL);
+                  else
+                    {
+                      TEST_VERIFY_EXIT (result.array != NULL);
+                      TEST_VERIFY_EXIT (result.array != (int *) (uintptr_t) -1);
+                      TEST_VERIFY_EXIT
+                        (malloc_usable_size (result.array)
+                         >= final_count * sizeof (result.array[0]));
+                      for (unsigned int i = 0; i < final_count; ++i)
+                        TEST_VERIFY_EXIT (result.array[i] == base + i);
+                      free (result.array);
+                    }
+                }
+              else /* !do_finalize */
+                {
+                  dynarray_int_free (&dyn);
+                  CHECK_INIT_STATE (int, &dyn);
+                }
+            }
+}
+
+/* Test dynamic arrays with char * elements (with automatic
+   deallocation of the pointed-to strings).  */
+static void
+test_str (void)
+{
+  /* Empty array.  */
+  {
+    struct dynarray_str dyn;
+    dynarray_str_init (&dyn);
+    CHECK_EMPTY (str, &dyn);
+  }
+
+  /* Empty array with finalization.  */
+  {
+    struct dynarray_str dyn;
+    dynarray_str_init (&dyn);
+    TEST_VERIFY_EXIT (!dynarray_str_has_failed (&dyn));
+    struct str_array result = { (char **) (uintptr_t) -1, -1 };
+    TEST_VERIFY_EXIT (dynarray_str_finalize (&dyn, &result));
+    CHECK_INIT_STATE (str, &dyn);
+    TEST_VERIFY_EXIT (result.array == NULL);
+    TEST_VERIFY_EXIT (result.length == 0);
+  }
+
+  /* Non-empty array tests.
+
+     do_add: Switch between emplace (false) and add (true).
+     do_finalize: Perform finalize call at the end.
+     do_clear: Perform clear call at the end.
+     do_remove_last: Perform remove_last call after adding elements.
+     count: Number of elements added to the array.  */
+  for (int do_add = 0; do_add < 2; ++do_add)
+    for (int do_finalize = 0; do_finalize < 2; ++do_finalize)
+      for (int do_clear = 0; do_clear < 2; ++do_clear)
+        for (int do_remove_last = 0; do_remove_last < 2; ++do_remove_last)
+          for (unsigned int count = 0; count < max_count; ++count)
+            {
+              if (do_remove_last && count == 0)
+                continue;
+              unsigned int base = count * count;
+              struct dynarray_str dyn;
+              dynarray_str_init (&dyn);
+              for (unsigned int i = 0; i < count; ++i)
+                {
+                  char *item = xasprintf ("%d", base + i);
+                  if (do_add)
+                    dynarray_str_add (&dyn, item);
+                  else
+                    {
+                      char **place = dynarray_str_emplace (&dyn);
+                      TEST_VERIFY_EXIT (place != NULL);
+                      TEST_VERIFY_EXIT (*place == NULL);
+                      *place = item;
+                    }
+                  TEST_VERIFY_EXIT (!dynarray_str_has_failed (&dyn));
+                  TEST_VERIFY_EXIT (dynarray_str_size (&dyn) == i + 1);
+                  TEST_VERIFY_EXIT (dynarray_str_size (&dyn)
+                                    <= dyn.dynarray_header.allocated);
+                }
+              TEST_VERIFY_EXIT (dynarray_str_size (&dyn) == count);
+              TEST_VERIFY_EXIT (count <= dyn.dynarray_header.allocated);
+              unsigned final_count;
+              bool heap_array = dyn.dynarray_header.array != dyn.scratch;
+              if (do_remove_last)
+                {
+                  dynarray_str_remove_last (&dyn);
+                  if (count == 0)
+                    final_count = 0;
+                  else
+                    final_count = count - 1;
+                }
+              else
+                final_count = count;
+              if (do_clear)
+                {
+                  dynarray_str_clear (&dyn);
+                  final_count = 0;
+                }
+              TEST_VERIFY_EXIT (!dynarray_str_has_failed (&dyn));
+              TEST_VERIFY_EXIT ((dyn.dynarray_header.array != dyn.scratch)
+                                == heap_array);
+              TEST_VERIFY_EXIT (dynarray_str_size (&dyn) == final_count);
+              TEST_VERIFY_EXIT (dyn.dynarray_header.allocated >= final_count);
+              if (!do_clear)
+                for (unsigned int i = 0; i < count - do_remove_last; ++i)
+                  {
+                    char *expected = xasprintf ("%d", base + i);
+                    const char *actual = *dynarray_str_at (&dyn, i);
+                    TEST_VERIFY_EXIT (strcmp (actual, expected) == 0);
+                    free (expected);
+                  }
+              if (do_finalize)
+                {
+                  struct str_array result = { (char **) (uintptr_t) -1, -1 };
+                  TEST_VERIFY_EXIT (dynarray_str_finalize (&dyn, &result));
+                  CHECK_INIT_STATE (str, &dyn);
+                  TEST_VERIFY_EXIT (result.length == final_count);
+                  if (final_count == 0)
+                    TEST_VERIFY_EXIT (result.array == NULL);
+                  else
+                    {
+                      TEST_VERIFY_EXIT (result.array != NULL);
+                      TEST_VERIFY_EXIT (result.array
+                                        != (char **) (uintptr_t) -1);
+                      TEST_VERIFY_EXIT (result.length
+                                        == count - do_remove_last);
+                      TEST_VERIFY_EXIT
+                        (malloc_usable_size (result.array)
+                         >= final_count * sizeof (result.array[0]));
+                      for (unsigned int i = 0; i < count - do_remove_last; ++i)
+                        {
+                          char *expected = xasprintf ("%d", base + i);
+                          char *actual = result.array[i];
+                          TEST_VERIFY_EXIT (strcmp (actual, expected) == 0);
+                          free (expected);
+                          free (actual);
+                        }
+                      free (result.array);
+                    }
+                }
+              else /* !do_finalize */
+                {
+                  dynarray_str_free (&dyn);
+                  CHECK_INIT_STATE (str, &dyn);
+                }
+            }
+
+  /* Test resizing.  */
+  {
+    enum { count = 2131 };
+    struct dynarray_str dyn;
+    dynarray_str_init (&dyn);
+
+    /* From length 0 to length 1.  */
+    TEST_VERIFY (dynarray_str_resize (&dyn, 1));
+    TEST_VERIFY (dynarray_str_size (&dyn) == 1);
+    TEST_VERIFY (*dynarray_str_at (&dyn, 0) == NULL);
+    *dynarray_str_at (&dyn, 0) = xstrdup ("allocated");
+    dynarray_str_free (&dyn);
+
+    /* From length 0 to length 1 and 2.  */
+    TEST_VERIFY (dynarray_str_resize (&dyn, 1));
+    TEST_VERIFY (dynarray_str_size (&dyn) == 1);
+    TEST_VERIFY (*dynarray_str_at (&dyn, 0) == NULL);
+    *dynarray_str_at (&dyn, 0) = xstrdup ("allocated0");
+    TEST_VERIFY (dynarray_str_resize (&dyn, 2));
+    TEST_VERIFY (dynarray_str_size (&dyn) == 2);
+    TEST_VERIFY (strcmp (*dynarray_str_at (&dyn, 0), "allocated0") == 0);
+    TEST_VERIFY (*dynarray_str_at (&dyn, 1) == NULL);
+    *dynarray_str_at (&dyn, 1) = xstrdup ("allocated1");
+    TEST_VERIFY (dynarray_str_resize (&dyn, count));
+    TEST_VERIFY (dynarray_str_size (&dyn) == count);
+    TEST_VERIFY (strcmp (*dynarray_str_at (&dyn, 0), "allocated0") == 0);
+    TEST_VERIFY (strcmp (*dynarray_str_at (&dyn, 1), "allocated1") == 0);
+    for (int i = 2; i < count; ++i)
+      TEST_VERIFY (*dynarray_str_at (&dyn, i) == NULL);
+    *dynarray_str_at (&dyn, count - 1) = xstrdup ("allocated2");
+    TEST_VERIFY (dynarray_str_resize (&dyn, 3));
+    TEST_VERIFY (strcmp (*dynarray_str_at (&dyn, 0), "allocated0") == 0);
+    TEST_VERIFY (strcmp (*dynarray_str_at (&dyn, 1), "allocated1") == 0);
+    TEST_VERIFY (*dynarray_str_at (&dyn, 2) == NULL);
+    dynarray_str_free (&dyn);
+  }
+}
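+
+/* Illustrative sketch (editorial note, not part of the original test):
+   because the shared header defines DYNARRAY_ELEMENT_FREE for
+   dynarray_str, the array owns the strings stored in it, e.g.
+
+     struct dynarray_str dyn;
+     dynarray_str_init (&dyn);
+     dynarray_str_add (&dyn, xstrdup ("owned"));
+     dynarray_str_free (&dyn);
+
+   frees the copied string along with the array storage.  After a
+   successful finalize call, ownership of the strings moves to the
+   caller instead, which is why test_str above frees result.array[i]
+   individually.  */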
+
+/* Verify that DYNARRAY_ELEMENT_INIT has an effect.  */
+static void
+test_long_init (void)
+{
+  enum { count = 2131 };
+  {
+    struct dynarray_long dyn;
+    dynarray_long_init (&dyn);
+    for (int i = 0; i < count; ++i)
+      {
+        long *place = dynarray_long_emplace (&dyn);
+        TEST_VERIFY_EXIT (place != NULL);
+        TEST_VERIFY (*place == 17);
+      }
+    TEST_VERIFY (dynarray_long_size (&dyn) == count);
+    for (int i = 0; i < count; ++i)
+      TEST_VERIFY (*dynarray_long_at (&dyn, i) == 17);
+    dynarray_long_free (&dyn);
+
+    TEST_VERIFY (dynarray_long_resize (&dyn, 1));
+    TEST_VERIFY (dynarray_long_size (&dyn) == 1);
+    TEST_VERIFY (*dynarray_long_at (&dyn, 0) == 17);
+    *dynarray_long_at (&dyn, 0) = 18;
+    dynarray_long_free (&dyn);
+    TEST_VERIFY (dynarray_long_resize (&dyn, 1));
+    TEST_VERIFY (dynarray_long_size (&dyn) == 1);
+    TEST_VERIFY (*dynarray_long_at (&dyn, 0) == 17);
+    TEST_VERIFY (dynarray_long_resize (&dyn, 2));
+    TEST_VERIFY (dynarray_long_size (&dyn) == 2);
+    TEST_VERIFY (*dynarray_long_at (&dyn, 0) == 17);
+    TEST_VERIFY (*dynarray_long_at (&dyn, 1) == 17);
+    *dynarray_long_at (&dyn, 0) = 18;
+    TEST_VERIFY (dynarray_long_resize (&dyn, count));
+    TEST_VERIFY (dynarray_long_size (&dyn) == count);
+    TEST_VERIFY (*dynarray_long_at (&dyn, 0) == 18);
+    for (int i = 1; i < count; ++i)
+      TEST_VERIFY (*dynarray_long_at (&dyn, i) == 17);
+    dynarray_long_free (&dyn);
+  }
+
+  /* Similar, but without an on-stack scratch region
+     (DYNARRAY_INITIAL_SIZE is 0).  */
+  {
+    struct dynarray_long_noscratch dyn;
+    dynarray_long_noscratch_init (&dyn);
+    struct long_array result;
+    TEST_VERIFY_EXIT (dynarray_long_noscratch_finalize (&dyn, &result));
+    TEST_VERIFY (result.array == NULL);
+    TEST_VERIFY (result.length == 0);
+
+    /* Test with one element.  */
+    {
+      long *place = dynarray_long_noscratch_emplace (&dyn);
+      TEST_VERIFY_EXIT (place != NULL);
+      TEST_VERIFY (*place == 23);
+    }
+    TEST_VERIFY (dynarray_long_noscratch_size (&dyn) == 1);
+    TEST_VERIFY (*dynarray_long_noscratch_at (&dyn, 0) == 23);
+    TEST_VERIFY_EXIT (dynarray_long_noscratch_finalize (&dyn, &result));
+    TEST_VERIFY_EXIT (result.array != NULL);
+    TEST_VERIFY (result.length == 1);
+    TEST_VERIFY (result.array[0] == 23);
+    free (result.array);
+
+    for (int i = 0; i < count; ++i)
+      {
+        long *place = dynarray_long_noscratch_emplace (&dyn);
+        TEST_VERIFY_EXIT (place != NULL);
+        TEST_VERIFY (*place == 23);
+        if (i == 0)
+          *place = 29;
+      }
+    TEST_VERIFY (dynarray_long_noscratch_size (&dyn) == count);
+    TEST_VERIFY (*dynarray_long_noscratch_at (&dyn, 0) == 29);
+    for (int i = 1; i < count; ++i)
+      TEST_VERIFY (*dynarray_long_noscratch_at (&dyn, i) == 23);
+    TEST_VERIFY_EXIT (dynarray_long_noscratch_finalize (&dyn, &result));
+    TEST_VERIFY_EXIT (result.array != NULL);
+    TEST_VERIFY (result.length == count);
+    TEST_VERIFY (result.array[0] == 29);
+    for (int i = 1; i < count; ++i)
+      TEST_VERIFY (result.array[i] == 23);
+    free (result.array);
+
+    TEST_VERIFY (dynarray_long_noscratch_resize (&dyn, 1));
+    TEST_VERIFY (dynarray_long_noscratch_size (&dyn) == 1);
+    TEST_VERIFY (*dynarray_long_noscratch_at (&dyn, 0) == 23);
+    *dynarray_long_noscratch_at (&dyn, 0) = 24;
+    dynarray_long_noscratch_free (&dyn);
+    TEST_VERIFY (dynarray_long_noscratch_resize (&dyn, 1));
+    TEST_VERIFY (dynarray_long_noscratch_size (&dyn) == 1);
+    TEST_VERIFY (*dynarray_long_noscratch_at (&dyn, 0) == 23);
+    TEST_VERIFY (dynarray_long_noscratch_resize (&dyn, 2));
+    TEST_VERIFY (dynarray_long_noscratch_size (&dyn) == 2);
+    TEST_VERIFY (*dynarray_long_noscratch_at (&dyn, 0) == 23);
+    TEST_VERIFY (*dynarray_long_noscratch_at (&dyn, 1) == 23);
+    *dynarray_long_noscratch_at (&dyn, 0) = 24;
+    TEST_VERIFY (dynarray_long_noscratch_resize (&dyn, count));
+    TEST_VERIFY (dynarray_long_noscratch_size (&dyn) == count);
+    TEST_VERIFY (*dynarray_long_noscratch_at (&dyn, 0) == 24);
+    for (int i = 1; i < count; ++i)
+      TEST_VERIFY (*dynarray_long_noscratch_at (&dyn, i) == 23);
+    dynarray_long_noscratch_free (&dyn);
+  }
+}
+
+/* Test NUL-terminated string construction with the add function and
+   the simple finalize function.  */
+static void
+test_zstr (void)
+{
+  /* Totally empty string (no NUL termination).  */
+  {
+    struct zstr s;
+    zstr_init (&s);
+    char *result = zstr_finalize (&s, NULL);
+    TEST_VERIFY (result == NULL);
+    TEST_VERIFY (zstr_size (&s) == 0);
+    size_t length = 1;
+    result = zstr_finalize (&s, &length);
+    TEST_VERIFY (result == NULL);
+    TEST_VERIFY (length == 0);
+    TEST_VERIFY (zstr_size (&s) == 0);
+  }
+
+  /* Empty string.  */
+  {
+    struct zstr s;
+    zstr_init (&s);
+    zstr_add (&s, '\0');
+    char *result = zstr_finalize (&s, NULL);
+    TEST_VERIFY_EXIT (result != NULL);
+    TEST_VERIFY (*result == '\0');
+    TEST_VERIFY (zstr_size (&s) == 0);
+    free (result);
+
+    zstr_add (&s, '\0');
+    size_t length = 1;
+    result = zstr_finalize (&s, &length);
+    TEST_VERIFY_EXIT (result != NULL);
+    TEST_VERIFY (*result == '\0');
+    TEST_VERIFY (length == 1);
+    TEST_VERIFY (zstr_size (&s) == 0);
+    free (result);
+  }
+
+  /* A few characters.  */
+  {
+    struct zstr s;
+    zstr_init (&s);
+    zstr_add (&s, 'A');
+    zstr_add (&s, 'b');
+    zstr_add (&s, 'c');
+    zstr_add (&s, '\0');
+    char *result = zstr_finalize (&s, NULL);
+    TEST_VERIFY_EXIT (result != NULL);
+    TEST_VERIFY (strcmp (result, "Abc") == 0);
+    TEST_VERIFY (zstr_size (&s) == 0);
+    free (result);
+
+    zstr_add (&s, 'X');
+    zstr_add (&s, 'y');
+    zstr_add (&s, 'z');
+    zstr_add (&s, '\0');
+    size_t length = 1;
+    result = zstr_finalize (&s, &length);
+    TEST_VERIFY_EXIT (result != NULL);
+    TEST_VERIFY (strcmp (result, "Xyz") == 0);
+    TEST_VERIFY (length == 4);
+    TEST_VERIFY (zstr_size (&s) == 0);
+    free (result);
+  }
+}
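+
+/* Illustrative sketch (editorial note, not part of the original test):
+   zstr is a dynarray of char, so a string can be built incrementally,
+   e.g.
+
+     struct zstr s;
+     zstr_init (&s);
+     for (const char *p = "example"; *p != '\0'; ++p)
+       zstr_add (&s, *p);
+     zstr_add (&s, '\0');
+     char *result = zstr_finalize (&s, NULL);
+
+   zstr_finalize hands a heap copy to the caller (to be released with
+   free) and resets S to its initial, empty state.  */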
+
+static int
+do_test (void)
+{
+  mtrace ();
+  test_int ();
+  test_str ();
+  test_long_init ();
+  test_zstr ();
+  return 0;
+}
+
+#include <support/test-driver.c>
diff --git a/REORG.TODO/malloc/tst-interpose-aux-nothread.c b/REORG.TODO/malloc/tst-interpose-aux-nothread.c
new file mode 100644
index 0000000000..804bac3708
--- /dev/null
+++ b/REORG.TODO/malloc/tst-interpose-aux-nothread.c
@@ -0,0 +1,20 @@
+/* Interposed malloc, version without threading support.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#define INTERPOSE_THREADS 0
+#include "tst-interpose-aux.c"
diff --git a/REORG.TODO/malloc/tst-interpose-aux-thread.c b/REORG.TODO/malloc/tst-interpose-aux-thread.c
new file mode 100644
index 0000000000..107ff2b6a4
--- /dev/null
+++ b/REORG.TODO/malloc/tst-interpose-aux-thread.c
@@ -0,0 +1,20 @@
+/* Interposed malloc, version with threading support.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#define INTERPOSE_THREADS 1
+#include "tst-interpose-aux.c"
diff --git a/REORG.TODO/malloc/tst-interpose-aux.c b/REORG.TODO/malloc/tst-interpose-aux.c
new file mode 100644
index 0000000000..68282b41d5
--- /dev/null
+++ b/REORG.TODO/malloc/tst-interpose-aux.c
@@ -0,0 +1,271 @@
+/* Minimal malloc implementation for interposition tests.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include "tst-interpose-aux.h"
+
+#include <errno.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+#include <unistd.h>
+
+#if INTERPOSE_THREADS
+#include <pthread.h>
+#endif
+
+/* Print the error message and terminate the process with status 1.  */
+__attribute__ ((noreturn))
+__attribute__ ((format (printf, 1, 2)))
+static void *
+fail (const char *format, ...)
+{
+  /* This assumes that vsnprintf will not call malloc.  It does not do
+     so for the format strings we use.  */
+  char message[4096];
+  va_list ap;
+  va_start (ap, format);
+  vsnprintf (message, sizeof (message), format, ap);
+  va_end (ap);
+
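+  /* Editorial note: the message is written with writev rather than
+     stdio (which may allocate its buffers with malloc), and _exit is
+     used below, so no atexit handlers or ELF destructors run on this
+     error path.  */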
+  enum { count = 3 };
+  struct iovec iov[count];
+
+  iov[0].iov_base = (char *) "error: ";
+  iov[1].iov_base = (char *) message;
+  iov[2].iov_base = (char *) "\n";
+
+  for (int i = 0; i < count; ++i)
+    iov[i].iov_len = strlen (iov[i].iov_base);
+
+  int unused __attribute__ ((unused));
+  unused = writev (STDOUT_FILENO, iov, count);
+  _exit (1);
+}
+
+#if INTERPOSE_THREADS
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+static void
+lock (void)
+{
+#if INTERPOSE_THREADS
+  int ret = pthread_mutex_lock (&mutex);
+  if (ret != 0)
+    {
+      errno = ret;
+      fail ("pthread_mutex_lock: %m");
+    }
+#endif
+}
+
+static void
+unlock (void)
+{
+#if INTERPOSE_THREADS
+  int ret = pthread_mutex_unlock (&mutex);
+  if (ret != 0)
+    {
+      errno = ret;
+      fail ("pthread_mutex_unlock: %m");
+    }
+#endif
+}
+
+struct __attribute__ ((aligned (__alignof__ (max_align_t)))) allocation_header
+{
+  size_t allocation_index;
+  size_t allocation_size;
+};
+
+/* Array of known allocations, to track invalid frees.  */
+enum { max_allocations = 65536 };
+static struct allocation_header *allocations[max_allocations];
+static size_t allocation_index;
+static size_t deallocation_count;
+
+/* Sanity check for successful malloc interposition.  */
+__attribute__ ((destructor))
+static void
+check_for_allocations (void)
+{
+  if (allocation_index == 0)
+    {
+      /* Make sure that malloc is called at least once from libc.  */
+      void *volatile ptr = strdup ("ptr");
+      /* Compiler barrier.  The strdup function calls malloc, which
+         updates allocation_index, but strdup is marked __THROW, so
+         the compiler could optimize away the reload.  */
+      __asm__ volatile ("" ::: "memory");
+      free (ptr);
+      /* If the allocation count is still zero, it means we did not
+         interpose malloc successfully.  */
+      if (allocation_index == 0)
+        fail ("malloc does not seem to have been interposed");
+    }
+}
+
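+/* Return the allocation header for PTR.  Terminate the process if PTR
+   was not returned by this allocator or has already been freed (its
+   slot in the allocations array no longer points at the header).  */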
+static struct allocation_header *
+get_header (const char *op, void *ptr)
+{
+  struct allocation_header *header = ((struct allocation_header *) ptr) - 1;
+  if (header->allocation_index >= allocation_index)
+    fail ("%s: %p: invalid allocation index: %zu (not less than %zu)",
+          op, ptr, header->allocation_index, allocation_index);
+  if (allocations[header->allocation_index] != header)
+    fail ("%s: %p: allocation pointer does not point to header, but %p",
+          op, ptr, allocations[header->allocation_index]);
+  return header;
+}
+
+/* Internal helper functions.  Those must be called while the lock is
+   acquired.  */
+
+static void *
+malloc_internal (size_t size)
+{
+  if (allocation_index == max_allocations)
+    {
+      errno = ENOMEM;
+      return NULL;
+    }
+  size_t allocation_size = size + sizeof (struct allocation_header);
+  if (allocation_size < size)
+    {
+      errno = ENOMEM;
+      return NULL;
+    }
+
+  size_t index = allocation_index++;
+  void *result = mmap (NULL, allocation_size, PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (result == MAP_FAILED)
+    return NULL;
+  allocations[index] = result;
+  *allocations[index] = (struct allocation_header)
+    {
+      .allocation_index = index,
+      .allocation_size = allocation_size
+    };
+  return allocations[index] + 1;
+}
+
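+/* Deallocate the allocation starting at HEADER.  The pages are not
+   unmapped; they are remapped PROT_NONE instead, so that any later
+   access to the freed object faults.  */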
+static void
+free_internal (const char *op, struct allocation_header *header)
+{
+  size_t index = header->allocation_index;
+  int result = mprotect (header, header->allocation_size, PROT_NONE);
+  if (result != 0)
+    fail ("%s: mprotect (%p, %zu): %m", op, header, header->allocation_size);
+  /* Catch double-free issues.  */
+  allocations[index] = NULL;
+  ++deallocation_count;
+}
+
+static void *
+realloc_internal (void *ptr, size_t new_size)
+{
+  struct allocation_header *header = get_header ("realloc", ptr);
+  size_t old_size = header->allocation_size - sizeof (struct allocation_header);
+  if (old_size >= new_size)
+    return ptr;
+
+  void *newptr = malloc_internal (new_size);
+  if (newptr == NULL)
+    return NULL;
+  memcpy (newptr, ptr, old_size);
+  free_internal ("realloc", header);
+  return newptr;
+}
+
+/* Public interfaces.  These functions must perform locking.  */
+
+size_t
+malloc_allocation_count (void)
+{
+  lock ();
+  size_t count = allocation_index;
+  unlock ();
+  return count;
+}
+
+size_t
+malloc_deallocation_count (void)
+{
+  lock ();
+  size_t count = deallocation_count;
+  unlock ();
+  return count;
+}
+
+void *
+malloc (size_t size)
+{
+  lock ();
+  void *result = malloc_internal (size);
+  unlock ();
+  return result;
+}
+
+void
+free (void *ptr)
+{
+  if (ptr == NULL)
+    return;
+  lock ();
+  struct allocation_header *header = get_header ("free", ptr);
+  free_internal ("free", header);
+  unlock ();
+}
+
+void *
+calloc (size_t a, size_t b)
+{
+  if (b > 0 && a > SIZE_MAX / b)
+    {
+      errno = ENOMEM;
+      return NULL;
+    }
+  lock ();
+  /* malloc_internal uses mmap, so the memory is zeroed.  */
+  void *result = malloc_internal (a * b);
+  unlock ();
+  return result;
+}
+
+void *
+realloc (void *ptr, size_t n)
+{
+  if (n == 0)
+    {
+      free (ptr);
+      return NULL;
+    }
+  else if (ptr == NULL)
+    return malloc (n);
+  else
+    {
+      lock ();
+      void *result = realloc_internal (ptr, n);
+      unlock ();
+      return result;
+    }
+}
diff --git a/REORG.TODO/malloc/tst-interpose-aux.h b/REORG.TODO/malloc/tst-interpose-aux.h
new file mode 100644
index 0000000000..1388a73b06
--- /dev/null
+++ b/REORG.TODO/malloc/tst-interpose-aux.h
@@ -0,0 +1,30 @@
+/* Statistics interface for the minimal malloc implementation.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#ifndef TST_INTERPOSE_AUX_H
+#define TST_INTERPOSE_AUX_H
+
+#include <stddef.h>
+
+/* Return the number of allocations performed.  */
+size_t malloc_allocation_count (void);
+
+/* Return the number of deallocations performed.  */
+size_t malloc_deallocation_count (void);
+
+#endif /* TST_INTERPOSE_AUX_H */
diff --git a/REORG.TODO/malloc/tst-interpose-nothread.c b/REORG.TODO/malloc/tst-interpose-nothread.c
new file mode 100644
index 0000000000..6beea3698a
--- /dev/null
+++ b/REORG.TODO/malloc/tst-interpose-nothread.c
@@ -0,0 +1,20 @@
+/* Malloc interposition test, dynamically-linked version without threads.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#define INTERPOSE_THREADS 0
+#include "tst-interpose-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-interpose-skeleton.c b/REORG.TODO/malloc/tst-interpose-skeleton.c
new file mode 100644
index 0000000000..82a9078158
--- /dev/null
+++ b/REORG.TODO/malloc/tst-interpose-skeleton.c
@@ -0,0 +1,204 @@
+/* Test driver for malloc interposition tests.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#if INTERPOSE_THREADS
+#include <pthread.h>
+#include <support/xthread.h>
+#endif
+
+static int do_test (void);
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
+
+/* Fill BUFFER (LENGTH bytes, including the trailing newline and null
+   terminator) with a test string derived from NUMBER.  */
+static void
+line_string (int number, char *buffer, size_t length)
+{
+  for (size_t i = 0; i < length - 2; ++i)
+    buffer[i] = 'A' + ((number + i) % 26);
+  buffer[length - 2] = '\n';
+  buffer[length - 1] = '\0';
+}
+
+/* Perform the tests.  */
+static void *
+run_tests (void *closure)
+{
+  char *temp_file_path;
+  int fd = create_temp_file ("tst-malloc-interpose", &temp_file_path);
+  if (fd < 0)
+    _exit (1);
+
+  /* Line lengths excluding the line terminator; -1 marks the end of
+     the list.  */
+  static const int line_lengths[] = { 0, 45, 80, 2, 8201, 0, 17, -1 };
+
+  /* Fill the test file with data.  */
+  {
+    FILE *fp = fdopen (fd, "w");
+    for (int lineno = 0; line_lengths[lineno] >= 0; ++lineno)
+      {
+        char buffer[line_lengths[lineno] + 2];
+        line_string (lineno, buffer, sizeof (buffer));
+        fprintf (fp, "%s", buffer);
+      }
+
+    if (ferror (fp))
+      {
+        printf ("error: fprintf: %m\n");
+        _exit (1);
+      }
+    if (fclose (fp) != 0)
+      {
+        printf ("error: fclose: %m\n");
+        _exit (1);
+      }
+  }
+
+  /* Read the test file.  This tests libc-internal allocation with
+     realloc.  */
+  {
+    FILE *fp = fopen (temp_file_path, "r");
+
+    char *actual = NULL;
+    size_t actual_size = 0;
+    for (int lineno = 0; ; ++lineno)
+      {
+        errno = 0;
+        ssize_t result = getline (&actual, &actual_size, fp);
+        if (result == 0)
+          {
+            printf ("error: invalid return value 0 from getline\n");
+            _exit (1);
+          }
+        if (result < 0 && errno != 0)
+          {
+            printf ("error: getline: %m\n");
+            _exit (1);
+          }
+        if (result < 0 && line_lengths[lineno] >= 0)
+          {
+            printf ("error: unexpected end of file after line %d\n", lineno);
+            _exit (1);
+          }
+        if (result > 0 && line_lengths[lineno] < 0)
+          {
+            printf ("error: no end of file after line %d\n", lineno);
+            _exit (1);
+          }
+        if (result == -1 && line_lengths[lineno] == -1)
+          /* End of file reached as expected.  */
+          break;
+
+        if (result != line_lengths[lineno] + 1)
+          {
+            printf ("error: line length mismatch: expected %d, got %zd\n",
+                    line_lengths[lineno], result);
+            _exit (1);
+          }
+
+        char expected[line_lengths[lineno] + 2];
+        line_string (lineno, expected, sizeof (expected));
+        if (strcmp (actual, expected) != 0)
+          {
+            printf ("error: line mismatch\n");
+            printf ("error:   expected: [[%s]]\n", expected);
+            printf ("error:   actual:   [[%s]]\n", actual);
+            _exit (1);
+          }
+      }
+
+    if (fclose (fp) != 0)
+      {
+        printf ("error: fclose (after reading): %m\n");
+        _exit (1);
+      }
+  }
+
+  free (temp_file_path);
+
+  /* Make sure that fork is working.  */
+  pid_t pid = fork ();
+  if (pid == -1)
+    {
+      printf ("error: fork: %m\n");
+      _exit (1);
+    }
+  enum { exit_code = 55 };
+  if (pid == 0)
+    _exit (exit_code);
+  int status;
+  int ret = waitpid (pid, &status, 0);
+  if (ret < 0)
+    {
+      printf ("error: waitpid: %m\n");
+      _exit (1);
+    }
+  if (!WIFEXITED (status) || WEXITSTATUS (status) != exit_code)
+    {
+      printf ("error: unexpected exit status from child process: %d\n",
+              status);
+      _exit (1);
+    }
+
+  return NULL;
+}
+
+/* This is used to detect if malloc has not been successfully
+   interposed.  The interposed malloc does not use brk/sbrk.  */
+static void *initial_brk;
+__attribute__ ((constructor))
+static void
+set_initial_brk (void)
+{
+  initial_brk = sbrk (0);
+}
+
+/* Terminate the process if the break value has been changed.  */
+__attribute__ ((destructor))
+static void
+check_brk (void)
+{
+  void *current = sbrk (0);
+  if (current != initial_brk)
+    {
+      printf ("error: brk changed from %p to %p; no interposition?\n",
+              initial_brk, current);
+      _exit (1);
+    }
+}
+
+static int
+do_test (void)
+{
+  check_brk ();
+
+#if INTERPOSE_THREADS
+  pthread_t thr = xpthread_create (NULL, run_tests, NULL);
+  xpthread_join (thr);
+#else
+  run_tests (NULL);
+#endif
+
+  check_brk ();
+
+  return 0;
+}
diff --git a/REORG.TODO/malloc/tst-interpose-static-nothread.c b/REORG.TODO/malloc/tst-interpose-static-nothread.c
new file mode 100644
index 0000000000..ab3d464859
--- /dev/null
+++ b/REORG.TODO/malloc/tst-interpose-static-nothread.c
@@ -0,0 +1,19 @@
+/* Malloc interposition test, static version without threads.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include "tst-interpose-nothread.c"
diff --git a/REORG.TODO/malloc/tst-interpose-static-thread.c b/REORG.TODO/malloc/tst-interpose-static-thread.c
new file mode 100644
index 0000000000..80ddcd4fce
--- /dev/null
+++ b/REORG.TODO/malloc/tst-interpose-static-thread.c
@@ -0,0 +1,19 @@
+/* Malloc interposition test, static version with threads.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include "tst-interpose-nothread.c"
diff --git a/REORG.TODO/malloc/tst-interpose-thread.c b/REORG.TODO/malloc/tst-interpose-thread.c
new file mode 100644
index 0000000000..d09c959c90
--- /dev/null
+++ b/REORG.TODO/malloc/tst-interpose-thread.c
@@ -0,0 +1,20 @@
+/* Malloc interposition test, dynamically-linked version with threads.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#define INTERPOSE_THREADS 1
+#include "tst-interpose-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-malloc-backtrace.c b/REORG.TODO/malloc/tst-malloc-backtrace.c
new file mode 100644
index 0000000000..012ff81caa
--- /dev/null
+++ b/REORG.TODO/malloc/tst-malloc-backtrace.c
@@ -0,0 +1,53 @@
+/* Verify that backtrace does not deadlock on itself on memory corruption.
+   Copyright (C) 2015-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <signal.h>
+#include <stdlib.h>
+
+#include <support/support.h>
+
+#define SIZE 4096
+
+/* Wrap free with a function to prevent gcc from optimizing it out.  */
+static void
+__attribute__((noinline))
+call_free (void *ptr)
+{
+  free (ptr);
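+  /* Intentionally corrupt the malloc metadata (the chunk size word
+     that precedes the user pointer), so that a subsequent allocation
+     detects the damage and aborts.  */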
+  *(size_t *)(ptr - sizeof (size_t)) = 1;
+}
+
+int
+do_test (void)
+{
+  void *ptr1 = malloc (SIZE);
+  void *ptr2 = malloc (SIZE);
+
+  /* Avoid unwanted output to TTY after an expected memory corruption.  */
+  ignore_stderr ();
+
+  call_free (ptr1);
+  ptr1 = malloc (SIZE);
+
+  /* Not reached.  The return statement is to put ptr2 into use so that gcc
+     doesn't optimize out that malloc call.  */
+  return (ptr1 == ptr2);
+}
+
+#define EXPECTED_SIGNAL SIGABRT
+#include <support/test-driver.c>
diff --git a/REORG.TODO/malloc/tst-malloc-fork-deadlock.c b/REORG.TODO/malloc/tst-malloc-fork-deadlock.c
new file mode 100644
index 0000000000..30d3df3b08
--- /dev/null
+++ b/REORG.TODO/malloc/tst-malloc-fork-deadlock.c
@@ -0,0 +1,206 @@
+/* Test concurrent fork, getline, and fflush (NULL).
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <sys/wait.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <malloc.h>
+#include <time.h>
+#include <string.h>
+#include <signal.h>
+
+#include <support/xthread.h>
+#include <support/temp_file.h>
+#include <support/test-driver.h>
+
+enum {
+  /* Number of threads which call fork.  */
+  fork_thread_count = 4,
+  /* Number of threads which call getline (and, indirectly,
+     malloc).  */
+  read_thread_count = 8,
+};
+
+static bool termination_requested;
+
+static void *
+fork_thread_function (void *closure)
+{
+  while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
+    {
+      pid_t pid = fork ();
+      if (pid < 0)
+        {
+          printf ("error: fork: %m\n");
+          abort ();
+        }
+      else if (pid == 0)
+        _exit (17);
+
+      int status;
+      if (waitpid (pid, &status, 0) < 0)
+        {
+          printf ("error: waitpid: %m\n");
+          abort ();
+        }
+      if (!WIFEXITED (status) || WEXITSTATUS (status) != 17)
+        {
+          printf ("error: waitpid returned invalid status: %d\n", status);
+          abort ();
+        }
+    }
+  return NULL;
+}
+
+static char *file_to_read;
+
+static void *
+read_thread_function (void *closure)
+{
+  FILE *f = fopen (file_to_read, "r");
+  if (f == NULL)
+    {
+      printf ("error: fopen (%s): %m\n", file_to_read);
+      abort ();
+    }
+
+  while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
+    {
+      rewind (f);
+      char *line = NULL;
+      size_t line_allocated = 0;
+      ssize_t ret = getline (&line, &line_allocated, f);
+      if (ret < 0)
+        {
+          printf ("error: getline: %m\n");
+          abort ();
+        }
+      free (line);
+    }
+  fclose (f);
+
+  return NULL;
+}
+
+static void *
+flushall_thread_function (void *closure)
+{
+  while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
+    if (fflush (NULL) != 0)
+      {
+        printf ("error: fflush (NULL): %m\n");
+        abort ();
+      }
+  return NULL;
+}
+
+static void
+create_threads (pthread_t *threads, size_t count, void *(*func) (void *))
+{
+  for (size_t i = 0; i < count; ++i)
+    threads[i] = xpthread_create (NULL, func, NULL);
+}
+
+static void
+join_threads (pthread_t *threads, size_t count)
+{
+  for (size_t i = 0; i < count; ++i)
+    xpthread_join (threads[i]);
+}
+
+/* Create a file which consists of a single long line, and assign its
+   name to file_to_read.  The hope is that this triggers an allocation
+   in getline which needs a lock.  */
+static void
+create_file_with_large_line (void)
+{
+  int fd = create_temp_file ("bug19431-large-line", &file_to_read);
+  if (fd < 0)
+    {
+      printf ("error: create_temp_file: %m\n");
+      abort ();
+    }
+  FILE *f = fdopen (fd, "w+");
+  if (f == NULL)
+    {
+      printf ("error: fdopen: %m\n");
+      abort ();
+    }
+  for (int i = 0; i < 50000; ++i)
+    fputc ('x', f);
+  fputc ('\n', f);
+  if (ferror (f))
+    {
+      printf ("error: fputc: %m\n");
+      abort ();
+    }
+  if (fclose (f) != 0)
+    {
+      printf ("error: fclose: %m\n");
+      abort ();
+    }
+}
+
+static int
+do_test (void)
+{
+  /* Make sure that we do not exceed the arena limit with the number
+     of threads we configured.  */
+  if (mallopt (M_ARENA_MAX, 400) == 0)
+    {
+      printf ("error: mallopt (M_ARENA_MAX) failed\n");
+      return 1;
+    }
+
+  /* Leave some room for shutting down all threads gracefully.  */
+  int timeout = 3;
+  if (timeout > DEFAULT_TIMEOUT)
+    timeout = DEFAULT_TIMEOUT - 1;
+
+  create_file_with_large_line ();
+
+  pthread_t fork_threads[fork_thread_count];
+  create_threads (fork_threads, fork_thread_count, fork_thread_function);
+  pthread_t read_threads[read_thread_count];
+  create_threads (read_threads, read_thread_count, read_thread_function);
+  pthread_t flushall_threads[1];
+  create_threads (flushall_threads, 1, flushall_thread_function);
+
+  struct timespec ts = {timeout, 0};
+  if (nanosleep (&ts, NULL))
+    {
+      printf ("error: error: nanosleep: %m\n");
+      abort ();
+    }
+
+  __atomic_store_n (&termination_requested, true, __ATOMIC_RELAXED);
+
+  join_threads (flushall_threads, 1);
+  join_threads (read_threads, read_thread_count);
+  join_threads (fork_threads, fork_thread_count);
+
+  free (file_to_read);
+
+  return 0;
+}
+
+#include <support/test-driver.c>
diff --git a/REORG.TODO/malloc/tst-malloc-thread-exit.c b/REORG.TODO/malloc/tst-malloc-thread-exit.c
new file mode 100644
index 0000000000..8a0909b2f8
--- /dev/null
+++ b/REORG.TODO/malloc/tst-malloc-thread-exit.c
@@ -0,0 +1,137 @@
+/* Test malloc with concurrent thread termination.
+   Copyright (C) 2015-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This test spawns a number of outer threads, equal to the arena
+   limit.  The outer threads run a loop which starts and joins two
+   different kinds of threads: the first kind allocates (attaching an
+   arena to the thread; malloc_first_thread) and waits, the second
+   kind waits and allocates (wait_first_thread).  Both kinds of
+   threads exit immediately after waiting.  The hope is that this will
+   exhibit races in thread termination and arena management,
+   particularly related to the arena free list.  */
+
+#include <errno.h>
+#include <malloc.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <support/support.h>
+#include <support/xthread.h>
+#include <support/test-driver.h>
+
+static bool termination_requested;
+static int inner_thread_count = 4;
+static size_t malloc_size = 32;
+
+static void
+__attribute__ ((noinline, noclone))
+unoptimized_free (void *ptr)
+{
+  free (ptr);
+}
+
+static void *
+malloc_first_thread (void *closure)
+{
+  pthread_barrier_t *barrier = closure;
+  void *ptr = xmalloc (malloc_size);
+  xpthread_barrier_wait (barrier);
+  unoptimized_free (ptr);
+  return NULL;
+}
+
+static void *
+wait_first_thread (void *closure)
+{
+  pthread_barrier_t *barrier = closure;
+  xpthread_barrier_wait (barrier);
+  void *ptr = xmalloc (malloc_size);
+  unoptimized_free (ptr);
+  return NULL;
+}
+
+static void *
+outer_thread (void *closure)
+{
+  pthread_t *threads = xcalloc (sizeof (*threads), inner_thread_count);
+  while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
+    {
+      pthread_barrier_t barrier;
+      xpthread_barrier_init (&barrier, NULL, inner_thread_count + 1);
+      for (int i = 0; i < inner_thread_count; ++i)
+        {
+          void *(*func) (void *);
+          if ((i % 2) == 0)
+            func = malloc_first_thread;
+          else
+            func = wait_first_thread;
+          threads[i] = xpthread_create (NULL, func, &barrier);
+        }
+      xpthread_barrier_wait (&barrier);
+      for (int i = 0; i < inner_thread_count; ++i)
+        xpthread_join (threads[i]);
+      xpthread_barrier_destroy (&barrier);
+    }
+
+  free (threads);
+
+  return NULL;
+}
+
+static int
+do_test (void)
+{
+  /* The number of threads should be smaller than the number of
+     arenas, so that there will be some free arenas to add to the
+     arena free list.  */
+  enum { outer_thread_count = 2 };
+  if (mallopt (M_ARENA_MAX, 8) == 0)
+    {
+      printf ("error: mallopt (M_ARENA_MAX) failed\n");
+      return 1;
+    }
+
+  /* Leave some room for shutting down all threads gracefully.  */
+  int timeout = 3;
+  if (timeout > DEFAULT_TIMEOUT)
+    timeout = DEFAULT_TIMEOUT - 1;
+
+  pthread_t *threads = xcalloc (sizeof (*threads), outer_thread_count);
+  for (long i = 0; i < outer_thread_count; ++i)
+    threads[i] = xpthread_create (NULL, outer_thread, NULL);
+
+  struct timespec ts = {timeout, 0};
+  if (nanosleep (&ts, NULL))
+    {
+      printf ("error: error: nanosleep: %m\n");
+      abort ();
+    }
+
+  __atomic_store_n (&termination_requested, true, __ATOMIC_RELAXED);
+
+  for (long i = 0; i < outer_thread_count; ++i)
+    xpthread_join (threads[i]);
+  free (threads);
+
+  return 0;
+}
+
+#include <support/test-driver.c>
diff --git a/REORG.TODO/malloc/tst-malloc-thread-fail.c b/REORG.TODO/malloc/tst-malloc-thread-fail.c
new file mode 100644
index 0000000000..2745a33856
--- /dev/null
+++ b/REORG.TODO/malloc/tst-malloc-thread-fail.c
@@ -0,0 +1,442 @@
+/* Test allocation function behavior on allocation failure.
+   Copyright (C) 2015-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+/* This test case attempts to trigger various unusual conditions
+   related to allocation failures, notably switching to a different
+   arena, and falling back to mmap (via sysmalloc).  */
+
+#include <errno.h>
+#include <malloc.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+/* Wrapper for calloc with an optimization barrier.  */
+static void *
+__attribute__ ((noinline, noclone))
+allocate_zeroed (size_t a, size_t b)
+{
+  return calloc (a, b);
+}
+
+/* System page size, as determined by sysconf (_SC_PAGE_SIZE).  */
+static unsigned long page_size;
+
+/* Test parameters.  */
+static size_t allocation_size;
+static size_t alignment;
+static enum {
+  with_malloc,
+  with_realloc,
+  with_aligned_alloc,
+  with_memalign,
+  with_posix_memalign,
+  with_valloc,
+  with_pvalloc,
+  with_calloc,
+  last_allocation_function = with_calloc
+} allocation_function;
+
+/* True if an allocation function uses the alignment test
+   parameter.  */
+const static bool alignment_sensitive[last_allocation_function + 1] =
+  {
+    [with_aligned_alloc] = true,
+    [with_memalign] = true,
+    [with_posix_memalign] = true,
+  };
+
+/* Combined pointer/expected alignment result of an allocation
+   function.  */
+struct allocate_result {
+  void *pointer;
+  size_t alignment;
+};
+
+/* Call the allocation function specified by allocation_function, with
+   allocation_size and alignment (if applicable) as arguments.  No
+   alignment check.  */
+static struct allocate_result
+allocate_1 (void)
+{
+  switch (allocation_function)
+    {
+    case with_malloc:
+      return (struct allocate_result)
+        {malloc (allocation_size), _Alignof (max_align_t)};
+    case with_realloc:
+      {
+        void *p = realloc (NULL, 16);
+        void *q;
+        if (p == NULL)
+          q = NULL;
+        else
+          {
+            q = realloc (p, allocation_size);
+            if (q == NULL)
+              free (p);
+          }
+        return (struct allocate_result) {q, _Alignof (max_align_t)};
+      }
+    case with_aligned_alloc:
+      {
+        void *p = aligned_alloc (alignment, allocation_size);
+        return (struct allocate_result) {p, alignment};
+      }
+    case with_memalign:
+      {
+        void *p = memalign (alignment, allocation_size);
+        return (struct allocate_result) {p, alignment};
+      }
+    case with_posix_memalign:
+      {
+        void *p;
+        if (posix_memalign (&p, alignment, allocation_size))
+          {
+            if (errno == ENOMEM)
+              p = NULL;
+            else
+              {
+                printf ("error: posix_memalign (p, %zu, %zu): %m\n",
+                        alignment, allocation_size);
+                abort ();
+              }
+          }
+        return (struct allocate_result) {p, alignment};
+      }
+    case with_valloc:
+      {
+        void *p = valloc (allocation_size);
+        return (struct allocate_result) {p, page_size};
+      }
+    case with_pvalloc:
+      {
+        void *p = pvalloc (allocation_size);
+        return (struct allocate_result) {p, page_size};
+      }
+    case with_calloc:
+      {
+        char *p = allocate_zeroed (1, allocation_size);
+        /* Check for non-zero bytes.  */
+        if (p != NULL)
+          for (size_t i = 0; i < allocation_size; ++i)
+            if (p[i] != 0)
+              {
+                printf ("error: non-zero byte at offset %zu\n", i);
+                abort ();
+              }
+        return (struct allocate_result) {p, _Alignof (max_align_t)};
+      }
+    }
+  abort ();
+}
+
+/* Call allocate_1 and perform the alignment check on the result.  */
+static void *
+allocate (void)
+{
+  struct allocate_result r = allocate_1 ();
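+  /* All alignments used by this test (_Alignof (max_align_t), the
+     page size, and 128) are powers of two, so masking with
+     alignment - 1 below is sufficient to detect misalignment.  */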
+  if ((((uintptr_t) r.pointer) & (r.alignment - 1)) != 0)
+    {
+      printf ("error: allocation function %d, size %zu not aligned to %zu\n",
+              (int) allocation_function, allocation_size, r.alignment);
+      abort ();
+    }
+  return r.pointer;
+}
+
+/* Barriers to synchronize thread creation and termination.  */
+static pthread_barrier_t start_barrier;
+static pthread_barrier_t end_barrier;
+
+/* Thread function which performs the allocation test.  Called by
+   pthread_create and from the main thread.  */
+static void *
+allocate_thread (void *closure)
+{
+  /* Wait for the creation of all threads.  */
+  {
+    int ret = pthread_barrier_wait (&start_barrier);
+    if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD)
+      {
+        errno = ret;
+        printf ("error: pthread_barrier_wait: %m\n");
+        abort ();
+      }
+  }
+
+  /* Allocate until we run out of memory, creating a singly-linked
+     list.  */
+  struct list {
+    struct list *next;
+  };
+  struct list *head = NULL;
+  while (true)
+    {
+      struct list *e = allocate ();
+      if (e == NULL)
+        break;
+
+      e->next = head;
+      head = e;
+    }
+
+  /* Wait for the allocation of all available memory.  */
+  {
+    int ret = pthread_barrier_wait (&end_barrier);
+    if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD)
+      {
+        errno = ret;
+        printf ("error: pthread_barrier_wait: %m\n");
+        abort ();
+      }
+  }
+
+  /* Free the allocated memory.  */
+  while (head != NULL)
+    {
+      struct list *next = head->next;
+      free (head);
+      head = next;
+    }
+
+  return NULL;
+}
+
+/* Number of threads (plus the main thread).  */
+enum { thread_count = 8 };
+
+/* Thread attribute to request creation of threads with a non-default
+   stack size which is rather small.  This avoids interfering with the
+   configured address space limit.  */
+static pthread_attr_t small_stack;
+
+/* Runs one test in multiple threads, all in a subprocess so that
+   subsequent tests do not interfere with each other.  */
+static void
+run_one (void)
+{
+  /* Isolate the tests in a subprocess, so that we can start over
+     from scratch.  */
+  pid_t pid = fork ();
+  if (pid == 0)
+    {
+      /* In the child process.  Create the allocation threads.  */
+      pthread_t threads[thread_count];
+
+      for (unsigned i = 0; i < thread_count; ++i)
+        {
+          int ret = pthread_create (threads + i, &small_stack, allocate_thread, NULL);
+          if (ret != 0)
+            {
+              errno = ret;
+              printf ("error: pthread_create: %m\n");
+              abort ();
+            }
+        }
+
+      /* Also run the test on the main thread.  */
+      allocate_thread (NULL);
+
+      for (unsigned i = 0; i < thread_count; ++i)
+        {
+          int ret = pthread_join (threads[i], NULL);
+          if (ret != 0)
+            {
+              errno = ret;
+              printf ("error: pthread_join: %m\n");
+              abort ();
+            }
+        }
+      _exit (0);
+    }
+  else if (pid < 0)
+    {
+      printf ("error: fork: %m\n");
+      abort ();
+    }
+
+  /* In the parent process.  Wait for the child process to exit.  */
+  int status;
+  if (waitpid (pid, &status, 0) < 0)
+    {
+      printf ("error: waitpid: %m\n");
+      abort ();
+    }
+  if (status != 0)
+    {
+      printf ("error: exit status %d from child process\n", status);
+      exit (1);
+    }
+}
+
+/* Run all applicable allocation functions for the current test
+   parameters.  */
+static void
+run_allocation_functions (void)
+{
+  for (int af = 0; af <= last_allocation_function; ++af)
+    {
+      /* Run alignment-sensitive functions for non-default
+         alignments.  */
+      if (alignment_sensitive[af] != (alignment != 0))
+        continue;
+      allocation_function = af;
+      run_one ();
+    }
+}
+
+int
+do_test (void)
+{
+  /* Limit the number of malloc arenas.  We use a very low number so
+     that, despite the address space limit configured below, all
+     requested arenas can be created.  */
+  if (mallopt (M_ARENA_MAX, 2) == 0)
+    {
+      printf ("error: mallopt (M_ARENA_MAX) failed\n");
+      return 1;
+    }
+
+  /* Determine the page size.  */
+  {
+    long ret = sysconf (_SC_PAGE_SIZE);
+    if (ret < 0)
+      {
+        printf ("error: sysconf (_SC_PAGE_SIZE): %m\n");
+        return 1;
+      }
+    page_size = ret;
+  }
+
+  /* Limit the size of the process, so that memory allocation in
+     allocate_thread will eventually fail, without impacting the
+     entire system.  */
+  {
+    struct rlimit limit;
+    if (getrlimit (RLIMIT_AS, &limit) != 0)
+      {
+        printf ("getrlimit (RLIMIT_AS) failed: %m\n");
+        return 1;
+      }
+    long target = 200 * 1024 * 1024;
+    if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > target)
+      {
+        limit.rlim_cur = target;
+        if (setrlimit (RLIMIT_AS, &limit) != 0)
+          {
+            printf ("setrlimit (RLIMIT_AS) failed: %m\n");
+            return 1;
+          }
+      }
+  }
+
+  /* Initialize thread attribute with a reduced stack size.  */
+  {
+    int ret = pthread_attr_init (&small_stack);
+    if (ret != 0)
+      {
+        errno = ret;
+        printf ("error: pthread_attr_init: %m\n");
+        abort ();
+      }
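+    /* Aim for roughly 256 KiB, rounded down to a whole number of
+       pages; if that comes out to fewer than four pages (e.g. with
+       unusually large pages), use eight pages instead.  */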
+    unsigned long stack_size = ((256 * 1024) / page_size) * page_size;
+    if (stack_size < 4 * page_size)
+      stack_size = 8 * page_size;
+    ret = pthread_attr_setstacksize (&small_stack, stack_size);
+    if (ret != 0)
+      {
+        errno = ret;
+        printf ("error: pthread_attr_setstacksize: %m\n");
+        abort ();
+      }
+  }
+
+  /* Initialize the barriers.  We run thread_count threads, plus 1 for
+     the main thread.  */
+  {
+    int ret = pthread_barrier_init (&start_barrier, NULL, thread_count + 1);
+    if (ret != 0)
+      {
+        errno = ret;
+        printf ("error: pthread_barrier_init: %m\n");
+        abort ();
+      }
+
+    ret = pthread_barrier_init (&end_barrier, NULL, thread_count + 1);
+    if (ret != 0)
+      {
+        errno = ret;
+        printf ("error: pthread_barrier_init: %m\n");
+        abort ();
+      }
+  }
+
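+  /* Run the non-aligned allocation functions first (alignment is
+     still 0 here), then the alignment-sensitive ones with a 128-byte
+     alignment, covering small, page-sized, and multi-page sizes.  */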
+  allocation_size = 144;
+  run_allocation_functions ();
+  allocation_size = page_size;
+  run_allocation_functions ();
+
+  alignment = 128;
+  allocation_size = 512;
+  run_allocation_functions ();
+
+  allocation_size = page_size;
+  run_allocation_functions ();
+
+  allocation_size = 17 * page_size;
+  run_allocation_functions ();
+
+  /* Deallocate the barriers and the thread attribute.  */
+  {
+    int ret = pthread_barrier_destroy (&end_barrier);
+    if (ret != 0)
+      {
+        errno = ret;
+        printf ("error: pthread_barrier_destroy: %m\n");
+        return 1;
+      }
+    ret = pthread_barrier_destroy (&start_barrier);
+    if (ret != 0)
+      {
+        errno = ret;
+        printf ("error: pthread_barrier_destroy: %m\n");
+        return 1;
+      }
+    ret = pthread_attr_destroy (&small_stack);
+    if (ret != 0)
+      {
+        errno = ret;
+        printf ("error: pthread_attr_destroy: %m\n");
+        return 1;
+      }
+  }
+
+  return 0;
+}
+
+/* The repeated allocations take some time on slow machines.  */
+#define TIMEOUT 100
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-malloc-usable-static-tunables.c b/REORG.TODO/malloc/tst-malloc-usable-static-tunables.c
new file mode 100644
index 0000000000..8907db01a5
--- /dev/null
+++ b/REORG.TODO/malloc/tst-malloc-usable-static-tunables.c
@@ -0,0 +1 @@
+#include <malloc/tst-malloc-usable.c>
diff --git a/REORG.TODO/malloc/tst-malloc-usable-static.c b/REORG.TODO/malloc/tst-malloc-usable-static.c
new file mode 100644
index 0000000000..8907db01a5
--- /dev/null
+++ b/REORG.TODO/malloc/tst-malloc-usable-static.c
@@ -0,0 +1 @@
+#include <malloc/tst-malloc-usable.c>
diff --git a/REORG.TODO/malloc/tst-malloc-usable-tunables.c b/REORG.TODO/malloc/tst-malloc-usable-tunables.c
new file mode 100644
index 0000000000..8907db01a5
--- /dev/null
+++ b/REORG.TODO/malloc/tst-malloc-usable-tunables.c
@@ -0,0 +1 @@
+#include <malloc/tst-malloc-usable.c>
diff --git a/REORG.TODO/malloc/tst-malloc-usable.c b/REORG.TODO/malloc/tst-malloc-usable.c
new file mode 100644
index 0000000000..6a64d204c7
--- /dev/null
+++ b/REORG.TODO/malloc/tst-malloc-usable.c
@@ -0,0 +1,49 @@
+/* Ensure that malloc_usable_size returns the request size with
+   MALLOC_CHECK_ exported to a positive value.
+
+   Copyright (C) 2012-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <malloc.h>
+#include <string.h>
+#include <stdio.h>
+
+static int
+do_test (void)
+{
+  size_t usable_size;
+  void *p = malloc (7);
+  if (!p)
+    {
+      printf ("memory allocation failed\n");
+      return 1;
+    }
+
+  usable_size = malloc_usable_size (p);
+  if (usable_size != 7)
+    {
+      printf ("malloc_usable_size: expected 7 but got %zu\n", usable_size);
+      return 1;
+    }
+
+  memset (p, 0, usable_size);
+  free (p);
+  return 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-malloc.c b/REORG.TODO/malloc/tst-malloc.c
new file mode 100644
index 0000000000..dbc8d4ab56
--- /dev/null
+++ b/REORG.TODO/malloc/tst-malloc.c
@@ -0,0 +1,95 @@
+/* Copyright (C) 1999-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Andreas Jaeger <aj@arthur.rhein-neckar.de>, 1999.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <libc-diag.h>
+
+static int errors = 0;
+
+static void
+merror (const char *msg)
+{
+  ++errors;
+  printf ("Error: %s\n", msg);
+}
+
+static int
+do_test (void)
+{
+  void *p, *q;
+  int save;
+
+  errno = 0;
+
+  DIAG_PUSH_NEEDS_COMMENT;
+#if __GNUC_PREREQ (7, 0)
+  /* GCC 7 warns about too-large allocations; here we want to test
+     that they fail.  */
+  DIAG_IGNORE_NEEDS_COMMENT (7, "-Walloc-size-larger-than=");
+#endif
+  p = malloc (-1);
+  DIAG_POP_NEEDS_COMMENT;
+  save = errno;
+
+  if (p != NULL)
+    merror ("malloc (-1) succeeded.");
+
+  if (p == NULL && save != ENOMEM)
+    merror ("errno is not set correctly");
+
+  p = malloc (10);
+  if (p == NULL)
+    merror ("malloc (10) failed.");
+
+  /* realloc (p, 0) == free (p).  */
+  p = realloc (p, 0);
+  if (p != NULL)
+    merror ("realloc (p, 0) failed.");
+
+  p = malloc (0);
+  if (p == NULL)
+    merror ("malloc (0) failed.");
+
+  p = realloc (p, 0);
+  if (p != NULL)
+    merror ("realloc (p, 0) failed.");
+
+  p = malloc (513 * 1024);
+  if (p == NULL)
+    merror ("malloc (513K) failed.");
+
+  DIAG_PUSH_NEEDS_COMMENT;
+#if __GNUC_PREREQ (7, 0)
+  /* GCC 7 warns about too-large allocations; here we want to test
+     that they fail.  */
+  DIAG_IGNORE_NEEDS_COMMENT (7, "-Walloc-size-larger-than=");
+#endif
+  q = malloc (-512 * 1024);
+  DIAG_POP_NEEDS_COMMENT;
+  if (q != NULL)
+    merror ("malloc (-512K) succeeded.");
+
+  free (p);
+
+  return errors != 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-mallocfork.c b/REORG.TODO/malloc/tst-mallocfork.c
new file mode 100644
index 0000000000..f90ce94887
--- /dev/null
+++ b/REORG.TODO/malloc/tst-mallocfork.c
@@ -0,0 +1,51 @@
+/* Derived from the test case in
+   http://sourceware.org/bugzilla/show_bug.cgi?id=838.  */
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+static void
+sig_handler (int signum)
+{
+  pid_t child = fork ();
+  if (child == 0)
+    exit (0);
+  TEMP_FAILURE_RETRY (waitpid (child, NULL, 0));
+}
+
+static int
+do_test (void)
+{
+  pid_t parent = getpid ();
+
+  struct sigaction action = { .sa_handler = sig_handler };
+  sigemptyset (&action.sa_mask);
+
+  malloc (sizeof (int));
+
+  if (sigaction (SIGALRM, &action, NULL) != 0)
+    {
+      puts ("sigaction failed");
+      return 1;
+    }
+
+  /* Create a child that sends the signal to be caught.  */
+  pid_t child = fork ();
+  if (child == 0)
+    {
+      if (kill (parent, SIGALRM) == -1)
+	perror ("kill");
+      exit (0);
+    }
+
+  TEMP_FAILURE_RETRY (waitpid (child, NULL, 0));
+
+  return 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-mallocfork2.c b/REORG.TODO/malloc/tst-mallocfork2.c
new file mode 100644
index 0000000000..bb6a3319d8
--- /dev/null
+++ b/REORG.TODO/malloc/tst-mallocfork2.c
@@ -0,0 +1,211 @@
+/* Test case for async-signal-safe fork (with respect to malloc).
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+/* This test will fail if the process is multi-threaded because we
+   only have an async-signal-safe fork in the single-threaded case
+   (where we skip acquiring the malloc heap locks).
+
+   This test only checks async-signal-safety with regard to malloc;
+   other, more rarely-used glibc subsystems could have locks which
+   still make fork unsafe, even in single-threaded processes.  */
+
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+
+/* How many malloc objects to keep around.  */
+enum { malloc_objects = 1009 };
+
+/* The maximum size of an object.  */
+enum { malloc_maximum_size = 70000 };
+
+/* How many signals need to be delivered before the test exits.  */
+enum { signal_count = 1000 };
+
+static int do_test (void);
+#define TIMEOUT 100
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
+
+/* Process ID of the subprocess which sends SIGUSR1 signals.  */
+static pid_t sigusr1_sender_pid;
+
+/* Set to 1 if SIGUSR1 is received.  Used to detect a signal during
+   malloc/free.  */
+static volatile sig_atomic_t sigusr1_received;
+
+/* Periodically set to 1, to indicate that the process is making
+   progress.  Checked by liveness_signal_handler.  */
+static volatile sig_atomic_t progress_indicator = 1;
+
+static void
+sigusr1_handler (int signo)
+{
+  /* Let the main program make progress, by temporarily suspending
+     signals from the subprocess.  */
+  if (sigusr1_received)
+    return;
+  /* sigusr1_sender_pid might not be initialized in the parent when
+     the first SIGUSR1 signal arrives.  */
+  if (sigusr1_sender_pid > 0 && kill (sigusr1_sender_pid, SIGSTOP) != 0)
+    {
+      write_message ("error: kill (SIGSTOP)\n");
+      abort ();
+    }
+  sigusr1_received = 1;
+
+  /* Perform a fork with a trivial subprocess.  */
+  pid_t pid = fork ();
+  if (pid == -1)
+    {
+      write_message ("error: fork\n");
+      abort ();
+    }
+  if (pid == 0)
+    _exit (0);
+  int status;
+  int ret = TEMP_FAILURE_RETRY (waitpid (pid, &status, 0));
+  if (ret < 0)
+    {
+      write_message ("error: waitpid\n");
+      abort ();
+    }
+  if (status != 0)
+    {
+      write_message ("error: unexpected exit status from subprocess\n");
+      abort ();
+    }
+}
+
+static void
+liveness_signal_handler (int signo)
+{
+  if (progress_indicator)
+    progress_indicator = 0;
+  else
+    write_message ("warning: process seems to be stuck\n");
+}
+
+static void
+__attribute__ ((noreturn))
+signal_sender (int signo, bool sleep)
+{
+  pid_t target = getppid ();
+  while (true)
+    {
+      if (kill (target, signo) != 0)
+        {
+          dprintf (STDOUT_FILENO, "error: kill: %m\n");
+          abort ();
+        }
+      if (sleep)
+        usleep (1 * 1000 * 1000);
+      else
+        /* Reduce the rate at which we send signals.  */
+        sched_yield ();
+    }
+}
+
+static int
+do_test (void)
+{
+  struct sigaction action =
+    {
+      .sa_handler = sigusr1_handler,
+    };
+  sigemptyset (&action.sa_mask);
+
+  if (sigaction (SIGUSR1, &action, NULL) != 0)
+    {
+      printf ("error: sigaction: %m");
+      return 1;
+    }
+
+  action.sa_handler = liveness_signal_handler;
+  if (sigaction (SIGUSR2, &action, NULL) != 0)
+    {
+      printf ("error: sigaction: %m");
+      return 1;
+    }
+
+  pid_t sigusr2_sender_pid = fork ();
+  if (sigusr2_sender_pid == 0)
+    signal_sender (SIGUSR2, true);
+  sigusr1_sender_pid = fork ();
+  if (sigusr1_sender_pid == 0)
+    signal_sender (SIGUSR1, false);
+
+  void *objects[malloc_objects] = {};
+  unsigned signals = 0;
+  unsigned seed = 1;
+  time_t last_report = 0;
+  while (signals < signal_count)
+    {
+      progress_indicator = 1;
+      int slot = rand_r (&seed) % malloc_objects;
+      size_t size = rand_r (&seed) % malloc_maximum_size;
+      if (kill (sigusr1_sender_pid, SIGCONT) != 0)
+        {
+          printf ("error: kill (SIGCONT): %m\n");
+          signal (SIGUSR1, SIG_IGN);
+          kill (sigusr1_sender_pid, SIGKILL);
+          kill (sigusr2_sender_pid, SIGKILL);
+          return 1;
+        }
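+      /* Clear the flag before the free/malloc pair below, so that a
+         SIGUSR1 delivered while those calls run (forking inside the
+         signal handler) is detected and counted.  */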
+      sigusr1_received = false;
+      free (objects[slot]);
+      objects[slot] = malloc (size);
+      if (sigusr1_received)
+        {
+          ++signals;
+          time_t current = time (0);
+          if (current != last_report)
+            {
+              printf ("info: SIGUSR1 signal count: %u\n", signals);
+              last_report = current;
+            }
+        }
+      if (objects[slot] == NULL)
+        {
+          printf ("error: malloc: %m\n");
+          signal (SIGUSR1, SIG_IGN);
+          kill (sigusr1_sender_pid, SIGKILL);
+          kill (sigusr2_sender_pid, SIGKILL);
+          return 1;
+        }
+    }
+
+  /* Clean up allocations.  */
+  for (int slot = 0; slot < malloc_objects; ++slot)
+    free (objects[slot]);
+
+  /* Terminate the signal-sending subprocesses.  The SIGUSR1 handler
+     should no longer run because it uses sigusr1_sender_pid.  */
+  signal (SIGUSR1, SIG_IGN);
+  kill (sigusr1_sender_pid, SIGKILL);
+  kill (sigusr2_sender_pid, SIGKILL);
+
+  return 0;
+}
diff --git a/REORG.TODO/malloc/tst-mallocstate.c b/REORG.TODO/malloc/tst-mallocstate.c
new file mode 100644
index 0000000000..5cb39c0f3d
--- /dev/null
+++ b/REORG.TODO/malloc/tst-mallocstate.c
@@ -0,0 +1,505 @@
+/* Emulate Emacs heap dumping to test malloc_set_state.
+   Copyright (C) 2001-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <libc-symbols.h>
+#include <shlib-compat.h>
+
+#include "malloc.h"
+
+/* Make the compatibility symbols available to this test case.  */
+void *malloc_get_state (void);
+compat_symbol_reference (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
+int malloc_set_state (void *);
+compat_symbol_reference (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
+
+static int do_test (void);
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
+
+/* Maximum object size in the fake heap.  */
+enum { max_size = 64 };
+
+/* Allocation actions.  These are randomized actions executed on the
+   dumped heap (see allocation_tasks below).  They are interspersed
+   with operations on the new heap (see heap_activity).  */
+enum allocation_action
+  {
+    action_free,                /* Dumped and freed.  */
+    action_realloc,             /* Dumped and realloc'ed.  */
+    action_realloc_same,        /* Dumped and realloc'ed, same size.  */
+    action_realloc_smaller,     /* Dumped and realloc'ed, shrunk.  */
+    action_count
+  };
+
+/* Dumped heap.  Initialize it, so that the object is placed into the
+   .data section, for increased realism.  The size is an upper bound;
+   we use about half of the space.  */
+static size_t dumped_heap[action_count * max_size * max_size
+                          / sizeof (size_t)] = {1};
+
+/* Next free space in the dumped heap.  Also top of the heap at the
+   end of the initialization procedure.  */
+static size_t *next_heap_chunk;
+
+/* Copied from malloc.c and hooks.c.  The version is deliberately
+   lower than the final version of malloc_set_state.  */
+#define NBINS 128
+#define MALLOC_STATE_MAGIC   0x444c4541l
+#define MALLOC_STATE_VERSION (0 * 0x100l + 4l)
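+/* The version presumably encodes major * 0x100 + minor, i.e. 0.4.  */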
+static struct
+{
+  long magic;
+  long version;
+  void *av[NBINS * 2 + 2];
+  char *sbrk_base;
+  int sbrked_mem_bytes;
+  unsigned long trim_threshold;
+  unsigned long top_pad;
+  unsigned int n_mmaps_max;
+  unsigned long mmap_threshold;
+  int check_action;
+  unsigned long max_sbrked_mem;
+  unsigned long max_total_mem;
+  unsigned int n_mmaps;
+  unsigned int max_n_mmaps;
+  unsigned long mmapped_mem;
+  unsigned long max_mmapped_mem;
+  int using_malloc_checking;
+  unsigned long max_fast;
+  unsigned long arena_test;
+  unsigned long arena_max;
+  unsigned long narenas;
+} save_state =
+  {
+    .magic = MALLOC_STATE_MAGIC,
+    .version = MALLOC_STATE_VERSION,
+  };
+
+/* Allocate a blob in the fake heap.  */
+static void *
+dumped_heap_alloc (size_t length)
+{
+  /* malloc needs three state bits in the size field, so the minimum
+     alignment is 8 even on 32-bit architectures.  malloc_set_state
+     should be compatible with such heaps even if it currently
+     provides more alignment to applications.  */
+  enum
+  {
+    heap_alignment = 8,
+    heap_alignment_mask = heap_alignment - 1
+  };
+  _Static_assert (sizeof (size_t) <= heap_alignment,
+                  "size_t compatible with heap alignment");
+
+  /* Need at least this many bytes for metadata and application
+     data. */
+  size_t chunk_size = sizeof (size_t) + length;
+  /* Round up the allocation size to the heap alignment.  */
+  chunk_size += heap_alignment_mask;
+  chunk_size &= ~heap_alignment_mask;
+  if ((chunk_size & 3) != 0)
+    {
+      /* The lower three bits in the chunk size have to be 0.  */
+      write_message ("error: dumped_heap_alloc computed invalid chunk size\n");
+      _exit (1);
+    }
+  if (next_heap_chunk == NULL)
+    /* Initialize the top of the heap.  Add one word of zero padding,
+       to match existing practice.  */
+    {
+      dumped_heap[0] = 0;
+      next_heap_chunk = dumped_heap + 1;
+    }
+  else
+    /* The previous chunk is allocated. */
+    chunk_size |= 1;
+  *next_heap_chunk = chunk_size;
+
+  /* User data starts after the chunk header.  */
+  void *result = next_heap_chunk + 1;
+  next_heap_chunk += chunk_size / sizeof (size_t);
+
+  /* Mark the previous chunk as used.   */
+  *next_heap_chunk = 1;
+  return result;
+}
+
+/* Global seed variable for the random number generator.  */
+static unsigned long long global_seed;
+
+/* Simple random number generator.  The numbers are in the range from
+   0 to UINT_MAX (inclusive).  */
+static unsigned int
+rand_next (unsigned long long *seed)
+{
+  /* Linear congruential generator as used for MMIX.  */
+  *seed = *seed * 6364136223846793005ULL + 1442695040888963407ULL;
+  return *seed >> 32;
+}
+
+/* Fill LENGTH bytes at BUFFER with random contents, as determined by
+   SEED.  */
+static void
+randomize_buffer (unsigned char *buffer, size_t length,
+                  unsigned long long seed)
+{
+  for (size_t i = 0; i < length; ++i)
+    buffer[i] = rand_next (&seed);
+}
+
+/* Dumps the buffer to standard output, in hexadecimal.  */
+static void
+dump_hex (unsigned char *buffer, size_t length)
+{
+  for (int i = 0; i < length; ++i)
+    printf (" %02X", buffer[i]);
+}
+
+/* Set to true if an error is encountered.  */
+static bool errors = false;
+
+/* Keep track of object allocations.  */
+struct allocation
+{
+  unsigned char *data;
+  unsigned int size;
+  unsigned int seed;
+};
+
+/* Check that the allocation task allocation has the expected
+   contents.  */
+static void
+check_allocation (const struct allocation *alloc, int index)
+{
+  size_t size = alloc->size;
+  if (alloc->data == NULL)
+    {
+      printf ("error: NULL pointer for allocation of size %zu at %d, seed %u\n",
+              size, index, alloc->seed);
+      errors = true;
+      return;
+    }
+
+  unsigned char expected[4096];
+  if (size > sizeof (expected))
+    {
+      printf ("error: invalid allocation size %zu at %d, seed %u\n",
+              size, index, alloc->seed);
+      errors = true;
+      return;
+    }
+  randomize_buffer (expected, size, alloc->seed);
+  if (memcmp (alloc->data, expected, size) != 0)
+    {
+      printf ("error: allocation %d data mismatch, size %zu, seed %u\n",
+              index, size, alloc->seed);
+      printf ("  expected:");
+      dump_hex (expected, size);
+      putc ('\n', stdout);
+      printf ("    actual:");
+      dump_hex (alloc->data, size);
+      putc ('\n', stdout);
+      errors = true;
+    }
+}
+
+/* A heap allocation combined with pending actions on it.  */
+struct allocation_task
+{
+  struct allocation allocation;
+  enum allocation_action action;
+};
+
+/* Allocation tasks.  Initialized by init_allocation_tasks and used by
+   perform_allocations.  */
+enum { allocation_task_count = action_count * max_size };
+static struct allocation_task allocation_tasks[allocation_task_count];
+
+/* Fisher-Yates shuffle of allocation_tasks.  */
+static void
+shuffle_allocation_tasks (void)
+{
+  for (int i = 0; i < allocation_task_count - 1; ++i)
+    {
+      /* Pick pair in the tail of the array.  */
+      int j = i + (rand_next (&global_seed)
+                   % ((unsigned) (allocation_task_count - i)));
+      if (j < 0 || j >= allocation_task_count)
+        {
+          write_message ("error: test bug in shuffle\n");
+          _exit (1);
+        }
+      /* Exchange. */
+      struct allocation_task tmp = allocation_tasks[i];
+      allocation_tasks[i] = allocation_tasks[j];
+      allocation_tasks[j] = tmp;
+    }
+}
+
+/* Set up the allocation tasks and the dumped heap.  */
+static void
+initial_allocations (void)
+{
+  /* Initialize in a position-dependent way.  */
+  for (int i = 0; i < allocation_task_count; ++i)
+    allocation_tasks[i] = (struct allocation_task)
+      {
+        .allocation =
+          {
+            .size = 1 + (i / action_count),
+            .seed = i,
+          },
+        .action = i % action_count
+      };
+
+  /* Execute the tasks in a random order.  */
+  shuffle_allocation_tasks ();
+
+  /* Initialize the contents of the dumped heap.   */
+  for (int i = 0; i < allocation_task_count; ++i)
+    {
+      struct allocation_task *task = allocation_tasks + i;
+      task->allocation.data = dumped_heap_alloc (task->allocation.size);
+      randomize_buffer (task->allocation.data, task->allocation.size,
+                        task->allocation.seed);
+    }
+
+  for (int i = 0; i < allocation_task_count; ++i)
+    check_allocation (&allocation_tasks[i].allocation, i);
+}
+
+/* Indicates whether init_heap has run.  This variable needs to be
+   volatile because malloc is declared __THROW, which implies it is a
+   leaf function, but we expect it to run our hooks.  */
+static volatile bool heap_initialized;
+
+/* Executed by glibc malloc, through __malloc_initialize_hook
+   below.  */
+static void
+init_heap (void)
+{
+  write_message ("info: performing heap initialization\n");
+  heap_initialized = true;
+
+  /* Populate the dumped heap.  */
+  initial_allocations ();
+
+  /* Complete initialization of the saved heap data structure.  */
+  save_state.sbrk_base = (void *) dumped_heap;
+  save_state.sbrked_mem_bytes = sizeof (dumped_heap);
+  /* Top pointer.  Adjust so that it points to the start of struct
+     malloc_chunk.  */
+  save_state.av[2] = (void *) (next_heap_chunk - 1);
+
+  /* Integrate the dumped heap into the process heap.  */
+  if (malloc_set_state (&save_state) != 0)
+    {
+      write_message ("error: malloc_set_state failed\n");
+      _exit (1);
+    }
+}
+
+/* Interpose the initialization callback.  */
+void (*volatile __malloc_initialize_hook) (void) = init_heap;
+
+/* Simulate occasional unrelated heap activity in the non-dumped
+   heap.  */
+enum { heap_activity_allocations_count = 32 };
+static struct allocation heap_activity_allocations
+  [heap_activity_allocations_count] = {};
+static int heap_activity_seed_counter = 1000 * 1000;
+
+static void
+heap_activity (void)
+{
+  /* Only do this from time to time.  */
+  if ((rand_next (&global_seed) % 4) == 0)
+    {
+      int slot = rand_next (&global_seed) % heap_activity_allocations_count;
+      struct allocation *alloc = heap_activity_allocations + slot;
+      if (alloc->data == NULL)
+        {
+          alloc->size = rand_next (&global_seed) % (4096U + 1);
+          alloc->data = xmalloc (alloc->size);
+          alloc->seed = heap_activity_seed_counter++;
+          randomize_buffer (alloc->data, alloc->size, alloc->seed);
+          check_allocation (alloc, 1000 + slot);
+        }
+      else
+        {
+          check_allocation (alloc, 1000 + slot);
+          free (alloc->data);
+          alloc->data = NULL;
+        }
+    }
+}
+
+static void
+heap_activity_deallocate (void)
+{
+  for (int i = 0; i < heap_activity_allocations_count; ++i)
+    free (heap_activity_allocations[i].data);
+}
+
+/* Perform a full heap check across the dumped heap allocation tasks,
+   and the simulated heap activity directly above.  */
+static void
+full_heap_check (void)
+{
+  /* Dumped heap.  */
+  for (int i = 0; i < allocation_task_count; ++i)
+    if (allocation_tasks[i].allocation.data != NULL)
+      check_allocation (&allocation_tasks[i].allocation, i);
+
+  /* Heap activity allocations.  */
+  for (int i = 0; i < heap_activity_allocations_count; ++i)
+    if (heap_activity_allocations[i].data != NULL)
+      check_allocation (heap_activity_allocations + i, i);
+}
+
+/* Used as an optimization barrier to force a heap allocation.  */
+__attribute__ ((noinline, noclone))
+static void
+my_free (void *ptr)
+{
+  free (ptr);
+}
+
+static int
+do_test (void)
+{
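+  /* This first allocation triggers __malloc_initialize_hook
+     (init_heap above), which populates the dumped heap and calls
+     malloc_set_state.  */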
+  my_free (malloc (1));
+  if (!heap_initialized)
+    {
+      printf ("error: heap was not initialized by malloc\n");
+      return 1;
+    }
+
+  /* The first pass performs the randomly generated allocation
+     tasks.  */
+  write_message ("info: first pass through allocation tasks\n");
+  full_heap_check ();
+
+  /* Execute the post-undump tasks in a random order.  */
+  shuffle_allocation_tasks ();
+
+  for (int i = 0; i < allocation_task_count; ++i)
+    {
+      heap_activity ();
+      struct allocation_task *task = allocation_tasks + i;
+      switch (task->action)
+        {
+        case action_free:
+          check_allocation (&task->allocation, i);
+          free (task->allocation.data);
+          task->allocation.data = NULL;
+          break;
+
+        case action_realloc:
+          check_allocation (&task->allocation, i);
+          task->allocation.data = xrealloc
+            (task->allocation.data, task->allocation.size + max_size);
+          check_allocation (&task->allocation, i);
+          break;
+
+        case action_realloc_same:
+          check_allocation (&task->allocation, i);
+          task->allocation.data = xrealloc
+            (task->allocation.data, task->allocation.size);
+          check_allocation (&task->allocation, i);
+          break;
+
+        case action_realloc_smaller:
+          check_allocation (&task->allocation, i);
+          size_t new_size = task->allocation.size - 1;
+          task->allocation.data = xrealloc (task->allocation.data, new_size);
+          if (new_size == 0)
+            {
+              if (task->allocation.data != NULL)
+                {
+                  printf ("error: realloc with size zero did not deallocate\n");
+                  errors = true;
+                }
+              /* No further action on this task.  */
+              task->action = action_free;
+            }
+          else
+            {
+              task->allocation.size = new_size;
+              check_allocation (&task->allocation, i);
+            }
+          break;
+
+        case action_count:
+          abort ();
+        }
+      full_heap_check ();
+    }
+
+  /* The second pass frees the objects which were allocated during the
+     first pass.  */
+  write_message ("info: second pass through allocation tasks\n");
+
+  shuffle_allocation_tasks ();
+  for (int i = 0; i < allocation_task_count; ++i)
+    {
+      heap_activity ();
+      struct allocation_task *task = allocation_tasks + i;
+      switch (task->action)
+        {
+        case action_free:
+          /* Already freed, nothing to do.  */
+          break;
+
+        case action_realloc:
+        case action_realloc_same:
+        case action_realloc_smaller:
+          check_allocation (&task->allocation, i);
+          free (task->allocation.data);
+          task->allocation.data = NULL;
+          break;
+
+        case action_count:
+          abort ();
+        }
+      full_heap_check ();
+    }
+
+  heap_activity_deallocate ();
+
+  /* Check that the malloc_get_state stub behaves in the intended
+     way.  */
+  errno = 0;
+  if (malloc_get_state () != NULL)
+    {
+      printf ("error: malloc_get_state succeeded\n");
+      errors = true;
+    }
+  if (errno != ENOSYS)
+    {
+      printf ("error: malloc_get_state: %m\n");
+      errors = true;
+    }
+
+  return errors;
+}
diff --git a/REORG.TODO/malloc/tst-mallopt.c b/REORG.TODO/malloc/tst-mallopt.c
new file mode 100644
index 0000000000..37f6c4cc04
--- /dev/null
+++ b/REORG.TODO/malloc/tst-mallopt.c
@@ -0,0 +1,75 @@
+/* Copyright (C) 2014-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <malloc.h>
+#include <stdio.h>
+#include <string.h>
+
+static int errors = 0;
+
+static void
+merror (const char *msg)
+{
+  ++errors;
+  printf ("Error: %s\n", msg);
+}
+
+static int
+do_test (void)
+{
+  int ret;
+
+  ret = mallopt(M_CHECK_ACTION, 1);
+
+  if (ret != 1)
+    merror ("mallopt (M_CHECK_ACTION, 1) failed.");
+
+  ret = mallopt(M_MMAP_MAX, 64*1024);
+
+  if (ret != 1)
+    merror ("mallopt (M_MMAP_MAX, 64*1024) failed.");
+
+  ret = mallopt(M_MMAP_THRESHOLD, 64*1024);
+
+  if (ret != 1)
+    merror ("mallopt (M_MMAP_THRESHOLD, 64*1024) failed.");
+
+  ret = mallopt(M_MXFAST, 0);
+
+  if (ret != 1)
+    merror ("mallopt (M_MXFAST, 0) failed.");
+
+  ret = mallopt(M_PERTURB, 0xa5);
+
+  if (ret != 1)
+    merror ("mallopt (M_PERTURB, 0xa5) failed.");
+
+  ret = mallopt(M_TOP_PAD, 64*1024);
+
+  if (ret != 1)
+    merror ("mallopt (M_TOP_PAD, 64*1024) failed.");
+
+  ret = mallopt(M_TRIM_THRESHOLD, -1);
+
+  if (ret != 1)
+    merror ("mallopt (M_TRIM_THRESHOLD, -1) failed.");
+
+  return errors != 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-mcheck.c b/REORG.TODO/malloc/tst-mcheck.c
new file mode 100644
index 0000000000..5a66bab331
--- /dev/null
+++ b/REORG.TODO/malloc/tst-mcheck.c
@@ -0,0 +1,115 @@
+/* Copyright (C) 2005-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2005.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <libc-diag.h>
+
+static int errors = 0;
+
+static void
+merror (const char *msg)
+{
+  ++errors;
+  printf ("Error: %s\n", msg);
+}
+
+static int
+do_test (void)
+{
+  void *p, *q;
+
+  errno = 0;
+
+  DIAG_PUSH_NEEDS_COMMENT;
+#if __GNUC_PREREQ (7, 0)
+  /* GCC 7 warns about too-large allocations; here we want to test
+     that they fail.  */
+  DIAG_IGNORE_NEEDS_COMMENT (7, "-Walloc-size-larger-than=");
+#endif
+  p = malloc (-1);
+  DIAG_POP_NEEDS_COMMENT;
+
+  if (p != NULL)
+    merror ("malloc (-1) succeeded.");
+  else if (errno != ENOMEM)
+    merror ("errno is not set correctly.");
+
+  p = malloc (10);
+  if (p == NULL)
+    merror ("malloc (10) failed.");
+
+  p = realloc (p, 0);
+  if (p != NULL)
+    merror ("realloc (p, 0) failed.");
+
+  p = malloc (0);
+  if (p == NULL)
+    merror ("malloc (0) failed.");
+
+  p = realloc (p, 0);
+  if (p != NULL)
+    merror ("realloc (p, 0) failed.");
+
+  q = malloc (256);
+  if (q == NULL)
+    merror ("malloc (256) failed.");
+
+  p = malloc (512);
+  if (p == NULL)
+    merror ("malloc (512) failed.");
+
+  DIAG_PUSH_NEEDS_COMMENT;
+#if __GNUC_PREREQ (7, 0)
+  /* GCC 7 warns about too-large allocations; here we want to test
+     that they fail.  */
+  DIAG_IGNORE_NEEDS_COMMENT (7, "-Walloc-size-larger-than=");
+#endif
+  if (realloc (p, -256) != NULL)
+    merror ("realloc (p, -256) succeeded.");
+  else if (errno != ENOMEM)
+    merror ("errno is not set correctly.");
+  DIAG_POP_NEEDS_COMMENT;
+
+  free (p);
+
+  p = malloc (512);
+  if (p == NULL)
+    merror ("malloc (512) failed.");
+
+  DIAG_PUSH_NEEDS_COMMENT;
+#if __GNUC_PREREQ (7, 0)
+  /* GCC 7 warns about too-large allocations; here we want to test
+     that they fail.  */
+  DIAG_IGNORE_NEEDS_COMMENT (7, "-Walloc-size-larger-than=");
+#endif
+  if (realloc (p, -1) != NULL)
+    merror ("realloc (p, -1) succeeded.");
+  else if (errno != ENOMEM)
+    merror ("errno is not set correctly.");
+  DIAG_POP_NEEDS_COMMENT;
+
+  free (p);
+  free (q);
+
+  return errors != 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-memalign.c b/REORG.TODO/malloc/tst-memalign.c
new file mode 100644
index 0000000000..b403464e27
--- /dev/null
+++ b/REORG.TODO/malloc/tst-memalign.c
@@ -0,0 +1,114 @@
+/* Test for memalign.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+static int errors = 0;
+
+static void
+merror (const char *msg)
+{
+  ++errors;
+  printf ("Error: %s\n", msg);
+}
+
+static int
+do_test (void)
+{
+  void *p;
+  unsigned long pagesize = getpagesize ();
+  unsigned long ptrval;
+  int save;
+
+  errno = 0;
+
+  /* An attempt to allocate a huge value should return NULL and set
+     errno to ENOMEM.  */
+  p = memalign (sizeof (void *), -1);
+
+  save = errno;
+
+  if (p != NULL)
+    merror ("memalign (sizeof (void *), -1) succeeded.");
+
+  if (p == NULL && save != ENOMEM)
+    merror ("memalign (sizeof (void *), -1) errno is not set correctly");
+
+  free (p);
+
+  errno = 0;
+
+  /* Test to expose integer overflow in malloc internals from BZ #15857.  */
+  p = memalign (pagesize, -pagesize);
+
+  save = errno;
+
+  if (p != NULL)
+    merror ("memalign (pagesize, -pagesize) succeeded.");
+
+  if (p == NULL && save != ENOMEM)
+    merror ("memalign (pagesize, -pagesize) errno is not set correctly");
+
+  free (p);
+
+  errno = 0;
+
+  /* Test to expose integer overflow in malloc internals from BZ #16038.  */
+  p = memalign (-1, pagesize);
+
+  save = errno;
+
+  if (p != NULL)
+    merror ("memalign (-1, pagesize) succeeded.");
+
+  if (p == NULL && save != EINVAL)
+    merror ("memalign (-1, pagesize) errno is not set correctly");
+
+  free (p);
+
+  /* A zero-sized allocation should succeed with glibc, returning a
+     non-NULL value.  */
+  p = memalign (sizeof (void *), 0);
+
+  if (p == NULL)
+    merror ("memalign (sizeof (void *), 0) failed.");
+
+  free (p);
+
+  /* Check the alignment of the returned pointer is correct.  */
+  p = memalign (0x100, 10);
+
+  if (p == NULL)
+    merror ("memalign (0x100, 10) failed.");
+
+  ptrval = (unsigned long) p;
+
+  if ((ptrval & 0xff) != 0)
+    merror ("pointer is not aligned to 0x100");
+
+  free (p);
+
+  return errors != 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-mtrace.c b/REORG.TODO/malloc/tst-mtrace.c
new file mode 100644
index 0000000000..2726c68b76
--- /dev/null
+++ b/REORG.TODO/malloc/tst-mtrace.c
@@ -0,0 +1,105 @@
+/* Test program for mtrace.
+   Copyright (C) 2000-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <mcheck.h>
+#include <paths.h>
+#include <search.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+static void print (const void *node, VISIT value, int level);
+
+/* Used for several purposes.  */
+static FILE *fp;
+
+
+static int
+do_test (void)
+{
+  void *root = NULL;
+  size_t linelen = 0;
+  char *line = NULL;
+
+  /* Enable memory usage tracing.  */
+  mtrace ();
+
+  /* Perform some operations which definitely will allocate some
+     memory.  */
+  fp = fopen (__FILE__, "r");
+  if (fp == NULL)
+    /* Shouldn't happen since this program is executed in the source
+       directory.  */
+    abort ();
+
+  while (!feof (fp))
+    {
+      char **p;
+      char *copy;
+      ssize_t n = getline (&line, &linelen, fp);
+
+      if (n < 0)
+        break;
+
+      if (n == 0)
+        continue;
+
+      copy = strdup (line);
+      if (copy == NULL)
+        abort ();
+
+      p = (char **) tsearch (copy, &root,
+                             (int (*)(const void *, const void *))strcmp);
+      if (*p != copy)
+        /* This line wasn't added.  */
+        free (copy);
+    }
+
+  fclose (fp);
+
+  fp = fopen (_PATH_DEVNULL, "w");
+  if (fp != NULL)
+    {
+      /* Write something through stdout.  */
+      twalk (root, print);
+
+      fclose (fp);
+    }
+
+  /* Free everything.  */
+  tdestroy (root, free);
+
+  /* Also the line buffer.  */
+  free (line);
+
+  /* That's it.  */
+  return 0;
+}
+
+
+static void
+print (const void *node, VISIT value, int level)
+{
+  static int cnt;
+  if (value == postorder || value == leaf)
+    fprintf (fp, "%3d: %s", ++cnt, *(const char **) node);
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-mtrace.sh b/REORG.TODO/malloc/tst-mtrace.sh
new file mode 100755
index 0000000000..6ab799f9d0
--- /dev/null
+++ b/REORG.TODO/malloc/tst-mtrace.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+# Testing the mtrace function.
+# Copyright (C) 2000-2017 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <http://www.gnu.org/licenses/>.
+
+set -e
+
+common_objpfx=$1; shift
+test_program_prefix_before_env=$1; shift
+run_program_env=$1; shift
+test_program_prefix_after_env=$1; shift
+
+status=0
+trap "rm -f ${common_objpfx}malloc/tst-mtrace.leak; exit 1" 1 2 15
+
+${test_program_prefix_before_env} \
+${run_program_env} \
+MALLOC_TRACE=${common_objpfx}malloc/tst-mtrace.leak \
+${test_program_prefix_after_env} \
+  ${common_objpfx}malloc/tst-mtrace || status=1
+
+if test $status -eq 0 && test -f ${common_objpfx}malloc/mtrace; then
+  ${common_objpfx}malloc/mtrace ${common_objpfx}malloc/tst-mtrace.leak \
+    > ${common_objpfx}malloc/tst-mtrace.out || status=1
+fi
+
+rm -f ${common_objpfx}malloc/tst-mtrace.leak
+
+exit $status
diff --git a/REORG.TODO/malloc/tst-obstack.c b/REORG.TODO/malloc/tst-obstack.c
new file mode 100644
index 0000000000..ee1385d0f7
--- /dev/null
+++ b/REORG.TODO/malloc/tst-obstack.c
@@ -0,0 +1,67 @@
+/* Test case by Alexandre Duret-Lutz <duret_g@epita.fr>.  */
+#include <obstack.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define obstack_chunk_alloc verbose_malloc
+#define obstack_chunk_free verbose_free
+#define ALIGN_BOUNDARY 64
+#define ALIGN_MASK (ALIGN_BOUNDARY - 1)
+#define OBJECT_SIZE 1000
+
+static void *
+verbose_malloc (size_t size)
+{
+  void *buf = malloc (size);
+  printf ("malloc (%zu) => %p\n", size, buf);
+  return buf;
+}
+
+static void
+verbose_free (void *buf)
+{
+  free (buf);
+  printf ("free (%p)\n", buf);
+}
+
+static int
+do_test (void)
+{
+  int result = 0;
+  int align = 2;
+
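+  /* Try every power-of-two alignment from 2 up to 64.  */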
+  while (align <= 64)
+    {
+      struct obstack obs;
+      int i;
+      int align_mask = align - 1;
+
+      printf ("\n Alignment mask: %d\n", align_mask);
+
+      obstack_init (&obs);
+      obstack_alignment_mask (&obs) = align_mask;
+      /* finish an empty object to take alignment into account */
+      obstack_finish (&obs);
+
+      /* let's allocate some objects and print their addresses */
+      for (i = 15; i > 0; --i)
+	{
+	  void *obj = obstack_alloc (&obs, OBJECT_SIZE);
+
+	  printf ("obstack_alloc (%u) => %p \t%s\n", OBJECT_SIZE, obj,
+		  ((uintptr_t) obj & align_mask) ? "(not aligned)" : "");
+	  result |= ((uintptr_t) obj & align_mask) != 0;
+	}
+
+      /* clean up */
+      obstack_free (&obs, 0);
+
+      align <<= 1;
+    }
+
+  return result;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-posix_memalign.c b/REORG.TODO/malloc/tst-posix_memalign.c
new file mode 100644
index 0000000000..31573b699e
--- /dev/null
+++ b/REORG.TODO/malloc/tst-posix_memalign.c
@@ -0,0 +1,118 @@
+/* Test for posix_memalign.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+static int errors = 0;
+
+static void
+merror (const char *msg)
+{
+  ++errors;
+  printf ("Error: %s\n", msg);
+}
+
+static int
+do_test (void)
+{
+  void *p;
+  int ret;
+  unsigned long pagesize = getpagesize ();
+  unsigned long ptrval;
+
+  p = NULL;
+
+  /* An attempt to allocate a huge value should return ENOMEM and
+     p should remain NULL.  */
+  ret = posix_memalign (&p, sizeof (void *), -1);
+
+  if (ret != ENOMEM)
+    merror ("posix_memalign (&p, sizeof (void *), -1) succeeded.");
+
+  if (ret == ENOMEM && p != NULL)
+    merror ("returned an error but pointer was modified");
+
+  free (p);
+
+  p = NULL;
+
+  /* Test to expose integer overflow in malloc internals from BZ #15857.  */
+  ret = posix_memalign (&p, pagesize, -pagesize);
+
+  if (ret != ENOMEM)
+    merror ("posix_memalign (&p, pagesize, -pagesize) succeeded.");
+
+  free (p);
+
+  p = NULL;
+
+  /* Test to expose integer overflow in malloc internals from BZ #16038.  */
+  ret = posix_memalign (&p, -1, pagesize);
+
+  if (ret != EINVAL)
+    merror ("posix_memalign (&p, -1, pagesize) succeeded.");
+
+  free (p);
+
+  p = NULL;
+
+  /* A zero-sized allocation should succeed with glibc, returning zero
+     and setting p to a non-NULL value.  */
+  ret = posix_memalign (&p, sizeof (void *), 0);
+
+  if (ret != 0 || p == NULL)
+    merror ("posix_memalign (&p, sizeof (void *), 0) failed.");
+
+  free (p);
+
+  ret = posix_memalign (&p, 0x300, 10);
+
+  if (ret != EINVAL)
+    merror ("posix_memalign (&p, 0x300, 10) succeeded.");
+
+  ret = posix_memalign (&p, 0, 10);
+
+  if (ret != EINVAL)
+    merror ("posix_memalign (&p, 0, 10) succeeded.");
+
+  p = NULL;
+
+  ret = posix_memalign (&p, 0x100, 10);
+
+  if (ret != 0)
+    merror ("posix_memalign (&p, 0x100, 10) failed.");
+
+  if (ret == 0 && p == NULL)
+    merror ("returned success but pointer is NULL");
+
+  ptrval = (unsigned long) p;
+
+  if (ret == 0 && (ptrval & 0xff) != 0)
+    merror ("pointer is not aligned to 0x100");
+
+  free (p);
+
+  return errors != 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-pvalloc.c b/REORG.TODO/malloc/tst-pvalloc.c
new file mode 100644
index 0000000000..00f1952c36
--- /dev/null
+++ b/REORG.TODO/malloc/tst-pvalloc.c
@@ -0,0 +1,99 @@
+/* Test for pvalloc.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+static int errors = 0;
+
+static void
+merror (const char *msg)
+{
+  ++errors;
+  printf ("Error: %s\n", msg);
+}
+
+static int
+do_test (void)
+{
+  void *p;
+  unsigned long pagesize = getpagesize ();
+  unsigned long ptrval;
+  int save;
+
+  errno = 0;
+
+  /* An attempt to allocate a huge value should return NULL and set
+     errno to ENOMEM.  */
+  p = pvalloc (-1);
+
+  save = errno;
+
+  if (p != NULL)
+    merror ("pvalloc (-1) succeeded.");
+
+  if (p == NULL && save != ENOMEM)
+    merror ("pvalloc (-1) errno is not set correctly");
+
+  free (p);
+
+  errno = 0;
+
+  /* Test to expose integer overflow in malloc internals from BZ #15855.  */
+  p = pvalloc (-pagesize);
+
+  save = errno;
+
+  if (p != NULL)
+    merror ("pvalloc (-pagesize) succeeded.");
+
+  if (p == NULL && save != ENOMEM)
+    merror ("pvalloc (-pagesize) errno is not set correctly");
+
+  free (p);
+
+  /* A zero-sized allocation should succeed with glibc, returning a
+     non-NULL value.  */
+  p = pvalloc (0);
+
+  if (p == NULL)
+    merror ("pvalloc (0) failed.");
+
+  free (p);
+
+  /* Check the alignment of the returned pointer is correct.  */
+  p = pvalloc (32);
+
+  if (p == NULL)
+    merror ("pvalloc (32) failed.");
+
+  ptrval = (unsigned long) p;
+
+  if ((ptrval & (pagesize - 1)) != 0)
+    merror ("returned pointer is not page aligned.");
+
+  free (p);
+
+  return errors != 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-realloc.c b/REORG.TODO/malloc/tst-realloc.c
new file mode 100644
index 0000000000..31a58bd026
--- /dev/null
+++ b/REORG.TODO/malloc/tst-realloc.c
@@ -0,0 +1,161 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <string.h>
+#include <libc-diag.h>
+
+static int errors = 0;
+
+static void
+merror (const char *msg)
+{
+  ++errors;
+  printf ("Error: %s\n", msg);
+}
+
+static int
+do_test (void)
+{
+  void *p;
+  unsigned char *c;
+  int save, i, ok;
+
+  errno = 0;
+
+  /* realloc (NULL, ...) behaves similarly to malloc (C89).  */
+  DIAG_PUSH_NEEDS_COMMENT;
+#if __GNUC_PREREQ (7, 0)
+  /* GCC 7 warns about too-large allocations; here we want to test
+     that they fail.  */
+  DIAG_IGNORE_NEEDS_COMMENT (7, "-Walloc-size-larger-than=");
+#endif
+  p = realloc (NULL, -1);
+  DIAG_POP_NEEDS_COMMENT;
+  save = errno;
+
+  if (p != NULL)
+    merror ("realloc (NULL, -1) succeeded.");
+
+  /* errno should be set to ENOMEM on failure (POSIX).  */
+  if (p == NULL && save != ENOMEM)
+    merror ("errno is not set correctly");
+
+  errno = 0;
+
+  /* realloc (NULL, ...) behaves similarly to malloc (C89).  */
+  p = realloc (NULL, 10);
+  save = errno;
+
+  if (p == NULL)
+    merror ("realloc (NULL, 10) failed.");
+
+  /* errno should be clear on success (POSIX).  */
+  if (p != NULL && save != 0)
+    merror ("errno is set but should not be");
+
+  free (p);
+
+  p = calloc (20, 1);
+  if (p == NULL)
+    merror ("calloc (20, 1) failed.");
+
+  /* Check increasing size preserves contents (C89).  */
+  p = realloc (p, 200);
+  if (p == NULL)
+    merror ("realloc (p, 200) failed.");
+
+  c = p;
+  ok = 1;
+
+  for (i = 0; i < 20; i++)
+    {
+      if (c[i] != 0)
+        ok = 0;
+    }
+
+  if (ok == 0)
+    merror ("first 20 bytes were not cleared");
+
+  free (p);
+
+  p = realloc (NULL, 100);
+  if (p == NULL)
+    merror ("realloc (NULL, 100) failed.");
+
+  memset (p, 0xff, 100);
+
+  /* Check decreasing size preserves contents (C89).  */
+  p = realloc (p, 16);
+  if (p == NULL)
+    merror ("realloc (p, 16) failed.");
+
+  c = p;
+  ok = 1;
+
+  for (i = 0; i < 16; i++)
+    {
+      if (c[i] != 0xff)
+        ok = 0;
+    }
+
+  if (ok == 0)
+    merror ("first 16 bytes were not correct");
+
+  /* Check failed realloc leaves original untouched (C89).  */
+  DIAG_PUSH_NEEDS_COMMENT;
+#if __GNUC_PREREQ (7, 0)
+  /* GCC 7 warns about too-large allocations; here we want to test
+     that they fail.  */
+  DIAG_IGNORE_NEEDS_COMMENT (7, "-Walloc-size-larger-than=");
+#endif
+  c = realloc (p, -1);
+  DIAG_POP_NEEDS_COMMENT;
+  if (c != NULL)
+    merror ("realloc (p, -1) succeeded.");
+
+  c = p;
+  ok = 1;
+
+  for (i = 0; i < 16; i++)
+    {
+      if (c[i] != 0xff)
+        ok = 0;
+    }
+
+  if (ok == 0)
+    merror ("first 16 bytes were not correct after failed realloc");
+
+  /* realloc (p, 0) frees p (C89) and returns NULL (glibc).  */
+  p = realloc (p, 0);
+  if (p != NULL)
+    merror ("realloc (p, 0) returned non-NULL.");
+
+  /* realloc (NULL, 0) acts like malloc (0) (glibc).  */
+  p = realloc (NULL, 0);
+  if (p == NULL)
+    merror ("realloc (NULL, 0) returned NULL.");
+
+  free (p);
+
+  return errors != 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-reallocarray.c b/REORG.TODO/malloc/tst-reallocarray.c
new file mode 100644
index 0000000000..f1cbf7fe0a
--- /dev/null
+++ b/REORG.TODO/malloc/tst-reallocarray.c
@@ -0,0 +1,118 @@
+/* Test for reallocarray.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <malloc.h>
+#include <string.h>
+#include <support/check.h>
+
+static int
+do_test (void)
+{
+  void *ptr = NULL;
+  void *ptr2 = NULL;
+  unsigned char *c;
+  size_t i;
+  int ok;
+  const size_t max = ~(size_t)0;
+  size_t a, b;
+
+  /* Test overflow detection.  */
+  errno = 0;
+  ptr = reallocarray (NULL, max, 2);
+  TEST_VERIFY (!ptr);
+  TEST_VERIFY (errno == ENOMEM);
+
+  errno = 0;
+  ptr = reallocarray (NULL, 2, max);
+  TEST_VERIFY (!ptr);
+  TEST_VERIFY (errno == ENOMEM);
+
+  a = 65537;
+  b = max/65537 + 1;
+  errno = 0;
+  ptr = reallocarray (NULL, a, b);
+  TEST_VERIFY (!ptr);
+  TEST_VERIFY (errno == ENOMEM);
+
+  errno = 0;
+  ptr = reallocarray (NULL, b, a);
+  TEST_VERIFY (!ptr);
+  TEST_VERIFY (errno == ENOMEM);
+
+  /* Test realloc-like behavior.  */
+  /* Allocate memory like malloc.  */
+  ptr = reallocarray (NULL, 10, 2);
+  TEST_VERIFY_EXIT (ptr);
+  TEST_VERIFY_EXIT (malloc_usable_size (ptr) >= 10*2);
+
+  memset (ptr, 0xAF, 10*2);
+
+  /* Enlarge buffer.   */
+  ptr2 = reallocarray (ptr, 20, 2);
+  TEST_VERIFY (ptr2);
+  if (ptr2)
+    ptr = ptr2;
+  TEST_VERIFY (malloc_usable_size (ptr) >= 20*2);
+
+  c = ptr;
+  ok = 1;
+  for (i = 0; i < 10*2; ++i)
+    {
+      if (c[i] != 0xAF)
+        ok = 0;
+    }
+  TEST_VERIFY (ok);
+
+  /* Decrease buffer size.  */
+  ptr2 = reallocarray (ptr, 5, 3);
+  TEST_VERIFY (ptr2);
+  if (ptr2)
+    ptr = ptr2;
+  TEST_VERIFY_EXIT (malloc_usable_size (ptr) >= 5*3);
+
+  c = ptr;
+  ok = 1;
+  for (i = 0; i < 5*3; ++i)
+    {
+      if (c[i] != 0xAF)
+        ok = 0;
+    }
+  TEST_VERIFY (ok);
+
+  /* Overflow should leave buffer untouched.  */
+  errno = 0;
+  ptr2 = reallocarray (ptr, 2, ~(size_t)0);
+  TEST_VERIFY (!ptr2);
+  TEST_VERIFY (errno == ENOMEM);
+
+  c = ptr;
+  ok = 1;
+  for (i = 0; i < 5*3; ++i)
+    {
+      if (c[i] != 0xAF)
+        ok = 0;
+    }
+  TEST_VERIFY (ok);
+
+  free (ptr);
+
+  return 0;
+}
+
+#include <support/test-driver.c>
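reallocarray is realloc with an overflow-checked element count: an nmemb * size product that would wrap size_t fails with ENOMEM instead of silently allocating a short buffer, which is what the overflow cases above rely on. Roughly equivalent to the following hand-written check (a sketch, not the glibc implementation):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical stand-in: resize PTR to NMEMB elements of SIZE bytes,
       refusing multiplications that overflow size_t.  */
    static void *
    reallocarray_like (void *ptr, size_t nmemb, size_t size)
    {
      if (size != 0 && nmemb > SIZE_MAX / size)
        {
          errno = ENOMEM;
          return NULL;
        }
      return realloc (ptr, nmemb * size);
    }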
diff --git a/REORG.TODO/malloc/tst-scratch_buffer.c b/REORG.TODO/malloc/tst-scratch_buffer.c
new file mode 100644
index 0000000000..5c9f3442ae
--- /dev/null
+++ b/REORG.TODO/malloc/tst-scratch_buffer.c
@@ -0,0 +1,155 @@
+/*
+   Copyright (C) 2015-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <scratch_buffer.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+static bool
+unchanged_array_size (struct scratch_buffer *buf, size_t a, size_t b)
+{
+  size_t old_length = buf->length;
+  if (!scratch_buffer_set_array_size (buf, a, b))
+    {
+      printf ("scratch_buffer_set_array_size failed: %zu %zu\n",
+	      a, b);
+      return false;
+    }
+  if (old_length != buf->length)
+    {
+      printf ("scratch_buffer_set_array_size did not preserve size: %zu %zu\n",
+	      a, b);
+      return false;
+    }
+  return true;
+}
+
+static bool
+array_size_must_fail (size_t a, size_t b)
+{
+  for (int pass = 0; pass < 2; ++pass)
+    {
+      struct scratch_buffer buf;
+      scratch_buffer_init (&buf);
+      if (pass > 0)
+	if (!scratch_buffer_grow (&buf))
+	  {
+	    printf ("scratch_buffer_grow in array_size_must_fail failed\n");
+	    return false;
+	  }
+      if (scratch_buffer_set_array_size (&buf, a, b))
+	{
+	  printf ("scratch_buffer_set_array_size passed: %d %zu %zu\n",
+		  pass, a, b);
+	  return false;
+	}
+      if (buf.data != buf.__space)
+	{
+	  printf ("scratch_buffer_set_array_size did not free: %d %zu %zu\n",
+		  pass, a, b);
+	  return false;
+	}
+    }
+  return true;
+}
+
+static int
+do_test (void)
+{
+  {
+    struct scratch_buffer buf;
+    scratch_buffer_init (&buf);
+    memset (buf.data, ' ', buf.length);
+    scratch_buffer_free (&buf);
+  }
+  {
+    struct scratch_buffer buf;
+    scratch_buffer_init (&buf);
+    memset (buf.data, ' ', buf.length);
+    size_t old_length = buf.length;
+    scratch_buffer_grow (&buf);
+    if (buf.length <= old_length)
+      {
+	printf ("scratch_buffer_grow did not enlarge buffer\n");
+	return 1;
+      }
+    memset (buf.data, ' ', buf.length);
+    scratch_buffer_free (&buf);
+  }
+  {
+    struct scratch_buffer buf;
+    scratch_buffer_init (&buf);
+    memset (buf.data, '@', buf.length);
+    strcpy (buf.data, "prefix");
+    size_t old_length = buf.length;
+    scratch_buffer_grow_preserve (&buf);
+    if (buf.length <= old_length)
+      {
+	printf ("scratch_buffer_grow_preserve did not enlarge buffer\n");
+	return 1;
+      }
+    if (strcmp (buf.data, "prefix") != 0)
+      {
+	printf ("scratch_buffer_grow_preserve did not copy buffer\n");
+	return 1;
+      }
+    for (unsigned i = 7; i < old_length; ++i)
+      if (((char *)buf.data)[i] != '@')
+	{
+	  printf ("scratch_buffer_grow_preserve did not copy buffer (%u)\n",
+		  i);
+	  return 1;
+	}
+    scratch_buffer_free (&buf);
+  }
+  {
+    struct scratch_buffer buf;
+    scratch_buffer_init (&buf);
+    for (int pass = 0; pass < 4; ++pass)
+      {
+	if (!(unchanged_array_size (&buf, 0, 0)
+	      && unchanged_array_size (&buf, 1, 0)
+	      && unchanged_array_size (&buf, 0, 1)
+	      && unchanged_array_size (&buf, -1, 0)
+	      && unchanged_array_size (&buf, 0, -1)
+	      && unchanged_array_size (&buf, 1ULL << 16, 0)
+	      && unchanged_array_size (&buf, 0, 1ULL << 16)
+	      && unchanged_array_size (&buf, (size_t) (1ULL << 32), 0)
+	      && unchanged_array_size (&buf, 0, (size_t) (1ULL << 32))))
+	  return 1;
+	if (!scratch_buffer_grow (&buf))
+	  {
+	    printf ("scratch_buffer_grow_failed (pass %d)\n", pass);
+	  }
+      }
+    scratch_buffer_free (&buf);
+  }
+  {
+    if (!(array_size_must_fail (-1, 1)
+	  && array_size_must_fail (-1, -1)
+	  && array_size_must_fail (1, -1)
+	  && array_size_must_fail (((size_t)-1) / 4, 4)
+	  && array_size_must_fail (4, ((size_t)-1) / 4)))
+	return 1;
+  }
+  return 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-trim1.c b/REORG.TODO/malloc/tst-trim1.c
new file mode 100644
index 0000000000..310707e0e1
--- /dev/null
+++ b/REORG.TODO/malloc/tst-trim1.c
@@ -0,0 +1,56 @@
+#include <malloc.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define N 10000
+
+static void *arr[N];
+
+static int
+do_test (void)
+{
+  for (int i = 0; i < N; ++i)
+    {
+      size_t size = random () % 16384;
+
+      if ((arr[i] = malloc (size)) == NULL)
+	{
+	nomem:
+	  puts ("not enough memory");
+	  return 0;
+	}
+
+      memset (arr[i], size, size);
+    }
+
+  void *p = malloc (256);
+  if (p == NULL)
+    goto nomem;
+  memset (p, 1, 256);
+
+  puts ("==================================================================");
+
+  for (int i = 0; i < N; ++i)
+    if (i % 13 != 0)
+      free (arr[i]);
+
+  puts ("==================================================================");
+
+  malloc_trim (0);
+
+  puts ("==================================================================");
+
+  p = malloc (30000);
+  if (p == NULL)
+    goto nomem;
+
+  memset (p, 2, 30000);
+
+  malloc_trim (0);
+
+  return 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
diff --git a/REORG.TODO/malloc/tst-valloc.c b/REORG.TODO/malloc/tst-valloc.c
new file mode 100644
index 0000000000..57161f6b7d
--- /dev/null
+++ b/REORG.TODO/malloc/tst-valloc.c
@@ -0,0 +1,99 @@
+/* Test for valloc.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+static int errors = 0;
+
+static void
+merror (const char *msg)
+{
+  ++errors;
+  printf ("Error: %s\n", msg);
+}
+
+static int
+do_test (void)
+{
+  void *p;
+  unsigned long pagesize = getpagesize ();
+  unsigned long ptrval;
+  int save;
+
+  errno = 0;
+
+  /* An attempt to allocate a huge value should return NULL and set
+     errno to ENOMEM.  */
+  p = valloc (-1);
+
+  save = errno;
+
+  if (p != NULL)
+    merror ("valloc (-1) succeeded.");
+
+  if (p == NULL && save != ENOMEM)
+    merror ("valloc (-1) errno is not set correctly");
+
+  free (p);
+
+  errno = 0;
+
+  /* Test to expose integer overflow in malloc internals from BZ #15856.  */
+  p = valloc (-pagesize);
+
+  save = errno;
+
+  if (p != NULL)
+    merror ("valloc (-pagesize) succeeded.");
+
+  if (p == NULL && save != ENOMEM)
+    merror ("valloc (-pagesize) errno is not set correctly");
+
+  free (p);
+
+  /* A zero-sized allocation should succeed with glibc, returning a
+     non-NULL value.  */
+  p = valloc (0);
+
+  if (p == NULL)
+    merror ("valloc (0) failed.");
+
+  free (p);
+
+  /* Check the alignment of the returned pointer is correct.  */
+  p = valloc (32);
+
+  if (p == NULL)
+    merror ("valloc (32) failed.");
+
+  ptrval = (unsigned long) p;
+
+  if ((ptrval & (pagesize - 1)) != 0)
+    merror ("returned pointer is not page aligned.");
+
+  free (p);
+
+  return errors != 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"