path: root/malloc/arena.c
author    Ondřej Bílka <neleai@seznam.cz>  2014-01-02 09:38:18 +0100
committer Ondřej Bílka <neleai@seznam.cz>  2014-01-02 09:40:10 +0100
commit    6c8dbf00f536d78b1937b5af6f57be47fd376344
tree      ad86d3e7433a907cac50ebbd9c39ca3402a87c6a
parent    9a3c6a6ff602c88d7155139a7d7d0000b7b7e946
Reformat malloc to gnu style.
Diffstat (limited to 'malloc/arena.c')
 -rw-r--r--  malloc/arena.c | 791
 1 file changed, 418 insertions(+), 373 deletions(-)
diff --git a/malloc/arena.c b/malloc/arena.c
index 5beb13d98b..5088a253ec 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -21,12 +21,12 @@
 
 /* Compile-time constants.  */
 
-#define HEAP_MIN_SIZE (32*1024)
+#define HEAP_MIN_SIZE (32 * 1024)
 #ifndef HEAP_MAX_SIZE
 # ifdef DEFAULT_MMAP_THRESHOLD_MAX
 #  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
 # else
-#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
+#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
 # endif
 #endif
 
@@ -39,7 +39,7 @@
 
 
 #ifndef THREAD_STATS
-#define THREAD_STATS 0
+# define THREAD_STATS 0
 #endif
 
 /* If THREAD_STATS is non-zero, some statistics on mutex locking are
@@ -53,12 +53,13 @@
    malloc_chunks.  It is allocated with mmap() and always starts at an
    address aligned to HEAP_MAX_SIZE.  */
 
-typedef struct _heap_info {
+typedef struct _heap_info
+{
   mstate ar_ptr; /* Arena for this heap. */
   struct _heap_info *prev; /* Previous heap. */
   size_t size;   /* Current size in bytes. */
-  size_t mprotect_size;	/* Size in bytes that has been mprotected
-			   PROT_READ|PROT_WRITE.  */
+  size_t mprotect_size; /* Size in bytes that has been mprotected
+                           PROT_READ|PROT_WRITE.  */
   /* Make sure the following data is properly aligned, particularly
      that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
      MALLOC_ALIGNMENT. */
@@ -68,8 +69,8 @@ typedef struct _heap_info {
 /* Get a compile-time error if the heap_info padding is not correct
    to make alignment work as expected in sYSMALLOc.  */
 extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
-					     + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
-					    ? -1 : 1];
+                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
+                                            ? -1 : 1];
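
(Aside: the extern array above is a pre-C11 compile-time assertion. An array bound of -1 is a constraint violation, so the build fails exactly when the heap_info padding no longer makes sizeof (heap_info) + 2 * SIZE_SZ a multiple of MALLOC_ALIGNMENT. A minimal standalone sketch of the same idiom, with illustrative names rather than glibc's; C11 code would use _Static_assert instead:

/* Compile-time assertion usable in C89/C99: the array bound is 1
   (harmless) when COND holds and -1 (a compile error) when not.  */
#define STATIC_ASSERT(name, cond) extern int name[(cond) ? 1 : -1]

struct pair { char tag; long value; };

/* The build breaks here if struct pair's size stops being a multiple
   of sizeof (long) on the target ABI.  */
STATIC_ASSERT (pair_size_check, sizeof (struct pair) % sizeof (long) == 0);
)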
 
 /* Thread specific data */
 
@@ -80,9 +81,9 @@ static mstate free_list;
 
 #if THREAD_STATS
 static int stat_n_heaps;
-#define THREAD_STAT(x) x
+# define THREAD_STAT(x) x
 #else
-#define THREAD_STAT(x) do ; while(0)
+# define THREAD_STAT(x) do ; while (0)
 #endif
 
 /* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
@@ -103,28 +104,28 @@ int __malloc_initialized = -1;
    in the new arena. */
 
 #define arena_get(ptr, size) do { \
-  arena_lookup(ptr); \
-  arena_lock(ptr, size); \
-} while(0)
+      arena_lookup (ptr);						      \
+      arena_lock (ptr, size);						      \
+  } while (0)
 
 #define arena_lookup(ptr) do { \
-  void *vptr = NULL; \
-  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
-} while(0)
+      void *vptr = NULL;						      \
+      ptr = (mstate) tsd_getspecific (arena_key, vptr);			      \
+  } while (0)
 
-# define arena_lock(ptr, size) do { \
-  if(ptr) \
-    (void)mutex_lock(&ptr->mutex); \
-  else \
-    ptr = arena_get2(ptr, (size), NULL); \
-} while(0)
+#define arena_lock(ptr, size) do {					      \
+      if (ptr)								      \
+        (void) mutex_lock (&ptr->mutex);				      \
+      else								      \
+        ptr = arena_get2 (ptr, (size), NULL);				      \
+  } while (0)
 
 /* find the heap and corresponding arena for a given ptr */
 
 #define heap_for_ptr(ptr) \
- ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
+  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
 #define arena_for_chunk(ptr) \
- (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
+  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
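
(Aside: both macros lean on the invariant stated earlier in the file: every non-main heap starts at an address aligned to HEAP_MAX_SIZE, so the owning heap_info can be recovered from any chunk pointer with a single mask, no lookup structure required. A standalone illustration of the arithmetic, with a stand-in address and a stand-in HEAP_MAX_SIZE, not glibc code:

#include <stdio.h>

#define HEAP_MAX_SIZE (1024 * 1024)   /* must be a power of two */

int
main (void)
{
  /* Pretend a heap was mapped at an address aligned to HEAP_MAX_SIZE
     and a chunk lives somewhere inside it.  */
  unsigned long heap_base = 0x7f0000100000UL;
  unsigned long chunk = heap_base + 0x4242;

  /* The heap_for_ptr trick: clearing the low bits of the chunk
     address yields the start of the enclosing heap.  */
  unsigned long recovered = chunk & ~(HEAP_MAX_SIZE - 1UL);
  printf ("heap %#lx, recovered %#lx\n", heap_base, recovered);
  return 0;
}
)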
 
 
 /**************************************************************************/
@@ -133,51 +134,58 @@ int __malloc_initialized = -1;
 
 /* atfork support.  */
 
-static void *(*save_malloc_hook) (size_t __size, const void *);
+static void *(*save_malloc_hook)(size_t __size, const void *);
 static void (*save_free_hook) (void *__ptr, const void *);
 static void *save_arena;
 
-#ifdef ATFORK_MEM
+# ifdef ATFORK_MEM
 ATFORK_MEM;
-#endif
+# endif
 
 /* Magic value for the thread-specific arena pointer when
    malloc_atfork() is in use.  */
 
-#define ATFORK_ARENA_PTR ((void*)-1)
+# define ATFORK_ARENA_PTR ((void *) -1)
 
 /* The following hooks are used while the `atfork' handling mechanism
    is active. */
 
-static void*
-malloc_atfork(size_t sz, const void *caller)
+static void *
+malloc_atfork (size_t sz, const void *caller)
 {
   void *vptr = NULL;
   void *victim;
 
-  tsd_getspecific(arena_key, vptr);
-  if(vptr == ATFORK_ARENA_PTR) {
-    /* We are the only thread that may allocate at all.  */
-    if(save_malloc_hook != malloc_check) {
-      return _int_malloc(&main_arena, sz);
-    } else {
-      if(top_check()<0)
-	return 0;
-      victim = _int_malloc(&main_arena, sz+1);
-      return mem2mem_check(victim, sz);
+  tsd_getspecific (arena_key, vptr);
+  if (vptr == ATFORK_ARENA_PTR)
+    {
+      /* We are the only thread that may allocate at all.  */
+      if (save_malloc_hook != malloc_check)
+        {
+          return _int_malloc (&main_arena, sz);
+        }
+      else
+        {
+          if (top_check () < 0)
+            return 0;
+
+          victim = _int_malloc (&main_arena, sz + 1);
+          return mem2mem_check (victim, sz);
+        }
+    }
+  else
+    {
+      /* Suspend the thread until the `atfork' handlers have completed.
+         By that time, the hooks will have been reset as well, so that
+         mALLOc() can be used again. */
+      (void) mutex_lock (&list_lock);
+      (void) mutex_unlock (&list_lock);
+      return __libc_malloc (sz);
     }
-  } else {
-    /* Suspend the thread until the `atfork' handlers have completed.
-       By that time, the hooks will have been reset as well, so that
-       mALLOc() can be used again. */
-    (void)mutex_lock(&list_lock);
-    (void)mutex_unlock(&list_lock);
-    return __libc_malloc(sz);
-  }
 }
 
 static void
-free_atfork(void* mem, const void *caller)
+free_atfork (void *mem, const void *caller)
 {
   void *vptr = NULL;
   mstate ar_ptr;
@@ -186,17 +194,17 @@ free_atfork(void* mem, const void *caller)
   if (mem == 0)                              /* free(0) has no effect */
     return;
 
-  p = mem2chunk(mem);         /* do not bother to replicate free_check here */
+  p = mem2chunk (mem);         /* do not bother to replicate free_check here */
 
-  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
-  {
-    munmap_chunk(p);
-    return;
-  }
+  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
+    {
+      munmap_chunk (p);
+      return;
+    }
 
-  ar_ptr = arena_for_chunk(p);
-  tsd_getspecific(arena_key, vptr);
-  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
+  ar_ptr = arena_for_chunk (p);
+  tsd_getspecific (arena_key, vptr);
+  _int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
 }
 
 
@@ -214,33 +222,36 @@ ptmalloc_lock_all (void)
 {
   mstate ar_ptr;
 
-  if(__malloc_initialized < 1)
+  if (__malloc_initialized < 1)
     return;
-  if (mutex_trylock(&list_lock))
+
+  if (mutex_trylock (&list_lock))
     {
       void *my_arena;
-      tsd_getspecific(arena_key, my_arena);
+      tsd_getspecific (arena_key, my_arena);
       if (my_arena == ATFORK_ARENA_PTR)
-	/* This is the same thread which already locks the global list.
-	   Just bump the counter.  */
-	goto out;
+        /* This is the same thread which already locks the global list.
+           Just bump the counter.  */
+        goto out;
 
       /* This thread has to wait its turn.  */
-      (void)mutex_lock(&list_lock);
+      (void) mutex_lock (&list_lock);
+    }
+  for (ar_ptr = &main_arena;; )
+    {
+      (void) mutex_lock (&ar_ptr->mutex);
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
     }
-  for(ar_ptr = &main_arena;;) {
-    (void)mutex_lock(&ar_ptr->mutex);
-    ar_ptr = ar_ptr->next;
-    if(ar_ptr == &main_arena) break;
-  }
   save_malloc_hook = __malloc_hook;
   save_free_hook = __free_hook;
   __malloc_hook = malloc_atfork;
   __free_hook = free_atfork;
   /* Only the current thread may perform malloc/free calls now. */
-  tsd_getspecific(arena_key, save_arena);
-  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
- out:
+  tsd_getspecific (arena_key, save_arena);
+  tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
+out:
   ++atfork_recursive_cntr;
 }
 
@@ -249,19 +260,23 @@ ptmalloc_unlock_all (void)
 {
   mstate ar_ptr;
 
-  if(__malloc_initialized < 1)
+  if (__malloc_initialized < 1)
     return;
+
   if (--atfork_recursive_cntr != 0)
     return;
-  tsd_setspecific(arena_key, save_arena);
+
+  tsd_setspecific (arena_key, save_arena);
   __malloc_hook = save_malloc_hook;
   __free_hook = save_free_hook;
-  for(ar_ptr = &main_arena;;) {
-    (void)mutex_unlock(&ar_ptr->mutex);
-    ar_ptr = ar_ptr->next;
-    if(ar_ptr == &main_arena) break;
-  }
-  (void)mutex_unlock(&list_lock);
+  for (ar_ptr = &main_arena;; )
+    {
+      (void) mutex_unlock (&ar_ptr->mutex);
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
+    }
+  (void) mutex_unlock (&list_lock);
 }
 
 # ifdef __linux__
@@ -276,31 +291,33 @@ ptmalloc_unlock_all2 (void)
 {
   mstate ar_ptr;
 
-  if(__malloc_initialized < 1)
+  if (__malloc_initialized < 1)
     return;
-  tsd_setspecific(arena_key, save_arena);
+
+  tsd_setspecific (arena_key, save_arena);
   __malloc_hook = save_malloc_hook;
   __free_hook = save_free_hook;
   free_list = NULL;
-  for(ar_ptr = &main_arena;;) {
-    mutex_init(&ar_ptr->mutex);
-    if (ar_ptr != save_arena) {
-      ar_ptr->next_free = free_list;
-      free_list = ar_ptr;
+  for (ar_ptr = &main_arena;; )
+    {
+      mutex_init (&ar_ptr->mutex);
+      if (ar_ptr != save_arena)
+        {
+          ar_ptr->next_free = free_list;
+          free_list = ar_ptr;
+        }
+      ar_ptr = ar_ptr->next;
+      if (ar_ptr == &main_arena)
+        break;
     }
-    ar_ptr = ar_ptr->next;
-    if(ar_ptr == &main_arena) break;
-  }
-  mutex_init(&list_lock);
+  mutex_init (&list_lock);
   atfork_recursive_cntr = 0;
 }
 
 # else
 
 #  define ptmalloc_unlock_all2 ptmalloc_unlock_all
-
 # endif
-
 #endif  /* !NO_THREADS */
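
(Aside: the three handlers above follow the classic fork-safety recipe: before fork() the prepare handler takes every malloc lock, so the child never inherits a mutex frozen in a locked state; afterwards the parent unlocks, while the child, which has only one thread, reinitializes the locks outright, as ptmalloc_unlock_all2 does on Linux. A hedged single-lock sketch of the same pattern using the public pthread_atfork interface rather than glibc's internal thread_atfork wrapper:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Parent, just before fork(): acquire the lock so no thread holds it
   at the instant the address space is duplicated.  */
static void before_fork (void) { pthread_mutex_lock (&lock); }

/* Parent, after fork(): release it again.  */
static void in_parent (void) { pthread_mutex_unlock (&lock); }

/* Child, after fork(): only one thread exists, so reinitializing the
   copied, still-locked mutex is the robust choice, mirroring the
   mutex_init calls in ptmalloc_unlock_all2.  */
static void in_child (void) { pthread_mutex_init (&lock, NULL); }

static void __attribute__ ((constructor))
register_fork_handlers (void)
{
  pthread_atfork (before_fork, in_parent, in_child);
}
)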
 
 /* Initialization routine. */
@@ -317,20 +334,20 @@ next_env_entry (char ***position)
   while (*current != NULL)
     {
       if (__builtin_expect ((*current)[0] == 'M', 0)
-	  && (*current)[1] == 'A'
-	  && (*current)[2] == 'L'
-	  && (*current)[3] == 'L'
-	  && (*current)[4] == 'O'
-	  && (*current)[5] == 'C'
-	  && (*current)[6] == '_')
-	{
-	  result = &(*current)[7];
+          && (*current)[1] == 'A'
+          && (*current)[2] == 'L'
+          && (*current)[3] == 'L'
+          && (*current)[4] == 'O'
+          && (*current)[5] == 'C'
+          && (*current)[6] == '_')
+        {
+          result = &(*current)[7];
 
-	  /* Save current position for next visit.  */
-	  *position = ++current;
+          /* Save current position for next visit.  */
+          *position = ++current;
 
-	  break;
-	}
+          break;
+        }
 
       ++current;
     }
@@ -353,7 +370,9 @@ libc_hidden_proto (_dl_open_hook);
 static void
 ptmalloc_init (void)
 {
-  if(__malloc_initialized >= 0) return;
+  if (__malloc_initialized >= 0)
+    return;
+
   __malloc_initialized = 0;
 
 #ifdef SHARED
@@ -364,13 +383,13 @@ ptmalloc_init (void)
 
   if (_dl_open_hook != NULL
       || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
-	  && l->l_ns != LM_ID_BASE))
+          && l->l_ns != LM_ID_BASE))
     __morecore = __failing_morecore;
 #endif
 
-  tsd_key_create(&arena_key, NULL);
-  tsd_setspecific(arena_key, (void *)&main_arena);
-  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
+  tsd_key_create (&arena_key, NULL);
+  tsd_setspecific (arena_key, (void *) &main_arena);
+  thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
   const char *s = NULL;
   if (__builtin_expect (_environ != NULL, 1))
     {
@@ -378,66 +397,67 @@ ptmalloc_init (void)
       char *envline;
 
       while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
-			       0))
-	{
-	  size_t len = strcspn (envline, "=");
-
-	  if (envline[len] != '=')
-	    /* This is a "MALLOC_" variable at the end of the string
-	       without a '=' character.  Ignore it since otherwise we
-	       will access invalid memory below.  */
-	    continue;
-
-	  switch (len)
-	    {
-	    case 6:
-	      if (memcmp (envline, "CHECK_", 6) == 0)
-		s = &envline[7];
-	      break;
-	    case 8:
-	      if (! __builtin_expect (__libc_enable_secure, 0))
-		{
-		  if (memcmp (envline, "TOP_PAD_", 8) == 0)
-		    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
-		  else if (memcmp (envline, "PERTURB_", 8) == 0)
-		    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
-		}
-	      break;
-	    case 9:
-	      if (! __builtin_expect (__libc_enable_secure, 0))
-		{
-		  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
-		    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
-		  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
-		    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
-		}
-	      break;
-	    case 10:
-	      if (! __builtin_expect (__libc_enable_secure, 0))
-		{
-		  if (memcmp (envline, "ARENA_TEST", 10) == 0)
-		    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
-		}
-	      break;
-	    case 15:
-	      if (! __builtin_expect (__libc_enable_secure, 0))
-		{
-		  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
-		    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
-		  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
-		    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
-		}
-	      break;
-	    default:
-	      break;
-	    }
-	}
+                               0))
+        {
+          size_t len = strcspn (envline, "=");
+
+          if (envline[len] != '=')
+            /* This is a "MALLOC_" variable at the end of the string
+               without a '=' character.  Ignore it since otherwise we
+               will access invalid memory below.  */
+            continue;
+
+          switch (len)
+            {
+            case 6:
+              if (memcmp (envline, "CHECK_", 6) == 0)
+                s = &envline[7];
+              break;
+            case 8:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
+                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
+                  else if (memcmp (envline, "PERTURB_", 8) == 0)
+                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
+                }
+              break;
+            case 9:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
+                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
+                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
+                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
+                }
+              break;
+            case 10:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
+                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
+                }
+              break;
+            case 15:
+              if (!__builtin_expect (__libc_enable_secure, 0))
+                {
+                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
+                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
+                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
+                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
+                }
+              break;
+            default:
+              break;
+            }
+        }
+    }
+  if (s && s[0])
+    {
+      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
+      if (check_action != 0)
+        __malloc_check_init ();
     }
-  if(s && s[0]) {
-    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
-    if (check_action != 0)
-      __malloc_check_init();
-  }
   void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
   if (hook != NULL)
     (*hook)();
@@ -446,11 +466,11 @@ ptmalloc_init (void)
 
 /* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
 #ifdef thread_atfork_static
-thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
-		     ptmalloc_unlock_all2)
+thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all,		      \
+                      ptmalloc_unlock_all2)
 #endif
 
-
+
 
 /* Managing heaps and arenas (for concurrent threads) */
 
@@ -459,30 +479,33 @@ thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
 /* Print the complete contents of a single heap to stderr. */
 
 static void
-dump_heap(heap_info *heap)
+dump_heap (heap_info *heap)
 {
   char *ptr;
   mchunkptr p;
 
-  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
-  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
-    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
-  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
-		  ~MALLOC_ALIGN_MASK);
-  for(;;) {
-    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
-    if(p == top(heap->ar_ptr)) {
-      fprintf(stderr, " (top)\n");
-      break;
-    } else if(p->size == (0|PREV_INUSE)) {
-      fprintf(stderr, " (fence)\n");
-      break;
+  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
+  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
+        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
+  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
+                   ~MALLOC_ALIGN_MASK);
+  for (;; )
+    {
+      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
+      if (p == top (heap->ar_ptr))
+        {
+          fprintf (stderr, " (top)\n");
+          break;
+        }
+      else if (p->size == (0 | PREV_INUSE))
+        {
+          fprintf (stderr, " (fence)\n");
+          break;
+        }
+      fprintf (stderr, "\n");
+      p = next_chunk (p);
     }
-    fprintf(stderr, "\n");
-    p = next_chunk(p);
-  }
 }
-
 #endif /* MALLOC_DEBUG > 1 */
 
 /* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
@@ -500,18 +523,18 @@ static char *aligned_heap_area;
 
 static heap_info *
 internal_function
-new_heap(size_t size, size_t top_pad)
+new_heap (size_t size, size_t top_pad)
 {
-  size_t page_mask = GLRO(dl_pagesize) - 1;
+  size_t page_mask = GLRO (dl_pagesize) - 1;
   char *p1, *p2;
   unsigned long ul;
   heap_info *h;
 
-  if(size+top_pad < HEAP_MIN_SIZE)
+  if (size + top_pad < HEAP_MIN_SIZE)
     size = HEAP_MIN_SIZE;
-  else if(size+top_pad <= HEAP_MAX_SIZE)
+  else if (size + top_pad <= HEAP_MAX_SIZE)
     size += top_pad;
-  else if(size > HEAP_MAX_SIZE)
+  else if (size > HEAP_MAX_SIZE)
     return 0;
   else
     size = HEAP_MAX_SIZE;
@@ -522,46 +545,55 @@ new_heap(size_t size, size_t top_pad)
      mapping (on Linux, this is the case for all non-writable mappings
      anyway). */
   p2 = MAP_FAILED;
-  if(aligned_heap_area) {
-    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
-		      MAP_NORESERVE);
-    aligned_heap_area = NULL;
-    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
-      __munmap(p2, HEAP_MAX_SIZE);
-      p2 = MAP_FAILED;
+  if (aligned_heap_area)
+    {
+      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+                          MAP_NORESERVE);
+      aligned_heap_area = NULL;
+      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
+        {
+          __munmap (p2, HEAP_MAX_SIZE);
+          p2 = MAP_FAILED;
+        }
     }
-  }
-  if(p2 == MAP_FAILED) {
-    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
-    if(p1 != MAP_FAILED) {
-      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
-		    & ~(HEAP_MAX_SIZE-1));
-      ul = p2 - p1;
-      if (ul)
-	__munmap(p1, ul);
+  if (p2 == MAP_FAILED)
+    {
+      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
+      if (p1 != MAP_FAILED)
+        {
+          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
+                         & ~(HEAP_MAX_SIZE - 1));
+          ul = p2 - p1;
+          if (ul)
+            __munmap (p1, ul);
+          else
+            aligned_heap_area = p2 + HEAP_MAX_SIZE;
+          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+        }
       else
-	aligned_heap_area = p2 + HEAP_MAX_SIZE;
-      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
-    } else {
-      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
-	 is already aligned. */
-      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
-      if(p2 == MAP_FAILED)
-	return 0;
-      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
-	__munmap(p2, HEAP_MAX_SIZE);
-	return 0;
-      }
+        {
+          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+             is already aligned. */
+          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
+          if (p2 == MAP_FAILED)
+            return 0;
+
+          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
+            {
+              __munmap (p2, HEAP_MAX_SIZE);
+              return 0;
+            }
+        }
     }
-  }
-  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
-    __munmap(p2, HEAP_MAX_SIZE);
-    return 0;
-  }
-  h = (heap_info *)p2;
+  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
+    {
+      __munmap (p2, HEAP_MAX_SIZE);
+      return 0;
+    }
+  h = (heap_info *) p2;
   h->size = size;
   h->mprotect_size = size;
-  THREAD_STAT(stat_n_heaps++);
+  THREAD_STAT (stat_n_heaps++);
   LIBC_PROBE (memory_heap_new, 2, h, h->size);
   return h;
 }
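
(Aside: new_heap earns its alignment guarantee by over-reserving. It maps 2 * HEAP_MAX_SIZE of PROT_NONE address space, rounds the start up to the next HEAP_MAX_SIZE boundary, unmaps the slack on both sides, and only then mprotect()s the prefix it actually uses. A simplified standalone sketch of that over-map-and-trim technique; it drops new_heap's aligned_heap_area cache and its single-mapping fallback:

#include <sys/mman.h>
#include <stddef.h>

/* Reserve ALIGN bytes of address space aligned to ALIGN (a power of
   two) by mapping 2 * ALIGN and unmapping the slack on both sides.
   Returns NULL on failure.  This mirrors the idea in new_heap, not
   its exact logic.  */
static void *
map_aligned (size_t align)
{
  char *p1 = mmap (NULL, align << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;

  char *p2 = (char *) (((unsigned long) p1 + (align - 1))
                       & ~(align - 1UL));
  size_t head = p2 - p1;
  if (head)
    munmap (p1, head);                /* drop the misaligned head */
  munmap (p2 + align, align - head);  /* drop the tail */
  return p2;
}

A caller would follow up with mprotect (p, size, PROT_READ | PROT_WRITE) on the prefix it intends to touch, exactly as new_heap does.)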
@@ -570,22 +602,25 @@ new_heap(size_t size, size_t top_pad)
    multiple of the page size. */
 
 static int
-grow_heap(heap_info *h, long diff)
+grow_heap (heap_info *h, long diff)
 {
-  size_t page_mask = GLRO(dl_pagesize) - 1;
+  size_t page_mask = GLRO (dl_pagesize) - 1;
   long new_size;
 
   diff = (diff + page_mask) & ~page_mask;
-  new_size = (long)h->size + diff;
-  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
+  new_size = (long) h->size + diff;
+  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
     return -1;
-  if((unsigned long) new_size > h->mprotect_size) {
-    if (__mprotect((char *)h + h->mprotect_size,
-		   (unsigned long) new_size - h->mprotect_size,
-		   PROT_READ|PROT_WRITE) != 0)
-      return -2;
-    h->mprotect_size = new_size;
-  }
+
+  if ((unsigned long) new_size > h->mprotect_size)
+    {
+      if (__mprotect ((char *) h + h->mprotect_size,
+                      (unsigned long) new_size - h->mprotect_size,
+                      PROT_READ | PROT_WRITE) != 0)
+        return -2;
+
+      h->mprotect_size = new_size;
+    }
 
   h->size = new_size;
   LIBC_PROBE (memory_heap_more, 2, h, h->size);
@@ -595,24 +630,26 @@ grow_heap(heap_info *h, long diff)
 /* Shrink a heap.  */
 
 static int
-shrink_heap(heap_info *h, long diff)
+shrink_heap (heap_info *h, long diff)
 {
   long new_size;
 
-  new_size = (long)h->size - diff;
-  if(new_size < (long)sizeof(*h))
+  new_size = (long) h->size - diff;
+  if (new_size < (long) sizeof (*h))
     return -1;
+
   /* Try to re-map the extra heap space freshly to save memory, and make it
      inaccessible.  See malloc-sysdep.h to know when this is true.  */
   if (__builtin_expect (check_may_shrink_heap (), 0))
     {
-      if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
-		      MAP_FIXED) == (char *) MAP_FAILED)
-	return -2;
+      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
+                         MAP_FIXED) == (char *) MAP_FAILED)
+        return -2;
+
       h->mprotect_size = new_size;
     }
   else
-    __madvise ((char *)h + new_size, diff, MADV_DONTNEED);
+    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
   /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
 
   h->size = new_size;
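
(Aside: two decommit strategies appear in shrink_heap above. When check_may_shrink_heap() allows it, the freed tail is remapped PROT_NONE with MAP_FIXED, returning the pages to the kernel and making stray touches fault; otherwise madvise(MADV_DONTNEED) keeps the mapping intact but lets the kernel drop the physical pages. Hedged standalone sketches of both, assuming page-aligned arguments and anonymous memory:

#include <sys/mman.h>
#include <stddef.h>

/* Hard decommit: replace the range with fresh PROT_NONE pages, so
   later accesses fault instead of silently faulting in zero pages.  */
static int
decommit_hard (void *addr, size_t len)
{
  void *p = mmap (addr, len, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  return p == MAP_FAILED ? -1 : 0;
}

/* Soft decommit: keep the mapping and its protections, just let the
   kernel reclaim the physical pages behind it.  */
static int
decommit_soft (void *addr, size_t len)
{
  return madvise (addr, len, MADV_DONTNEED);
}
)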
@@ -623,66 +660,70 @@ shrink_heap(heap_info *h, long diff)
 /* Delete a heap. */
 
 #define delete_heap(heap) \
-  do {								\
-    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area)	\
-      aligned_heap_area = NULL;					\
-    __munmap((char*)(heap), HEAP_MAX_SIZE);			\
-  } while (0)
+  do {									      \
+      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)		      \
+        aligned_heap_area = NULL;					      \
+      __munmap ((char *) (heap), HEAP_MAX_SIZE);			      \
+    } while (0)
 
 static int
 internal_function
-heap_trim(heap_info *heap, size_t pad)
+heap_trim (heap_info *heap, size_t pad)
 {
   mstate ar_ptr = heap->ar_ptr;
-  unsigned long pagesz = GLRO(dl_pagesize);
-  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
+  unsigned long pagesz = GLRO (dl_pagesize);
+  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
   heap_info *prev_heap;
   long new_size, top_size, extra, prev_size, misalign;
 
   /* Can this heap go away completely? */
-  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
-    prev_heap = heap->prev;
-    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
-    p = chunk_at_offset(prev_heap, prev_size);
-    /* fencepost must be properly aligned.  */
-    misalign = ((long) p) & MALLOC_ALIGN_MASK;
-    p = chunk_at_offset(prev_heap, prev_size - misalign);
-    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
-    p = prev_chunk(p);
-    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
-    assert(new_size>0 && new_size<(long)(2*MINSIZE));
-    if(!prev_inuse(p))
-      new_size += p->prev_size;
-    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
-    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
-      break;
-    ar_ptr->system_mem -= heap->size;
-    arena_mem -= heap->size;
-    LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
-    delete_heap(heap);
-    heap = prev_heap;
-    if(!prev_inuse(p)) { /* consolidate backward */
-      p = prev_chunk(p);
-      unlink(p, bck, fwd);
+  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
+    {
+      prev_heap = heap->prev;
+      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
+      p = chunk_at_offset (prev_heap, prev_size);
+      /* fencepost must be properly aligned.  */
+      misalign = ((long) p) & MALLOC_ALIGN_MASK;
+      p = chunk_at_offset (prev_heap, prev_size - misalign);
+      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
+      p = prev_chunk (p);
+      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
+      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
+      if (!prev_inuse (p))
+        new_size += p->prev_size;
+      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
+      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
+        break;
+      ar_ptr->system_mem -= heap->size;
+      arena_mem -= heap->size;
+      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
+      delete_heap (heap);
+      heap = prev_heap;
+      if (!prev_inuse (p)) /* consolidate backward */
+        {
+          p = prev_chunk (p);
+          unlink (p, bck, fwd);
+        }
+      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
+      assert (((char *) p + new_size) == ((char *) heap + heap->size));
+      top (ar_ptr) = top_chunk = p;
+      set_head (top_chunk, new_size | PREV_INUSE);
+      /*check_chunk(ar_ptr, top_chunk);*/
     }
-    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
-    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
-    top(ar_ptr) = top_chunk = p;
-    set_head(top_chunk, new_size | PREV_INUSE);
-    /*check_chunk(ar_ptr, top_chunk);*/
-  }
-  top_size = chunksize(top_chunk);
+  top_size = chunksize (top_chunk);
   extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
-  if(extra < (long)pagesz)
+  if (extra < (long) pagesz)
     return 0;
+
   /* Try to shrink. */
-  if(shrink_heap(heap, extra) != 0)
+  if (shrink_heap (heap, extra) != 0)
     return 0;
+
   ar_ptr->system_mem -= extra;
   arena_mem -= extra;
 
   /* Success. Adjust top accordingly. */
-  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
+  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
   /*check_chunk(ar_ptr, top_chunk);*/
   return 1;
 }
@@ -690,52 +731,53 @@ heap_trim(heap_info *heap, size_t pad)
 /* Create a new arena with initial size "size".  */
 
 static mstate
-_int_new_arena(size_t size)
+_int_new_arena (size_t size)
 {
   mstate a;
   heap_info *h;
   char *ptr;
   unsigned long misalign;
 
-  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
-	       mp_.top_pad);
-  if(!h) {
-    /* Maybe size is too large to fit in a single heap.  So, just try
-       to create a minimally-sized arena and let _int_malloc() attempt
-       to deal with the large request via mmap_chunk().  */
-    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
-    if(!h)
-      return 0;
-  }
-  a = h->ar_ptr = (mstate)(h+1);
-  malloc_init_state(a);
+  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
+                mp_.top_pad);
+  if (!h)
+    {
+      /* Maybe size is too large to fit in a single heap.  So, just try
+         to create a minimally-sized arena and let _int_malloc() attempt
+         to deal with the large request via mmap_chunk().  */
+      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
+      if (!h)
+        return 0;
+    }
+  a = h->ar_ptr = (mstate) (h + 1);
+  malloc_init_state (a);
   /*a->next = NULL;*/
   a->system_mem = a->max_system_mem = h->size;
   arena_mem += h->size;
 
   /* Set up the top chunk, with proper alignment. */
-  ptr = (char *)(a + 1);
-  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
+  ptr = (char *) (a + 1);
+  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
   if (misalign > 0)
     ptr += MALLOC_ALIGNMENT - misalign;
-  top(a) = (mchunkptr)ptr;
-  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
+  top (a) = (mchunkptr) ptr;
+  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
 
   LIBC_PROBE (memory_arena_new, 2, a, size);
-  tsd_setspecific(arena_key, (void *)a);
-  mutex_init(&a->mutex);
-  (void)mutex_lock(&a->mutex);
+  tsd_setspecific (arena_key, (void *) a);
+  mutex_init (&a->mutex);
+  (void) mutex_lock (&a->mutex);
 
-  (void)mutex_lock(&list_lock);
+  (void) mutex_lock (&list_lock);
 
   /* Add the new arena to the global list.  */
   a->next = main_arena.next;
   atomic_write_barrier ();
   main_arena.next = a;
 
-  (void)mutex_unlock(&list_lock);
+  (void) mutex_unlock (&list_lock);
 
-  THREAD_STAT(++(a->stat_lock_loop));
+  THREAD_STAT (++(a->stat_lock_loop));
 
   return a;
 }
@@ -747,19 +789,19 @@ get_free_list (void)
   mstate result = free_list;
   if (result != NULL)
     {
-      (void)mutex_lock(&list_lock);
+      (void) mutex_lock (&list_lock);
       result = free_list;
       if (result != NULL)
-	free_list = result->next_free;
-      (void)mutex_unlock(&list_lock);
+        free_list = result->next_free;
+      (void) mutex_unlock (&list_lock);
 
       if (result != NULL)
-	{
-	  LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
-	  (void)mutex_lock(&result->mutex);
-	  tsd_setspecific(arena_key, (void *)result);
-	  THREAD_STAT(++(result->stat_lock_loop));
-	}
+        {
+          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
+          (void) mutex_lock (&result->mutex);
+          tsd_setspecific (arena_key, (void *) result);
+          THREAD_STAT (++(result->stat_lock_loop));
+        }
     }
 
   return result;
@@ -779,8 +821,8 @@ reused_arena (mstate avoid_arena)
   result = next_to_use;
   do
     {
-      if (!mutex_trylock(&result->mutex))
-	goto out;
+      if (!mutex_trylock (&result->mutex))
+        goto out;
 
       result = result->next;
     }
@@ -793,12 +835,12 @@ reused_arena (mstate avoid_arena)
 
   /* No arena available.  Wait for the next in line.  */
   LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
-  (void)mutex_lock(&result->mutex);
+  (void) mutex_lock (&result->mutex);
 
- out:
+out:
   LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
-  tsd_setspecific(arena_key, (void *)result);
-  THREAD_STAT(++(result->stat_lock_loop));
+  tsd_setspecific (arena_key, (void *) result);
+  THREAD_STAT (++(result->stat_lock_loop));
   next_to_use = result->next;
 
   return result;
@@ -806,7 +848,7 @@ reused_arena (mstate avoid_arena)
 
 static mstate
 internal_function
-arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
+arena_get2 (mstate a_tsd, size_t size, mstate avoid_arena)
 {
   mstate a;
 
@@ -817,40 +859,40 @@ arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
     {
       /* Nothing immediately available, so generate a new arena.  */
       if (narenas_limit == 0)
-	{
-	  if (mp_.arena_max != 0)
-	    narenas_limit = mp_.arena_max;
-	  else if (narenas > mp_.arena_test)
-	    {
-	      int n  = __get_nprocs ();
-
-	      if (n >= 1)
-		narenas_limit = NARENAS_FROM_NCORES (n);
-	      else
-		/* We have no information about the system.  Assume two
-		   cores.  */
-		narenas_limit = NARENAS_FROM_NCORES (2);
-	    }
-	}
+        {
+          if (mp_.arena_max != 0)
+            narenas_limit = mp_.arena_max;
+          else if (narenas > mp_.arena_test)
+            {
+              int n = __get_nprocs ();
+
+              if (n >= 1)
+                narenas_limit = NARENAS_FROM_NCORES (n);
+              else
+                /* We have no information about the system.  Assume two
+                   cores.  */
+                narenas_limit = NARENAS_FROM_NCORES (2);
+            }
+        }
     repeat:;
       size_t n = narenas;
       /* NB: the following depends on the fact that (size_t)0 - 1 is a
-	 very large number and that the underflow is OK.  If arena_max
-	 is set the value of arena_test is irrelevant.  If arena_test
-	 is set but narenas is not yet larger or equal to arena_test
-	 narenas_limit is 0.  There is no possibility for narenas to
-	 be too big for the test to always fail since there is not
-	 enough address space to create that many arenas.  */
+         very large number and that the underflow is OK.  If arena_max
+         is set the value of arena_test is irrelevant.  If arena_test
+         is set but narenas is not yet larger or equal to arena_test
+         narenas_limit is 0.  There is no possibility for narenas to
+         be too big for the test to always fail since there is not
+         enough address space to create that many arenas.  */
       if (__builtin_expect (n <= narenas_limit - 1, 0))
-	{
-	  if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
-	    goto repeat;
-	  a = _int_new_arena (size);
-	  if (__builtin_expect (a == NULL, 0))
-	    catomic_decrement (&narenas);
-	}
+        {
+          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
+            goto repeat;
+          a = _int_new_arena (size);
+          if (__builtin_expect (a == NULL, 0))
+            catomic_decrement (&narenas);
+        }
       else
-	a = reused_arena (avoid_arena);
+        a = reused_arena (avoid_arena);
     }
   return a;
 }
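
(Aside: the long comment in this hunk is worth unpacking. Because narenas_limit is a size_t, narenas_limit - 1 wraps to SIZE_MAX while the limit is still unset (0), so the n <= narenas_limit - 1 test always passes and new arenas keep being created until a real limit is computed. A small demonstration of the wrap, assuming nothing beyond ISO C:

#include <stdio.h>
#include <stddef.h>

int
main (void)
{
  size_t narenas_limit = 0;   /* limit not computed yet */
  size_t n = 5;               /* pretend five arenas exist */

  /* (size_t) 0 - 1 wraps to SIZE_MAX, so the test degenerates to
     n <= SIZE_MAX and always holds: a new arena may be created.  */
  printf ("%d\n", n <= narenas_limit - 1);    /* prints 1 */

  /* With a genuine limit the same test behaves normally.  */
  narenas_limit = 4;
  printf ("%d\n", n <= narenas_limit - 1);    /* prints 0: 5 > 3 */
  return 0;
}
)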
@@ -863,16 +905,19 @@ static mstate
 arena_get_retry (mstate ar_ptr, size_t bytes)
 {
   LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
-  if(ar_ptr != &main_arena) {
-    (void)mutex_unlock(&ar_ptr->mutex);
-    ar_ptr = &main_arena;
-    (void)mutex_lock(&ar_ptr->mutex);
-  } else {
-    /* Grab ar_ptr->next prior to releasing its lock.  */
-    mstate prev = ar_ptr->next ? ar_ptr : 0;
-    (void)mutex_unlock(&ar_ptr->mutex);
-    ar_ptr = arena_get2(prev, bytes, ar_ptr);
-  }
+  if (ar_ptr != &main_arena)
+    {
+      (void) mutex_unlock (&ar_ptr->mutex);
+      ar_ptr = &main_arena;
+      (void) mutex_lock (&ar_ptr->mutex);
+    }
+  else
+    {
+      /* Grab ar_ptr->next prior to releasing its lock.  */
+      mstate prev = ar_ptr->next ? ar_ptr : 0;
+      (void) mutex_unlock (&ar_ptr->mutex);
+      ar_ptr = arena_get2 (prev, bytes, ar_ptr);
+    }
 
   return ar_ptr;
 }
@@ -881,15 +926,15 @@ static void __attribute__ ((section ("__libc_thread_freeres_fn")))
 arena_thread_freeres (void)
 {
   void *vptr = NULL;
-  mstate a = tsd_getspecific(arena_key, vptr);
-  tsd_setspecific(arena_key, NULL);
+  mstate a = tsd_getspecific (arena_key, vptr);
+  tsd_setspecific (arena_key, NULL);
 
   if (a != NULL)
     {
-      (void)mutex_lock(&list_lock);
+      (void) mutex_lock (&list_lock);
       a->next_free = free_list;
       free_list = a;
-      (void)mutex_unlock(&list_lock);
+      (void) mutex_unlock (&list_lock);
     }
 }
 text_set_element (__libc_thread_subfreeres, arena_thread_freeres);