author    Florian Weimer <fweimer@redhat.com>  2016-10-28 16:26:57 +0200
committer Florian Weimer <fweimer@redhat.com>  2016-10-28 16:45:45 +0200
commit    e9c4fe93b3855239752819303ca377dff0ed0553 (patch)
tree      a6699b660d68aca544292661543b27b95dda4daf /malloc/arena.c
parent    4725d33eed118d69b8110285f7741cde9ddc8b4f (diff)
malloc: Use accessors for chunk metadata access
This change allows us to change the encoding of these struct members
in a centralized fashion.
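The accessors this commit switches to (chunksize_nomask, prev_size, chunk_main_arena) wrap direct reads of the malloc_chunk size fields. A minimal sketch of what such wrappers look like, assuming the struct members are still named size and prev_size as in this tree; the actual definitions live in malloc/malloc.c and may differ:

    /* Hypothetical sketch of the accessor macros used below.  */

    /* Chunk size including the flag bits kept in the low-order bits.  */
    #define chunksize_nomask(p)   ((p)->size)

    /* Chunk size with the flag bits masked off.  */
    #define chunksize(p)          (chunksize_nomask (p) & ~(SIZE_BITS))

    /* Size of the previous chunk, valid only if !prev_inuse (p).  */
    #define prev_size(p)          ((p)->prev_size)

    /* True if the chunk was allocated from the main arena.  */
    #define chunk_main_arena(p)   (((p)->size & NON_MAIN_ARENA) == 0)

Funneling every access through such macros means a later change to how these members are encoded or named only has to touch the macro definitions, not every call site.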
Diffstat (limited to 'malloc/arena.c')
-rw-r--r--  malloc/arena.c  |  6
1 file changed, 3 insertions, 3 deletions
diff --git a/malloc/arena.c b/malloc/arena.c
index 976048370a..eed42471a7 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -122,7 +122,7 @@ int __malloc_initialized = -1;
 #define heap_for_ptr(ptr) \
   ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
 #define arena_for_chunk(ptr) \
-  (chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
+  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)
 
 
 /**************************************************************************/
@@ -560,12 +560,12 @@ heap_trim (heap_info *heap, size_t pad)
       /* fencepost must be properly aligned.  */
       misalign = ((long) p) & MALLOC_ALIGN_MASK;
       p = chunk_at_offset (prev_heap, prev_size - misalign);
-      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
+      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
       p = prev_chunk (p);
       new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
       assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
       if (!prev_inuse (p))
-        new_size += p->prev_size;
+        new_size += prev_size (p);
       assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
       if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
         break;