Diffstat (limited to 'src/malloc/malloc.c')
-rw-r--r--  src/malloc/malloc.c | 86
1 file changed, 28 insertions, 58 deletions
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index a42aeede..290fda1c 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -13,7 +13,6 @@
 #define inline inline __attribute__((always_inline))
 #endif
 
-uintptr_t __brk(uintptr_t);
 void *__mmap(void *, size_t, int, int, int, off_t);
 int __munmap(void *, size_t);
 void *__mremap(void *, size_t, size_t, int, ...);
@@ -31,13 +30,9 @@ struct bin {
 };
 
 static struct {
-	uintptr_t brk;
-	size_t *heap;
 	volatile uint64_t binmap;
 	struct bin bins[64];
-	volatile int brk_lock[2];
 	volatile int free_lock[2];
-	unsigned mmap_step;
 } mal;
 
 
@@ -152,77 +147,52 @@ void __dump_heap(int x)
 }
 #endif
 
-static int is_near_stack(uintptr_t b)
-{
-	const uintptr_t c = 8<<20;
-	uintptr_t a = (uintptr_t)libc.auxv;
-	uintptr_t d = (uintptr_t)&b;
-	return a-b<=c || d-b<=c;
-}
+void *__expand_heap(size_t *);
 
 static struct chunk *expand_heap(size_t n)
 {
-	static int init;
+	static int heap_lock[2];
+	static void *end;
+	void *p;
 	struct chunk *w;
-	uintptr_t new;
-
-	lock(mal.brk_lock);
 
-	if (!init) {
-		mal.brk = __brk(0);
-#ifdef SHARED
-		mal.brk = mal.brk + PAGE_SIZE-1 & -PAGE_SIZE;
-#endif
-		mal.brk = mal.brk + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
-		mal.heap = (void *)mal.brk;
-		init = 1;
-	}
+	/* The argument n already accounts for the caller's chunk
+	 * overhead needs, but if the heap can't be extended in-place,
+	 * we need room for an extra zero-sized sentinel chunk. */
+	n += SIZE_ALIGN;
 
-	if (n > SIZE_MAX - mal.brk - 2*PAGE_SIZE) goto fail;
-	new = mal.brk + n + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
-	n = new - mal.brk;
+	lock(heap_lock);
 
-	if (is_near_stack(mal.brk) || __brk(new) != new) {
-		size_t min = (size_t)PAGE_SIZE << mal.mmap_step/2;
-		n += -n & PAGE_SIZE-1;
-		if (n < min) n = min;
-		void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
-			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-		if (area == MAP_FAILED) goto fail;
+	p = __expand_heap(&n);
+	if (!p) {
+		unlock(heap_lock);
+		return 0;
+	}
 
-		mal.mmap_step++;
-		area = (char *)area + SIZE_ALIGN - OVERHEAD;
-		w = area;
+	/* If not just expanding existing space, we need to make a
+	 * new sentinel chunk below the allocated space. */
+	if (p != end) {
+		/* Valid/safe because of the prologue increment. */
 		n -= SIZE_ALIGN;
+		p = (char *)p + SIZE_ALIGN;
+		w = MEM_TO_CHUNK(p);
 		w->psize = 0 | C_INUSE;
-		w->csize = n | C_INUSE;
-		w = NEXT_CHUNK(w);
-		w->psize = n | C_INUSE;
-		w->csize = 0 | C_INUSE;
-
-		unlock(mal.brk_lock);
-
-		return area;
 	}
 
-	w = MEM_TO_CHUNK(mal.heap);
-	w->psize = 0 | C_INUSE;
-
-	w = MEM_TO_CHUNK(new);
+	/* Record new heap end and fill in footer. */
+	end = (char *)p + n;
+	w = MEM_TO_CHUNK(end);
 	w->psize = n | C_INUSE;
 	w->csize = 0 | C_INUSE;
 
-	w = MEM_TO_CHUNK(mal.brk);
+	/* Fill in header, which may be new or may be replacing a
+	 * zero-size sentinel header at the old end-of-heap. */
+	w = MEM_TO_CHUNK(p);
 	w->csize = n | C_INUSE;
-	mal.brk = new;
-	
-	unlock(mal.brk_lock);
+
+	unlock(heap_lock);
 
 	return w;
-fail:
-	unlock(mal.brk_lock);
-	errno = ENOMEM;
-	return 0;
 }
 
 static int adjust_size(size_t *n)
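
Note on the new interface: the rewritten expand_heap() acquires memory through __expand_heap(), which is declared above but defined elsewhere in the tree (src/malloc/expand_heap.c). As a rough illustration of the contract it provides (round the request up to whole pages, prefer growing the heap in place so new space is contiguous with the old end, fall back to a fresh anonymous mapping, and report the size actually obtained back through *pn), here is a minimal self-contained sketch. It uses the public sbrk() and mmap() interfaces rather than musl's internal syscall wrappers; expand_heap_sketch and PAGE are illustrative names, not musl identifiers, and this is not the actual implementation.

#define _DEFAULT_SOURCE
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>

#define PAGE 4096  /* illustrative; real code queries the page size */

/* Sketch only, not musl's __expand_heap. Returns at least *pn bytes of
 * new heap space and writes the size actually obtained back to *pn. */
void *expand_heap_sketch(size_t *pn)
{
	size_t n = *pn;

	/* Refuse requests that could overflow after rounding. */
	if (n > SIZE_MAX/2 - PAGE) {
		errno = ENOMEM;
		return 0;
	}
	n += -n & (PAGE-1);  /* round up to a whole page */

	/* First choice: extend the program break, so the new space is
	 * contiguous with the previous end of the brk-managed heap. */
	void *p = sbrk(n);
	if (p != (void *)-1) {
		*pn = n;
		return p;
	}

	/* Fallback: a fresh anonymous mapping, generally not contiguous;
	 * the caller must then start it with a leading sentinel chunk. */
	p = mmap(0, n, PROT_READ|PROT_WRITE,
	         MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) return 0;
	*pn = n;
	return p;
}

Because the obtained size is written back through *pn, the caller above can place its zero-size footer sentinel at the true end of the received memory rather than at the end of the requested size, and its p != end test catches exactly the non-contiguous returns that require the extra leading sentinel (which is why n is pre-incremented by SIZE_ALIGN before the call).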