author     Alexander Monakov <amonakov@ispras.ru>  2018-04-16 20:54:36 +0300
committer  Rich Felker <dalias@aerifal.cx>         2018-04-17 19:23:00 -0400
commit     ce7ae11acfd9db8eb92cc6823c132e1825918d92 (patch)
tree       209dc86f7454ff68af570afb3ad29e2bbc1a1a27 /src/malloc
parent     d889cc3463edc92869676c1eec34a8f52d942adb (diff)
ldso, malloc: implement reclaim_gaps via __malloc_donate
Split 'free' into unmap_chunk and bin_chunk, use the latter to introduce
__malloc_donate and use it in reclaim_gaps instead of calling 'free'.
Diffstat (limited to 'src/malloc')
-rw-r--r--  src/malloc/malloc.c | 61
1 file changed, 43 insertions(+), 18 deletions(-)
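
The ldso side of the change is not shown below, since the diffstat is restricted to src/malloc. As a rough sketch (assuming dynlink.c's existing relro clamping and its laddr_pg helper, neither of which is confirmed by this diff), reclaim() no longer fabricates a chunk header and calls free(); it hands the raw gap boundaries to __malloc_donate and lets malloc decide whether anything usable fits:

static void reclaim(struct dso *dso, size_t start, size_t end)
{
	/* Never donate bytes that fall inside the RELRO region. */
	if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
	if (end   >= dso->relro_start && end   < dso->relro_end) end = dso->relro_start;
	if (start >= end) return;
	/* All alignment and minimum-size checks now live in malloc. */
	char *base = laddr_pg(dso, start);
	__malloc_donate(base, base+(end-start));
}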
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index db19bc34..6605ec3a 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -299,6 +299,8 @@ static int pretrim(struct chunk *self, size_t n, int i, int j)
 	return 1;
 }
 
+static void bin_chunk(struct chunk *);
+
 static void trim(struct chunk *self, size_t n)
 {
 	size_t n1 = CHUNK_SIZE(self);
@@ -314,7 +316,7 @@ static void trim(struct chunk *self, size_t n)
 	next->psize = n1-n | C_INUSE;
 	self->csize = n | C_INUSE;
 
-	free(CHUNK_TO_MEM(split));
+	bin_chunk(split);
 }
 
 void *malloc(size_t n)
@@ -465,29 +467,14 @@ copy_free_ret:
 	return new;
 }
 
-void free(void *p)
+static void bin_chunk(struct chunk *self)
 {
-	struct chunk *self, *next;
+	struct chunk *next = NEXT_CHUNK(self);
 	size_t final_size, new_size, size;
 	int reclaim=0;
 	int i;
 
-	if (!p) return;
-
-	self = MEM_TO_CHUNK(p);
-
-	if (IS_MMAPPED(self)) {
-		size_t extra = self->psize;
-		char *base = (char *)self - extra;
-		size_t len = CHUNK_SIZE(self) + extra;
-		/* Crash on double free */
-		if (extra & 1) a_crash();
-		__munmap(base, len);
-		return;
-	}
-
 	final_size = new_size = CHUNK_SIZE(self);
-	next = NEXT_CHUNK(self);
 
 	/* Crash on corrupted footer (likely from buffer overflow) */
 	if (next->psize != self->csize) a_crash();
@@ -548,3 +535,41 @@ void free(void *p)
 
 	unlock_bin(i);
 }
+
+static void unmap_chunk(struct chunk *self)
+{
+	size_t extra = self->psize;
+	char *base = (char *)self - extra;
+	size_t len = CHUNK_SIZE(self) + extra;
+	/* Crash on double free */
+	if (extra & 1) a_crash();
+	__munmap(base, len);
+}
+
+void free(void *p)
+{
+	if (!p) return;
+
+	struct chunk *self = MEM_TO_CHUNK(p);
+
+	if (IS_MMAPPED(self))
+		unmap_chunk(self);
+	else
+		bin_chunk(self);
+}
+
+void __malloc_donate(char *start, char *end)
+{
+	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
+	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;
+
+	if (end - start <= OVERHEAD + align_start_up + align_end_down)
+		return;
+	start += align_start_up + OVERHEAD;
+	end   -= align_end_down;
+
+	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
+	c->psize = n->csize = C_INUSE;
+	c->csize = n->psize = C_INUSE | (end-start);
+	bin_chunk(c);
+}