author	Rich Felker <dalias@aerifal.cx>	2018-04-19 20:45:48 -0400
committer	Rich Felker <dalias@aerifal.cx>	2018-04-19 20:45:48 -0400
commit	3c2cbbe7ba8b4486299ae0d5336ae01ab520d116
tree	f269c212fa64c10f68e990550ef6c91c885592e3 /src/malloc/memalign.c
parent	23389b1988b061e8487c316893a8a8eb77770a2f
using malloc implementation types/macros/idioms for memalign
the generated code should be mostly unchanged, except for explicit use
of C_INUSE in place of copying the low bits from existing chunk
headers/footers.

these changes also remove mild UB due to dubious arithmetic on
pointers into imaginary size_t[] arrays.
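
For reference, the commit builds on the chunk type and accessor macros from src/internal/malloc_impl.h. A sketch of the relevant definitions as they stood in musl around this time (reconstructed, so treat exact spellings as assumptions rather than a verbatim quote):

	/* sketch of src/internal/malloc_impl.h, not verbatim */
	struct chunk {
		size_t psize, csize;       /* footer of previous chunk; header of this one */
		struct chunk *next, *prev; /* bin links, only meaningful while free */
	};

	#define OVERHEAD (2*sizeof(size_t))
	#define SIZE_ALIGN (4*sizeof(size_t))
	#define C_INUSE ((size_t)1)

	#define CHUNK_SIZE(c) ((c)->csize & -2)
	#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
	#define MEM_TO_CHUNK(p) ((struct chunk *)((char *)(p) - OVERHEAD))
	#define IS_MMAPPED(c) (!((c)->csize & C_INUSE))

Two details make the diff below easier to read: SIZE_ALIGN is 4*sizeof(size_t), which is why the old literal test becomes align <= SIZE_ALIGN, and a chunk's psize field doubles as the footer of the chunk before it, so writing n->psize in the patch is what writes the footer of the split-off prefix chunk.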
Diffstat (limited to 'src/malloc/memalign.c')
-rw-r--r--	src/malloc/memalign.c	42
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/src/malloc/memalign.c b/src/malloc/memalign.c
index 8fb2002c..9c420229 100644
--- a/src/malloc/memalign.c
+++ b/src/malloc/memalign.c
@@ -2,49 +2,51 @@
 #include <stdint.h>
 #include <errno.h>
 #include "libc.h"
+#include "malloc_impl.h"
 
 void *__memalign(size_t align, size_t len)
 {
-	unsigned char *mem, *new, *end;
-	size_t header, footer;
+	unsigned char *mem, *new;
 
 	if ((align & -align) != align) {
 		errno = EINVAL;
-		return NULL;
+		return 0;
 	}
 
 	if (len > SIZE_MAX - align) {
 		errno = ENOMEM;
-		return NULL;
+		return 0;
 	}
 
-	if (align <= 4*sizeof(size_t)) {
-		if (!(mem = malloc(len)))
-			return NULL;
-		return mem;
-	}
+	if (align <= SIZE_ALIGN)
+		return malloc(len);
 
 	if (!(mem = malloc(len + align-1)))
-		return NULL;
+		return 0;
 
 	new = (void *)((uintptr_t)mem + align-1 & -align);
 	if (new == mem) return mem;
 
-	header = ((size_t *)mem)[-1];
+	struct chunk *c = MEM_TO_CHUNK(mem);
+	struct chunk *n = MEM_TO_CHUNK(new);
 
-	if (!(header & 7)) {
-		((size_t *)new)[-2] = ((size_t *)mem)[-2] + (new-mem);
-		((size_t *)new)[-1] = ((size_t *)mem)[-1] - (new-mem);
+	if (IS_MMAPPED(c)) {
+		/* Apply difference between aligned and original
+		 * address to the "extra" field of mmapped chunk. */
+		n->psize = c->psize + (new-mem);
+		n->csize = c->csize - (new-mem);
 		return new;
 	}
 
-	end = mem + (header & -8);
-	footer = ((size_t *)end)[-2];
+	struct chunk *t = NEXT_CHUNK(c);
 
-	((size_t *)mem)[-1] = header&7 | new-mem;
-	((size_t *)new)[-2] = footer&7 | new-mem;
-	((size_t *)new)[-1] = header&7 | end-new;
-	((size_t *)end)[-2] = footer&7 | end-new;
+	/* Split the allocated chunk into two chunks. The aligned part
+	 * that will be used has the size in its footer reduced by the
+	 * difference between the aligned and original addresses, and
+	 * the resulting size copied to its header. A new header and
+	 * footer are written for the split-off part to be freed. */
+	n->psize = c->csize = C_INUSE | (new-mem);
+	n->csize = t->psize -= new-mem;
 
 	free(mem);
 	return new;
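
Two bit tricks carry the whole function: (align & -align) == align rejects any align with more than one bit set (negating an unsigned value isolates its lowest set bit; zero passes trivially and then falls through the align <= SIZE_ALIGN path), and ((uintptr_t)mem + align-1) & -align rounds mem up to the next multiple of align. A minimal standalone demo of both, with hypothetical values that are not part of musl:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		size_t align = 64;
		uintptr_t mem = 0x1003;  /* pretend malloc returned this address */

		/* power-of-two check: align & -align keeps only the lowest set bit */
		printf("power of two: %d\n", (align & -align) == align);  /* prints 1 */

		/* round up: overshoot by align-1, then clear the low bits */
		uintptr_t new = (mem + align - 1) & -(uintptr_t)align;
		printf("aligned: %#lx\n", (unsigned long)new);  /* prints 0x1040 */
		return 0;
	}

Note how the patch keeps these tricks safe: the earlier len > SIZE_MAX - align check guarantees len + align-1 cannot overflow, and the new == mem early return skips the chunk-splitting bookkeeping entirely when malloc already happened to return suitably aligned memory.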