author     Rich Felker <dalias@aerifal.cx>   2020-06-03 19:22:12 -0400
committer  Rich Felker <dalias@aerifal.cx>   2020-06-03 19:23:02 -0400
commit     384c0131ccda2656dec23a0416ad3f14101151a7 (patch)
tree       0561f403dafc58c8e29de0f0105fc9d69e095550 /src/malloc/oldmalloc/aligned_alloc.c
parent     eaa0f2496700c238e7e3c112d36445f3aee06ff1 (diff)
move oldmalloc to its own directory under src/malloc
this sets the stage for replacement, and makes it practical to keep
oldmalloc around as a build option for a while if that ends up being
useful.

only the files which are actually part of the implementation are
moved. memalign and posix_memalign are entirely generic. in theory
calloc could be pulled out too, but it's useful to have it tied to the
implementation so as to optimize out unnecessary memset when
implementation details make it possible to know the memory is already
clear.
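
The calloc point can be sketched as follows (a hypothetical illustration, not
musl's actual calloc): in oldmalloc, mmapped chunks are unmapped on free, so an
mmapped chunk returned by malloc comes straight from kernel-zeroed pages and
the memset can be skipped. IS_MMAPPED and MEM_TO_CHUNK are the malloc_impl.h
macros used in the diff below.

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "malloc_impl.h"

/* sketch only: calloc_sketch is a hypothetical name; assumes any
 * IS_MMAPPED chunk was freshly mapped and is therefore zero-filled */
void *calloc_sketch(size_t m, size_t n)
{
	/* reject m*n overflow */
	if (n && m > (size_t)-1/n) {
		errno = ENOMEM;
		return 0;
	}
	void *p = malloc(m*n);
	if (!p) return 0;
	if (IS_MMAPPED(MEM_TO_CHUNK(p)))
		return p; /* kernel-zeroed pages; skip the memset */
	return memset(p, 0, m*n);
}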
Diffstat (limited to 'src/malloc/oldmalloc/aligned_alloc.c')
 -rw-r--r--  src/malloc/oldmalloc/aligned_alloc.c | 52
 1 file changed, 52 insertions, 0 deletions
diff --git a/src/malloc/oldmalloc/aligned_alloc.c b/src/malloc/oldmalloc/aligned_alloc.c
new file mode 100644
index 00000000..e06c76ed
--- /dev/null
+++ b/src/malloc/oldmalloc/aligned_alloc.c
@@ -0,0 +1,52 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <errno.h>
+#include "malloc_impl.h"
+
+void *aligned_alloc(size_t align, size_t len)
+{
+	unsigned char *mem, *new;
+
+	if ((align & -align) != align) {
+		errno = EINVAL;
+		return 0;
+	}
+
+	if (len > SIZE_MAX - align || __malloc_replaced) {
+		errno = ENOMEM;
+		return 0;
+	}
+
+	if (align <= SIZE_ALIGN)
+		return malloc(len);
+
+	if (!(mem = malloc(len + align-1)))
+		return 0;
+
+	new = (void *)((uintptr_t)mem + align-1 & -align);
+	if (new == mem) return mem;
+
+	struct chunk *c = MEM_TO_CHUNK(mem);
+	struct chunk *n = MEM_TO_CHUNK(new);
+
+	if (IS_MMAPPED(c)) {
+		/* Apply difference between aligned and original
+		 * address to the "extra" field of mmapped chunk. */
+		n->psize = c->psize + (new-mem);
+		n->csize = c->csize - (new-mem);
+		return new;
+	}
+
+	struct chunk *t = NEXT_CHUNK(c);
+
+	/* Split the allocated chunk into two chunks. The aligned part
+	 * that will be used has the size in its footer reduced by the
+	 * difference between the aligned and original addresses, and
+	 * the resulting size copied to its header. A new header and
+	 * footer are written for the split-off part to be freed. */
+	n->psize = c->csize = C_INUSE | (new-mem);
+	n->csize = t->psize -= new-mem;
+
+	__bin_chunk(c);
+	return new;
+}
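
Two bit tricks above are worth spelling out: (align & -align) == align holds
exactly when align is a nonzero power of two, because -align in two's
complement isolates align's lowest set bit; and (addr + align-1) & -align
rounds addr up to the next multiple of a power-of-two align. Over-allocating
len + align-1 bytes then guarantees such a multiple exists inside the block.
A standalone demonstration (hypothetical test code, not part of the commit):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* a & -a isolates the lowest set bit of a; it equals a
	 * exactly when a has a single bit set, i.e. is a power of two */
	for (size_t a = 1; a <= 8; a++)
		printf("align=%zu power-of-two=%d\n", a, (a & -a) == a);

	/* (addr + align-1) & -align rounds addr up to a multiple of align */
	uintptr_t addr = 0x1003, align = 16;
	printf("0x%jx rounds up to 0x%jx\n",
	       (uintmax_t)addr, (uintmax_t)((addr + align-1) & -align));
	return 0;
}

When the rounded address differs from the original, the function avoids any
copying by rewriting chunk metadata instead: the mmapped case just shifts the
offset recorded in the psize/csize fields, while the normal case splits the
chunk in place and frees the unused head through __bin_chunk.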