diff options
author | Siddhesh Poyarekar <siddhesh@sourceware.org> | 2021-07-22 18:38:10 +0530 |
---|---|---|
committer | Siddhesh Poyarekar <siddhesh@sourceware.org> | 2021-07-22 18:38:10 +0530 |
commit | 0552fd2c7d4e8a570cb4fe4dfe65e96f6d24b0cd (patch) | |
tree | baf150b3e1b728c6c13b95116241a61bff5f21ab /malloc/malloc.c | |
parent | b5bd5bfe88f496463ec9fab680a8edf64d7c2a42 (diff) | |
download | glibc-0552fd2c7d4e8a570cb4fe4dfe65e96f6d24b0cd.tar.gz glibc-0552fd2c7d4e8a570cb4fe4dfe65e96f6d24b0cd.tar.xz glibc-0552fd2c7d4e8a570cb4fe4dfe65e96f6d24b0cd.zip |
Move malloc_{g,s}et_state to libc_malloc_debug
These deprecated functions are only safe to call from __malloc_initialize_hook and as a result, are not useful in the general case. Move the implementations to libc_malloc_debug so that existing binaries that need it will now have to preload the debug DSO to work correctly. This also allows simplification of the core malloc implementation by dropping all the undumping support code that was added to make malloc_set_state work. One known breakage is that of ancient emacs binaries that depend on this. They will now crash when running with this libc. With LD_BIND_NOW=1, it will terminate immediately because of not being able to find malloc_set_state but with lazy binding it will crash in unpredictable ways. It will need a preloaded libc_malloc_debug.so so that its initialization hook is executed to allow its malloc implementation to work properly. Reviewed-by: Carlos O'Donell <carlos@redhat.com> Tested-by: Carlos O'Donell <carlos@redhat.com>
Diffstat (limited to 'malloc/malloc.c')
-rw-r--r-- | malloc/malloc.c | 55 |
1 file changed, 5 insertions, 50 deletions
diff --git a/malloc/malloc.c b/malloc/malloc.c index b8fcb2f2d3..38b649fcba 100644 --- a/malloc/malloc.c +++ b/malloc/malloc.c @@ -1921,19 +1921,6 @@ static struct malloc_state main_arena = .attached_threads = 1 }; -/* These variables are used for undumping support. Chunked are marked - as using mmap, but we leave them alone if they fall into this - range. NB: The chunk size for these chunks only includes the - initial size field (of SIZE_SZ bytes), there is no trailing size - field (unlike with regular mmapped chunks). */ -static mchunkptr dumped_main_arena_start; /* Inclusive. */ -static mchunkptr dumped_main_arena_end; /* Exclusive. */ - -/* True if the pointer falls into the dumped arena. Use this after - chunk_is_mmapped indicates a chunk is mmapped. */ -#define DUMPED_MAIN_ARENA_CHUNK(p) \ - ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end) - /* There is only one instance of the malloc parameters. */ static struct malloc_par mp_ = @@ -2083,7 +2070,7 @@ do_check_chunk (mstate av, mchunkptr p) assert (prev_inuse (p)); } } - else if (!DUMPED_MAIN_ARENA_CHUNK (p)) + else { /* address is outside main heap */ if (contiguous (av) && av->top != initial_top (av)) @@ -2948,11 +2935,6 @@ munmap_chunk (mchunkptr p) assert (chunk_is_mmapped (p)); - /* Do nothing if the chunk is a faked mmapped chunk in the dumped - main arena. We never free this memory. */ - if (DUMPED_MAIN_ARENA_CHUNK (p)) - return; - uintptr_t mem = (uintptr_t) chunk2mem (p); uintptr_t block = (uintptr_t) p - prev_size (p); size_t total_size = prev_size (p) + size; @@ -3275,8 +3257,7 @@ __libc_free (void *mem) Dumped fake mmapped chunks do not affect the threshold. 
*/ if (!mp_.no_dyn_threshold && chunksize_nomask (p) > mp_.mmap_threshold - && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX - && !DUMPED_MAIN_ARENA_CHUNK (p)) + && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX) { mp_.mmap_threshold = chunksize (p); mp_.trim_threshold = 2 * mp_.mmap_threshold; @@ -3343,12 +3324,9 @@ __libc_realloc (void *oldmem, size_t bytes) /* Little security check which won't hurt performance: the allocator never wrapps around at the end of the address space. Therefore we can exclude some size values which might appear here by - accident or by "design" from some intruder. We need to bypass - this check for dumped fake mmap chunks from the old main arena - because the new malloc may provide additional alignment. */ + accident or by "design" from some intruder. */ if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0) - || __builtin_expect (misaligned_chunk (oldp), 0)) - && !DUMPED_MAIN_ARENA_CHUNK (oldp)) + || __builtin_expect (misaligned_chunk (oldp), 0))) malloc_printerr ("realloc(): invalid pointer"); if (!checked_request2size (bytes, &nb)) @@ -3359,24 +3337,6 @@ __libc_realloc (void *oldmem, size_t bytes) if (chunk_is_mmapped (oldp)) { - /* If this is a faked mmapped chunk from the dumped main arena, - always make a copy (and do not free the old chunk). */ - if (DUMPED_MAIN_ARENA_CHUNK (oldp)) - { - /* Must alloc, copy, free. */ - void *newmem = __libc_malloc (bytes); - if (newmem == 0) - return NULL; - /* Copy as many bytes as are available from the old chunk - and fit into the new size. NB: The overhead for faked - mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for - regular mmapped chunks. 
*/ - if (bytes > oldsize - SIZE_SZ) - bytes = oldsize - SIZE_SZ; - memcpy (newmem, oldmem, bytes); - return newmem; - } - void *newmem; #if HAVE_MREMAP @@ -5056,12 +5016,7 @@ musable (void *mem) p = mem2chunk (mem); if (chunk_is_mmapped (p)) - { - if (DUMPED_MAIN_ARENA_CHUNK (p)) - result = chunksize (p) - SIZE_SZ; - else - result = chunksize (p) - CHUNK_HDR_SZ; - } + result = chunksize (p) - CHUNK_HDR_SZ; else if (inuse (p)) result = memsize (p); |