diff options
author | Florian Weimer <fweimer@redhat.com> | 2022-08-15 16:45:40 +0200 |
---|---|---|
committer | Florian Weimer <fweimer@redhat.com> | 2022-08-15 16:45:40 +0200 |
commit | 85860ad6eaf4c9739318f6b2a1ff7c2fa6b12ab5 (patch) | |
tree | df93f85338333de44d8ee2002ff855da0927c573 /malloc | |
parent | f82e05ebb295cadd35f7372f652c72264da810ad (diff) | |
download | glibc-85860ad6eaf4c9739318f6b2a1ff7c2fa6b12ab5.tar.gz glibc-85860ad6eaf4c9739318f6b2a1ff7c2fa6b12ab5.tar.xz glibc-85860ad6eaf4c9739318f6b2a1ff7c2fa6b12ab5.zip |
malloc: Do not use MAP_NORESERVE to allocate heap segments
Address space for heap segments is reserved in a mmap call with MAP_ANONYMOUS | MAP_PRIVATE and protection flags PROT_NONE. This reservation does not count against the RSS limit of the process or system. Backing memory is allocated using mprotect in alloc_new_heap and grow_heap, and at this point, the allocator expects the kernel to provide memory (subject to memory overcommit). The SIGSEGV that might be generated due to MAP_NORESERVE (according to the mmap manual page) does not seem to occur in practice; it is always SIGKILL from the OOM killer. Even if there is a way that SIGSEGV could be generated, it is confusing to applications that this only happens for secondary heaps, not for large mmap-based allocations, and not for the main arena. Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
Diffstat (limited to 'malloc')
-rw-r--r-- | malloc/arena.c | 5 | ||||
-rw-r--r-- | malloc/malloc.c | 4 |
2 files changed, 1 insertion, 8 deletions
diff --git a/malloc/arena.c b/malloc/arena.c index defd25c8a6..074ecbc09f 100644 --- a/malloc/arena.c +++ b/malloc/arena.c @@ -559,16 +559,13 @@ new_heap (size_t size, size_t top_pad) #if HAVE_TUNABLES if (__glibc_unlikely (mp_.hp_pagesize != 0)) { - /* MAP_NORESERVE is not used for huge pages because some kernel may - not reserve the mmap region and a subsequent access may trigger - a SIGBUS if there is no free pages in the pool. */ heap_info *h = alloc_new_heap (size, top_pad, mp_.hp_pagesize, mp_.hp_flags); if (h != NULL) return h; } #endif - return alloc_new_heap (size, top_pad, GLRO (dl_pagesize), MAP_NORESERVE); + return alloc_new_heap (size, top_pad, GLRO (dl_pagesize), 0); } /* Grow a heap. size is automatically rounded up to a diff --git a/malloc/malloc.c b/malloc/malloc.c index 914052eb69..29fa71b3b2 100644 --- a/malloc/malloc.c +++ b/malloc/malloc.c @@ -1110,10 +1110,6 @@ static mchunkptr mremap_chunk(mchunkptr p, size_t new_size); # define MAP_ANONYMOUS MAP_ANON #endif -#ifndef MAP_NORESERVE -# define MAP_NORESERVE 0 -#endif - #define MMAP(addr, size, prot, flags) \ __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0) |