| | | |
|---|---|---|
| author | Ulrich Drepper <drepper@redhat.com> | 2004-10-04 02:27:39 +0000 |
| committer | Ulrich Drepper <drepper@redhat.com> | 2004-10-04 02:27:39 +0000 |
| commit | 26d550d38b5c49ddf29055d07301f18a61fc0145 | |
| tree | 4752df4f8f80ea54f84bb77cfa2fafaab954b98b | |
| parent | 48ad81fa2fcabe1a371813d52087e27c30d6e32a | |
Update.
2004-03-18  Jakub Jelinek  <jakub@redhat.com>

	* malloc/arena.c (aligned_heap_area): New variable.
	(new_heap): If aligned_heap_area != NULL, attempt to use that
	first.  If HEAP_MAX_SIZE << 1 area is already HEAP_MAX_SIZE bytes
	aligned, remember the second half in aligned_heap_area.
	(delete_heap): Clear aligned_heap_area if deleting the area right
	before aligned_heap_area.
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | ChangeLog | 9 |
| -rw-r--r-- | malloc/arena.c | 62 |

2 files changed, 56 insertions, 15 deletions
```diff
diff --git a/ChangeLog b/ChangeLog
index b9d3a13b1f..cf16700e50 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2004-03-18  Jakub Jelinek  <jakub@redhat.com>
+
+	* malloc/arena.c (aligned_heap_area): New variable.
+	(new_heap): If aligned_heap_area != NULL, attempt to use that
+	first.  If HEAP_MAX_SIZE << 1 area is already HEAP_MAX_SIZE bytes
+	aligned, remember the second half in aligned_heap_area.
+	(delete_heap): Clear aligned_heap_area if deleting the area right
+	before aligned_heap_area.
+
 2004-10-03  Juerg Billeter  <j@bitron.ch>
 
 	* nscd/nscd_initgroups.c (__nscd_getgrouplist): Return -1 if nscd
diff --git a/malloc/arena.c b/malloc/arena.c
index 3adfbc45f8..00f40971f3 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -550,6 +550,16 @@ dump_heap(heap) heap_info *heap;
 
 #endif /* MALLOC_DEBUG > 1 */
 
+/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
+   addresses as opposed to increasing, new_heap would badly fragment the
+   address space.  In that case remember the second HEAP_MAX_SIZE part
+   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
+   call (if it is already aligned) and try to reuse it next time.  We need
+   no locking for it, as kernel ensures the atomicity for us - worst case
+   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
+   multiple threads, but only one will succeed.  */
+static char *aligned_heap_area;
+
 /* Create a new heap.  size is automatically rounded up to a multiple
    of the page size. */
 
@@ -580,21 +590,38 @@ new_heap(size, top_pad) size_t size, top_pad;
      No swap space needs to be reserved for the following large
      mapping (on Linux, this is the case for all non-writable mappings
      anyway). */
-  p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
-  if(p1 != MAP_FAILED) {
-    p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
-    ul = p2 - p1;
-    munmap(p1, ul);
-    munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
-  } else {
-    /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
-       is already aligned. */
-    p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
-    if(p2 == MAP_FAILED)
-      return 0;
-    if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
+  p2 = MAP_FAILED;
+  if(aligned_heap_area) {
+    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+                      MAP_PRIVATE|MAP_NORESERVE);
+    aligned_heap_area = NULL;
+    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
       munmap(p2, HEAP_MAX_SIZE);
-      return 0;
+      p2 = MAP_FAILED;
+    }
+  }
+  if(p2 == MAP_FAILED) {
+    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
+                      MAP_PRIVATE|MAP_NORESERVE);
+    if(p1 != MAP_FAILED) {
+      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
+                    & ~(HEAP_MAX_SIZE-1));
+      ul = p2 - p1;
+      if (ul)
+        munmap(p1, ul);
+      else
+        aligned_heap_area = p2 + HEAP_MAX_SIZE;
+      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+    } else {
+      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+         is already aligned. */
+      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
+      if(p2 == MAP_FAILED)
+        return 0;
+      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
+        munmap(p2, HEAP_MAX_SIZE);
+        return 0;
+      }
     }
   }
   if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
@@ -644,7 +671,12 @@ grow_heap(h, diff) heap_info *h; long diff;
 
 /* Delete a heap. */
 
-#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)
+#define delete_heap(heap) \
+  do { \
+    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
+      aligned_heap_area = NULL; \
+    munmap((char*)(heap), HEAP_MAX_SIZE); \
+  } while (0)
 
 static int
 internal_function
```
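The patch does two things in `new_heap`: it keeps the old trick of over-mapping `HEAP_MAX_SIZE << 1` bytes of `PROT_NONE` address space and trimming it down to one `HEAP_MAX_SIZE`-aligned window, and it now remembers the aligned second half of that double mapping in `aligned_heap_area` so the next call can try that address first instead of fragmenting the address space when the kernel hands out mappings top-down. The standalone sketch below illustrates the same two steps under simplified assumptions: `ALIGN_SIZE`, `reserve_aligned`, `cached_aligned_area`, and the explicit `MAP_ANONYMOUS` flags are stand-ins for this illustration, not glibc's code, which goes through its `MMAP` wrapper and uses `HEAP_MAX_SIZE`.

```c
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

/* Illustrative granularity; glibc uses HEAP_MAX_SIZE here.  */
#define ALIGN_SIZE (1UL << 20)

/* Address remembered from a previous call, in the spirit of glibc's
   aligned_heap_area.  */
static char *cached_aligned_area;

/* Reserve ALIGN_SIZE bytes of address space aligned to ALIGN_SIZE.  */
static void *
reserve_aligned (void)
{
  char *p2 = MAP_FAILED;

  /* First try the remembered aligned address.  Without MAP_FIXED it is
     only a hint, so the alignment must be re-checked afterwards.  */
  if (cached_aligned_area != NULL)
    {
      p2 = mmap (cached_aligned_area, ALIGN_SIZE, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      cached_aligned_area = NULL;
      if (p2 != MAP_FAILED && ((uintptr_t) p2 & (ALIGN_SIZE - 1)))
        {
          munmap (p2, ALIGN_SIZE);
          p2 = MAP_FAILED;
        }
    }

  if (p2 == MAP_FAILED)
    {
      /* Over-map twice the size, then trim down to one aligned window.  */
      char *p1 = mmap (NULL, ALIGN_SIZE << 1, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (p1 == MAP_FAILED)
        return NULL;
      p2 = (char *) (((uintptr_t) p1 + (ALIGN_SIZE - 1)) & ~(ALIGN_SIZE - 1));
      size_t head = p2 - p1;
      if (head)
        munmap (p1, head);              /* drop the unaligned head */
      else
        /* p1 was already aligned: remember the second half's address so
           the next call can try it first (it is still unmapped below).  */
        cached_aligned_area = p2 + ALIGN_SIZE;
      munmap (p2 + ALIGN_SIZE, ALIGN_SIZE - head);   /* drop the tail */
    }
  return p2;
}

int
main (void)
{
  void *a = reserve_aligned ();
  void *b = reserve_aligned ();   /* may reuse the remembered address */
  printf ("first  %p\nsecond %p\n", a, b);
  return 0;
}
```

Because the remembered address is passed to mmap without MAP_FIXED, a concurrent mapping can take it first; at worst several threads race for the same hint and only one gets it back aligned, which is the lock-free behaviour the new comment in arena.c describes.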