author     Ondřej Bílka <neleai@seznam.cz>    2013-10-30 16:24:38 +0100
committer  Ondřej Bílka <neleai@seznam.cz>    2013-10-30 16:25:21 +0100
commit     c6e4925d4069d38843c02994ffd284e8c87c8929
tree       5558a3ae83abb2b52818add3185bea2127cd7a39
parent     bbea82f7fe8af40fd08e8956e1aaf4d877168652
Use atomic operations to track memory. Fixes bug 11087
-rw-r--r--   ChangeLog        |  9
-rw-r--r--   NEWS             | 16
-rw-r--r--   malloc/malloc.c  | 23
3 files changed, 27 insertions, 21 deletions
diff --git a/ChangeLog b/ChangeLog
index 07ea69da31..44448684d8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,11 @@
-2013-10-30  Ondřej Bílka  <neleai@seznam.cz>
+2013-10-30  Ondřej Bílka  <neleai@seznam.cz>
+
+	[BZ #11087]
+	* malloc/malloc.c (sysmalloc): Compute statistics atomically.
+	(munmap_chunk): Likewise.
+	(mremap_chunk): Likewise.
+
+2013-10-30  Ondřej Bílka  <neleai@seznam.cz>
 
 	[BZ 15799]
 	* stdlib/div.c (div): Remove obsolete code.
diff --git a/NEWS b/NEWS
index 2c5873d345..273f935152 100644
--- a/NEWS
+++ b/NEWS
@@ -9,14 +9,14 @@ Version 2.19
 
 * The following bugs are resolved with this release:
 
-  156, 431, 832, 2801, 9954, 10278, 13028, 13982, 13985, 14029, 14155,
-  14547, 14699, 14876, 14910, 15048, 15218, 15277, 15308, 15362, 15400,
-  15427, 15522, 15531, 15532, 15608, 15609, 15610, 15632, 15640, 15670,
-  15672, 15680, 15681, 15723, 15734, 15735, 15736, 15748, 15749, 15754,
-  15760, 15764, 15797, 15799, 15825, 15844, 15847, 15849, 15855, 15856,
-  15857, 15859, 15867, 15886, 15887, 15890, 15892, 15893, 15895, 15897,
-  15905, 15909, 15919, 15921, 15923, 15939, 15948, 15963, 15966, 15988,
-  16032, 16034, 16036, 16041, 16071, 16072, 16074, 16078.
+  156, 431, 832, 2801, 9954, 10278, 11087, 13028, 13982, 13985, 14029,
+  14155, 14547, 14699, 14876, 14910, 15048, 15218, 15277, 15308, 15362,
+  15400, 15427, 15522, 15531, 15532, 15608, 15609, 15610, 15632, 15640,
+  15670, 15672, 15680, 15681, 15723, 15734, 15735, 15736, 15748, 15749,
+  15754, 15760, 15764, 15797, 15799, 15825, 15844, 15847, 15849, 15855,
+  15856, 15857, 15859, 15867, 15886, 15887, 15890, 15892, 15893, 15895,
+  15897, 15905, 15909, 15919, 15921, 15923, 15939, 15948, 15963, 15966,
+  15988, 16032, 16034, 16036, 16041, 16071, 16072, 16074, 16078.
 
 * CVE-2012-4412 The strcoll implementation caches indices and rules for
   large collation sequences to optimize multiple passes.  This cache
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 1a18c3f5f2..79025b16d9 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2253,7 +2253,6 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
   mchunkptr       remainder;        /* remainder from allocation */
   unsigned long   remainder_size;   /* its size */
 
-  unsigned long   sum;              /* for updating stats */
 
   size_t          pagemask  = GLRO(dl_pagesize) - 1;
   bool            tried_mmap = false;
@@ -2325,12 +2324,12 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
 
       /* update statistics */
 
-      if (++mp_.n_mmaps > mp_.max_n_mmaps)
-        mp_.max_n_mmaps = mp_.n_mmaps;
+      int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
+      atomic_max (&mp_.max_n_mmaps, new);
 
-      sum = mp_.mmapped_mem += size;
-      if (sum > (unsigned long)(mp_.max_mmapped_mem))
-        mp_.max_mmapped_mem = sum;
+      unsigned long sum;
+      sum = atomic_exchange_and_add(&mp_.mmapped_mem, size) + size;
+      atomic_max (&mp_.max_mmapped_mem, sum);
 
       check_chunk(av, p);
 
@@ -2780,8 +2779,8 @@ munmap_chunk(mchunkptr p)
       return;
     }
 
-  mp_.n_mmaps--;
-  mp_.mmapped_mem -= total_size;
+  atomic_decrement (&mp_.n_mmaps);
+  atomic_add (&mp_.mmapped_mem, -total_size);
 
   /* If munmap failed the process virtual memory address space is in a
      bad shape.  Just leave the block hanging around, the process will
@@ -2822,10 +2821,10 @@ mremap_chunk(mchunkptr p, size_t new_size)
   assert((p->prev_size == offset));
   set_head(p, (new_size - offset)|IS_MMAPPED);
 
-  mp_.mmapped_mem -= size + offset;
-  mp_.mmapped_mem += new_size;
-  if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
-    mp_.max_mmapped_mem = mp_.mmapped_mem;
+  INTERNAL_SIZE_T new;
+  new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
+        + new_size - size - offset;
+  atomic_max (&mp_.max_mmapped_mem, new);
 
   return p;
 }