author     Carlos O'Donell <carlos@redhat.com>  2018-06-22 09:28:47 -0400
committer  Carlos O'Donell <carlos@redhat.com>  2018-06-29 22:39:06 -0400
commit     2827ab990aefbb0e53374199b875d98f116d6390 (patch)
tree       0063b4c07be87d887d4a4893b9681b9e61479e8b /nptl/allocatestack.c
parent     37d3d244e1a0ca7e7ac89b8e768e665adbb2e2d8 (diff)
libc: Extend __libc_freeres framework (Bug 23329).
The __libc_freeres framework does not extend to non-libc.so objects.
This causes problems in general for valgrind and mtrace when detecting
unfreed objects in both libdl.so and libpthread.so.  This change is
a prerequisite to properly moving the malloc hooks out of malloc,
since such a move now requires precise accounting of all allocated
data before destructors are run.

This commit adds a proper hook in libc.so.6 for both libdl.so and
libpthread.so; this ensures that the freeing code in shm-directory.c,
which uses freeit () to release memory, is invoked properly.  We also
remove the nptl_freeres hook and fall back to using the
weak-ref-and-check idiom for a loaded libpthread.so, thus making this
process similar for all DSOs.
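
As a rough illustration of that idiom (the declaration site and the
helper name below are a sketch, not the exact glibc internals), libc
keeps a weak reference to the cleanup routine and only calls it when a
loaded libpthread.so actually provided a definition:

  /* Sketch only; not copied from glibc sources.  */
  extern void __nptl_stacks_freeres (void) __attribute__ ((weak));

  static void
  maybe_free_thread_stacks (void)
  {
    /* The weak reference resolves to NULL unless libpthread.so
       supplied a definition, so a plain check guards the call.  */
    if (__nptl_stacks_freeres != NULL)
      __nptl_stacks_freeres ();
  }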

Lastly, we follow best practice and use explicit free calls for
both libdl.so and libpthread.so instead of the generic hook mechanism,
which runs in an undefined order.
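
To make the ordering point concrete, here is a contrived comparison
(only __nptl_stacks_freeres comes from this patch; every other name is
made up): a generic hook table runs its entries in whatever order they
were registered, while explicit calls fix the sequence in the source.

  /* Contrived example, not glibc code.  */
  typedef void (*freeres_hook) (void);

  extern freeres_hook registered_hooks[];    /* hypothetical hook table */
  extern int nregistered_hooks;

  static void
  run_registered_hooks (void)
  {
    /* Order depends on how callers happened to register themselves.  */
    for (int i = 0; i < nregistered_hooks; ++i)
      registered_hooks[i] ();
  }

  extern void __libdl_freeres (void);        /* hypothetical libdl cleanup */
  extern void __nptl_stacks_freeres (void);  /* added by this patch */

  static void
  run_explicit_cleanup (void)
  {
    /* The cleanup order is spelled out and therefore well defined.  */
    __libdl_freeres ();
    __nptl_stacks_freeres ();
  }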

Tested on x86_64 with no regressions.

Signed-off-by: DJ Delorie <dj@redhat.com>
Signed-off-by: Carlos O'Donell <carlos@redhat.com>
Diffstat (limited to 'nptl/allocatestack.c')
-rw-r--r--  nptl/allocatestack.c | 12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 9c10b993fd..f9e053f9e5 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -251,8 +251,8 @@ get_cached_stack (size_t *sizep, void **memp)
 
 
 /* Free stacks until cache size is lower than LIMIT.  */
-void
-__free_stacks (size_t limit)
+static void
+free_stacks (size_t limit)
 {
   /* We reduce the size of the cache.  Remove the last entries until
      the size is below the limit.  */
@@ -288,6 +288,12 @@ __free_stacks (size_t limit)
     }
 }
 
+/* Free all the stacks on cleanup.  */
+void
+__nptl_stacks_freeres (void)
+{
+  free_stacks (0);
+}
 
 /* Add a stack frame which is not used anymore to the stack.  Must be
    called with the cache lock held.  */
@@ -302,7 +308,7 @@ queue_stack (struct pthread *stack)
 
   stack_cache_actsize += stack->stackblock_size;
   if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
-    __free_stacks (stack_cache_maxsize);
+    free_stacks (stack_cache_maxsize);
 }