author     Ulrich Drepper <drepper@redhat.com>    2007-05-19 07:08:23 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2007-05-19 07:08:23 +0000
commit     df94b6412e0628cd577da0ce5626358a3967ee44 (patch)
tree       3b969d3e4175fe3a72f824c482d8c9f9a3b3bf3e /nptl/allocatestack.c
parent     2acd01acb10d0c0113f87bf7e787e0854498269d (diff)
* elf/dl-close.c (_dl_close_worker): When removing object from
  global scope, wait for all lookups to finish afterwards.
* elf/dl-open.c (add_to_global): When global scope array must grow,
  allocate a new one and free old array only after all lookups finish.
* elf/dl-runtime.c (_dl_fixup): Protect using global scope.
  (_dl_lookup_symbol_x): Likewise.
* elf/dl-support.c: Define _dl_wait_lookup_done.
* sysdeps/generic/ldsodefs.h (struct rtld_global): Add
  _dl_wait_lookup_done.
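The protection mentioned for _dl_fixup and _dl_lookup_symbol_x is the reader side of this protocol and is not part of the hunk below. A minimal sketch of that side is given here, assuming Linux futexes and GCC atomic builtins; the flag values mirror the ones in the nptl hunk, and gscope_set_flag / gscope_reset_flag are hypothetical helper names, not the glibc macros themselves.

/* Illustrative model only, not glibc source: what a lookup path does
   with its own per-thread gscope_flag word.  */

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Flag values as used in the nptl/allocatestack.c hunk below.  */
#define THREAD_GSCOPE_FLAG_UNUSED 0
#define THREAD_GSCOPE_FLAG_USED   1
#define THREAD_GSCOPE_FLAG_WAIT   2

/* Called before walking the global scope.  FLAGP points at the
   calling thread's own flag word.  */
static void
gscope_set_flag (int *flagp)
{
  __atomic_store_n (flagp, THREAD_GSCOPE_FLAG_USED, __ATOMIC_SEQ_CST);
}

/* Called once the lookup is finished.  If __wait_lookup_done has
   switched the flag to WAIT in the meantime, it is sleeping on this
   word and must be woken.  */
static void
gscope_reset_flag (int *flagp)
{
  int old = __atomic_exchange_n (flagp, THREAD_GSCOPE_FLAG_UNUSED,
                                 __ATOMIC_SEQ_CST);
  if (old == THREAD_GSCOPE_FLAG_WAIT)
    syscall (SYS_futex, flagp, FUTEX_WAKE, 1, NULL, NULL, 0);
}

A lookup would then be bracketed by gscope_set_flag (&self->gscope_flag) and gscope_reset_flag (&self->gscope_flag), so that dlclose can tell which threads may still be reading the scope it is about to shrink.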
Diffstat (limited to 'nptl/allocatestack.c')
-rw-r--r--  nptl/allocatestack.c  57
1 file changed, 57 insertions, 0 deletions
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 6b60642042..e556dbac08 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -996,3 +996,60 @@ __pthread_init_static_tls (struct link_map *map)
 
   lll_unlock (stack_cache_lock);
 }
+
+
+void
+attribute_hidden
+__wait_lookup_done (void)
+{
+  lll_lock (stack_cache_lock);
+
+  struct pthread *self = THREAD_SELF;
+
+  /* Iterate over the list with system-allocated threads first.  */
+  list_t *runp;
+  list_for_each (runp, &stack_used)
+    {
+      struct pthread *t = list_entry (runp, struct pthread, list);
+      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
+        continue;
+
+      int *const gscope_flagp = &t->header.gscope_flag;
+
+      /* We have to wait until this thread is done with the global
+         scope.  First tell the thread that we are waiting and
+         possibly have to be woken.  */
+      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
+                                                THREAD_GSCOPE_FLAG_WAIT,
+                                                THREAD_GSCOPE_FLAG_USED))
+        continue;
+
+      do
+        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT);
+      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
+    }
+
+  /* Now the list with threads using user-allocated stacks.  */
+  list_for_each (runp, &__stack_user)
+    {
+      struct pthread *t = list_entry (runp, struct pthread, list);
+      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
+        continue;
+
+      int *const gscope_flagp = &t->header.gscope_flag;
+
+      /* We have to wait until this thread is done with the global
+         scope.  First tell the thread that we are waiting and
+         possibly have to be woken.  */
+      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
+                                                THREAD_GSCOPE_FLAG_WAIT,
+                                                THREAD_GSCOPE_FLAG_USED))
+        continue;
+
+      do
+        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT);
+      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
+    }
+
+  lll_unlock (stack_cache_lock);
+}
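For reference, the per-thread wait performed above can be distilled to a single-flag helper. This is a simplified sketch under the same assumptions as the earlier fragment (Linux futex syscall, GCC builtins); gscope_wait_one is a hypothetical name, and the real __wait_lookup_done walks both thread lists while holding stack_cache_lock, as the hunk shows.

/* Illustrative model only: block until the thread owning FLAGP has
   left the global scope.  */

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#define THREAD_GSCOPE_FLAG_UNUSED 0
#define THREAD_GSCOPE_FLAG_USED   1
#define THREAD_GSCOPE_FLAG_WAIT   2

static void
gscope_wait_one (int *flagp)
{
  int expected = THREAD_GSCOPE_FLAG_USED;

  /* Try to switch USED -> WAIT.  If the thread is not inside a lookup
     (or has already finished one), there is nothing to wait for.  */
  if (!__atomic_compare_exchange_n (flagp, &expected,
                                    THREAD_GSCOPE_FLAG_WAIT, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    return;

  /* Sleep until the owning thread resets the flag and wakes us;
     re-check the value to tolerate spurious wakeups.  */
  do
    syscall (SYS_futex, flagp, FUTEX_WAIT, THREAD_GSCOPE_FLAG_WAIT,
             NULL, NULL, 0);
  while (__atomic_load_n (flagp, __ATOMIC_SEQ_CST) == THREAD_GSCOPE_FLAG_WAIT);
}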