Diffstat (limited to 'elf/dl-open.c')
-rw-r--r-- | elf/dl-open.c | 105
1 file changed, 37 insertions, 68 deletions
diff --git a/elf/dl-open.c b/elf/dl-open.c
index c378da16c0..ba3c266e6a 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -363,17 +363,8 @@ resize_tls_slotinfo (struct link_map *new)
 {
   bool any_tls = false;
   for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
-    {
-      struct link_map *imap = new->l_searchlist.r_list[i];
-
-      /* Only add TLS memory if this object is loaded now and
-         therefore is not yet initialized.  */
-      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
-        {
-          _dl_add_to_slotinfo (imap, false);
-          any_tls = true;
-        }
-    }
+    if (_dl_add_to_slotinfo (new->l_searchlist.r_list[i], false))
+      any_tls = true;
   return any_tls;
 }
 
@@ -383,22 +374,8 @@ resize_tls_slotinfo (struct link_map *new)
 static void
 update_tls_slotinfo (struct link_map *new)
 {
-  unsigned int first_static_tls = new->l_searchlist.r_nlist;
   for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
-    {
-      struct link_map *imap = new->l_searchlist.r_list[i];
-
-      /* Only add TLS memory if this object is loaded now and
-         therefore is not yet initialized.  */
-      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
-        {
-          _dl_add_to_slotinfo (imap, true);
-
-          if (imap->l_need_tls_init
-              && first_static_tls == new->l_searchlist.r_nlist)
-            first_static_tls = i;
-        }
-    }
+    _dl_add_to_slotinfo (new->l_searchlist.r_list[i], true);
 
   size_t newgen = GL(dl_tls_generation) + 1;
   if (__glibc_unlikely (newgen == 0))
@@ -410,13 +387,11 @@ TLS generation counter wrapped!  Please report this."));
   /* We need a second pass for static tls data, because
      _dl_update_slotinfo must not be run while calls to
      _dl_add_to_slotinfo are still pending.  */
-  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
+  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
     {
       struct link_map *imap = new->l_searchlist.r_list[i];
 
-      if (imap->l_need_tls_init
-          && ! imap->l_init_called
-          && imap->l_tls_blocksize > 0)
+      if (imap->l_need_tls_init && imap->l_tls_blocksize > 0)
         {
           /* For static TLS we have to allocate the memory here and
              now, but we can delay updating the DTV.  */
@@ -511,22 +486,11 @@ _dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
   _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
 }
 
-
-/* struct dl_init_args and call_dl_init are used to call _dl_init with
-   exception handling disabled.  */
-struct dl_init_args
-{
-  struct link_map *new;
-  int argc;
-  char **argv;
-  char **env;
-};
-
 static void
 call_dl_init (void *closure)
 {
-  struct dl_init_args *args = closure;
-  _dl_init (args->new, args->argc, args->argv, args->env);
+  struct dl_open_args *args = closure;
+  _dl_init (args->map, args->argc, args->argv, args->env);
 }
 
 static void
@@ -601,6 +565,14 @@ dl_open_worker_begin (void *a)
         _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
                           new->l_name, new->l_ns, new->l_direct_opencount);
 
+#ifdef SHARED
+      /* No relocation processing on this execution path.  But
+         relocation has not been performed for static
+         position-dependent executables, so disable the assert for
+         static linking.  */
+      assert (new->l_relocated);
+#endif
+
       /* If the user requested the object to be in the global
          namespace but it is not so far, prepare to add it now.  This
          can raise an exception to do a malloc failure.  */
@@ -622,10 +594,6 @@ dl_open_worker_begin (void *a)
       if ((mode & RTLD_GLOBAL) && new->l_global == 0)
         add_to_global_update (new);
 
-      const int r_state __attribute__ ((unused))
-        = _dl_debug_update (args->nsid)->r_state;
-      assert (r_state == RT_CONSISTENT);
-
       return;
     }
 
@@ -656,17 +624,6 @@ dl_open_worker_begin (void *a)
 #endif
     }
 
-#ifdef SHARED
-  /* Auditing checkpoint: we have added all objects.  */
-  _dl_audit_activity_nsid (new->l_ns, LA_ACT_CONSISTENT);
-#endif
-
-  /* Notify the debugger all new objects are now ready to go.  */
-  struct r_debug *r = _dl_debug_update (args->nsid);
-  r->r_state = RT_CONSISTENT;
-  _dl_debug_state ();
-  LIBC_PROBE (map_complete, 3, args->nsid, r, new);
-
   _dl_open_check (new);
 
   /* Print scope information.  */
@@ -713,6 +670,7 @@ dl_open_worker_begin (void *a)
      created dlmopen namespaces.  Do not do this for static dlopen
      because libc has relocations against ld.so, which may not have
      been relocated at this point.  */
+  struct r_debug *r = _dl_debug_update (args->nsid);
 #ifdef SHARED
   if (GL(dl_ns)[args->nsid].libc_map != NULL)
     _dl_open_relocate_one_object (args, r, GL(dl_ns)[args->nsid].libc_map,
@@ -804,6 +762,26 @@ dl_open_worker (void *a)
 
   __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));
 
+  /* Auditing checkpoint and debugger signalling.  Do this even on
+     error, so that dlopen exits with consistent state.  */
+  if (args->nsid >= 0 || args->map != NULL)
+    {
+      Lmid_t nsid = args->map != NULL ? args->map->l_ns : args->nsid;
+      struct r_debug *r = _dl_debug_update (nsid);
+#ifdef SHARED
+      bool was_not_consistent = r->r_state != RT_CONSISTENT;
+#endif
+      r->r_state = RT_CONSISTENT;
+      _dl_debug_state ();
+      LIBC_PROBE (map_complete, 3, nsid, r, args->map);
+
+#ifdef SHARED
+      if (was_not_consistent)
+        /* Avoid redundant/recursive signalling.  */
+        _dl_audit_activity_nsid (nsid, LA_ACT_CONSISTENT);
+#endif
+    }
+
   if (__glibc_unlikely (ex.errstring != NULL))
     /* Reraise the error.  */
     _dl_signal_exception (err, &ex, NULL);
@@ -818,16 +796,7 @@
   /* Run the initializer functions of new objects.  Temporarily
      disable the exception handler, so that lazy binding failures are
      fatal.  */
-  {
-    struct dl_init_args init_args =
-      {
-        .new = new,
-        .argc = args->argc,
-        .argv = args->argv,
-        .env = args->env
-      };
-    _dl_catch_exception (NULL, call_dl_init, &init_args);
-  }
+  _dl_catch_exception (NULL, call_dl_init, args);
 
   /* Now we can make the new map available in the global scope.  */
   if (mode & RTLD_GLOBAL)
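The diffstat is limited to elf/dl-open.c, so the companion change to _dl_add_to_slotinfo (defined in elf/dl-tls.c) is not visible here. Judging only from the new call sites, that helper now appears to perform the "skip already-initialized objects and objects without TLS" filtering itself and to report whether the object needs a slotinfo entry, which is why both resize_tls_slotinfo and update_tls_slotinfo can drop their open-coded checks. The standalone C sketch below models that assumed contract with simplified types; it is an illustration inferred from this diff, not the actual glibc implementation.

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-in for struct link_map; only the two fields the
   filter consults are modelled (sketch only, not glibc code).  */
struct link_map_sketch
{
  bool l_init_called;      /* Initializers have already run.  */
  size_t l_tls_blocksize;  /* Size of the TLS segment, 0 if none.  */
};

/* Assumed contract: ignore objects that are already initialized or
   have no TLS, and tell the caller whether a slotinfo entry is (or
   would be) required.  DO_ADD false means "only size the table".  */
static bool
add_to_slotinfo_sketch (struct link_map_sketch *l, bool do_add)
{
  if (l->l_init_called || l->l_tls_blocksize == 0)
    return false;

  if (do_add)
    {
      /* The real helper appends the map to the global slotinfo
         list here; omitted in this sketch.  */
    }

  return true;
}

With a contract like this, the first hunk's loop reduces to setting any_tls whenever the helper returns true, and update_tls_slotinfo no longer needs to track first_static_tls, because its final pass re-filters on l_need_tls_init and l_tls_blocksize itself.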