diff options
author | Ulrich Drepper <drepper@redhat.com> | 2006-10-10 00:51:29 +0000 |
---|---|---|
committer | Ulrich Drepper <drepper@redhat.com> | 2006-10-10 00:51:29 +0000 |
commit | 1100f84983f22e570a5081cbe79b0ef8fe4952d7 (patch) | |
tree | 3472df1372abf7816fb10f02573ba114c5b5a003 /elf | |
parent | 7484f797e4d4f9c174d4391f59d208e83027b285 (diff) | |
download | glibc-1100f84983f22e570a5081cbe79b0ef8fe4952d7.tar.gz glibc-1100f84983f22e570a5081cbe79b0ef8fe4952d7.tar.xz glibc-1100f84983f22e570a5081cbe79b0ef8fe4952d7.zip |
Jakub Jelinek <jakub@redhat.com>
Implement reference counting of scope records. * elf/dl-close.c (_dl_close): Remove all scopes from removed objects from the list in objects which remain. Always allocate new scope record. * elf/dl-open.c (dl_open_worker): When growing array for scopes, don't resize, allocate a new one. * elf/dl-runtime.c: Update reference counters before using a scope array. * elf/dl-sym.c: Likewise. * elf/dl-libc.c: Adjust for l_scope name change. * elf/dl-load.c: Likewise. * elf/dl-object.c: Likewise. * elf/rtld.c: Likewise. * include/link.h: Include <rtld-lowlevel.h>. Define struct r_scoperec. Replace r_scope with pointer to r_scoperec structure. Add l_scoperec_lock. * sysdeps/generic/ldsodefs.h: Include <rtld-lowlevel.h>. * sysdeps/generic/rtld-lowlevel.h: New file. * include/atomic.h: Rename atomic_and to atomic_and_val and atomic_or to atomic_or_val. Define new macros atomic_and and atomic_or which do not return values. * sysdeps/x86_64/bits/atomic.h: Define atomic_and and atomic_or. Various cleanups. * sysdeps/i386/i486/bits/atomic.h: Likewise.
Diffstat (limited to 'elf')
-rw-r--r-- | elf/dl-close.c | 148 | ||||
-rw-r--r-- | elf/dl-libc.c | 5 | ||||
-rw-r--r-- | elf/dl-load.c | 8 | ||||
-rw-r--r-- | elf/dl-object.c | 21 | ||||
-rw-r--r-- | elf/dl-open.c | 63 | ||||
-rw-r--r-- | elf/dl-runtime.c | 49 | ||||
-rw-r--r-- | elf/dl-sym.c | 85 | ||||
-rw-r--r-- | elf/rtld.c | 15 |
8 files changed, 310 insertions, 84 deletions
diff --git a/elf/dl-close.c b/elf/dl-close.c index 6a2ad976a7..2e7c506a3d 100644 --- a/elf/dl-close.c +++ b/elf/dl-close.c @@ -19,6 +19,7 @@ #include <assert.h> #include <dlfcn.h> +#include <errno.h> #include <libintl.h> #include <stddef.h> #include <stdio.h> @@ -35,6 +36,10 @@ typedef void (*fini_t) (void); +/* Special l_idx value used to indicate which objects remain loaded. */ +#define IDX_STILL_USED -1 + + #ifdef USE_TLS /* Returns true we an non-empty was found. */ static bool @@ -188,7 +193,7 @@ _dl_close (void *_map) done[done_index] = 1; used[done_index] = 1; /* Signal the object is still needed. */ - l->l_idx = -1; + l->l_idx = IDX_STILL_USED; /* Mark all dependencies as used. */ if (l->l_initfini != NULL) @@ -196,7 +201,7 @@ _dl_close (void *_map) struct link_map **lp = &l->l_initfini[1]; while (*lp != NULL) { - if ((*lp)->l_idx != -1) + if ((*lp)->l_idx != IDX_STILL_USED) { assert ((*lp)->l_idx >= 0 && (*lp)->l_idx < nloaded); @@ -217,7 +222,7 @@ _dl_close (void *_map) { struct link_map *jmap = l->l_reldeps[j]; - if (jmap->l_idx != -1) + if (jmap->l_idx != IDX_STILL_USED) { assert (jmap->l_idx >= 0 && jmap->l_idx < nloaded); @@ -310,8 +315,9 @@ _dl_close (void *_map) /* Else used[i]. */ else if (imap->l_type == lt_loaded) { - if (imap->l_searchlist.r_list == NULL - && imap->l_initfini != NULL) + struct r_scope_elem *new_list = NULL; + + if (imap->l_searchlist.r_list == NULL && imap->l_initfini != NULL) { /* The object is still used. But one of the objects we are unloading right now is responsible for loading it. If @@ -328,44 +334,114 @@ _dl_close (void *_map) imap->l_searchlist.r_list = &imap->l_initfini[cnt + 1]; imap->l_searchlist.r_nlist = cnt; - for (cnt = 0; imap->l_scope[cnt] != NULL; ++cnt) - /* This relies on l_scope[] entries being always set either - to its own l_symbolic_searchlist address, or some map's - l_searchlist address. 
*/ - if (imap->l_scope[cnt] != &imap->l_symbolic_searchlist) - { - struct link_map *tmap; - - tmap = (struct link_map *) ((char *) imap->l_scope[cnt] - - offsetof (struct link_map, - l_searchlist)); - assert (tmap->l_ns == ns); - if (tmap->l_idx != -1) - { - imap->l_scope[cnt] = &imap->l_searchlist; - break; - } - } + new_list = &imap->l_searchlist; } - else + + /* Count the number of scopes which remain after the unload. + When we add the local search list count it. Always add + one for the terminating NULL pointer. */ + size_t remain = (new_list != NULL) + 1; + bool removed_any = false; + for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt) + /* This relies on l_scope[] entries being always set either + to its own l_symbolic_searchlist address, or some map's + l_searchlist address. */ + if (imap->l_scoperec->scope[cnt] != &imap->l_symbolic_searchlist) + { + struct link_map *tmap = (struct link_map *) + ((char *) imap->l_scoperec->scope[cnt] + - offsetof (struct link_map, l_searchlist)); + assert (tmap->l_ns == ns); + if (tmap->l_idx == IDX_STILL_USED) + ++remain; + else + removed_any = true; + } + else + ++remain; + + if (removed_any) { - unsigned int cnt = 0; - while (imap->l_scope[cnt] != NULL) + /* Always allocate a new array for the scope. This is + necessary since we must be able to determine the last + user of the current array. If possible use the link map's + memory. 
*/ + size_t new_size; + struct r_scoperec *newp; + if (imap->l_scoperec != &imap->l_scoperec_mem + && remain < NINIT_SCOPE_ELEMS (imap) + && imap->l_scoperec_mem.nusers == 0) + { + new_size = NINIT_SCOPE_ELEMS (imap); + newp = &imap->l_scoperec_mem; + } + else + { + new_size = imap->l_scope_max; + newp = (struct r_scoperec *) + malloc (sizeof (struct r_scoperec) + + new_size * sizeof (struct r_scope_elem *)); + if (newp == NULL) + _dl_signal_error (ENOMEM, "dlclose", NULL, + N_("cannot create scope list")); + } + + newp->nusers = 0; + newp->remove_after_use = false; + newp->notify = false; + + /* Copy over the remaining scope elements. */ + remain = 0; + for (size_t cnt = 0; imap->l_scoperec->scope[cnt] != NULL; ++cnt) { - if (imap->l_scope[cnt] == &map->l_searchlist) + if (imap->l_scoperec->scope[cnt] + != &imap->l_symbolic_searchlist) { - while ((imap->l_scope[cnt] = imap->l_scope[cnt + 1]) - != NULL) - ++cnt; - break; + struct link_map *tmap = (struct link_map *) + ((char *) imap->l_scoperec->scope[cnt] + - offsetof (struct link_map, l_searchlist)); + if (tmap->l_idx != IDX_STILL_USED) + { + /* Remove the scope. Or replace with own map's + scope. */ + if (new_list != NULL) + { + newp->scope[remain++] = new_list; + new_list = NULL; + } + continue; + } } - ++cnt; + + newp->scope[remain++] = imap->l_scoperec->scope[cnt]; } + newp->scope[remain] = NULL; + + struct r_scoperec *old = imap->l_scoperec; + + __rtld_mrlock_change (imap->l_scoperec_lock); + imap->l_scoperec = newp; + __rtld_mrlock_done (imap->l_scoperec_lock); + + if (atomic_increment_val (&old->nusers) != 1) + { + old->remove_after_use = true; + old->notify = true; + if (atomic_decrement_val (&old->nusers) != 0) + __rtld_waitzero (old->nusers); + } + + /* No user anymore, we can free it now. */ + if (old != &imap->l_scoperec_mem) + free (old); + + imap->l_scope_max = new_size; } /* The loader is gone, so mark the object as not having one. - Note: l_idx != -1 -> object will be removed. 
*/ - if (imap->l_loader != NULL && imap->l_loader->l_idx != -1) + Note: l_idx != IDX_STILL_USED -> object will be removed. */ + if (imap->l_loader != NULL + && imap->l_loader->l_idx != IDX_STILL_USED) imap->l_loader = NULL; /* Remember where the first dynamically loaded object is. */ @@ -570,8 +646,8 @@ _dl_close (void *_map) free (imap->l_initfini); /* Remove the scope array if we allocated it. */ - if (imap->l_scope != imap->l_scope_mem) - free (imap->l_scope); + if (imap->l_scoperec != &imap->l_scoperec_mem) + free (imap->l_scoperec); if (imap->l_phdr_allocated) free ((void *) imap->l_phdr); diff --git a/elf/dl-libc.c b/elf/dl-libc.c index 1b995eda92..8b78a7a388 100644 --- a/elf/dl-libc.c +++ b/elf/dl-libc.c @@ -1,5 +1,5 @@ /* Handle loading and unloading shared objects for internal libc purposes. - Copyright (C) 1999,2000,2001,2002,2004,2005 Free Software Foundation, Inc. + Copyright (C) 1999-2002,2004,2005,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Zack Weinberg <zack@rabi.columbia.edu>, 1999. @@ -133,7 +133,8 @@ do_dlsym_private (void *ptr) struct do_dlsym_args *args = (struct do_dlsym_args *) ptr; args->ref = NULL; l = GLRO(dl_lookup_symbol_x) (args->name, args->map, &args->ref, - args->map->l_scope, &vers, 0, 0, NULL); + args->map->l_scoperec->scope, &vers, 0, 0, + NULL); args->loadbase = l; } diff --git a/elf/dl-load.c b/elf/dl-load.c index 36dc123c01..172fb2fc35 100644 --- a/elf/dl-load.c +++ b/elf/dl-load.c @@ -1473,7 +1473,7 @@ cannot enable executable stack as shared object requires"); have to do this for the main map. */ if ((mode & RTLD_DEEPBIND) == 0 && __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0) - && &l->l_searchlist != l->l_scope[0]) + && &l->l_searchlist != l->l_scoperec->scope[0]) { /* Create an appropriate searchlist. It contains only this map. This is the definition of DT_SYMBOLIC in SysVr4. 
*/ @@ -1490,11 +1490,11 @@ cannot enable executable stack as shared object requires"); l->l_symbolic_searchlist.r_nlist = 1; /* Now move the existing entries one back. */ - memmove (&l->l_scope[1], &l->l_scope[0], - (l->l_scope_max - 1) * sizeof (l->l_scope[0])); + memmove (&l->l_scoperec->scope[1], &l->l_scoperec->scope[0], + (l->l_scope_max - 1) * sizeof (l->l_scoperec->scope[0])); /* Now add the new entry. */ - l->l_scope[0] = &l->l_symbolic_searchlist; + l->l_scoperec->scope[0] = &l->l_symbolic_searchlist; } /* Remember whether this object must be initialized first. */ diff --git a/elf/dl-object.c b/elf/dl-object.c index 86f7a8e4d9..c5dae9ef11 100644 --- a/elf/dl-object.c +++ b/elf/dl-object.c @@ -1,5 +1,5 @@ /* Storage management for the chain of loaded shared objects. - Copyright (C) 1995-2002, 2004 Free Software Foundation, Inc. + Copyright (C) 1995-2002, 2004, 2006 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -82,8 +82,14 @@ _dl_new_object (char *realname, const char *libname, int type, /* Use the 'l_scope_mem' array by default for the the 'l_scope' information. If we need more entries we will allocate a large array dynamically. */ - new->l_scope = new->l_scope_mem; - new->l_scope_max = sizeof (new->l_scope_mem) / sizeof (new->l_scope_mem[0]); + new->l_scoperec = &new->l_scoperec_mem; + new->l_scope_max = (sizeof (new->l_scope_realmem.scope_elems) + / sizeof (new->l_scope_realmem.scope_elems[0])); + + /* No need to initialize the scope lock if the initializer is zero. */ +#if _RTLD_MRLOCK_INITIALIZER != 0 + __rtld_mrlock_initialize (new->l_scoperec_mem.lock); +#endif /* Counter for the scopes we have to handle. */ idx = 0; @@ -98,7 +104,8 @@ _dl_new_object (char *realname, const char *libname, int type, l->l_next = new; /* Add the global scope. 
*/ - new->l_scope[idx++] = &GL(dl_ns)[nsid]._ns_loaded->l_searchlist; + new->l_scoperec->scope[idx++] + = &GL(dl_ns)[nsid]._ns_loaded->l_searchlist; } else GL(dl_ns)[nsid]._ns_loaded = new; @@ -114,15 +121,15 @@ _dl_new_object (char *realname, const char *libname, int type, loader = loader->l_loader; /* Insert the scope if it isn't the global scope we already added. */ - if (idx == 0 || &loader->l_searchlist != new->l_scope[0]) + if (idx == 0 || &loader->l_searchlist != new->l_scoperec->scope[0]) { if ((mode & RTLD_DEEPBIND) != 0 && idx != 0) { - new->l_scope[1] = new->l_scope[0]; + new->l_scoperec->scope[1] = new->l_scoperec->scope[0]; idx = 0; } - new->l_scope[idx] = &loader->l_searchlist; + new->l_scoperec->scope[idx] = &loader->l_searchlist; } new->l_local_scope[0] = &new->l_searchlist; diff --git a/elf/dl-open.c b/elf/dl-open.c index 8d057f82eb..5c90e06708 100644 --- a/elf/dl-open.c +++ b/elf/dl-open.c @@ -343,7 +343,7 @@ dl_open_worker (void *a) start the profiling. */ struct link_map *old_profile_map = GL(dl_profile_map); - _dl_relocate_object (l, l->l_scope, 1, 1); + _dl_relocate_object (l, l->l_scoperec->scope, 1, 1); if (old_profile_map == NULL && GL(dl_profile_map) != NULL) { @@ -356,7 +356,7 @@ dl_open_worker (void *a) } else #endif - _dl_relocate_object (l, l->l_scope, lazy, 0); + _dl_relocate_object (l, l->l_scoperec->scope, lazy, 0); } if (l == new) @@ -374,11 +374,13 @@ dl_open_worker (void *a) not been loaded here and now. */ if (imap->l_init_called && imap->l_type == lt_loaded) { - struct r_scope_elem **runp = imap->l_scope; + struct r_scope_elem **runp = imap->l_scoperec->scope; size_t cnt = 0; while (*runp != NULL) { + if (*runp == &new->l_searchlist) + break; ++cnt; ++runp; } @@ -391,35 +393,58 @@ dl_open_worker (void *a) { /* The 'r_scope' array is too small. Allocate a new one dynamically. 
*/ - struct r_scope_elem **newp; - size_t new_size = imap->l_scope_max * 2; + size_t new_size; + struct r_scoperec *newp; - if (imap->l_scope == imap->l_scope_mem) + if (imap->l_scoperec != &imap->l_scoperec_mem + && imap->l_scope_max < NINIT_SCOPE_ELEMS (imap) + && imap->l_scoperec_mem.nusers == 0) { - newp = (struct r_scope_elem **) - malloc (new_size * sizeof (struct r_scope_elem *)); - if (newp == NULL) - _dl_signal_error (ENOMEM, "dlopen", NULL, - N_("cannot create scope list")); - imap->l_scope = memcpy (newp, imap->l_scope, - cnt * sizeof (imap->l_scope[0])); + new_size = NINIT_SCOPE_ELEMS (imap); + newp = &imap->l_scoperec_mem; } else { - newp = (struct r_scope_elem **) - realloc (imap->l_scope, - new_size * sizeof (struct r_scope_elem *)); + new_size = imap->l_scope_max * 2; + newp = (struct r_scoperec *) + malloc (sizeof (struct r_scoperec) + + new_size * sizeof (struct r_scope_elem *)); if (newp == NULL) _dl_signal_error (ENOMEM, "dlopen", NULL, N_("cannot create scope list")); - imap->l_scope = newp; + } + + newp->nusers = 0; + newp->remove_after_use = false; + newp->notify = false; + memcpy (newp->scope, imap->l_scoperec->scope, + cnt * sizeof (imap->l_scoperec->scope[0])); + struct r_scoperec *old = imap->l_scoperec; + + if (old == &imap->l_scoperec_mem) + imap->l_scoperec = newp; + else + { + __rtld_mrlock_change (imap->l_scoperec_lock); + imap->l_scoperec = newp; + __rtld_mrlock_done (imap->l_scoperec_lock); + + atomic_increment (&old->nusers); + old->remove_after_use = true; + if (atomic_decrement_val (&old->nusers) == 0) + /* No user, we can free it here and now. */ + free (old); } imap->l_scope_max = new_size; } - imap->l_scope[cnt++] = &new->l_searchlist; - imap->l_scope[cnt] = NULL; + /* First terminate the extended list. Otherwise a thread + might use the new last element and then use the garbage + at offset IDX+1. 
*/ + imap->l_scoperec->scope[cnt + 1] = NULL; + atomic_write_barrier (); + imap->l_scoperec->scope[cnt] = &new->l_searchlist; } #if USE_TLS /* Only add TLS memory if this object is loaded now and diff --git a/elf/dl-runtime.c b/elf/dl-runtime.c index f92cbe26bd..83d565ac71 100644 --- a/elf/dl-runtime.c +++ b/elf/dl-runtime.c @@ -1,5 +1,5 @@ /* On-demand PLT fixup for shared objects. - Copyright (C) 1995-2002,2003,2004,2005 Free Software Foundation, Inc. + Copyright (C) 1995-2002,2003,2004,2005,2006 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -92,16 +92,36 @@ _dl_fixup ( version = NULL; } + struct r_scoperec *scoperec = l->l_scoperec; + if (l->l_type == lt_loaded) + { + __rtld_mrlock_lock (l->l_scoperec_lock); + scoperec = l->l_scoperec; + atomic_increment (&scoperec->nusers); + __rtld_mrlock_unlock (l->l_scoperec_lock); + } + result = _dl_lookup_symbol_x (strtab + sym->st_name, l, &sym, - l->l_scope, version, ELF_RTYPE_CLASS_PLT, + scoperec->scope, version, + ELF_RTYPE_CLASS_PLT, DL_LOOKUP_ADD_DEPENDENCY, NULL); + if (l->l_type == lt_loaded + && atomic_decrement_val (&scoperec->nusers) == 0 + && __builtin_expect (scoperec->remove_after_use, 0)) + { + if (scoperec->notify) + __rtld_notify (scoperec->nusers); + else + free (scoperec); + } + /* Currently result contains the base load address (or link map) of the object that defines sym. Now add in the symbol offset. */ value = DL_FIXUP_MAKE_VALUE (result, - sym ? LOOKUP_VALUE_ADDRESS (result) - + sym->st_value : 0); + sym ? 
(LOOKUP_VALUE_ADDRESS (result) + + sym->st_value) : 0); } else { @@ -174,11 +194,30 @@ _dl_profile_fixup ( version = NULL; } + struct r_scoperec *scoperec = l->l_scoperec; + if (l->l_type == lt_loaded) + { + __rtld_mrlock_lock (l->l_scoperec_lock); + scoperec = l->l_scoperec; + atomic_increment (&scoperec->nusers); + __rtld_mrlock_unlock (l->l_scoperec_lock); + } + result = _dl_lookup_symbol_x (strtab + refsym->st_name, l, &defsym, - l->l_scope, version, + scoperec->scope, version, ELF_RTYPE_CLASS_PLT, DL_LOOKUP_ADD_DEPENDENCY, NULL); + if (l->l_type == lt_loaded + && atomic_decrement_val (&scoperec->nusers) == 0 + && __builtin_expect (scoperec->remove_after_use, 0)) + { + if (scoperec->notify) + __rtld_notify (scoperec->nusers); + else + free (scoperec); + } + /* Currently result contains the base load address (or link map) of the object that defines sym. Now add in the symbol offset. */ diff --git a/elf/dl-sym.c b/elf/dl-sym.c index d2b0ec0dab..1c66310d7c 100644 --- a/elf/dl-sym.c +++ b/elf/dl-sym.c @@ -17,6 +17,7 @@ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ +#include <assert.h> #include <stddef.h> #include <setjmp.h> #include <libintl.h> @@ -58,6 +59,30 @@ _dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref) #endif +struct call_dl_lookup_args +{ + /* Arguments to do_dlsym. */ + struct link_map *map; + const char *name; + struct r_scope_elem **scope; + struct r_found_version *vers; + int flags; + + /* Return values of do_dlsym. 
*/ + lookup_t loadbase; + const ElfW(Sym) **refp; +}; + +static void +call_dl_lookup (void *ptr) +{ + struct call_dl_lookup_args *args = (struct call_dl_lookup_args *) ptr; + args->map = GLRO(dl_lookup_symbol_x) (args->name, args->map, args->refp, + args->scope, args->vers, 0, + args->flags, NULL); +} + + static void * internal_function do_sym (void *handle, const char *name, void *who, @@ -84,10 +109,62 @@ do_sym (void *handle, const char *name, void *who, } if (handle == RTLD_DEFAULT) - /* Search the global scope. */ - result = GLRO(dl_lookup_symbol_x) (name, match, &ref, match->l_scope, - vers, 0, flags|DL_LOOKUP_ADD_DEPENDENCY, - NULL); + { + /* Search the global scope. We have the simple case where + we look up in the scope of an object which was part of + the initial binary. And then the more complex part + where the object is dynamically loaded and the scope + array can change. */ + if (match->l_type != lt_loaded) + result = GLRO(dl_lookup_symbol_x) (name, match, &ref, + match->l_scoperec->scope, vers, 0, + flags | DL_LOOKUP_ADD_DEPENDENCY, + NULL); + else + { + __rtld_mrlock_lock (match->l_scoperec_lock); + struct r_scoperec *scoperec = match->l_scoperec; + atomic_increment (&scoperec->nusers); + __rtld_mrlock_unlock (match->l_scoperec_lock); + + struct call_dl_lookup_args args; + args.name = name; + args.map = match; + args.scope = scoperec->scope; + args.vers = vers; + args.flags = flags | DL_LOOKUP_ADD_DEPENDENCY; + args.refp = &ref; + + const char *objname; + const char *errstring = NULL; + bool malloced; + int err = GLRO(dl_catch_error) (&objname, &errstring, &malloced, + call_dl_lookup, &args); + + if (atomic_decrement_val (&scoperec->nusers) == 0 + && __builtin_expect (scoperec->remove_after_use, 0)) + { + if (scoperec->notify) + __rtld_notify (scoperec->nusers); + else + free (scoperec); + } + + if (__builtin_expect (errstring != NULL, 0)) + { + /* The lookup was unsuccessful. Rethrow the error. 
*/ + char *errstring_dup = strdupa (errstring); + char *objname_dup = strdupa (objname); + if (malloced) + free ((char *) errstring); + + GLRO(dl_signal_error) (err, objname_dup, NULL, errstring_dup); + /* NOTREACHED */ + } + + result = args.map; + } + } else if (handle == RTLD_NEXT) { if (__builtin_expect (match == GL(dl_ns)[LM_ID_BASE]._ns_loaded, 0)) diff --git a/elf/rtld.c b/elf/rtld.c index 7746377f37..ace3a3099d 100644 --- a/elf/rtld.c +++ b/elf/rtld.c @@ -609,7 +609,7 @@ relocate_doit (void *a) { struct relocate_args *args = (struct relocate_args *) a; - _dl_relocate_object (args->l, args->l->l_scope, args->lazy, 0); + _dl_relocate_object (args->l, args->l->l_scoperec->scope, args->lazy, 0); } static void @@ -1963,8 +1963,8 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n", lookup_t result; result = _dl_lookup_symbol_x (INTUSE(_dl_argv)[i], main_map, - &ref, main_map->l_scope, NULL, - ELF_RTYPE_CLASS_PLT, + &ref, main_map->l_scoperec->scope, + NULL, ELF_RTYPE_CLASS_PLT, DL_LOOKUP_ADD_DEPENDENCY, NULL); loadbase = LOOKUP_VALUE_ADDRESS (result); @@ -2006,8 +2006,8 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n", { /* Mark the link map as not yet relocated again. */ GL(dl_rtld_map).l_relocated = 0; - _dl_relocate_object (&GL(dl_rtld_map), main_map->l_scope, - 0, 0); + _dl_relocate_object (&GL(dl_rtld_map), + main_map->l_scoperec->scope, 0, 0); } } #define VERNEEDTAG (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGIDX (DT_VERNEED)) @@ -2227,7 +2227,7 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n", } if (l != &GL(dl_rtld_map)) - _dl_relocate_object (l, l->l_scope, GLRO(dl_lazy), + _dl_relocate_object (l, l->l_scoperec->scope, GLRO(dl_lazy), consider_profiling); #ifdef USE_TLS @@ -2303,7 +2303,8 @@ ERROR: ld.so: object '%s' cannot be loaded as audit interface: %s; ignored.\n", HP_TIMING_NOW (start); /* Mark the link map as not yet relocated again. 
*/ GL(dl_rtld_map).l_relocated = 0; - _dl_relocate_object (&GL(dl_rtld_map), main_map->l_scope, 0, 0); + _dl_relocate_object (&GL(dl_rtld_map), main_map->l_scoperec->scope, + 0, 0); HP_TIMING_NOW (stop); HP_TIMING_DIFF (add, start, stop); HP_TIMING_ACCUM_NT (relocate_time, add); |