From 124dcac84b992d26cfe992f9017f49e92c37add2 Mon Sep 17 00:00:00 2001
From: Roland McGrath
Date: Sat, 15 Mar 2003 23:09:52 +0000
Subject: * sysdeps/powerpc/elf/libc-start.c (AUX_VECTOR_INIT): Define it.
 (LIBC_START_MAIN, LIBC_START_MAIN_AUXVEC_ARG, MAIN_AUXVEC_ARG)
 (INIT_MAIN_ARGS): Define, and #include <sysdeps/generic/libc-start.c>.
 (__libc_start_main): Just call the generic one for most of the work.
 * sysdeps/generic/libc-start.c [LIBC_START_MAIN]: If defined, define a
 static function by that name instead of BP_SYM (__libc_start_main).
 [LIBC_START_MAIN_AUXVEC_ARG]: Take AUXVEC as argument.
 [MAIN_AUXVEC_ARG]: Pass 4th argument to MAIN.
 [INIT_MAIN_ARGS]: Give INIT the same args as MAIN.
 * sysdeps/generic/dl-sysdep.c (_dl_sysdep_start) [DL_PLATFORM_AUXV]:
 Use this macro for extra AT_* cases.
 * sysdeps/unix/sysv/linux/powerpc/dl-sysdep.c (DL_PLATFORM_AUXV):
 New macro, guts from ...
 (__aux_init_cache): ... here, function removed.
 (DL_PLATFORM_INIT): Don't define this.
 * sysdeps/powerpc/powerpc32/memset.S: Put __cache_line_size in bss.
 * sysdeps/powerpc/powerpc64/memset.S: Likewise.
 * Versions.def (libthread_db): Add GLIBC_2.3.3 set.
---
 ChangeLog                                   |  25 +++++
 Versions.def                                |   1 +
 sysdeps/generic/dl-sysdep.c                 |   3 +
 sysdeps/powerpc/elf/libc-start.c            | 132 ++++++----------------------
 sysdeps/powerpc/powerpc32/memset.S          |  76 ++++++++--------
 sysdeps/powerpc/powerpc64/memset.S          |  54 +++++-------
 sysdeps/unix/sysv/linux/powerpc/dl-sysdep.c |  31 +++----
 7 files changed, 128 insertions(+), 194 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 75d527fac0..9a793132ee 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,28 @@
+2003-03-15  Roland McGrath
+
+	* sysdeps/powerpc/elf/libc-start.c (AUX_VECTOR_INIT): Define it.
+	(LIBC_START_MAIN, LIBC_START_MAIN_AUXVEC_ARG, MAIN_AUXVEC_ARG)
+	(INIT_MAIN_ARGS): Define, and #include <sysdeps/generic/libc-start.c>.
+	(__libc_start_main): Just call the generic one for most of the work.
+
+	* sysdeps/generic/libc-start.c [LIBC_START_MAIN]: If defined, define a
+	static function by that name instead of BP_SYM (__libc_start_main).
+	[LIBC_START_MAIN_AUXVEC_ARG]: Take AUXVEC as argument.
+	[MAIN_AUXVEC_ARG]: Pass 4th argument to MAIN.
+	[INIT_MAIN_ARGS]: Give INIT the same args as MAIN.
+
+	* sysdeps/generic/dl-sysdep.c (_dl_sysdep_start) [DL_PLATFORM_AUXV]:
+	Use this macro for extra AT_* cases.
+	* sysdeps/unix/sysv/linux/powerpc/dl-sysdep.c (DL_PLATFORM_AUXV):
+	New macro, guts from ...
+	(__aux_init_cache): ... here, function removed.
+	(DL_PLATFORM_INIT): Don't define this.
+
+	* sysdeps/powerpc/powerpc32/memset.S: Put __cache_line_size in bss.
+	* sysdeps/powerpc/powerpc64/memset.S: Likewise.
+
+	* Versions.def (libthread_db): Add GLIBC_2.3.3 set.
+
 2003-03-14  Roland McGrath
 
 	* dlfcn/dlerror.c (dlerror): If objname is "", don't put ": " after it.
diff --git a/Versions.def b/Versions.def
index 23e66f7d94..861863434f 100644
--- a/Versions.def
+++ b/Versions.def
@@ -98,6 +98,7 @@ libthread_db {
   GLIBC_2.1.3
   GLIBC_2.2.3
   GLIBC_2.3
+  GLIBC_2.3.3
 }
 libanl {
   GLIBC_2.2.3
diff --git a/sysdeps/generic/dl-sysdep.c b/sysdeps/generic/dl-sysdep.c
index 7d9a52e7d0..4d80dd7689 100644
--- a/sysdeps/generic/dl-sysdep.c
+++ b/sysdeps/generic/dl-sysdep.c
@@ -139,6 +139,9 @@ _dl_sysdep_start (void **start_argptr,
 	case AT_SYSINFO:
 	  GL(dl_sysinfo) = av->a_un.a_val;
 	  break;
+#endif
+#ifdef DL_PLATFORM_AUXV
+	  DL_PLATFORM_AUXV
 #endif
 	}
 
diff --git a/sysdeps/powerpc/elf/libc-start.c b/sysdeps/powerpc/elf/libc-start.c
index aac34430a8..bbc4eeb3b8 100644
--- a/sysdeps/powerpc/elf/libc-start.c
+++ b/sysdeps/powerpc/elf/libc-start.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 1998,2000,2001,2002 Free Software Foundation, Inc.
+/* Copyright (C) 1998,2000,2001,2002,2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -22,31 +22,9 @@
 #include
 #include
 
-extern void __libc_init_first (int argc, char **argv, char **envp);
-
 extern int __cache_line_size;
 weak_extern (__cache_line_size)
 
-extern int __libc_multiple_libcs;
-extern void *__libc_stack_end;
-
-#ifndef SHARED
-# include
-extern void __pthread_initialize_minimal (void)
-# if !(USE_TLS - 0) && !defined NONTLS_INIT_TP
-     __attribute__ ((weak))
-# endif
-     ;
-#endif
-
-struct startup_info
-{
-  void *__unbounded sda_base;
-  int (*main) (int, char **, char **, void *);
-  int (*init) (int, char **, char **, void *);
-  void (*fini) (void);
-};
-
 /* Scan the Aux Vector for the "Data Cache Block Size" entry.  If found
    verify that the static extern __cache_line_size is defined by checking
    for not NULL.  If it is defined then assign the cache block size
@@ -66,16 +44,35 @@ __aux_init_cache (ElfW(auxv_t) *av)
 	break;
       }
 }
+/* This is used in sysdeps/generic/libc-start.c.  */
+#define AUX_VECTOR_INIT __aux_init_cache
+
+/* The main work is done in the generic function.  */
+#define LIBC_START_MAIN generic_start_main
+#define LIBC_START_MAIN_AUXVEC_ARG
+#define MAIN_AUXVEC_ARG
+#define INIT_MAIN_ARGS
+#include <sysdeps/generic/libc-start.c>
+
+
+struct startup_info
+{
+  void *__unbounded sda_base;
+  int (*main) (int, char **, char **, void *);
+  int (*init) (int, char **, char **, void *);
+  void (*fini) (void);
+};
 
 int
 /* GKM FIXME: GCC: this should get __BP_ prefix by virtue of the BPs in
    the arglist of startup_info.main and startup_info.init. */
 BP_SYM (__libc_start_main) (int argc, char *__unbounded *__unbounded ubp_av,
-			    char *__unbounded *__unbounded ubp_ev,
-			    ElfW(auxv_t) *__unbounded auxvec, void (*rtld_fini) (void),
-			    struct startup_info *__unbounded stinfo,
-			    char *__unbounded *__unbounded stack_on_entry)
+			    char *__unbounded *__unbounded ubp_ev,
+			    ElfW(auxv_t) *__unbounded auxvec,
+			    void (*rtld_fini) (void),
+			    struct startup_info *__unbounded stinfo,
+			    char *__unbounded *__unbounded stack_on_entry)
 {
 #if __BOUNDED_POINTERS__
   char **argv;
@@ -83,15 +80,6 @@ BP_SYM (__libc_start_main) (int argc, char *__unbounded *__unbounded ubp_av,
 # define argv ubp_av
 #endif
 
-#ifndef SHARED
-  /* The next variable is only here to work around a bug in gcc <= 2.7.2.2.
-     If the address would be taken inside the expression the optimizer
-     would try to be too smart and throws it away.  Grrr.  */
-  int *dummy_addr = &_dl_starting_up;
-
-  __libc_multiple_libcs = dummy_addr && !_dl_starting_up;
-#endif
-
   /* the PPC SVR4 ABI says that the top thing on the stack will
      be a NULL pointer, so if not we assume that we're being called
      as a statically-linked program by Linux...  */
@@ -110,78 +98,14 @@ BP_SYM (__libc_start_main) (int argc, char *__unbounded *__unbounded ubp_av,
       while (*temp != NULL)
	++temp;
       auxvec = (ElfW(auxv_t) *)++temp;
-
-# ifndef SHARED
-      _dl_aux_init (auxvec);
-# endif
 #endif
       rtld_fini = NULL;
     }
 
-  INIT_ARGV_and_ENVIRON;
-
   /* Initialize the __cache_line_size variable from the aux vector.  */
-  __aux_init_cache(auxvec);
-
-  /* Store something that has some relationship to the end of the
-     stack, for backtraces.  This variable should be thread-specific.
-     Use +8 so it works for both 32- and 64-bit.  */
-  __libc_stack_end = stack_on_entry + 8;
-
-#ifndef SHARED
-# ifdef DL_SYSDEP_OSCHECK
-  if (!__libc_multiple_libcs)
-    {
-      /* This needs to run to initiliaze _dl_osversion before TLS
	 setup might check it.  */
-      DL_SYSDEP_OSCHECK (__libc_fatal);
-    }
-# endif
-  /* Initialize the thread library at least a bit since the libgcc
-     functions are using thread functions if these are available and
-     we need to setup errno.  If there is no thread library and we
-     handle TLS the function is defined in the libc to initialized the
-     TLS handling.  */
-# if !(USE_TLS - 0) && !defined NONTLS_INIT_TP
-  if (__pthread_initialize_minimal)
-# endif
-    __pthread_initialize_minimal ();
-
-  /* Some security at this point.  Prevent starting a SUID binary where
-     the standard file descriptors are not opened.  We have to do this
-     only for statically linked applications since otherwise the dynamic
-     loader did the work already.  */
-  if (__builtin_expect (__libc_enable_secure, 0))
-    __libc_check_standard_fds ();
-#endif
-
-  /* Register the destructor of the dynamic linker if there is any.  */
-  if (rtld_fini != NULL)
-    __cxa_atexit ((void (*) (void *)) rtld_fini, NULL, NULL);
-
-  /* Call the initializer of the libc.  */
-#ifdef SHARED
-  if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
-    _dl_debug_printf ("\ninitialize libc\n\n");
-#endif
-  __libc_init_first (argc, argv, __environ);
-
-  /* Register the destructor of the program, if any.  */
-  if (stinfo->fini)
-    __cxa_atexit ((void (*) (void *)) stinfo->fini, NULL, NULL);
-
-  /* Call the initializer of the program, if any.  */
-#ifdef SHARED
-  if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
-    _dl_debug_printf ("\ninitialize program: %s\n\n", argv[0]);
-#endif
-  if (stinfo->init)
-    stinfo->init (argc, argv, __environ, auxvec);
-
-#ifdef SHARED
-  if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
-    _dl_debug_printf ("\ntransferring control: %s\n\n", argv[0]);
-#endif
+  __aux_init_cache (auxvec);
 
-  exit (stinfo->main (argc, argv, __environ, auxvec));
+  return generic_start_main (stinfo->main, argc, ubp_av, auxvec,
+			     stinfo->init, stinfo->fini, rtld_fini,
+			     stack_on_entry);
 }
diff --git a/sysdeps/powerpc/powerpc32/memset.S b/sysdeps/powerpc/powerpc32/memset.S
index bee87af0ce..1a8eae5b15 100644
--- a/sysdeps/powerpc/powerpc32/memset.S
+++ b/sysdeps/powerpc/powerpc32/memset.S
@@ -1,5 +1,5 @@
 /* Optimized memset implementation for PowerPC.
-   Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000, 2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
   The GNU C Library is free software; you can redistribute it and/or
@@ -21,27 +21,23 @@
 #include
 #include
 
-/* Define a global static that can hold the cache line size.  The
+/* Define a global static that can hold the cache line size.  The
    assumption is that startup code will access the "aux vector" to
-   to obtain the value set by the kernel and store it into this
+   to obtain the value set by the kernel and store it into this
    variable.  */
-
+
 	.globl __cache_line_size
-	.section ".data","aw"
-	.align 2
-	.type __cache_line_size,@object
-	.size __cache_line_size,4
-__cache_line_size:
-	.long 0
-	.section ".text"
+	.lcomm __cache_line_size,4,4
+
 /* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]));
    Returns 's'.
 
    The memset is done in four sizes: byte (8 bits), word (32 bits),
    32-byte blocks (256 bits) and __cache_line_size (128, 256, 1024 bits).
-   There is a special case for setting whole cache lines to 0, which
+   There is a special case for setting whole cache lines to 0, which
    takes advantage of the dcbz instruction.  */
 
+	.section ".text"
 EALIGN (BP_SYM (memset), 5, 1)
 
 #define rTMP	r0
@@ -123,14 +119,14 @@ L(caligned):
 	cmplwi	cr1, rCHR, 0
 	clrrwi.	rALIGN, rLEN, 5
 	mtcrf	0x01, rLEN	/* 40th instruction from .align */
-
+
 /* Check if we can use the special case for clearing memory using dcbz.
-   This requires that we know the correct cache line size for this
+   This requires that we know the correct cache line size for this
    processor.  Getting the __cache_line_size may require establishing GOT
    addressability, so branch out of line to set this up.  */
-	beq	cr1, L(checklinesize)
-
-/* Store blocks of 32-bytes (256-bits) starting on a 32-byte boundary.
+	beq	cr1, L(checklinesize)
+
+/* Store blocks of 32-bytes (256-bits) starting on a 32-byte boundary.
    Can't assume that rCHR is zero or that the cache line size is either
    32-bytes or even known.  */
 L(nondcbz):
@@ -172,7 +168,7 @@ L(cloopdone):
 
 	.align 5
 	nop
-/* Clear cache lines of memory in 128-byte chunks.
+/* Clear cache lines of memory in 128-byte chunks.
    This code is optimized for processors with 32-byte cache lines.  It
    is further optimized for the 601 processor, which requires some care
    in how the code is aligned in the i-cache.  */
@@ -259,22 +255,22 @@ L(medium_28t):
 	stw	rCHR, -4(rMEMP)
 	stw	rCHR, -8(rMEMP)
 	blr
-
+
 L(checklinesize):
 #ifdef SHARED
 	mflr rTMP
 /* If the remaining length is less the 32 bytes then don't bother getting
    the cache line size.  */
-	beq	L(medium)
-/* Establishes GOT addressability so we can load __cache_line_size
+	beq	L(medium)
+/* Establishes GOT addressability so we can load __cache_line_size
    from static.  This value was set from the aux vector during startup.  */
 	bl _GLOBAL_OFFSET_TABLE_@local-4
 	mflr rGOT
 	lwz rGOT,__cache_line_size@got(rGOT)
 	lwz rCLS,0(rGOT)
 	mtlr rTMP
-#else
-/* Load __cache_line_size from static.  This value was set from the
+#else
+/* Load __cache_line_size from static.  This value was set from the
    aux vector during startup.  */
 	lis rCLS,__cache_line_size@ha
 /* If the remaining length is less the 32 bytes then don't bother getting
@@ -282,22 +278,22 @@ L(checklinesize):
 	beq	L(medium)
 	lwz rCLS,__cache_line_size@l(rCLS)
 #endif
-
+
 /*If the cache line size was not set then goto to L(nondcbz), which is
-  safe for any cache line size.  */
+  safe for any cache line size.  */
 	cmplwi cr1,rCLS,0
 	beq cr1,L(nondcbz)
-
+
 /* If the cache line size is 32 bytes then goto to L(zloopstart),
-   which is coded specificly for 32-byte lines (and 601).  */
+   which is coded specificly for 32-byte lines (and 601).  */
 	cmplwi cr1,rCLS,32
 	beq cr1,L(zloopstart)
-
-/* Now we know the cache line size and it is not 32-bytes.  However
-   we may not yet be aligned to the cache line and may have a partial
-   line to fill.  Touch it 1st to fetch the cache line.  */
-	dcbtst 0,rMEMP
-
+
+/* Now we know the cache line size and it is not 32-bytes.  However
+   we may not yet be aligned to the cache line and may have a partial
+   line to fill.  Touch it 1st to fetch the cache line.  */
+	dcbtst 0,rMEMP
+
 	addi rCLM,rCLS,-1
 L(getCacheAligned):
 	cmplwi cr1,rLEN,32
@@ -317,8 +313,8 @@ L(getCacheAligned):
 	stw rCHR,-8(rMEMP)
 	stw rCHR,-4(rMEMP)
 	b	L(getCacheAligned)
-
-/* Now we are aligned to the cache line and can use dcbz.  */
+
+/* Now we are aligned to the cache line and can use dcbz.  */
 L(cacheAligned):
 	cmplw cr1,rLEN,rCLS
 	blt cr1,L(handletail32)
@@ -327,12 +323,12 @@ L(cacheAligned):
 	add rMEMP,rMEMP,rCLS
 	b L(cacheAligned)
 
-/* We are here because; the cache line size was set, it was not
-   32-bytes, and the remainder (rLEN) is now less than the actual cache
-   line size.  Set up the preconditions for L(nondcbz) and go there to
-   store the remaining bytes.  */
+/* We are here because; the cache line size was set, it was not
+   32-bytes, and the remainder (rLEN) is now less than the actual cache
+   line size.  Set up the preconditions for L(nondcbz) and go there to
+   store the remaining bytes.  */
 L(handletail32):
 	clrrwi. rALIGN, rLEN, 5
 	b L(nondcbz)
-
+
 END (BP_SYM (memset))
diff --git a/sysdeps/powerpc/powerpc64/memset.S b/sysdeps/powerpc/powerpc64/memset.S
index 4bfe20d7b1..53a4a2753d 100644
--- a/sysdeps/powerpc/powerpc64/memset.S
+++ b/sysdeps/powerpc/powerpc64/memset.S
@@ -1,5 +1,5 @@
 /* Optimized memset implementation for PowerPC64.
-   Copyright (C) 1997, 1999, 2000, 2002 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000, 2002, 2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -21,17 +21,12 @@
 #include
 #include
 
-/* Define a global static that can hold the cache line size.  The
+/* Define a global static that can hold the cache line size.  The
    assumption is that startup code will access the "aux vector" to
-   to obtain the value set by the kernel and store it into this
+   to obtain the value set by the kernel and store it into this
    variable.  */
 	.globl __cache_line_size
-	.section ".data"
-	.align 2
-	.type __cache_line_size,@object
-	.size __cache_line_size,4
-__cache_line_size:
-	.long 0
+	.lcomm __cache_line_size,4,4
 	.section ".toc","aw"
 .LC0:
 	.tc __cache_line_size[TC],__cache_line_size
@@ -81,7 +76,7 @@ L(b0):
 	andi. rALIGN, rMEMP0, 7
 	mr rMEMP, rMEMP0
 	ble- cr1, L(small)
-
+
 /* Align to doubleword boundary.  */
 	cmpldi cr5, rLEN, 31
 	rlwimi rCHR, rCHR, 8, 16, 23 /* Replicate byte to halfword.  */
@@ -108,9 +103,9 @@ L(g4):
 	bf+ 31, L(g0)
 	stb rCHR, 0(rMEMP0)
 	bt 30, L(aligned)
-L(g0):
-	sth rCHR, -2(rMEMP)
-
+L(g0):
+	sth rCHR, -2(rMEMP)
+
 /* Handle the case of size < 31.  */
 L(aligned2):
 	rlwimi rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word.  */
@@ -138,9 +133,9 @@ L(a2):
 L(caligned):
 	cmpldi cr1, rCHR, 0
 	clrrdi. rALIGN, rLEN, 5
-	mtcrf 0x01, rLEN
+	mtcrf 0x01, rLEN
 	beq cr1, L(zloopstart) /* Special case for clearing memory using dcbz.  */
-L(nondcbz):
+L(nondcbz):
 	srdi rTMP, rALIGN, 5
 	mtctr rTMP
 	beq L(medium)	/* We may not actually get to do a full line.  */
@@ -168,21 +163,21 @@ L(cloopdone):
 	.align 5
 /* Clear lines of memory in 128-byte chunks.  */
 L(zloopstart):
-/* If the remaining length is less the 32 bytes, don't bother getting
+/* If the remaining length is less the 32 bytes, don't bother getting
    the cache line size.  */
 	beq	L(medium)
 	ld	rCLS,.LC0@toc(r2)
-	lwz	rCLS,0(rCLS)
-/* If the cache line size was not set just goto to L(nondcbz) which is
-   safe for any cache line size.  */
+	lwz	rCLS,0(rCLS)
+/* If the cache line size was not set just goto to L(nondcbz) which is
+   safe for any cache line size.  */
 	cmpldi	cr1,rCLS,0
 	beq	cr1,L(nondcbz)
-
-
+
+
 /* Now we know the cache line size, and it is not 32-bytes, but
-   we may not yet be aligned to the cache line.  May have a partial
-   line to fill, so touch it 1st.  */
-	dcbt	0,rMEMP
+   we may not yet be aligned to the cache line.  May have a partial
+   line to fill, so touch it 1st.  */
+	dcbt	0,rMEMP
 	addi	rCLM,rCLS,-1
 L(getCacheAligned):
 	cmpldi	cr1,rLEN,32
@@ -196,8 +191,8 @@ L(getCacheAligned):
 	std	rCHR,-16(rMEMP)
 	std	rCHR,-8(rMEMP)
 	b	L(getCacheAligned)
-
-/* Now we are aligned to the cache line and can use dcbz.  */
+
+/* Now we are aligned to the cache line and can use dcbz.  */
 L(cacheAligned):
 	cmpld	cr1,rLEN,rCLS
 	blt	cr1,L(handletail32)
@@ -208,7 +203,7 @@ L(cacheAligned):
 
 /* We are here because the cache line size was set and was not 32-bytes
    and the remainder (rLEN) is less than the actual cache line size.
-   So set up the preconditions for L(nondcbz) and go there.  */
+   So set up the preconditions for L(nondcbz) and go there.  */
 L(handletail32):
 	clrrwi. rALIGN, rLEN, 5
 	b	L(nondcbz)
@@ -264,7 +259,7 @@ L(medium_30t):
 	bf-	29, L(medium_29f)
 L(medium_29t):
 	stwu	rCHR, -4(rMEMP)
-	blt-	cr1, L(medium_27f)
+	blt-	cr1, L(medium_27f)
 L(medium_27t):
 	std	rCHR, -8(rMEMP)
 	stdu	rCHR, -16(rMEMP)
@@ -275,7 +270,7 @@ L(medium_28t):
 	blr
 END_GEN_TB (BP_SYM (memset),TB_TOCLESS)
 
-/* Copied from bzero.S to prevent the linker from inserting a stub
+/* Copied from bzero.S to prevent the linker from inserting a stub
    between bzero and memset.  */
 ENTRY (BP_SYM (__bzero))
 #if __BOUNDED_POINTERS__
@@ -293,4 +288,3 @@ ENTRY (BP_SYM (__bzero))
 
 END_GEN_TB (BP_SYM (__bzero),TB_TOCLESS)
 weak_alias (BP_SYM (__bzero), BP_SYM (bzero))
-
diff --git a/sysdeps/unix/sysv/linux/powerpc/dl-sysdep.c b/sysdeps/unix/sysv/linux/powerpc/dl-sysdep.c
index a8687b35b1..64e4e2c4d3 100644
--- a/sysdeps/unix/sysv/linux/powerpc/dl-sysdep.c
+++ b/sysdeps/unix/sysv/linux/powerpc/dl-sysdep.c
@@ -1,5 +1,5 @@
 /* Operating system support for run-time dynamic linker.  Linux/PPC version.
-   Copyright (C) 1997, 1998, 2001 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1998, 2001, 2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -25,27 +25,18 @@
 extern int __cache_line_size;
 weak_extern (__cache_line_size)
 
-#define DL_PLATFORM_INIT __aux_init_cache(_dl_auxv)
-
 /* Scan the Aux Vector for the "Data Cache Block Size" entry.  If found
    verify that the static extern __cache_line_size is defined by checking
-   for not NULL.  If it is defined then assign the cache block size
+   for not NULL.  If it is defined then assign the cache block size
    value to __cache_line_size.  */
-static inline void
-__aux_init_cache (ElfW(auxv_t) *av)
-{
-  for (; av->a_type != AT_NULL; ++av)
-    switch (av->a_type)
-      {
-      case AT_DCACHEBSIZE:
-	{
-	  int *cls = & __cache_line_size;
-	  if (cls != NULL)
-	    *cls = av->a_un.a_val;
-	}
-	break;
-      }
-}
+#define DL_PLATFORM_AUXV		\
+      case AT_DCACHEBSIZE:		\
+	{				\
+	  int *cls = & __cache_line_size;	\
+	  if (cls != NULL)		\
+	    *cls = av->a_un.a_val;	\
+	}				\
+	break;
 
 #ifndef __ASSUME_STD_AUXV
 
@@ -78,4 +69,4 @@ __aux_init_cache (ElfW(auxv_t) *av)
       }
   while (0)
 #endif
 
-#include
+#include_next
--
cgit 1.4.1
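
For context on the DL_PLATFORM_AUXV hook this patch introduces: the generic auxv-scanning loop gains a spot where a port can splice extra AT_* cases (such as AT_DCACHEBSIZE) into the existing switch instead of duplicating the loop. Below is a minimal stand-alone sketch of that pattern; it is not glibc code and not part of the patch. The auxv_t type is simplified, scan_auxv is a made-up function, and PLATFORM_AUXV is a hypothetical stand-in for DL_PLATFORM_AUXV.

/* Illustrative sketch only -- not part of the patch or of glibc.  It mimics
   the pattern introduced above: a generic auxv scan loop with a hook macro
   (here PLATFORM_AUXV, standing in for DL_PLATFORM_AUXV) through which a
   port adds extra AT_* cases such as AT_DCACHEBSIZE.  */
#include <stdio.h>

#define AT_NULL        0   /* end of vector */
#define AT_PAGESZ      6   /* system page size */
#define AT_DCACHEBSIZE 19  /* PowerPC: data cache block size */

typedef struct { long a_type; long a_val; } auxv_t;  /* simplified ElfW(auxv_t) */

static long page_size;
static int cache_line_size;   /* the real patch keeps __cache_line_size in bss */

/* Extra switch cases supplied by the "port"; the macro is expanded inside
   scan_auxv, so it may refer to the loop variable av.  */
#define PLATFORM_AUXV				\
      case AT_DCACHEBSIZE:			\
	cache_line_size = (int) av->a_val;	\
	break;

static void
scan_auxv (auxv_t *av)
{
  for (; av->a_type != AT_NULL; ++av)
    switch (av->a_type)
      {
      case AT_PAGESZ:
	page_size = av->a_val;
	break;
      PLATFORM_AUXV
      }
}

int
main (void)
{
  auxv_t fake[] = { { AT_PAGESZ, 4096 }, { AT_DCACHEBSIZE, 128 }, { AT_NULL, 0 } };
  scan_auxv (fake);
  printf ("page size %ld, cache line %d\n", page_size, cache_line_size);
  return 0;
}

The real hook differs only in that DL_PLATFORM_AUXV is spliced into the existing switch in _dl_sysdep_start (see the sysdeps/generic/dl-sysdep.c hunk above); the control flow is the same.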