Diffstat (limited to 'sysdeps/powerpc')
-rw-r--r--  sysdeps/powerpc/elf/libc-start.c    132
-rw-r--r--  sysdeps/powerpc/powerpc32/memset.S   76
-rw-r--r--  sysdeps/powerpc/powerpc64/memset.S   54
3 files changed, 88 insertions, 174 deletions
diff --git a/sysdeps/powerpc/elf/libc-start.c b/sysdeps/powerpc/elf/libc-start.c
index aac34430a8..bbc4eeb3b8 100644
--- a/sysdeps/powerpc/elf/libc-start.c
+++ b/sysdeps/powerpc/elf/libc-start.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 1998,2000,2001,2002 Free Software Foundation, Inc.
+/* Copyright (C) 1998,2000,2001,2002,2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -22,31 +22,9 @@
 #include <bp-start.h>
 #include <bp-sym.h>
 
-extern void __libc_init_first (int argc, char **argv, char **envp);
-
 extern int __cache_line_size;
 weak_extern (__cache_line_size)
 
-extern int __libc_multiple_libcs;
-extern void *__libc_stack_end;
-
-#ifndef SHARED
-# include <tls.h>
-extern void __pthread_initialize_minimal (void)
-# if !(USE_TLS - 0) && !defined NONTLS_INIT_TP
-     __attribute__ ((weak))
-# endif
-     ;
-#endif
-
-struct startup_info
-{
-  void *__unbounded sda_base;
-  int (*main) (int, char **, char **, void *);
-  int (*init) (int, char **, char **, void *);
-  void (*fini) (void);
-};
-
 /* Scan the Aux Vector for the "Data Cache Block Size" entry.  If found
    verify that the static extern __cache_line_size is defined by checking
    for not NULL.  If it is defined then assign the cache block size
@@ -66,16 +44,35 @@ __aux_init_cache (ElfW(auxv_t) *av)
         break;
       }
 }
+/* This is used in sysdeps/generic/libc-start.c.  */
+#define AUX_VECTOR_INIT __aux_init_cache
+
+/* The main work is done in the generic function.  */
+#define LIBC_START_MAIN generic_start_main
+#define LIBC_START_MAIN_AUXVEC_ARG
+#define MAIN_AUXVEC_ARG
+#define INIT_MAIN_ARGS
+#include <sysdeps/generic/libc-start.c>
+
+
+struct startup_info
+{
+  void *__unbounded sda_base;
+  int (*main) (int, char **, char **, void *);
+  int (*init) (int, char **, char **, void *);
+  void (*fini) (void);
+};
 
 
 int
 /* GKM FIXME: GCC: this should get __BP_ prefix by virtue of the
    BPs in the arglist of startup_info.main and startup_info.init. */
 BP_SYM (__libc_start_main) (int argc, char *__unbounded *__unbounded ubp_av,
-		   char *__unbounded *__unbounded ubp_ev,
-		   ElfW(auxv_t) *__unbounded auxvec, void (*rtld_fini) (void),
-		   struct startup_info *__unbounded stinfo,
-		   char *__unbounded *__unbounded stack_on_entry)
+			    char *__unbounded *__unbounded ubp_ev,
+			    ElfW(auxv_t) *__unbounded auxvec,
+			    void (*rtld_fini) (void),
+			    struct startup_info *__unbounded stinfo,
+			    char *__unbounded *__unbounded stack_on_entry)
 {
 #if __BOUNDED_POINTERS__
   char **argv;
@@ -83,15 +80,6 @@ BP_SYM (__libc_start_main) (int argc, char *__unbounded *__unbounded ubp_av,
 # define argv ubp_av
 #endif
 
-#ifndef SHARED
-  /* The next variable is only here to work around a bug in gcc <= 2.7.2.2.
-     If the address would be taken inside the expression the optimizer
-     would try to be too smart and throws it away.  Grrr.  */
-  int *dummy_addr = &_dl_starting_up;
-
-  __libc_multiple_libcs = dummy_addr && !_dl_starting_up;
-#endif
-
   /* the PPC SVR4 ABI says that the top thing on the stack will
      be a NULL pointer, so if not we assume that we're being called
      as a statically-linked program by Linux...	 */
@@ -110,78 +98,14 @@ BP_SYM (__libc_start_main) (int argc, char *__unbounded *__unbounded ubp_av,
       while (*temp != NULL)
         ++temp;
       auxvec = (ElfW(auxv_t) *)++temp;
-
-# ifndef SHARED
-      _dl_aux_init (auxvec);
-# endif
 #endif
       rtld_fini = NULL;
     }
 
-  INIT_ARGV_and_ENVIRON;
-
   /* Initialize the __cache_line_size variable from the aux vector.  */
-  __aux_init_cache(auxvec);
-
-  /* Store something that has some relationship to the end of the
-     stack, for backtraces.  This variable should be thread-specific.
-     Use +8 so it works for both 32- and 64-bit.  */
-  __libc_stack_end = stack_on_entry + 8;
-
-#ifndef SHARED
-# ifdef DL_SYSDEP_OSCHECK
-  if (!__libc_multiple_libcs)
-    {
-      /* This needs to run to initiliaze _dl_osversion before TLS
-	 setup might check it.  */
-      DL_SYSDEP_OSCHECK (__libc_fatal);
-    }
-# endif
-  /* Initialize the thread library at least a bit since the libgcc
-     functions are using thread functions if these are available and
-     we need to setup errno.  If there is no thread library and we
-     handle TLS the function is defined in the libc to initialized the
-     TLS handling.  */
-# if !(USE_TLS - 0) && !defined NONTLS_INIT_TP
-  if (__pthread_initialize_minimal)
-# endif
-    __pthread_initialize_minimal ();
-
-  /* Some security at this point.  Prevent starting a SUID binary where
-     the standard file descriptors are not opened.  We have to do this
-     only for statically linked applications since otherwise the dynamic
-     loader did the work already.  */
-  if (__builtin_expect (__libc_enable_secure, 0))
-    __libc_check_standard_fds ();
-#endif
-
-  /* Register the destructor of the dynamic linker if there is any.  */
-  if (rtld_fini != NULL)
-    __cxa_atexit ((void (*) (void *)) rtld_fini, NULL, NULL);
-
-  /* Call the initializer of the libc.  */
-#ifdef SHARED
-  if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
-    _dl_debug_printf ("\ninitialize libc\n\n");
-#endif
-  __libc_init_first (argc, argv, __environ);
-
-  /* Register the destructor of the program, if any.  */
-  if (stinfo->fini)
-    __cxa_atexit ((void (*) (void *)) stinfo->fini, NULL, NULL);
-
-  /* Call the initializer of the program, if any.  */
-#ifdef SHARED
-  if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
-    _dl_debug_printf ("\ninitialize program: %s\n\n", argv[0]);
-#endif
-  if (stinfo->init)
-    stinfo->init (argc, argv, __environ, auxvec);
-
-#ifdef SHARED
-  if (__builtin_expect (GL(dl_debug_mask) & DL_DEBUG_IMPCALLS, 0))
-    _dl_debug_printf ("\ntransferring control: %s\n\n", argv[0]);
-#endif
+  __aux_init_cache (auxvec);
 
-  exit (stinfo->main (argc, argv, __environ, auxvec));
+  return generic_start_main (stinfo->main, argc, ubp_av, auxvec,
+			     stinfo->init, stinfo->fini, rtld_fini,
+			     stack_on_entry);
 }
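
The hunk above keeps only the tail of __aux_init_cache, so for context here is a
minimal C sketch of the kind of aux-vector scan that function performs.  This is
illustrative rather than the committed code: the name aux_init_cache_sketch is
hypothetical, while AT_DCACHEBSIZE, AT_NULL, and ElfW() are the real definitions
from <elf.h>/<link.h>, and the address test mirrors the weak-extern "not NULL"
check the comment in the hunk describes.

    #include <link.h>   /* ElfW(), ElfW(auxv_t), AT_DCACHEBSIZE, AT_NULL */

    /* Weak, so programs that never reference the variable still link;
       the scan must therefore test the symbol's address before storing.  */
    extern int __cache_line_size __attribute__ ((weak));

    static void
    aux_init_cache_sketch (ElfW(auxv_t) *av)
    {
      for (; av->a_type != AT_NULL; ++av)
        if (av->a_type == AT_DCACHEBSIZE)
          {
            if (&__cache_line_size != NULL)   /* symbol linked in?  */
              __cache_line_size = av->a_un.a_val;
            break;
          }
    }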
diff --git a/sysdeps/powerpc/powerpc32/memset.S b/sysdeps/powerpc/powerpc32/memset.S
index bee87af0ce..1a8eae5b15 100644
--- a/sysdeps/powerpc/powerpc32/memset.S
+++ b/sysdeps/powerpc/powerpc32/memset.S
@@ -1,5 +1,5 @@
 /* Optimized memset implementation for PowerPC.
-   Copyright (C) 1997, 1999, 2000 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000, 2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -21,27 +21,23 @@
 #include <bp-sym.h>
 #include <bp-asm.h>
 
-/* Define a global static that can hold the cache line size.  The 
+/* Define a global static that can hold the cache line size.  The
    assumption is that startup code will access the "aux vector" to
-   to obtain the value set by the kernel and store it into this 
+   obtain the value set by the kernel and store it into this
    variable.  */
-   
+
 	.globl __cache_line_size
-	.section	".data","aw"
-	.align 2
-	.type	 __cache_line_size,@object
-	.size	 __cache_line_size,4
-__cache_line_size:
-	.long 0
-	.section	".text"
+	.lcomm __cache_line_size,4,4
+
 /* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
    Returns 's'.
 
    The memset is done in four sizes: byte (8 bits), word (32 bits),
    32-byte blocks (256 bits) and __cache_line_size (128, 256, 1024 bits).
-   There is a special case for setting whole cache lines to 0, which 
+   There is a special case for setting whole cache lines to 0, which
    takes advantage of the dcbz instruction.  */
 
+	.section	".text"
 EALIGN (BP_SYM (memset), 5, 1)
 
 #define rTMP	r0
@@ -123,14 +119,14 @@ L(caligned):
 	cmplwi	cr1, rCHR, 0
 	clrrwi.	rALIGN, rLEN, 5
 	mtcrf	0x01, rLEN	/* 40th instruction from .align */
-	
+
 /* Check if we can use the special case for clearing memory using dcbz.
-   This requires that we know the correct cache line size for this    
+   This requires that we know the correct cache line size for this
    processor.  Getting the __cache_line_size may require establishing GOT
    addressability, so branch out of line to set this up.  */
-	beq	cr1, L(checklinesize) 
-	
-/* Store blocks of 32-bytes (256-bits) starting on a 32-byte boundary. 
+	beq	cr1, L(checklinesize)
+
+/* Store blocks of 32-bytes (256-bits) starting on a 32-byte boundary.
    Can't assume that rCHR is zero or that the cache line size is either
    32-bytes or even known.  */
 L(nondcbz):
@@ -172,7 +168,7 @@ L(cloopdone):
 
 	.align 5
 	nop
-/* Clear cache lines of memory in 128-byte chunks.  
+/* Clear cache lines of memory in 128-byte chunks.
    This code is optimized for processors with 32-byte cache lines.
    It is further optimized for the 601 processor, which requires
    some care in how the code is aligned in the i-cache.  */
@@ -259,22 +255,22 @@ L(medium_28t):
 	stw	rCHR, -4(rMEMP)
 	stw	rCHR, -8(rMEMP)
 	blr
-	
+
 L(checklinesize):
 #ifdef SHARED
 	mflr rTMP
 /* If the remaining length is less than 32 bytes then don't bother getting
 	 the cache line size.  */
-	beq	L(medium)	
-/* Establishes GOT addressability so we can load __cache_line_size 
+	beq	L(medium)
+/* Establishes GOT addressability so we can load __cache_line_size
    from static. This value was set from the aux vector during startup.  */
 	bl   _GLOBAL_OFFSET_TABLE_@local-4
 	mflr rGOT
 	lwz	 rGOT,__cache_line_size@got(rGOT)
 	lwz	 rCLS,0(rGOT)
 	mtlr rTMP
-#else 
-/* Load __cache_line_size from static. This value was set from the 
+#else
+/* Load __cache_line_size from static. This value was set from the
    aux vector during startup.  */
 	lis	 rCLS,__cache_line_size@ha
 /* If the remaining length is less than 32 bytes then don't bother getting
@@ -282,22 +278,22 @@ L(checklinesize):
 	beq	L(medium)
 	lwz  rCLS,__cache_line_size@l(rCLS)
 #endif
-	
+
 /* If the cache line size was not set then go to L(nondcbz), which is
-	safe for any cache line size.  */	
+	safe for any cache line size.  */
 	cmplwi cr1,rCLS,0
 	beq	cr1,L(nondcbz)
-	
+
 /* If the cache line size is 32 bytes then go to L(zloopstart),
-	 which is coded specificly for 32-byte lines (and 601).  */	
+	 which is coded specifically for 32-byte lines (and 601).  */
 	cmplwi cr1,rCLS,32
 	beq	cr1,L(zloopstart)
-	
-/* Now we know the cache line size and it is not 32-bytes.  However 
-	 we may not yet be aligned to the cache line and may have a partial 
-	 line to fill.  Touch it 1st to fetch the cache line.  */	
-	dcbtst 0,rMEMP	
-	
+
+/* Now we know the cache line size and it is not 32 bytes.  However
+	 we may not yet be aligned to the cache line and may have a partial
+	 line to fill.  Touch it first to fetch the cache line.  */
+	dcbtst 0,rMEMP
+
 	addi rCLM,rCLS,-1
 L(getCacheAligned):
 	cmplwi cr1,rLEN,32
@@ -317,8 +313,8 @@ L(getCacheAligned):
 	stw	 rCHR,-8(rMEMP)
 	stw	 rCHR,-4(rMEMP)
 	b	 L(getCacheAligned)
-	
-/* Now we are aligned to the cache line and can use dcbz.  */	
+
+/* Now we are aligned to the cache line and can use dcbz.  */
 L(cacheAligned):
 	cmplw cr1,rLEN,rCLS
 	blt	 cr1,L(handletail32)
@@ -327,12 +323,12 @@ L(cacheAligned):
 	add	 rMEMP,rMEMP,rCLS
 	b	 L(cacheAligned)
 
-/* We are here because; the cache line size was set, it was not 
-   32-bytes, and the remainder (rLEN) is now less than the actual cache 
-   line size.  Set up the preconditions for L(nondcbz) and go there to 
-   store the remaining bytes.  */			
+/* We are here because the cache line size was set, it was not
+   32 bytes, and the remainder (rLEN) is now less than the actual cache
+   line size.  Set up the preconditions for L(nondcbz) and go there to
+   store the remaining bytes.  */
 L(handletail32):
 	clrrwi.	rALIGN, rLEN, 5
 	b		L(nondcbz)
-		
+
 END (BP_SYM (memset))
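
The special case these comments describe works as follows: once the destination
is aligned to a whole line and the fill value is zero, each dcbz establishes a
zeroed cache line without first reading it from memory.  A minimal C rendering
of that inner loop follows; it is a sketch under stated assumptions
(clear_lines_sketch is a hypothetical name, __cache_line_size must be nonzero,
i.e. the aux vector supplied it, and mem must already be line-aligned), not the
library routine, which is the hand-scheduled assembly above.

    #include <stddef.h>

    extern int __cache_line_size;   /* set from AT_DCACHEBSIZE at startup */

    /* Zero len bytes at mem, assuming mem is aligned to the cache line
       size and __cache_line_size != 0 (the assembly branches to
       L(nondcbz) when the size is unknown).  */
    static void
    clear_lines_sketch (char *mem, size_t len)
    {
      size_t cls = (size_t) __cache_line_size;
      while (len >= cls)
        {
          /* Zero one whole cache line in place.  */
          __asm__ __volatile__ ("dcbz 0,%0" : : "r" (mem) : "memory");
          mem += cls;
          len -= cls;
        }
      /* The sub-line remainder is finished with ordinary stores, which
         is what L(handletail32) arranges by falling back to L(nondcbz).  */
    }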
diff --git a/sysdeps/powerpc/powerpc64/memset.S b/sysdeps/powerpc/powerpc64/memset.S
index 4bfe20d7b1..53a4a2753d 100644
--- a/sysdeps/powerpc/powerpc64/memset.S
+++ b/sysdeps/powerpc/powerpc64/memset.S
@@ -1,5 +1,5 @@
 /* Optimized memset implementation for PowerPC64.
-   Copyright (C) 1997, 1999, 2000, 2002 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2000, 2002, 2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -21,17 +21,12 @@
 #include <bp-sym.h>
 #include <bp-asm.h>
 
-/* Define a global static that can hold the cache line size.  The 
+/* Define a global static that can hold the cache line size.  The
    assumption is that startup code will access the "aux vector" to
-   to obtain the value set by the kernel and store it into this 
+   obtain the value set by the kernel and store it into this
    variable.  */
 	.globl __cache_line_size
-	.section	".data"
-	.align 2
-	.type	__cache_line_size,@object
-	.size	__cache_line_size,4
-__cache_line_size:
-	.long	0
+	.lcomm __cache_line_size,4,4
 	.section	".toc","aw"
 .LC0:
 	.tc __cache_line_size[TC],__cache_line_size
@@ -81,7 +76,7 @@ L(b0):
 	andi.	rALIGN, rMEMP0, 7
 	mr	rMEMP, rMEMP0
 	ble-	cr1, L(small)
-	
+
 /* Align to doubleword boundary.  */
 	cmpldi	cr5, rLEN, 31
 	rlwimi	rCHR, rCHR, 8, 16, 23 /* Replicate byte to halfword.  */
@@ -108,9 +103,9 @@ L(g4):
 	bf+	31, L(g0)
 	stb	rCHR, 0(rMEMP0)
 	bt	30, L(aligned)
-L(g0):	
-	sth	rCHR, -2(rMEMP)	
-	
+L(g0):
+	sth	rCHR, -2(rMEMP)
+
 /* Handle the case of size < 31.  */
 L(aligned2):
 	rlwimi	rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word.  */
@@ -138,9 +133,9 @@ L(a2):
 L(caligned):
 	cmpldi	cr1, rCHR, 0
 	clrrdi.	rALIGN, rLEN, 5
-	mtcrf	0x01, rLEN	
+	mtcrf	0x01, rLEN
 	beq	cr1, L(zloopstart) /* Special case for clearing memory using dcbz.  */
-L(nondcbz):	
+L(nondcbz):
 	srdi	rTMP, rALIGN, 5
 	mtctr	rTMP
 	beq	L(medium)	/* We may not actually get to do a full line.  */
@@ -168,21 +163,21 @@ L(cloopdone):
 	.align 5
 /* Clear lines of memory in 128-byte chunks.  */
 L(zloopstart):
-/* If the remaining length is less the 32 bytes, don't bother getting 
+/* If the remaining length is less than 32 bytes, don't bother getting
 	 the cache line size.  */
 	beq	L(medium)
 	ld	rCLS,.LC0@toc(r2)
-	lwz	rCLS,0(rCLS)	
-/* If the cache line size was not set just goto to L(nondcbz) which is 
-	 safe for any cache line size.  */	
+	lwz	rCLS,0(rCLS)
+/* If the cache line size was not set just go to L(nondcbz), which is
+	 safe for any cache line size.  */
 	cmpldi	cr1,rCLS,0
 	beq		cr1,L(nondcbz)
-	
-	
+
+
 /* Now we know the cache line size, and it is not 32 bytes, but
-	 we may not yet be aligned to the cache line. May have a partial 
-	 line to fill, so touch it 1st.  */	
-	dcbt	0,rMEMP	
+	 we may not yet be aligned to the cache line.  We may have a partial
+	 line to fill, so touch it first.  */
+	dcbt	0,rMEMP
 	addi	rCLM,rCLS,-1
 L(getCacheAligned):
 	cmpldi	cr1,rLEN,32
@@ -196,8 +191,8 @@ L(getCacheAligned):
 	std		rCHR,-16(rMEMP)
 	std		rCHR,-8(rMEMP)
 	b		L(getCacheAligned)
-	
-/* Now we are aligned to the cache line and can use dcbz.  */	
+
+/* Now we are aligned to the cache line and can use dcbz.  */
 L(cacheAligned):
 	cmpld	cr1,rLEN,rCLS
 	blt		cr1,L(handletail32)
@@ -208,7 +203,7 @@ L(cacheAligned):
 
 /* We are here because the cache line size was set and was not 32 bytes
    and the remainder (rLEN) is less than the actual cache line size.
-   So set up the preconditions for L(nondcbz) and go there.  */			
+   So set up the preconditions for L(nondcbz) and go there.  */
 L(handletail32):
 	clrrwi.	rALIGN, rLEN, 5
 	b		L(nondcbz)
@@ -264,7 +259,7 @@ L(medium_30t):
 	bf-	29, L(medium_29f)
 L(medium_29t):
 	stwu	rCHR, -4(rMEMP)
-	blt-	cr1, L(medium_27f) 
+	blt-	cr1, L(medium_27f)
 L(medium_27t):
 	std	rCHR, -8(rMEMP)
 	stdu	rCHR, -16(rMEMP)
@@ -275,7 +270,7 @@ L(medium_28t):
 	blr
 END_GEN_TB (BP_SYM (memset),TB_TOCLESS)
 
-/* Copied from bzero.S to prevent the linker from inserting a stub 
+/* Copied from bzero.S to prevent the linker from inserting a stub
    between bzero and memset.  */
 ENTRY (BP_SYM (__bzero))
 #if __BOUNDED_POINTERS__
@@ -293,4 +288,3 @@ ENTRY (BP_SYM (__bzero))
 END_GEN_TB (BP_SYM (__bzero),TB_TOCLESS)
 
 weak_alias (BP_SYM (__bzero), BP_SYM (bzero))
-
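
Both memset files also make the same data-definition change: the word that used
to be emitted as initialized data is now reserved with .lcomm, which places it
in .bss.  In rough C terms the two forms are the alternatives sketched below
(OLD_DEFINITION is a hypothetical switch used only for illustration).  The
loader zero-fills .bss, so the code observes the same initial value of 0, but
the word no longer occupies space in the on-disk data segment.

    #ifdef OLD_DEFINITION
    /* .section ".data"; .long 0 -- an initialized word stored in the file.  */
    int __cache_line_size = 0;
    #else
    /* .lcomm __cache_line_size,4,4 -- a zero-filled word in .bss.  */
    int __cache_line_size;
    #endif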