author     Anton Blanchard <anton@samba.org>          2013-01-07 11:20:53 -0600
committer  Ryan S. Arnold <rsa@linux.vnet.ibm.com>    2013-01-07 11:20:53 -0600
commit     2ccdea26f290f6990606f4a43de5272afa1a784d (patch)
tree       4b31e4613c48117cafa62f6404a1f207bb123832 /sysdeps/powerpc/powerpc32
parent     375607b9cc9ddf46a379bab6bf2998c54099d6b5 (diff)
Fix spelling errors in sysdeps/powerpc files.
Diffstat (limited to 'sysdeps/powerpc/powerpc32')
-rw-r--r--  sysdeps/powerpc/powerpc32/bits/atomic.h            4
-rw-r--r--  sysdeps/powerpc/powerpc32/cell/memcpy.S            4
-rw-r--r--  sysdeps/powerpc/powerpc32/dl-machine.c             2
-rw-r--r--  sysdeps/powerpc/powerpc32/dl-start.S               2
-rw-r--r--  sysdeps/powerpc/powerpc32/memset.S                 2
-rw-r--r--  sysdeps/powerpc/powerpc32/power4/fpu/mpa.c         6
-rw-r--r--  sysdeps/powerpc/powerpc32/power4/fpu/slowpow.c     2
-rw-r--r--  sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S      2
-rw-r--r--  sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S     2
-rw-r--r--  sysdeps/powerpc/powerpc32/power4/hp-timing.h       2
-rw-r--r--  sysdeps/powerpc/powerpc32/power4/memcmp.S          4
-rw-r--r--  sysdeps/powerpc/powerpc32/power4/strncmp.S         2
-rw-r--r--  sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt.S      2
-rw-r--r--  sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf.S     2
-rw-r--r--  sysdeps/powerpc/powerpc32/power6/memcpy.S         16
-rw-r--r--  sysdeps/powerpc/powerpc32/power7/memchr.S          2
-rw-r--r--  sysdeps/powerpc/powerpc32/power7/memcmp.S          4
-rw-r--r--  sysdeps/powerpc/powerpc32/power7/memrchr.S         2
-rw-r--r--  sysdeps/powerpc/powerpc32/power7/strcasecmp.S      4
-rw-r--r--  sysdeps/powerpc/powerpc32/power7/strncmp.S         2
-rw-r--r--  sysdeps/powerpc/powerpc32/strncmp.S                2
21 files changed, 35 insertions, 35 deletions
diff --git a/sysdeps/powerpc/powerpc32/bits/atomic.h b/sysdeps/powerpc/powerpc32/bits/atomic.h
index 2f441ed985..3e3a1effe5 100644
--- a/sysdeps/powerpc/powerpc32/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc32/bits/atomic.h
@@ -21,7 +21,7 @@
     This is a hint to the hardware to expect additional updates adjacent
     to the lock word or not.  If we are acquiring a Mutex, the hint
     should be true. Otherwise we releasing a Mutex or doing a simple
-    atomic operation.  In that case we don't expect addtional updates
+    atomic operation.  In that case we don't expect additional updates
     adjacent to the lock word after the Store Conditional and the hint
     should be false.  */
     
@@ -35,7 +35,7 @@
 
 /*
  * The 32-bit exchange_bool is different on powerpc64 because the subf
- * does signed 64-bit arthmatic while the lwarx is 32-bit unsigned
+ * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
  * (a load word and zero (high 32) form).  So powerpc64 has a slightly
  * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
  */
diff --git a/sysdeps/powerpc/powerpc32/cell/memcpy.S b/sysdeps/powerpc/powerpc32/cell/memcpy.S
index 5fbdab1db4..6d7d4ce5db 100644
--- a/sysdeps/powerpc/powerpc32/cell/memcpy.S
+++ b/sysdeps/powerpc/powerpc32/cell/memcpy.S
@@ -34,7 +34,7 @@
  * latency to memory is >400 clocks
  * To improve copy performance we need to prefetch source data
  * far ahead to hide this latency
- * For best performance instructionforms ending in "." like "andi."
+ * For best performance instruction forms ending in "." like "andi."
  * should be avoided as the are implemented in microcode on CELL.
  * The below code is loop unrolled for the CELL cache line of 128 bytes
  */
@@ -146,7 +146,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 	lfd	fp9, 0x08(r4)
 	dcbz	r11,r6
 	lfd	fp10, 0x10(r4)	/* 4 register stride copy is optimal  */
-	lfd	fp11, 0x18(r4)	/* to hide 1st level cache lantency.  */
+	lfd	fp11, 0x18(r4)	/* to hide 1st level cache latency.  */
 	lfd	fp12, 0x20(r4)
 	stfd	fp9, 0x08(r6)
 	stfd	fp10, 0x10(r6)
diff --git a/sysdeps/powerpc/powerpc32/dl-machine.c b/sysdeps/powerpc/powerpc32/dl-machine.c
index f9f2a5d8f3..bd42fdf7d5 100644
--- a/sysdeps/powerpc/powerpc32/dl-machine.c
+++ b/sysdeps/powerpc/powerpc32/dl-machine.c
@@ -113,7 +113,7 @@ __elf_preferred_address (struct link_map *loader, size_t maplength,
   /* Otherwise, quickly look for a suitable gap between 0x3FFFF and
      0x70000000.  0x3FFFF is so that references off NULL pointers will
      cause a segfault, 0x70000000 is just paranoia (it should always
-     be superceded by the program's load address).  */
+     be superseded by the program's load address).  */
   low =  0x0003FFFF;
   high = 0x70000000;
   for (nsid = 0; nsid < DL_NNS; ++nsid)
diff --git a/sysdeps/powerpc/powerpc32/dl-start.S b/sysdeps/powerpc/powerpc32/dl-start.S
index 01484e8e94..fa9c9bc4ae 100644
--- a/sysdeps/powerpc/powerpc32/dl-start.S
+++ b/sysdeps/powerpc/powerpc32/dl-start.S
@@ -74,7 +74,7 @@ _dl_start_user:
 	slwi	r5,r3,2
 	add	r6,r4,r5
 	addi	r5,r6,4
-/* pass the auxilary vector in r6. This is passed to us just after _envp.  */
+/* pass the auxiliary vector in r6. This is passed to us just after _envp.  */
 2:	lwzu	r0,4(r6)
 	cmpwi	r0,0
 	bne	2b
diff --git a/sysdeps/powerpc/powerpc32/memset.S b/sysdeps/powerpc/powerpc32/memset.S
index 2e86d1c910..45c79d858b 100644
--- a/sysdeps/powerpc/powerpc32/memset.S
+++ b/sysdeps/powerpc/powerpc32/memset.S
@@ -275,7 +275,7 @@ L(checklinesize):
 	beq	cr1,L(nondcbz)
 
 /* If the cache line size is 32 bytes then goto to L(zloopstart),
-   which is coded specificly for 32-byte lines (and 601).  */
+   which is coded specifically for 32-byte lines (and 601).  */
 	cmplwi	cr1,rCLS,32
 	beq	cr1,L(zloopstart)
 
diff --git a/sysdeps/powerpc/powerpc32/power4/fpu/mpa.c b/sysdeps/powerpc/powerpc32/power4/fpu/mpa.c
index f167969ea3..b6f8341afa 100644
--- a/sysdeps/powerpc/powerpc32/power4/fpu/mpa.c
+++ b/sysdeps/powerpc/powerpc32/power4/fpu/mpa.c
@@ -409,9 +409,9 @@ void __mul(const mp_no *x, const mp_no *y, mp_no *z, int p) {
     if (k > p2)  {i1=k-p2; i2=p2+1; }
     else        {i1=1;   i2=k;   }
 #if 1
-    /* rearange this inner loop to allow the fmadd instructions to be
+    /* rearrange this inner loop to allow the fmadd instructions to be
        independent and execute in parallel on processors that have
-       dual symetrical FP pipelines.  */
+       dual symmetrical FP pipelines.  */
     if (i1 < (i2-1))
     {
 	/* make sure we have at least 2 iterations */
@@ -437,7 +437,7 @@ void __mul(const mp_no *x, const mp_no *y, mp_no *z, int p) {
 	zk += x->d[i1]*y->d[i1];
     }
 #else
-    /* The orginal code.  */
+    /* The original code.  */
     for (i=i1,j=i2-1; i<i2; i++,j--)  zk += X[i]*Y[j];
 #endif
 
diff --git a/sysdeps/powerpc/powerpc32/power4/fpu/slowpow.c b/sysdeps/powerpc/powerpc32/power4/fpu/slowpow.c
index 098e19a5f0..7c97d95817 100644
--- a/sysdeps/powerpc/powerpc32/power4/fpu/slowpow.c
+++ b/sysdeps/powerpc/powerpc32/power4/fpu/slowpow.c
@@ -59,7 +59,7 @@ __slowpow (double x, double y, double z)
   res1 = (double) (ldpp - ldeps);
 
   if (res != res1)		/* if result still not accurate enough */
-    {				/* use mpa for higher persision.  */
+    {				/* use mpa for higher precision.  */
       mp_no mpx, mpy, mpz, mpw, mpp, mpr, mpr1;
       static const mp_no eps = { -3, {1.0, 4.0} };
       int p;
diff --git a/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S b/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S
index cb55816204..4f1c17680d 100644
--- a/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S
+++ b/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S
@@ -22,7 +22,7 @@
 /* double [fp1] sqrt (double x [fp1])
    Power4 (ISA V2.0) and above implement sqrt in hardware (not optional).
    The fsqrt instruction generates the correct value for all inputs and
-   sets the appropriate floating point exceptions.  Extented checking is
+   sets the appropriate floating point exceptions.  Extended checking is
    only needed to set errno (via __kernel_standard) if the input value
    is negative.
    
diff --git a/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S b/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S
index a13a846875..0da5b7a8e3 100644
--- a/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S
+++ b/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S
@@ -22,7 +22,7 @@
 /* float [fp1] sqrts (float x [fp1])
    Power4 (ISA V2.0) and above implement sqrt in hardware (not optional).
    The fsqrts instruction generates the correct value for all inputs and
-   sets the appropriate floating point exceptions.  Extented checking is
+   sets the appropriate floating point exceptions.  Extended checking is
    only needed to set errno (via __kernel_standard) if the input value
    is negative.
    
diff --git a/sysdeps/powerpc/powerpc32/power4/hp-timing.h b/sysdeps/powerpc/powerpc32/power4/hp-timing.h
index 4742d76242..7d6c96e9e9 100644
--- a/sysdeps/powerpc/powerpc32/power4/hp-timing.h
+++ b/sysdeps/powerpc/powerpc32/power4/hp-timing.h
@@ -82,7 +82,7 @@ typedef unsigned long long int hp_timing_t;
 /* That's quite simple.  Use the `mftb' instruction.  Note that the value
    might not be 100% accurate since there might be some more instructions
    running in this moment.  This could be changed by using a barrier like
-   'lwsync' right before the `mftb' instruciton.  But we are not interested
+   'lwsync' right before the `mftb' instruction.  But we are not interested
    in accurate clock cycles here so we don't do this.  */
 
 #define HP_TIMING_NOW(Var)						\
diff --git a/sysdeps/powerpc/powerpc32/power4/memcmp.S b/sysdeps/powerpc/powerpc32/power4/memcmp.S
index 65a0d809a7..bbee6f4d35 100644
--- a/sysdeps/powerpc/powerpc32/power4/memcmp.S
+++ b/sysdeps/powerpc/powerpc32/power4/memcmp.S
@@ -69,7 +69,7 @@ EALIGN (BP_SYM(memcmp), 4, 0)
    Otherwise we know the two strings have the same alignment (but not
    yet word aligned).  So we force the string addresses to the next lower
    word boundary and special case this first word using shift left to
-   eliminate bits preceeding the first byte.  Since we want to join the
+   eliminate bits preceding the first byte.  Since we want to join the
    normal (word aligned) compare loop, starting at the second word,
    we need to adjust the length (rN) and special case the loop
    versioning for the first word. This insures that the loop count is
@@ -517,7 +517,7 @@ L(zeroLength):
    Otherwise we know that rSTR1 is not aready word aligned yet.
    So we can force the string addresses to the next lower word
    boundary and special case this first word using shift left to
-   eliminate bits preceeding the first byte.  Since we want to join the
+   eliminate bits preceding the first byte.  Since we want to join the
    normal (Wualigned) compare loop, starting at the second word,
    we need to adjust the length (rN) and special case the loop
    versioning for the first W. This insures that the loop count is
diff --git a/sysdeps/powerpc/powerpc32/power4/strncmp.S b/sysdeps/powerpc/powerpc32/power4/strncmp.S
index ba12632085..50d79dc967 100644
--- a/sysdeps/powerpc/powerpc32/power4/strncmp.S
+++ b/sysdeps/powerpc/powerpc32/power4/strncmp.S
@@ -51,7 +51,7 @@ EALIGN (BP_SYM(strncmp), 4, 0)
 	cmplwi	cr1, rN, 0
 	lis	rFEFE, -0x101
 	bne	L(unaligned)
-/* We are word alligned so set up for two loops.  first a word
+/* We are word aligned so set up for two loops.  first a word
    loop, then fall into the byte loop if any residual.  */
 	srwi.	rTMP, rN, 2
 	clrlwi	rN, rN, 30
diff --git a/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt.S b/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt.S
index aab4e56609..23559aa192 100644
--- a/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt.S
+++ b/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt.S
@@ -22,7 +22,7 @@
 /* double [fp1] sqrt (double x [fp1])
    Power4 (ISA V2.0) and above implement sqrt in hardware (not optional).
    The fsqrt instruction generates the correct value for all inputs and
-   sets the appropriate floating point exceptions.  Extented checking is
+   sets the appropriate floating point exceptions.  Extended checking is
    only needed to set errno (via __kernel_standard) if the input value
    is negative.
    
diff --git a/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf.S b/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf.S
index 6d80ad93c4..590c24caf3 100644
--- a/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf.S
+++ b/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf.S
@@ -22,7 +22,7 @@
 /* float [fp1] sqrts (float x [fp1])
    Power4 (ISA V2.0) and above implement sqrt in hardware (not optional).
    The fsqrts instruction generates the correct value for all inputs and
-   sets the appropriate floating point exceptions.  Extented checking is
+   sets the appropriate floating point exceptions.  Extended checking is
    only needed to set errno (via __kernel_standard) if the input value
    is negative.
    
diff --git a/sysdeps/powerpc/powerpc32/power6/memcpy.S b/sysdeps/powerpc/powerpc32/power6/memcpy.S
index c1dd74df0b..203c979d1b 100644
--- a/sysdeps/powerpc/powerpc32/power6/memcpy.S
+++ b/sysdeps/powerpc/powerpc32/power6/memcpy.S
@@ -411,31 +411,31 @@ L(wdu):
      not.  For power4, power5 and power6 machines there is penalty for
      unaligned loads (src) that cross 32-byte, cacheline, or page
      boundaries. So we want to use simple (unaligned) loads where
-     posible but avoid them where we know the load would span a 32-byte
+     possible but avoid them where we know the load would span a 32-byte
      boundary.
 
      At this point we know we have at least 29 (32-3) bytes to copy
      the src is unaligned. and we may cross at least one 32-byte
-     boundary. Also we have the following regester values:
+     boundary. Also we have the following register values:
      r3 == adjusted dst, word aligned
      r4 == unadjusted src
      r5 == unadjusted len
      r9 == adjusted Word length
      r10 == src alignment (1-3)
-     r12 == adjuested src, not aligned
+     r12 == adjusted src, not aligned
      r31 == adjusted len
 
-     First we need to copy word upto but not crossing the next 32-byte
+     First we need to copy word up to but not crossing the next 32-byte
      boundary. Then perform aligned loads just before and just after
-     the boundary and use shifts and or to gernerate the next aligned
+     the boundary and use shifts and or to generate the next aligned
      word for dst. If more then 32 bytes remain we copy (unaligned src)
      the next 7 words and repeat the loop until less then 32-bytes
-     remaim.
+     remain.
 
      Then if more then 4 bytes remain we again use aligned loads,
      shifts and or to generate the next dst word. We then process the
      remaining words using unaligned loads as needed. Finally we check
-     if there more then 0 bytes (1-3) bytes remainting and use
+     if there more then 0 bytes (1-3) bytes remaining and use
      halfword and or byte load/stores to complete the copy.
 */
     mr      4,12      /* restore unaligned adjusted src ptr */
@@ -512,7 +512,7 @@ L(wdu_h32_4):
     addi  3,3,4
     .align  4
 L(wdu_h32_0):
-/*  set up for 32-byte boundry crossing word move and possibly 32-byte
+/*  set up for 32-byte boundary crossing word move and possibly 32-byte
     move loop.  */
     clrrwi  12,4,2
     cmplwi  cr5,31,32
diff --git a/sysdeps/powerpc/powerpc32/power7/memchr.S b/sysdeps/powerpc/powerpc32/power7/memchr.S
index 1412061206..3d8389e95c 100644
--- a/sysdeps/powerpc/powerpc32/power7/memchr.S
+++ b/sysdeps/powerpc/powerpc32/power7/memchr.S
@@ -44,7 +44,7 @@ L(proceed):
 	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
 	cmpli	cr6,r6,0      /* cr6 == Do we have padding?  */
 	lwz	r12,0(r8)     /* Load word from memory.  */
-	cmpb	r10,r12,r4    /* Check for BYTE's in WORD1.  */
+	cmpb	r10,r12,r4    /* Check for BYTEs in WORD1.  */
 	beq	cr6,L(proceed_no_padding)
 	slw	r10,r10,r6
 	srw	r10,r10,r6
diff --git a/sysdeps/powerpc/powerpc32/power7/memcmp.S b/sysdeps/powerpc/powerpc32/power7/memcmp.S
index f2cb1dfc2f..815e3c3953 100644
--- a/sysdeps/powerpc/powerpc32/power7/memcmp.S
+++ b/sysdeps/powerpc/powerpc32/power7/memcmp.S
@@ -73,7 +73,7 @@ EALIGN (BP_SYM(memcmp),4,0)
    Otherwise we know the two strings have the same alignment (but not
    yet word aligned).  So we force the string addresses to the next lower
    word boundary and special case this first word using shift left to
-   eliminate bits preceeding the first byte.  Since we want to join the
+   eliminate bits preceding the first byte.  Since we want to join the
    normal (word aligned) compare loop, starting at the second word,
    we need to adjust the length (rN) and special case the loop
    versioning for the first word. This insures that the loop count is
@@ -520,7 +520,7 @@ L(zeroLength):
    Otherwise we know that rSTR1 is not aready word aligned yet.
    So we can force the string addresses to the next lower word
    boundary and special case this first word using shift left to
-   eliminate bits preceeding the first byte.  Since we want to join the
+   eliminate bits preceding the first byte.  Since we want to join the
    normal (Wualigned) compare loop, starting at the second word,
    we need to adjust the length (rN) and special case the loop
    versioning for the first W. This insures that the loop count is
diff --git a/sysdeps/powerpc/powerpc32/power7/memrchr.S b/sysdeps/powerpc/powerpc32/power7/memrchr.S
index a6f495521a..9ff8d662f5 100644
--- a/sysdeps/powerpc/powerpc32/power7/memrchr.S
+++ b/sysdeps/powerpc/powerpc32/power7/memrchr.S
@@ -51,7 +51,7 @@ L(proceed):
 	cmpb	r10,r12,r4    /* Check for BYTE in WORD1.  */
 	slw	r10,r10,r0
 	srw	r10,r10,r0
-	cmplwi	cr7,r10,0     /* If r10 == 0, no BYTE's have been found.  */
+	cmplwi	cr7,r10,0     /* If r10 == 0, no BYTEs have been found.  */
 	bne	cr7,L(done)
 
 	/* Are we done already?  */
diff --git a/sysdeps/powerpc/powerpc32/power7/strcasecmp.S b/sysdeps/powerpc/powerpc32/power7/strcasecmp.S
index 2fcca034d5..52d73d9f89 100644
--- a/sysdeps/powerpc/powerpc32/power7/strcasecmp.S
+++ b/sysdeps/powerpc/powerpc32/power7/strcasecmp.S
@@ -39,8 +39,8 @@ ENTRY (BP_SYM (__STRCMP))
 #define rSTR1	r5	/* 1st string */
 #define rSTR2	r4	/* 2nd string */
 #define rLOCARG	r5	/* 3rd argument: locale_t */
-#define rCHAR1	r6	/* Byte readed from 1st string */
-#define rCHAR2	r7	/* Byte readed from 2nd string */
+#define rCHAR1	r6	/* Byte read from 1st string */
+#define rCHAR2	r7	/* Byte read from 2nd string */
 #define rADDR1	r8	/* Address of tolower(rCHAR1) */
 #define rADDR2	r12	/* Address of tolower(rCHAR2) */
 #define rLWR1	r8	/* Byte tolower(rCHAR1) */
diff --git a/sysdeps/powerpc/powerpc32/power7/strncmp.S b/sysdeps/powerpc/powerpc32/power7/strncmp.S
index 201651902c..3629783bc0 100644
--- a/sysdeps/powerpc/powerpc32/power7/strncmp.S
+++ b/sysdeps/powerpc/powerpc32/power7/strncmp.S
@@ -55,7 +55,7 @@ EALIGN (BP_SYM(strncmp),5,0)
 	cmplwi	cr1,rN,0
 	lis	rFEFE,-0x101
 	bne	L(unaligned)
-/* We are word alligned so set up for two loops.  first a word
+/* We are word aligned so set up for two loops.  first a word
    loop, then fall into the byte loop if any residual.  */
 	srwi.	rTMP,rN,2
 	clrlwi	rN,rN,30
diff --git a/sysdeps/powerpc/powerpc32/strncmp.S b/sysdeps/powerpc/powerpc32/strncmp.S
index 149e51a132..d9e274b69b 100644
--- a/sysdeps/powerpc/powerpc32/strncmp.S
+++ b/sysdeps/powerpc/powerpc32/strncmp.S
@@ -49,7 +49,7 @@ EALIGN (BP_SYM(strncmp), 4, 0)
 	cmplwi	cr1, rN, 0
 	lis	rFEFE, -0x101
 	bne	L(unaligned)
-/* We are word alligned so set up for two loops.  first a word
+/* We are word aligned so set up for two loops.  first a word
    loop, then fall into the byte loop if any residual.  */
 	srwi.	rTMP, rN, 2
 	clrlwi	rN, rN, 30