author    Ulrich Drepper <drepper@gmail.com>  2011-04-22 21:38:13 -0400
committer Ulrich Drepper <drepper@gmail.com>  2011-04-22 21:38:13 -0400
commit    7a41d99a35ca4c13ad2db1bc3894e1a8ec70721b (patch)
tree      9d324bc3400ad38820966e50ca45f0889ec99581 /sysdeps/powerpc/powerpc64/memcpy.S
parent    ded5b9b7c7c0afc7edc520911d76558564638bda (diff)
download  glibc-7a41d99a35ca4c13ad2db1bc3894e1a8ec70721b.tar.gz
          glibc-7a41d99a35ca4c13ad2db1bc3894e1a8ec70721b.tar.xz
          glibc-7a41d99a35ca4c13ad2db1bc3894e1a8ec70721b.zip
Fix whitespaces.
Diffstat (limited to 'sysdeps/powerpc/powerpc64/memcpy.S')
-rw-r--r--  sysdeps/powerpc/powerpc64/memcpy.S | 72
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/sysdeps/powerpc/powerpc64/memcpy.S b/sysdeps/powerpc/powerpc64/memcpy.S
index 15869e2ee3..28fc6cf5b4 100644
--- a/sysdeps/powerpc/powerpc64/memcpy.S
+++ b/sysdeps/powerpc/powerpc64/memcpy.S
@@ -24,10 +24,10 @@
 /* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
    Returns 'dst'.
 
-   Memcpy handles short copies (< 32-bytes) using a binary move blocks 
-   (no loops) of lwz/stw.  The tail (remaining 1-3) bytes is handled 
-   with the appropriate combination of byte and halfword load/stores. 
-   There is minimal effort to optimize the alignment of short moves.  
+   Memcpy handles short copies (< 32 bytes) using binary block moves
+   (no loops) of lwz/stw.  The tail (remaining 1-3 bytes) is handled
+   with the appropriate combination of byte and halfword load/stores.
+   There is minimal effort to optimize the alignment of short moves.
    The 64-bit implementations of POWER3 and POWER4 do a reasonable job
    of handling unaligned load/stores that do not cross 32-byte boundaries.
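
As a rough C sketch of the strategy this comment describes (the function name and layout are invented for illustration, not taken from the assembly), the flow is: loop-free handling below 32 bytes, then 0-7 bytes to doubleword-align the destination, a doubleword main copy, and a 0-7 byte tail.

#include <stddef.h>
#include <stdint.h>

void *
sketch_memcpy (void *dstp, const void *srcp, size_t len)
{
  unsigned char *dst = dstp;
  const unsigned char *src = srcp;

  if (len < 32)
    {
      /* Short case: the assembly uses loop-free binary (1,2,4,8) moves;
         a plain byte loop stands in for that here.  */
      while (len--)
        *dst++ = *src++;
      return dstp;
    }

  /* Move 0-7 bytes so that dst becomes doubleword (8-byte) aligned.  */
  size_t head = (size_t) (-(uintptr_t) dst & 7);
  len -= head;
  while (head--)
    *dst++ = *src++;

  /* Main copy, 8 bytes at a time; the assembly unrolls this 4x and has a
     separate shift-and-merge path when src is not also 8-byte aligned.  */
  for (size_t n = len >> 3; n > 0; n--)
    {
      __builtin_memcpy (dst, src, 8);   /* one constant-size 8-byte move */
      dst += 8;
      src += 8;
    }

  /* Copy the remaining 0-7 byte tail.  */
  for (len &= 7; len > 0; len--)
    *dst++ = *src++;
  return dstp;
}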
 
@@ -49,13 +49,13 @@ EALIGN (BP_SYM (memcpy), 5, 0)
    clrldi 10,4,61	/* check alignment of src.  */
     cmpldi cr6,5,8
     ble-  cr1,.L2	/* If move < 32 bytes use short move code.  */
-    cmpld cr6,10,11     
+    cmpld cr6,10,11
     mr    12,4
     srdi  9,5,3		/* Number of full double words remaining.  */
     mtcrf 0x01,0
     mr    31,5
     beq   .L0
-  
+
     subf  31,0,5
   /* Move 0-7 bytes as needed to get the destination doubleword aligned.  */
 1:  bf    31,2f
@@ -74,17 +74,17 @@ EALIGN (BP_SYM (memcpy), 5, 0)
     stw   6,0(3)
     addi  3,3,4
 0:
-    clrldi 10,12,61	/* check alignement of src again.  */     
+    clrldi 10,12,61	/* check alignment of src again.  */
     srdi  9,31,3	/* Number of full double words remaining.  */
-    
+
   /* Copy doublewords from source to destination, assuming the
      destination is aligned on a doubleword boundary.
 
      At this point we know there are at least 25 bytes left (32-7) to copy.
-     The next step is to determine if the source is also doubleword aligned. 
+     The next step is to determine if the source is also doubleword aligned.
     If not, branch to the unaligned move code at .L6, which uses
      a load, shift, store strategy.
-     
+
     Otherwise source and destination are doubleword aligned, and we can
     use the optimized doubleword copy loop.  */
 .L0:
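
A hypothetical C illustration of the choice made at .L0 (all names here are invented): if source and destination share doubleword alignment, the plain doubleword loop runs; otherwise a load, shift, store scheme like the one below merges pairs of aligned source doublewords before each aligned store, assuming big-endian byte order as on these POWER machines.

#include <stddef.h>
#include <stdint.h>

/* dst is doubleword aligned; src is misaligned by (src & 7) = 1-7.
   Copies ndwords doublewords.  Like the assembly, the aligned loads may
   fetch a few bytes past the requested range, but never beyond the
   aligned doubleword that already holds needed data.  */
static void
copy_shifted (uint64_t *dst, const unsigned char *src, size_t ndwords)
{
  unsigned int off = (uintptr_t) src & 7;                 /* 1-7 by assumption */
  const uint64_t *asrc = (const uint64_t *) (src - off);  /* aligned pointer */
  uint64_t prev = *asrc++;

  for (size_t i = 0; i < ndwords; i++)
    {
      uint64_t next = *asrc++;
      /* Big-endian: the high bytes of the result come from the tail of
         PREV, the low bytes from the head of NEXT.  */
      dst[i] = (prev << (8 * off)) | (next >> (8 * (8 - off)));
      prev = next;
    }
}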
@@ -94,16 +94,16 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 
   /* Move doublewords where destination and source are DW aligned.
     Use an unrolled loop to copy 4 doublewords (32 bytes) per iteration.
-     If the copy is not an exact multiple of 32 bytes, 1-3 
+     If the copy is not an exact multiple of 32 bytes, 1-3
      doublewords are copied as needed to set up the main loop.  After
-     the main loop exits there may be a tail of 1-7 bytes. These byte are 
+     the main loop exits there may be a tail of 1-7 bytes. These bytes are
      copied a word/halfword/byte at a time as needed to preserve alignment.  */
 
     srdi  8,31,5
     cmpldi	cr1,9,4
     cmpldi	cr6,11,0
     mr    11,12
-    
+
     bf    30,1f
     ld    6,0(12)
     ld    7,8(12)
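
Sketched in C, and assuming the same peel-then-unroll shape the comment above describes (names invented for illustration):

#include <stddef.h>
#include <stdint.h>

/* Both pointers are doubleword aligned; copies ndwords doublewords.  */
static void
copy_aligned_dwords (uint64_t *dst, const uint64_t *src, size_t ndwords)
{
  /* Peel 0-3 doublewords (the assembly tests the low bits of the count
     via the condition register) so the loop count is a multiple of 4.  */
  if (ndwords & 2)
    {
      dst[0] = src[0];
      dst[1] = src[1];
      dst += 2;
      src += 2;
    }
  if (ndwords & 1)
    *dst++ = *src++;

  /* Main loop: 4 doublewords (32 bytes) per iteration.  */
  for (size_t n = ndwords >> 2; n > 0; n--)
    {
      dst[0] = src[0];
      dst[1] = src[1];
      dst[2] = src[2];
      dst[3] = src[3];
      dst += 4;
      src += 4;
    }
}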
@@ -114,7 +114,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
     addi  10,3,16
     bf    31,4f
     ld    0,16(12)
-    std   0,16(3)    
+    std   0,16(3)
     blt   cr1,3f
     addi  11,12,24
     addi  10,3,24
@@ -128,7 +128,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
     addi  11,12,8
     std   6,0(3)
     addi  10,3,8
-    
+
     .align  4
 4:
     ld    6,0(11)
@@ -143,7 +143,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
     std   0,24(10)
     addi  10,10,32
     bdnz  4b
-3:  
+3:
 
     rldicr 0,31,0,60
     mtcrf 0x01,31
@@ -151,7 +151,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 .L9:
     add   3,3,0
     add   12,12,0
-    
+
 /*  At this point we have a tail of 0-7 bytes and we know that the
    destination is doubleword aligned.  */
 4:  bf    29,2f
@@ -172,29 +172,29 @@ EALIGN (BP_SYM (memcpy), 5, 0)
     ld 31,-8(1)
     ld 3,-16(1)
     blr
-       
-/* Copy up to 31 bytes.  This divided into two cases 0-8 bytes and 9-31 
-   bytes.  Each case is handled without loops, using binary (1,2,4,8) 
-   tests.  
-   
+
+/* Copy up to 31 bytes.  This is divided into two cases: 0-8 bytes and 9-31
+   bytes.  Each case is handled without loops, using binary (1,2,4,8)
+   tests.
+
    In the short (0-8 byte) case no attempt is made to force alignment
-   of either source or destination.  The hardware will handle the 
-   unaligned load/stores with small delays for crossing 32- 64-byte, and 
+   of either source or destination.  The hardware will handle the
+   unaligned load/stores with small delays for crossing 32-, 64-byte, and
    4096-byte boundaries. Since these short moves are unlikely to be
-   unaligned or cross these boundaries, the overhead to force 
+   unaligned or cross these boundaries, the overhead to force
    alignment is not justified.
-   
+
    The longer (9-31 byte) move is more likely to cross 32- or 64-byte
    boundaries.  Since only loads are sensitive to the 32-/64-byte
-   boundaries it is more important to align the source then the 
+   boundaries it is more important to align the source than the
    destination.  If the source is not already word aligned, we first
-   move 1-3 bytes as needed.  Since we are only word aligned we don't 
-   use double word load/stores to insure that all loads are aligned. 
+   move 1-3 bytes as needed.  Since we are only word aligned we don't
+   use doubleword load/stores to ensure that all loads are aligned.
    While the destination and stores may still be unaligned, this
    is only an issue for page (4096 byte boundary) crossing, which
    should be rare for these short moves.  The hardware handles this
-   case automatically with a small delay.  */ 
-   
+   case automatically with a small delay.  */
+
     .align  4
 .L2:
     mtcrf 0x01,5
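
A hedged C rendering of the binary (1,2,4,8) test idea described above; it compresses the 0-8 and 9-31 byte paths into one illustration and, unlike the assembly, uses moves wider than a word.

#include <stddef.h>

/* len < 32.  Each set bit of LEN triggers exactly one fixed-size move;
   __builtin_memcpy with a constant size becomes a handful of plain
   load/store instructions.  */
static void
copy_below32 (unsigned char *dst, const unsigned char *src, size_t len)
{
  if (len & 16) { __builtin_memcpy (dst, src, 16); dst += 16; src += 16; }
  if (len & 8)  { __builtin_memcpy (dst, src, 8);  dst += 8;  src += 8;  }
  if (len & 4)  { __builtin_memcpy (dst, src, 4);  dst += 4;  src += 4;  }
  if (len & 2)  { __builtin_memcpy (dst, src, 2);  dst += 2;  src += 2;  }
  if (len & 1)  *dst = *src;
}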
@@ -257,11 +257,11 @@ EALIGN (BP_SYM (memcpy), 5, 0)
     lwz   6,0(12)
     addi  12,12,4
     stw   6,0(3)
-    addi  3,3,4    
+    addi  3,3,4
 2:  /* Move 2-3 bytes.  */
     bf    30,1f
     lhz   6,0(12)
-    sth   6,0(3) 
+    sth   6,0(3)
     bf    31,0f
     lbz   7,2(12)
     stb   7,2(3)
@@ -282,7 +282,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
     mr    12,4
     bne   cr6,4f
 /* Would have liked to use ld/std here but the 630 processors are
-   slow for load/store doubles that are not at least word aligned.  
+   slow for load/store doubles that are not at least word aligned.
   Unaligned load/store word executes with only a 1 cycle penalty.  */
     lwz   6,0(4)
     lwz   7,4(4)
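
The same point as a small, hypothetical C helper: 8 bytes move as two aligned word accesses instead of one misaligned doubleword access, which was slow on the 630/POWER3.

#include <stdint.h>

/* src and dst are word (4-byte) but possibly not doubleword aligned.  */
static void
copy8_wordwise (uint32_t *dst, const uint32_t *src)
{
  uint32_t w0 = src[0];   /* two word loads, like the lwz pair above */
  uint32_t w1 = src[1];
  dst[0] = w0;            /* two word stores (stw) */
  dst[1] = w1;
}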
@@ -298,14 +298,14 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 6:
     bf    30,5f
     lhz   7,4(4)
-    sth   7,4(3) 
+    sth   7,4(3)
     bf    31,0f
     lbz   8,6(4)
     stb   8,6(3)
     ld 3,-16(1)
     blr
     .align  4
-5:  
+5:
     bf    31,0f
     lbz   6,4(4)
     stb   6,4(3)