author    Ulrich Drepper <drepper@redhat.com>  2010-01-18 12:43:47 -0800
committer Ulrich Drepper <drepper@redhat.com>  2010-01-18 12:43:47 -0800
commit    d6ac9329b3baf72e1f7a6dfd10ff5236668c2d10
tree      06ee7350aa40aad10b93ef234d069d1a44207e33
parent    057edf90e015117bcb7c7cf2e895359e7244dbf8
Fix whitespace issues.
Diffstat (limited to 'sysdeps/powerpc/powerpc32/cell/memcpy.S')
 sysdeps/powerpc/powerpc32/cell/memcpy.S | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/sysdeps/powerpc/powerpc32/cell/memcpy.S b/sysdeps/powerpc/powerpc32/cell/memcpy.S
index e6c076cbe1..cc1da99fd9 100644
--- a/sysdeps/powerpc/powerpc32/cell/memcpy.S
+++ b/sysdeps/powerpc/powerpc32/cell/memcpy.S
@@ -43,16 +43,16 @@
 .align  7
 
 EALIGN (BP_SYM (memcpy), 5, 0)
-        CALL_MCOUNT
+	CALL_MCOUNT
 
 	dcbt	0,r4		/* Prefetch ONE SRC cacheline  */
 	cmplwi	cr1,r5,16	/* is size < 16 ?  */
-	mr	r6,r3		
+	mr	r6,r3
 	blt+	cr1,.Lshortcopy
 
 .Lbigcopy:
 	neg	r8,r3		/* LS 3 bits = # bytes to 8-byte dest bdry  */
-        clrlwi  r8,r8,32-4	/* aling to 16byte boundary  */
+	clrlwi  r8,r8,32-4	/* aling to 16byte boundary  */
 	sub     r7,r4,r3
 	cmplwi	cr0,r8,0
 	beq+	.Ldst_aligned
@@ -112,8 +112,8 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 
 .LprefetchSRC:
 	dcbt    r12,r4
-        addi    r12,r12,128
-        bdnz    .LprefetchSRC
+	addi    r12,r12,128
+	bdnz    .LprefetchSRC
 
 .Lnocacheprefetch:
 	mtctr	r7
@@ -122,7 +122,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 	beq	cr6,.Lcachelinealigned
 
 .Laligntocacheline:
-	lfd 	fp9,0x08(r4)
+	lfd	fp9,0x08(r4)
 	lfdu	fp10,0x10(r4)
 	stfd	fp9,0x08(r6)
 	stfdu	fp10,0x10(r6)
@@ -131,10 +131,10 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 
 .Lcachelinealigned:		/* copy while cache lines  */
 
-	blt- 	cr1,.Llessthancacheline	/* size <128  */
+	blt-	cr1,.Llessthancacheline	/* size <128  */
 
 .Louterloop:
-        cmpwi   r11,0
+	cmpwi   r11,0
 	mtctr	r11
 	beq-	.Lendloop
 
@@ -142,7 +142,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 
 .align	4
 	/* Copy whole cachelines, optimized by prefetching SRC cacheline  */
-.Lloop: 			/* Copy aligned body  */
+.Lloop:				/* Copy aligned body  */
 	dcbt	r12,r4		/* PREFETCH SOURCE some cache lines ahead  */
 	lfd	fp9, 0x08(r4)
 	dcbz	r11,r6
@@ -186,7 +186,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 	beq-	.Lendloop2
 	mtctr	r10
 
-.Lloop2: 			/* Copy aligned body  */
+.Lloop2:			/* Copy aligned body  */
 	lfd	fp9, 0x08(r4)
 	lfd	fp10, 0x10(r4)
 	lfd	fp11, 0x18(r4)
@@ -206,7 +206,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 	mtctr	r7
 
 .Lcopy_remaining:
-	lfd 	fp9,0x08(r4)
+	lfd	fp9,0x08(r4)
 	lfdu	fp10,0x10(r4)
 	stfd	fp9,0x08(r6)
 	stfdu	fp10,0x10(r6)
@@ -214,7 +214,7 @@ EALIGN (BP_SYM (memcpy), 5, 0)
 
 .Ldo_lt16:			/* less than 16 ?  */
 	cmplwi	cr0,r5,0	/* copy remaining bytes (0-15)  */
-	beqlr+			/* no rest to copy  */	
+	beqlr+			/* no rest to copy  */
 	addi	r4,r4,8
 	addi	r6,r6,8
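
Note on the routine this whitespace fix touches: the Cell memcpy copies whole
128-byte cache lines, issuing dcbt to prefetch the source several lines ahead
(.LprefetchSRC, .Lloop) and dcbz to establish each destination line in the
cache without first reading it from memory. Below is a minimal C sketch of the
same prefetch-ahead pattern, assuming a 128-byte line and an illustrative
prefetch distance; copy_lines and PREFETCH_AHEAD are hypothetical names, not
part of glibc, and the dcbz trick has no portable C equivalent.

	#include <stddef.h>
	#include <string.h>

	#define CACHE_LINE     128  /* Cell cache-line size, per the "size <128" check above */
	#define PREFETCH_AHEAD 3    /* illustrative distance in cache lines (assumption) */

	static void copy_lines(char *dst, const char *src, size_t nlines)
	{
		for (size_t i = 0; i < nlines; i++) {
			/* Touch a source line ahead of the copy, like dcbt in .Lloop;
			   rw=0 requests a read prefetch, locality=0 marks it
			   non-temporal (streamed once, not reused).  */
			__builtin_prefetch(src + PREFETCH_AHEAD * CACHE_LINE, 0, 0);
			memcpy(dst, src, CACHE_LINE);  /* stands in for the lfd/stfd pairs */
			src += CACHE_LINE;
			dst += CACHE_LINE;
		}
	}

Keeping the prefetch a fixed number of lines ahead of the stores is what hides
the memory latency; the assembly additionally unrolls each line into batches
of lfd/stfd doubleword pairs so the loads themselves pipeline.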