Diffstat (limited to 'sysdeps/powerpc/powerpc32/power4')
-rw-r--r-- | sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S  |   4
-rw-r--r-- | sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S |   4
-rw-r--r-- | sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S |   6
-rw-r--r-- | sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S    |   6
-rw-r--r-- | sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S   |   6
-rw-r--r-- | sysdeps/powerpc/powerpc32/power4/hp-timing.c     |   2
-rw-r--r-- | sysdeps/powerpc/powerpc32/power4/memcmp.S        | 126
-rw-r--r-- | sysdeps/powerpc/powerpc32/power4/memset.S        |   2
-rw-r--r-- | sysdeps/powerpc/powerpc32/power4/wordcopy.c      |   8
9 files changed, 82 insertions, 82 deletions
diff --git a/sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S b/sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S index 2ac986db8b..55b2850fd1 100644 --- a/sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S +++ b/sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S @@ -20,7 +20,7 @@ #include <math_ldbl_opt.h> /* long long int[r3, r4] __llrint (double x[fp1]) */ -ENTRY (__llrint) +ENTRY (__llrint) CALL_MCOUNT stwu r1,-16(r1) cfi_adjust_cfa_offset (16) @@ -31,7 +31,7 @@ ENTRY (__llrint) nop lwz r3,8(r1) lwz r4,12(r1) - addi r1,r1,16 + addi r1,r1,16 blr END (__llrint) diff --git a/sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S b/sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S index 98e3aafc8e..cc80fcb02a 100644 --- a/sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S +++ b/sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S @@ -19,7 +19,7 @@ #include <sysdep.h> /* long long int[r3, r4] __llrintf (float x[fp1]) */ -ENTRY (__llrintf) +ENTRY (__llrintf) CALL_MCOUNT stwu r1,-16(r1) cfi_adjust_cfa_offset (16) @@ -30,7 +30,7 @@ ENTRY (__llrintf) nop lwz r3,8(r1) lwz r4,12(r1) - addi r1,r1,16 + addi r1,r1,16 blr END (__llrintf) diff --git a/sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S b/sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S index 07beb0a568..631180f072 100644 --- a/sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S +++ b/sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S @@ -29,11 +29,11 @@ .section ".text" /* long [r3] lround (float x [fp1]) - IEEE 1003.1 lround function. IEEE specifies "round to the nearest + IEEE 1003.1 lround function. IEEE specifies "round to the nearest integer value, rounding halfway cases away from zero, regardless of the current rounding mode." However PowerPC Architecture defines - "round to Nearest" as "Choose the best approximation. In case of a - tie, choose the one that is even (least significant bit o).". + "round to Nearest" as "Choose the best approximation. In case of a + tie, choose the one that is even (least significant bit o).". So we can't use the PowerPC "round to Nearest" mode. Instead we set "round toward Zero" mode and round by adding +-0.5 before rounding to the integer value. diff --git a/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S b/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S index 4f1c17680d..3648e4a69f 100644 --- a/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S +++ b/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt.S @@ -25,20 +25,20 @@ sets the appropriate floating point exceptions. Extended checking is only needed to set errno (via __kernel_standard) if the input value is negative. - + The fsqrt will set FPCC and FU (Floating Point Unordered or NaN to indicated that the input value was negative or NaN. Use Move to Condition Register from FPSCR to copy the FPCC field to cr1. The branch on summary overflow transfers control to w_sqrt to process any error conditions. Otherwise we can return the result directly. - + This part of the function is a leaf routine, so no need to stack a frame or execute prologue/epilogue code. This means it is safe to transfer directly to w_sqrt as long as the input value (f1) is preserved. Putting the sqrt result into f2 (double parameter 2) allows passing both the input value and sqrt result into the extended wrapper so there is no need to recompute. - + This tactic avoids the overhead of stacking a frame for the normal (non-error) case. Until gcc supports prologue shrink-wrapping this is the best we can do. 
*/ diff --git a/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S b/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S index 0da5b7a8e3..153843c7cd 100644 --- a/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S +++ b/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf.S @@ -25,20 +25,20 @@ sets the appropriate floating point exceptions. Extended checking is only needed to set errno (via __kernel_standard) if the input value is negative. - + The fsqrts will set FPCC and FU (Floating Point Unordered or NaN to indicated that the input value was negative or NaN. Use Move to Condition Register from FPSCR to copy the FPCC field to cr1. The branch on summary overflow transfers control to w_sqrt to process any error conditions. Otherwise we can return the result directly. - + This part of the function is a leaf routine, so no need to stack a frame or execute prologue/epilogue code. This means it is safe to transfer directly to w_sqrt as long as the input value (f1) is preserved. Putting the sqrt result into f2 (float parameter 2) allows passing both the input value and sqrt result into the extended wrapper so there is no need to recompute. - + This tactic avoids the overhead of stacking a frame for the normal (non-error) case. Until gcc supports prologue shrink-wrapping this is the best we can do. */ diff --git a/sysdeps/powerpc/powerpc32/power4/hp-timing.c b/sysdeps/powerpc/powerpc32/power4/hp-timing.c index f54a5f879b..5073adb0e5 100644 --- a/sysdeps/powerpc/powerpc32/power4/hp-timing.c +++ b/sysdeps/powerpc/powerpc32/power4/hp-timing.c @@ -1,4 +1,4 @@ -/* Support for high precision, low overhead timing functions. +/* Support for high precision, low overhead timing functions. powerpc64 version. Copyright (C) 2005-2013 Free Software Foundation, Inc. This file is part of the GNU C Library. diff --git a/sysdeps/powerpc/powerpc32/power4/memcmp.S b/sysdeps/powerpc/powerpc32/power4/memcmp.S index edec7ab274..d7050a2f73 100644 --- a/sysdeps/powerpc/powerpc32/power4/memcmp.S +++ b/sysdeps/powerpc/powerpc32/power4/memcmp.S @@ -53,17 +53,17 @@ EALIGN (memcmp, 4, 0) blt cr1, L(bytealigned) stwu 1,-64(1) cfi_adjust_cfa_offset(64) - stw r31,48(1) + stw r31,48(1) cfi_offset(31,(48-64)) - stw r30,44(1) + stw r30,44(1) cfi_offset(30,(44-64)) bne L(unaligned) /* At this point we know both strings have the same alignment and the compare length is at least 8 bytes. rBITDIF contains the low order 2 bits of rSTR1 and cr5 contains the result of the logical compare - of rBITDIF to 0. If rBITDIF == 0 then we are already word + of rBITDIF to 0. If rBITDIF == 0 then we are already word aligned and can perform the word aligned loop. - + Otherwise we know the two strings have the same alignment (but not yet word aligned). So we force the string addresses to the next lower word boundary and special case this first word using shift left to @@ -143,7 +143,7 @@ L(Waligned): beq L(dP4) bgt cr1, L(dP3) beq cr1, L(dP2) - + /* Remainder is 4 */ .align 4 L(dP1): @@ -152,7 +152,7 @@ L(dP1): (8-15 byte compare), we want to use only volatile registers. This means we can avoid restoring non-volatile registers since we did not change any on the early exit path. The key here is the non-early - exit path only cares about the condition code (cr5), not about which + exit path only cares about the condition code (cr5), not about which register pair was used. 
*/ lwz rWORD5, 0(rSTR1) lwz rWORD6, 0(rSTR2) @@ -170,7 +170,7 @@ L(dP1e): cmplw cr6, rWORD5, rWORD6 bne cr5, L(dLcr5) bne cr0, L(dLcr0) - + lwzu rWORD7, 16(rSTR1) lwzu rWORD8, 16(rSTR2) bne cr1, L(dLcr1) @@ -188,7 +188,7 @@ L(dP1x): bne L(d00) li rRTN, 0 blr - + /* Remainder is 8 */ .align 4 L(dP2): @@ -230,7 +230,7 @@ L(dP2x): bne L(d00) li rRTN, 0 blr - + /* Remainder is 12 */ .align 4 L(dP3): @@ -273,7 +273,7 @@ L(dP3x): bne L(d00) li rRTN, 0 blr - + /* Count is a multiple of 16, remainder is 0 */ .align 4 L(dP4): @@ -316,8 +316,8 @@ L(dLoop3): lwzu rWORD8, 16(rSTR2) bne- cr1, L(dLcr1) cmplw cr0, rWORD1, rWORD2 - bdnz+ L(dLoop) - + bdnz+ L(dLoop) + L(dL4): cmplw cr1, rWORD3, rWORD4 bne cr6, L(dLcr6) @@ -332,7 +332,7 @@ L(d24): bne cr6, L(dLcr6) L(d14): slwi. r12, rN, 3 - bne cr5, L(dLcr5) + bne cr5, L(dLcr5) L(d04): lwz r30,44(1) lwz r31,48(1) @@ -341,10 +341,10 @@ L(d04): beq L(zeroLength) /* At this point we have a remainder of 1 to 3 bytes to compare. Since we are aligned it is safe to load the whole word, and use - shift right to eliminate bits beyond the compare length. */ + shift right to eliminate bits beyond the compare length. */ L(d00): lwz rWORD1, 4(rSTR1) - lwz rWORD2, 4(rSTR2) + lwz rWORD2, 4(rSTR2) srw rWORD1, rWORD1, rN srw rWORD2, rWORD2, rN cmplw rWORD1,rWORD2 @@ -392,22 +392,22 @@ L(dLcr5x): bgtlr cr5 li rRTN, -1 blr - + .align 4 L(bytealigned): cfi_adjust_cfa_offset(-64) mtctr rN /* Power4 wants mtctr 1st in dispatch group */ /* We need to prime this loop. This loop is swing modulo scheduled - to avoid pipe delays. The dependent instruction latencies (load to + to avoid pipe delays. The dependent instruction latencies (load to compare to conditional branch) is 2 to 3 cycles. In this loop each dispatch group ends in a branch and takes 1 cycle. Effectively - the first iteration of the loop only serves to load operands and - branches based on compares are delayed until the next loop. + the first iteration of the loop only serves to load operands and + branches based on compares are delayed until the next loop. So we must precondition some registers and condition codes so that we don't exit the loop early on the first iteration. */ - + lbz rWORD1, 0(rSTR1) lbz rWORD2, 0(rSTR2) bdz- L(b11) @@ -427,7 +427,7 @@ L(bLoop): cmplw cr6, rWORD5, rWORD6 bdz- L(b3i) - + lbzu rWORD3, 1(rSTR1) lbzu rWORD4, 1(rSTR2) bne- cr1, L(bLcr1) @@ -441,10 +441,10 @@ L(bLoop): cmplw cr1, rWORD3, rWORD4 bdnz+ L(bLoop) - + /* We speculatively loading bytes before we have tested the previous bytes. But we must avoid overrunning the length (in the ctr) to - prevent these speculative loads from causing a segfault. In this + prevent these speculative loads from causing a segfault. In this case the loop will exit early (before the all pending bytes are tested. In this case we must complete the pending operations before returning. */ @@ -488,7 +488,7 @@ L(bx56): nop L(b12): bne- cr0, L(bx12) -L(bx34): +L(bx34): sub rRTN, rWORD3, rWORD4 blr @@ -497,7 +497,7 @@ L(bx12): sub rRTN, rWORD1, rWORD2 blr - .align 4 + .align 4 L(zeroLengthReturn): L(zeroLength): @@ -509,9 +509,9 @@ L(zeroLength): /* At this point we know the strings have different alignment and the compare length is at least 8 bytes. rBITDIF contains the low order 2 bits of rSTR1 and cr5 contains the result of the logical compare - of rBITDIF to 0. If rBITDIF == 0 then rStr1 is word aligned and can + of rBITDIF to 0. If rBITDIF == 0 then rStr1 is word aligned and can perform the Wunaligned loop. 
- + Otherwise we know that rSTR1 is not aready word aligned yet. So we can force the string addresses to the next lower word boundary and special case this first word using shift left to @@ -531,13 +531,13 @@ L(zeroLength): #define rE r0 /* Right rotation temp for rWORD6. */ #define rG r12 /* Right rotation temp for rWORD8. */ L(unaligned): - stw r29,40(r1) - cfi_offset(r29,(40-64)) + stw r29,40(r1) + cfi_offset(r29,(40-64)) clrlwi rSHL, rSTR2, 30 - stw r28,36(r1) + stw r28,36(r1) cfi_offset(r28,(36-64)) beq cr5, L(Wunaligned) - stw r27,32(r1) + stw r27,32(r1) cfi_offset(r27,(32-64)) /* Adjust the logical start of rSTR2 to compensate for the extra bits in the 1st rSTR1 W. */ @@ -545,19 +545,19 @@ L(unaligned): /* But do not attempt to address the W before that W that contains the actual start of rSTR2. */ clrrwi rSTR2, rSTR2, 2 - stw r26,28(r1) + stw r26,28(r1) cfi_offset(r26,(28-64)) /* Compute the left/right shift counts for the unalign rSTR2, - compensating for the logical (W aligned) start of rSTR1. */ + compensating for the logical (W aligned) start of rSTR1. */ clrlwi rSHL, r27, 30 - clrrwi rSTR1, rSTR1, 2 - stw r25,24(r1) + clrrwi rSTR1, rSTR1, 2 + stw r25,24(r1) cfi_offset(r25,(24-64)) slwi rSHL, rSHL, 3 cmplw cr5, r27, rSTR2 add rN, rN, rBITDIF slwi r11, rBITDIF, 3 - stw r24,20(r1) + stw r24,20(r1) cfi_offset(r24,(20-64)) subfic rSHR, rSHL, 32 srwi rTMP, rN, 4 /* Divide by 16 */ @@ -633,16 +633,16 @@ L(duPs4): compare length is at least 8 bytes. */ .align 4 L(Wunaligned): - stw r27,32(r1) + stw r27,32(r1) cfi_offset(r27,(32-64)) clrrwi rSTR2, rSTR2, 2 - stw r26,28(r1) + stw r26,28(r1) cfi_offset(r26,(28-64)) srwi rTMP, rN, 4 /* Divide by 16 */ - stw r25,24(r1) + stw r25,24(r1) cfi_offset(r25,(24-64)) andi. rBITDIF, rN, 12 /* Get the W remainder */ - stw r24,20(r1) + stw r24,20(r1) cfi_offset(r24,(20-64)) slwi rSHL, rSHL, 3 lwz rWORD6, 0(rSTR2) @@ -656,7 +656,7 @@ L(Wunaligned): mtctr rTMP /* Power4 wants mtctr 1st in dispatch group */ bgt cr1, L(duP3) beq cr1, L(duP2) - + /* Remainder is 4 */ .align 4 L(duP1): @@ -687,7 +687,7 @@ L(duP1e): bne cr0, L(duLcr0) or rWORD6, rE, rF cmplw cr6, rWORD5, rWORD6 - b L(duLoop3) + b L(duLoop3) .align 4 /* At this point we exit early with the first word compare complete and remainder of 0 to 3 bytes. See L(du14) for details on @@ -751,7 +751,7 @@ L(duP2x): lwz rWORD2, 4(rSTR2) srw rA, rWORD2, rSHR b L(dutrim) - + /* Remainder is 12 */ .align 4 L(duP3): @@ -801,7 +801,7 @@ L(duP3x): lwz rWORD2, 4(rSTR2) srw rA, rWORD2, rSHR b L(dutrim) - + /* Count is a multiple of 16, remainder is 0 */ .align 4 L(duP4): @@ -867,8 +867,8 @@ L(duLoop3): srw rG, rWORD8, rSHR slw rB, rWORD8, rSHL or rWORD8, rG, rH - bdnz+ L(duLoop) - + bdnz+ L(duLoop) + L(duL4): bne cr1, L(duLcr1) cmplw cr1, rWORD3, rWORD4 @@ -886,9 +886,9 @@ L(du14): slwi. rN, rN, 3 bne cr5, L(duLcr5) /* At this point we have a remainder of 1 to 3 bytes to compare. We use - shift right to eliminate bits beyond the compare length. + shift right to eliminate bits beyond the compare length. - However it may not be safe to load rWORD2 which may be beyond the + However it may not be safe to load rWORD2 which may be beyond the string length. So we compare the bit length of the remainder to the right shift count (rSHR). If the bit count is less than or equal we do not need to load rWORD2 (all significant bits are already in @@ -903,13 +903,13 @@ L(du14): L(dutrim): lwz rWORD1, 4(rSTR1) lwz r31,48(1) - subfic rN, rN, 32 /* Shift count is 32 - (rN * 8). 
*/ + subfic rN, rN, 32 /* Shift count is 32 - (rN * 8). */ or rWORD2, rA, rB lwz r30,44(1) lwz r29,40(r1) srw rWORD1, rWORD1, rN srw rWORD2, rWORD2, rN - lwz r28,36(r1) + lwz r28,36(r1) lwz r27,32(r1) cmplw rWORD1,rWORD2 li rRTN,0 @@ -923,9 +923,9 @@ L(duLcr0): lwz r31,48(1) lwz r30,44(1) li rRTN, 1 - bgt cr0, L(dureturn29) + bgt cr0, L(dureturn29) lwz r29,40(r1) - lwz r28,36(r1) + lwz r28,36(r1) li rRTN, -1 b L(dureturn27) .align 4 @@ -933,9 +933,9 @@ L(duLcr1): lwz r31,48(1) lwz r30,44(1) li rRTN, 1 - bgt cr1, L(dureturn29) + bgt cr1, L(dureturn29) lwz r29,40(r1) - lwz r28,36(r1) + lwz r28,36(r1) li rRTN, -1 b L(dureturn27) .align 4 @@ -943,9 +943,9 @@ L(duLcr6): lwz r31,48(1) lwz r30,44(1) li rRTN, 1 - bgt cr6, L(dureturn29) + bgt cr6, L(dureturn29) lwz r29,40(r1) - lwz r28,36(r1) + lwz r28,36(r1) li rRTN, -1 b L(dureturn27) .align 4 @@ -953,9 +953,9 @@ L(duLcr5): lwz r31,48(1) lwz r30,44(1) li rRTN, 1 - bgt cr5, L(dureturn29) + bgt cr5, L(dureturn29) lwz r29,40(r1) - lwz r28,36(r1) + lwz r28,36(r1) li rRTN, -1 b L(dureturn27) .align 3 @@ -965,14 +965,14 @@ L(duZeroReturn): L(dureturn): lwz r31,48(1) lwz r30,44(1) -L(dureturn29): +L(dureturn29): lwz r29,40(r1) - lwz r28,36(r1) -L(dureturn27): + lwz r28,36(r1) +L(dureturn27): lwz r27,32(r1) -L(dureturn26): +L(dureturn26): lwz r26,28(r1) -L(dureturn25): +L(dureturn25): lwz r25,24(r1) lwz r24,20(r1) lwz 1,0(1) diff --git a/sysdeps/powerpc/powerpc32/power4/memset.S b/sysdeps/powerpc/powerpc32/power4/memset.S index 1e8785cb4a..c2d288b38b 100644 --- a/sysdeps/powerpc/powerpc32/power4/memset.S +++ b/sysdeps/powerpc/powerpc32/power4/memset.S @@ -162,7 +162,7 @@ L(cacheAligned): add rMEMP,rMEMP,rCLS b L(cacheAligned) -/* We are here because the cache line size was set and the remainder +/* We are here because the cache line size was set and the remainder (rLEN) is less than the actual cache line size. So set up the preconditions for L(nondcbz) and go there. */ L(handletail32): diff --git a/sysdeps/powerpc/powerpc32/power4/wordcopy.c b/sysdeps/powerpc/powerpc32/power4/wordcopy.c index 6dd0fa3924..5d857f61eb 100644 --- a/sysdeps/powerpc/powerpc32/power4/wordcopy.c +++ b/sysdeps/powerpc/powerpc32/power4/wordcopy.c @@ -37,7 +37,7 @@ _wordcopy_fwd_aligned (dstp, srcp, len) if (len & 1) { ((op_t *) dstp)[0] = ((op_t *) srcp)[0]; - + if (len == 1) return; srcp += OPSIZ; @@ -88,10 +88,10 @@ _wordcopy_fwd_dest_aligned (dstp, srcp, len) { a1 = ((op_t *) srcp)[1]; ((op_t *) dstp)[0] = MERGE (a0, sh_1, a1, sh_2); - + if (len == 1) return; - + a0 = a1; srcp += OPSIZ; dstp += OPSIZ; @@ -131,7 +131,7 @@ _wordcopy_bwd_aligned (dstp, srcp, len) srcp -= OPSIZ; dstp -= OPSIZ; ((op_t *) dstp)[0] = ((op_t *) srcp)[0]; - + if (len == 1) return; len -= 1; |
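
The unaligned paths above -- the srw/slw/or sequences in memcmp.S and the MERGE () calls in wordcopy.c -- rest on the same idea: load only aligned words, then splice each pair of neighbouring words together with two shifts and an OR to recover the word that starts at the unaligned byte offset. The C sketch below illustrates just that merge step; it assumes a big-endian, 32-bit word layout as on powerpc32, and the function name and sample values are illustrative, not glibc's.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative counterpart of wordcopy.c's MERGE () and of the
   srw/slw/or sequence in memcmp.S: given two consecutive aligned
   32-bit words W0 and W1 and a byte offset OFS (1..3) into W0,
   rebuild the word that starts OFS bytes into W0.  Big-endian bit
   layout assumed, as on powerpc32.  */
static uint32_t
merge_be (uint32_t w0, uint32_t w1, unsigned int ofs)
{
  unsigned int sh_1 = 8 * ofs;    /* bits of w0 that are skipped  */
  unsigned int sh_2 = 32 - sh_1;  /* bits of w1 that are taken    */
  return (w0 << sh_1) | (w1 >> sh_2);
}

int
main (void)
{
  /* Memory bytes 11 22 33 44 55 66 77 88: an unaligned 32-bit read at
     byte offset 1 on a big-endian machine would see 0x22334455.  */
  uint32_t w0 = 0x11223344, w1 = 0x55667788;
  printf ("%08" PRIx32 "\n", merge_be (w0, w1, 1));  /* prints 22334455 */
  return 0;
}

In the assembly version, memcmp.S keeps the two shifted halves in separate registers (e.g. rG and rH) so the carried-over half of the previous word is already available when the next word is loaded; the scalar sketch collapses that pipelining into a single expression.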