author		Alexandre Oliva <aoliva@redhat.com>	2003-04-09 02:51:04 +0000
committer	Alexandre Oliva <aoliva@redhat.com>	2003-04-09 02:51:04 +0000
commit		c9efbeda6fa2b15aab04a51ec8adaf6089207550 (patch)
tree		de67577faaeb81a91674e9ccf9f940b7f7abe091 /sysdeps/mips/mips64
parent		9afe4964163b658f7271653f116f7570e826eda6 (diff)
2003-04-08  Alexandre Oliva  <aoliva@redhat.com>

	* sysdeps/mips/sys/regdef.h (t4,t5,t6,t7): Renamed to t0..t3 on
	NewABI.
	(ta0, ta1, ta2, ta3): Defined to t4..t7 on o32, and a4..a7 on
	NewABI.
	* sysdeps/mips/mips64/memcpy.S: Adjust register naming conventions.
	* sysdeps/mips/mips64/memset.S: Likewise.
	* sysdeps/unix/mips/sysdep.S (__syscall_error) [_LIBC_REENTRANT]:
	Use t0 instead of t4 as temporary.
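The regdef.h change itself falls outside the diffstat below (which is limited to sysdeps/mips/mips64), but a minimal sketch of what the log entry describes would look roughly like this. The _MIPS_SIM guard spelling and comments are assumptions, not the verbatim patch; only the physical register numbering follows the standard MIPS conventions ($8..$15 are t0..t7 on o32, while the NewABI turns $8..$11 into the extra argument registers a4..a7):

	/* Hypothetical sketch, not the actual committed header.  */
	#if _MIPS_SIM != _MIPS_SIM_ABI32	/* NewABI (n32/n64) */
	# define t0	$12	/* formerly t4 */
	# define t1	$13	/* formerly t5 */
	# define t2	$14	/* formerly t6 */
	# define t3	$15	/* formerly t7 */
	# define ta0	a4	/* $8, doubles as an argument register */
	# define ta1	a5	/* $9 */
	# define ta2	a6	/* $10 */
	# define ta3	a7	/* $11 */
	#else					/* o32 */
	# define ta0	t4	/* $12, a plain temporary */
	# define ta1	t5	/* $13 */
	# define ta2	t6	/* $14 */
	# define ta3	t7	/* $15 */
	#endif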
Diffstat (limited to 'sysdeps/mips/mips64')
-rw-r--r--	sysdeps/mips/mips64/memcpy.S	100
-rw-r--r--	sysdeps/mips/mips64/memset.S	 40
2 files changed, 70 insertions, 70 deletions
diff --git a/sysdeps/mips/mips64/memcpy.S b/sysdeps/mips/mips64/memcpy.S
index c4ba7a8f51..e9fc2b712a 100644
--- a/sysdeps/mips/mips64/memcpy.S
+++ b/sysdeps/mips/mips64/memcpy.S
@@ -42,71 +42,71 @@ ENTRY (memcpy)
 	.set	noreorder

-	slti	a4, a2, 16		# Less than 16?
-	bne	a4, zero, L(last16)
+	slti	t0, a2, 16		# Less than 16?
+	bne	t0, zero, L(last16)
 	move	v0, a0			# Setup exit value before too late

-	xor	a4, a1, a0		# Find a0/a1 displacement
-	andi	a4, 0x7
-	bne	a4, zero, L(shift)	# Go handle the unaligned case
-	PTR_SUBU a5, zero, a1
-	andi	a5, 0x7			# a0/a1 are aligned, but are we
-	beq	a5, zero, L(chk8w)	#  starting in the middle of a word?
-	PTR_SUBU a2, a5
-	LDHI	a4, 0(a1)		# Yes we are... take care of that
-	PTR_ADDU a1, a5
-	SDHI	a4, 0(a0)
-	PTR_ADDU a0, a5
+	xor	t0, a1, a0		# Find a0/a1 displacement
+	andi	t0, 0x7
+	bne	t0, zero, L(shift)	# Go handle the unaligned case
+	PTR_SUBU t1, zero, a1
+	andi	t1, 0x7			# a0/a1 are aligned, but are we
+	beq	t1, zero, L(chk8w)	#  starting in the middle of a word?
+	PTR_SUBU a2, t1
+	LDHI	t0, 0(a1)		# Yes we are... take care of that
+	PTR_ADDU a1, t1
+	SDHI	t0, 0(a0)
+	PTR_ADDU a0, t1

 L(chk8w):
-	andi	a4, a2, 0x3f		# 64 or more bytes left?
-	beq	a4, a2, L(chk1w)
-	PTR_SUBU a3, a2, a4		# Yes
+	andi	t0, a2, 0x3f		# 64 or more bytes left?
+	beq	t0, a2, L(chk1w)
+	PTR_SUBU a3, a2, t0		# Yes
 	PTR_ADDU a3, a1			# a3 = end address of loop
-	move	a2, a4			# a2 = what will be left after loop
+	move	a2, t0			# a2 = what will be left after loop
 L(lop8w):
-	ld	a4,  0(a1)		# Loop taking 8 words at a time
-	ld	a5,  8(a1)
-	ld	a6, 16(a1)
-	ld	a7, 24(a1)
-	ld	t4, 32(a1)
-	ld	t5, 40(a1)
-	ld	t6, 48(a1)
-	ld	t7, 56(a1)
+	ld	t0,  0(a1)		# Loop taking 8 words at a time
+	ld	t1,  8(a1)
+	ld	t2, 16(a1)
+	ld	t3, 24(a1)
+	ld	ta0, 32(a1)
+	ld	ta1, 40(a1)
+	ld	ta2, 48(a1)
+	ld	ta3, 56(a1)
 	PTR_ADDIU a0, 64
 	PTR_ADDIU a1, 64
-	sd	a4, -64(a0)
-	sd	a5, -56(a0)
-	sd	a6, -48(a0)
-	sd	a7, -40(a0)
-	sd	t4, -32(a0)
-	sd	t5, -24(a0)
-	sd	t6, -16(a0)
+	sd	t0, -64(a0)
+	sd	t1, -56(a0)
+	sd	t2, -48(a0)
+	sd	t3, -40(a0)
+	sd	ta0, -32(a0)
+	sd	ta1, -24(a0)
+	sd	ta2, -16(a0)
 	bne	a1, a3, L(lop8w)
-	sd	t7, -8(a0)
+	sd	ta3, -8(a0)

 L(chk1w):
-	andi	a4, a2, 0x7		# 8 or more bytes left?
-	beq	a4, a2, L(last16)
-	PTR_SUBU a3, a2, a4		# Yes, handle them one dword at a time
+	andi	t0, a2, 0x7		# 8 or more bytes left?
+	beq	t0, a2, L(last16)
+	PTR_SUBU a3, a2, t0		# Yes, handle them one dword at a time
 	PTR_ADDU a3, a1			# a3 again end address
-	move	a2, a4
+	move	a2, t0
 L(lop1w):
-	ld	a4, 0(a1)
+	ld	t0, 0(a1)
 	PTR_ADDIU a0, 8
 	PTR_ADDIU a1, 8
 	bne	a1, a3, L(lop1w)
-	sd	a4, -8(a0)
+	sd	t0, -8(a0)

 L(last16):
 	blez	a2, L(lst16e)		# Handle last 16 bytes, one at a time
 	PTR_ADDU a3, a2, a1
 L(lst16l):
-	lb	a4, 0(a1)
+	lb	t0, 0(a1)
 	PTR_ADDIU a0, 1
 	PTR_ADDIU a1, 1
 	bne	a1, a3, L(lst16l)
-	sb	a4, -1(a0)
+	sb	t0, -1(a0)
 L(lst16e):
 	jr	ra			# Bye, bye
 	nop
@@ -116,24 +116,24 @@ L(shift):
 	andi	a3, 0x7			# (unoptimized case...)
 	beq	a3, zero, L(shft1)
 	PTR_SUBU a2, a3			# a2 = bytes left
-	LDHI	a4, 0(a1)		# Take care of first odd part
-	LDLO	a4, 7(a1)
+	LDHI	t0, 0(a1)		# Take care of first odd part
+	LDLO	t0, 7(a1)
 	PTR_ADDU a1, a3
-	SDHI	a4, 0(a0)
+	SDHI	t0, 0(a0)
 	PTR_ADDU a0, a3
 L(shft1):
-	andi	a4, a2, 0x7
-	PTR_SUBU a3, a2, a4
+	andi	t0, a2, 0x7
+	PTR_SUBU a3, a2, t0
 	PTR_ADDU a3, a1
 L(shfth):
-	LDHI	a5, 0(a1)		# Limp through, dword by dword
-	LDLO	a5, 7(a1)
+	LDHI	t1, 0(a1)		# Limp through, dword by dword
+	LDLO	t1, 7(a1)
 	PTR_ADDIU a0, 8
 	PTR_ADDIU a1, 8
 	bne	a1, a3, L(shfth)
-	sd	a5, -8(a0)
+	sd	t1, -8(a0)
 	b	L(last16)		# Handle anything which may be left
-	move	a2, a4
+	move	a2, t0

 	.set	reorder
 END (memcpy)
diff --git a/sysdeps/mips/mips64/memset.S b/sysdeps/mips/mips64/memset.S
index d6e1790fbe..784fa5deee 100644
--- a/sysdeps/mips/mips64/memset.S
+++ b/sysdeps/mips/mips64/memset.S
@@ -36,33 +36,33 @@ ENTRY (memset)
 	.set	noreorder

-	slti	t5, a2, 16		# Less than 16?
-	bne	t5, zero, L(last16)
+	slti	ta1, a2, 16		# Less than 16?
+	bne	ta1, zero, L(last16)
 	move	v0, a0			# Setup exit value before too late

 	beq	a1, zero, L(ueven)	# If zero pattern, no need to extend
 	andi	a1, 0xff		# Avoid problems with bogus arguments
-	dsll	t4, a1, 8
-	or	a1, t4
-	dsll	t4, a1, 16
-	or	a1, t4			# a1 is now pattern in full word
-	dsll	t4, a1, 32
-	or	a1, t4			# a1 is now pattern in double word
+	dsll	ta0, a1, 8
+	or	a1, ta0
+	dsll	ta0, a1, 16
+	or	a1, ta0			# a1 is now pattern in full word
+	dsll	ta0, a1, 32
+	or	a1, ta0			# a1 is now pattern in double word

 L(ueven):
-	PTR_SUBU t4, zero, a0		# Unaligned address?
-	andi	t4, 0x7
-	beq	t4, zero, L(chkw)
-	PTR_SUBU a2, t4
+	PTR_SUBU ta0, zero, a0		# Unaligned address?
+	andi	ta0, 0x7
+	beq	ta0, zero, L(chkw)
+	PTR_SUBU a2, ta0
 	SDHI	a1, 0(a0)		# Yes, handle first unaligned part
-	PTR_ADDU a0, t4			# Now both a0 and a2 are updated
+	PTR_ADDU a0, ta0		# Now both a0 and a2 are updated

 L(chkw):
-	andi	t4, a2, 0xf		# Enough left for one loop iteration?
-	beq	t4, a2, L(chkl)
-	PTR_SUBU a3, a2, t4
+	andi	ta0, a2, 0xf		# Enough left for one loop iteration?
+	beq	ta0, a2, L(chkl)
+	PTR_SUBU a3, a2, ta0
 	PTR_ADDU a3, a0			# a3 is last loop address +1
-	move	a2, t4			# a2 is now # of bytes left after loop
+	move	a2, ta0			# a2 is now # of bytes left after loop
 L(loopw):
 	PTR_ADDIU a0, 16		# Handle 2 dwords pr. iteration
 	sd	a1, -16(a0)
@@ -70,9 +70,9 @@ L(loopw):
 	sd	a1, -8(a0)

 L(chkl):
-	andi	t4, a2, 0x8		# Check if there is at least a double
-	beq	t4, zero, L(last16)	#  word remaining after the loop
-	PTR_SUBU a2, t4
+	andi	ta0, a2, 0x8		# Check if there is at least a double
+	beq	ta0, zero, L(last16)	#  word remaining after the loop
+	PTR_SUBU a2, ta0
 	sd	a1, 0(a0)		# Yes...
 	PTR_ADDIU a0, 8
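With these aliases in place, a single assembly source can name its scratch registers the same way on both ABIs; only the physical registers behind t0 and ta0 differ. A minimal usage sketch, assuming the regdef.h layout above (the file and function name are hypothetical, not part of this commit; ld/sd assume a 64-bit ISA, as in the mips64 files):

	/* copy16.S -- hypothetical example, not in glibc.  */
	#include <sysdep.h>
	#include <sys/regdef.h>

		.text
	ENTRY (copy16)			/* copy 16 bytes from a1 to a0 */
		ld	t0, 0(a1)	/* t0 is $8 on o32, $12 on NewABI */
		ld	ta0, 8(a1)	/* ta0 is t4 ($12) on o32, a4 ($8) on NewABI */
		sd	t0, 0(a0)
		sd	ta0, 8(a0)
		move	v0, a0		/* return dest, memcpy-style */
		jr	ra		/* .set reorder: gas fills the delay slot */
	END (copy16)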