author     Roland McGrath <roland@gnu.org>    2003-03-14 03:59:59 +0000
committer  Roland McGrath <roland@gnu.org>    2003-03-14 03:59:59 +0000
commit     d8a5edc2217214f0b9a1b921b6edc2aa5048fb1e (patch)
tree       d716f13c28a17b6d31e0f81d023858b320cbf7d6 /sysdeps/mips
parent     6461e57784358cf4fd261e5e61d064a315aae924 (diff)
* elf/dl-load.c (_dl_map_object_from_fd): Bail if no PT_LOAD phdrs
	found.  Reported by Alexandre Oliva <aoliva@redhat.com>.
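
A rough, hypothetical illustration of the check described above (the helper name and types are assumptions, not the actual dl-load.c code): an ELF object whose program header table contains no PT_LOAD entry has nothing to map, so the loader should bail out instead of proceeding.

    #include <elf.h>
    #include <stddef.h>

    /* Hypothetical helper: nonzero iff at least one PT_LOAD phdr exists,
       so _dl_map_object_from_fd-style code can refuse to continue when
       this returns 0.  */
    static int
    has_load_segment (const Elf32_Phdr *phdr, size_t phnum)
    {
      for (size_t i = 0; i < phnum; ++i)
        if (phdr[i].p_type == PT_LOAD)
          return 1;
      return 0;
    }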

2003-03-13  Alexandre Oliva  <aoliva@redhat.com>

	* stdio-common/_itoa.c (_itoa_base_table): Make 64-bit
	literals long long.
	* stdlib/fpioconst.c: Likewise.
	* stdlib/strtod.c: Likewise.
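
The three items above refer to tables of 64-bit constants. On a 32-bit MIPS target, a literal wider than 32 bits needs an LL/ULL suffix (or an explicit 64-bit type) to keep its full value. A minimal, made-up example of the change (not the actual glibc tables):

    #include <stdint.h>

    /* Hypothetical table: 10^9 fits in 32 bits, 10^19 does not, so the
       wide entry must be spelled as a 'long long' literal on 32-bit
       targets.  */
    static const uint64_t example_pow10[] =
      {
        1000000000ULL,            /* 10^9  */
        0x8ac7230489e80000ULL,    /* 10^19 */
      };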

	* sysdeps/mips/add_n.S: Use L macro for local labels.
	* sysdeps/mips/addmul_1.S: Likewise.
	* sysdeps/mips/lshift.S: Likewise.
	* sysdeps/mips/memcpy.S: Likewise.
	* sysdeps/mips/memset.S: Likewise.
	* sysdeps/mips/mul_1.S: Likewise.
	* sysdeps/mips/rshift.S: Likewise.
	* sysdeps/mips/sub_n.S: Likewise.
	* sysdeps/mips/submul_1.S: Likewise.
	* sysdeps/mips/mips64/add_n.S: Likewise.
	* sysdeps/mips/mips64/addmul_1.S: Likewise.
	* sysdeps/mips/mips64/lshift.S: Likewise.
	* sysdeps/mips/mips64/mul_1.S: Likewise.
	* sysdeps/mips/mips64/rshift.S: Likewise.
	* sysdeps/mips/mips64/sub_n.S: Likewise.
	* sysdeps/mips/mips64/submul_1.S: Likewise.
	* sysdeps/unix/mips/sysdep.h: Define L() according to ABI
	conventions.  Define END as in sys/asm.h.
	* sysdeps/unix/mips/sysdep.S: Likewise.
	* sysdeps/unix/mips/wait.S: Likewise.
	* sysdeps/unix/sysv/linux/mips/clone.S: Likewise.
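
The sysdep.h change is what the assembly diffs below rely on: every hand-written local label is spelled through L(...), and the macro picks the label prefix the assembler expects for the ABI in use (o32 traditionally uses $Lfoo, the 64-bit ABIs .Lfoo). A rough sketch of such an ABI-conditional definition, together with an END like the one in sys/asm.h; the exact guards and spellings here are assumptions, not a copy of the glibc header:

    /* Hypothetical sketch of an ABI-dependent local-label macro.  */
    #if _MIPS_SIM == _MIPS_SIM_ABI32
    # define L(label) $L ## label      /* o32 local labels: $Lfoo */
    #else
    # define L(label) .L ## label      /* n32/n64 local labels: .Lfoo */
    #endif

    /* END as in sys/asm.h: close the function and record its size.  */
    #define END(function)              \
            .end    function;          \
            .size   function, . - function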

	* sysdeps/ieee754/dbl-64/dbl2mpn.c (__mpn_extract_double):
	Cast shifted values that may be too narrow to mp_limb_t.
	* sysdeps/ieee754/dbl-64/mpn2dbl.c (__mpn_construct_double):
	Likewise.
	* sysdeps/ieee754/flt-32/mpn2flt.c (__mpn_construct_float):
	Likewise.
	* sysdeps/ieee754/ldbl-128/ldbl2mpn.c
	(__mpn_extract_long_double): Likewise.
	* sysdeps/ieee754/ldbl-128/mpn2ldbl.c
	(__mpn_construct_long_double): Likewise.
	* sysdeps/ieee754/ldbl-96/ldbl2mpn.c
	(__mpn_extract_long_double): Likewise.
	* sysdeps/ieee754/ldbl-96/mpn2ldbl.c
	(__mpn_construct_long_double): Likewise.
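
The casts in the ieee754 conversion routines guard against shifting a value that is narrower than a limb: with 64-bit mp_limb_t, shifting an unpromoted 32-bit field left by 32 or more bits is undefined and drops the high part, so the operand is widened first. A hypothetical fragment showing the pattern (not the actual __mpn_extract_double code):

    #include <stdint.h>

    typedef uint64_t mp_limb_t;   /* assume 64-bit limbs */

    /* Hypothetical: combine the two 32-bit halves of a double's mantissa
       into one limb.  Without the cast, 'hi << 32' shifts a 32-bit value
       by its full width, which is undefined behaviour in C.  */
    static mp_limb_t
    join_halves (uint32_t hi, uint32_t lo)
    {
      return ((mp_limb_t) hi << 32) | lo;
    }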

2003-03-13  Roland McGrath  <roland@redhat.com>
Diffstat (limited to 'sysdeps/mips')
-rw-r--r--  sysdeps/mips/add_n.S            22
-rw-r--r--  sysdeps/mips/addmul_1.S         14
-rw-r--r--  sysdeps/mips/lshift.S           24
-rw-r--r--  sysdeps/mips/memcpy.S           44
-rw-r--r--  sysdeps/mips/memset.S           30
-rw-r--r--  sysdeps/mips/mips64/add_n.S     29
-rw-r--r--  sysdeps/mips/mips64/addmul_1.S  23
-rw-r--r--  sysdeps/mips/mips64/lshift.S    29
-rw-r--r--  sysdeps/mips/mips64/mul_1.S     24
-rw-r--r--  sysdeps/mips/mips64/rshift.S    29
-rw-r--r--  sysdeps/mips/mips64/sub_n.S     29
-rw-r--r--  sysdeps/mips/mips64/submul_1.S  24
-rw-r--r--  sysdeps/mips/mul_1.S            14
-rw-r--r--  sysdeps/mips/rshift.S           16
-rw-r--r--  sysdeps/mips/sub_n.S            24
-rw-r--r--  sysdeps/mips/submul_1.S         14
16 files changed, 199 insertions, 190 deletions
diff --git a/sysdeps/mips/add_n.S b/sysdeps/mips/add_n.S
index da7b2d456d..c82871f701 100644
--- a/sysdeps/mips/add_n.S
+++ b/sysdeps/mips/add_n.S
@@ -1,7 +1,7 @@
 /* MIPS2 __mpn_add_n -- Add two limb vectors of the same length > 0 and
 store sum in a third limb vector.
 
-Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -43,12 +43,12 @@ ENTRY (__mpn_add_n)
 
 	addiu	$7,$7,-1
 	and	$9,$7,4-1	/* number of limbs in first loop */
-	beq	$9,$0,.L0	/* if multiple of 4 limbs, skip first loop */
+	beq	$9,$0,L(L0)	/* if multiple of 4 limbs, skip first loop */
 	move	$2,$0
 
 	subu	$7,$7,$9
 
-.Loop0:	addiu	$9,$9,-1
+L(Loop0):	addiu	$9,$9,-1
 	lw	$12,4($5)
 	addu	$11,$11,$2
 	lw	$13,4($6)
@@ -62,13 +62,13 @@ ENTRY (__mpn_add_n)
 	addiu	$6,$6,4
 	move	$10,$12
 	move	$11,$13
-	bne	$9,$0,.Loop0
-	 addiu	$4,$4,4
+	bne	$9,$0,L(Loop0)
+	addiu	$4,$4,4
 
-.L0:	beq	$7,$0,.Lend
-	 nop
+L(L0):	beq	$7,$0,L(end)
+	nop
 
-.Loop:	addiu	$7,$7,-4
+L(Loop):	addiu	$7,$7,-4
 
 	lw	$12,4($5)
 	addu	$11,$11,$2
@@ -109,10 +109,10 @@ ENTRY (__mpn_add_n)
 	addiu	$5,$5,16
 	addiu	$6,$6,16
 
-	bne	$7,$0,.Loop
-	 addiu	$4,$4,16
+	bne	$7,$0,L(Loop)
+	addiu	$4,$4,16
 
-.Lend:	addu	$11,$11,$2
+L(end):	addu	$11,$11,$2
 	sltu	$8,$11,$2
 	addu	$11,$10,$11
 	sltu	$2,$11,$10
diff --git a/sysdeps/mips/addmul_1.S b/sysdeps/mips/addmul_1.S
index 32df1d780b..3e1fc09d10 100644
--- a/sysdeps/mips/addmul_1.S
+++ b/sysdeps/mips/addmul_1.S
@@ -1,7 +1,7 @@
 /* MIPS __mpn_addmul_1 -- Multiply a limb vector with a single limb and
 add the product to a second limb vector.
 
-Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -46,14 +46,14 @@ ENTRY (__mpn_addmul_1)
 	multu	$8,$7
 
 	addiu	$6,$6,-1
-	beq	$6,$0,$LC0
+	beq	$6,$0,L(LC0)
 	move	$2,$0		/* zero cy2 */
 
 	addiu	$6,$6,-1
-	beq	$6,$0,$LC1
+	beq	$6,$0,L(LC1)
 	lw	$8,0($5)	/* load new s1 limb as early as possible */
 
-Loop:	lw	$10,0($4)
+L(Loop):	lw	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	addiu	$5,$5,4
@@ -67,11 +67,11 @@ Loop:	lw	$10,0($4)
 	addu	$2,$2,$10
 	sw	$3,0($4)
 	addiu	$4,$4,4
-	bne	$6,$0,Loop	/* should be "bnel" */
+	bne	$6,$0,L(Loop)	/* should be "bnel" */
 	addu	$2,$9,$2	/* add high product limb and carry from addition */
 
 	/* cool down phase 1 */
-$LC1:	lw	$10,0($4)
+L(LC1):	lw	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	addu	$3,$3,$2
@@ -85,7 +85,7 @@ $LC1:	lw	$10,0($4)
 	addu	$2,$9,$2	/* add high product limb and carry from addition */
 
 	/* cool down phase 0 */
-$LC0:	lw	$10,0($4)
+L(LC0):	lw	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	addu	$3,$3,$2
diff --git a/sysdeps/mips/lshift.S b/sysdeps/mips/lshift.S
index b1a858dd9a..0217bfc586 100644
--- a/sysdeps/mips/lshift.S
+++ b/sysdeps/mips/lshift.S
@@ -1,6 +1,6 @@
 /* MIPS2 __mpn_lshift --
 
-Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -44,12 +44,12 @@ ENTRY (__mpn_lshift)
 	addu	$4,$4,$2	/* make r4 point at end of res */
 	addiu	$6,$6,-1
 	and	$9,$6,4-1	/* number of limbs in first loop */
-	beq	$9,$0,.L0	/* if multiple of 4 limbs, skip first loop */
-	 srl	$2,$10,$13	/* compute function result */
+	beq	$9,$0,L(L0)	/* if multiple of 4 limbs, skip first loop */
+	srl	$2,$10,$13	/* compute function result */
 
 	subu	$6,$6,$9
 
-.Loop0:	lw	$3,-8($5)
+L(Loop0):	lw	$3,-8($5)
 	addiu	$4,$4,-4
 	addiu	$5,$5,-4
 	addiu	$9,$9,-1
@@ -57,13 +57,13 @@ ENTRY (__mpn_lshift)
 	srl	$12,$3,$13
 	move	$10,$3
 	or	$8,$11,$12
-	bne	$9,$0,.Loop0
-	 sw	$8,0($4)
+	bne	$9,$0,L(Loop0)
+	sw	$8,0($4)
 
-.L0:	beq	$6,$0,.Lend
-	 nop
+L(L0):	beq	$6,$0,L(Lend)
+	nop
 
-.Loop:	lw	$3,-8($5)
+L(Loop):	lw	$3,-8($5)
 	addiu	$4,$4,-16
 	addiu	$6,$6,-4
 	sll	$11,$10,$7
@@ -89,10 +89,10 @@ ENTRY (__mpn_lshift)
 
 	addiu	$5,$5,-16
 	or	$8,$14,$9
-	bgtz	$6,.Loop
-	 sw	$8,0($4)
+	bgtz	$6,L(Loop)
+	sw	$8,0($4)
 
-.Lend:	sll	$8,$10,$7
+L(Lend):	sll	$8,$10,$7
 	j	$31
 	sw	$8,-4($4)
 	END (__mpn_lshift)
diff --git a/sysdeps/mips/memcpy.S b/sysdeps/mips/memcpy.S
index 394265eed7..3d49ac976b 100644
--- a/sysdeps/mips/memcpy.S
+++ b/sysdeps/mips/memcpy.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Hartvig Ekner <hartvige@mips.com>, 2002.
 
@@ -44,27 +44,27 @@ ENTRY (memcpy)
 	.set	noreorder
 
 	slti	t0, a2, 8		# Less than 8?
-	bne	t0, zero, $last8
+	bne	t0, zero, L(last8)
 	move	v0, a0			# Setup exit value before too late
 
 	xor	t0, a1, a0		# Find a0/a1 displacement
 	andi	t0, 0x3
-	bne	t0, zero, $shift	# Go handle the unaligned case
+	bne	t0, zero, L(shift)	# Go handle the unaligned case
 	subu	t1, zero, a1
 	andi	t1, 0x3			# a0/a1 are aligned, but are we
-	beq	t1, zero, $chk8w	#  starting in the middle of a word?
+	beq	t1, zero, L(chk8w)	#  starting in the middle of a word?
 	subu	a2, t1
 	LWHI	t0, 0(a1)		# Yes we are... take care of that
 	addu	a1, t1
 	SWHI	t0, 0(a0)
 	addu	a0, t1
 
-$chk8w:	andi	t0, a2, 0x1f		# 32 or more bytes left?
-	beq	t0, a2, $chk1w
+L(chk8w):	andi	t0, a2, 0x1f		# 32 or more bytes left?
+	beq	t0, a2, L(chk1w)
 	subu	a3, a2, t0		# Yes
 	addu	a3, a1			# a3 = end address of loop
 	move	a2, t0			# a2 = what will be left after loop
-$lop8w:	lw	t0,  0(a1)		# Loop taking 8 words at a time
+L(lop8w):	lw	t0,  0(a1)		# Loop taking 8 words at a time
 	lw	t1,  4(a1)
 	lw	t2,  8(a1)
 	lw	t3, 12(a1)
@@ -81,49 +81,49 @@ $lop8w:	lw	t0,  0(a1)		# Loop taking 8 words at a time
 	sw	t4, -16(a0)
 	sw	t5, -12(a0)
 	sw	t6,  -8(a0)
-	bne	a1, a3, $lop8w
+	bne	a1, a3, L(lop8w)
 	sw	t7,  -4(a0)
 
-$chk1w:	andi	t0, a2, 0x3		# 4 or more bytes left?
-	beq	t0, a2, $last8
+L(chk1w):	andi	t0, a2, 0x3		# 4 or more bytes left?
+	beq	t0, a2, L(last8)
 	subu	a3, a2, t0		# Yes, handle them one word at a time
 	addu	a3, a1			# a3 again end address
 	move	a2, t0
-$lop1w:	lw	t0, 0(a1)
+L(lop1w):	lw	t0, 0(a1)
 	addiu	a0, 4
 	addiu	a1, 4
-	bne	a1, a3, $lop1w
+	bne	a1, a3, L(lop1w)
 	sw	t0, -4(a0)
 
-$last8:	blez	a2, $lst8e		# Handle last 8 bytes, one at a time
+L(last8):	blez	a2, L(lst8e)		# Handle last 8 bytes, one at a time
 	addu	a3, a2, a1
-$lst8l:	lb	t0, 0(a1)
+L(lst8l):	lb	t0, 0(a1)
 	addiu	a0, 1
 	addiu	a1, 1
-	bne	a1, a3, $lst8l
+	bne	a1, a3, L(lst8l)
 	sb	t0, -1(a0)
-$lst8e:	jr	ra			# Bye, bye
+L(lst8e):	jr	ra			# Bye, bye
 	nop
 
-$shift:	subu	a3, zero, a0		# Src and Dest unaligned 
+L(shift):	subu	a3, zero, a0		# Src and Dest unaligned 
 	andi	a3, 0x3			#  (unoptimized case...)
-	beq	a3, zero, $shft1
+	beq	a3, zero, L(shft1)
 	subu	a2, a3			# a2 = bytes left
 	LWHI	t0, 0(a1)		# Take care of first odd part
 	LWLO	t0, 3(a1)
 	addu	a1, a3
 	SWHI	t0, 0(a0)
 	addu	a0, a3
-$shft1:	andi	t0, a2, 0x3
+L(shft1):	andi	t0, a2, 0x3
 	subu	a3, a2, t0
 	addu	a3, a1
-$shfth:	LWHI	t1, 0(a1)		# Limp through, word by word
+L(shfth):	LWHI	t1, 0(a1)		# Limp through, word by word
 	LWLO	t1, 3(a1)
 	addiu	a0, 4
 	addiu	a1, 4
-	bne	a1, a3, $shfth
+	bne	a1, a3, L(shfth)
 	sw	t1, -4(a0)
-	b	$last8			# Handle anything which may be left
+	b	L(last8)			# Handle anything which may be left
 	move	a2, t0
 
 	.set	reorder
diff --git a/sysdeps/mips/memset.S b/sysdeps/mips/memset.S
index 7e3f129af9..7825dea880 100644
--- a/sysdeps/mips/memset.S
+++ b/sysdeps/mips/memset.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Hartvig Ekner <hartvige@mips.com>, 2002.
 
@@ -38,45 +38,45 @@ ENTRY (memset)
 	.set	noreorder
 
 	slti	t1, a2, 8		# Less than 8?
-	bne	t1, zero, $last8
+	bne	t1, zero, L(last8)
 	move	v0, a0			# Setup exit value before too late
 
-	beq	a1, zero, $ueven	# If zero pattern, no need to extend
+	beq	a1, zero, L(ueven)	# If zero pattern, no need to extend
 	andi	a1, 0xff		# Avoid problems with bogus arguments
 	sll	t0, a1, 8
 	or	a1, t0
 	sll	t0, a1, 16
 	or	a1, t0			# a1 is now pattern in full word
 
-$ueven:	subu	t0, zero, a0		# Unaligned address?
+L(ueven):	subu	t0, zero, a0		# Unaligned address?
 	andi	t0, 0x3
-	beq	t0, zero, $chkw
+	beq	t0, zero, L(chkw)
 	subu	a2, t0
 	SWHI	a1, 0(a0)		# Yes, handle first unaligned part
 	addu	a0, t0			# Now both a0 and a2 are updated
 
-$chkw:	andi	t0, a2, 0x7		# Enough left for one loop iteration?
-	beq	t0, a2, $chkl
+L(chkw):	andi	t0, a2, 0x7		# Enough left for one loop iteration?
+	beq	t0, a2, L(chkl)
 	subu	a3, a2, t0
 	addu	a3, a0			# a3 is last loop address +1
 	move	a2, t0			# a2 is now # of bytes left after loop
-$loopw:	addiu	a0, 8			# Handle 2 words pr. iteration
+L(loopw):	addiu	a0, 8			# Handle 2 words pr. iteration
 	sw	a1, -8(a0)
-	bne	a0, a3, $loopw
+	bne	a0, a3, L(loopw)
 	sw	a1, -4(a0)
 
-$chkl:	andi	t0, a2, 0x4		# Check if there is at least a full
-	beq	t0, zero, $last8	#  word remaining after the loop
+L(chkl):	andi	t0, a2, 0x4		# Check if there is at least a full
+	beq	t0, zero, L(last8)	#  word remaining after the loop
 	subu	a2, t0
 	sw	a1, 0(a0)		# Yes...
 	addiu	a0, 4
 
-$last8:	blez	a2, $exit		# Handle last 8 bytes (if cnt>0)
+L(last8):	blez	a2, L(exit)		# Handle last 8 bytes (if cnt>0)
 	addu	a3, a2, a0		# a3 is last address +1
-$lst8l:	addiu	a0, 1
-	bne	a0, a3, $lst8l
+L(lst8l):	addiu	a0, 1
+	bne	a0, a3, L(lst8l)
 	sb	a1, -1(a0)
-$exit:	j	ra			# Bye, bye
+L(exit):	j	ra			# Bye, bye
 	nop
 
 	.set	reorder
diff --git a/sysdeps/mips/mips64/add_n.S b/sysdeps/mips/mips64/add_n.S
index 771d51981a..072f4f0b73 100644
--- a/sysdeps/mips/mips64/add_n.S
+++ b/sysdeps/mips/mips64/add_n.S
@@ -1,7 +1,7 @@
 /* MIPS3 __mpn_add_n -- Add two limb vectors of the same length > 0 and
  * store sum in a third limb vector.
  *
- * Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+ * Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
  *
  * This file is part of the GNU MP Library.
  *
@@ -22,6 +22,7 @@
  */
 
 #include <sysdep.h>
+#include <sys/asm.h>
 
 /*
  * INPUT PARAMETERS
@@ -38,10 +39,10 @@
 	.globl	__mpn_add_n
 	.ent	__mpn_add_n
 __mpn_add_n:
-	.set	noreorder
 #ifdef __PIC__
-	.cpload t9
+	SETUP_GP /* ??? unused */
 #endif
+	.set	noreorder
 	.set	nomacro
 
 	ld	$10,0($5)
@@ -49,12 +50,12 @@ __mpn_add_n:
 
 	daddiu	$7,$7,-1
 	and	$9,$7,4-1	# number of limbs in first loop
-	beq	$9,$0,.L0	# if multiple of 4 limbs, skip first loop
-	 move	$2,$0
+	beq	$9,$0,L(L0)	# if multiple of 4 limbs, skip first loop
+	move	$2,$0
 
 	dsubu	$7,$7,$9
 
-.Loop0:	daddiu	$9,$9,-1
+L(Loop0):	daddiu	$9,$9,-1
 	ld	$12,8($5)
 	daddu	$11,$11,$2
 	ld	$13,8($6)
@@ -68,13 +69,13 @@ __mpn_add_n:
 	daddiu	$6,$6,8
 	move	$10,$12
 	move	$11,$13
-	bne	$9,$0,.Loop0
-	 daddiu	$4,$4,8
+	bne	$9,$0,L(Loop0)
+	daddiu	$4,$4,8
 
-.L0:	beq	$7,$0,.Lend
-	 nop
+L(L0):	beq	$7,$0,L(Lend)
+	nop
 
-.Loop:	daddiu	$7,$7,-4
+L(Loop):	daddiu	$7,$7,-4
 
 	ld	$12,8($5)
 	daddu	$11,$11,$2
@@ -115,10 +116,10 @@ __mpn_add_n:
 	daddiu	$5,$5,32
 	daddiu	$6,$6,32
 
-	bne	$7,$0,.Loop
-	 daddiu	$4,$4,32
+	bne	$7,$0,L(Loop)
+	daddiu	$4,$4,32
 
-.Lend:	daddu	$11,$11,$2
+L(Lend):	daddu	$11,$11,$2
 	sltu	$8,$11,$2
 	daddu	$11,$10,$11
 	sltu	$2,$11,$10
diff --git a/sysdeps/mips/mips64/addmul_1.S b/sysdeps/mips/mips64/addmul_1.S
index f6cf428315..f5ecd83702 100644
--- a/sysdeps/mips/mips64/addmul_1.S
+++ b/sysdeps/mips/mips64/addmul_1.S
@@ -1,7 +1,7 @@
 /* MIPS3 __mpn_addmul_1 -- Multiply a limb vector with a single limb and
  * add the product to a second limb vector.
  *
- * Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
+ * Copyright (C) 1992, 1994, 1995, 2002, 2003 Free Software Foundation, Inc.
  *
  * This file is part of the GNU MP Library.
  *
@@ -22,6 +22,7 @@
  */
 
 #include <sysdep.h>
+#include <sys/asm.h>
 
 /* INPUT PARAMETERS
  * res_ptr	$4
@@ -38,10 +39,10 @@
 	.globl	__mpn_addmul_1
 	.ent	__mpn_addmul_1
 __mpn_addmul_1:
-	.set    noreorder
 #ifdef PIC
-	.cpload t9
+	SETUP_GP /* ??? unused */
 #endif
+	.set    noreorder
 	.set    nomacro
 
  # warm up phase 0
@@ -52,14 +53,14 @@ __mpn_addmul_1:
 	dmultu	$8,$7
 
 	daddiu	$6,$6,-1
-	beq	$6,$0,$LC0
-	 move	$2,$0		# zero cy2
+	beq	$6,$0,L(LC0)
+	move	$2,$0		# zero cy2
 
 	daddiu	$6,$6,-1
-	beq	$6,$0,$LC1
+	beq	$6,$0,L(LC1)
 	ld	$8,0($5)	# load new s1 limb as early as possible
 
-Loop:	ld	$10,0($4)
+L(Loop):	ld	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	daddiu	$5,$5,8
@@ -73,11 +74,11 @@ Loop:	ld	$10,0($4)
 	daddu	$2,$2,$10
 	sd	$3,0($4)
 	daddiu	$4,$4,8
-	bne	$6,$0,Loop
-	 daddu	$2,$9,$2	# add high product limb and carry from addition
+	bne	$6,$0,L(Loop)
+	daddu	$2,$9,$2	# add high product limb and carry from addition
 
  # cool down phase 1
-$LC1:	ld	$10,0($4)
+L(LC1):	ld	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	daddu	$3,$3,$2
@@ -91,7 +92,7 @@ $LC1:	ld	$10,0($4)
 	daddu	$2,$9,$2	# add high product limb and carry from addition
 
  # cool down phase 0
-$LC0:	ld	$10,0($4)
+L(LC0):	ld	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	daddu	$3,$3,$2
diff --git a/sysdeps/mips/mips64/lshift.S b/sysdeps/mips/mips64/lshift.S
index d06ba0d309..20f9e3da19 100644
--- a/sysdeps/mips/mips64/lshift.S
+++ b/sysdeps/mips/mips64/lshift.S
@@ -1,6 +1,6 @@
 /* MIPS3 __mpn_lshift --
  *
- * Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+ * Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
  *
  * This file is part of the GNU MP Library.
  *
@@ -21,6 +21,7 @@
  */
 
 #include <sysdep.h>
+#include <sys/asm.h>
 
 /* INPUT PARAMETERS
  * res_ptr	$4
@@ -37,10 +38,10 @@
 	.globl	__mpn_lshift
 	.ent	__mpn_lshift
 __mpn_lshift:
-	.set	noreorder
 #ifdef __PIC__
-	.cpload t9
+	SETUP_GP /* ??? unused */
 #endif
+	.set	noreorder
 	.set	nomacro
 
 	dsll	$2,$6,3
@@ -50,12 +51,12 @@ __mpn_lshift:
 	daddu	$4,$4,$2	# make r4 point at end of res
 	daddiu	$6,$6,-1
 	and	$9,$6,4-1	# number of limbs in first loop
-	beq	$9,$0,.L0	# if multiple of 4 limbs, skip first loop
-	 dsrl	$2,$10,$13	# compute function result
+	beq	$9,$0,L(L0)	# if multiple of 4 limbs, skip first loop
+	dsrl	$2,$10,$13	# compute function result
 
 	dsubu	$6,$6,$9
 
-.Loop0:	ld	$3,-16($5)
+L(Loop0):	ld	$3,-16($5)
 	daddiu	$4,$4,-8
 	daddiu	$5,$5,-8
 	daddiu	$9,$9,-1
@@ -63,13 +64,13 @@ __mpn_lshift:
 	dsrl	$12,$3,$13
 	move	$10,$3
 	or	$8,$11,$12
-	bne	$9,$0,.Loop0
-	 sd	$8,0($4)
+	bne	$9,$0,L(Loop0)
+	sd	$8,0($4)
 
-.L0:	beq	$6,$0,.Lend
-	 nop
+L(L0):	beq	$6,$0,L(Lend)
+	nop
 
-.Loop:	ld	$3,-16($5)
+L(Loop):	ld	$3,-16($5)
 	daddiu	$4,$4,-32
 	daddiu	$6,$6,-4
 	dsll	$11,$10,$7
@@ -95,10 +96,10 @@ __mpn_lshift:
 
 	daddiu	$5,$5,-32
 	or	$8,$14,$9
-	bgtz	$6,.Loop
-	 sd	$8,0($4)
+	bgtz	$6,L(Loop)
+	sd	$8,0($4)
 
-.Lend:	dsll	$8,$10,$7
+L(Lend):	dsll	$8,$10,$7
 	j	$31
 	sd	$8,-8($4)
 	.end	__mpn_lshift
diff --git a/sysdeps/mips/mips64/mul_1.S b/sysdeps/mips/mips64/mul_1.S
index bf32953f43..c711783001 100644
--- a/sysdeps/mips/mips64/mul_1.S
+++ b/sysdeps/mips/mips64/mul_1.S
@@ -1,7 +1,8 @@
 /* MIPS3 __mpn_mul_1 -- Multiply a limb vector with a single limb and
  * store the product in a second limb vector.
  *
- * Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
+ * Copyright (C) 1992, 1994, 1995, 2000, 2002, 2003
+ * Free Software Foundation, Inc.
  *
  * This file is part of the GNU MP Library.
  *
@@ -22,6 +23,7 @@
  */
 
 #include <sysdep.h>
+#include <sys/asm.h>
 
 /* INPUT PARAMETERS
  * res_ptr	$4
@@ -38,10 +40,10 @@
 	.globl	__mpn_mul_1
 	.ent	__mpn_mul_1
 __mpn_mul_1:
-	.set    noreorder
 #ifdef __PIC__
-	.cpload t9
+	SETUP_GP /* ??? unused */
 #endif
+	.set    noreorder
 	.set    nomacro
 
  # warm up phase 0
@@ -52,14 +54,14 @@ __mpn_mul_1:
 	dmultu	$8,$7
 
 	daddiu	$6,$6,-1
-	beq	$6,$0,$LC0
-	 move	$2,$0		# zero cy2
+	beq	$6,$0,L(LC0)
+	move	$2,$0		# zero cy2
 
 	daddiu	$6,$6,-1
-	beq	$6,$0,$LC1
+	beq	$6,$0,L(LC1)
 	ld	$8,0($5)	# load new s1 limb as early as possible
 
-Loop:	mflo	$10
+L(Loop):	mflo	$10
 	mfhi	$9
 	daddiu	$5,$5,8
 	daddu	$10,$10,$2	# add old carry limb to low product limb
@@ -69,11 +71,11 @@ Loop:	mflo	$10
 	sltu	$2,$10,$2	# carry from previous addition -> $2
 	sd	$10,0($4)
 	daddiu	$4,$4,8
-	bne	$6,$0,Loop
-	 daddu	$2,$9,$2	# add high product limb and carry from addition
+	bne	$6,$0,L(Loop)
+	daddu	$2,$9,$2	# add high product limb and carry from addition
 
  # cool down phase 1
-$LC1:	mflo	$10
+L(LC1):	mflo	$10
 	mfhi	$9
 	daddu	$10,$10,$2
 	sltu	$2,$10,$2
@@ -83,7 +85,7 @@ $LC1:	mflo	$10
 	daddu	$2,$9,$2	# add high product limb and carry from addition
 
  # cool down phase 0
-$LC0:	mflo	$10
+L(LC0):	mflo	$10
 	mfhi	$9
 	daddu	$10,$10,$2
 	sltu	$2,$10,$2
diff --git a/sysdeps/mips/mips64/rshift.S b/sysdeps/mips/mips64/rshift.S
index f39c1b3314..e6a8a06d3d 100644
--- a/sysdeps/mips/mips64/rshift.S
+++ b/sysdeps/mips/mips64/rshift.S
@@ -1,6 +1,6 @@
 /* MIPS3 __mpn_rshift --
  *
- * Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+ * Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
  *
  * This file is part of the GNU MP Library.
  *
@@ -21,6 +21,7 @@
  */
 
 #include <sysdep.h>
+#include <sys/asm.h>
 
 /* INPUT PARAMETERS
  * res_ptr	$4
@@ -37,22 +38,22 @@
 	.globl	__mpn_rshift
 	.ent	__mpn_rshift
 __mpn_rshift:
-	.set	noreorder
 #ifdef __PIC__
-	.cpload t9
+	SETUP_GP /* ??? unused */
 #endif
+	.set	noreorder
 	.set	nomacro
 
 	ld	$10,0($5)	# load first limb
 	dsubu	$13,$0,$7
 	daddiu	$6,$6,-1
 	and	$9,$6,4-1	# number of limbs in first loop
-	beq	$9,$0,.L0	# if multiple of 4 limbs, skip first loop
-	 dsll	$2,$10,$13	# compute function result
+	beq	$9,$0,L(L0)	# if multiple of 4 limbs, skip first loop
+	dsll	$2,$10,$13	# compute function result
 
 	dsubu	$6,$6,$9
 
-.Loop0:	ld	$3,8($5)
+L(Loop0):	ld	$3,8($5)
 	daddiu	$4,$4,8
 	daddiu	$5,$5,8
 	daddiu	$9,$9,-1
@@ -60,13 +61,13 @@ __mpn_rshift:
 	dsll	$12,$3,$13
 	move	$10,$3
 	or	$8,$11,$12
-	bne	$9,$0,.Loop0
-	 sd	$8,-8($4)
+	bne	$9,$0,L(Loop0)
+	sd	$8,-8($4)
 
-.L0:	beq	$6,$0,.Lend
-	 nop
+L(L0):	beq	$6,$0,L(Lend)
+	nop
 
-.Loop:	ld	$3,8($5)
+L(Loop):	ld	$3,8($5)
 	daddiu	$4,$4,32
 	daddiu	$6,$6,-4
 	dsrl	$11,$10,$7
@@ -92,10 +93,10 @@ __mpn_rshift:
 
 	daddiu	$5,$5,32
 	or	$8,$14,$9
-	bgtz	$6,.Loop
-	 sd	$8,-8($4)
+	bgtz	$6,L(Loop)
+	sd	$8,-8($4)
 
-.Lend:	dsrl	$8,$10,$7
+L(Lend):	dsrl	$8,$10,$7
 	j	$31
 	sd	$8,0($4)
 	.end	__mpn_rshift
diff --git a/sysdeps/mips/mips64/sub_n.S b/sysdeps/mips/mips64/sub_n.S
index d566658bfd..aa8b0dcf9a 100644
--- a/sysdeps/mips/mips64/sub_n.S
+++ b/sysdeps/mips/mips64/sub_n.S
@@ -1,7 +1,7 @@
 /* MIPS3 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
  * store difference in a third limb vector.
  *
- * Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+ * Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
  *
  * This file is part of the GNU MP Library.
  *
@@ -22,6 +22,7 @@
  */
 
 #include <sysdep.h>
+#include <sys/asm.h>
 
 /* INPUT PARAMETERS
  * res_ptr	$4
@@ -38,10 +39,10 @@
 	.globl	__mpn_sub_n
 	.ent	__mpn_sub_n
 __mpn_sub_n:
-	.set	noreorder
 #ifdef __PIC__
-	.cpload t9
+	SETUP_GP /* ??? unused */
 #endif
+	.set	noreorder
 	.set	nomacro
 
 	ld	$10,0($5)
@@ -49,12 +50,12 @@ __mpn_sub_n:
 
 	daddiu	$7,$7,-1
 	and	$9,$7,4-1	# number of limbs in first loop
-	beq	$9,$0,.L0	# if multiple of 4 limbs, skip first loop
-	 move	$2,$0
+	beq	$9,$0,L(L0)	# if multiple of 4 limbs, skip first loop
+	move	$2,$0
 
 	dsubu	$7,$7,$9
 
-.Loop0:	daddiu	$9,$9,-1
+L(Loop0):	daddiu	$9,$9,-1
 	ld	$12,8($5)
 	daddu	$11,$11,$2
 	ld	$13,8($6)
@@ -68,13 +69,13 @@ __mpn_sub_n:
 	daddiu	$6,$6,8
 	move	$10,$12
 	move	$11,$13
-	bne	$9,$0,.Loop0
-	 daddiu	$4,$4,8
+	bne	$9,$0,L(Loop0)
+	daddiu	$4,$4,8
 
-.L0:	beq	$7,$0,.Lend
-	 nop
+L(L0):	beq	$7,$0,L(Lend)
+	nop
 
-.Loop:	daddiu	$7,$7,-4
+L(Loop):	daddiu	$7,$7,-4
 
 	ld	$12,8($5)
 	daddu	$11,$11,$2
@@ -115,10 +116,10 @@ __mpn_sub_n:
 	daddiu	$5,$5,32
 	daddiu	$6,$6,32
 
-	bne	$7,$0,.Loop
-	 daddiu	$4,$4,32
+	bne	$7,$0,L(Loop)
+	daddiu	$4,$4,32
 
-.Lend:	daddu	$11,$11,$2
+L(Lend):	daddu	$11,$11,$2
 	sltu	$8,$11,$2
 	dsubu	$11,$10,$11
 	sltu	$2,$10,$11
diff --git a/sysdeps/mips/mips64/submul_1.S b/sysdeps/mips/mips64/submul_1.S
index 510923f366..4971b992a1 100644
--- a/sysdeps/mips/mips64/submul_1.S
+++ b/sysdeps/mips/mips64/submul_1.S
@@ -1,7 +1,8 @@
 /* MIPS3 __mpn_submul_1 -- Multiply a limb vector with a single limb and
  * subtract the product from a second limb vector.
  *
- * Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
+ * Copyright (C) 1992, 1994, 1995, 2000, 2002, 2003
+ * Free Software Foundation, Inc.
  *
  * This file is part of the GNU MP Library.
  *
@@ -22,6 +23,7 @@
  */
 
 #include <sysdep.h>
+#include <sys/asm.h>
 
 /* INPUT PARAMETERS
  * res_ptr	$4
@@ -38,10 +40,10 @@
 	.globl	__mpn_submul_1
 	.ent	__mpn_submul_1
 __mpn_submul_1:
-	.set    noreorder
 #ifdef __PIC__
-	.cpload t9
+	SETUP_GP /* ??? unused */
 #endif
+	.set    noreorder
 	.set    nomacro
 
  # warm up phase 0
@@ -52,14 +54,14 @@ __mpn_submul_1:
 	dmultu	$8,$7
 
 	daddiu	$6,$6,-1
-	beq	$6,$0,$LC0
-	 move	$2,$0		# zero cy2
+	beq	$6,$0,L(LC0)
+	move	$2,$0		# zero cy2
 
 	daddiu	$6,$6,-1
-	beq	$6,$0,$LC1
+	beq	$6,$0,L(LC1)
 	ld	$8,0($5)	# load new s1 limb as early as possible
 
-Loop:	ld	$10,0($4)
+L(Loop):	ld	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	daddiu	$5,$5,8
@@ -73,11 +75,11 @@ Loop:	ld	$10,0($4)
 	daddu	$2,$2,$10
 	sd	$3,0($4)
 	daddiu	$4,$4,8
-	bne	$6,$0,Loop
-	 daddu	$2,$9,$2	# add high product limb and carry from addition
+	bne	$6,$0,L(Loop)
+	daddu	$2,$9,$2	# add high product limb and carry from addition
 
  # cool down phase 1
-$LC1:	ld	$10,0($4)
+L(LC1):	ld	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	daddu	$3,$3,$2
@@ -91,7 +93,7 @@ $LC1:	ld	$10,0($4)
 	daddu	$2,$9,$2	# add high product limb and carry from addition
 
  # cool down phase 0
-$LC0:	ld	$10,0($4)
+L(LC0):	ld	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	daddu	$3,$3,$2
diff --git a/sysdeps/mips/mul_1.S b/sysdeps/mips/mul_1.S
index 255623edeb..72f538670c 100644
--- a/sysdeps/mips/mul_1.S
+++ b/sysdeps/mips/mul_1.S
@@ -1,7 +1,7 @@
 /* MIPS __mpn_mul_1 -- Multiply a limb vector with a single limb and
 store the product in a second limb vector.
 
-Copyright (C) 1995, 1998, 2000 Free Software Foundation, Inc.
+Copyright (C) 1995, 1998, 2000, 2002, 2003 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -46,14 +46,14 @@ ENTRY (__mpn_mul_1)
 	multu	$8,$7
 
 	addiu	$6,$6,-1
-	beq	$6,$0,$LC0
+	beq	$6,$0,L(LC0)
 	move	$2,$0		/* zero cy2 */
 
 	addiu	$6,$6,-1
-	beq	$6,$0,$LC1
+	beq	$6,$0,L(LC1)
 	lw	$8,0($5)	/* load new s1 limb as early as possible */
 
-Loop:	mflo	$10
+L(Loop):	mflo	$10
 	mfhi	$9
 	addiu	$5,$5,4
 	addu	$10,$10,$2	/* add old carry limb to low product limb */
@@ -63,11 +63,11 @@ Loop:	mflo	$10
 	sltu	$2,$10,$2	/* carry from previous addition -> $2 */
 	sw	$10,0($4)
 	addiu	$4,$4,4
-	bne	$6,$0,Loop	/* should be "bnel" */
+	bne	$6,$0,L(Loop)	/* should be "bnel" */
 	addu	$2,$9,$2	/* add high product limb and carry from addition */
 
 	/* cool down phase 1 */
-$LC1:	mflo	$10
+L(LC1):	mflo	$10
 	mfhi	$9
 	addu	$10,$10,$2
 	sltu	$2,$10,$2
@@ -77,7 +77,7 @@ $LC1:	mflo	$10
 	addu	$2,$9,$2	/* add high product limb and carry from addition */
 
 	/* cool down phase 0 */
-$LC0:	mflo	$10
+L(LC0):	mflo	$10
 	mfhi	$9
 	addu	$10,$10,$2
 	sltu	$2,$10,$2
diff --git a/sysdeps/mips/rshift.S b/sysdeps/mips/rshift.S
index 46df86b5dc..cb688fe2ac 100644
--- a/sysdeps/mips/rshift.S
+++ b/sysdeps/mips/rshift.S
@@ -1,6 +1,6 @@
 /* MIPS2 __mpn_rshift --
 
-Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -41,12 +41,12 @@ ENTRY (__mpn_rshift)
 	subu	$13,$0,$7
 	addiu	$6,$6,-1
 	and	$9,$6,4-1	/* number of limbs in first loop */
-	beq	$9,$0,.L0	/* if multiple of 4 limbs, skip first loop*/
+	beq	$9,$0,L(L0)	/* if multiple of 4 limbs, skip first loop*/
 	 sll	$2,$10,$13	/* compute function result */
 
 	subu	$6,$6,$9
 
-.Loop0:	lw	$3,4($5)
+L(Loop0):	lw	$3,4($5)
 	addiu	$4,$4,4
 	addiu	$5,$5,4
 	addiu	$9,$9,-1
@@ -54,13 +54,13 @@ ENTRY (__mpn_rshift)
 	sll	$12,$3,$13
 	move	$10,$3
 	or	$8,$11,$12
-	bne	$9,$0,.Loop0
+	bne	$9,$0,L(Loop0)
 	 sw	$8,-4($4)
 
-.L0:	beq	$6,$0,.Lend
+L(L0):	beq	$6,$0,L(Lend)
 	 nop
 
-.Loop:	lw	$3,4($5)
+L(Loop):	lw	$3,4($5)
 	addiu	$4,$4,16
 	addiu	$6,$6,-4
 	srl	$11,$10,$7
@@ -86,10 +86,10 @@ ENTRY (__mpn_rshift)
 
 	addiu	$5,$5,16
 	or	$8,$14,$9
-	bgtz	$6,.Loop
+	bgtz	$6,L(Loop)
 	 sw	$8,-4($4)
 
-.Lend:	srl	$8,$10,$7
+L(Lend):	srl	$8,$10,$7
 	j	$31
 	sw	$8,0($4)
 	END (__mpn_rshift)
diff --git a/sysdeps/mips/sub_n.S b/sysdeps/mips/sub_n.S
index 633f3e3143..53fa019343 100644
--- a/sysdeps/mips/sub_n.S
+++ b/sysdeps/mips/sub_n.S
@@ -1,7 +1,7 @@
 /* MIPS2 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
 store difference in a third limb vector.
 
-Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -43,12 +43,12 @@ ENTRY (__mpn_sub_n)
 
 	addiu	$7,$7,-1
 	and	$9,$7,4-1	/* number of limbs in first loop */
-	beq	$9,$0,.L0	/* if multiple of 4 limbs, skip first loop */
-	 move	$2,$0
+	beq	$9,$0,L(L0)	/* if multiple of 4 limbs, skip first loop */
+	move	$2,$0
 
 	subu	$7,$7,$9
 
-.Loop0:	addiu	$9,$9,-1
+L(Loop0):	addiu	$9,$9,-1
 	lw	$12,4($5)
 	addu	$11,$11,$2
 	lw	$13,4($6)
@@ -62,13 +62,13 @@ ENTRY (__mpn_sub_n)
 	addiu	$6,$6,4
 	move	$10,$12
 	move	$11,$13
-	bne	$9,$0,.Loop0
-	 addiu	$4,$4,4
+	bne	$9,$0,L(Loop0)
+	addiu	$4,$4,4
 
-.L0:	beq	$7,$0,.Lend
-	 nop
+L(L0):	beq	$7,$0,L(Lend)
+	nop
 
-.Loop:	addiu	$7,$7,-4
+L(Loop):	addiu	$7,$7,-4
 
 	lw	$12,4($5)
 	addu	$11,$11,$2
@@ -109,10 +109,10 @@ ENTRY (__mpn_sub_n)
 	addiu	$5,$5,16
 	addiu	$6,$6,16
 
-	bne	$7,$0,.Loop
-	 addiu	$4,$4,16
+	bne	$7,$0,L(Loop)
+	addiu	$4,$4,16
 
-.Lend:	addu	$11,$11,$2
+L(Lend):	addu	$11,$11,$2
 	sltu	$8,$11,$2
 	subu	$11,$10,$11
 	sltu	$2,$10,$11
diff --git a/sysdeps/mips/submul_1.S b/sysdeps/mips/submul_1.S
index 7de9ca74f8..4c8a612650 100644
--- a/sysdeps/mips/submul_1.S
+++ b/sysdeps/mips/submul_1.S
@@ -1,7 +1,7 @@
 /* MIPS __mpn_submul_1 -- Multiply a limb vector with a single limb and
 subtract the product from a second limb vector.
 
-Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+Copyright (C) 1995, 2000, 2002, 2003 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -46,14 +46,14 @@ ENTRY (__mpn_submul_1)
 	multu	$8,$7
 
 	addiu	$6,$6,-1
-	beq	$6,$0,$LC0
+	beq	$6,$0,L(LC0)
 	move	$2,$0		/* zero cy2 */
 
 	addiu	$6,$6,-1
-	beq	$6,$0,$LC1
+	beq	$6,$0,L(LC1)
 	lw	$8,0($5)	/* load new s1 limb as early as possible */
 
-Loop:	lw	$10,0($4)
+L(Loop):	lw	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	addiu	$5,$5,4
@@ -67,11 +67,11 @@ Loop:	lw	$10,0($4)
 	addu	$2,$2,$10
 	sw	$3,0($4)
 	addiu	$4,$4,4
-	bne	$6,$0,Loop	/* should be "bnel" */
+	bne	$6,$0,L(Loop)	/* should be "bnel" */
 	addu	$2,$9,$2	/* add high product limb and carry from addition */
 
 	/* cool down phase 1 */
-$LC1:	lw	$10,0($4)
+L(LC1):	lw	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	addu	$3,$3,$2
@@ -85,7 +85,7 @@ $LC1:	lw	$10,0($4)
 	addu	$2,$9,$2	/* add high product limb and carry from addition */
 
 	/* cool down phase 0 */
-$LC0:	lw	$10,0($4)
+L(LC0):	lw	$10,0($4)
 	mflo	$3
 	mfhi	$9
 	addu	$3,$3,$2