author      Ulrich Drepper <drepper@redhat.com>    1999-07-27 04:43:32 +0000
committer   Ulrich Drepper <drepper@redhat.com>    1999-07-27 04:43:32 +0000
commit      8cb079d41b2108d7a6db4c91a51156464912548b
tree        6cbfca0ae13331d50e1559c50c9a128dec6082a0 /sysdeps
parent      f05f5ca3857fbf83460003f12e81667c2f60851e
Update.
1999-07-25  Jakub Jelinek  <jj@ultra.linux.cz>

	* sysdeps/sparc/sparc64/add_n.S: Avoid using %g2, %g3, %g7 registers
	as much as possible. Declare them using .register pseudo-op if they
	are still used.
	* sysdeps/sparc/sparc64/lshift.S: Likewise.
	* sysdeps/sparc/sparc64/memchr.S: Likewise.
	* sysdeps/sparc/sparc64/memcmp.S: Likewise.
	* sysdeps/sparc/sparc64/memcpy.S: Likewise.
	* sysdeps/sparc/sparc64/memset.S: Likewise.
	* sysdeps/sparc/sparc64/rawmemchr.S: Likewise.
	* sysdeps/sparc/sparc64/rshift.S: Likewise.
	* sysdeps/sparc/sparc64/stpcpy.S: Likewise.
	* sysdeps/sparc/sparc64/stpncpy.S: Likewise.
	* sysdeps/sparc/sparc64/strcat.S: Likewise.
	* sysdeps/sparc/sparc64/strchr.S: Likewise.
	* sysdeps/sparc/sparc64/strcmp.S: Likewise.
	* sysdeps/sparc/sparc64/strcpy.S: Likewise.
	* sysdeps/sparc/sparc64/strcspn.S: Likewise.
	* sysdeps/sparc/sparc64/strlen.S: Likewise.
	* sysdeps/sparc/sparc64/strncmp.S: Likewise.
	* sysdeps/sparc/sparc64/strncpy.S: Likewise.
	* sysdeps/sparc/sparc64/strpbrk.S: Likewise.
	* sysdeps/sparc/sparc64/strspn.S: Likewise.
	* sysdeps/sparc/sparc64/sub_n.S: Likewise.
	* sysdeps/sparc/sparc64/dl-machine.h: Likewise.
	Optimize trampoline code for .plt4-.plt32767.
	Fix trampolines for .plt32768+.

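The .register pseudo-op named above is a gas directive: the 64-bit SPARC ABI
treats %g2/%g3 as application-reserved and %g7 as system-reserved, so newer
assemblers reject V9 code that touches these registers unless they are
declared first.  A minimal standalone sketch of the declarations the hunks
below add (the "example" label and its body are illustrative only, not part
of the patch):

	.register	%g2, #scratch	/* declare before first use */
	.register	%g3, #scratch
	.register	%g7, #scratch

	.text
	.align	4
example:				/* hypothetical function */
	mov	1, %g2			/* legal only because of the	*/
	retl				/* declarations above		*/
	 mov	%g2, %o0
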
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/sparc/sparc64/add_n.S        |  14
-rw-r--r--  sysdeps/sparc/sparc64/dl-machine.h   | 156
-rw-r--r--  sysdeps/sparc/sparc64/lshift.S       |   3
-rw-r--r--  sysdeps/sparc/sparc64/memchr.S       |   2
-rw-r--r--  sysdeps/sparc/sparc64/memcmp.S       |  10
-rw-r--r--  sysdeps/sparc/sparc64/memcpy.S       |   3
-rw-r--r--  sysdeps/sparc/sparc64/memset.S       |  32
-rw-r--r--  sysdeps/sparc/sparc64/rawmemchr.S    |   6
-rw-r--r--  sysdeps/sparc/sparc64/rshift.S       |   3
-rw-r--r--  sysdeps/sparc/sparc64/stpcpy.S       |   5
-rw-r--r--  sysdeps/sparc/sparc64/stpncpy.S      |   3
-rw-r--r--  sysdeps/sparc/sparc64/strcat.S       |   3
-rw-r--r--  sysdeps/sparc/sparc64/strchr.S       |   3
-rw-r--r--  sysdeps/sparc/sparc64/strcmp.S       |   5
-rw-r--r--  sysdeps/sparc/sparc64/strcpy.S       |   5
-rw-r--r--  sysdeps/sparc/sparc64/strcspn.S      |  71
-rw-r--r--  sysdeps/sparc/sparc64/strlen.S       |  42
-rw-r--r--  sysdeps/sparc/sparc64/strncmp.S      |   3
-rw-r--r--  sysdeps/sparc/sparc64/strncpy.S      |   3
-rw-r--r--  sysdeps/sparc/sparc64/strpbrk.S      |  49
-rw-r--r--  sysdeps/sparc/sparc64/strspn.S       |  71
-rw-r--r--  sysdeps/sparc/sparc64/sub_n.S        |  14
22 files changed, 284 insertions, 222 deletions
diff --git a/sysdeps/sparc/sparc64/add_n.S b/sysdeps/sparc/sparc64/add_n.S
index 68bb008a7d..2e88a8c0c0 100644
--- a/sysdeps/sparc/sparc64/add_n.S
+++ b/sysdeps/sparc/sparc64/add_n.S
@@ -1,7 +1,7 @@
 /* SPARC v9 __mpn_add_n -- Add two limb vectors of the same length > 0 and
    store sum in a third limb vector.
   
-   Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+   Copyright (C) 1995, 1996, 1997, 1999 Free Software Foundation, Inc.
 
    This file is part of the GNU MP Library.
 
@@ -33,23 +33,23 @@
 
 ENTRY(__mpn_add_n)
 
-	sub	%g0,%o3,%g3
+	sub	%g0,%o3,%g5
 	sllx	%o3,3,%g1
 	add	%o1,%g1,%o1		! make s1_ptr point at end
 	add	%o2,%g1,%o2		! make s2_ptr point at end
 	add	%o0,%g1,%o0		! make res_ptr point at end
 	mov	0,%o4			! clear carry variable
-	sllx	%g3,3,%o5		! compute initial address index
+	sllx	%g5,3,%o5		! compute initial address index
 
 1:	ldx	[%o2+%o5],%g1		! load s2 limb
-	add	%g3,1,%g3		! increment loop count
-	ldx	[%o1+%o5],%g2		! load s1 limb
+	add	%g5,1,%g5		! increment loop count
+	ldx	[%o1+%o5],%o3		! load s1 limb
 	addcc	%g1,%o4,%g1		! add s2 limb and carry variable
 	movcc	%xcc,0,%o4		! if carry-out, o4 was 1; clear it
-	addcc	%g1,%g2,%g1		! add s1 limb to sum
+	addcc	%g1,%o3,%g1		! add s1 limb to sum
 	stx	%g1,[%o0+%o5]		! store result
 	add	%o5,8,%o5		! increment address index
-	brnz,pt	%g3,1b
+	brnz,pt	%g5,1b
 	 movcs	%xcc,1,%o4		! if s1 add gave carry, record it
 
 	retl
diff --git a/sysdeps/sparc/sparc64/dl-machine.h b/sysdeps/sparc/sparc64/dl-machine.h
index 554321ffe2..855e93acb6 100644
--- a/sysdeps/sparc/sparc64/dl-machine.h
+++ b/sysdeps/sparc/sparc64/dl-machine.h
@@ -64,7 +64,7 @@ elf_machine_load_address (void)
   return pc - *(Elf64_Addr *)(elf_pic_register + la);
 }
 
-/* We have 3 cases to handle.  And we code different code sequences
+/* We have 4 cases to handle.  And we code different code sequences
    for each one.  I love V9 code models...  */
 static inline void
 elf_machine_fixup_plt(struct link_map *map, const Elf64_Rela *reloc,
@@ -76,9 +76,14 @@ elf_machine_fixup_plt(struct link_map *map, const Elf64_Rela *reloc,
   /* Now move plt_vaddr up to the call instruction.  */
   plt_vaddr += (2 * 4);
 
+  /* PLT entries .PLT32768 and above look always the same.  */
+  if (__builtin_expect (reloc->r_addend, 0) != 0)
+    {
+      *reloc_addr = value - map->l_addr;
+    }
   /* 32-bit Sparc style, the target is in the lower 32-bits of
      address space.  */
-  if ((value >> 32) == 0)
+  else if ((value >> 32) == 0)
     {
       /* sethi	%hi(target), %g1
 	 jmpl	%g1 + %lo(target), %g0  */
@@ -126,26 +131,26 @@ elf_machine_fixup_plt(struct link_map *map, const Elf64_Rela *reloc,
 	     constant formation code I wrote.  -DaveM  */
 
       /* sethi	%hh(value), %g1
-	 sethi	%lm(value), %g2
-	 or	%g1, %hl(value), %g1
-	 or	%g2, %lo(value), %g2
+	 sethi	%lm(value), %g5
+	 or	%g1, %hm(value), %g1
+	 or	%g5, %lo(value), %g5
 	 sllx	%g1, 32, %g1
-	 jmpl	%g1 + %g2, %g0
+	 jmpl	%g1 + %g5, %g0
 	  nop  */
 
-      insns[6] = 0x81c04002;
+      insns[6] = 0x81c04005;
       __asm __volatile ("flush %0 + 24" : : "r" (insns));
 
       insns[5] = 0x83287020;
       __asm __volatile ("flush %0 + 20" : : "r" (insns));
 
-      insns[4] = 0x8410a000 | (low32 & 0x3ff);
+      insns[4] = 0x8a116000 | (low32 & 0x3ff);
       __asm __volatile ("flush %0 + 16" : : "r" (insns));
 
       insns[3] = 0x82106000 | (high32 & 0x3ff);
       __asm __volatile ("flush %0 + 12" : : "r" (insns));
 
-      insns[2] = 0x05000000 | (low32 >> 10);
+      insns[2] = 0x0b000000 | (low32 >> 10);
       __asm __volatile ("flush %0 + 8" : : "r" (insns));
 
       insns[1] = 0x03000000 | (high32 >> 10);
@@ -381,33 +386,44 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
       /* PLT0 looks like:
 
 	 save	%sp, -192, %sp
-	 sethi	%hh(_dl_runtime_{resolve,profile}_0), %g3
-	 sethi	%lm(_dl_runtime_{resolve,profile}_0), %g4
-	 or	%g3, %hm(_dl_runtime_{resolve,profile}_0), %g3
-	 or	%g4, %lo(_dl_runtime_{resolve,profile}_0), %g4
-	 sllx	%g3, 32, %g3
-	 jmpl	%g3 + %g4, %o0
-	  nop
-
-	 PLT1 is similar except we jump to _dl_runtime_{resolve,profile}_1.  */
+	 sethi	%hh(_dl_runtime_{resolve,profile}_0), %l0
+	 sethi	%lm(_dl_runtime_{resolve,profile}_0), %l1
+	 or	%l0, %hm(_dl_runtime_{resolve,profile}_0), %l0
+	 or	%l1, %lo(_dl_runtime_{resolve,profile}_0), %l1
+	 sllx	%l0, 32, %l0
+	 jmpl	%l0 + %l1, %l6
+	  sethi	%hi(0xffc00), %l2
+       */
 
       plt[0] = 0x9de3bf40;
-      plt[1] = 0x07000000 | (res0_addr >> (64 - 22));
-      plt[2] = 0x09000000 | ((res0_addr >> 10) & 0x003fffff);
-      plt[3] = 0x8610e000 | ((res0_addr >> 32) & 0x3ff);
-      plt[4] = 0x88112000 | (res0_addr & 0x3ff);
-      plt[5] = 0x8728f020;
-      plt[6] = 0x91c0c004;
-      plt[7] = 0x01000000;
+      plt[1] = 0x21000000 | (res0_addr >> (64 - 22));
+      plt[2] = 0x23000000 | ((res0_addr >> 10) & 0x003fffff);
+      plt[3] = 0xa0142000 | ((res0_addr >> 32) & 0x3ff);
+      plt[4] = 0xa2146000 | (res0_addr & 0x3ff);
+      plt[5] = 0xa12c3020;
+      plt[6] = 0xadc40011;
+      plt[7] = 0x250003ff;
+
+      /* PLT1 looks like:
+
+	 save	%sp, -192, %sp
+	 sethi	%hh(_dl_runtime_{resolve,profile}_1), %l0
+	 sethi	%lm(_dl_runtime_{resolve,profile}_1), %l1
+	 or	%l0, %hm(_dl_runtime_{resolve,profile}_1), %l0
+	 or	%l1, %lo(_dl_runtime_{resolve,profile}_1), %l1
+	 sllx	%l0, 32, %l0
+	 jmpl	%l0 + %l1, %l6
+	  srlx	%g1, 12, %o1
+       */
 
       plt[8 + 0] = 0x9de3bf40;
-      plt[8 + 1] = 0x07000000 | (res1_addr >> (64 - 22));
-      plt[8 + 2] = 0x09000000 | ((res1_addr >> 10) & 0x003fffff);
-      plt[8 + 3] = 0x8610e000 | ((res1_addr >> 32) & 0x3ff);
-      plt[8 + 4] = 0x88112000 | (res1_addr & 0x3ff);
-      plt[8 + 5] = 0x8728f020;
-      plt[8 + 6] = 0x91c0c004;
-      plt[8 + 7] = 0x01000000;
+      plt[8 + 1] = 0x21000000 | (res1_addr >> (64 - 22));
+      plt[8 + 2] = 0x23000000 | ((res1_addr >> 10) & 0x003fffff);
+      plt[8 + 3] = 0xa0142000 | ((res1_addr >> 32) & 0x3ff);
+      plt[8 + 4] = 0xa2146000 | (res1_addr & 0x3ff);
+      plt[8 + 5] = 0xa12c3020;
+      plt[8 + 6] = 0xadc40011;
+      plt[8 + 7] = 0x9330700c;
 
       /* Now put the magic cookie at the beginning of .PLT3
 	 Entry .PLT4 is unused by this implementation.  */
@@ -426,28 +442,27 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
 	.type	" #tramp_name "_0, @function
 	.align	32
 " #tramp_name "_0:
-	ldx	[%o0 + 32 + 8], %l0
-	sethi	%hi(1048576), %g2
-	sub	%g1, %o0, %o0
-	xor	%g2, -20, %g2
-	sethi	%hi(5120), %g3
-	add	%o0, %g2, %o0
-	sethi	%hi(32768), %o2
-	udivx	%o0, %g3, %g3
-	sllx	%g3, 2, %g1
-	add	%g1, %g3, %g1
-	sllx	%g1, 10, %g2
-	sllx	%g1, 5, %g1
-	sub	%o0, %g2, %o0
-	udivx	%o0, 24, %o0
-	add	%o0, %o2, %o0
-	add	%g1, %o0, %g1
-	sllx	%g1, 1, %o1
-	mov	%l0, %o0
-	add	%o1, %g1, %o1
+	! sethi   %hi(1047552), %l2 - Done in .PLT0
+	ldx	[%l6 + 32 + 8], %o0
+	sub     %g1, %l6, %l0
+	xor     %l2, -1016, %l2
+	sethi   %hi(5120), %l3
+	add     %l0, %l2, %l0
+	sethi   %hi(32768), %l4
+	udivx   %l0, %l3, %l3
+	sllx    %l3, 2, %l1
+	add     %l1, %l3, %l1
+	sllx    %l1, 10, %l2
+	sllx    %l1, 5, %l1
+	sub     %l0, %l2, %l0
+	udivx   %l0, 24, %l0
+	add     %l0, %l4, %l0
+	add     %l1, %l0, %l1
+	add     %l1, %l1, %l0
+	add     %l0, %l1, %l0
 	mov	%i7, %o2
 	call	" #fixup_name "
-	 sllx	%o1, 3, %o1
+	 sllx    %l0, 3, %o1
 	jmp	%o0
 	 restore
 	.size	" #tramp_name "_0, . - " #tramp_name "_0
@@ -456,13 +471,12 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
 	.type	" #tramp_name "_1, @function
 	.align	32
 " #tramp_name "_1:
-	srlx	%g1, 15, %o1
-	ldx	[%o0 + 8], %o0
-	sllx	%o1, 1, %o3
-	add	%o1, %o3, %o1
+	! srlx	%g1, 12, %o1 - Done in .PLT1
+	ldx	[%l6 + 8], %o0
+	add	%o1, %o1, %o3
 	mov	%i7, %o2
 	call	" #fixup_name "
-	 sllx	%o1, 3, %o1
+	 add	%o1, %o3, %o1
 	jmp	%o0
 	 restore
 	.size	" #tramp_name "_1, . - " #tramp_name "_1
@@ -513,17 +527,17 @@ _dl_start_user:
    /* Save the user entry point address in %l0.  */
 	mov	%o0,%l0
   /* Store the highest stack address.  */
-	sethi	%hi(__libc_stack_end), %g2
-	or	%g2, %lo(__libc_stack_end), %g2
-	ldx	[%l7 + %g2], %l1
+	sethi	%hi(__libc_stack_end), %g5
+	or	%g5, %lo(__libc_stack_end), %g5
+	ldx	[%l7 + %g5], %l1
 	add	%sp, 6*8, %l2
 	stx	%l2, [%l1]
    /* See if we were run as a command with the executable file name as an
       extra leading argument.  If so, we must shift things around since we
       must keep the stack doubleword aligned.  */
-	sethi	%hi(_dl_skip_args), %g2
-	or	%g2, %lo(_dl_skip_args), %g2
-	ldx	[%l7+%g2], %i0
+	sethi	%hi(_dl_skip_args), %g5
+	or	%g5, %lo(_dl_skip_args), %g5
+	ldx	[%l7+%g5], %i0
 	ld	[%i0], %i0
 	brz,pt	%i0, 2f
 	 nop
@@ -555,10 +569,10 @@ _dl_start_user:
 	brnz,pt	%i3, 13b
 	 add	%i1, 16, %i1
   /* Load searchlist of the main object to pass to _dl_init_next.  */
-2:	sethi	%hi(_dl_main_searchlist), %g2
-	or	%g2, %lo(_dl_main_searchlist), %g2
-	ldx	[%l7+%g2], %g2
-	ldx	[%g2], %l1
+2:	sethi	%hi(_dl_main_searchlist), %g5
+	or	%g5, %lo(_dl_main_searchlist), %g5
+	ldx	[%l7+%g5], %g5
+	ldx	[%g5], %l1
    /* Call _dl_init_next to return the address of an initializer to run.  */
 3:	call	_dl_init_next
 	 mov	%l1, %o0
@@ -567,10 +581,10 @@ _dl_start_user:
 	jmpl	%o0, %o7
 	 sub	%o7, 24, %o7
    /* Clear the startup flag.  */
-4:	sethi	%hi(_dl_starting_up), %g2
-	or	%g2, %lo(_dl_starting_up), %g2
-	ldx	[%l7+%g2], %g2
-	st	%g0, [%g2]
+4:	sethi	%hi(_dl_starting_up), %g5
+	or	%g5, %lo(_dl_starting_up), %g5
+	ldx	[%l7+%g5], %g5
+	st	%g0, [%g5]
    /* Pass our finalizer function to the user in %g1.  */
 	sethi	%hi(_dl_fini), %g1
 	or	%g1, %lo(_dl_fini), %g1
diff --git a/sysdeps/sparc/sparc64/lshift.S b/sysdeps/sparc/sparc64/lshift.S
index 4f265ad96b..81b1dbeb7c 100644
--- a/sysdeps/sparc/sparc64/lshift.S
+++ b/sysdeps/sparc/sparc64/lshift.S
@@ -27,6 +27,9 @@
    size		%o2
    cnt		%o3  */
 
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+
 ENTRY(__mpn_lshift)
 	sllx	%o2,3,%g1
 	add	%o1,%g1,%o1	! make %o1 point at end of src
diff --git a/sysdeps/sparc/sparc64/memchr.S b/sysdeps/sparc/sparc64/memchr.S
index 87e9022779..1662f60d88 100644
--- a/sysdeps/sparc/sparc64/memchr.S
+++ b/sysdeps/sparc/sparc64/memchr.S
@@ -35,6 +35,8 @@
 #ifndef XCC
 #define XCC xcc
 #define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
 #endif
 
 	/* Normally, this uses
diff --git a/sysdeps/sparc/sparc64/memcmp.S b/sysdeps/sparc/sparc64/memcmp.S
index bd3253ce5c..944751bc61 100644
--- a/sysdeps/sparc/sparc64/memcmp.S
+++ b/sysdeps/sparc/sparc64/memcmp.S
@@ -25,6 +25,8 @@
 #ifndef XCC
 #define XCC xcc
 #define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
 #endif
 
 	.text
@@ -114,12 +116,12 @@ ENTRY(memcmp)
 
 	ldxa		[%o0] ASI_PNF, %g5		/* Load				*/
 	sub		%o1, %o0, %o1			/* IEU1				*/
-	ldxa		[%o0 + %o1] ASI_PNF, %g7	/* Load		Group		*/
+	ldxa		[%o0 + %o1] ASI_PNF, %g4	/* Load		Group		*/
 	add		%o0, 8, %o0			/* IEU0				*/
 
-11:	sllx		%g7, %g2, %o4			/* IEU0		Group		*/
-	ldxa		[%o0 + %o1] ASI_PNF, %g7	/* Load				*/
-	srlx		%g7, %g3, %o5			/* IEU0		Group		*/
+11:	sllx		%g4, %g2, %o4			/* IEU0		Group		*/
+	ldxa		[%o0 + %o1] ASI_PNF, %g4	/* Load				*/
+	srlx		%g4, %g3, %o5			/* IEU0		Group		*/
 	mov		%g5, %o3			/* IEU1				*/   
 
 	ldxa		[%o0] ASI_PNF, %g5		/* Load				*/
diff --git a/sysdeps/sparc/sparc64/memcpy.S b/sysdeps/sparc/sparc64/memcpy.S
index 180ff23e20..73057ff0a1 100644
--- a/sysdeps/sparc/sparc64/memcpy.S
+++ b/sysdeps/sparc/sparc64/memcpy.S
@@ -24,6 +24,9 @@
 #include <asm/asi.h>
 #ifndef XCC
 #define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g7, #scratch
 #endif
 #define FPRS_FEF	4
 
diff --git a/sysdeps/sparc/sparc64/memset.S b/sysdeps/sparc/sparc64/memset.S
index c2b23647f5..9c0f234a6e 100644
--- a/sysdeps/sparc/sparc64/memset.S
+++ b/sysdeps/sparc/sparc64/memset.S
@@ -39,7 +39,7 @@
 	.align		32
 ENTRY(memset)
 	andcc		%o1, 0xff, %o1
-	mov		%o0, %g3
+	mov		%o0, %o5
 	be,a,pt		%icc, 50f
 #ifndef USE_BPR
 	 srl		%o2, 0, %o1
@@ -79,19 +79,19 @@ ENTRY(memset)
 	blu,pn		%xcc, 9f
 	 andcc		%o0, 0x38, %g5
 	be,pn		%icc, 6f
-	 mov		64, %o5
+	 mov		64, %o4
 	andcc		%o0, 8, %g0
 	be,pn		%icc, 1f
-	 sub		%o5, %g5, %o5
+	 sub		%o4, %g5, %o4
 	stx		%o1, [%o0]
 	add		%o0, 8, %o0
-1:	andcc		%o5, 16, %g0
+1:	andcc		%o4, 16, %g0
 	be,pn		%icc, 1f
-	 sub		%o2, %o5, %o2
+	 sub		%o2, %o4, %o2
 	stx		%o1, [%o0]
 	stx		%o1, [%o0 + 8]
 	add		%o0, 16, %o0
-1:	andcc		%o5, 32, %g0
+1:	andcc		%o4, 32, %g0
 	be,pn		%icc, 7f
 	 andncc		%o2, 0x3f, %o3
 	stw		%o1, [%o0]
@@ -162,14 +162,14 @@ ENTRY(memset)
 1:	bne,a,pn	%xcc, 8f
 	 stb		%o1, [%o0]
 8:	retl
-	 mov		%g3, %o0
+	 mov		%o5, %o0
 17:	brz,pn		%o2, 0f
 8:	 add		%o0, 1, %o0
 	subcc		%o2, 1, %o2
 	bne,pt		%xcc, 8b
 	 stb		%o1, [%o0 - 1]
 0:	retl
-	 mov		%g3, %o0
+	 mov		%o5, %o0
 
 6:	stx		%o1, [%o0]
 	andncc		%o2, 0x3f, %o3
@@ -195,7 +195,7 @@ ENTRY(__bzero)
 #ifndef USE_BPR
 	srl		%o1, 0, %o1
 #endif
-	mov		%o0, %g3
+	mov		%o0, %o5
 50:	cmp		%o1, 7
 	bleu,pn		%xcc, 17f
 	 andcc		%o0, 3, %o2
@@ -220,19 +220,19 @@ ENTRY(__bzero)
 2:	blu,pn		%xcc, 9f
 	 andcc		%o0, 0x38, %o2
 	be,pn		%icc, 6f
-	 mov		64, %o5
+	 mov		64, %o4
 	andcc		%o0, 8, %g0
 	be,pn		%icc, 1f
-	 sub		%o5, %o2, %o5
+	 sub		%o4, %o2, %o4
 	stx		%g0, [%o0]
 	add		%o0, 8, %o0
-1:	andcc		%o5, 16, %g0
+1:	andcc		%o4, 16, %g0
 	be,pn		%icc, 1f
-	 sub		%o1, %o5, %o1
+	 sub		%o1, %o4, %o1
 	stx		%g0, [%o0]
 	stx		%g0, [%o0 + 8]
 	add		%o0, 16, %o0
-1:	andcc		%o5, 32, %g0
+1:	andcc		%o4, 32, %g0
 	be,pn		%icc, 7f
 	 andncc		%o1, 0x3f, %o3
 	stx		%g0, [%o0]
@@ -299,7 +299,7 @@ ENTRY(__bzero)
 1:	bne,a,pn	%xcc, 8f
 	 stb		%g0, [%o0]
 8:	retl
-	 mov		%g3, %o0
+	 mov		%o5, %o0
 17:	be,pn		%xcc, 13b
 	 orcc		%o1, 0, %g0
 	be,pn		%xcc, 0f
@@ -308,7 +308,7 @@ ENTRY(__bzero)
 	bne,pt		%xcc, 8b
 	 stb		%g0, [%o0 - 1]
 0:	retl
-	 mov		%g3, %o0
+	 mov		%o5, %o0
 END(__bzero)
 
 weak_alias(__bzero, bzero)
diff --git a/sysdeps/sparc/sparc64/rawmemchr.S b/sysdeps/sparc/sparc64/rawmemchr.S
index 8f48ed940a..bd80c88053 100644
--- a/sysdeps/sparc/sparc64/rawmemchr.S
+++ b/sysdeps/sparc/sparc64/rawmemchr.S
@@ -33,6 +33,8 @@
 #ifndef XCC
 #define XCC xcc
 #define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
 #endif
 
 	/* Normally, this uses
@@ -81,9 +83,9 @@ ENTRY(__rawmemchr)
 
 	sub		%o3, %g1, %o2			/* IEU0		Group		*/
 #ifdef EIGHTBIT_NOT_RARE
-	andn		%o2, %o3, %g7			/* IEU0		Group		*/
+	andn		%o2, %o3, %o5			/* IEU0		Group		*/
 	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
-	andcc		%g7, %g2, %g0			/* IEU1		Group		*/
+	andcc		%o5, %g2, %g0			/* IEU1		Group		*/
 #else
 	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
 	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
diff --git a/sysdeps/sparc/sparc64/rshift.S b/sysdeps/sparc/sparc64/rshift.S
index f43d25efe8..2394363b6f 100644
--- a/sysdeps/sparc/sparc64/rshift.S
+++ b/sysdeps/sparc/sparc64/rshift.S
@@ -27,6 +27,9 @@
    size		%o2
    cnt		%o3  */
 
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+
 ENTRY(__mpn_rshift)
 	ldx	[%o1],%g2	! load first limb
 	sub	%g0,%o3,%o5	! negate shift count
diff --git a/sysdeps/sparc/sparc64/stpcpy.S b/sysdeps/sparc/sparc64/stpcpy.S
index 1dd48e701a..cb92e7bf38 100644
--- a/sysdeps/sparc/sparc64/stpcpy.S
+++ b/sysdeps/sparc/sparc64/stpcpy.S
@@ -22,6 +22,11 @@
 
 #include <sysdep.h>
 #include <asm/asi.h>
+#ifndef XCC
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g7, #scratch
+#endif
 
 	/* Normally, this uses
 	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
diff --git a/sysdeps/sparc/sparc64/stpncpy.S b/sysdeps/sparc/sparc64/stpncpy.S
index ee1a935d3e..92a1e5cbbe 100644
--- a/sysdeps/sparc/sparc64/stpncpy.S
+++ b/sysdeps/sparc/sparc64/stpncpy.S
@@ -27,6 +27,9 @@
 #ifndef XCC
 #define XCC xcc
 #define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g7, #scratch
 #endif
 
 	/* Normally, this uses
diff --git a/sysdeps/sparc/sparc64/strcat.S b/sysdeps/sparc/sparc64/strcat.S
index 55a6468665..482a18ff9a 100644
--- a/sysdeps/sparc/sparc64/strcat.S
+++ b/sysdeps/sparc/sparc64/strcat.S
@@ -25,6 +25,9 @@
 #ifndef XCC
 #define XCC xcc
 #define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g7, #scratch
 #endif
 
 	/* Normally, this uses
diff --git a/sysdeps/sparc/sparc64/strchr.S b/sysdeps/sparc/sparc64/strchr.S
index b017640914..361e0cefbb 100644
--- a/sysdeps/sparc/sparc64/strchr.S
+++ b/sysdeps/sparc/sparc64/strchr.S
@@ -25,6 +25,9 @@
 #ifndef XCC
 #define XCC xcc
 #define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g7, #scratch
 #endif
 
 	/* Normally, this uses
diff --git a/sysdeps/sparc/sparc64/strcmp.S b/sysdeps/sparc/sparc64/strcmp.S
index 1288688c0c..428eb9ccaf 100644
--- a/sysdeps/sparc/sparc64/strcmp.S
+++ b/sysdeps/sparc/sparc64/strcmp.S
@@ -22,6 +22,11 @@
 
 #include <sysdep.h>
 #include <asm/asi.h>
+#ifndef XCC
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g7, #scratch
+#endif
 
 	/* Normally, this uses
 	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
diff --git a/sysdeps/sparc/sparc64/strcpy.S b/sysdeps/sparc/sparc64/strcpy.S
index 773caf5656..353dde4865 100644
--- a/sysdeps/sparc/sparc64/strcpy.S
+++ b/sysdeps/sparc/sparc64/strcpy.S
@@ -22,6 +22,11 @@
 
 #include <sysdep.h>
 #include <asm/asi.h>
+#ifndef XCC
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g7, #scratch
+#endif
 
 	/* Normally, this uses
 	   ((xword - 0x0101010101010101) & 0x8080808080808080) test
diff --git a/sysdeps/sparc/sparc64/strcspn.S b/sysdeps/sparc/sparc64/strcspn.S
index 51876b0455..9603c6c955 100644
--- a/sysdeps/sparc/sparc64/strcspn.S
+++ b/sysdeps/sparc/sparc64/strcspn.S
@@ -26,6 +26,7 @@
 #define XCC xcc
 #define STACK_SIZE	128
 #define STACK_OFFSET	128+0x7ff
+	.register	%g2, #scratch
 #else
 #define STACK_SIZE	64
 #define STACK_OFFSET	64
@@ -37,7 +38,7 @@ ENTRY(strcspn)
 	sub		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	mov		1, %o4				/* IEU1				*/
 	stx		%o4, [%sp + STACK_OFFSET]	/* Store	Group		*/
-	mov		%o0, %g7			/* IEU0				*/
+	mov		%o0, %g4			/* IEU0				*/
 
 	stx		%g0, [%sp + STACK_OFFSET + 8]	/* Store	Group		*/
 	add		%sp, STACK_OFFSET, %o5		/* IEU0				*/
@@ -79,74 +80,74 @@ ENTRY(strcspn)
 	ldx		[%o0], %o2			/* Load		Group		*/
 4:	srlx		%o2, 59, %o3			/* IEU0		Group		*/
 
-	srlx		%o2, 56, %g3			/* IEU0		Group		*/
+	srlx		%o2, 56, %g5			/* IEU0		Group		*/
 5:	and		%o3, 0x18, %o3			/* IEU1				*/
-	andcc		%g3, 0x3f, %g3			/* IEU1		Group		*/
+	andcc		%g5, 0x3f, %g5			/* IEU1		Group		*/
 	ldx		[%o5 + %o3], %g2		/* Load				*/
 
 	srlx		%o2, 51, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 48, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 48, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	bne,pn		%xcc, 13f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 43, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 40, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 40, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	bne,pn		%xcc, 14f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 35, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 32, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 32, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	bne,pn		%xcc, 15f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 27, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 24, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 24, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	bne,pn		%xcc, 16f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 19, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 16, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 16, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	bne,pn		%xcc, 17f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 11, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
 	add		%o0, 8, %o0			/* IEU1				*/
-	srlx		%o2, 8, %g3			/* IEU0		Group		*/
+	srlx		%o2, 8, %g5			/* IEU0		Group		*/
 
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 	bne,pn		%xcc, 18f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
-	sllx		%o4, %g3, %g1			/* IEU0				*/
-	mov		%o2, %g3			/* IEU1				*/
+	sllx		%o4, %g5, %g1			/* IEU0				*/
+	mov		%o2, %g5			/* IEU1				*/
 	srlx		%o2, 3, %o3			/* IEU0		Group		*/
 
 	ldxa		[%o0] ASI_PNF, %o2		/* Load				*/
@@ -154,58 +155,58 @@ ENTRY(strcspn)
 	bne,pn		%xcc, 19f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
 
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
-	sllx		%o4, %g3, %g1			/* IEU0				*/
+	sllx		%o4, %g5, %g1			/* IEU0				*/
 	srlx		%o2, 59, %o3			/* IEU0		Group		*/
 
 	andcc		%g2, %g1, %g2			/* IEU1		Group		*/
 	be,pt		%xcc, 5b			/* CTI				*/
-	 srlx		%o2, 56, %g3			/* IEU0		Group		*/
+	 srlx		%o2, 56, %g5			/* IEU0		Group		*/
 	sub		%o0, 1, %o0			/* IEU1				*/
 
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 	.align		16
 19:	sub		%o0, 2, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 18:	sub		%o0, 3, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 17:	add		%o0, 4, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 16:	add		%o0, 3, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 15:	add		%o0, 2, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 14:	add		%o0, 1, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 13:	add		%sp, STACK_SIZE+32, %sp		/* IEU1				*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 	.align		16
 12:	sub		%o0, 1, %o0			/* IEU0		Group		*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU1				*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 END(strcspn)
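
The strcspn hunks above (and the strspn/strpbrk ones below) test string bytes
against a 256-bit membership table that the prologue, largely outside the
context shown here, builds in four xwords at [%sp + STACK_OFFSET]; the
repeated srlx / and 0x18 / and 0x3f sequences select the xword and the bit for
one byte.  A hedged sketch of that single-byte lookup, with illustrative
register names rather than the ones the real code cycles through:

	! Illustrative fragment, not from the patch: %o1 holds one string
	! byte, %o5 points at the four stack xwords holding the set.
	srlx	%o1, 3, %o3		! byte value / 8
	and	%o3, 0x18, %o3		! = (byte >> 6) * 8, offset of its xword
	ldx	[%o5 + %o3], %o2	! load that 64-bit slice of the set
	and	%o1, 0x3f, %o4		! bit number inside the slice
	mov	1, %g1
	sllx	%g1, %o4, %g1		! 1 << (byte & 63)
	andcc	%o2, %g1, %g0		! zero iff the byte is not in the set

strcspn pre-sets bit 0 of the table (the mov 1, %o4 / stx pair in its
prologue), so the terminating NUL always stops the scan; strspn leaves that
bit clear, so NUL falls outside the accept set and ends the scan the same way.
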
diff --git a/sysdeps/sparc/sparc64/strlen.S b/sysdeps/sparc/sparc64/strlen.S
index 3facf40746..aca7a42fdb 100644
--- a/sysdeps/sparc/sparc64/strlen.S
+++ b/sysdeps/sparc/sparc64/strlen.S
@@ -45,16 +45,16 @@ ENTRY(strlen)
 	or		%g1, %lo(0x01010101), %g1	/* IEU0		Group		*/
 	mov		%o0, %o1			/* IEU1				*/
 
-	sllx		%g1, 32, %g2			/* IEU0		Group 		*/
+	sllx		%g1, 32, %g4			/* IEU0		Group 		*/
 	andcc		%o0, 7, %g0			/* IEU1				*/
-	or		%g1, %g2, %g1			/* IEU0		Group		*/
+	or		%g1, %g4, %g1			/* IEU0		Group		*/
 	brz,pn		%o3, 13f			/* CTI+IEU1			*/
 
-	 sllx		%g1, 7, %g2			/* IEU0		Group		*/
+	 sllx		%g1, 7, %g4			/* IEU0		Group		*/
 	bne,a,pn	%icc, 15f			/* CTI				*/
 	 add		%o0, 1, %o0			/* IEU1				*/
 							/* %g1 = 0x0101010101010101	*
-							 * %g2 = 0x8080808080808080	*
+							 * %g4 = 0x8080808080808080	*
 							 * %o0 = string pointer		*
 							 * %o1 = start of string	*/
 1:	ldx		[%o0], %o3			/* Load		Group		*/
@@ -62,63 +62,63 @@ ENTRY(strlen)
 	add		%o0, 8, %o0			/* IEU1				*/
 2:	sub		%o3, %g1, %o2			/* IEU0		Group		*/
 #ifdef EIGHTBIT_NOT_RARE
-	andn		%o2, %o3, %g7			/* IEU0		Group		*/
+	andn		%o2, %o3, %o5			/* IEU0		Group		*/
 	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
-	andcc		%g7, %g2, %g0			/* IEU1		Group		*/
+	andcc		%o5, %g4, %g0			/* IEU1		Group		*/
 #else
 	ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
-	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
+	andcc		%o2, %g4, %g0			/* IEU1		Group		*/
 #endif
 
 	be,pt		%xcc, 2b			/* CTI				*/
 	 add		%o0, 8, %o0			/* IEU0				*/
- 	addcc		%o2, %g1, %g3			/* IEU1		Group		*/
+ 	addcc		%o2, %g1, %g5			/* IEU1		Group		*/
 #ifdef EIGHTBIT_NOT_RARE
-	srlx		%g7, 32, %g7			/* IEU0				*/
+	srlx		%o5, 32, %o5			/* IEU0				*/
 
-3:	andcc		%g7, %g2, %g0			/* IEU1		Group		*/
+3:	andcc		%o5, %g4, %g0			/* IEU1		Group		*/
 #else
 	srlx		%o2, 32, %o2			/* IEU0				*/
 
-3:	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
+3:	andcc		%o2, %g4, %g0			/* IEU1		Group		*/
 #endif
 	be,pn		%xcc, 4f			/* CTI				*/
-	 srlx		%g3, 56, %o2			/* IEU0				*/
+	 srlx		%g5, 56, %o2			/* IEU0				*/
 	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
 
 	be,pn		%icc, 12f			/* CTI				*/
-	 srlx		%g3, 48, %o2			/* IEU0				*/
+	 srlx		%g5, 48, %o2			/* IEU0				*/
 	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
 	be,pn		%icc, 11f			/* CTI				*/
 
-	 srlx		%g3, 40, %o2			/* IEU0				*/
+	 srlx		%g5, 40, %o2			/* IEU0				*/
 	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
 	be,pn		%icc, 10f			/* CTI				*/
-	 srlx		%g3, 32, %o2			/* IEU0				*/
+	 srlx		%g5, 32, %o2			/* IEU0				*/
 
 	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
 	be,pn		%icc, 9f			/* CTI				*/
-4:	 srlx		%g3, 24, %o2			/* IEU0				*/
+4:	 srlx		%g5, 24, %o2			/* IEU0				*/
 	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
 
 	be,pn		%icc, 8f			/* CTI				*/
-	 srlx		%g3, 16, %o2			/* IEU0				*/
+	 srlx		%g5, 16, %o2			/* IEU0				*/
 	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
 	be,pn		%icc, 7f			/* CTI				*/
 
-	 srlx		%g3, 8, %o2			/* IEU0				*/
+	 srlx		%g5, 8, %o2			/* IEU0				*/
 	andcc		%o2, 0xff, %g0			/* IEU1		Group		*/
 	be,pn		%icc, 6f			/* CTI				*/
 	 sub		%o3, %g1, %o2			/* IEU0				*/
 
-	andcc		%g3, 0xff, %g0			/* IEU1		Group		*/
+	andcc		%g5, 0xff, %g0			/* IEU1		Group		*/
 	be,pn		%icc, 5f			/* CTI				*/
 	 ldxa		[%o0] ASI_PNF, %o3		/* Load				*/
-	andcc		%o2, %g2, %g0			/* IEU1		Group		*/
+	andcc		%o2, %g4, %g0			/* IEU1		Group		*/
 
 	be,pt		%xcc, 2b			/* CTI				*/
 	 add		%o0, 8, %o0			/* IEU0				*/
-	addcc		%o2, %g1, %g3			/* IEU1		Group		*/
+	addcc		%o2, %g1, %g5			/* IEU1		Group		*/
 	ba,pt		%xcc, 3b			/* CTI				*/
 
 	 srlx		%o2, 32, %o2			/* IEU0				*/
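
The string routines in this patch scan eight bytes at a time with the
"(xword - 0x0101010101010101) & 0x8080808080808080" test that the truncated
comments above refer to; the strlen.S hunk shows the two constants being built
in %g1 and %g4.  Below is a hedged standalone sketch of the exact form of the
test: the extra andn is what the EIGHTBIT_NOT_RARE variants add, while the
default paths use the cheaper test and weed out the false hits that bytes in
the 0x81-0xff range can cause byte by byte afterwards.  The has_zero_byte
label is hypothetical:

	.text
	.align	4
	.global	has_zero_byte		/* long has_zero_byte (const char *p) */
has_zero_byte:
	sethi	%hi(0x01010101), %o2
	or	%o2, %lo(0x01010101), %o2
	sllx	%o2, 32, %o3
	or	%o2, %o3, %o2		! %o2 = 0x0101010101010101
	sllx	%o2, 7, %o3		! %o3 = 0x8080808080808080
	ldx	[%o0], %o1		! one aligned xword of the string
	sub	%o1, %o2, %o4		! bit 7 of a byte sets if it was zero
					! (bytes >= 0x81 can set it as well)
	andn	%o4, %o1, %o4		! drop bytes whose bit 7 was already set
	retl
	 and	%o4, %o3, %o0		! nonzero iff the xword has a zero byte
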
diff --git a/sysdeps/sparc/sparc64/strncmp.S b/sysdeps/sparc/sparc64/strncmp.S
index 361748dd26..85149b5351 100644
--- a/sysdeps/sparc/sparc64/strncmp.S
+++ b/sysdeps/sparc/sparc64/strncmp.S
@@ -27,6 +27,9 @@
 #ifndef XCC
 #define XCC xcc
 #define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g7, #scratch
 #endif
 
 	/* Normally, this uses
diff --git a/sysdeps/sparc/sparc64/strncpy.S b/sysdeps/sparc/sparc64/strncpy.S
index 3d850a8db2..09a27df119 100644
--- a/sysdeps/sparc/sparc64/strncpy.S
+++ b/sysdeps/sparc/sparc64/strncpy.S
@@ -27,6 +27,9 @@
 #ifndef XCC
 #define XCC xcc
 #define USE_BPR
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+	.register	%g7, #scratch
 #endif
 
 	/* Normally, this uses
diff --git a/sysdeps/sparc/sparc64/strpbrk.S b/sysdeps/sparc/sparc64/strpbrk.S
index c23ae9995d..d06f26b54f 100644
--- a/sysdeps/sparc/sparc64/strpbrk.S
+++ b/sysdeps/sparc/sparc64/strpbrk.S
@@ -26,6 +26,7 @@
 #define XCC xcc
 #define STACK_SIZE	128
 #define STACK_OFFSET	128+0x7ff
+	.register	%g2, #scratch
 #else
 #define STACK_SIZE	64
 #define STACK_OFFSET	64
@@ -77,74 +78,74 @@ ENTRY(strpbrk)
 	 ldub		[%o0], %o2			/* Load				*/
 	ldx		[%o0], %o2			/* Load		Group		*/
 4:	srlx		%o2, 59, %o3			/* IEU0		Group		*/
-	srlx		%o2, 56, %g3			/* IEU0		Group		*/
+	srlx		%o2, 56, %g4			/* IEU0		Group		*/
 
 5:	and		%o3, 0x18, %o3			/* IEU1				*/
-	andcc		%g3, 0x3f, %g3			/* IEU1		Group		*/
+	andcc		%g4, 0x3f, %g4			/* IEU1		Group		*/
 	ldx		[%o5 + %o3], %g2		/* Load				*/
 	srlx		%o2, 51, %o3			/* IEU0				*/
 
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 48, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g4, %g1			/* IEU0		Group		*/
+	srlx		%o2, 48, %g4			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 	bne,pn		%xcc, 13f			/* CTI				*/
 
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g4, 0x3f, %g4			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 	srlx		%o2, 43, %o3			/* IEU0				*/
 
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 40, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g4, %g1			/* IEU0		Group		*/
+	srlx		%o2, 40, %g4			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 	bne,pn		%xcc, 14f			/* CTI				*/
 
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g4, 0x3f, %g4			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 	srlx		%o2, 35, %o3			/* IEU0				*/
 
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 32, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g4, %g1			/* IEU0		Group		*/
+	srlx		%o2, 32, %g4			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 	bne,pn		%xcc, 15f			/* CTI				*/
 
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g4, 0x3f, %g4			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 	srlx		%o2, 27, %o3			/* IEU0				*/
 
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 24, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g4, %g1			/* IEU0		Group		*/
+	srlx		%o2, 24, %g4			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 	bne,pn		%xcc, 16f			/* CTI				*/
 
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g4, 0x3f, %g4			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 	srlx		%o2, 19, %o3			/* IEU0				*/
 
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 16, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g4, %g1			/* IEU0		Group		*/
+	srlx		%o2, 16, %g4			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 	bne,pn		%xcc, 17f			/* CTI				*/
 
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g4, 0x3f, %g4			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 	srlx		%o2, 11, %o3			/* IEU0				*/
 
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
+	sllx		%o4, %g4, %g1			/* IEU0		Group		*/
 	add		%o0, 8, %o0			/* IEU1				*/
-	srlx		%o2, 8, %g3			/* IEU0		Group		*/
+	srlx		%o2, 8, %g4			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	bne,pn		%xcc, 18f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g4, 0x3f, %g4			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
-	sllx		%o4, %g3, %g1			/* IEU0				*/
+	sllx		%o4, %g4, %g1			/* IEU0				*/
 	mov		%o2, %g5			/* IEU1				*/
 	srlx		%o2, 3, %o3			/* IEU0		Group		*/
 	ldxa		[%o0] ASI_PNF, %o2		/* Load				*/
@@ -152,15 +153,15 @@ ENTRY(strpbrk)
 	andcc		%g2, %g1, %g2			/* IEU1		Group		*/
 	bne,pn		%xcc, 19f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g5, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g4			/* IEU1				*/
 
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
-	sllx		%o4, %g3, %g1			/* IEU0				*/
+	sllx		%o4, %g4, %g1			/* IEU0				*/
 	srlx		%o2, 59, %o3			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1		Group		*/
 
 	be,pt		%xcc, 5b			/* CTI				*/
-	 srlx		%o2, 56, %g3			/* IEU0		Group		*/
+	 srlx		%o2, 56, %g4			/* IEU0		Group		*/
 	sub		%o0, 1, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 
diff --git a/sysdeps/sparc/sparc64/strspn.S b/sysdeps/sparc/sparc64/strspn.S
index de440c0755..69e82d17b7 100644
--- a/sysdeps/sparc/sparc64/strspn.S
+++ b/sysdeps/sparc/sparc64/strspn.S
@@ -26,6 +26,7 @@
 #define XCC xcc
 #define STACK_SIZE	128
 #define STACK_OFFSET	128+0x7ff
+	.register	%g2, #scratch
 #else
 #define STACK_SIZE	64
 #define STACK_OFFSET	64
@@ -37,7 +38,7 @@ ENTRY(strspn)
 	sub		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	mov		1, %o4				/* IEU1				*/
 	stx		%g0, [%sp + STACK_OFFSET]	/* Store	Group		*/
-	mov		%o0, %g7			/* IEU0				*/
+	mov		%o0, %g4			/* IEU0				*/
 
 	stx		%g0, [%sp + STACK_OFFSET + 8]	/* Store	Group		*/
 	add		%sp, STACK_OFFSET, %o5		/* IEU0				*/
@@ -79,74 +80,74 @@ ENTRY(strspn)
 	ldx		[%o0], %o2			/* Load		Group		*/
 4:	srlx		%o2, 59, %o3			/* IEU0		Group		*/
 
-	srlx		%o2, 56, %g3			/* IEU0		Group		*/
+	srlx		%o2, 56, %g5			/* IEU0		Group		*/
 5:	and		%o3, 0x18, %o3			/* IEU1				*/
-	andcc		%g3, 0x3f, %g3			/* IEU1		Group		*/
+	andcc		%g5, 0x3f, %g5			/* IEU1		Group		*/
 	ldx		[%o5 + %o3], %g2		/* Load				*/
 
 	srlx		%o2, 51, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 48, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 48, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	be,pn		%xcc, 13f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 43, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 40, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 40, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	be,pn		%xcc, 14f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 35, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 32, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 32, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	be,pn		%xcc, 15f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 27, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 24, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 24, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	be,pn		%xcc, 16f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 19, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
-	srlx		%o2, 16, %g3			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
+	srlx		%o2, 16, %g5			/* IEU0		Group		*/
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 
 	be,pn		%xcc, 17f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
 
 	srlx		%o2, 11, %o3			/* IEU0				*/
-	sllx		%o4, %g3, %g1			/* IEU0		Group		*/
+	sllx		%o4, %g5, %g1			/* IEU0		Group		*/
 	add		%o0, 8, %o0			/* IEU1				*/
-	srlx		%o2, 8, %g3			/* IEU0		Group		*/
+	srlx		%o2, 8, %g5			/* IEU0		Group		*/
 
 	andcc		%g2, %g1, %g2			/* IEU1				*/
 	be,pn		%xcc, 18f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
-	sllx		%o4, %g3, %g1			/* IEU0				*/
-	mov		%o2, %g3			/* IEU1				*/
+	sllx		%o4, %g5, %g1			/* IEU0				*/
+	mov		%o2, %g5			/* IEU1				*/
 	srlx		%o2, 3, %o3			/* IEU0		Group		*/
 
 	ldxa		[%o0] ASI_PNF, %o2		/* Load				*/
@@ -154,58 +155,58 @@ ENTRY(strspn)
 	be,pn		%xcc, 19f			/* CTI				*/
 	 and		%o3, 0x18, %o3			/* IEU0		Group		*/
 
-	and		%g3, 0x3f, %g3			/* IEU1				*/
+	and		%g5, 0x3f, %g5			/* IEU1				*/
 	ldx		[%o5 + %o3], %g2		/* Load		Group		*/
-	sllx		%o4, %g3, %g1			/* IEU0				*/
+	sllx		%o4, %g5, %g1			/* IEU0				*/
 	srlx		%o2, 59, %o3			/* IEU0		Group		*/
 
 	andcc		%g2, %g1, %g2			/* IEU1		Group		*/
 	bne,pt		%xcc, 5b			/* CTI				*/
-	 srlx		%o2, 56, %g3			/* IEU0		Group		*/
+	 srlx		%o2, 56, %g5			/* IEU0		Group		*/
 	sub		%o0, 1, %o0			/* IEU1				*/
 
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 	.align		16
 19:	sub		%o0, 2, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 18:	sub		%o0, 3, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 17:	add		%o0, 4, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 16:	add		%o0, 3, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 15:	add		%o0, 2, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 14:	add		%o0, 1, %o0			/* IEU1				*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU0		Group		*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 13:	add		%sp, STACK_SIZE+32, %sp		/* IEU1				*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 
 	.align		16
 12:	sub		%o0, 1, %o0			/* IEU0		Group		*/
 	add		%sp, STACK_SIZE+32, %sp		/* IEU1				*/
 	retl						/* CTI+IEU1	Group		*/
-	 sub		%o0, %g7, %o0			/* IEU0				*/
+	 sub		%o0, %g4, %o0			/* IEU0				*/
 END(strspn)
diff --git a/sysdeps/sparc/sparc64/sub_n.S b/sysdeps/sparc/sparc64/sub_n.S
index 403d50c704..b0d6b75bfe 100644
--- a/sysdeps/sparc/sparc64/sub_n.S
+++ b/sysdeps/sparc/sparc64/sub_n.S
@@ -1,7 +1,7 @@
 /* SPARC v9 __mpn_sub_n -- Subtract two limb vectors of the same length > 0
    and store difference in a third limb vector.
 
-   Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+   Copyright (C) 1995, 1996, 1997, 1999 Free Software Foundation, Inc.
 
    This file is part of the GNU MP Library.
 
@@ -30,23 +30,23 @@
 
 ENTRY(__mpn_sub_n)
 
-	sub %g0,%o3,%g3
+	sub %g0,%o3,%g5
 	sllx %o3,3,%g1
 	add %o1,%g1,%o1			! make s1_ptr point at end
 	add %o2,%g1,%o2			! make s2_ptr point at end
 	add %o0,%g1,%o0			! make res_ptr point at end
 	mov 0,%o4			! clear carry variable
-	sllx %g3,3,%o5			! compute initial address index
+	sllx %g5,3,%o5			! compute initial address index
 
 1:	ldx [%o2+%o5],%g1		! load s2 limb
-	add %g3,1,%g3			! increment loop count
-	ldx [%o1+%o5],%g2		! load s1 limb
+	add %g5,1,%g5			! increment loop count
+	ldx [%o1+%o5],%o3		! load s1 limb
 	addcc %g1,%o4,%g1		! add s2 limb and carry variable
 	movcc %xcc,0,%o4		! if carry-out, o4 was 1; clear it
-	subcc %g2,%g1,%g1		! subtract s1 limb from sum
+	subcc %o3,%g1,%g1		! subtract s1 limb from sum
 	stx %g1,[%o0+%o5]		! store result
 	add %o5,8,%o5			! increment address index
-	brnz,pt %g3,1b
+	brnz,pt %g5,1b
 	 movcs %xcc,1,%o4		! if s1 subtract gave carry, record it
 
 	retl