author    Roland McGrath <roland@gnu.org>  1995-12-04 18:37:56 +0000
committer Roland McGrath <roland@gnu.org>  1995-12-04 18:37:56 +0000
commit    ba848785bb048e7700555ef97c9d1fd3911a3da3
tree      646ee57c65b8d2231e235caa069d7fea634e8b64  /sysdeps/sparc
parent    c13a4f3dbd44ff03d85ad1ac35cca38c3f35d33c
Updated from ../=mpn/gmp-1.910
Diffstat (limited to 'sysdeps/sparc')
 sysdeps/sparc/add_n.S  | 304
 sysdeps/sparc/lshift.S |  94
 sysdeps/sparc/rshift.S |  91
 sysdeps/sparc/sub_n.S  | 391
 4 files changed, 660 insertions(+), 220 deletions(-)
diff --git a/sysdeps/sparc/add_n.S b/sysdeps/sparc/add_n.S
index 13704d32d2..80c3b99640 100644
--- a/sysdeps/sparc/add_n.S
+++ b/sysdeps/sparc/add_n.S
@@ -1,7 +1,7 @@
 ! sparc __mpn_add_n -- Add two limb vectors of the same length > 0 and store
 ! sum in a third limb vector.
 
-! Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
+! Copyright (C) 1995 Free Software Foundation, Inc.
 
 ! This file is part of the GNU MP Library.
 
@@ -21,10 +21,10 @@
 
 
 ! INPUT PARAMETERS
-! res_ptr	%o0
-! s1_ptr	%o1
-! s2_ptr	%o2
-! size		%o3
+#define res_ptr	%o0
+#define s1_ptr	%o1
+#define s2_ptr	%o2
+#define size	%o3
 
 #include "sysdep.h"
 
@@ -32,108 +32,192 @@
 	.align	4
 	.global	C_SYMBOL_NAME(__mpn_add_n)
 C_SYMBOL_NAME(__mpn_add_n):
-	ld	[%o1+0],%o4		! read first limb from s1_ptr
-	srl	%o3,4,%g1
-	ld	[%o2+0],%o5		! read first limb from s2_ptr
-
-	sub	%g0,%o3,%o3
-	andcc	%o3,(16-1),%o3
-	be	Lzero
-	 mov	%o4,%g2			! put first s1_limb in g2 too
-
-	sll	%o3,2,%o3		! multiply by 4
-	sub	%o0,%o3,%o0		! adjust res_ptr
-	sub	%o1,%o3,%o1		! adjust s1_ptr
-	sub	%o2,%o3,%o2		! adjust s2_ptr
-
-#if PIC
-	mov	%o7,%g4			! Save return address register
-	call	1f
-	add	%o7,Lbase-1f,%g3
-1:	mov	%g4,%o7			! Restore return address register
-#else
-	sethi	%hi(Lbase),%g3
-	or	%g3,%lo(Lbase),%g3
-#endif
-	sll	%o3,2,%o3		! multiply by 4
-	jmp	%g3+%o3
-	 mov	%o5,%g3			! put first s2_limb in g3 too
-
-Loop:	addxcc	%g2,%g3,%o3
-	add	%o1,64,%o1
-	st	%o3,[%o0+60]
-	add	%o2,64,%o2
-	ld	[%o1+0],%o4
-	add	%o0,64,%o0
-	ld	[%o2+0],%o5
-Lzero:	sub	%g1,1,%g1	! add 0 + 16r limbs (adjust loop counter)
-Lbase:	ld	[%o1+4],%g2
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+4],%g3
-	st	%o3,[%o0+0]
-	ld	[%o1+8],%o4	! add 15 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+8],%o5
-	st	%o3,[%o0+4]
-	ld	[%o1+12],%g2	! add 14 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+12],%g3
-	st	%o3,[%o0+8]
-	ld	[%o1+16],%o4	! add 13 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+16],%o5
-	st	%o3,[%o0+12]
-	ld	[%o1+20],%g2	! add 12 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+20],%g3
-	st	%o3,[%o0+16]
-	ld	[%o1+24],%o4	! add 11 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+24],%o5
-	st	%o3,[%o0+20]
-	ld	[%o1+28],%g2	! add 10 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+28],%g3
-	st	%o3,[%o0+24]
-	ld	[%o1+32],%o4	! add 9 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+32],%o5
-	st	%o3,[%o0+28]
-	ld	[%o1+36],%g2	! add 8 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+36],%g3
-	st	%o3,[%o0+32]
-	ld	[%o1+40],%o4	! add 7 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+40],%o5
-	st	%o3,[%o0+36]
-	ld	[%o1+44],%g2	! add 6 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+44],%g3
-	st	%o3,[%o0+40]
-	ld	[%o1+48],%o4	! add 5 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+48],%o5
-	st	%o3,[%o0+44]
-	ld	[%o1+52],%g2	! add 4 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+52],%g3
-	st	%o3,[%o0+48]
-	ld	[%o1+56],%o4	! add 3 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+56],%o5
-	st	%o3,[%o0+52]
-	ld	[%o1+60],%g2	! add 2 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+60],%g3
-	st	%o3,[%o0+56]
-	addx	%g0,%g0,%o4
-	tst	%g1
-	bne	Loop
-	 subcc	%g0,%o4,%g0	! restore cy (delay slot)
-
-	addxcc	%g2,%g3,%o3
-	st	%o3,[%o0+60]	! store most significant limb
-
-	retl
-	 addx	%g0,%g0,%o0	! return carry-out from most sign. limb
+	cmp	size,8
+	mov	0,%o4			! clear cy-save register
+	blt,a	Ltriv
+	addcc	size,-2,size
+	xor	s2_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L1			! branch if alignment differs
+	nop
+L0:	andcc	res_ptr,4,%g0		! res_ptr unaligned? Side effect: cy=0
+	beq	L_v1			! if no, branch
+	nop
+! **  V1a  **
+/* Add least significant limb separately to align res_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	addcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+
+L_v1:	ld	[s1_ptr+0],%g4
+	ld	[s1_ptr+4],%g1
+	ldd	[s2_ptr+0],%g2
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-10,size
+	blt	Lfin1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add blocks of 8 limbs until less than 8 limbs remain */
+Loop1:	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+16],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+20],%g1
+	ldd	[s2_ptr+16],%g2
+	std	%o4,[res_ptr+8]
+	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+24],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+28],%g1
+	ldd	[s2_ptr+24],%g2
+	std	%o4,[res_ptr+16]
+	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+32],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+36],%g1
+	ldd	[s2_ptr+32],%g2
+	std	%o4,[res_ptr+24]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop1
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin1:	addcc	size,8-2,size
+	blt	Lend1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add blocks of 2 limbs until less than 2 limbs remain */
+Loop1b:	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loop1b
+	subcc	%g0,%o4,%g0		! restore cy
+Lend1:	addxcc	%g4,%g2,%o4
+	addxcc	%g1,%g3,%o5
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+
+	andcc	size,1,%g0
+	be	Lret1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add last limb */
+	ld	[s1_ptr+8],%g4
+	ld	[s2_ptr+8],%g2
+	addxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr+8]
+
+Lret1:	retl
+	addx	%g0,%g0,%o0	! return carry-out from most sign. limb
+
+L1:	xor	s1_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L2
+	nop
+! **  V1b  **
+	mov	s2_ptr,%g1
+	mov	s1_ptr,s2_ptr
+	b	L0
+	mov	%g1,s1_ptr
+
+! **  V2  **
+/* If we come here, s1_ptr and res_ptr differ in alignment, and so do
+   s2_ptr and res_ptr.  Since there are only two ways things can be
+   aligned (that we care about), s1_ptr and s2_ptr must therefore have
+   the same alignment.  */
+
+L2:	andcc	s1_ptr,4,%g0		! s1_ptr unaligned? Side effect: cy=0
+	beq	L_v2			! if no, branch
+	nop
+/* Add least significant limb separately to align res_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	addcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+
+L_v2:	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	blt	Lfin2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add blocks of 8 limbs until less than 8 limbs remain */
+Loop2:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	ldd	[s1_ptr+8],%g2
+	ldd	[s2_ptr+8],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+8]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+12]
+	ldd	[s1_ptr+16],%g2
+	ldd	[s2_ptr+16],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+16]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+20]
+	ldd	[s1_ptr+24],%g2
+	ldd	[s2_ptr+24],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+24]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+28]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop2
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin2:	addcc	size,8-2,size
+Ltriv:	blt	Lend2
+	subcc	%g0,%o4,%g0		! restore cy
+Loop2b:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loop2b
+	subcc	%g0,%o4,%g0		! restore cy
+Lend2:	andcc	size,1,%g0
+	be	Lret2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add last limb */
+	ld	[s1_ptr],%g4
+	ld	[s2_ptr],%g2
+	addxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+
+Lret2:	retl
+	addx	%g0,%g0,%o0	! return carry-out from most sign. limb
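
For readers untangling the scheduled assembly above: __mpn_add_n performs a
carry-propagating add of two equal-length limb vectors and returns the carry
out of the most significant limb.  The `addx %g0,%g0,%o4' / `subcc %g0,%o4,%g0'
pairs spill the carry flag to a register and reload it, because the addcc used
for loop control clobbers it.  Below is a minimal portable C sketch of the
same contract; the 32-bit limb_t and the ref_mpn_add_n name are illustrative
assumptions, not glibc's actual types or entry points.

	/* Illustrative C model of __mpn_add_n's contract; not glibc code.
	   limb_t is assumed 32-bit to match the SPARC v7/v8 word size.  */
	#include <stddef.h>
	#include <stdint.h>

	typedef uint32_t limb_t;

	limb_t
	ref_mpn_add_n (limb_t *res_ptr, const limb_t *s1_ptr,
		       const limb_t *s2_ptr, size_t size)
	{
	  limb_t cy = 0;
	  for (size_t i = 0; i < size; i++)
	    {
	      /* Widen so the carry out of bit 31 lands in bit 32.  */
	      uint64_t t = (uint64_t) s1_ptr[i] + s2_ptr[i] + cy;
	      res_ptr[i] = (limb_t) t;
	      cy = (limb_t) (t >> 32);
	    }
	  return cy;		/* carry-out from the most significant limb */
	}
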
diff --git a/sysdeps/sparc/lshift.S b/sysdeps/sparc/lshift.S
new file mode 100644
index 0000000000..497272af0f
--- /dev/null
+++ b/sysdeps/sparc/lshift.S
@@ -0,0 +1,94 @@
+! sparc __mpn_lshift -- Shift a limb vector left and return the bits shifted out.
+
+! Copyright (C) 1995 Free Software Foundation, Inc.
+
+! This file is part of the GNU MP Library.
+
+! The GNU MP Library is free software; you can redistribute it and/or modify
+! it under the terms of the GNU Library General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or (at your
+! option) any later version.
+
+! The GNU MP Library is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+! or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+! License for more details.
+
+! You should have received a copy of the GNU Library General Public License
+! along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+! the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+! INPUT PARAMETERS
+! res_ptr	%o0
+! src_ptr	%o1
+! size		%o2
+! cnt		%o3
+
+#include "sysdep.h"
+
+	.text
+	.align	4
+	.global	C_SYMBOL_NAME(__mpn_lshift)
+C_SYMBOL_NAME(__mpn_lshift):
+	sll	%o2,2,%g1
+	add	%o1,%g1,%o1	! make %o1 point at end of src
+	ld	[%o1-4],%g2	! load first limb
+	sub	%g0,%o3,%o5	! negate shift count
+	add	%o0,%g1,%o0	! make %o0 point at end of res
+	add	%o2,-1,%o2
+	andcc	%o2,4-1,%g4	! number of limbs in first loop
+	srl	%g2,%o5,%g1	! compute function result
+	beq	L0		! if multiple of 4 limbs, skip first loop
+	st	%g1,[%sp+80]
+
+	sub	%o2,%g4,%o2	! adjust count for main loop
+
+Loop0:	ld	[%o1-8],%g3
+	add	%o0,-4,%o0
+	add	%o1,-4,%o1
+	addcc	%g4,-1,%g4
+	sll	%g2,%o3,%o4
+	srl	%g3,%o5,%g1
+	mov	%g3,%g2
+	or	%o4,%g1,%o4
+	bne	Loop0
+	 st	%o4,[%o0+0]
+
+L0:	tst	%o2
+	beq	Lend
+	 nop
+
+Loop:	ld	[%o1-8],%g3
+	add	%o0,-16,%o0
+	addcc	%o2,-4,%o2
+	sll	%g2,%o3,%o4
+	srl	%g3,%o5,%g1
+
+	ld	[%o1-12],%g2
+	sll	%g3,%o3,%g4
+	or	%o4,%g1,%o4
+	st	%o4,[%o0+12]
+	srl	%g2,%o5,%g1
+
+	ld	[%o1-16],%g3
+	sll	%g2,%o3,%o4
+	or	%g4,%g1,%g4
+	st	%g4,[%o0+8]
+	srl	%g3,%o5,%g1
+
+	ld	[%o1-20],%g2
+	sll	%g3,%o3,%g4
+	or	%o4,%g1,%o4
+	st	%o4,[%o0+4]
+	srl	%g2,%o5,%g1
+
+	add	%o1,-16,%o1
+	or	%g4,%g1,%g4
+	bne	Loop
+	 st	%g4,[%o0+0]
+
+Lend:	sll	%g2,%o3,%g2
+	st	%g2,[%o0-4]
+	retl
+	ld	[%sp+80],%o0
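
The new __mpn_lshift shifts a size-limb vector left by cnt bits (0 < cnt < 32)
and returns the bits shifted out of the most significant limb.  It walks from
the high end downward -- hence %o0 and %o1 are first pointed at the ends of
the vectors -- which also makes an overlapping res_ptr >= src_ptr call safe;
the `st %g1,[%sp+80]' stashes the return value in a scratch stack slot until
the retl.  A hedged C sketch of the same semantics, under the same
illustrative limb_t and ref_ naming as the add_n sketch:

	/* Illustrative C model of __mpn_lshift; not glibc code.  */
	#include <stddef.h>
	#include <stdint.h>

	typedef uint32_t limb_t;

	limb_t
	ref_mpn_lshift (limb_t *res_ptr, const limb_t *src_ptr,
			size_t size, unsigned int cnt)	/* 0 < cnt < 32 */
	{
	  /* Bits pushed out of the top limb form the return value.  */
	  limb_t retval = src_ptr[size - 1] >> (32 - cnt);
	  for (size_t i = size - 1; i > 0; i--)
	    res_ptr[i] = (src_ptr[i] << cnt) | (src_ptr[i - 1] >> (32 - cnt));
	  res_ptr[0] = src_ptr[0] << cnt;
	  return retval;
	}
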
diff --git a/sysdeps/sparc/rshift.S b/sysdeps/sparc/rshift.S
new file mode 100644
index 0000000000..3428cfe9a4
--- /dev/null
+++ b/sysdeps/sparc/rshift.S
@@ -0,0 +1,91 @@
+! sparc __mpn_rshift -- Shift a limb vector right and return the bits shifted out.
+
+! Copyright (C) 1995 Free Software Foundation, Inc.
+
+! This file is part of the GNU MP Library.
+
+! The GNU MP Library is free software; you can redistribute it and/or modify
+! it under the terms of the GNU Library General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or (at your
+! option) any later version.
+
+! The GNU MP Library is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+! or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+! License for more details.
+
+! You should have received a copy of the GNU Library General Public License
+! along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+! the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+! INPUT PARAMETERS
+! res_ptr	%o0
+! src_ptr	%o1
+! size		%o2
+! cnt		%o3
+
+#include "sysdep.h"
+
+	.text
+	.align	4
+	.global	C_SYMBOL_NAME(__mpn_rshift)
+C_SYMBOL_NAME(__mpn_rshift):
+	ld	[%o1],%g2	! load first limb
+	sub	%g0,%o3,%o5	! negate shift count
+	add	%o2,-1,%o2
+	andcc	%o2,4-1,%g4	! number of limbs in first loop
+	sll	%g2,%o5,%g1	! compute function result
+	beq	L0		! if multiple of 4 limbs, skip first loop
+	st	%g1,[%sp+80]
+
+	sub	%o2,%g4,%o2	! adjust count for main loop
+
+Loop0:	ld	[%o1+4],%g3
+	add	%o0,4,%o0
+	add	%o1,4,%o1
+	addcc	%g4,-1,%g4
+	srl	%g2,%o3,%o4
+	sll	%g3,%o5,%g1
+	mov	%g3,%g2
+	or	%o4,%g1,%o4
+	bne	Loop0
+	 st	%o4,[%o0-4]
+
+L0:	tst	%o2
+	beq	Lend
+	 nop
+
+Loop:	ld	[%o1+4],%g3
+	add	%o0,16,%o0
+	addcc	%o2,-4,%o2
+	srl	%g2,%o3,%o4
+	sll	%g3,%o5,%g1
+
+	ld	[%o1+8],%g2
+	srl	%g3,%o3,%g4
+	or	%o4,%g1,%o4
+	st	%o4,[%o0-16]
+	sll	%g2,%o5,%g1
+
+	ld	[%o1+12],%g3
+	srl	%g2,%o3,%o4
+	or	%g4,%g1,%g4
+	st	%g4,[%o0-12]
+	sll	%g3,%o5,%g1
+
+	ld	[%o1+16],%g2
+	srl	%g3,%o3,%g4
+	or	%o4,%g1,%o4
+	st	%o4,[%o0-8]
+	sll	%g2,%o5,%g1
+
+	add	%o1,16,%o1
+	or	%g4,%g1,%g4
+	bne	Loop
+	 st	%g4,[%o0-4]
+
+Lend:	srl	%g2,%o3,%g2
+	st	%g2,[%o0-0]
+	retl
+	ld	[%sp+80],%o0
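
__mpn_rshift is the mirror image: it returns the bits shifted out of the
least significant limb and walks from the low end upward, so res_ptr <=
src_ptr is the safe overlap direction.  Sketch under the same assumptions:

	/* Illustrative C model of __mpn_rshift; not glibc code.  */
	#include <stddef.h>
	#include <stdint.h>

	typedef uint32_t limb_t;

	limb_t
	ref_mpn_rshift (limb_t *res_ptr, const limb_t *src_ptr,
			size_t size, unsigned int cnt)	/* 0 < cnt < 32 */
	{
	  /* Bits pushed out of the low limb form the return value.  */
	  limb_t retval = src_ptr[0] << (32 - cnt);
	  for (size_t i = 0; i < size - 1; i++)
	    res_ptr[i] = (src_ptr[i] >> cnt) | (src_ptr[i + 1] << (32 - cnt));
	  res_ptr[size - 1] = src_ptr[size - 1] >> cnt;
	  return retval;
	}
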
diff --git a/sysdeps/sparc/sub_n.S b/sysdeps/sparc/sub_n.S
index 6264344009..2e217ed679 100644
--- a/sysdeps/sparc/sub_n.S
+++ b/sysdeps/sparc/sub_n.S
@@ -1,7 +1,7 @@
 ! sparc __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
 ! store difference in a third limb vector.
 
-! Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
+! Copyright (C) 1995 Free Software Foundation, Inc.
 
 ! This file is part of the GNU MP Library.
 
@@ -21,10 +21,10 @@
 
 
 ! INPUT PARAMETERS
-! res_ptr	%o0
-! s1_ptr	%o1
-! s2_ptr	%o2
-! size		%o3
+#define res_ptr	%o0
+#define s1_ptr	%o1
+#define s2_ptr	%o2
+#define size	%o3
 
 #include "sysdep.h"
 
@@ -32,108 +32,279 @@
 	.align	4
 	.global	C_SYMBOL_NAME(__mpn_sub_n)
 C_SYMBOL_NAME(__mpn_sub_n):
-	ld	[%o1+0],%o4		! read first limb from s1_ptr
-	srl	%o3,4,%g1
-	ld	[%o2+0],%o5		! read first limb from s2_ptr
-
-	sub	%g0,%o3,%o3
-	andcc	%o3,(16-1),%o3
-	be	Lzero
-	 mov	%o4,%g2			! put first s1_limb in g2 too
-
-	sll	%o3,2,%o3		! multiply by 4
-	sub	%o0,%o3,%o0		! adjust res_ptr
-	sub	%o1,%o3,%o1		! adjust s1_ptr
-	sub	%o2,%o3,%o2		! adjust s2_ptr
-
-#if PIC
-	mov	%o7,%g4			! Save return address register
-	call	1f
-	add	%o7,Lbase-1f,%g3
-1:	mov	%g4,%o7			! Restore return address register
-#else
-	sethi	%hi(Lbase),%g3
-	or	%g3,%lo(Lbase),%g3
-#endif
-	sll	%o3,2,%o3		! multiply by 4
-	jmp	%g3+%o3
-	 mov	%o5,%g3			! put first s2_limb in g3 too
-
-Loop:	subxcc	%g2,%g3,%o3
-	add	%o1,64,%o1
-	st	%o3,[%o0+60]
-	add	%o2,64,%o2
-	ld	[%o1+0],%o4
-	add	%o0,64,%o0
-	ld	[%o2+0],%o5
-Lzero:	sub	%g1,1,%g1	! add 0 + 16r limbs (adjust loop counter)
-Lbase:	ld	[%o1+4],%g2
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+4],%g3
-	st	%o3,[%o0+0]
-	ld	[%o1+8],%o4	! add 15 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+8],%o5
-	st	%o3,[%o0+4]
-	ld	[%o1+12],%g2	! add 14 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+12],%g3
-	st	%o3,[%o0+8]
-	ld	[%o1+16],%o4	! add 13 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+16],%o5
-	st	%o3,[%o0+12]
-	ld	[%o1+20],%g2	! add 12 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+20],%g3
-	st	%o3,[%o0+16]
-	ld	[%o1+24],%o4	! add 11 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+24],%o5
-	st	%o3,[%o0+20]
-	ld	[%o1+28],%g2	! add 10 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+28],%g3
-	st	%o3,[%o0+24]
-	ld	[%o1+32],%o4	! add 9 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+32],%o5
-	st	%o3,[%o0+28]
-	ld	[%o1+36],%g2	! add 8 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+36],%g3
-	st	%o3,[%o0+32]
-	ld	[%o1+40],%o4	! add 7 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+40],%o5
-	st	%o3,[%o0+36]
-	ld	[%o1+44],%g2	! add 6 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+44],%g3
-	st	%o3,[%o0+40]
-	ld	[%o1+48],%o4	! add 5 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+48],%o5
-	st	%o3,[%o0+44]
-	ld	[%o1+52],%g2	! add 4 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+52],%g3
-	st	%o3,[%o0+48]
-	ld	[%o1+56],%o4	! add 3 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+56],%o5
-	st	%o3,[%o0+52]
-	ld	[%o1+60],%g2	! add 2 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+60],%g3
-	st	%o3,[%o0+56]
-	subx	%g0,%g0,%o4
-	tst	%g1
-	bne	Loop
-	 subcc	%g0,%o4,%g0	! restore cy (delay slot)
-
-	subxcc	%g2,%g3,%o3
-	st	%o3,[%o0+60]	! store most significant limb
-
-	retl
-	 addx	%g0,%g0,%o0	! return carry-out from most sign. limb
+	xor	s2_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L1			! branch if alignment differs
+	nop
+! **  V1a  **
+	andcc	res_ptr,4,%g0		! res_ptr unaligned? Side effect: cy=0
+	beq	L_v1			! if no, branch
+	nop
+/* Subtract least significant limb separately to align res_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	subcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+L_v1:	addx	%g0,%g0,%o4		! save cy in register
+	cmp	size,2			! if size < 2 ...
+	bl	Lend2			! ... branch to tail code
+	subcc	%g0,%o4,%g0		! restore cy
+
+	ld	[s1_ptr+0],%g4
+	addcc	size,-10,size
+	ld	[s1_ptr+4],%g1
+	ldd	[s2_ptr+0],%g2
+	blt	Lfin1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 8 limbs until less than 8 limbs remain */
+Loop1:	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+16],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+20],%g1
+	ldd	[s2_ptr+16],%g2
+	std	%o4,[res_ptr+8]
+	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+24],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+28],%g1
+	ldd	[s2_ptr+24],%g2
+	std	%o4,[res_ptr+16]
+	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+32],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+36],%g1
+	ldd	[s2_ptr+32],%g2
+	std	%o4,[res_ptr+24]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop1
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin1:	addcc	size,8-2,size
+	blt	Lend1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 2 limbs until less than 2 limbs remain */
+Loope1:	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loope1
+	subcc	%g0,%o4,%g0		! restore cy
+Lend1:	subxcc	%g4,%g2,%o4
+	subxcc	%g1,%g3,%o5
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+
+	andcc	size,1,%g0
+	be	Lret1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract last limb */
+	ld	[s1_ptr+8],%g4
+	ld	[s2_ptr+8],%g2
+	subxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr+8]
+
+Lret1:	retl
+	addx	%g0,%g0,%o0	! return borrow-out from most sign. limb
+
+L1:	xor	s1_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L2
+	nop
+! **  V1b  **
+	andcc	res_ptr,4,%g0		! res_ptr unaligned? Side effect: cy=0
+	beq	L_v1b			! if no, branch
+	nop
+/* Subtract least significant limb separately to align res_ptr and s2_ptr */
+	ld	[s2_ptr],%g4
+	add	s2_ptr,4,s2_ptr
+	ld	[s1_ptr],%g2
+	add	s1_ptr,4,s1_ptr
+	add	size,-1,size
+	subcc	%g2,%g4,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+L_v1b:	addx	%g0,%g0,%o4		! save cy in register
+	cmp	size,2			! if size < 2 ...
+	bl	Lend2			! ... branch to tail code
+	subcc	%g0,%o4,%g0		! restore cy
+
+	ld	[s2_ptr+0],%g4
+	addcc	size,-10,size
+	ld	[s2_ptr+4],%g1
+	ldd	[s1_ptr+0],%g2
+	blt	Lfin1b
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 8 limbs until less than 8 limbs remain */
+Loop1b:	subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+8],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+12],%g1
+	ldd	[s1_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+16],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+20],%g1
+	ldd	[s1_ptr+16],%g2
+	std	%o4,[res_ptr+8]
+	subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+24],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+28],%g1
+	ldd	[s1_ptr+24],%g2
+	std	%o4,[res_ptr+16]
+	subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+32],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+36],%g1
+	ldd	[s1_ptr+32],%g2
+	std	%o4,[res_ptr+24]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop1b
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin1b:	addcc	size,8-2,size
+	blt	Lend1b
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 2 limbs until less than 2 limbs remain */
+Loope1b:subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+8],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+12],%g1
+	ldd	[s1_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loope1b
+	subcc	%g0,%o4,%g0		! restore cy
+Lend1b:	subxcc	%g2,%g4,%o4
+	subxcc	%g3,%g1,%o5
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+
+	andcc	size,1,%g0
+	be	Lret1b
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract last limb */
+	ld	[s2_ptr+8],%g4
+	ld	[s1_ptr+8],%g2
+	subxcc	%g2,%g4,%o4
+	st	%o4,[res_ptr+8]
+
+Lret1b:	retl
+	addx	%g0,%g0,%o0	! return borrow-out from most sign. limb
+
+! **  V2  **
+/* If we come here, s1_ptr and res_ptr differ in alignment, and so do
+   s2_ptr and res_ptr.  Since there are only two ways things can be
+   aligned (that we care about), s1_ptr and s2_ptr must therefore have
+   the same alignment.  */
+
+L2:	cmp	size,1
+	be	Ljone
+	nop
+	andcc	s1_ptr,4,%g0		! s1_ptr unaligned? Side effect: cy=0
+	beq	L_v2			! if no, branch
+	nop
+/* Subtract least significant limb separately to align s1_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	subcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+
+L_v2:	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	blt	Lfin2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 8 limbs until less than 8 limbs remain */
+Loop2:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	ldd	[s1_ptr+8],%g2
+	ldd	[s2_ptr+8],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+8]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+12]
+	ldd	[s1_ptr+16],%g2
+	ldd	[s2_ptr+16],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+16]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+20]
+	ldd	[s1_ptr+24],%g2
+	ldd	[s2_ptr+24],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+24]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+28]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop2
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin2:	addcc	size,8-2,size
+	blt	Lend2
+	subcc	%g0,%o4,%g0		! restore cy
+Loope2:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loope2
+	subcc	%g0,%o4,%g0		! restore cy
+Lend2:	andcc	size,1,%g0
+	be	Lret2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract last limb */
+Ljone:	ld	[s1_ptr],%g4
+	ld	[s2_ptr],%g2
+	subxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+
+Lret2:	retl
+	addx	%g0,%g0,%o0	! return borrow-out from most sign. limb
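
__mpn_sub_n mirrors __mpn_add_n but propagates a borrow instead of a carry,
which is why it needs both the V1a and V1b paths: subtraction is not
commutative, so unlike add_n the operands cannot simply be swapped when it is
s2_ptr rather than s1_ptr that shares res_ptr's alignment.  A hedged C sketch
of the contract, under the same illustrative 32-bit limb_t:

	/* Illustrative C model of __mpn_sub_n; not glibc code.  */
	#include <stddef.h>
	#include <stdint.h>

	typedef uint32_t limb_t;

	limb_t
	ref_mpn_sub_n (limb_t *res_ptr, const limb_t *s1_ptr,
		       const limb_t *s2_ptr, size_t size)
	{
	  limb_t brw = 0;
	  for (size_t i = 0; i < size; i++)
	    {
	      /* Widen; a wrap leaves all-ones in the high 32 bits.  */
	      uint64_t t = (uint64_t) s1_ptr[i] - s2_ptr[i] - brw;
	      res_ptr[i] = (limb_t) t;
	      brw = (limb_t) (t >> 32) & 1;	/* 1 iff we borrowed */
	    }
	  return brw;		/* borrow-out from the most significant limb */
	}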