-rw-r--r--  stdlib/gmp-impl.h           |  20
-rw-r--r--  stdlib/gmp.h                |  11
-rw-r--r--  stdlib/longlong.h           |  88
-rw-r--r--  sysdeps/generic/mul_n.c     |  20
-rw-r--r--  sysdeps/i386/i586/lshift.S  |  11
-rw-r--r--  sysdeps/i386/i586/rshift.S  |  11
-rw-r--r--  sysdeps/m68k/add_n.S        |  76
-rw-r--r--  sysdeps/m68k/sub_n.S        |  76
-rw-r--r--  sysdeps/m88k/add_n.s        | 103
-rw-r--r--  sysdeps/m88k/mul_1.s        | 128
-rw-r--r--  sysdeps/m88k/sub_n.s        | 104
-rw-r--r--  sysdeps/rs6000/add_n.s      |   2
-rw-r--r--  sysdeps/rs6000/sub_n.s      |   2
-rw-r--r--  sysdeps/sparc/add_n.S       | 304
-rw-r--r--  sysdeps/sparc/lshift.S      |  94
-rw-r--r--  sysdeps/sparc/rshift.S      |  91
-rw-r--r--  sysdeps/sparc/sub_n.S       | 391
17 files changed, 1246 insertions(+), 286 deletions(-)
diff --git a/stdlib/gmp-impl.h b/stdlib/gmp-impl.h
index 0d2a8fcede..2f0956d960 100644
--- a/stdlib/gmp-impl.h
+++ b/stdlib/gmp-impl.h
@@ -179,24 +179,22 @@ void _mp_default_free ();
    strings in base 2..36.  */
 struct bases
 {
-  /* Number of digits in the conversion base that always fits in
-     an mp_limb.  For example, for base 10 this is 10, since
-     2**32 = 4294967296 has ten digits.  */
+  /* Number of digits in the conversion base that always fits in an mp_limb.
+     For example, for base 10 on a machine where an mp_limb has 32 bits this
+     is 9, since 10**9 is the largest power of 10 that fits in an mp_limb.  */
   int chars_per_limb;
 
   /* log(2)/log(conversion_base) */
   float chars_per_bit_exactly;
 
-  /* big_base is conversion_base**chars_per_limb, i.e. the biggest
-     number that fits a word, built by factors of conversion_base.
-     Exception: For 2, 4, 8, etc, big_base is log2(base), i.e. the
-     number of bits used to represent each digit in the base.  */
+  /* base**chars_per_limb, i.e. the biggest number that fits a word, built by
+     factors of base.  Exception: For 2, 4, 8, etc, big_base is log2(base),
+     i.e. the number of bits used to represent each digit in the base.  */
   mp_limb big_base;
 
-  /* big_base_inverted is a BITS_PER_MP_LIMB bit approximation to
-     1/big_base, represented as a fixed-point number.  Instead of
-     dividing by big_base an application can choose to multiply
-     by big_base_inverted.  */
+  /* A BITS_PER_MP_LIMB bit approximation to 1/big_base, represented as a
+     fixed-point number.  Instead of dividing by big_base an application can
+     choose to multiply by big_base_inverted.  */
   mp_limb big_base_inverted;
 };
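
For concreteness, a small standalone C sketch (not part of the patch) showing
how the first and third of these fields come about for base 10 with a 32-bit
mp_limb; the computation and names here are illustrative only:

#include <stdio.h>

typedef unsigned int mp_limb;	/* 32-bit limb, as in stdlib/gmp.h */

int
main (void)
{
  unsigned int base = 10, chars_per_limb = 0;
  mp_limb big_base = 1;
  /* Grow base**k while another factor of base still fits in a limb.  */
  while (big_base <= (mp_limb) -1 / base)
    {
      big_base *= base;
      chars_per_limb++;
    }
  /* Prints: chars_per_limb = 9, big_base = 1000000000 (i.e. 10**9).  */
  printf ("chars_per_limb = %u, big_base = %u\n", chars_per_limb, big_base);
  return 0;
}
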
 
diff --git a/stdlib/gmp.h b/stdlib/gmp.h
index 0b2cb29014..243779996d 100644
--- a/stdlib/gmp.h
+++ b/stdlib/gmp.h
@@ -23,6 +23,7 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
 #ifndef __GNU_MP__
 #define __need_size_t
 #include <stddef.h>
+#undef __need_size_t
 
 #if defined (__STDC__)
 #define __gmp_const const
@@ -40,7 +41,7 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
 typedef unsigned int		mp_limb;
 typedef int			mp_limb_signed;
 #else
-#if _LONG_LONG_LIMB
+#ifdef _LONG_LONG_LIMB
 typedef unsigned long long int	mp_limb;
 typedef long long int		mp_limb_signed;
 #else
@@ -110,11 +111,11 @@ typedef __mpq_struct mpq_t[1];
 
 typedef struct
 {
-  mp_size_t alloc;		/* Number of *limbs* allocated and pointed
-				   to by the D field.  */
   mp_size_t prec;		/* Max precision, in number of `mp_limb's.
 				   Set by mpf_init and modified by
-				   mpf_set_prec.  */
+				   mpf_set_prec.  The area pointed to
+				   by the `d' field contains `prec' + 1
+				   limbs.  */
   mp_size_t size;		/* abs(SIZE) is the number of limbs
 				   the last field points to.  If SIZE
 				   is negative this is a negative
@@ -127,7 +128,7 @@ typedef struct
 typedef __mpf_struct mpf_t[1];
 
 /* Types for function declarations in gmp files.  */
-/* ??? Should not pollute user name space ??? */
+/* ??? Should not pollute user name space with these ??? */
 typedef __gmp_const __mpz_struct *mpz_srcptr;
 typedef __mpz_struct *mpz_ptr;
 typedef __gmp_const __mpf_struct *mpf_srcptr;
diff --git a/stdlib/longlong.h b/stdlib/longlong.h
index bbb92e3af8..e52bf32dba 100644
--- a/stdlib/longlong.h
+++ b/stdlib/longlong.h
@@ -139,6 +139,7 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
     __asm__ ("clz %0,%1"						\
 	     : "=r" ((USItype)(count))					\
 	     : "r" ((USItype)(x)))
+#define COUNT_LEADING_ZEROS_0 32
 #endif /* __a29k__ */
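
This is the first of several COUNT_LEADING_ZEROS_0 definitions added in this
file; each records what count_leading_zeros yields for a zero argument on that
CPU (32 here, but -32 on i960 and 63 on m88k, both marked "sic" below).  A
hedged sketch of how a caller might use it; clz_any is a hypothetical name,
not part of the patch:

#if defined (COUNT_LEADING_ZEROS_0) && COUNT_LEADING_ZEROS_0 == W_TYPE_SIZE
/* The instruction already yields the word size for 0; no extra test.  */
#define clz_any(count, x) count_leading_zeros (count, x)
#else
#define clz_any(count, x) \
  do {									\
    if ((x) == 0)							\
      (count) = W_TYPE_SIZE;						\
    else								\
      count_leading_zeros (count, x);					\
  } while (0)
#endif
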
 
 #if defined (__alpha__) && W_TYPE_SIZE == 64
@@ -298,9 +299,9 @@ extern UDItype __udiv_qrnnd ();
 	   struct {USItype __h, __l;} __i;				\
 	  } __xx;							\
     __asm__ ("xmpyu %1,%2,%0"						\
-	     : "=fx" (__xx.__ll)					\
-	     : "fx" ((USItype)(u)),					\
-	       "fx" ((USItype)(v)));					\
+	     : "=*f" (__xx.__ll)					\
+	     : "*f" ((USItype)(u)),					\
+	       "*f" ((USItype)(v)));					\
     (wh) = __xx.__i.__h;						\
     (wl) = __xx.__i.__l;						\
   } while (0)
@@ -339,7 +340,7 @@ extern USItype __udiv_qrnnd ();
 	sub		%0,%1,%0		; Subtract it.
 	" : "=r" (count), "=r" (__tmp) : "1" (x));			\
   } while (0)
-#endif
+#endif /* hppa */
 
 #if (defined (__i370__) || defined (__mvs__)) && W_TYPE_SIZE == 32
 #define umul_ppmm(xh, xl, m0, m1) \
@@ -431,7 +432,29 @@ extern USItype __udiv_qrnnd ();
 #endif
 #endif /* 80x86 */
 
+#if defined (__i860__) && W_TYPE_SIZE == 32
+#define rshift_rhlc(r,h,l,c) \
+  __asm__ ("shr %3,r0,r0\;shrd %1,%2,%0"				\
+	   : "=r" (r) : "r" (h), "r" (l), "rn" (c))
+#endif /* i860 */
+
 #if defined (__i960__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("cmpo 1,0\;addc %5,%4,%1\;addc %3,%2,%0"			\
+	   : "=r" ((USItype)(sh)),					\
+	     "=&r" ((USItype)(sl))					\
+	   : "%dI" ((USItype)(ah)),					\
+	     "dI" ((USItype)(bh)),					\
+	     "%dI" ((USItype)(al)),					\
+	     "dI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("cmpo 0,0\;subc %5,%4,%1\;subc %3,%2,%0"			\
+	   : "=r" ((USItype)(sh)),					\
+	     "=&r" ((USItype)(sl))					\
+	   : "dI" ((USItype)(ah)),					\
+	     "dI" ((USItype)(bh)),					\
+	     "dI" ((USItype)(al)),					\
+	     "dI" ((USItype)(bl)))
 #define umul_ppmm(w1, w0, u, v) \
   ({union {UDItype __ll;						\
 	   struct {USItype __l, __h;} __i;				\
@@ -448,7 +471,39 @@ extern USItype __udiv_qrnnd ();
 	     : "%dI" ((USItype)(u)),					\
 	       "dI" ((USItype)(v)));					\
     __w; })  
-#endif /* __i960__ */
+#define udiv_qrnnd(q, r, nh, nl, d) \
+  do {									\
+    union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __nn, __rq;							\
+    __nn.__i.__h = (nh); __nn.__i.__l = (nl);				\
+    __asm__ ("ediv %d,%n,%0"						\
+	   : "=d" (__rq.__ll)						\
+	   : "dI" (__nn.__ll),						\
+	     "dI" ((USItype)(d)));					\
+    (r) = __rq.__i.__l; (q) = __rq.__i.__h;				\
+  } while (0)
+#define count_leading_zeros(count, x) \
+  do {									\
+    USItype __cbtmp;							\
+    __asm__ ("scanbit %1,%0"						\
+	     : "=r" (__cbtmp)						\
+	     : "r" ((USItype)(x)));					\
+    (count) = __cbtmp ^ 31;						\
+  } while (0)
+#define COUNT_LEADING_ZEROS_0 (-32) /* sic */
+#if defined (__i960mx)		/* what is the proper symbol to test??? */
+#define rshift_rhlc(r,h,l,c) \
+  do {									\
+    union {UDItype __ll;						\
+	   struct {USItype __l, __h;} __i;				\
+	  } __nn;							\
+    __nn.__i.__h = (h); __nn.__i.__l = (l);				\
+    __asm__ ("shre %2,%1,%0"						\
+	     : "=d" (r) : "dI" (__nn.__ll), "dI" (c));			\
+  } while (0)
+#endif /* i960mx */
+#endif /* i960 */
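
For reference, the add_ssaaaa and sub_ddmmss macros added above compute a
two-word sum and difference, sh:sl = ah:al +/- bh:bl.  A portable C sketch of
the same semantics, equivalent in spirit to the generic fallbacks longlong.h
uses when no asm version is defined (the _sketch names are illustrative):

#define add_ssaaaa_sketch(sh, sl, ah, al, bh, bl) \
  do {									\
    USItype __x = (al) + (bl);						\
    (sh) = (ah) + (bh) + (__x < (al));	/* wraparound => carry */	\
    (sl) = __x;								\
  } while (0)

#define sub_ddmmss_sketch(sh, sl, ah, al, bh, bl) \
  do {									\
    USItype __x = (al) - (bl);						\
    (sh) = (ah) - (bh) - ((al) < (bl));	/* borrow from low word */	\
    (sl) = __x;								\
  } while (0)
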
 
 #if (defined (__mc68000__) || defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
@@ -469,7 +524,7 @@ extern USItype __udiv_qrnnd ();
 	     "d" ((USItype)(bh)),					\
 	     "1" ((USItype)(al)),					\
 	     "g" ((USItype)(bl)))
-#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
+#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
 #define umul_ppmm(w1, w0, u, v) \
   __asm__ ("mulu%.l %3,%1:%0"						\
 	   : "=d" ((USItype)(w0)),					\
@@ -496,8 +551,9 @@ extern USItype __udiv_qrnnd ();
   __asm__ ("bfffo %1{%b2:%b2},%0"					\
 	   : "=d" ((USItype)(count))					\
 	   : "od" ((USItype)(x)), "n" (0))
+#define COUNT_LEADING_ZEROS_0 32
 #else /* not mc68020 */
-#define umul_ppmmxx(xh, xl, a, b) \
+#define umul_ppmm(xh, xl, a, b) \
   do { USItype __umul_tmp1, __umul_tmp2;				\
 	__asm__ ("| Inlined umul_ppmm
 	move%.l	%5,%3
@@ -557,6 +613,7 @@ extern USItype __udiv_qrnnd ();
 	     : "r" ((USItype)(x)));					\
     (count) = __cbtmp ^ 31;						\
   } while (0)
+#define COUNT_LEADING_ZEROS_0 63 /* sic */
 #if defined (__m88110__)
 #define umul_ppmm(wh, wl, u, v) \
   do {									\
@@ -738,6 +795,7 @@ extern USItype __udiv_qrnnd ();
   __asm__ ("{cntlz|cntlzw} %0,%1"					\
 	   : "=r" ((USItype)(count))					\
 	   : "r" ((USItype)(x)))
+#define COUNT_LEADING_ZEROS_0 32
 #if defined (_ARCH_PPC)
 #define umul_ppmm(ph, pl, m0, m1) \
   do {									\
@@ -887,7 +945,7 @@ extern USItype __udiv_qrnnd ();
 	(count) += 16;							\
       }									\
   } while (0)
-#endif
+#endif /* RT/ROMP */
 
 #if defined (__sh2__) && W_TYPE_SIZE == 32
 #define umul_ppmm(w1, w0, u, v) \
@@ -1154,20 +1212,6 @@ extern USItype __udiv_qrnnd ();
     (xh) += ((((signed int) __m0 >> 15) & __m1)				\
 	     + (((signed int) __m1 >> 15) & __m0));			\
   } while (0)
-#define umul_ppmm_off(xh, xl, m0, m1) \
-  do {									\
-    union {long int __ll;						\
-	   struct {unsigned int __h, __l;} __i;				\
-	  } __xx;							\
-    __asm__ ("mult	%S0,%H3"					\
-	     : "=r" (__xx.__i.__h),					\
-	       "=r" (__xx.__i.__l)					\
-	     : "%1" (m0),						\
-	       "rQR" (m1));						\
-    (xh) = __xx.__i.__h + ((((signed int) m0 >> 15) & m1)		\
-			   + (((signed int) m1 >> 15) & m0));		\
-    (xl) = __xx.__i.__l;						\
-  } while (0)
 #endif /* __z8000__ */
 
 #endif /* __GNUC__ */
diff --git a/sysdeps/generic/mul_n.c b/sysdeps/generic/mul_n.c
index 7900988143..e37c5d8290 100644
--- a/sysdeps/generic/mul_n.c
+++ b/sysdeps/generic/mul_n.c
@@ -216,15 +216,7 @@ ____mpn_mul_n (prodp, up, vp, size, tspace)
 
       cy += __mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
       if (cy)
-	{
-	  if (cy > 0)
-	    __mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
-	  else
-	    {
-	      __mpn_sub_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
-	      abort ();
-	    }
-	}
+	__mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
 
       MPN_COPY (prodp, tspace, hsize);
       cy = __mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
@@ -362,15 +354,7 @@ ____mpn_sqr_n (prodp, up, size, tspace)
 
       cy += __mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
       if (cy)
-	{
-	  if (cy > 0)
-	    __mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
-	  else
-	    {
-	      __mpn_sub_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
-	      abort ();
-	    }
-	}
+	__mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
 
       MPN_COPY (prodp, tspace, hsize);
       cy = __mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
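
In both hunks the deleted else-branch was unreachable: here cy accumulates
carries returned by __mpn_add_n, each 0 or 1, so it can never be negative.
What remains is plain carry propagation; a minimal in-place C sketch of what
the __mpn_add_1 call does at this point (simplified; the real routine also
copies limbs when source and destination differ):

typedef unsigned int mp_limb;
typedef int mp_size_t;

/* Propagate carry v (0, 1, or 2 here) through the n-limb number at rp;
   return the carry out of the top limb.  */
static mp_limb
add_1_sketch (mp_limb *rp, mp_size_t n, mp_limb v)
{
  mp_limb cy = v;
  mp_size_t i;
  for (i = 0; i < n && cy != 0; i++)
    {
      rp[i] += cy;
      cy = rp[i] < cy;		/* wrapped around => carry out */
    }
  return cy;
}
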
diff --git a/sysdeps/i386/i586/lshift.S b/sysdeps/i386/i586/lshift.S
index b9f8131297..c41f74e17d 100644
--- a/sysdeps/i386/i586/lshift.S
+++ b/sysdeps/i386/i586/lshift.S
@@ -43,12 +43,15 @@ C_SYMBOL_NAME(__mpn_lshift:)
 	movl	28(%esp),%ebp		/* size */
 	movl	32(%esp),%ecx		/* cnt */
 
+/* We can use faster code for shift-by-1 under certain conditions.  */
 	cmp	$1,%ecx
 	jne	Lnormal
-	movl	%edi,%eax
-	subl	%esi,%eax
-	cmpl	%ebp,%eax
-	jnc	Lspecial
+	leal	4(%esi),%eax
+	cmpl	%edi,%eax
+	jnc	Lspecial		/* jump if s_ptr + 1 >= res_ptr */
+	leal	(%esi,%ebp,4),%eax
+	cmpl	%eax,%edi
+	jnc	Lspecial		/* jump if res_ptr >= s_ptr + size */
 
 Lnormal:
 	leal	-4(%edi,%ebp,4),%edi
diff --git a/sysdeps/i386/i586/rshift.S b/sysdeps/i386/i586/rshift.S
index 51cde8f07f..a820a79bc7 100644
--- a/sysdeps/i386/i586/rshift.S
+++ b/sysdeps/i386/i586/rshift.S
@@ -43,12 +43,15 @@ C_SYMBOL_NAME(__mpn_rshift:)
 	movl	28(%esp),%ebp		/* size */
 	movl	32(%esp),%ecx		/* cnt */
 
+/* We can use faster code for shift-by-1 under certain conditions.  */
 	cmp	$1,%ecx
 	jne	Lnormal
-	movl	%edi,%eax
-	subl	%esi,%eax
-	cmpl	%ebp,%eax
-	jnc	Lspecial
+	leal	4(%edi),%eax
+	cmpl	%esi,%eax
+	jnc	Lspecial		/* jump if res_ptr + 1 >= s_ptr */
+	leal	(%edi,%ebp,4),%eax
+	cmpl	%eax,%esi
+	jnc	Lspecial		/* jump if s_ptr >= res_ptr + size */
 
 Lnormal:
 	movl	(%esi),%edx
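
The point of the new tests in both shift routines is overlap safety, not just
speed: the shift-by-1 fast path walks the vector in the opposite direction
from the general loop, so it may only be taken when the destination cannot
overrun the source.  A C sketch of the two conditions (a hypothetical
rendering of the jnc tests, with the pointer names used in the comments
above):

typedef unsigned int mp_limb;

/* May __mpn_lshift use its shift-by-1 fast path?  */
static int
lshift1_ok (const mp_limb *res_ptr, const mp_limb *s_ptr, unsigned long size)
{
  return s_ptr + 1 >= res_ptr || res_ptr >= s_ptr + size;
}

/* The mirror-image condition for __mpn_rshift.  */
static int
rshift1_ok (const mp_limb *res_ptr, const mp_limb *s_ptr, unsigned long size)
{
  return res_ptr + 1 >= s_ptr || s_ptr >= res_ptr + size;
}
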
diff --git a/sysdeps/m68k/add_n.S b/sysdeps/m68k/add_n.S
new file mode 100644
index 0000000000..ea7a4458ea
--- /dev/null
+++ b/sysdeps/m68k/add_n.S
@@ -0,0 +1,76 @@
+/* mc68020 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+   sum in a third limb vector.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+  INPUT PARAMETERS
+  res_ptr	(sp + 4)
+  s1_ptr	(sp + 8)
+  s2_ptr	(sp + 16)
+  size		(sp + 12)
+*/
+
+#include "asm-syntax.h"
+
+	TEXT
+	ALIGN
+	GLOBL	___mpn_add_n
+
+LAB(___mpn_add_n)
+/* Save used registers on the stack.  */
+	INSN2(move,l	,MEM_PREDEC(sp),d2)
+	INSN2(move,l	,MEM_PREDEC(sp),a2)
+
+/* Copy the arguments to registers.  Better use movem?  */
+	INSN2(move,l	,a2,MEM_DISP(sp,12))
+	INSN2(move,l	,a0,MEM_DISP(sp,16))
+	INSN2(move,l	,a1,MEM_DISP(sp,20))
+	INSN2(move,l	,d2,MEM_DISP(sp,24))
+
+	INSN2(eor,w	,d2,#1)
+	INSN2(lsr,l	,d2,#1)
+	bcc L1
+	INSN2(subq,l	,d2,#1)		/* clears cy as side effect */
+
+LAB(Loop)
+	INSN2(move,l	,d0,MEM_POSTINC(a0))
+	INSN2(move,l	,d1,MEM_POSTINC(a1))
+	INSN2(addx,l	,d0,d1)
+	INSN2(move,l	,MEM_POSTINC(a2),d0)
+LAB(L1)	INSN2(move,l	,d0,MEM_POSTINC(a0))
+	INSN2(move,l	,d1,MEM_POSTINC(a1))
+	INSN2(addx,l	,d0,d1)
+	INSN2(move,l	,MEM_POSTINC(a2),d0)
+
+	dbf d2,Loop			/* loop until 16 lsb of d2 == -1 */
+	INSN2(subx,l	,d0,d0)		/* d0 <= -cy; save cy as 0 or -1 in d0 */
+	INSN2(sub,l	,d2,#0x10000)
+	bcs L2
+	INSN2(add,l	,d0,d0)		/* restore cy */
+	bra Loop
+
+LAB(L2)
+	INSN1(neg,l	,d0)
+
+/* Restore used registers from stack frame.  */
+	INSN2(move,l	,a2,MEM_POSTINC(sp))
+	INSN2(move,l	,d2,MEM_POSTINC(sp))
+
+	rts
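
For reference, what this routine computes (sub_n below is identical except
for using subx in place of addx), as a portable C sketch assuming 32-bit
limbs; the two-limb-per-iteration dbf loop above is an unrolled form of this:

typedef unsigned int mp_limb;
typedef int mp_size_t;

/* res[0..n) = s1[0..n) + s2[0..n); returns carry out of the top limb.  */
static mp_limb
add_n_sketch (mp_limb *res, const mp_limb *s1, const mp_limb *s2, mp_size_t n)
{
  mp_limb cy = 0;
  mp_size_t i;
  for (i = 0; i < n; i++)
    {
      mp_limb a = s1[i];
      mp_limb t = a + s2[i];	/* may wrap: first carry source */
      mp_limb s = t + cy;	/* may wrap: second carry source */
      cy = (t < a) | (s < t);	/* at most one of the two wraps */
      res[i] = s;
    }
  return cy;
}
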
diff --git a/sysdeps/m68k/sub_n.S b/sysdeps/m68k/sub_n.S
new file mode 100644
index 0000000000..19f0ec1568
--- /dev/null
+++ b/sysdeps/m68k/sub_n.S
@@ -0,0 +1,76 @@
+/* mc68020 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+   store difference in a third limb vector.
+
+Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+  INPUT PARAMETERS
+  res_ptr	(sp + 4)
+  s1_ptr	(sp + 8)
+  s2_ptr	(sp + 12)
+  size		(sp + 16)
+*/
+
+#include "asm-syntax.h"
+
+	TEXT
+	ALIGN
+	GLOBL	___mpn_sub_n
+
+LAB(___mpn_sub_n)
+/* Save used registers on the stack.  */
+	INSN2(move,l	,MEM_PREDEC(sp),d2)
+	INSN2(move,l	,MEM_PREDEC(sp),a2)
+
+/* Copy the arguments to registers.  Better use movem?  */
+	INSN2(move,l	,a2,MEM_DISP(sp,12))
+	INSN2(move,l	,a0,MEM_DISP(sp,16))
+	INSN2(move,l	,a1,MEM_DISP(sp,20))
+	INSN2(move,l	,d2,MEM_DISP(sp,24))
+
+	INSN2(eor,w	,d2,#1)
+	INSN2(lsr,l	,d2,#1)
+	bcc L1
+	INSN2(subq,l	,d2,#1)		/* clears cy as side effect */
+
+LAB(Loop)
+	INSN2(move,l	,d0,MEM_POSTINC(a0))
+	INSN2(move,l	,d1,MEM_POSTINC(a1))
+	INSN2(subx,l	,d0,d1)
+	INSN2(move,l	,MEM_POSTINC(a2),d0)
+LAB(L1)	INSN2(move,l	,d0,MEM_POSTINC(a0))
+	INSN2(move,l	,d1,MEM_POSTINC(a1))
+	INSN2(subx,l	,d0,d1)
+	INSN2(move,l	,MEM_POSTINC(a2),d0)
+
+	dbf d2,Loop			/* loop until 16 lsb of d2 == -1 */
+	INSN2(subx,l	,d0,d0)		/* d0 <= -cy; save cy as 0 or -1 in d0 */
+	INSN2(sub,l	,d2,#0x10000)
+	bcs L2
+	INSN2(add,l	,d0,d0)		/* restore cy */
+	bra Loop
+
+LAB(L2)
+	INSN1(neg,l	,d0)
+
+/* Restore used registers from stack frame.  */
+	INSN2(move,l	,a2,MEM_POSTINC(sp))
+	INSN2(move,l	,d2,MEM_POSTINC(sp))
+
+	rts
diff --git a/sysdeps/m88k/add_n.s b/sysdeps/m88k/add_n.s
new file mode 100644
index 0000000000..7e4ccccb90
--- /dev/null
+++ b/sysdeps/m88k/add_n.s
@@ -0,0 +1,103 @@
+; mc88100 __mpn_add -- Add two limb vectors of the same length > 0 and store
+; sum in a third limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr	r2
+; s1_ptr	r3
+; s2_ptr	r4
+; size		r5
+
+; This code has been optimized to run one instruction per clock, avoiding
+; load stalls and writeback contention.  As a result, the instruction
+; order is not always natural.
+
+; The speed is about 4.6 clocks/limb + 18 clocks/limb-vector on an 88100,
+; but on the 88110, it seems to run much slower, 6.6 clocks/limb.
+
+	text
+	align	 16
+	global	 ___mpn_add_n
+___mpn_add_n:
+	ld	r6,r3,0			; read first limb from s1_ptr
+	extu	r10,r5,3
+	ld	r7,r4,0			; read first limb from s2_ptr
+
+	subu.co	r5,r0,r5		; (clear carry as side effect)
+	mak	r5,r5,3<4>
+	bcnd	eq0,r5,Lzero
+
+	or	r12,r0,lo16(Lbase)
+	or.u	r12,r12,hi16(Lbase)
+	addu	r12,r12,r5		; r12 is address for entering in loop
+
+	extu	r5,r5,2			; divide by 4
+	subu	r2,r2,r5		; adjust res_ptr
+	subu	r3,r3,r5		; adjust s1_ptr
+	subu	r4,r4,r5		; adjust s2_ptr
+
+	or	r8,r6,r0
+
+	jmp.n	r12
+	 or	r9,r7,r0
+
+Loop:	addu	r3,r3,32
+	st	r8,r2,28
+	addu	r4,r4,32
+	ld	r6,r3,0
+	addu	r2,r2,32
+	ld	r7,r4,0
+Lzero:	subu	r10,r10,1		; add 0 + 8r limbs (adj loop cnt)
+Lbase:	ld	r8,r3,4
+	addu.cio r6,r6,r7
+	ld	r9,r4,4
+	st	r6,r2,0
+	ld	r6,r3,8			; add 7 + 8r limbs
+	addu.cio r8,r8,r9
+	ld	r7,r4,8
+	st	r8,r2,4
+	ld	r8,r3,12		; add 6 + 8r limbs
+	addu.cio r6,r6,r7
+	ld	r9,r4,12
+	st	r6,r2,8
+	ld	r6,r3,16		; add 5 + 8r limbs
+	addu.cio r8,r8,r9
+	ld	r7,r4,16
+	st	r8,r2,12
+	ld	r8,r3,20		; add 4 + 8r limbs
+	addu.cio r6,r6,r7
+	ld	r9,r4,20
+	st	r6,r2,16
+	ld	r6,r3,24		; add 3 + 8r limbs
+	addu.cio r8,r8,r9
+	ld	r7,r4,24
+	st	r8,r2,20
+	ld	r8,r3,28		; add 2 + 8r limbs
+	addu.cio r6,r6,r7
+	ld	r9,r4,28
+	st	r6,r2,24
+	bcnd.n	ne0,r10,Loop		; add 1 + 8r limbs
+	 addu.cio r8,r8,r9
+
+	st	r8,r2,28		; store most significant limb
+
+	jmp.n	 r1
+	 addu.ci r2,r0,r0		; return carry-out from most sign. limb
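
The computed jmp.n into the middle of the 8-way unrolled loop above is the
assembly analogue of Duff's device.  A hedged C sketch of the same control
structure (ADD_STEP and add_n_duff are hypothetical names; ADD_STEP stands in
for one ld/addu.cio/st group):

typedef unsigned int mp_limb;
typedef long mp_size_t;

#define ADD_STEP(i)							\
  do {									\
    mp_limb __a = s1[i], __t = __a + s2[i];				\
    mp_limb __s = __t + cy;						\
    cy = (__t < __a) | (__s < __t);					\
    res[i] = __s;							\
  } while (0)

static mp_limb
add_n_duff (mp_limb *res, const mp_limb *s1, const mp_limb *s2,
	    mp_size_t size)		/* requires size > 0 */
{
  mp_limb cy = 0;
  mp_size_t i = 0, rounds = (size + 7) >> 3;
  switch (size & 7)			/* enter the unrolled body mid-way */
    {
    case 0: do { ADD_STEP (i); i++;
    case 7:	 ADD_STEP (i); i++;
    case 6:	 ADD_STEP (i); i++;
    case 5:	 ADD_STEP (i); i++;
    case 4:	 ADD_STEP (i); i++;
    case 3:	 ADD_STEP (i); i++;
    case 2:	 ADD_STEP (i); i++;
    case 1:	 ADD_STEP (i); i++;
	       } while (--rounds > 0);
    }
  return cy;
}
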
diff --git a/sysdeps/m88k/mul_1.s b/sysdeps/m88k/mul_1.s
new file mode 100644
index 0000000000..35c238d570
--- /dev/null
+++ b/sysdeps/m88k/mul_1.s
@@ -0,0 +1,128 @@
+; mc88100 __mpn_mul_1 -- Multiply a limb vector with a single limb and
+; store the product in a second limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr	r2
+; s1_ptr	r3
+; size		r4
+; s2_limb	r5
+
+; Common overhead is about 11 cycles/invocation.
+
+; The speed for S2_LIMB >= 0x10000 is approximately 21 cycles/limb.  (The
+; pipeline stalls 2 cycles due to WB contention.)
+
+; The speed for S2_LIMB < 0x10000 is approximately 16 cycles/limb.  (The
+; pipeline stalls 2 cycles due to WB contention and 1 cycle due to latency.)
+
+; To enhance speed:
+; 1. Unroll main loop 4-8 times.
+; 2. Schedule code to avoid WB contention.  It might be tempting to move the
+;    ld instruction in the loops down to save 2 cycles (less WB contention),
+;    but that looses because the ultimate value will be read from outside
+;    the allocated space.  But if we handle the ultimate multiplication in
+;    the tail, we can do this.
+; 3. Make the multiplication with less instructions.  I think the code for
+;    (S2_LIMB >= 0x10000) is not minimal.
+; With these techniques the (S2_LIMB >= 0x10000) case would run in 17 or
+; less cycles/limb; the (S2_LIMB < 0x10000) case would run in 11
+; cycles/limb.  (Assuming infinite unrolling.)
+
+	text
+	align	 16
+	global	 ___mpn_mul_1
+___mpn_mul_1:
+
+	; Make S1_PTR and RES_PTR point at the end of their blocks
+	; and negate SIZE.
+	lda	 r3,r3[r4]
+	lda	 r6,r2[r4]		; RES_PTR in r6 since r2 is retval
+	subu	 r4,r0,r4
+
+	addu.co	 r2,r0,r0		; r2 = cy = 0
+	ld	 r9,r3[r4]
+	mask	 r7,r5,0xffff		; r7 = lo(S2_LIMB)
+	extu	 r8,r5,16		; r8 = hi(S2_LIMB)
+	bcnd.n	 eq0,r8,Lsmall		; jump if (hi(S2_LIMB) == 0)
+	 subu	 r6,r6,4
+
+; General code for any value of S2_LIMB.
+
+	; Make a stack frame and save r25 and r26
+	subu	 r31,r31,16
+	st.d	 r25,r31,8
+
+	; Enter the loop in the middle
+	br.n	L1
+	addu	 r4,r4,1
+
+Loop:
+	ld	 r9,r3[r4]
+	st	 r26,r6[r4]
+; bcnd	ne0,r0,0			; bubble
+	addu	 r4,r4,1
+L1:	mul	 r26,r9,r5		; low word of product	mul_1	WB ld
+	mask	 r12,r9,0xffff		; r12 = lo(s1_limb)	mask_1
+	mul	 r11,r12,r7		; r11 =  prod_0		mul_2	WB mask_1
+	mul	 r10,r12,r8		; r10 = prod_1a		mul_3
+	extu	 r13,r9,16		; r13 = hi(s1_limb)	extu_1	WB mul_1
+	mul	 r12,r13,r7		; r12 = prod_1b		mul_4	WB extu_1
+	mul	 r25,r13,r8		; r25  = prod_2		mul_5	WB mul_2
+	extu	 r11,r11,16		; r11 = hi(prod_0)	extu_2	WB mul_3
+	addu	 r10,r10,r11		;			addu_1	WB extu_2
+; bcnd	ne0,r0,0			; bubble			WB addu_1
+	addu.co	 r10,r10,r12		;				WB mul_4
+	mask.u	 r10,r10,0xffff		; move the 16 most significant bits...
+	addu.ci	 r10,r10,r0		; ...to the low half of the word...
+	rot	 r10,r10,16		; ...and put carry in pos 16.
+	addu.co	 r26,r26,r2		; add old carry limb
+	bcnd.n	 ne0,r4,Loop
+	 addu.ci r2,r25,r10		; compute new carry limb
+
+	st	 r26,r6[r4]
+	ld.d	 r25,r31,8
+	jmp.n	 r1
+	 addu	 r31,r31,16
+
+; Fast code for S2_LIMB < 0x10000
+Lsmall:
+	; Enter the loop in the middle
+	br.n	SL1
+	addu	 r4,r4,1
+
+SLoop:
+	ld	 r9,r3[r4]		;
+	st	 r8,r6[r4]		;
+	addu	 r4,r4,1		;
+SL1:	mul	 r8,r9,r5		; low word of product
+	mask	 r12,r9,0xffff		; r12 = lo(s1_limb)
+	extu	 r13,r9,16		; r13 = hi(s1_limb)
+	mul	 r11,r12,r7		; r11 =  prod_0
+	mul	 r12,r13,r7		; r12 = prod_1b
+	addu.cio r8,r8,r2		; add old carry limb
+	extu	 r10,r11,16		; r10 = hi(prod_0)
+	addu	 r10,r10,r12		;
+	bcnd.n	 ne0,r4,SLoop
+	extu	 r2,r10,16		; r2 = new carry limb
+
+	jmp.n	 r1
+	st	 r8,r6[r4]
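
The mc88100 mul instruction yields only a 32-bit product, so the high word of
each limb product is assembled from 16-bit partial products (the
mask/extu/mul sequences above).  A C sketch of that decomposition, assuming a
32-bit USItype (umul_sketch is an illustrative name, not the code's own):

typedef unsigned int USItype;

/* *hi:*lo = u * v, using only 32x32->32 multiplies on 16-bit halves.  */
static void
umul_sketch (USItype u, USItype v, USItype *hi, USItype *lo)
{
  USItype ul = u & 0xffff, uh = u >> 16;
  USItype vl = v & 0xffff, vh = v >> 16;
  USItype p0 = ul * vl;			/* prod_0  */
  USItype p1a = ul * vh;		/* prod_1a */
  USItype p1b = uh * vl;		/* prod_1b */
  USItype p2 = uh * vh;			/* prod_2  */
  USItype mid = (p0 >> 16) + p1a;	/* cannot wrap in 32 bits */
  USItype c;
  mid += p1b;
  c = mid < p1b;			/* carry into bit 32 */
  *lo = (mid << 16) | (p0 & 0xffff);
  *hi = p2 + (mid >> 16) + (c << 16);
}
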
diff --git a/sysdeps/m88k/sub_n.s b/sysdeps/m88k/sub_n.s
new file mode 100644
index 0000000000..3963cd5479
--- /dev/null
+++ b/sysdeps/m88k/sub_n.s
@@ -0,0 +1,104 @@
+; mc88100 __mpn_sub -- Subtract two limb vectors of the same length > 0 and
+; store difference in a third limb vector.
+
+; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+
+; This file is part of the GNU MP Library.
+
+; The GNU MP Library is free software; you can redistribute it and/or modify
+; it under the terms of the GNU Library General Public License as published by
+; the Free Software Foundation; either version 2 of the License, or (at your
+; option) any later version.
+
+; The GNU MP Library is distributed in the hope that it will be useful, but
+; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+; License for more details.
+
+; You should have received a copy of the GNU Library General Public License
+; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+; the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+; INPUT PARAMETERS
+; res_ptr	r2
+; s1_ptr	r3
+; s2_ptr	r4
+; size		r5
+
+; This code has been optimized to run one instruction per clock, avoiding
+; load stalls and writeback contention.  As a result, the instruction
+; order is not always natural.
+
+; The speed is about 4.6 clocks/limb + 18 clocks/limb-vector on an 88100,
+; but on the 88110, it seems to run much slower, 6.6 clocks/limb.
+
+	text
+	align	 16
+	global	 ___mpn_sub_n
+___mpn_sub_n:
+	ld	r6,r3,0			; read first limb from s1_ptr
+	extu	r10,r5,3
+	ld	r7,r4,0			; read first limb from s2_ptr
+
+	subu.co	r5,r0,r5		; (clear carry as side effect)
+	mak	r5,r5,3<4>
+	bcnd	eq0,r5,Lzero
+
+	or	r12,r0,lo16(Lbase)
+	or.u	r12,r12,hi16(Lbase)
+	addu	r12,r12,r5		; r12 is address for entering in loop
+
+	extu	r5,r5,2			; divide by 4
+	subu	r2,r2,r5		; adjust res_ptr
+	subu	r3,r3,r5		; adjust s1_ptr
+	subu	r4,r4,r5		; adjust s2_ptr
+
+	or	r8,r6,r0
+
+	jmp.n	r12
+	 or	r9,r7,r0
+
+Loop:	addu	r3,r3,32
+	st	r8,r2,28
+	addu	r4,r4,32
+	ld	r6,r3,0
+	addu	r2,r2,32
+	ld	r7,r4,0
+Lzero:	subu	r10,r10,1		; subtract 0 + 8r limbs (adj loop cnt)
+Lbase:	ld	r8,r3,4
+	subu.cio r6,r6,r7
+	ld	r9,r4,4
+	st	r6,r2,0
+	ld	r6,r3,8			; subtract 7 + 8r limbs
+	subu.cio r8,r8,r9
+	ld	r7,r4,8
+	st	r8,r2,4
+	ld	r8,r3,12		; subtract 6 + 8r limbs
+	subu.cio r6,r6,r7
+	ld	r9,r4,12
+	st	r6,r2,8
+	ld	r6,r3,16		; subtract 5 + 8r limbs
+	subu.cio r8,r8,r9
+	ld	r7,r4,16
+	st	r8,r2,12
+	ld	r8,r3,20		; subtract 4 + 8r limbs
+	subu.cio r6,r6,r7
+	ld	r9,r4,20
+	st	r6,r2,16
+	ld	r6,r3,24		; subtract 3 + 8r limbs
+	subu.cio r8,r8,r9
+	ld	r7,r4,24
+	st	r8,r2,20
+	ld	r8,r3,28		; subtract 2 + 8r limbs
+	subu.cio r6,r6,r7
+	ld	r9,r4,28
+	st	r6,r2,24
+	bcnd.n	ne0,r10,Loop		; subtract 1 + 8r limbs
+	 subu.cio r8,r8,r9
+
+	st	r8,r2,28		; store most significant limb
+
+	addu.ci r2,r0,r0		; return carry-out from most sign. limb
+	jmp.n	 r1
+	 xor	r2,r2,1
diff --git a/sysdeps/rs6000/add_n.s b/sysdeps/rs6000/add_n.s
index 34ad9e1d2d..7090cf1b00 100644
--- a/sysdeps/rs6000/add_n.s
+++ b/sysdeps/rs6000/add_n.s
@@ -45,7 +45,7 @@ __mpn_add_n:
 	bdz	Lend		# If done, skip loop
 Loop:	lu	8,4(4)		# load s1 limb and update s1_ptr
 	lu	0,4(5)		# load s2 limb and update s2_ptr
-	stu	7,4(3)		# store previous limb in load latecny slot
+	stu	7,4(3)		# store previous limb in load latency slot
 	ae	7,0,8		# add new limbs with cy, set cy
 	bdn	Loop		# decrement CTR and loop back
 Lend:	st	7,4(3)		# store ultimate result limb
diff --git a/sysdeps/rs6000/sub_n.s b/sysdeps/rs6000/sub_n.s
index 402fdcefc4..40fe7d68bd 100644
--- a/sysdeps/rs6000/sub_n.s
+++ b/sysdeps/rs6000/sub_n.s
@@ -46,7 +46,7 @@ __mpn_sub_n:
 	bdz	Lend		# If done, skip loop
 Loop:	lu	8,4(4)		# load s1 limb and update s1_ptr
 	lu	0,4(5)		# load s2 limb and update s2_ptr
-	stu	7,4(3)		# store previous limb in load latecny slot
+	stu	7,4(3)		# store previous limb in load latency slot
 	sfe	7,0,8		# subtract new limbs with cy, set cy
 	bdn	Loop		# decrement CTR and loop back
 Lend:	st	7,4(3)		# store ultimate result limb
diff --git a/sysdeps/sparc/add_n.S b/sysdeps/sparc/add_n.S
index 13704d32d2..80c3b99640 100644
--- a/sysdeps/sparc/add_n.S
+++ b/sysdeps/sparc/add_n.S
@@ -1,7 +1,7 @@
 ! sparc __mpn_add_n -- Add two limb vectors of the same length > 0 and store
 ! sum in a third limb vector.
 
-! Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
+! Copyright (C) 1995 Free Software Foundation, Inc.
 
 ! This file is part of the GNU MP Library.
 
@@ -21,10 +21,10 @@
 
 
 ! INPUT PARAMETERS
-! res_ptr	%o0
-! s1_ptr	%o1
-! s2_ptr	%o2
-! size		%o3
+#define res_ptr	%o0
+#define s1_ptr	%o1
+#define s2_ptr	%o2
+#define size	%o3
 
 #include "sysdep.h"
 
@@ -32,108 +32,192 @@
 	.align	4
 	.global	C_SYMBOL_NAME(__mpn_add_n)
 C_SYMBOL_NAME(__mpn_add_n):
-	ld	[%o1+0],%o4		! read first limb from s1_ptr
-	srl	%o3,4,%g1
-	ld	[%o2+0],%o5		! read first limb from s2_ptr
-
-	sub	%g0,%o3,%o3
-	andcc	%o3,(16-1),%o3
-	be	Lzero
-	 mov	%o4,%g2			! put first s1_limb in g2 too
-
-	sll	%o3,2,%o3		! multiply by 4
-	sub	%o0,%o3,%o0		! adjust res_ptr
-	sub	%o1,%o3,%o1		! adjust s1_ptr
-	sub	%o2,%o3,%o2		! adjust s2_ptr
-
-#if PIC
-	mov	%o7,%g4			! Save return address register
-	call	1f
-	add	%o7,Lbase-1f,%g3
-1:	mov	%g4,%o7			! Restore return address register
-#else
-	sethi	%hi(Lbase),%g3
-	or	%g3,%lo(Lbase),%g3
-#endif
-	sll	%o3,2,%o3		! multiply by 4
-	jmp	%g3+%o3
-	 mov	%o5,%g3			! put first s2_limb in g3 too
-
-Loop:	addxcc	%g2,%g3,%o3
-	add	%o1,64,%o1
-	st	%o3,[%o0+60]
-	add	%o2,64,%o2
-	ld	[%o1+0],%o4
-	add	%o0,64,%o0
-	ld	[%o2+0],%o5
-Lzero:	sub	%g1,1,%g1	! add 0 + 16r limbs (adjust loop counter)
-Lbase:	ld	[%o1+4],%g2
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+4],%g3
-	st	%o3,[%o0+0]
-	ld	[%o1+8],%o4	! add 15 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+8],%o5
-	st	%o3,[%o0+4]
-	ld	[%o1+12],%g2	! add 14 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+12],%g3
-	st	%o3,[%o0+8]
-	ld	[%o1+16],%o4	! add 13 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+16],%o5
-	st	%o3,[%o0+12]
-	ld	[%o1+20],%g2	! add 12 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+20],%g3
-	st	%o3,[%o0+16]
-	ld	[%o1+24],%o4	! add 11 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+24],%o5
-	st	%o3,[%o0+20]
-	ld	[%o1+28],%g2	! add 10 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+28],%g3
-	st	%o3,[%o0+24]
-	ld	[%o1+32],%o4	! add 9 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+32],%o5
-	st	%o3,[%o0+28]
-	ld	[%o1+36],%g2	! add 8 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+36],%g3
-	st	%o3,[%o0+32]
-	ld	[%o1+40],%o4	! add 7 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+40],%o5
-	st	%o3,[%o0+36]
-	ld	[%o1+44],%g2	! add 6 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+44],%g3
-	st	%o3,[%o0+40]
-	ld	[%o1+48],%o4	! add 5 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+48],%o5
-	st	%o3,[%o0+44]
-	ld	[%o1+52],%g2	! add 4 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+52],%g3
-	st	%o3,[%o0+48]
-	ld	[%o1+56],%o4	! add 3 + 16r limbs
-	addxcc	%g2,%g3,%o3
-	ld	[%o2+56],%o5
-	st	%o3,[%o0+52]
-	ld	[%o1+60],%g2	! add 2 + 16r limbs
-	addxcc	%o4,%o5,%o3
-	ld	[%o2+60],%g3
-	st	%o3,[%o0+56]
-	addx	%g0,%g0,%o4
-	tst	%g1
-	bne	Loop
-	 subcc	%g0,%o4,%g0	! restore cy (delay slot)
-
-	addxcc	%g2,%g3,%o3
-	st	%o3,[%o0+60]	! store most significant limb
-
-	retl
-	 addx	%g0,%g0,%o0	! return carry-out from most sign. limb
+	cmp	size,8
+	mov	0,%o4			! clear cy-save register
+	blt,a	Ltriv
+	addcc	size,-2,size
+	xor	s2_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L1			! branch if alignment differs
+	nop
+L0:	andcc	res_ptr,4,%g0		! res_ptr unaligned? Side effect: cy=0
+	beq	L_v1			! if no, branch
+	nop
+! **  V1a  **
+/* Add least significant limb separately to align res_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	addcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+
+L_v1:	ld	[s1_ptr+0],%g4
+	ld	[s1_ptr+4],%g1
+	ldd	[s2_ptr+0],%g2
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-10,size
+	blt	Lfin1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add blocks of 8 limbs until less than 8 limbs remain */
+Loop1:	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+16],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+20],%g1
+	ldd	[s2_ptr+16],%g2
+	std	%o4,[res_ptr+8]
+	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+24],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+28],%g1
+	ldd	[s2_ptr+24],%g2
+	std	%o4,[res_ptr+16]
+	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+32],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+36],%g1
+	ldd	[s2_ptr+32],%g2
+	std	%o4,[res_ptr+24]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop1
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin1:	addcc	size,8-2,size
+	blt	Lend1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add blocks of 2 limbs until less than 2 limbs remain */
+Loop1b:	addxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	addxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loop1b
+	subcc	%g0,%o4,%g0		! restore cy
+Lend1:	addxcc	%g4,%g2,%o4
+	addxcc	%g1,%g3,%o5
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+
+	andcc	size,1,%g0
+	be	Lret1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add last limb */
+	ld	[s1_ptr+8],%g4
+	ld	[s2_ptr+8],%g2
+	addxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr+8]
+
+Lret1:	retl
+	addx	%g0,%g0,%o0	! return carry-out from most sign. limb
+
+L1:	xor	s1_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L2
+	nop
+! **  V1b  **
+	mov	s2_ptr,%g1
+	mov	s1_ptr,s2_ptr
+	b	L0
+	mov	%g1,s1_ptr
+
+! **  V2  **
+/* If we come here, the alignment of s1_ptr and res_ptr as well as the
+   alignment of s2_ptr and res_ptr differ.  Since there are only two ways
+   things can be aligned (that we care about) we now know that the alignment
+   of s1_ptr and s2_ptr is the same.  */
+
+L2:	andcc	s1_ptr,4,%g0		! s1_ptr unaligned? Side effect: cy=0
+	beq	L_v2			! if no, branch
+	nop
+/* Add least significant limb separately to align res_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	addcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+
+L_v2:	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	blt	Lfin2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add blocks of 8 limbs until less than 8 limbs remain */
+Loop2:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	ldd	[s1_ptr+8],%g2
+	ldd	[s2_ptr+8],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+8]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+12]
+	ldd	[s1_ptr+16],%g2
+	ldd	[s2_ptr+16],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+16]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+20]
+	ldd	[s1_ptr+24],%g2
+	ldd	[s2_ptr+24],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+24]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+28]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop2
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin2:	addcc	size,8-2,size
+Ltriv:	blt	Lend2
+	subcc	%g0,%o4,%g0		! restore cy
+Loop2b:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	addxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	addxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loop2b
+	subcc	%g0,%o4,%g0		! restore cy
+Lend2:	andcc	size,1,%g0
+	be	Lret2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Add last limb */
+	ld	[s1_ptr],%g4
+	ld	[s2_ptr],%g2
+	addxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+
+Lret2:	retl
+	addx	%g0,%g0,%o0	! return carry-out from most sign. limb
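
The rewritten routine exists in three variants because ldd/std only work on
8-byte-aligned addresses: V1a pairs s2_ptr with res_ptr, V1b is V1a with the
(commutative) operands swapped, and V2 pairs the two source pointers.  A
small C sketch of the dispatch performed by the two xor/andcc tests above
(classify and the enum are hypothetical names):

enum variant { V1A, V1B, V2 };

/* Only bit 2 of each pointer matters: limbs are 4 bytes and ldd needs
   8-byte alignment, so two pointers can be paired iff their bit 2 agrees.  */
static enum variant
classify (const void *res_ptr, const void *s1_ptr, const void *s2_ptr)
{
  unsigned long r = (unsigned long) res_ptr;
  if ((((unsigned long) s2_ptr ^ r) & 4) == 0)
    return V1A;		/* s2 and res agree: ldd from s2, std to res */
  if ((((unsigned long) s1_ptr ^ r) & 4) == 0)
    return V1B;		/* s1 and res agree: swap s1/s2, then as V1a */
  return V2;		/* s1 and s2 agree with each other: ldd both */
}

(The initial cmp size,8 shortcut sends small operands directly to the
two-limb tail loop, bypassing this classification.)
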
diff --git a/sysdeps/sparc/lshift.S b/sysdeps/sparc/lshift.S
new file mode 100644
index 0000000000..497272af0f
--- /dev/null
+++ b/sysdeps/sparc/lshift.S
@@ -0,0 +1,94 @@
+! sparc __mpn_lshift -- Shift a limb vector left and store the result in a
+! second limb vector.
+
+! Copyright (C) 1995 Free Software Foundation, Inc.
+
+! This file is part of the GNU MP Library.
+
+! The GNU MP Library is free software; you can redistribute it and/or modify
+! it under the terms of the GNU Library General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or (at your
+! option) any later version.
+
+! The GNU MP Library is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+! or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+! License for more details.
+
+! You should have received a copy of the GNU Library General Public License
+! along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+! the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+! INPUT PARAMETERS
+! res_ptr	%o0
+! src_ptr	%o1
+! size		%o2
+! cnt		%o3
+
+#include "sysdep.h"
+
+	.text
+	.align	4
+	.global	C_SYMBOL_NAME(__mpn_lshift)
+C_SYMBOL_NAME(__mpn_lshift):
+	sll	%o2,2,%g1
+	add	%o1,%g1,%o1	! make %o1 point at end of src
+	ld	[%o1-4],%g2	! load first limb
+	sub	%g0,%o3,%o5	! negate shift count
+	add	%o0,%g1,%o0	! make %o0 point at end of res
+	add	%o2,-1,%o2
+	andcc	%o2,4-1,%g4	! number of limbs in first loop
+	srl	%g2,%o5,%g1	! compute function result
+	beq	L0		! if multiple of 4 limbs, skip first loop
+	st	%g1,[%sp+80]
+
+	sub	%o2,%g4,%o2	! adjust count for main loop
+
+Loop0:	ld	[%o1-8],%g3
+	add	%o0,-4,%o0
+	add	%o1,-4,%o1
+	addcc	%g4,-1,%g4
+	sll	%g2,%o3,%o4
+	srl	%g3,%o5,%g1
+	mov	%g3,%g2
+	or	%o4,%g1,%o4
+	bne	Loop0
+	 st	%o4,[%o0+0]
+
+L0:	tst	%o2
+	beq	Lend
+	 nop
+
+Loop:	ld	[%o1-8],%g3
+	add	%o0,-16,%o0
+	addcc	%o2,-4,%o2
+	sll	%g2,%o3,%o4
+	srl	%g3,%o5,%g1
+
+	ld	[%o1-12],%g2
+	sll	%g3,%o3,%g4
+	or	%o4,%g1,%o4
+	st	%o4,[%o0+12]
+	srl	%g2,%o5,%g1
+
+	ld	[%o1-16],%g3
+	sll	%g2,%o3,%o4
+	or	%g4,%g1,%g4
+	st	%g4,[%o0+8]
+	srl	%g3,%o5,%g1
+
+	ld	[%o1-20],%g2
+	sll	%g3,%o3,%g4
+	or	%o4,%g1,%o4
+	st	%o4,[%o0+4]
+	srl	%g2,%o5,%g1
+
+	add	%o1,-16,%o1
+	or	%g4,%g1,%g4
+	bne	Loop
+	 st	%g4,[%o0+0]
+
+Lend:	sll	%g2,%o3,%g2
+	st	%g2,[%o0-4]
+	retl
+	ld	[%sp+80],%o0
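
For reference, a portable C sketch of what __mpn_lshift computes, assuming
32-bit limbs and 1 <= cnt <= 31 (__mpn_rshift below is the mirror image,
scanning from the low end).  Because the routine works from the most
significant limb down, res may overlap src from above:

typedef unsigned int mp_limb;
typedef long mp_size_t;

/* Shift the size-limb number at src left by cnt bits into res; return the
   bits shifted out of the top limb (the "function result" parked at
   [%sp+80] above).  */
static mp_limb
lshift_sketch (mp_limb *res, const mp_limb *src, mp_size_t size, unsigned cnt)
{
  unsigned tnc = 32 - cnt;
  mp_limb high = src[size - 1];
  mp_limb retval = high >> tnc;
  mp_size_t i;
  for (i = size - 1; i > 0; i--)
    {
      mp_limb low = src[i - 1];
      res[i] = (high << cnt) | (low >> tnc);
      high = low;
    }
  res[0] = high << cnt;
  return retval;
}
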
diff --git a/sysdeps/sparc/rshift.S b/sysdeps/sparc/rshift.S
new file mode 100644
index 0000000000..3428cfe9a4
--- /dev/null
+++ b/sysdeps/sparc/rshift.S
@@ -0,0 +1,91 @@
+! sparc __mpn_rshift -- Shift a limb vector right and store the result in a
+! second limb vector.
+
+! Copyright (C) 1995 Free Software Foundation, Inc.
+
+! This file is part of the GNU MP Library.
+
+! The GNU MP Library is free software; you can redistribute it and/or modify
+! it under the terms of the GNU Library General Public License as published by
+! the Free Software Foundation; either version 2 of the License, or (at your
+! option) any later version.
+
+! The GNU MP Library is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+! or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+! License for more details.
+
+! You should have received a copy of the GNU Library General Public License
+! along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+! the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+! INPUT PARAMETERS
+! res_ptr	%o0
+! src_ptr	%o1
+! size		%o2
+! cnt		%o3
+
+#include "sysdep.h"
+
+	.text
+	.align	4
+	.global	C_SYMBOL_NAME(__mpn_rshift)
+C_SYMBOL_NAME(__mpn_rshift):
+	ld	[%o1],%g2	! load first limb
+	sub	%g0,%o3,%o5	! negate shift count
+	add	%o2,-1,%o2
+	andcc	%o2,4-1,%g4	! number of limbs in first loop
+	sll	%g2,%o5,%g1	! compute function result
+	beq	L0		! if multiple of 4 limbs, skip first loop
+	st	%g1,[%sp+80]
+
+	sub	%o2,%g4,%o2	! adjust count for main loop
+
+Loop0:	ld	[%o1+4],%g3
+	add	%o0,4,%o0
+	add	%o1,4,%o1
+	addcc	%g4,-1,%g4
+	srl	%g2,%o3,%o4
+	sll	%g3,%o5,%g1
+	mov	%g3,%g2
+	or	%o4,%g1,%o4
+	bne	Loop0
+	 st	%o4,[%o0-4]
+
+L0:	tst	%o2
+	beq	Lend
+	 nop
+
+Loop:	ld	[%o1+4],%g3
+	add	%o0,16,%o0
+	addcc	%o2,-4,%o2
+	srl	%g2,%o3,%o4
+	sll	%g3,%o5,%g1
+
+	ld	[%o1+8],%g2
+	srl	%g3,%o3,%g4
+	or	%o4,%g1,%o4
+	st	%o4,[%o0-16]
+	sll	%g2,%o5,%g1
+
+	ld	[%o1+12],%g3
+	srl	%g2,%o3,%o4
+	or	%g4,%g1,%g4
+	st	%g4,[%o0-12]
+	sll	%g3,%o5,%g1
+
+	ld	[%o1+16],%g2
+	srl	%g3,%o3,%g4
+	or	%o4,%g1,%o4
+	st	%o4,[%o0-8]
+	sll	%g2,%o5,%g1
+
+	add	%o1,16,%o1
+	or	%g4,%g1,%g4
+	bne	Loop
+	 st	%g4,[%o0-4]
+
+Lend:	srl	%g2,%o3,%g2
+	st	%g2,[%o0-0]
+	retl
+	ld	[%sp+80],%o0
diff --git a/sysdeps/sparc/sub_n.S b/sysdeps/sparc/sub_n.S
index 6264344009..2e217ed679 100644
--- a/sysdeps/sparc/sub_n.S
+++ b/sysdeps/sparc/sub_n.S
@@ -1,7 +1,7 @@
 ! sparc __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
 ! store difference in a third limb vector.
 
-! Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
+! Copyright (C) 1995 Free Software Foundation, Inc.
 
 ! This file is part of the GNU MP Library.
 
@@ -21,10 +21,10 @@
 
 
 ! INPUT PARAMETERS
-! res_ptr	%o0
-! s1_ptr	%o1
-! s2_ptr	%o2
-! size		%o3
+#define res_ptr	%o0
+#define s1_ptr	%o1
+#define s2_ptr	%o2
+#define size	%o3
 
 #include "sysdep.h"
 
@@ -32,108 +32,279 @@
 	.align	4
 	.global	C_SYMBOL_NAME(__mpn_sub_n)
 C_SYMBOL_NAME(__mpn_sub_n):
-	ld	[%o1+0],%o4		! read first limb from s1_ptr
-	srl	%o3,4,%g1
-	ld	[%o2+0],%o5		! read first limb from s2_ptr
-
-	sub	%g0,%o3,%o3
-	andcc	%o3,(16-1),%o3
-	be	Lzero
-	 mov	%o4,%g2			! put first s1_limb in g2 too
-
-	sll	%o3,2,%o3		! multiply by 4
-	sub	%o0,%o3,%o0		! adjust res_ptr
-	sub	%o1,%o3,%o1		! adjust s1_ptr
-	sub	%o2,%o3,%o2		! adjust s2_ptr
-
-#if PIC
-	mov	%o7,%g4			! Save return address register
-	call	1f
-	add	%o7,Lbase-1f,%g3
-1:	mov	%g4,%o7			! Restore return address register
-#else
-	sethi	%hi(Lbase),%g3
-	or	%g3,%lo(Lbase),%g3
-#endif
-	sll	%o3,2,%o3		! multiply by 4
-	jmp	%g3+%o3
-	 mov	%o5,%g3			! put first s2_limb in g3 too
-
-Loop:	subxcc	%g2,%g3,%o3
-	add	%o1,64,%o1
-	st	%o3,[%o0+60]
-	add	%o2,64,%o2
-	ld	[%o1+0],%o4
-	add	%o0,64,%o0
-	ld	[%o2+0],%o5
-Lzero:	sub	%g1,1,%g1	! add 0 + 16r limbs (adjust loop counter)
-Lbase:	ld	[%o1+4],%g2
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+4],%g3
-	st	%o3,[%o0+0]
-	ld	[%o1+8],%o4	! add 15 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+8],%o5
-	st	%o3,[%o0+4]
-	ld	[%o1+12],%g2	! add 14 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+12],%g3
-	st	%o3,[%o0+8]
-	ld	[%o1+16],%o4	! add 13 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+16],%o5
-	st	%o3,[%o0+12]
-	ld	[%o1+20],%g2	! add 12 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+20],%g3
-	st	%o3,[%o0+16]
-	ld	[%o1+24],%o4	! add 11 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+24],%o5
-	st	%o3,[%o0+20]
-	ld	[%o1+28],%g2	! add 10 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+28],%g3
-	st	%o3,[%o0+24]
-	ld	[%o1+32],%o4	! add 9 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+32],%o5
-	st	%o3,[%o0+28]
-	ld	[%o1+36],%g2	! add 8 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+36],%g3
-	st	%o3,[%o0+32]
-	ld	[%o1+40],%o4	! add 7 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+40],%o5
-	st	%o3,[%o0+36]
-	ld	[%o1+44],%g2	! add 6 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+44],%g3
-	st	%o3,[%o0+40]
-	ld	[%o1+48],%o4	! add 5 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+48],%o5
-	st	%o3,[%o0+44]
-	ld	[%o1+52],%g2	! add 4 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+52],%g3
-	st	%o3,[%o0+48]
-	ld	[%o1+56],%o4	! add 3 + 16r limbs
-	subxcc	%g2,%g3,%o3
-	ld	[%o2+56],%o5
-	st	%o3,[%o0+52]
-	ld	[%o1+60],%g2	! add 2 + 16r limbs
-	subxcc	%o4,%o5,%o3
-	ld	[%o2+60],%g3
-	st	%o3,[%o0+56]
-	subx	%g0,%g0,%o4
-	tst	%g1
-	bne	Loop
-	 subcc	%g0,%o4,%g0	! restore cy (delay slot)
-
-	subxcc	%g2,%g3,%o3
-	st	%o3,[%o0+60]	! store most significant limb
-
-	retl
-	 addx	%g0,%g0,%o0	! return carry-out from most sign. limb
+	xor	s2_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L1			! branch if alignment differs
+	nop
+! **  V1a  **
+	andcc	res_ptr,4,%g0		! res_ptr unaligned? Side effect: cy=0
+	beq	L_v1			! if no, branch
+	nop
+/* Subtract least significant limb separately to align res_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	subcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+L_v1:	addx	%g0,%g0,%o4		! save cy in register
+	cmp	size,2			! if size < 2 ...
+	bl	Lend2			! ... branch to tail code
+	subcc	%g0,%o4,%g0		! restore cy
+
+	ld	[s1_ptr+0],%g4
+	addcc	size,-10,size
+	ld	[s1_ptr+4],%g1
+	ldd	[s2_ptr+0],%g2
+	blt	Lfin1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 8 limbs until less than 8 limbs remain */
+Loop1:	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+16],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+20],%g1
+	ldd	[s2_ptr+16],%g2
+	std	%o4,[res_ptr+8]
+	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+24],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+28],%g1
+	ldd	[s2_ptr+24],%g2
+	std	%o4,[res_ptr+16]
+	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+32],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+36],%g1
+	ldd	[s2_ptr+32],%g2
+	std	%o4,[res_ptr+24]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop1
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin1:	addcc	size,8-2,size
+	blt	Lend1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 2 limbs until less than 2 limbs remain */
+Loope1:	subxcc	%g4,%g2,%o4
+	ld	[s1_ptr+8],%g4
+	subxcc	%g1,%g3,%o5
+	ld	[s1_ptr+12],%g1
+	ldd	[s2_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loope1
+	subcc	%g0,%o4,%g0		! restore cy
+Lend1:	subxcc	%g4,%g2,%o4
+	subxcc	%g1,%g3,%o5
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+
+	andcc	size,1,%g0
+	be	Lret1
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract last limb */
+	ld	[s1_ptr+8],%g4
+	ld	[s2_ptr+8],%g2
+	subxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr+8]
+
+Lret1:	retl
+	addx	%g0,%g0,%o0	! return carry-out from most sign. limb
+
+L1:	xor	s1_ptr,res_ptr,%g1
+	andcc	%g1,4,%g0
+	bne	L2
+	nop
+! **  V1b  **
+	andcc	res_ptr,4,%g0		! res_ptr unaligned? Side effect: cy=0
+	beq	L_v1b			! if no, branch
+	nop
+/* Subtract least significant limb separately to align res_ptr and s2_ptr */
+	ld	[s2_ptr],%g4
+	add	s2_ptr,4,s2_ptr
+	ld	[s1_ptr],%g2
+	add	s1_ptr,4,s1_ptr
+	add	size,-1,size
+	subcc	%g2,%g4,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+L_v1b:	addx	%g0,%g0,%o4		! save cy in register
+	cmp	size,2			! if size < 2 ...
+	bl	Lend2			! ... branch to tail code
+	subcc	%g0,%o4,%g0		! restore cy
+
+	ld	[s2_ptr+0],%g4
+	addcc	size,-10,size
+	ld	[s2_ptr+4],%g1
+	ldd	[s1_ptr+0],%g2
+	blt	Lfin1b
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 8 limbs until less than 8 limbs remain */
+Loop1b:	subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+8],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+12],%g1
+	ldd	[s1_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+16],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+20],%g1
+	ldd	[s1_ptr+16],%g2
+	std	%o4,[res_ptr+8]
+	subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+24],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+28],%g1
+	ldd	[s1_ptr+24],%g2
+	std	%o4,[res_ptr+16]
+	subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+32],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+36],%g1
+	ldd	[s1_ptr+32],%g2
+	std	%o4,[res_ptr+24]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop1b
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin1b:	addcc	size,8-2,size
+	blt	Lend1b
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 2 limbs until less than 2 limbs remain */
+Loope1b:subxcc	%g2,%g4,%o4
+	ld	[s2_ptr+8],%g4
+	subxcc	%g3,%g1,%o5
+	ld	[s2_ptr+12],%g1
+	ldd	[s1_ptr+8],%g2
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loope1b
+	subcc	%g0,%o4,%g0		! restore cy
+Lend1b:	subxcc	%g2,%g4,%o4
+	subxcc	%g3,%g1,%o5
+	std	%o4,[res_ptr+0]
+	addx	%g0,%g0,%o4		! save cy in register
+
+	andcc	size,1,%g0
+	be	Lret1b
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract last limb */
+	ld	[s2_ptr+8],%g4
+	ld	[s1_ptr+8],%g2
+	subxcc	%g2,%g4,%o4
+	st	%o4,[res_ptr+8]
+
+Lret1b:	retl
+	addx	%g0,%g0,%o0	! return carry-out from most sign. limb
+
+! **  V2  **
+/* If we come here, the alignment of s1_ptr and res_ptr as well as the
+   alignment of s2_ptr and res_ptr differ.  Since there are only two ways
+   things can be aligned (that we care about) we now know that the alignment
+   of s1_ptr and s2_ptr is the same.  */
+
+L2:	cmp	size,1
+	be	Ljone
+	nop
+	andcc	s1_ptr,4,%g0		! s1_ptr unaligned? Side effect: cy=0
+	beq	L_v2			! if no, branch
+	nop
+/* Subtract least significant limb separately to align s1_ptr and s2_ptr */
+	ld	[s1_ptr],%g4
+	add	s1_ptr,4,s1_ptr
+	ld	[s2_ptr],%g2
+	add	s2_ptr,4,s2_ptr
+	add	size,-1,size
+	subcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+	add	res_ptr,4,res_ptr
+
+L_v2:	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	blt	Lfin2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract blocks of 8 limbs until less than 8 limbs remain */
+Loop2:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	ldd	[s1_ptr+8],%g2
+	ldd	[s2_ptr+8],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+8]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+12]
+	ldd	[s1_ptr+16],%g2
+	ldd	[s2_ptr+16],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+16]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+20]
+	ldd	[s1_ptr+24],%g2
+	ldd	[s2_ptr+24],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+24]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+28]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-8,size
+	add	s1_ptr,32,s1_ptr
+	add	s2_ptr,32,s2_ptr
+	add	res_ptr,32,res_ptr
+	bge	Loop2
+	subcc	%g0,%o4,%g0		! restore cy
+
+Lfin2:	addcc	size,8-2,size
+	blt	Lend2
+	subcc	%g0,%o4,%g0		! restore cy
+Loope2:	ldd	[s1_ptr+0],%g2
+	ldd	[s2_ptr+0],%o4
+	subxcc	%g2,%o4,%g2
+	st	%g2,[res_ptr+0]
+	subxcc	%g3,%o5,%g3
+	st	%g3,[res_ptr+4]
+	addx	%g0,%g0,%o4		! save cy in register
+	addcc	size,-2,size
+	add	s1_ptr,8,s1_ptr
+	add	s2_ptr,8,s2_ptr
+	add	res_ptr,8,res_ptr
+	bge	Loope2
+	subcc	%g0,%o4,%g0		! restore cy
+Lend2:	andcc	size,1,%g0
+	be	Lret2
+	subcc	%g0,%o4,%g0		! restore cy
+/* Subtract last limb */
+Ljone:	ld	[s1_ptr],%g4
+	ld	[s2_ptr],%g2
+	subxcc	%g4,%g2,%o4
+	st	%o4,[res_ptr]
+
+Lret2:	retl
+	addx	%g0,%g0,%o0	! return carry-out from most sign. limb
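
Unlike addition, subtraction does not commute, which is why this file gains a
full V1b block with the load roles reversed instead of simply swapping s1_ptr
and s2_ptr the way __mpn_add_n does.  For reference, the borrow discipline
all of these paths implement, as a portable C sketch assuming 32-bit limbs:

typedef unsigned int mp_limb;
typedef long mp_size_t;

/* res[0..n) = s1[0..n) - s2[0..n); returns the borrow out of the top
   limb (0 or 1).  */
static mp_limb
sub_n_sketch (mp_limb *res, const mp_limb *s1, const mp_limb *s2, mp_size_t n)
{
  mp_limb brw = 0;
  mp_size_t i;
  for (i = 0; i < n; i++)
    {
      mp_limb a = s1[i], b = s2[i];
      mp_limb t = a - b;		/* wraps iff a < b */
      res[i] = t - brw;			/* wraps iff t < brw */
      brw = (a < b) | (t < brw);	/* the two cannot both happen */
    }
  return brw;
}
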