author     Roland McGrath <roland@gnu.org>    1996-03-01 18:45:35 +0000
committer  Roland McGrath <roland@gnu.org>    1996-03-01 18:45:35 +0000
commit     6b628d3634559ddd84148ed860a0e1e967b5d59c (patch)
tree       59e9370cd3fdf04ebe25ce605465f9057e24e081 /sysdeps/m68k
parent     bc47d7a85be6334801853aaee489c875646ecb00 (diff)
* stdlib/Makefile (mpn-stuff): New target.
	(copy-mpn): Use it.

	* Code copied from GMP updated to 1.937 version.
	* stdlib/strtod.c (HAVE_ALLOCA): Define this for gmp headers.
Diffstat (limited to 'sysdeps/m68k')
-rw-r--r--  sysdeps/m68k/add_n.S              75
-rw-r--r--  sysdeps/m68k/lshift.S            150
-rw-r--r--  sysdeps/m68k/m68020/addmul_1.S    75
-rw-r--r--  sysdeps/m68k/m68020/mul_1.S       83
-rw-r--r--  sysdeps/m68k/m68020/submul_1.S    75
-rw-r--r--  sysdeps/m68k/rshift.S            149
-rw-r--r--  sysdeps/m68k/sub_n.S              75
7 files changed, 498 insertions(+), 184 deletions(-)
diff --git a/sysdeps/m68k/add_n.S b/sysdeps/m68k/add_n.S
index ea7a4458ea..754af9f469 100644
--- a/sysdeps/m68k/add_n.S
+++ b/sysdeps/m68k/add_n.S
@@ -1,7 +1,7 @@
 /* mc68020 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
    sum in a third limb vector.
 
-Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -27,50 +27,53 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
   size		(sp + 12)
 */
 
+#include "sysdep.h"
 #include "asm-syntax.h"
 
 	TEXT
 	ALIGN
-	GLOBL	___mpn_add_n
+	GLOBL	C_SYMBOL_NAME(__mpn_add_n)
 
-LAB(___mpn_add_n)
+C_SYMBOL_NAME(__mpn_add_n:)
+PROLOG(__mpn_add_n)
 /* Save used registers on the stack.  */
-	INSN2(move,l	,MEM_PREDEC(sp),d2)
-	INSN2(move,l	,MEM_PREDEC(sp),a2)
+	movel	R(d2),MEM_PREDEC(sp)
+	movel	R(a2),MEM_PREDEC(sp)
 
 /* Copy the arguments to registers.  Better use movem?  */
-	INSN2(move,l	,a2,MEM_DISP(sp,12))
-	INSN2(move,l	,a0,MEM_DISP(sp,16))
-	INSN2(move,l	,a1,MEM_DISP(sp,20))
-	INSN2(move,l	,d2,MEM_DISP(sp,24))
-
-	INSN2(eor,w	,d2,#1)
-	INSN2(lsr,l	,d2,#1)
-	bcc L1
-	INSN2(subq,l	,d2,#1)		/* clears cy as side effect */
-
-LAB(Loop)
-	INSN2(move,l	,d0,MEM_POSTINC(a0))
-	INSN2(move,l	,d1,MEM_POSTINC(a1))
-	INSN2(addx,l	,d0,d1)
-	INSN2(move,l	,MEM_POSTINC(a2),d0)
-LAB(L1)	INSN2(move,l	,d0,MEM_POSTINC(a0))
-	INSN2(move,l	,d1,MEM_POSTINC(a1))
-	INSN2(addx,l	,d0,d1)
-	INSN2(move,l	,MEM_POSTINC(a2),d0)
-
-	dbf d2,Loop			/* loop until 16 lsb of %4 == -1 */
-	INSN2(subx,l	,d0,d0)		/* d0 <= -cy; save cy as 0 or -1 in d0 */
-	INSN2(sub,l	,d2,#0x10000)
-	bcs L2
-	INSN2(add,l	,d0,d0)		/* restore cy */
-	bra Loop
-
-LAB(L2)
-	INSN1(neg,l	,d0)
+	movel	MEM_DISP(sp,12),R(a2)
+	movel	MEM_DISP(sp,16),R(a0)
+	movel	MEM_DISP(sp,20),R(a1)
+	movel	MEM_DISP(sp,24),R(d2)
+
+	eorw	#1,R(d2)
+	lsrl	#1,R(d2)
+	bcc	L(L1)
+	subql	#1,R(d2)	/* clears cy as side effect */
+
+L(Loop:)
+	movel	MEM_POSTINC(a0),R(d0)
+	movel	MEM_POSTINC(a1),R(d1)
+	addxl	R(d1),R(d0)
+	movel	R(d0),MEM_POSTINC(a2)
+L(L1:)	movel	MEM_POSTINC(a0),R(d0)
+	movel	MEM_POSTINC(a1),R(d1)
+	addxl	R(d1),R(d0)
+	movel	R(d0),MEM_POSTINC(a2)
+
+	dbf	R(d2),L(Loop)		/* loop until 16 lsb of %4 == -1 */
+	subxl	R(d0),R(d0)	/* d0 <= -cy; save cy as 0 or -1 in d0 */
+	subl	#0x10000,R(d2)
+	bcs	L(L2)
+	addl	R(d0),R(d0)	/* restore cy */
+	bra	L(Loop)
+
+L(L2:)
+	negl	R(d0)
 
 /* Restore used registers from stack frame.  */
-	INSN2(move,l	,a2,MEM_POSTINC(sp))
-	INSN2(move,l	,d2,MEM_POSTINC(sp))
+	movel	MEM_POSTINC(sp),R(a2)
+	movel	MEM_POSTINC(sp),R(d2)
 
 	rts
+EPILOG(__mpn_add_n)
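
Note: for orientation, a rough C equivalent of what __mpn_add_n computes -- a sketch only, not glibc source; the mpn_add_n_ref name and the 32-bit mp_limb_t typedef are assumptions for illustration.  The addxl-based loop above realizes the same carry chain, two limbs per dbf iteration.

typedef unsigned int mp_limb_t;		/* 32-bit limb assumed */

mp_limb_t
mpn_add_n_ref (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
	       const mp_limb_t *s2_ptr, long size)
{
  mp_limb_t cy = 0;
  for (long i = 0; i < size; i++)
    {
      mp_limb_t a = s1_ptr[i], b = s2_ptr[i];
      mp_limb_t sum = a + b + cy;
      /* Carry out if the 32-bit addition wrapped around.  */
      cy = (sum < a) || (cy && sum == a);
      res_ptr[i] = sum;
    }
  return cy;		/* 0 or 1, as the assembly returns in d0 */
}
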
diff --git a/sysdeps/m68k/lshift.S b/sysdeps/m68k/lshift.S
new file mode 100644
index 0000000000..c58594a01b
--- /dev/null
+++ b/sysdeps/m68k/lshift.S
@@ -0,0 +1,150 @@
+/* mc68020 __mpn_lshift -- Shift left a low-level natural-number integer.
+
+Copyright (C) 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+  INPUT PARAMETERS
+  res_ptr	(sp + 4)
+  s_ptr		(sp + 8)
+  s_size	(sp + 16)
+  cnt		(sp + 12)
+*/
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+#define res_ptr a1
+#define s_ptr a0
+#define s_size d6
+#define cnt d4
+
+	TEXT
+	ALIGN
+	GLOBL	C_SYMBOL_NAME(__mpn_lshift)
+
+C_SYMBOL_NAME(__mpn_lshift:)
+PROLOG(__mpn_lshift)
+
+/* Save used registers on the stack.  */
+	moveml	R(d2)-R(d6)/R(a2),MEM_PREDEC(sp)
+
+/* Copy the arguments to registers.  */
+	movel	MEM_DISP(sp,28),R(res_ptr)
+	movel	MEM_DISP(sp,32),R(s_ptr)
+	movel	MEM_DISP(sp,36),R(s_size)
+	movel	MEM_DISP(sp,40),R(cnt)
+
+	moveql	#1,R(d5)
+	cmpl	R(d5),R(cnt)
+	bne	L(Lnormal)
+	cmpl	R(s_ptr),R(res_ptr)
+	bls	L(Lspecial)		/* jump if s_ptr >= res_ptr */
+#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
+	lea	MEM_INDX1(s_ptr,s_size,l,4),R(a2)
+#else /* not mc68020 */
+	movel	R(s_size),R(d0)
+	asll	#2,R(d0)
+	lea	MEM_INDX(s_ptr,d0,l),R(a2)
+#endif
+	cmpl	R(res_ptr),R(a2)
+	bls	L(Lspecial)		/* jump if res_ptr >= s_ptr + s_size */
+
+L(Lnormal:)
+	moveql	#32,R(d5)
+	subl	R(cnt),R(d5)
+
+#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
+	lea	MEM_INDX1(s_ptr,s_size,l,4),R(s_ptr)
+	lea	MEM_INDX1(res_ptr,s_size,l,4),R(res_ptr)
+#else /* not mc68000 */
+	movel	R(s_size),R(d0)
+	asll	#2,R(d0)
+	addl	R(s_size),R(s_ptr)
+	addl	R(s_size),R(res_ptr)
+#endif
+	movel	MEM_PREDEC(s_ptr),R(d2)
+	movel	R(d2),R(d0)
+	lsrl	R(d5),R(d0)		/* compute carry limb */
+
+	lsll	R(cnt),R(d2)
+	movel	R(d2),R(d1)
+	subql	#1,R(s_size)
+	beq	L(Lend)
+	lsrl	#1,R(s_size)
+	bcs	L(L1)
+	subql	#1,R(s_size)
+
+L(Loop:)
+	movel	MEM_PREDEC(s_ptr),R(d2)
+	movel	R(d2),R(d3)
+	lsrl	R(d5),R(d3)
+	orl	R(d3),R(d1)
+	movel	R(d1),MEM_PREDEC(res_ptr)
+	lsll	R(cnt),R(d2)
+L(L1:)
+	movel	MEM_PREDEC(s_ptr),R(d1)
+	movel	R(d1),R(d3)
+	lsrl	R(d5),R(d3)
+	orl	R(d3),R(d2)
+	movel	R(d2),MEM_PREDEC(res_ptr)
+	lsll	R(cnt),R(d1)
+
+	dbf	R(s_size),L(Loop)
+	subl	#0x10000,R(s_size)
+	bcc	L(Loop)
+
+L(Lend:)
+	movel	R(d1),MEM_PREDEC(res_ptr) /* store least significant limb */
+
+/* Restore used registers from stack frame.  */
+	moveml	MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
+	rts
+
+/* We loop from least significant end of the arrays, which is only
+   permissable if the source and destination don't overlap, since the
+   function is documented to work for overlapping source and destination.  */
+
+L(Lspecial:)
+	clrl	R(d0)			/* initialize carry */
+	eorw	#1,R(s_size)
+	lsrl	#1,R(s_size)
+	bcc	L(LL1)
+	subql	#1,R(s_size)
+
+L(LLoop:)
+	movel	MEM_POSTINC(s_ptr),R(d2)
+	addxl	R(d2),R(d2)
+	movel	R(d2),MEM_POSTINC(res_ptr)
+L(LL1:)
+	movel	MEM_POSTINC(s_ptr),R(d2)
+	addxl	R(d2),R(d2)
+	movel	R(d2),MEM_POSTINC(res_ptr)
+
+	dbf	R(s_size),L(LLoop)
+	addxl	R(d0),R(d0)		/* save cy in lsb */
+	subl	#0x10000,R(s_size)
+	bcs	L(LLend)
+	lsrl	#1,R(d0)		/* restore cy */
+	bra	L(LLoop)
+
+L(LLend:)
+/* Restore used registers from stack frame.  */
+	moveml	MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
+	rts
+EPILOG(__mpn_lshift)
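
Note: a rough C model of __mpn_lshift's semantics (sketch only, not the glibc code; the *_ref name and the 32-bit limb width are assumptions): shift an s_size-limb operand left by cnt bits, 0 < cnt < 32, working down from the most significant limb, and return the bits shifted out at the top.  The cnt == 1 fast path in the assembly instead doubles each limb with addxl, carrying the top bit through the X flag.

typedef unsigned int mp_limb_t;		/* 32-bit limb assumed */

mp_limb_t
mpn_lshift_ref (mp_limb_t *res_ptr, const mp_limb_t *s_ptr,
		long s_size, unsigned int cnt)
{
  unsigned int tnc = 32 - cnt;
  mp_limb_t high = s_ptr[s_size - 1];
  mp_limb_t retval = high >> tnc;	/* carry limb shifted out at the top */
  mp_limb_t low = high << cnt;

  for (long i = s_size - 2; i >= 0; i--)
    {
      high = s_ptr[i];
      res_ptr[i + 1] = low | (high >> tnc);
      low = high << cnt;
    }
  res_ptr[0] = low;
  return retval;
}
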
diff --git a/sysdeps/m68k/m68020/addmul_1.S b/sysdeps/m68k/m68020/addmul_1.S
index 3f244c40b4..169f1135be 100644
--- a/sysdeps/m68k/m68020/addmul_1.S
+++ b/sysdeps/m68k/m68020/addmul_1.S
@@ -1,7 +1,7 @@
 /* mc68020 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
    the result to a second limb vector.
 
-Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -23,58 +23,61 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
   INPUT PARAMETERS
   res_ptr	(sp + 4)
   s1_ptr	(sp + 8)
-  size		(sp + 12)
+  s1_size	(sp + 12)
   s2_limb	(sp + 16)
 */
 
+#include "sysdep.h"
 #include "asm-syntax.h"
 
 	TEXT
 	ALIGN
-	GLOBL	___mpn_addmul_1
+	GLOBL	C_SYMBOL_NAME(__mpn_addmul_1)
 
-LAB(___mpn_addmul_1)
+C_SYMBOL_NAME(__mpn_addmul_1:)
+PROLOG(__mpn_addmul_1)
 
 #define res_ptr a0
 #define s1_ptr a1
-#define size d2
+#define s1_size d2
 #define s2_limb d4
 
 /* Save used registers on the stack.  */
-	INSN2(movem,l	,MEM_PREDEC(sp),d2-d5)
+	moveml	R(d2)-R(d5),MEM_PREDEC(sp)
 
 /* Copy the arguments to registers.  Better use movem?  */
-	INSN2(move,l	,res_ptr,MEM_DISP(sp,20))
-	INSN2(move,l	,s1_ptr,MEM_DISP(sp,24))
-	INSN2(move,l	,size,MEM_DISP(sp,28))
-	INSN2(move,l	,s2_limb,MEM_DISP(sp,32))
-
-	INSN2(eor,w	,size,#1)
-	INSN1(clr,l	,d1)
-	INSN1(clr,l	,d5)
-	INSN2(lsr,l	,size,#1)
-	bcc	L1
-	INSN2(subq,l	,size,#1)
-	INSN2(sub,l	,d0,d0)		/* (d0,cy) <= (0,0) */
-
-LAB(Loop)
-	INSN2(move,l	,d3,MEM_POSTINC(s1_ptr))
-	INSN2(mulu,l	,d1:d3,s2_limb)
-	INSN2(addx,l	,d3,d0)
-	INSN2(addx,l	,d1,d5)
-	INSN2(add,l	,MEM_POSTINC(res_ptr),d3)
-LAB(L1)	INSN2(move,l	,d3,MEM_POSTINC(s1_ptr))
-	INSN2(mulu,l	,d0:d3,s2_limb)
-	INSN2(addx,l	,d3,d1)
-	INSN2(addx,l	,d0,d5)
-	INSN2(add,l	,MEM_POSTINC(res_ptr),d3)
-
-	dbf	size,Loop
-	INSN2(addx,l	,d0,d5)
-	INSN2(sub,l	,size,#0x10000)
-	bcc	Loop
+	movel	MEM_DISP(sp,20),R(res_ptr)
+	movel	MEM_DISP(sp,24),R(s1_ptr)
+	movel	MEM_DISP(sp,28),R(s1_size)
+	movel	MEM_DISP(sp,32),R(s2_limb)
+
+	eorw	#1,R(s1_size)
+	clrl	R(d1)
+	clrl	R(d5)
+	lsrl	#1,R(s1_size)
+	bcc	L(L1)
+	subql	#1,R(s1_size)
+	subl	R(d0),R(d0)		/* (d0,cy) <= (0,0) */
+
+L(Loop:)
+	movel	MEM_POSTINC(s1_ptr),R(d3)
+	mulul	R(s2_limb),R(d1):R(d3)
+	addxl	R(d0),R(d3)
+	addxl	R(d5),R(d1)
+	addl	R(d3),MEM_POSTINC(res_ptr)
+L(L1:)	movel	MEM_POSTINC(s1_ptr),R(d3)
+	mulul	R(s2_limb),R(d0):R(d3)
+	addxl	R(d1),R(d3)
+	addxl	R(d5),R(d0)
+	addl	R(d3),MEM_POSTINC(res_ptr)
+
+	dbf	R(s1_size),L(Loop)
+	addxl	R(d5),R(d0)
+	subl	#0x10000,R(s1_size)
+	bcc	L(Loop)
 
 /* Restore used registers from stack frame.  */
-	INSN2(movem,l	,d2-d5,MEM_POSTINC(sp))
+	moveml	MEM_POSTINC(sp),R(d2)-R(d5)
 
 	rts
+EPILOG(__mpn_addmul_1)
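
Note: semantics sketch for __mpn_addmul_1 (illustration, not glibc source; the *_ref name and the 64-bit temporary are assumptions): multiply the s1 vector by s2_limb, add the product into res_ptr in place, and return the final carry limb.  The assembly obtains the 64-bit partial product directly from the 68020 mulul instruction.

typedef unsigned int mp_limb_t;		/* 32-bit limb assumed */

mp_limb_t
mpn_addmul_1_ref (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
		  long s1_size, mp_limb_t s2_limb)
{
  mp_limb_t cy = 0;
  for (long i = 0; i < s1_size; i++)
    {
      unsigned long long prod =
	(unsigned long long) s1_ptr[i] * s2_limb + res_ptr[i] + cy;
      res_ptr[i] = (mp_limb_t) prod;	/* low 32 bits */
      cy = (mp_limb_t) (prod >> 32);	/* high 32 bits carry onward */
    }
  return cy;
}
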
diff --git a/sysdeps/m68k/m68020/mul_1.S b/sysdeps/m68k/m68020/mul_1.S
index 548ca0091b..4db1ccac25 100644
--- a/sysdeps/m68k/m68020/mul_1.S
+++ b/sysdeps/m68k/m68020/mul_1.S
@@ -1,7 +1,7 @@
 /* mc68020 __mpn_mul_1 -- Multiply a limb vector with a limb and store
    the result in a second limb vector.
 
-Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -23,65 +23,68 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
   INPUT PARAMETERS
   res_ptr	(sp + 4)
   s1_ptr	(sp + 8)
-  size		(sp + 12)
+  s1_size	(sp + 12)
   s2_limb	(sp + 16)
 */
 
+#include "sysdep.h"
 #include "asm-syntax.h"
 
 	TEXT
 	ALIGN
-	GLOBL	___mpn_mul_1
+	GLOBL	C_SYMBOL_NAME(__mpn_mul_1)
 
-LAB(___mpn_mul_1)
+C_SYMBOL_NAME(__mpn_mul_1:)
+PROLOG(__mpn_mul_1)
 
 #define res_ptr a0
 #define s1_ptr a1
-#define size d2
+#define s1_size d2
 #define s2_limb d4
 
 /* Save used registers on the stack.  */
-	INSN2(movem,l	,MEM_PREDEC(sp),d2-d4)
+	moveml	R(d2)-R(d4),MEM_PREDEC(sp)
 #if 0
-	INSN2(move,l	,MEM_PREDEC(sp),d2)
-	INSN2(move,l	,MEM_PREDEC(sp),d3)
-	INSN2(move,l	,MEM_PREDEC(sp),d4)
+	movel	R(d2),MEM_PREDEC(sp)
+	movel	R(d3),MEM_PREDEC(sp)
+	movel	R(d4),MEM_PREDEC(sp)
 #endif
 
 /* Copy the arguments to registers.  Better use movem?  */
-	INSN2(move,l	,res_ptr,MEM_DISP(sp,16))
-	INSN2(move,l	,s1_ptr,MEM_DISP(sp,20))
-	INSN2(move,l	,size,MEM_DISP(sp,24))
-	INSN2(move,l	,s2_limb,MEM_DISP(sp,28))
-
-	INSN2(eor,w	,size,#1)
-	INSN1(clr,l	,d1)
-	INSN2(lsr,l	,size,#1)
-	bcc	L1
-	INSN2(subq,l	,size,#1)
-	INSN2(sub,l	,d0,d0)		/* (d0,cy) <= (0,0) */
-
-LAB(Loop)
-	INSN2(move,l	,d3,MEM_POSTINC(s1_ptr))
-	INSN2(mulu,l	,d1:d3,s2_limb)
-	INSN2(addx,l	,d3,d0)
-	INSN2(move,l	,MEM_POSTINC(res_ptr),d3)
-LAB(L1)	INSN2(move,l	,d3,MEM_POSTINC(s1_ptr))
-	INSN2(mulu,l	,d0:d3,s2_limb)
-	INSN2(addx,l	,d3,d1)
-	INSN2(move,l	,MEM_POSTINC(res_ptr),d3)
-
-	dbf	size,Loop
-	INSN1(clr,l	,d3)
-	INSN2(addx,l	,d0,d3)
-	INSN2(sub,l	,size,#0x10000)
-	bcc	Loop
+	movel	MEM_DISP(sp,16),R(res_ptr)
+	movel	MEM_DISP(sp,20),R(s1_ptr)
+	movel	MEM_DISP(sp,24),R(s1_size)
+	movel	MEM_DISP(sp,28),R(s2_limb)
+
+	eorw	#1,R(s1_size)
+	clrl	R(d1)
+	lsrl	#1,R(s1_size)
+	bcc	L(L1)
+	subql	#1,R(s1_size)
+	subl	R(d0),R(d0)	/* (d0,cy) <= (0,0) */
+
+L(Loop:)
+	movel	MEM_POSTINC(s1_ptr),R(d3)
+	mulul	R(s2_limb),R(d1):R(d3)
+	addxl	R(d0),R(d3)
+	movel	R(d3),MEM_POSTINC(res_ptr)
+L(L1:)	movel	MEM_POSTINC(s1_ptr),R(d3)
+	mulul	R(s2_limb),R(d0):R(d3)
+	addxl	R(d1),R(d3)
+	movel	R(d3),MEM_POSTINC(res_ptr)
+
+	dbf	R(s1_size),L(Loop)
+	clrl	R(d3)
+	addxl	R(d3),R(d0)
+	subl	#0x10000,R(s1_size)
+	bcc	L(Loop)
 
 /* Restore used registers from stack frame.  */
-	INSN2(movem,l	,d2-d4,MEM_POSTINC(sp))
+	moveml	MEM_POSTINC(sp),R(d2)-R(d4)
 #if 0
-	INSN2(move,l	,d4,MEM_POSTINC(sp))
-	INSN2(move,l	,d3,MEM_POSTINC(sp))
-	INSN2(move,l	,d2,MEM_POSTINC(sp))
+	movel	MEM_POSTINC(sp),R(d4)
+	movel	MEM_POSTINC(sp),R(d3)
+	movel	MEM_POSTINC(sp),R(d2)
 #endif
 	rts
+EPILOG(__mpn_mul_1)
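
Note: __mpn_mul_1 is the same multiply-by-a-limb loop as addmul_1 but stores the product instead of adding it into the destination.  A semantics sketch (not glibc source; names and the 64-bit temporary are assumptions):

typedef unsigned int mp_limb_t;		/* 32-bit limb assumed */

mp_limb_t
mpn_mul_1_ref (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
	       long s1_size, mp_limb_t s2_limb)
{
  mp_limb_t cy = 0;
  for (long i = 0; i < s1_size; i++)
    {
      unsigned long long prod = (unsigned long long) s1_ptr[i] * s2_limb + cy;
      res_ptr[i] = (mp_limb_t) prod;	/* low 32 bits of the partial product */
      cy = (mp_limb_t) (prod >> 32);	/* high 32 bits carry into next limb */
    }
  return cy;
}
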
diff --git a/sysdeps/m68k/m68020/submul_1.S b/sysdeps/m68k/m68020/submul_1.S
index ef7f39de7a..cf30029b2f 100644
--- a/sysdeps/m68k/m68020/submul_1.S
+++ b/sysdeps/m68k/m68020/submul_1.S
@@ -1,7 +1,7 @@
 /* mc68020 __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
    the result from a second limb vector.
 
-Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -23,58 +23,61 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
   INPUT PARAMETERS
   res_ptr	(sp + 4)
   s1_ptr	(sp + 8)
-  size		(sp + 12)
+  s1_size	(sp + 12)
   s2_limb	(sp + 16)
 */
 
+#include "sysdep.h"
 #include "asm-syntax.h"
 
 	TEXT
 	ALIGN
-	GLOBL	___mpn_submul_1
+	GLOBL	C_SYMBOL_NAME(__mpn_submul_1)
 
-LAB(___mpn_submul_1)
+C_SYMBOL_NAME(__mpn_submul_1:)
+PROLOG(__mpn_submul_1)
 
 #define res_ptr a0
 #define s1_ptr a1
-#define size d2
+#define s1_size d2
 #define s2_limb d4
 
 /* Save used registers on the stack.  */
-	INSN2(movem,l	,MEM_PREDEC(sp),d2-d5)
+	moveml	R(d2)-R(d5),MEM_PREDEC(sp)
 
 /* Copy the arguments to registers.  Better use movem?  */
-	INSN2(move,l	,res_ptr,MEM_DISP(sp,20))
-	INSN2(move,l	,s1_ptr,MEM_DISP(sp,24))
-	INSN2(move,l	,size,MEM_DISP(sp,28))
-	INSN2(move,l	,s2_limb,MEM_DISP(sp,32))
-
-	INSN2(eor,w	,size,#1)
-	INSN1(clr,l	,d1)
-	INSN1(clr,l	,d5)
-	INSN2(lsr,l	,size,#1)
-	bcc	L1
-	INSN2(subq,l	,size,#1)
-	INSN2(sub,l	,d0,d0)		/* (d0,cy) <= (0,0) */
-
-LAB(Loop)
-	INSN2(move,l	,d3,MEM_POSTINC(s1_ptr))
-	INSN2(mulu,l	,d1:d3,s2_limb)
-	INSN2(addx,l	,d3,d0)
-	INSN2(addx,l	,d1,d5)
-	INSN2(sub,l	,MEM_POSTINC(res_ptr),d3)
-LAB(L1)	INSN2(move,l	,d3,MEM_POSTINC(s1_ptr))
-	INSN2(mulu,l	,d0:d3,s2_limb)
-	INSN2(addx,l	,d3,d1)
-	INSN2(addx,l	,d0,d5)
-	INSN2(sub,l	,MEM_POSTINC(res_ptr),d3)
-
-	dbf	size,Loop
-	INSN2(addx,l	,d0,d5)
-	INSN2(sub,l	,size,#0x10000)
-	bcc	Loop
+	movel	MEM_DISP(sp,20),R(res_ptr)
+	movel	MEM_DISP(sp,24),R(s1_ptr)
+	movel	MEM_DISP(sp,28),R(s1_size)
+	movel	MEM_DISP(sp,32),R(s2_limb)
+
+	eorw	#1,R(s1_size)
+	clrl	R(d1)
+	clrl	R(d5)
+	lsrl	#1,R(s1_size)
+	bcc	L(L1)
+	subql	#1,R(s1_size)
+	subl	R(d0),R(d0)	/* (d0,cy) <= (0,0) */
+
+L(Loop:)
+	movel	MEM_POSTINC(s1_ptr),R(d3)
+	mulul	R(s2_limb),R(d1):R(d3)
+	addxl	R(d0),R(d3)
+	addxl	R(d5),R(d1)
+	subl	R(d3),MEM_POSTINC(res_ptr)
+L(L1:)	movel	MEM_POSTINC(s1_ptr),R(d3)
+	mulul	R(s2_limb),R(d0):R(d3)
+	addxl	R(d1),R(d3)
+	addxl	R(d5),R(d0)
+	subl	R(d3),MEM_POSTINC(res_ptr)
+
+	dbf	R(s1_size),L(Loop)
+	addxl	R(d5),R(d0)
+	subl	#0x10000,R(s1_size)
+	bcc	L(Loop)
 
 /* Restore used registers from stack frame.  */
-	INSN2(movem,l	,d2-d5,MEM_POSTINC(sp))
+	moveml	MEM_POSTINC(sp),R(d2)-R(d5)
 
 	rts
+EPILOG(__mpn_submul_1)
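
Note: __mpn_submul_1 subtracts the limb-by-vector product from the destination and returns the accumulated borrow.  A semantics sketch (not glibc source; names and the 64-bit temporary are assumptions):

typedef unsigned int mp_limb_t;		/* 32-bit limb assumed */

mp_limb_t
mpn_submul_1_ref (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
		  long s1_size, mp_limb_t s2_limb)
{
  mp_limb_t cy = 0;
  for (long i = 0; i < s1_size; i++)
    {
      unsigned long long prod = (unsigned long long) s1_ptr[i] * s2_limb + cy;
      mp_limb_t lo = (mp_limb_t) prod;
      cy = (mp_limb_t) (prod >> 32);
      mp_limb_t r = res_ptr[i];
      res_ptr[i] = r - lo;
      cy += (r < lo);			/* propagate the borrow */
    }
  return cy;
}
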
diff --git a/sysdeps/m68k/rshift.S b/sysdeps/m68k/rshift.S
new file mode 100644
index 0000000000..494dfcbeab
--- /dev/null
+++ b/sysdeps/m68k/rshift.S
@@ -0,0 +1,149 @@
+/* mc68020 __mpn_rshift -- Shift right a low-level natural-number integer.
+
+Copyright (C) 1996 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Library General Public License as published by
+the Free Software Foundation; either version 2 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+License for more details.
+
+You should have received a copy of the GNU Library General Public License
+along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/*
+  INPUT PARAMETERS
+  res_ptr	(sp + 4)
+  s_ptr		(sp + 8)
+  s_size	(sp + 16)
+  cnt		(sp + 12)
+*/
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+#define res_ptr a1
+#define s_ptr a0
+#define s_size d6
+#define cnt d4
+
+	TEXT
+	ALIGN
+	GLOBL	C_SYMBOL_NAME(__mpn_rshift)
+
+C_SYMBOL_NAME(__mpn_rshift:)
+PROLOG(__mpn_rshift)
+/* Save used registers on the stack.  */
+	moveml	R(d2)-R(d6)/R(a2),MEM_PREDEC(sp)
+
+/* Copy the arguments to registers.  */
+	movel	MEM_DISP(sp,28),R(res_ptr)
+	movel	MEM_DISP(sp,32),R(s_ptr)
+	movel	MEM_DISP(sp,36),R(s_size)
+	movel	MEM_DISP(sp,40),R(cnt)
+
+	moveql	#1,R(d5)
+	cmpl	R(d5),R(cnt)
+	bne	L(Lnormal)
+	cmpl	R(res_ptr),R(s_ptr)
+	bls	L(Lspecial)		/* jump if res_ptr >= s_ptr */
+#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
+	lea	MEM_INDX1(res_ptr,s_size,l,4),R(a2)
+#else /* not mc68020 */
+	movel	R(s_size),R(d0)
+	asll	#2,R(d0)
+	lea	MEM_INDX(res_ptr,d0,l),R(a2)
+#endif
+	cmpl	R(s_ptr),R(a2)
+	bls	L(Lspecial)		/* jump if s_ptr >= res_ptr + s_size */
+
+L(Lnormal:)
+	moveql	#32,R(d5)
+	subl	R(cnt),R(d5)
+	movel	MEM_POSTINC(s_ptr),R(d2)
+	movel	R(d2),R(d0)
+	lsll	R(d5),R(d0)		/* compute carry limb */
+   
+	lsrl	R(cnt),R(d2)
+	movel	R(d2),R(d1)
+	subql	#1,R(s_size)
+	beq	L(Lend)
+	lsrl	#1,R(s_size)
+	bcs	L(L1)
+	subql	#1,R(s_size)
+
+L(Loop:)
+	movel	MEM_POSTINC(s_ptr),R(d2)
+	movel	R(d2),R(d3)
+	lsll	R(d5),R(d3)
+	orl	R(d3),R(d1)
+	movel	R(d1),MEM_POSTINC(res_ptr)
+	lsrl	R(cnt),R(d2)
+L(L1:)
+	movel	MEM_POSTINC(s_ptr),R(d1)
+	movel	R(d1),R(d3)
+	lsll	R(d5),R(d3)
+	orl	R(d3),R(d2)
+	movel	R(d2),MEM_POSTINC(res_ptr)
+	lsrl	R(cnt),R(d1)
+
+	dbf	R(s_size),L(Loop)
+	subl	#0x10000,R(s_size)
+	bcc	L(Loop)
+
+L(Lend:)
+	movel	R(d1),MEM(res_ptr) /* store most significant limb */
+
+/* Restore used registers from stack frame.  */
+	moveml	MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
+	rts
+
+/* We loop from most significant end of the arrays, which is only
+   permissable if the source and destination don't overlap, since the
+   function is documented to work for overlapping source and destination.  */
+
+L(Lspecial:)
+#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
+	lea	MEM_INDX1(s_ptr,s_size,l,4),R(s_ptr)
+	lea	MEM_INDX1(res_ptr,s_size,l,4),R(res_ptr)
+#else /* not mc68000 */
+	movel	R(s_size),R(d0)
+	asll	#2,R(d0)
+	addl	R(s_size),R(s_ptr)
+	addl	R(s_size),R(res_ptr)
+#endif
+
+	clrl	R(d0)			/* initialize carry */
+	eorw	#1,R(s_size)
+	lsrl	#1,R(s_size)
+	bcc	L(LL1)
+	subql	#1,R(s_size)
+
+L(LLoop:)
+	movel	MEM_PREDEC(s_ptr),R(d2)
+	roxrl	#1,R(d2)
+	movel	R(d2),MEM_PREDEC(res_ptr)
+L(LL1:)
+	movel	MEM_PREDEC(s_ptr),R(d2)
+	roxrl	#1,R(d2)
+	movel	R(d2),MEM_PREDEC(res_ptr)
+
+	dbf	R(s_size),L(LLoop)
+	roxrl	#1,R(d0)		/* save cy in msb */
+	subl	#0x10000,R(s_size)
+	bcs	L(LLend)
+	addl	R(d0),R(d0)		/* restore cy */
+	bra	L(LLoop)
+
+L(LLend:)
+/* Restore used registers from stack frame.  */
+	moveml	MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
+	rts
+EPILOG(__mpn_rshift)
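
Note: __mpn_rshift mirrors lshift in the other direction: shift right by cnt bits, 0 < cnt < 32, working up from the least significant limb, and return the bits shifted out at the bottom (left-aligned in a limb).  The cnt == 1 special path in the assembly uses roxrl to rotate each limb through the X flag.  A semantics sketch (not glibc source; names and limb width are assumptions):

typedef unsigned int mp_limb_t;		/* 32-bit limb assumed */

mp_limb_t
mpn_rshift_ref (mp_limb_t *res_ptr, const mp_limb_t *s_ptr,
		long s_size, unsigned int cnt)
{
  unsigned int tnc = 32 - cnt;
  mp_limb_t high = s_ptr[0];
  mp_limb_t retval = high << tnc;	/* bits shifted out at the bottom */
  mp_limb_t low = high >> cnt;

  for (long i = 1; i < s_size; i++)
    {
      high = s_ptr[i];
      res_ptr[i - 1] = low | (high << tnc);
      low = high >> cnt;
    }
  res_ptr[s_size - 1] = low;
  return retval;
}
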
diff --git a/sysdeps/m68k/sub_n.S b/sysdeps/m68k/sub_n.S
index 19f0ec1568..39f5161176 100644
--- a/sysdeps/m68k/sub_n.S
+++ b/sysdeps/m68k/sub_n.S
@@ -1,7 +1,7 @@
 /* mc68020 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
    store difference in a third limb vector.
 
-Copyright (C) 1992, 1994 Free Software Foundation, Inc.
+Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -27,50 +27,53 @@ the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
   size		(sp + 12)
 */
 
+#include "sysdep.h"
 #include "asm-syntax.h"
 
 	TEXT
 	ALIGN
-	GLOBL	___mpn_sub_n
+	GLOBL	C_SYMBOL_NAME(__mpn_sub_n)
 
-LAB(___mpn_sub_n)
+C_SYMBOL_NAME(__mpn_sub_n:)
+PROLOG(__mpn_sub_n)
 /* Save used registers on the stack.  */
-	INSN2(move,l	,MEM_PREDEC(sp),d2)
-	INSN2(move,l	,MEM_PREDEC(sp),a2)
+	movel	R(d2),MEM_PREDEC(sp)
+	movel	R(a2),MEM_PREDEC(sp)
 
 /* Copy the arguments to registers.  Better use movem?  */
-	INSN2(move,l	,a2,MEM_DISP(sp,12))
-	INSN2(move,l	,a0,MEM_DISP(sp,16))
-	INSN2(move,l	,a1,MEM_DISP(sp,20))
-	INSN2(move,l	,d2,MEM_DISP(sp,24))
-
-	INSN2(eor,w	,d2,#1)
-	INSN2(lsr,l	,d2,#1)
-	bcc L1
-	INSN2(subq,l	,d2,#1)		/* clears cy as side effect */
-
-LAB(Loop)
-	INSN2(move,l	,d0,MEM_POSTINC(a0))
-	INSN2(move,l	,d1,MEM_POSTINC(a1))
-	INSN2(subx,l	,d0,d1)
-	INSN2(move,l	,MEM_POSTINC(a2),d0)
-LAB(L1)	INSN2(move,l	,d0,MEM_POSTINC(a0))
-	INSN2(move,l	,d1,MEM_POSTINC(a1))
-	INSN2(subx,l	,d0,d1)
-	INSN2(move,l	,MEM_POSTINC(a2),d0)
-
-	dbf d2,Loop			/* loop until 16 lsb of %4 == -1 */
-	INSN2(subx,l	,d0,d0)		/* d0 <= -cy; save cy as 0 or -1 in d0 */
-	INSN2(sub,l	,d2,#0x10000)
-	bcs L2
-	INSN2(add,l	,d0,d0)		/* restore cy */
-	bra Loop
-
-LAB(L2)
-	INSN1(neg,l	,d0)
+	movel	MEM_DISP(sp,12),R(a2)
+	movel	MEM_DISP(sp,16),R(a0)
+	movel	MEM_DISP(sp,20),R(a1)
+	movel	MEM_DISP(sp,24),R(d2)
+
+	eorw	#1,R(d2)
+	lsrl	#1,R(d2)
+	bcc	L(L1)
+	subql	#1,R(d2)	/* clears cy as side effect */
+
+L(Loop:)
+	movel	MEM_POSTINC(a0),R(d0)
+	movel	MEM_POSTINC(a1),R(d1)
+	subxl	R(d1),R(d0)
+	movel	R(d0),MEM_POSTINC(a2)
+L(L1:)	movel	MEM_POSTINC(a0),R(d0)
+	movel	MEM_POSTINC(a1),R(d1)
+	subxl	R(d1),R(d0)
+	movel	R(d0),MEM_POSTINC(a2)
+
+	dbf	R(d2),L(Loop)		/* loop until 16 lsb of %4 == -1 */
+	subxl	R(d0),R(d0)	/* d0 <= -cy; save cy as 0 or -1 in d0 */
+	subl	#0x10000,R(d2)
+	bcs	L(L2)
+	addl	R(d0),R(d0)	/* restore cy */
+	bra	L(Loop)
+
+L(L2:)
+	negl	R(d0)
 
 /* Restore used registers from stack frame.  */
-	INSN2(move,l	,a2,MEM_POSTINC(sp))
-	INSN2(move,l	,d2,MEM_POSTINC(sp))
+	movel	MEM_POSTINC(sp),R(a2)
+	movel	MEM_POSTINC(sp),R(d2)
 
 	rts
+EPILOG(__mpn_sub_n)
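
Note: __mpn_sub_n is the borrow-propagating counterpart of add_n, built on subxl instead of addxl.  A semantics sketch (not glibc source; the *_ref name and 32-bit limb are assumptions):

typedef unsigned int mp_limb_t;		/* 32-bit limb assumed */

mp_limb_t
mpn_sub_n_ref (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
	       const mp_limb_t *s2_ptr, long size)
{
  mp_limb_t cy = 0;
  for (long i = 0; i < size; i++)
    {
      mp_limb_t a = s1_ptr[i], b = s2_ptr[i];
      mp_limb_t diff = a - b - cy;
      /* Borrow out if the 32-bit subtraction wrapped around.  */
      cy = (a < b) || (cy && a == b);
      res_ptr[i] = diff;
    }
  return cy;		/* 0 or 1 borrow, returned in d0 */
}
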