author		Greg McGary <greg@mcgary.org>	2000-06-26 18:20:43 +0000
committer	Greg McGary <greg@mcgary.org>	2000-06-26 18:20:43 +0000
commit		f9e7bbcead1459b5f47998b0040ed9d5b6aac029 (patch)
tree		ef9ae6e1150555a5ed5ee2febb4b2c7ffb4e5be2
parent		c000cdad1aa963d190eed7494a7a3df82c91ae99 (diff)
2000-06-26  Greg McGary  <greg@mcgary.org>

	* sysdeps/i386/addmul_1.S: Exchange roles of %ebp and %ebx.
	* sysdeps/i386/mul_1.S: Likewise.
	* sysdeps/i386/submul_1.S: Likewise.
	* sysdeps/i386/i586/add_n.S: Likewise.
	* sysdeps/i386/i586/addmul_1.S: Likewise.
	* sysdeps/i386/i586/lshift.S: Likewise.
	* sysdeps/i386/i586/mul_1.S: Likewise.
	* sysdeps/i386/i586/rshift.S: Likewise.
	* sysdeps/i386/i586/sub_n.S: Likewise.
	* sysdeps/i386/i586/submul_1.S: Likewise.
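
The change is mechanical and intended to be behavior-preserving: in each
routine, the operand that previously lived in %ebp (s2_limb, or the size
count in the add_n/sub_n/shift routines) moves to %ebx, and the
loop-carried scratch limb that previously lived in %ebx moves to %ebp.
Since both registers are callee-saved, the prologue pushes and epilogue
pops swap positions so they stay properly nested.  A condensed sketch of
the pattern, distilled from the addmul_1.S hunks below rather than quoted
verbatim from any one file:

	/* Before: %ebp = s2_limb, %ebx = carry limb.  */
	pushl	%edi
	pushl	%esi
	pushl	%ebx
	pushl	%ebp
	...			/* inner loop keeps the carry in %ebx */
	popl	%ebp
	popl	%ebx
	popl	%esi
	popl	%edi

	/* After: %ebx = s2_limb, %ebp = carry limb.  */
	pushl	%edi
	pushl	%esi
	pushl	%ebp
	pushl	%ebx
	...			/* inner loop keeps the carry in %ebp */
	popl	%ebx
	popl	%ebp
	popl	%esi
	popl	%edi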
-rw-r--r--  ChangeLog                    |  13
-rw-r--r--  sysdeps/i386/addmul_1.S      |  14
-rw-r--r--  sysdeps/i386/i586/add_n.S    |  50
-rw-r--r--  sysdeps/i386/i586/addmul_1.S |  24
-rw-r--r--  sysdeps/i386/i586/lshift.S   |  88
-rw-r--r--  sysdeps/i386/i586/mul_1.S    |  20
-rw-r--r--  sysdeps/i386/i586/rshift.S   |  88
-rw-r--r--  sysdeps/i386/i586/sub_n.S    |  50
-rw-r--r--  sysdeps/i386/i586/submul_1.S |  24
-rw-r--r--  sysdeps/i386/mul_1.S         |  14
-rw-r--r--  sysdeps/i386/submul_1.S      |  14
11 files changed, 206 insertions(+), 193 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index a4f0acd602..b5145380f4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,16 @@
+2000-06-26  Greg McGary  <greg@mcgary.org>
+
+	* sysdeps/i386/addmul_1.S: Exchange roles of %ebp and %ebx.
+	* sysdeps/i386/mul_1.S: Likewise.
+	* sysdeps/i386/submul_1.S: Likewise.
+	* sysdeps/i386/i586/add_n.S: Likewise.
+	* sysdeps/i386/i586/addmul_1.S: Likewise.
+	* sysdeps/i386/i586/lshift.S: Likewise.
+	* sysdeps/i386/i586/mul_1.S: Likewise.
+	* sysdeps/i386/i586/rshift.S: Likewise.
+	* sysdeps/i386/i586/sub_n.S: Likewise.
+	* sysdeps/i386/i586/submul_1.S: Likewise.
+
 2000-06-26  Ulrich Drepper  <drepper@redhat.com>
 
 	* rt/Makefile (librt-routines): Add clock_nanosleep.
diff --git a/sysdeps/i386/addmul_1.S b/sysdeps/i386/addmul_1.S
index a5d3fb0539..a028944b5c 100644
--- a/sysdeps/i386/addmul_1.S
+++ b/sysdeps/i386/addmul_1.S
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define sizeP ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_addmul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_addmul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,23 +50,23 @@ ENTRY(__mpn_addmul_1)
 	leal	(%res_ptr,%sizeP,4), %res_ptr
 	leal	(%s1_ptr,%sizeP,4), %s1_ptr
 	negl	%sizeP
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 L(oop):
 	movl	(%s1_ptr,%sizeP,4), %eax
 	mull	%s2_limb
-	addl	%ebx, %eax
+	addl	%ebp, %eax
 	adcl	$0, %edx
 	addl	%eax, (%res_ptr,%sizeP,4)
 	adcl	$0, %edx
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 
 	incl	%sizeP
 	jnz	L(oop)
-	movl	%ebx, %eax
+	movl	%ebp, %eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
diff --git a/sysdeps/i386/i586/add_n.S b/sysdeps/i386/i586/add_n.S
index 9bea31b068..7e30cac729 100644
--- a/sysdeps/i386/i586/add_n.S
+++ b/sysdeps/i386/i586/add_n.S
@@ -34,15 +34,15 @@ ENTRY(__mpn_add_n)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp),%edi
 	movl	S1(%esp),%esi
-	movl	S2(%esp),%ebp
+	movl	S2(%esp),%ebx
 	movl	SIZE(%esp),%ecx
 
-	movl	(%ebp),%ebx
+	movl	(%ebx),%ebp
 
 	decl	%ecx
 	movl	%ecx,%edx
@@ -58,42 +58,42 @@ L(oop):	movl	28(%edi),%eax		/* fetch destination cache line */
 
 L(1):	movl	(%esi),%eax
 	movl	4(%esi),%edx
-	adcl	%ebx,%eax
-	movl	4(%ebp),%ebx
-	adcl	%ebx,%edx
-	movl	8(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	4(%ebx),%ebp
+	adcl	%ebp,%edx
+	movl	8(%ebx),%ebp
 	movl	%eax,-32(%edi)
 	movl	%edx,-28(%edi)
 
 L(2):	movl	8(%esi),%eax
 	movl	12(%esi),%edx
-	adcl	%ebx,%eax
-	movl	12(%ebp),%ebx
-	adcl	%ebx,%edx
-	movl	16(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	12(%ebx),%ebp
+	adcl	%ebp,%edx
+	movl	16(%ebx),%ebp
 	movl	%eax,-24(%edi)
 	movl	%edx,-20(%edi)
 
 L(3):	movl	16(%esi),%eax
 	movl	20(%esi),%edx
-	adcl	%ebx,%eax
-	movl	20(%ebp),%ebx
-	adcl	%ebx,%edx
-	movl	24(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	20(%ebx),%ebp
+	adcl	%ebp,%edx
+	movl	24(%ebx),%ebp
 	movl	%eax,-16(%edi)
 	movl	%edx,-12(%edi)
 
 L(4):	movl	24(%esi),%eax
 	movl	28(%esi),%edx
-	adcl	%ebx,%eax
-	movl	28(%ebp),%ebx
-	adcl	%ebx,%edx
-	movl	32(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	28(%ebx),%ebp
+	adcl	%ebp,%edx
+	movl	32(%ebx),%ebp
 	movl	%eax,-8(%edi)
 	movl	%edx,-4(%edi)
 
 	leal	32(%esi),%esi
-	leal	32(%ebp),%ebp
+	leal	32(%ebx),%ebx
 	decl	%ecx
 	jnz	L(oop)
 
@@ -105,23 +105,23 @@ L(end):
 L(oop2):
 	leal	4(%edi),%edi
 	movl	(%esi),%eax
-	adcl	%ebx,%eax
-	movl	4(%ebp),%ebx
+	adcl	%ebp,%eax
+	movl	4(%ebx),%ebp
 	movl	%eax,-4(%edi)
 	leal	4(%esi),%esi
-	leal	4(%ebp),%ebp
+	leal	4(%ebx),%ebx
 	decl	%edx
 	jnz	L(oop2)
 L(end2):
 	movl	(%esi),%eax
-	adcl	%ebx,%eax
+	adcl	%ebp,%eax
 	movl	%eax,(%edi)
 
 	sbbl	%eax,%eax
 	negl	%eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
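One detail worth noting in add_n.S above (sub_n.S later uses the same
idiom): the carry out of the final limb is turned into the function's
return value by a two-instruction sequence.  Annotated here for
exposition; the source itself carries no comments at that point:

	sbbl	%eax,%eax	/* %eax = 0 - CF, i.e. 0 or -1 */
	negl	%eax		/* normalize to 0 or 1: the carry out */
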
diff --git a/sysdeps/i386/i586/addmul_1.S b/sysdeps/i386/i586/addmul_1.S
index bf5b718a9f..06cdbe1c6d 100644
--- a/sysdeps/i386/i586/addmul_1.S
+++ b/sysdeps/i386/i586/addmul_1.S
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define size ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_addmul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_addmul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,30 +50,30 @@ ENTRY(__mpn_addmul_1)
 	leal	(%res_ptr,%size,4), %res_ptr
 	leal	(%s1_ptr,%size,4), %s1_ptr
 	negl	%size
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 
-L(oop):	adcl	$0, %ebx
+L(oop):	adcl	$0, %ebp
 	movl	(%s1_ptr,%size,4), %eax
 
 	mull	%s2_limb
 
-	addl	%ebx, %eax
-	movl	(%res_ptr,%size,4), %ebx
+	addl	%ebp, %eax
+	movl	(%res_ptr,%size,4), %ebp
 
 	adcl	$0, %edx
-	addl	%eax, %ebx
+	addl	%eax, %ebp
 
-	movl	%ebx, (%res_ptr,%size,4)
+	movl	%ebp, (%res_ptr,%size,4)
 	incl	%size
 
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 	jnz	L(oop)
 
-	adcl	$0, %ebx
-	movl	%ebx, %eax
-	popl	%ebp
+	adcl	$0, %ebp
+	movl	%ebp, %eax
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
diff --git a/sysdeps/i386/i586/lshift.S b/sysdeps/i386/i586/lshift.S
index d3f0da0062..2a44150377 100644
--- a/sysdeps/i386/i586/lshift.S
+++ b/sysdeps/i386/i586/lshift.S
@@ -33,12 +33,12 @@ ENTRY(__mpn_lshift)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp),%edi
 	movl	S(%esp),%esi
-	movl	SIZE(%esp),%ebp
+	movl	SIZE(%esp),%ebx
 	movl	CNT(%esp),%ecx
 
 /* We can use faster code for shift-by-1 under certain conditions.  */
@@ -47,13 +47,13 @@ ENTRY(__mpn_lshift)
 	leal	4(%esi),%eax
 	cmpl	%edi,%eax
 	jnc	L(special)		/* jump if s_ptr + 1 >= res_ptr */
-	leal	(%esi,%ebp,4),%eax
+	leal	(%esi,%ebx,4),%eax
 	cmpl	%eax,%edi
 	jnc	L(special)		/* jump if res_ptr >= s_ptr + size */
 
 L(normal):
-	leal	-4(%edi,%ebp,4),%edi
-	leal	-4(%esi,%ebp,4),%esi
+	leal	-4(%edi,%ebx,4),%edi
+	leal	-4(%esi,%ebx,4),%esi
 
 	movl	(%esi),%edx
 	subl	$4,%esi
@@ -61,52 +61,52 @@ L(normal):
 	shldl	%cl,%edx,%eax		/* compute carry limb */
 	pushl	%eax			/* push carry limb onto stack */
 
-	decl	%ebp
-	pushl	%ebp
-	shrl	$3,%ebp
+	decl	%ebx
+	pushl	%ebx
+	shrl	$3,%ebx
 	jz	L(end)
 
 	movl	(%edi),%eax		/* fetch destination cache line */
 
 	ALIGN	(2)
 L(oop):	movl	-28(%edi),%eax		/* fetch destination cache line */
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 
 	movl	(%esi),%eax
 	movl	-4(%esi),%edx
-	shldl	%cl,%eax,%ebx
+	shldl	%cl,%eax,%ebp
 	shldl	%cl,%edx,%eax
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 	movl	%eax,-4(%edi)
 
-	movl	-8(%esi),%ebx
+	movl	-8(%esi),%ebp
 	movl	-12(%esi),%eax
-	shldl	%cl,%ebx,%edx
-	shldl	%cl,%eax,%ebx
+	shldl	%cl,%ebp,%edx
+	shldl	%cl,%eax,%ebp
 	movl	%edx,-8(%edi)
-	movl	%ebx,-12(%edi)
+	movl	%ebp,-12(%edi)
 
 	movl	-16(%esi),%edx
-	movl	-20(%esi),%ebx
+	movl	-20(%esi),%ebp
 	shldl	%cl,%edx,%eax
-	shldl	%cl,%ebx,%edx
+	shldl	%cl,%ebp,%edx
 	movl	%eax,-16(%edi)
 	movl	%edx,-20(%edi)
 
 	movl	-24(%esi),%eax
 	movl	-28(%esi),%edx
-	shldl	%cl,%eax,%ebx
+	shldl	%cl,%eax,%ebp
 	shldl	%cl,%edx,%eax
-	movl	%ebx,-24(%edi)
+	movl	%ebp,-24(%edi)
 	movl	%eax,-28(%edi)
 
 	subl	$32,%esi
 	subl	$32,%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(oop)
 
-L(end):	popl	%ebp
-	andl	$7,%ebp
+L(end):	popl	%ebx
+	andl	$7,%ebx
 	jz	L(end2)
 L(oop2):
 	movl	(%esi),%eax
@@ -115,7 +115,7 @@ L(oop2):
 	movl	%eax,%edx
 	subl	$4,%esi
 	subl	$4,%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(oop2)
 
 L(end2):
@@ -124,8 +124,8 @@ L(end2):
 
 	popl	%eax			/* pop carry limb */
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
@@ -141,13 +141,13 @@ L(special):
 	movl	(%esi),%edx
 	addl	$4,%esi
 
-	decl	%ebp
-	pushl	%ebp
-	shrl	$3,%ebp
+	decl	%ebx
+	pushl	%ebx
+	shrl	$3,%ebx
 
 	addl	%edx,%edx
-	incl	%ebp
-	decl	%ebp
+	incl	%ebx
+	decl	%ebx
 	jz	L(Lend)
 
 	movl	(%edi),%eax		/* fetch destination cache line */
@@ -155,56 +155,56 @@ L(special):
 	ALIGN	(2)
 L(Loop):
 	movl	28(%edi),%eax		/* fetch destination cache line */
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 
 	movl	(%esi),%eax
 	movl	4(%esi),%edx
 	adcl	%eax,%eax
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 	adcl	%edx,%edx
 	movl	%eax,4(%edi)
 
-	movl	8(%esi),%ebx
+	movl	8(%esi),%ebp
 	movl	12(%esi),%eax
-	adcl	%ebx,%ebx
+	adcl	%ebp,%ebp
 	movl	%edx,8(%edi)
 	adcl	%eax,%eax
-	movl	%ebx,12(%edi)
+	movl	%ebp,12(%edi)
 
 	movl	16(%esi),%edx
-	movl	20(%esi),%ebx
+	movl	20(%esi),%ebp
 	adcl	%edx,%edx
 	movl	%eax,16(%edi)
-	adcl	%ebx,%ebx
+	adcl	%ebp,%ebp
 	movl	%edx,20(%edi)
 
 	movl	24(%esi),%eax
 	movl	28(%esi),%edx
 	adcl	%eax,%eax
-	movl	%ebx,24(%edi)
+	movl	%ebp,24(%edi)
 	adcl	%edx,%edx
 	movl	%eax,28(%edi)
 
 	leal	32(%esi),%esi		/* use leal not to clobber carry */
 	leal	32(%edi),%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(Loop)
 
 L(Lend):
-	popl	%ebp
+	popl	%ebx
 	sbbl	%eax,%eax		/* save carry in %eax */
-	andl	$7,%ebp
+	andl	$7,%ebx
 	jz	L(Lend2)
 	addl	%eax,%eax		/* restore carry from eax */
 L(Loop2):
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 	movl	(%esi),%edx
 	adcl	%edx,%edx
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 
 	leal	4(%esi),%esi		/* use leal not to clobber carry */
 	leal	4(%edi),%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(Loop2)
 
 	jmp	L(L1)
@@ -215,8 +215,8 @@ L(L1):	movl	%edx,(%edi)		/* store last limb */
 	sbbl	%eax,%eax
 	negl	%eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
diff --git a/sysdeps/i386/i586/mul_1.S b/sysdeps/i386/i586/mul_1.S
index 1910c66eab..2d777036ad 100644
--- a/sysdeps/i386/i586/mul_1.S
+++ b/sysdeps/i386/i586/mul_1.S
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define size ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_mul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_mul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,26 +50,26 @@ ENTRY(__mpn_mul_1)
 	leal	(%res_ptr,%size,4), %res_ptr
 	leal	(%s1_ptr,%size,4), %s1_ptr
 	negl	%size
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 
-L(oop):	adcl	$0, %ebx
+L(oop):	adcl	$0, %ebp
 	movl	(%s1_ptr,%size,4), %eax
 
 	mull	%s2_limb
 
-	addl	%eax, %ebx
+	addl	%eax, %ebp
 
-	movl	%ebx, (%res_ptr,%size,4)
+	movl	%ebp, (%res_ptr,%size,4)
 	incl	%size
 
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 	jnz	L(oop)
 
-	adcl	$0, %ebx
-	movl	%ebx, %eax
-	popl	%ebp
+	adcl	$0, %ebp
+	movl	%ebp, %eax
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
diff --git a/sysdeps/i386/i586/rshift.S b/sysdeps/i386/i586/rshift.S
index 2395446d1b..00b4cb710e 100644
--- a/sysdeps/i386/i586/rshift.S
+++ b/sysdeps/i386/i586/rshift.S
@@ -33,12 +33,12 @@ ENTRY(__mpn_rshift)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp),%edi
 	movl	S(%esp),%esi
-	movl	SIZE(%esp),%ebp
+	movl	SIZE(%esp),%ebx
 	movl	CNT(%esp),%ecx
 
 /* We can use faster code for shift-by-1 under certain conditions.  */
@@ -47,7 +47,7 @@ ENTRY(__mpn_rshift)
 	leal	4(%edi),%eax
 	cmpl	%esi,%eax
 	jnc	L(special)		/* jump if res_ptr + 1 >= s_ptr */
-	leal	(%edi,%ebp,4),%eax
+	leal	(%edi,%ebx,4),%eax
 	cmpl	%eax,%esi
 	jnc	L(special)		/* jump if s_ptr >= res_ptr + size */
 
@@ -58,52 +58,52 @@ L(normal):
 	shrdl	%cl,%edx,%eax		/* compute carry limb */
 	pushl	%eax			/* push carry limb onto stack */
 
-	decl	%ebp
-	pushl	%ebp
-	shrl	$3,%ebp
+	decl	%ebx
+	pushl	%ebx
+	shrl	$3,%ebx
 	jz	L(end)
 
 	movl	(%edi),%eax		/* fetch destination cache line */
 
 	ALIGN	(2)
 L(oop):	movl	28(%edi),%eax		/* fetch destination cache line */
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 
 	movl	(%esi),%eax
 	movl	4(%esi),%edx
-	shrdl	%cl,%eax,%ebx
+	shrdl	%cl,%eax,%ebp
 	shrdl	%cl,%edx,%eax
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 	movl	%eax,4(%edi)
 
-	movl	8(%esi),%ebx
+	movl	8(%esi),%ebp
 	movl	12(%esi),%eax
-	shrdl	%cl,%ebx,%edx
-	shrdl	%cl,%eax,%ebx
+	shrdl	%cl,%ebp,%edx
+	shrdl	%cl,%eax,%ebp
 	movl	%edx,8(%edi)
-	movl	%ebx,12(%edi)
+	movl	%ebp,12(%edi)
 
 	movl	16(%esi),%edx
-	movl	20(%esi),%ebx
+	movl	20(%esi),%ebp
 	shrdl	%cl,%edx,%eax
-	shrdl	%cl,%ebx,%edx
+	shrdl	%cl,%ebp,%edx
 	movl	%eax,16(%edi)
 	movl	%edx,20(%edi)
 
 	movl	24(%esi),%eax
 	movl	28(%esi),%edx
-	shrdl	%cl,%eax,%ebx
+	shrdl	%cl,%eax,%ebp
 	shrdl	%cl,%edx,%eax
-	movl	%ebx,24(%edi)
+	movl	%ebp,24(%edi)
 	movl	%eax,28(%edi)
 
 	addl	$32,%esi
 	addl	$32,%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(oop)
 
-L(end):	popl	%ebp
-	andl	$7,%ebp
+L(end):	popl	%ebx
+	andl	$7,%ebx
 	jz	L(end2)
 L(oop2):
 	movl	(%esi),%eax
@@ -112,7 +112,7 @@ L(oop2):
 	movl	%eax,%edx
 	addl	$4,%esi
 	addl	$4,%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(oop2)
 
 L(end2):
@@ -121,8 +121,8 @@ L(end2):
 
 	popl	%eax			/* pop carry limb */
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
@@ -135,19 +135,19 @@ L(end2):
 */
 
 L(special):
-	leal	-4(%edi,%ebp,4),%edi
-	leal	-4(%esi,%ebp,4),%esi
+	leal	-4(%edi,%ebx,4),%edi
+	leal	-4(%esi,%ebx,4),%esi
 
 	movl	(%esi),%edx
 	subl	$4,%esi
 
-	decl	%ebp
-	pushl	%ebp
-	shrl	$3,%ebp
+	decl	%ebx
+	pushl	%ebx
+	shrl	$3,%ebx
 
 	shrl	$1,%edx
-	incl	%ebp
-	decl	%ebp
+	incl	%ebx
+	decl	%ebx
 	jz	L(Lend)
 
 	movl	(%edi),%eax		/* fetch destination cache line */
@@ -155,56 +155,56 @@ L(special):
 	ALIGN	(2)
 L(Loop):
 	movl	-28(%edi),%eax		/* fetch destination cache line */
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 
 	movl	(%esi),%eax
 	movl	-4(%esi),%edx
 	rcrl	$1,%eax
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 	rcrl	$1,%edx
 	movl	%eax,-4(%edi)
 
-	movl	-8(%esi),%ebx
+	movl	-8(%esi),%ebp
 	movl	-12(%esi),%eax
-	rcrl	$1,%ebx
+	rcrl	$1,%ebp
 	movl	%edx,-8(%edi)
 	rcrl	$1,%eax
-	movl	%ebx,-12(%edi)
+	movl	%ebp,-12(%edi)
 
 	movl	-16(%esi),%edx
-	movl	-20(%esi),%ebx
+	movl	-20(%esi),%ebp
 	rcrl	$1,%edx
 	movl	%eax,-16(%edi)
-	rcrl	$1,%ebx
+	rcrl	$1,%ebp
 	movl	%edx,-20(%edi)
 
 	movl	-24(%esi),%eax
 	movl	-28(%esi),%edx
 	rcrl	$1,%eax
-	movl	%ebx,-24(%edi)
+	movl	%ebp,-24(%edi)
 	rcrl	$1,%edx
 	movl	%eax,-28(%edi)
 
 	leal	-32(%esi),%esi		/* use leal not to clobber carry */
 	leal	-32(%edi),%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(Loop)
 
 L(Lend):
-	popl	%ebp
+	popl	%ebx
 	sbbl	%eax,%eax		/* save carry in %eax */
-	andl	$7,%ebp
+	andl	$7,%ebx
 	jz	L(Lend2)
 	addl	%eax,%eax		/* restore carry from eax */
 L(Loop2):
-	movl	%edx,%ebx
+	movl	%edx,%ebp
 	movl	(%esi),%edx
 	rcrl	$1,%edx
-	movl	%ebx,(%edi)
+	movl	%ebp,(%edi)
 
 	leal	-4(%esi),%esi		/* use leal not to clobber carry */
 	leal	-4(%edi),%edi
-	decl	%ebp
+	decl	%ebx
 	jnz	L(Loop2)
 
 	jmp	L(L1)
@@ -215,8 +215,8 @@ L(L1):	movl	%edx,(%edi)		/* store last limb */
 	movl	$0,%eax
 	rcrl	$1,%eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
diff --git a/sysdeps/i386/i586/sub_n.S b/sysdeps/i386/i586/sub_n.S
index dbfb5d2865..fcb13f114b 100644
--- a/sysdeps/i386/i586/sub_n.S
+++ b/sysdeps/i386/i586/sub_n.S
@@ -34,15 +34,15 @@ ENTRY(__mpn_sub_n)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp),%edi
 	movl	S1(%esp),%esi
-	movl	S2(%esp),%ebp
+	movl	S2(%esp),%ebx
 	movl	SIZE(%esp),%ecx
 
-	movl	(%ebp),%ebx
+	movl	(%ebx),%ebp
 
 	decl	%ecx
 	movl	%ecx,%edx
@@ -58,42 +58,42 @@ L(oop):	movl	28(%edi),%eax		/* fetch destination cache line */
 
 L(1):	movl	(%esi),%eax
 	movl	4(%esi),%edx
-	sbbl	%ebx,%eax
-	movl	4(%ebp),%ebx
-	sbbl	%ebx,%edx
-	movl	8(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	4(%ebx),%ebp
+	sbbl	%ebp,%edx
+	movl	8(%ebx),%ebp
 	movl	%eax,-32(%edi)
 	movl	%edx,-28(%edi)
 
 L(2):	movl	8(%esi),%eax
 	movl	12(%esi),%edx
-	sbbl	%ebx,%eax
-	movl	12(%ebp),%ebx
-	sbbl	%ebx,%edx
-	movl	16(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	12(%ebx),%ebp
+	sbbl	%ebp,%edx
+	movl	16(%ebx),%ebp
 	movl	%eax,-24(%edi)
 	movl	%edx,-20(%edi)
 
 L(3):	movl	16(%esi),%eax
 	movl	20(%esi),%edx
-	sbbl	%ebx,%eax
-	movl	20(%ebp),%ebx
-	sbbl	%ebx,%edx
-	movl	24(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	20(%ebx),%ebp
+	sbbl	%ebp,%edx
+	movl	24(%ebx),%ebp
 	movl	%eax,-16(%edi)
 	movl	%edx,-12(%edi)
 
 L(4):	movl	24(%esi),%eax
 	movl	28(%esi),%edx
-	sbbl	%ebx,%eax
-	movl	28(%ebp),%ebx
-	sbbl	%ebx,%edx
-	movl	32(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	28(%ebx),%ebp
+	sbbl	%ebp,%edx
+	movl	32(%ebx),%ebp
 	movl	%eax,-8(%edi)
 	movl	%edx,-4(%edi)
 
 	leal	32(%esi),%esi
-	leal	32(%ebp),%ebp
+	leal	32(%ebx),%ebx
 	decl	%ecx
 	jnz	L(oop)
 
@@ -105,23 +105,23 @@ L(end):
 L(oop2):
 	leal	4(%edi),%edi
 	movl	(%esi),%eax
-	sbbl	%ebx,%eax
-	movl	4(%ebp),%ebx
+	sbbl	%ebp,%eax
+	movl	4(%ebx),%ebp
 	movl	%eax,-4(%edi)
 	leal	4(%esi),%esi
-	leal	4(%ebp),%ebp
+	leal	4(%ebx),%ebx
 	decl	%edx
 	jnz	L(oop2)
 L(end2):
 	movl	(%esi),%eax
-	sbbl	%ebx,%eax
+	sbbl	%ebp,%eax
 	movl	%eax,(%edi)
 
 	sbbl	%eax,%eax
 	negl	%eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
diff --git a/sysdeps/i386/i586/submul_1.S b/sysdeps/i386/i586/submul_1.S
index 4cb2b5da92..d9e197dcc1 100644
--- a/sysdeps/i386/i586/submul_1.S
+++ b/sysdeps/i386/i586/submul_1.S
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define size ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_submul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_submul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,30 +50,30 @@ ENTRY(__mpn_submul_1)
 	leal	(%res_ptr,%size,4), %res_ptr
 	leal	(%s1_ptr,%size,4), %s1_ptr
 	negl	%size
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 
-L(oop):	adcl	$0, %ebx
+L(oop):	adcl	$0, %ebp
 	movl	(%s1_ptr,%size,4), %eax
 
 	mull	%s2_limb
 
-	addl	%ebx, %eax
-	movl	(%res_ptr,%size,4), %ebx
+	addl	%ebp, %eax
+	movl	(%res_ptr,%size,4), %ebp
 
 	adcl	$0, %edx
-	subl	%eax, %ebx
+	subl	%eax, %ebp
 
-	movl	%ebx, (%res_ptr,%size,4)
+	movl	%ebp, (%res_ptr,%size,4)
 	incl	%size
 
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 	jnz	L(oop)
 
-	adcl	$0, %ebx
-	movl	%ebx, %eax
-	popl	%ebp
+	adcl	$0, %ebp
+	movl	%ebp, %eax
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
diff --git a/sysdeps/i386/mul_1.S b/sysdeps/i386/mul_1.S
index 5abe97597a..f5d49540b9 100644
--- a/sysdeps/i386/mul_1.S
+++ b/sysdeps/i386/mul_1.S
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define size ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_mul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_mul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,22 +50,22 @@ ENTRY(__mpn_mul_1)
 	leal	(%res_ptr,%size,4), %res_ptr
 	leal	(%s1_ptr,%size,4), %s1_ptr
 	negl	%size
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 L(oop):
 	movl	(%s1_ptr,%size,4), %eax
 	mull	%s2_limb
-	addl	%ebx, %eax
+	addl	%ebp, %eax
 	movl	%eax, (%res_ptr,%size,4)
 	adcl	$0, %edx
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 
 	incl	%size
 	jnz	L(oop)
-	movl	%ebx, %eax
+	movl	%ebp, %eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi
 
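For reference, the complete post-patch inner loop of sysdeps/i386/mul_1.S,
with the carry limb now in %ebp.  The comments are added here for
exposition and do not appear in the source:

L(oop):
	movl	(%s1_ptr,%size,4), %eax	/* load next source limb */
	mull	%s2_limb		/* %edx:%eax = limb * s2_limb (%ebx) */
	addl	%ebp, %eax		/* add carry limb from previous step */
	movl	%eax, (%res_ptr,%size,4) /* store result limb */
	adcl	$0, %edx		/* fold carry-out into the high word */
	movl	%edx, %ebp		/* high word becomes the next carry */
	incl	%size			/* %size counts up from -n to zero */
	jnz	L(oop)
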
diff --git a/sysdeps/i386/submul_1.S b/sysdeps/i386/submul_1.S
index 8715aba7f4..b3c4e42b88 100644
--- a/sysdeps/i386/submul_1.S
+++ b/sysdeps/i386/submul_1.S
@@ -31,7 +31,7 @@
 #define res_ptr edi
 #define s1_ptr esi
 #define sizeP ecx
-#define s2_limb ebp
+#define s2_limb ebx
 
 	.text
 ENTRY(__mpn_submul_1)
@@ -39,8 +39,8 @@ ENTRY(__mpn_submul_1)
 
 	pushl	%edi
 	pushl	%esi
-	pushl	%ebx
 	pushl	%ebp
+	pushl	%ebx
 
 	movl	RES(%esp), %res_ptr
 	movl	S1(%esp), %s1_ptr
@@ -50,23 +50,23 @@ ENTRY(__mpn_submul_1)
 	leal	(%res_ptr,%sizeP,4), %res_ptr
 	leal	(%s1_ptr,%sizeP,4), %s1_ptr
 	negl	%sizeP
-	xorl	%ebx, %ebx
+	xorl	%ebp, %ebp
 	ALIGN (3)
 L(oop):
 	movl	(%s1_ptr,%sizeP,4), %eax
 	mull	%s2_limb
-	addl	%ebx, %eax
+	addl	%ebp, %eax
 	adcl	$0, %edx
 	subl	%eax, (%res_ptr,%sizeP,4)
 	adcl	$0, %edx
-	movl	%edx, %ebx
+	movl	%edx, %ebp
 
 	incl	%sizeP
 	jnz	L(oop)
-	movl	%ebx, %eax
+	movl	%ebp, %eax
 
-	popl	%ebp
 	popl	%ebx
+	popl	%ebp
 	popl	%esi
 	popl	%edi