author    Zack Weinberg <zackw@panix.com>  2017-06-08 15:39:03 -0400
committer Zack Weinberg <zackw@panix.com>  2017-06-08 15:39:03 -0400
commit    5046dbb4a7eba5eccfd258f92f4735c9ffc8d069
tree      4470480d904b65cf14ca524f96f79eca818c3eaf /REORG.TODO/sysdeps/i386/i686/add_n.S
parent    199fc19d3aaaf57944ef036e15904febe877fc93
Prepare for radical source tree reorganization.

All top-level files and directories are moved into a temporary storage
directory, REORG.TODO, except for files that will certainly still
exist in their current form at top level when we're done (COPYING,
COPYING.LIB, LICENSES, NEWS, README), all old ChangeLog files (which
are moved to the new directory OldChangeLogs, instead), and the
generated file INSTALL (which is just deleted; in the new order, there
will be no generated files checked into version control).
Diffstat (limited to 'REORG.TODO/sysdeps/i386/i686/add_n.S')
-rw-r--r--  REORG.TODO/sysdeps/i386/i686/add_n.S  110
1 file changed, 110 insertions, 0 deletions
diff --git a/REORG.TODO/sysdeps/i386/i686/add_n.S b/REORG.TODO/sysdeps/i386/i686/add_n.S
new file mode 100644
index 0000000000..4afa648ceb
--- /dev/null
+++ b/REORG.TODO/sysdeps/i386/i686/add_n.S
@@ -0,0 +1,110 @@
+/* Add two limb vectors of the same length > 0 and store sum in a third
+   limb vector.
+   Copyright (C) 1992-2017 Free Software Foundation, Inc.
+   This file is part of the GNU MP Library.
+
+   The GNU MP Library is free software; you can redistribute it and/or modify
+   it under the terms of the GNU Lesser General Public License as published by
+   the Free Software Foundation; either version 2.1 of the License, or (at your
+   option) any later version.
+
+   The GNU MP Library is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+   License for more details.
+
+   You should have received a copy of the GNU Lesser General Public License
+   along with the GNU MP Library; see the file COPYING.LIB.  If not,
+   see <http://www.gnu.org/licenses/>.  */
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+#define PARMS	4+8		/* space for 2 saved regs */
+#define RES	PARMS
+#define S1	RES+4
+#define S2	S1+4
+#define SIZE	S2+4
+
+	.text
+#ifdef PIC
+L(1):	addl    (%esp), %eax
+	ret
+#endif
+ENTRY (__mpn_add_n)
+
+	pushl %edi
+	cfi_adjust_cfa_offset (4)
+	pushl %esi
+	cfi_adjust_cfa_offset (4)
+
+	movl	RES(%esp),%edi
+	cfi_rel_offset (edi, 4)
+	movl	S1(%esp),%esi
+	cfi_rel_offset (esi, 0)
+	movl	S2(%esp),%edx
+	movl	SIZE(%esp),%ecx
+	movl	%ecx,%eax
+	shrl	$3,%ecx			/* compute count for unrolled loop */
+	negl	%eax
+	andl	$7,%eax			/* get index where to start loop */
+	jz	L(oop)			/* necessary special case for 0 */
+	incl	%ecx			/* adjust loop count */
+	shll	$2,%eax			/* adjustment for pointers... */
+	subl	%eax,%edi		/* ... since they are offset ... */
+	subl	%eax,%esi		/* ... by a constant when we ... */
+	subl	%eax,%edx		/* ... enter the loop */
+	shrl	$2,%eax			/* restore previous value */
+#ifdef PIC
+/* Calculate start address in loop for PIC.  */
+	leal	(L(oop)-L(0)-3)(%eax,%eax,8),%eax
+	call	L(1)
+L(0):
+#else
+/* Calculate start address in loop for non-PIC.  */
+	leal	(L(oop) - 3)(%eax,%eax,8),%eax
+#endif
+	jmp	*%eax			/* jump into loop */
+	ALIGN (3)
+L(oop):	movl	(%esi),%eax
+	adcl	(%edx),%eax
+	movl	%eax,(%edi)
+	movl	4(%esi),%eax
+	adcl	4(%edx),%eax
+	movl	%eax,4(%edi)
+	movl	8(%esi),%eax
+	adcl	8(%edx),%eax
+	movl	%eax,8(%edi)
+	movl	12(%esi),%eax
+	adcl	12(%edx),%eax
+	movl	%eax,12(%edi)
+	movl	16(%esi),%eax
+	adcl	16(%edx),%eax
+	movl	%eax,16(%edi)
+	movl	20(%esi),%eax
+	adcl	20(%edx),%eax
+	movl	%eax,20(%edi)
+	movl	24(%esi),%eax
+	adcl	24(%edx),%eax
+	movl	%eax,24(%edi)
+	movl	28(%esi),%eax
+	adcl	28(%edx),%eax
+	movl	%eax,28(%edi)
+	leal	32(%edi),%edi
+	leal	32(%esi),%esi
+	leal	32(%edx),%edx
+	decl	%ecx
+	jnz	L(oop)
+
+	sbbl	%eax,%eax
+	negl	%eax
+
+	popl %esi
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (esi)
+	popl %edi
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (edi)
+
+	ret
+END (__mpn_add_n)
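
In portable terms, __mpn_add_n adds two equal-length vectors of 32-bit
limbs and returns the carry out of the most significant limb; the closing
sbbl/negl pair is what materializes the carry flag as that 0/1 return
value.  A minimal C sketch of this contract (an editor's illustration;
mpn_add_n_ref and the local mp_limb_t typedef are hypothetical names, not
part of the commit):

#include <stddef.h>
#include <stdint.h>

typedef uint32_t mp_limb_t;          /* 32-bit limbs, as on i386 */

/* Reference semantics: res[i] = s1[i] + s2[i] plus the running carry,
   for n > 0 limbs; returns the final carry out (0 or 1).  */
mp_limb_t
mpn_add_n_ref (mp_limb_t *res, const mp_limb_t *s1,
               const mp_limb_t *s2, size_t n)
{
  mp_limb_t carry = 0;
  for (size_t i = 0; i < n; i++)
    {
      /* Widen to 64 bits so the carry out is simply bit 32.  */
      uint64_t acc = (uint64_t) s1[i] + s2[i] + carry;
      res[i] = (mp_limb_t) acc;
      carry = (mp_limb_t) (acc >> 32);
    }
  return carry;
}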
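
The entry sequence is a computed jump into an 8-way unrolled loop.  EAX is
set to (-n) & 7, the number of limb steps to skip on the first pass; ECX
(shrl $3, plus incl when there is a partial pass) becomes the pass count;
the three pointers are biased back by 4*EAX so the fixed displacements
0..28 still address the right limbs; and leal (L(oop)-3)(%eax,%eax,8)
selects the entry point because each displacement-carrying step assembles
to 9 bytes while the first, displacement-free step takes only 6 (hence the
-3).  Under PIC the link-time offset in EAX is turned into an absolute
address by adding the return address pushed by call L(1).  The carry stays
live in CF across iterations, since leal and decl do not clobber it.  The
nearest structured-C analogue is a Duff's-device switch; a sketch under
the same assumptions as above (mpn_add_n_unrolled is a hypothetical name):

#include <stddef.h>
#include <stdint.h>

typedef uint32_t mp_limb_t;          /* 32-bit limbs, as on i386 */

/* Enter the 8-way unrolled body partway through, so that n need not be
   a multiple of 8.  Requires n > 0, like the assembly version.  */
mp_limb_t
mpn_add_n_unrolled (mp_limb_t *res, const mp_limb_t *s1,
                    const mp_limb_t *s2, size_t n)
{
  uint64_t acc = 0;                      /* bit 32 carries between steps */
  size_t i = 0;
  size_t iters = n / 8 + (n % 8 != 0);   /* pass count: shrl $3 + incl */

  /* One unrolled step: add limbs plus incoming carry, store, advance.  */
#define STEP                                                          \
  acc = (uint64_t) s1[i] + s2[i] + (mp_limb_t) (acc >> 32),           \
  res[i] = (mp_limb_t) acc,                                           \
  i++

  switch (n % 8)                         /* jump into the unrolled body */
    {
    case 0: do { STEP;
    case 7:      STEP;
    case 6:      STEP;
    case 5:      STEP;
    case 4:      STEP;
    case 3:      STEP;
    case 2:      STEP;
    case 1:      STEP;
               } while (--iters != 0);
    }
#undef STEP

  return (mp_limb_t) (acc >> 32);        /* carry out, as sbbl/negl */
}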