author     Ulrich Drepper <drepper@redhat.com>   2000-01-05 02:09:12 +0000
committer  Ulrich Drepper <drepper@redhat.com>   2000-01-05 02:09:12 +0000
commit     1d2fc9b3c59d0e83e04139ddf633731264b76ea2 (patch)
tree       c738cf2a40851dc25be2c252ba5dbb7f335b5e14 /sysdeps/i386
parent     f19f2b34439145daf300bf12789bbc61c8d4db28 (diff)
Redesigned how cancellation unblocks a thread from internal cancellation points (sem_wait, pthread_join, pthread_cond_{wait,timedwait}). Cancellation won't eat a signal in any of these functions (*required* by POSIX and Single Unix Spec!).
2000-01-03  Kaz Kylheku  <kaz@ashi.footprints.net>

	Redesigned how cancellation unblocks a thread from internal
	cancellation points (sem_wait, pthread_join,
	pthread_cond_{wait,timedwait}).
	Cancellation won't eat a signal in any of these functions
	(*required* by POSIX and Single Unix Spec!).
	* condvar.c: Spontaneous wakeup on pthread_cond_timedwait won't eat a
	simultaneous condition variable signal (not required by POSIX
	or Single Unix Spec, but nice).
	* spinlock.c: __pthread_lock queues back any received restarts
	that don't belong to it instead of assuming ownership of lock
	upon any restart; fastlock can no longer be acquired by two threads
	simultaneously.
	* restart.h: Restarts queue even on kernels that don't have
	queued real time signals (2.0, early 2.1), thanks to an atomic
	counter, avoiding a rare race condition in pthread_cond_timedwait.
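
The idea behind the queued restarts is compact enough to sketch. What
follows is a minimal illustration of the counter described above,
written with C11 atomics for clarity; it is not glibc's actual code.
linuxthreads predates C11 and uses its own atomic primitives and
signal plumbing, and the names here (thread_sketch, resume_count,
restart_sketch, suspend_sketch) are invented for the example.

#include <stdatomic.h>

/* Per-thread state: restarts received minus suspends consumed.  The
   counter is what lets a restart "queue" even on kernels without
   queued real-time signals.  */
struct thread_sketch
{
  atomic_int resume_count;
};

/* Waker side: record the restart before (in the real code) sending
   the restart signal to the target thread.  */
void
restart_sketch (struct thread_sketch *th)
{
  atomic_fetch_add (&th->resume_count, 1);
  /* ... sending the restart signal to th would follow here ... */
}

/* Sleeper side: block only if no restart is already pending, so a
   restart that raced ahead of us is consumed rather than lost.  */
void
suspend_sketch (struct thread_sketch *self)
{
  if (atomic_fetch_sub (&self->resume_count, 1) > 0)
    return;			/* a queued restart was pending */
  /* ... otherwise sigsuspend () until restart_sketch () runs for us;
     its increment returns the counter to zero ... */
}

Because the counter, not signal delivery, is the ground truth, a
restart aimed at a woken or cancelled waiter is never silently eaten,
and spinlock.c's queueing-back of restarts that arrive at the wrong
thread builds on the same bookkeeping.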
Diffstat (limited to 'sysdeps/i386')
-rw-r--r--  sysdeps/i386/i686/add_n.S | 104
1 file changed, 104 insertions, 0 deletions
diff --git a/sysdeps/i386/i686/add_n.S b/sysdeps/i386/i686/add_n.S
new file mode 100644
index 0000000000..5a1339f9f8
--- /dev/null
+++ b/sysdeps/i386/i686/add_n.S
@@ -0,0 +1,104 @@
+/* Add two limb vectors of the same length > 0 and store sum in a third
+   limb vector.
+   Copyright (C) 1992, 94, 95, 97, 98, 2000 Free Software Foundation, Inc.
+   This file is part of the GNU MP Library.
+
+   The GNU MP Library is free software; you can redistribute it and/or modify
+   it under the terms of the GNU Library General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or (at your
+   option) any later version.
+
+   The GNU MP Library is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
+   License for more details.
+
+   You should have received a copy of the GNU Library General Public License
+   along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+   the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+   MA 02111-1307, USA. */
+
+/*
+  INPUT PARAMETERS
+  res_ptr	(sp + 4)
+  s1_ptr	(sp + 8)
+  s2_ptr	(sp + 12)
+  size		(sp + 16)
+*/
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+	.text
+#ifdef PIC
+L(1):	addl    (%esp), %eax	/* add the return address (= L(0)) to %eax */
+	ret
+#endif
+ENTRY(__mpn_add_n)
+	pushl %edi
+	pushl %esi
+
+	movl 12(%esp),%edi		/* res_ptr */
+	movl 16(%esp),%esi		/* s1_ptr */
+	movl 20(%esp),%edx		/* s2_ptr */
+	movl 24(%esp),%ecx		/* size */
+
+	movl	%ecx,%eax
+	shrl	$3,%ecx			/* compute count for unrolled loop */
+	negl	%eax
+	andl	$7,%eax			/* get index where to start loop */
+	jz	L(oop)			/* necessary special case for 0 */
+	incl	%ecx			/* adjust loop count */
+	shll	$2,%eax			/* adjustment for pointers... */
+	subl	%eax,%edi		/* ... since they are offset ... */
+	subl	%eax,%esi		/* ... by a constant when we ... */
+	subl	%eax,%edx		/* ... enter the loop */
+	shrl	$2,%eax			/* restore previous value */
+#ifdef PIC
+/* Calculate start address in loop for PIC.  */
+	leal	(L(oop)-L(0)-3)(%eax,%eax,8),%eax
+	call	L(1)			/* pushes the address of L(0) */
+L(0):
+#else
+/* Calculate start address in loop for non-PIC.  */
+	leal	(L(oop) - 3)(%eax,%eax,8),%eax
+#endif
+	jmp	*%eax			/* jump into loop */
+	ALIGN (3)
+L(oop):	movl	(%esi),%eax
+	adcl	(%edx),%eax
+	movl	%eax,(%edi)
+	movl	4(%esi),%eax
+	adcl	4(%edx),%eax
+	movl	%eax,4(%edi)
+	movl	8(%esi),%eax
+	adcl	8(%edx),%eax
+	movl	%eax,8(%edi)
+	movl	12(%esi),%eax
+	adcl	12(%edx),%eax
+	movl	%eax,12(%edi)
+	movl	16(%esi),%eax
+	adcl	16(%edx),%eax
+	movl	%eax,16(%edi)
+	movl	20(%esi),%eax
+	adcl	20(%edx),%eax
+	movl	%eax,20(%edi)
+	movl	24(%esi),%eax
+	adcl	24(%edx),%eax
+	movl	%eax,24(%edi)
+	movl	28(%esi),%eax
+	adcl	28(%edx),%eax
+	movl	%eax,28(%edi)
+	leal	32(%edi),%edi
+	leal	32(%esi),%esi
+	leal	32(%edx),%edx
+	decl	%ecx
+	jnz	L(oop)
+
+	sbbl	%eax,%eax		/* %eax = carry ? -1 : 0 */
+	negl	%eax			/* return value: 0 or 1 */
+
+	popl %esi
+	popl %edi
+	ret
+END(__mpn_add_n)
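
A note on how the routine works, for readers following the diff.  The
loop adds eight limbs per iteration, with adcl chaining the carry from
limb to limb; the bookkeeping instructions are chosen so they never
clobber that carry (leal sets no flags at all, and decl updates ZF for
the jnz without touching CF).  Entry into the middle of the unrolled
loop is a computed jump in the style of Duff's device: %eax = (-size)
& 7 is the number of limb slots to skip, and since the first
three-instruction block encodes to 6 bytes while each later block
(with its 8-bit displacements) encodes to 9, the entry point is
L(oop) + 9*%eax - 3, which is exactly what leal
(L(oop)-3)(%eax,%eax,8) builds.  The three pointers are pre-biased by
4*%eax so the skipped displacements line up, and both entry paths
leave CF clear for the first adcl (andl always clears it, and the
shrl shifts out only zero bits).  Under PIC the address is formed
without a text relocation: the leal computes the offset from L(0),
and call L(1) pushes L(0)'s runtime address, which the thunk at the
top adds in before returning.

Flattened into C, the whole file computes the following.  This is a
hedged sketch, not glibc's code: mp_limb_t is a local stand-in for
the 32-bit limb type, and mpn_add_n_sketch is an illustrative name.

typedef unsigned long mp_limb_t;	/* 32-bit limb on i386 */

/* Add {s1_ptr, size} and {s2_ptr, size} into {res_ptr, size} and
   return the final carry-out (0 or 1), as __mpn_add_n does.  */
mp_limb_t
mpn_add_n_sketch (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
		  const mp_limb_t *s2_ptr, long size)
{
  mp_limb_t carry = 0;

  for (long i = 0; i < size; i++)
    {
      mp_limb_t t = s1_ptr[i] + s2_ptr[i];
      mp_limb_t c1 = t < s1_ptr[i];	/* carry out of the limb add */
      mp_limb_t s = t + carry;
      mp_limb_t c2 = s < carry;		/* carry out of adding carry-in */
      res_ptr[i] = s;
      carry = c1 | c2;			/* at most one can be set */
    }
  return carry;
}

The closing sbbl/negl pair materializes that return value from the
carry flag: sbbl %eax,%eax yields 0 or -1, and negl flips the -1 to
the 0-or-1 result.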