Diffstat (limited to 'sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S')
-rw-r--r--	sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S	641
1 file changed, 641 insertions, 0 deletions
diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S
new file mode 100644
index 0000000000..ec3538f561
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S
@@ -0,0 +1,641 @@
+/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <tcb-offsets.h>
+#include <pthread-errnos.h>
+#include <pthread-pi-defines.h>
+#include <kernel-features.h>
+#include <stap-probe.h>
+
+
+	.text
+
+/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
+	.globl	__pthread_cond_wait
+	.type	__pthread_cond_wait, @function
+	.align	16
+__pthread_cond_wait:
+.LSTARTCODE:
+	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
+	pushl	%ebp
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebp, 0)
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%edi, 0)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%esi, 0)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebx, 0)
+
+	xorl	%esi, %esi
+	movl	20(%esp), %ebx
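+	/* After the four register pushes above, the return address is at
+	   16(%esp), so cond is at 20(%esp) and mutex at 24(%esp).  */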
+
+	LIBC_PROBE (cond_wait, 2, 24(%esp), %ebx)
+
+	/* Get internal lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jnz	1f
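+	/* The cmpxchg above tried to switch the internal lock word from
+	   0 to 1; on contention we take the slow path at 1 below, which
+	   calls __lll_lock_wait.  */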
+
+	/* Store the reference to the mutex.  If a different mutex is
+	   already stored there, it is a bad application bug.  */
+2:	cmpl	$-1, dep_mutex(%ebx)
+	movl	24(%esp), %eax
+	je	15f
+	movl	%eax, dep_mutex(%ebx)
+
+	/* Unlock the mutex.  */
+15:	xorl	%edx, %edx
+	call	__pthread_mutex_unlock_usercnt
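+	/* Internal calling convention: the mutex is in %eax and the
+	   decrement flag in %edx; zero means the mutex's user count is
+	   left alone while the waiter still references it.  */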
+
+	testl	%eax, %eax
+	jne	12f
+
+	addl	$1, total_seq(%ebx)
+	adcl	$0, total_seq+4(%ebx)
+	addl	$1, cond_futex(%ebx)
+	addl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
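+	/* One more waiter: total_seq counts every thread that has ever
+	   blocked here, cond_futex is the word the kernel waits on, and
+	   the waiter count lives in the bits of cond_nwaiters above
+	   nwaiters_shift (the low bits carry the clock selection).  */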
+
+#define FRAME_SIZE 20
+	subl	$FRAME_SIZE, %esp
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+	cfi_remember_state
+
+	/* Get and store current wakeup_seq value.  */
+	movl	wakeup_seq(%ebx), %edi
+	movl	wakeup_seq+4(%ebx), %edx
+	movl	broadcast_seq(%ebx), %eax
+	movl	%edi, 4(%esp)
+	movl	%edx, 8(%esp)
+	movl	%eax, 12(%esp)
+
+	/* Reset the pi-requeued flag.  */
+8:	movl	$0, 16(%esp)
+	movl	cond_futex(%ebx), %ebp
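+	/* %ebp caches the futex value read under the internal lock; it
+	   is passed to the kernel as the expected value, so the wait
+	   fails immediately if a signal arrives after we unlock.  */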
+
+	/* Unlock.  */
+	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	jne	3f
+
+.LcleanupSTART:
+4:	call	__pthread_enable_asynccancel
+	movl	%eax, (%esp)
+
+	xorl	%ecx, %ecx
+	cmpl	$-1, dep_mutex(%ebx)
+	sete	%cl
+	je	18f
+
+	movl	dep_mutex(%ebx), %edi
+	/* Requeue to a non-robust PI mutex if the PI bit is set and
+	   the robust bit is not set.  */
+	movl	MUTEX_KIND(%edi), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	jne	18f
+
+	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+	movl	%ebp, %edx
+	xorl	%esi, %esi
+	addl	$cond_futex, %ebx
+.Ladd_cond_futex_pi:
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	subl	$cond_futex, %ebx
+.Lsub_cond_futex_pi:
+	/* Set the pi-requeued flag only if the kernel has returned 0. The
+	   kernel does not hold the mutex on error.  */
+	cmpl	$0, %eax
+	sete	16(%esp)
+	je	19f
+
+	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
+	   successfully, it has already locked the mutex for us and the
+	   pi_flag (16(%esp)) is set to denote that fact.  However, if another
+	   thread changed the futex value before we entered the wait, the
+	   syscall may return EAGAIN and the mutex is not locked.  We proceed
+	   as if it succeeded anyway, since later we look at the pi_flag to
+	   decide whether we got the mutex or not.  The sequence numbers then
+	   make sure that only one of the threads actually wakes up.  We retry
+	   using normal FUTEX_WAIT only if the kernel returned ENOSYS, since
+	   normal and PI futexes don't mix.
+
+	   Note that we don't check for EAGAIN specifically; we assume that
+	   the only other error the futex call could return is EAGAIN, since
+	   anything else would mean an error in our function.  It is too
+	   expensive to do that check for every call (which is quite common in
+	   case of a large number of threads), so it has been skipped.  */
+	cmpl	$-ENOSYS, %eax
+	jne	19f
+	xorl	%ecx, %ecx
+
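+	/* Branchless futex-flag selection: dep_mutex is (void *) -1 for
+	   process-shared condvars, so %ecx ends up 0 in that case and
+	   FUTEX_PRIVATE_FLAG otherwise.  */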
+18:	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+#if FUTEX_WAIT != 0
+	addl	$FUTEX_WAIT, %ecx
+#endif
+	movl	%ebp, %edx
+	addl	$cond_futex, %ebx
+.Ladd_cond_futex:
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	subl	$cond_futex, %ebx
+.Lsub_cond_futex:
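+	/* %ebx points at the futex word only for the duration of the
+	   syscall; the .Ladd_* and .Lsub_* labels bracket those spans in
+	   the exception table below so __condvar_w_cleanup2 can undo the
+	   offset if we are canceled inside them.  */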
+
+19:	movl	(%esp), %eax
+	call	__pthread_disable_asynccancel
+.LcleanupEND:
+
+	/* Lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jnz	5f
+
+6:	movl	broadcast_seq(%ebx), %eax
+	cmpl	12(%esp), %eax
+	jne	16f
+
+	movl	woken_seq(%ebx), %eax
+	movl	woken_seq+4(%ebx), %ecx
+
+	movl	wakeup_seq(%ebx), %edi
+	movl	wakeup_seq+4(%ebx), %edx
+
+	cmpl	8(%esp), %edx
+	jne	7f
+	cmpl	4(%esp), %edi
+	je	22f
+
+7:	cmpl	%ecx, %edx
+	jne	9f
+	cmpl	%eax, %edi
+	je	22f
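+	/* Spurious-wakeup check: if wakeup_seq still equals our snapshot
+	   no new signal arrived, and if it equals woken_seq every wakeup
+	   has already been consumed; either way we return to the futex
+	   wait via 22, which first releases a kernel-acquired PI
+	   mutex.  */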
+
+9:	addl	$1, woken_seq(%ebx)
+	adcl	$0, woken_seq+4(%ebx)
+
+	/* Unlock */
+16:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	movl	total_seq(%ebx), %eax
+	andl	total_seq+4(%ebx), %eax
+	cmpl	$0xffffffff, %eax
+	jne	17f
+	movl	cond_nwaiters(%ebx), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	17f
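+	/* total_seq of -1 marks the condvar as being destroyed; once the
+	   waiter count in cond_nwaiters reaches zero, wake the thread
+	   blocked in pthread_cond_destroy on the cond_nwaiters word.  */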
+
+	addl	$cond_nwaiters, %ebx
+	movl	$SYS_futex, %eax
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$1, %edx
+	ENTER_KERNEL
+	subl	$cond_nwaiters, %ebx
+
+17:	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	jne	10f
+
+	/* With requeue_pi, the mutex lock is held in the kernel.  */
+11:	movl	24+FRAME_SIZE(%esp), %eax
+	movl	16(%esp), %ecx
+	testl	%ecx, %ecx
+	jnz	21f
+
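+	/* Without requeue-PI the mutex must be reacquired in user space;
+	   with it, the kernel already handed us the mutex and 21 below
+	   only adjusts the ownership bookkeeping.  */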
+	call	__pthread_mutex_cond_lock
+20:	addl	$FRAME_SIZE, %esp
+	cfi_adjust_cfa_offset(-FRAME_SIZE)
+
+14:	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+
+	/* We return the result of the mutex_lock operation.  */
+	ret
+
+	cfi_restore_state
+
+21:	call	__pthread_mutex_cond_lock_adjust
+	xorl	%eax, %eax
+	jmp	20b
+
+	cfi_adjust_cfa_offset(-FRAME_SIZE)
+
+	/* We need to go back to futex_wait.  If we're using requeue_pi, then
+	   release the mutex we had acquired and go back.  */
+22:	movl	16(%esp), %edx
+	testl	%edx, %edx
+	jz	8b
+
+	/* Adjust the mutex values first and then unlock it.  The unlock
+	   should always succeed or else the kernel did not lock the mutex
+	   correctly.  */
+	movl	dep_mutex(%ebx), %eax
+	call	__pthread_mutex_cond_lock_adjust
+	xorl	%edx, %edx
+	call	__pthread_mutex_unlock_usercnt
+	jmp	8b
+
+	/* Initial locking failed.  */
+1:
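+	/* Select the lowlevel-lock flavor without a branch: %ecx becomes
+	   LLL_SHARED when dep_mutex is -1 (process-shared condvar) and
+	   LLL_PRIVATE otherwise.  The same pattern recurs in the other
+	   slow paths below.  */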
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+	jmp	2b
+
+	/* The initial unlocking of the mutex failed.  */
+12:
+	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	jne	14b
+
+	movl	%eax, %esi
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+
+	movl	%esi, %eax
+	jmp	14b
+
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+
+	/* Unlock in loop requires wakeup.  */
+3:
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+	jmp	4b
+
+	/* Locking in loop failed.  */
+5:
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+	jmp	6b
+
+	/* Unlock after loop requires wakeup.  */
+10:
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+	jmp	11b
+
+	.size	__pthread_cond_wait, .-__pthread_cond_wait
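+/* The default symbol version: pthread_cond_wait@@GLIBC_2.3.2 in
+   libpthread.  */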
+versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
+		  GLIBC_2_3_2)
+
+
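+/* Cleanup entry used while %ebx still points at the cond_futex field
+   (between the .Ladd_* and .Lsub_* labels): undo the offset, then
+   fall through into __condvar_w_cleanup.  */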
+	.type	__condvar_w_cleanup2, @function
+__condvar_w_cleanup2:
+	subl	$cond_futex, %ebx
+	.size	__condvar_w_cleanup2, .-__condvar_w_cleanup2
+.LSbl4:
+	.type	__condvar_w_cleanup, @function
+__condvar_w_cleanup:
+	movl	%eax, %esi
+
+	/* Get internal lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jz	1f
+
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+
+1:	movl	broadcast_seq(%ebx), %eax
+	cmpl	12(%esp), %eax
+	jne	3f
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case, the thread was woken and
+	   then canceled.  In this case we ignore the signal.  */
+	movl	total_seq(%ebx), %eax
+	movl	total_seq+4(%ebx), %edi
+	cmpl	wakeup_seq+4(%ebx), %edi
+	jb	6f
+	ja	7f
+	cmpl	wakeup_seq(%ebx), %eax
+	jbe	7f
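+	/* The compares above are a 64-bit unsigned comparison of
+	   total_seq against wakeup_seq: high words first, low words only
+	   when the high words are equal.  */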
+
+6:	addl	$1, wakeup_seq(%ebx)
+	adcl	$0, wakeup_seq+4(%ebx)
+	addl	$1, cond_futex(%ebx)
+
+7:	addl	$1, woken_seq(%ebx)
+	adcl	$0, woken_seq+4(%ebx)
+
+3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	xorl	%edi, %edi
+	movl	total_seq(%ebx), %eax
+	andl	total_seq+4(%ebx), %eax
+	cmpl	$0xffffffff, %eax
+	jne	4f
+	movl	cond_nwaiters(%ebx), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	4f
+
+	addl	$cond_nwaiters, %ebx
+	movl	$SYS_futex, %eax
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$1, %edx
+	ENTER_KERNEL
+	subl	$cond_nwaiters, %ebx
+	movl	$1, %edi
+
+4:	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	je	2f
+
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+
+	/* Wake up all waiters to make sure no signal gets lost.  */
+2:	testl	%edi, %edi
+	jnz	5f
+	addl	$cond_futex, %ebx
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_futex(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$SYS_futex, %eax
+	movl	$0x7fffffff, %edx
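+	/* %edx = INT_MAX: wake every thread waiting on cond_futex.  */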
+	ENTER_KERNEL
+
+	/* Lock the mutex only if we don't own it already.  This only happens
+	   in the case of PI mutexes, if we were canceled after a successful
+	   return of the futex syscall and before disabling async
+	   cancellation.  */
+5:	movl	24+FRAME_SIZE(%esp), %eax
+	movl	MUTEX_KIND(%eax), %ebx
+	andl	$(ROBUST_BIT|PI_BIT), %ebx
+	cmpl	$PI_BIT, %ebx
+	jne	8f
+
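+	/* A PI futex stores the owner's TID in the low bits of the lock
+	   word; if it matches our TID, the kernel handed us the mutex
+	   before we were canceled.  */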
+	movl	(%eax), %ebx
+	andl	$TID_MASK, %ebx
+	cmpl	%ebx, %gs:TID
+	jne	8f
+	/* We managed to get the lock.  Fix it up before returning.  */
+	call	__pthread_mutex_cond_lock_adjust
+	jmp	9f
+
+8:	call	__pthread_mutex_cond_lock
+
+9:	movl	%esi, (%esp)
+.LcallUR:
+	call	_Unwind_Resume
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	__condvar_w_cleanup, .-__condvar_w_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format (omit)
+	.byte	DW_EH_PE_omit			# @TType format (omit)
+	.byte	DW_EH_PE_sdata4			# call-site format
+						# DW_EH_PE_sdata4
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
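+	# Each record: region start (relative to .LSTARTCODE), region
+	# length, landing pad, action (uleb128 0 = cleanup only).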
+	.long	.LcleanupSTART-.LSTARTCODE
+	.long	.Ladd_cond_futex_pi-.LcleanupSTART
+	.long	__condvar_w_cleanup-.LSTARTCODE
+	.uleb128  0
+	.long	.Ladd_cond_futex_pi-.LSTARTCODE
+	.long	.Lsub_cond_futex_pi-.Ladd_cond_futex_pi
+	.long	__condvar_w_cleanup2-.LSTARTCODE
+	.uleb128  0
+	.long	.Lsub_cond_futex_pi-.LSTARTCODE
+	.long	.Ladd_cond_futex-.Lsub_cond_futex_pi
+	.long	__condvar_w_cleanup-.LSTARTCODE
+	.uleb128  0
+	.long	.Ladd_cond_futex-.LSTARTCODE
+	.long	.Lsub_cond_futex-.Ladd_cond_futex
+	.long	__condvar_w_cleanup2-.LSTARTCODE
+	.uleb128  0
+	.long	.Lsub_cond_futex-.LSTARTCODE
+	.long	.LcleanupEND-.Lsub_cond_futex
+	.long	__condvar_w_cleanup-.LSTARTCODE
+	.uleb128  0
+	.long	.LcallUR-.LSTARTCODE
+	.long	.LENDCODE-.LcallUR
+	.long	0
+	.uleb128  0
+.Lcstend:
+
+#ifdef SHARED
+	.hidden DW.ref.__gcc_personality_v0
+	.weak   DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align 4
+	.type   DW.ref.__gcc_personality_v0, @object
+	.size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+	.long   __gcc_personality_v0
+#endif