author    Torvald Riegel <triegel@redhat.com>	2016-05-25 23:43:36 +0200
committer Torvald Riegel <triegel@redhat.com>	2016-12-31 14:56:47 +0100
commit    ed19993b5b0d05d62cc883571519a67dae481a14 (patch)
tree      8956d8320ba5bb051cfdf76ba8f8d2f6e1907898 /sysdeps/unix/sysv/linux/i386
parent    c0ff3befa9861171498dd29666d32899e9d8145b (diff)
New condvar implementation that provides stronger ordering guarantees.
This is a new implementation for condition variables, required
after http://austingroupbugs.net/view.php?id=609 to fix bug 13165.  In
essence, we need to be stricter in which waiters a signal or broadcast
is required to wake up; this couldn't be solved using the old algorithm.
ISO C++ made a similar clarification, so this also fixes a bug in
current libstdc++, for example.

We can't use the old algorithm anymore because futexes do not guarantee
to wake in FIFO order.  Thus, when we wake, we can't simply let any
waiter grab a signal, but we need to ensure that one of the waiters
that happened before the signal is woken up.  This is something the previous
algorithm violated (see bug 13165).
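
As a concrete illustration of the required guarantee (just a sketch, not
part of this patch): if a waiter blocks on the condvar before a signal is
sent, that signal must be consumable by it or by another waiter that also
blocked earlier, never exclusively by a thread that only starts waiting
afterwards.

  #include <pthread.h>
  #include <stdbool.h>

  static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
  static bool flag;

  /* W: blocks on the condvar while holding the mutex, i.e. before the
     signal below is sent.  */
  static void *
  waiter (void *arg)
  {
    pthread_mutex_lock (&m);
    while (!flag)
      pthread_cond_wait (&c, &m);
    pthread_mutex_unlock (&m);
    return NULL;
  }

  /* S: signals after W has blocked.  The old algorithm allowed a thread
     that calls pthread_cond_wait only after this signal to consume it,
     leaving W blocked; that is the essence of bug 13165.  */
  static void
  signaler (void)
  {
    pthread_mutex_lock (&m);
    flag = true;
    pthread_cond_signal (&c);
    pthread_mutex_unlock (&m);
  }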

There's another issue specific to condvars: ABA issues on the underlying
futexes.  Unlike mutexes that have just three states, or semaphores that
have no tokens or a limited number of them, the state of a condvar is
the *order* of the waiters.  A waiter on a semaphore can grab a token
whenever one is available; a condvar waiter must only consume a signal
if it is eligible to do so as determined by the relative order of the
waiter and the signal.
Therefore, this new algorithm maintains two groups of waiters: Those
eligible to consume signals (G1), and those that have to wait until
previous waiters have consumed signals (G2).  Once G1 is empty, G2
becomes the new G1.  64-bit counters are used to avoid ABA issues.
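
To illustrate the group handling, here is a deliberately simplified,
single-threaded sketch (names are made up for the example; the real code
in nptl/pthread_cond_wait.c additionally deals with atomics, futexes,
cancellation, clocks, and pshared condvars):

  #include <stdint.h>

  struct two_group_condvar
  {
    uint64_t wseq;       /* Position handed to the next waiter (64-bit).  */
    uint64_t g1_start;   /* First waiter position belonging to G1.  */
    uint64_t g1_end;     /* One past the last waiter position in G1.  */
    uint64_t g1_signals; /* Signals posted for G1 but not yet consumed.  */
  };

  /* New waiters always join G2, i.e. the open range [g1_end, wseq).  */
  static uint64_t
  wait_register (struct two_group_condvar *cv)
  {
    return cv->wseq++;
  }

  /* Signals go to G1 only.  Once every waiter in G1 has a signal, G2 is
     promoted to become the new G1.  */
  static void
  signal_sketch (struct two_group_condvar *cv)
  {
    if (cv->g1_start + cv->g1_signals == cv->g1_end)
      {
        cv->g1_start = cv->g1_end;
        cv->g1_end = cv->wseq;
        cv->g1_signals = 0;
      }
    if (cv->g1_start + cv->g1_signals < cv->g1_end)
      cv->g1_signals++;
  }

  /* A waiter at position POS may consume a signal only while POS lies in
     [g1_start, g1_start + g1_signals).  Because 64-bit positions are never
     reused, a slow waiter cannot be fooled by counter wrap-around into
     consuming a signal meant for a much later group (the ABA problem).  */
  static int
  may_consume (const struct two_group_condvar *cv, uint64_t pos)
  {
    return pos >= cv->g1_start && pos < cv->g1_start + cv->g1_signals;
  }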

This condvar doesn't yet use a requeue optimization (i.e., on a broadcast,
waking just one thread and requeueing all others on the futex of the
mutex supplied by the program; a sketch of that pattern follows the list
below).  I don't think doing the requeue is necessarily the right
approach (but I haven't done real measurements yet):
* If a program expects to wake many threads at the same time and make
that scalable, a condvar isn't a great fit anyway because it requires
waiters to operate mutually exclusively (due to the mutex usage).  Thus, a
thundering herd problem is a scalability problem with or without the
optimization.  Using something like a semaphore might be more
appropriate in such a case.
* The scalability problem is actually at the mutex side; the condvar
could help (and it tries to with the requeue optimization), but it
should be the mutex that decides how that is done, and whether it is done
at all.
* Forcing all but one waiter into the kernel-side wait queue of the
mutex prevents/avoids the use of lock elision on the mutex.  Thus, it
prevents the only cure against the underlying scalability problem
inherent to condvars.
* If condvars use short critical sections (i.e., hold the mutex just to
check a binary flag or such), which they should do ideally, then forcing
all those waiters to proceed serially with kernel-based hand-off (i.e.,
futex ops in the mutex's contended state, via the futex wait queues) will
be less efficient than just letting a scalable mutex implementation take
care of it.  Our current mutex impl doesn't employ spinning at all, but
if critical sections are short, spinning can be much better.
* Doing the requeue stuff requires all waiters to always drive the mutex
into the contended state.  This leads to each waiter having to call
futex_wake after lock release, even if this wouldn't be necessary.
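
For reference, the requeue pattern described above would look roughly
like the following sketch (FUTEX_CMP_REQUEUE is the real futex operation,
but the helper itself is illustrative and not part of this patch or of
glibc):

  #include <linux/futex.h>
  #include <limits.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* On broadcast: wake at most one waiter on the condvar futex and move up
     to INT_MAX of the remaining waiters over to the mutex futex, provided
     *cond_futex still holds the expected value.  The moved waiters then get
     woken one at a time as the mutex is released, instead of all racing out
     of futex_wait at once (with the downsides listed above).  */
  static long
  broadcast_with_requeue (unsigned *cond_futex, unsigned *mutex_futex,
                          unsigned expected_cond_val)
  {
    return syscall (SYS_futex, cond_futex, FUTEX_CMP_REQUEUE,
                    1 /* wake */, (void *) (long) INT_MAX /* requeue */,
                    mutex_futex, expected_cond_val);
  }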

	[BZ #13165]
	* nptl/pthread_cond_broadcast.c (__pthread_cond_broadcast): Rewrite to
	use new algorithm.
	* nptl/pthread_cond_destroy.c (__pthread_cond_destroy): Likewise.
	* nptl/pthread_cond_init.c (__pthread_cond_init): Likewise.
	* nptl/pthread_cond_signal.c (__pthread_cond_signal): Likewise.
	* nptl/pthread_cond_wait.c (__pthread_cond_wait): Likewise.
	(__pthread_cond_timedwait): Move here from pthread_cond_timedwait.c.
	(__condvar_confirm_wakeup, __condvar_cancel_waiting,
	__condvar_cleanup_waiting, __condvar_dec_grefs,
	__pthread_cond_wait_common): New.
	(__condvar_cleanup): Remove.
	* nptl/pthread_condattr_getclock.c (pthread_condattr_getclock): Adapt.
	* nptl/pthread_condattr_setclock.c (pthread_condattr_setclock):
	Likewise.
	* nptl/pthread_condattr_getpshared.c (pthread_condattr_getpshared):
	Likewise.
	* nptl/pthread_condattr_init.c (pthread_condattr_init): Adapt.
	* nptl/tst-cond1.c: Add comment.
	* nptl/tst-cond20.c (do_test): Adapt.
	* nptl/tst-cond22.c (do_test): Likewise.
	* sysdeps/aarch64/nptl/bits/pthreadtypes.h (pthread_cond_t): Adapt
	structure.
	* sysdeps/arm/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise.
	* sysdeps/ia64/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise.
	* sysdeps/m68k/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise.
	* sysdeps/microblaze/nptl/bits/pthreadtypes.h (pthread_cond_t):
	Likewise.
	* sysdeps/mips/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise.
	* sysdeps/nios2/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise.
	* sysdeps/s390/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise.
	* sysdeps/sh/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise.
	* sysdeps/tile/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise.
	* sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h (pthread_cond_t):
	Likewise.
	* sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h (pthread_cond_t):
	Likewise.
	* sysdeps/x86/bits/pthreadtypes.h (pthread_cond_t): Likewise.
	* sysdeps/nptl/internaltypes.h (COND_NWAITERS_SHIFT): Remove.
	(COND_CLOCK_BITS): Adapt.
	* sysdeps/nptl/pthread.h (PTHREAD_COND_INITIALIZER): Adapt.
	* nptl/pthreadP.h (__PTHREAD_COND_CLOCK_MONOTONIC_MASK,
	__PTHREAD_COND_SHARED_MASK): New.
	* nptl/nptl-printers.py (CLOCK_IDS): Remove.
	(ConditionVariablePrinter, ConditionVariableAttributesPrinter): Adapt.
	* nptl/nptl_lock_constants.pysym: Adapt.
	* nptl/test-cond-printers.py: Adapt.
	* sysdeps/unix/sysv/linux/hppa/internaltypes.h (cond_compat_clear,
	cond_compat_check_and_clear): Adapt.
	* sysdeps/unix/sysv/linux/hppa/pthread_cond_timedwait.c: Remove file ...
	* sysdeps/unix/sysv/linux/hppa/pthread_cond_wait.c
	(__pthread_cond_timedwait): ... and move here.
	* nptl/DESIGN-condvar.txt: Remove file.
	* nptl/lowlevelcond.sym: Likewise.
	* nptl/pthread_cond_timedwait.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_broadcast.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_signal.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_timedwait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_wait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_broadcast.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_signal.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_wait.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
Diffstat (limited to 'sysdeps/unix/sysv/linux/i386')
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S    20
-rw-r--r--  sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S        241
-rw-r--r--  sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S           216
-rw-r--r--  sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S        974
-rw-r--r--  sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S             642
5 files changed, 0 insertions, 2093 deletions
diff --git a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S
deleted file mode 100644
index f697e5bd06..0000000000
--- a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (C) 2003-2016 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#define HAVE_CMOV 1
-#include "../pthread_cond_timedwait.S"
diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S
deleted file mode 100644
index 599668830d..0000000000
--- a/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S
+++ /dev/null
@@ -1,241 +0,0 @@
-/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <shlib-compat.h>
-#include <lowlevellock.h>
-#include <lowlevelcond.h>
-#include <kernel-features.h>
-#include <pthread-pi-defines.h>
-#include <pthread-errnos.h>
-#include <stap-probe.h>
-
-	.text
-
-	/* int pthread_cond_broadcast (pthread_cond_t *cond) */
-	.globl	__pthread_cond_broadcast
-	.type	__pthread_cond_broadcast, @function
-	.align	16
-__pthread_cond_broadcast:
-	cfi_startproc
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebx, 0)
-	pushl	%esi
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%esi, 0)
-	pushl	%edi
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%edi, 0)
-	pushl	%ebp
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebp, 0)
-	cfi_remember_state
-
-	movl	20(%esp), %ebx
-
-	LIBC_PROBE (cond_broadcast, 1, %edx)
-
-	/* Get internal lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %edx, (%ebx)
-#else
-	cmpxchgl %edx, cond_lock(%ebx)
-#endif
-	jnz	1f
-
-2:	addl	$cond_futex, %ebx
-	movl	total_seq+4-cond_futex(%ebx), %eax
-	movl	total_seq-cond_futex(%ebx), %ebp
-	cmpl	wakeup_seq+4-cond_futex(%ebx), %eax
-	ja	3f
-	jb	4f
-	cmpl	wakeup_seq-cond_futex(%ebx), %ebp
-	jna	4f
-
-	/* Cause all currently waiting threads to recognize they are
-	   woken up.  */
-3:	movl	%ebp, wakeup_seq-cond_futex(%ebx)
-	movl	%eax, wakeup_seq-cond_futex+4(%ebx)
-	movl	%ebp, woken_seq-cond_futex(%ebx)
-	movl	%eax, woken_seq-cond_futex+4(%ebx)
-	addl	%ebp, %ebp
-	addl	$1, broadcast_seq-cond_futex(%ebx)
-	movl	%ebp, (%ebx)
-
-	/* Get the address of the mutex used.  */
-	movl	dep_mutex-cond_futex(%ebx), %edi
-
-	/* Unlock.  */
-	LOCK
-	subl	$1, cond_lock-cond_futex(%ebx)
-	jne	7f
-
-	/* Don't use requeue for pshared condvars.  */
-8:	cmpl	$-1, %edi
-	je	9f
-
-	/* Do not use requeue for pshared condvars.  */
-	testl	$PS_BIT, MUTEX_KIND(%edi)
-	jne	9f
-
-	/* Requeue to a non-robust PI mutex if the PI bit is set and
-	   the robust bit is not set.  */
-	movl	MUTEX_KIND(%edi), %eax
-	andl	$(ROBUST_BIT|PI_BIT), %eax
-	cmpl	$PI_BIT, %eax
-	je	81f
-
-	/* Wake up all threads.  */
-#ifdef __ASSUME_PRIVATE_FUTEX
-	movl	$(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %ecx
-#else
-	movl	%gs:PRIVATE_FUTEX, %ecx
-	orl	$FUTEX_CMP_REQUEUE, %ecx
-#endif
-	movl	$SYS_futex, %eax
-	movl	$0x7fffffff, %esi
-	movl	$1, %edx
-	/* Get the address of the futex involved.  */
-# if MUTEX_FUTEX != 0
-	addl	$MUTEX_FUTEX, %edi
-# endif
-/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for sysenter.
-	ENTER_KERNEL  */
-	int	$0x80
-
-	/* For any kind of error, which mainly is EAGAIN, we try again
-	   with WAKE.  The general test also covers running on old
-	   kernels.  */
-	cmpl	$0xfffff001, %eax
-	jae	9f
-
-6:	xorl	%eax, %eax
-	popl	%ebp
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebp)
-	popl	%edi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edi)
-	popl	%esi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%esi)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	ret
-
-	cfi_restore_state
-
-81:	movl	$(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
-	movl	$SYS_futex, %eax
-	movl	$0x7fffffff, %esi
-	movl	$1, %edx
-	/* Get the address of the futex involved.  */
-# if MUTEX_FUTEX != 0
-	addl	$MUTEX_FUTEX, %edi
-# endif
-	int	$0x80
-
-	/* For any kind of error, which mainly is EAGAIN, we try again
-	with WAKE.  The general test also covers running on old
-	kernels.  */
-	cmpl	$0xfffff001, %eax
-	jb	6b
-	jmp	9f
-
-	/* Initial locking failed.  */
-1:
-#if cond_lock == 0
-	movl	%ebx, %edx
-#else
-	leal	cond_lock(%ebx), %edx
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_lock_wait
-	jmp	2b
-
-	.align	16
-	/* Unlock.  */
-4:	LOCK
-	subl	$1, cond_lock-cond_futex(%ebx)
-	je	6b
-
-	/* Unlock in loop requires wakeup.  */
-5:	leal	cond_lock-cond_futex(%ebx), %eax
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_futex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-	jmp	6b
-
-	/* Unlock in loop requires wakeup.  */
-7:	leal	cond_lock-cond_futex(%ebx), %eax
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_futex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-	jmp	8b
-
-9:	/* The futex requeue functionality is not available.  */
-	movl	$0x7fffffff, %edx
-#if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_futex(%ebx)
-	sete	%cl
-	subl	$1, %ecx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	addl	$FUTEX_WAKE, %ecx
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-	jmp	6b
-	cfi_endproc
-	.size	__pthread_cond_broadcast, .-__pthread_cond_broadcast
-versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast,
-		  GLIBC_2_3_2)
diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S
deleted file mode 100644
index 0038775d3f..0000000000
--- a/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S
+++ /dev/null
@@ -1,216 +0,0 @@
-/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <shlib-compat.h>
-#include <lowlevellock.h>
-#include <lowlevelcond.h>
-#include <kernel-features.h>
-#include <pthread-pi-defines.h>
-#include <pthread-errnos.h>
-#include <stap-probe.h>
-
-	.text
-
-	/* int pthread_cond_signal (pthread_cond_t *cond) */
-	.globl	__pthread_cond_signal
-	.type	__pthread_cond_signal, @function
-	.align	16
-__pthread_cond_signal:
-
-	cfi_startproc
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebx, 0)
-	pushl	%edi
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%edi, 0)
-	cfi_remember_state
-
-	movl	12(%esp), %edi
-
-	LIBC_PROBE (cond_signal, 1, %edi)
-
-	/* Get internal lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %edx, (%edi)
-#else
-	cmpxchgl %edx, cond_lock(%edi)
-#endif
-	jnz	1f
-
-2:	leal	cond_futex(%edi), %ebx
-	movl	total_seq+4(%edi), %eax
-	movl	total_seq(%edi), %ecx
-	cmpl	wakeup_seq+4(%edi), %eax
-#if cond_lock != 0
-	/* Must use leal to preserve the flags.  */
-	leal	cond_lock(%edi), %edi
-#endif
-	ja	3f
-	jb	4f
-	cmpl	wakeup_seq-cond_futex(%ebx), %ecx
-	jbe	4f
-
-	/* Bump the wakeup number.  */
-3:	addl	$1, wakeup_seq-cond_futex(%ebx)
-	adcl	$0, wakeup_seq-cond_futex+4(%ebx)
-	addl	$1, (%ebx)
-
-	/* Wake up one thread.  */
-	pushl	%esi
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%esi, 0)
-	pushl	%ebp
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebp, 0)
-
-#if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_futex(%ebx)
-	sete	%cl
-	je	8f
-
-	movl	dep_mutex-cond_futex(%ebx), %edx
-	/* Requeue to a non-robust PI mutex if the PI bit is set and
-	   the robust bit is not set.  */
-	movl	MUTEX_KIND(%edx), %eax
-	andl	$(ROBUST_BIT|PI_BIT), %eax
-	cmpl	$PI_BIT, %eax
-	je	9f
-
-8:	subl	$1, %ecx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	addl	$FUTEX_WAKE_OP, %ecx
-	movl	$SYS_futex, %eax
-	movl	$1, %edx
-	movl	$1, %esi
-	movl	$FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp
-	/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
-	   sysenter.
-	ENTER_KERNEL  */
-	int	$0x80
-	popl	%ebp
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebp)
-	popl	%esi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%esi)
-
-	/* For any kind of error, we try again with WAKE.
-	   The general test also covers running on old kernels.  */
-	cmpl	$-4095, %eax
-	jae	7f
-
-6:	xorl	%eax, %eax
-	popl	%edi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edi)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	ret
-
-	cfi_restore_state
-
-9:	movl	$(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
-	movl	$SYS_futex, %eax
-	movl	$1, %edx
-	xorl	%esi, %esi
-	movl	dep_mutex-cond_futex(%ebx), %edi
-	movl	(%ebx), %ebp
-	/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
-	   sysenter.
-	ENTER_KERNEL  */
-	int	$0x80
-	popl	%ebp
-	popl	%esi
-
-	leal	-cond_futex(%ebx), %edi
-
-	/* For any kind of error, we try again with WAKE.
-	   The general test also covers running on old kernels.  */
-	cmpl	$-4095, %eax
-	jb	4f
-
-7:
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	orl	$FUTEX_WAKE, %ecx
-
-	movl	$SYS_futex, %eax
-	/* %edx should be 1 already from $FUTEX_WAKE_OP syscall.
-	movl	$1, %edx  */
-	ENTER_KERNEL
-
-	/* Unlock.  Note that at this point %edi always points to
-	   cond_lock.  */
-4:	LOCK
-	subl	$1, (%edi)
-	je	6b
-
-	/* Unlock in loop requires wakeup.  */
-5:	movl	%edi, %eax
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_futex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-	jmp	6b
-
-	/* Initial locking failed.  */
-1:
-#if cond_lock == 0
-	movl	%edi, %edx
-#else
-	leal	cond_lock(%edi), %edx
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%edi)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_lock_wait
-	jmp	2b
-
-	cfi_endproc
-	.size	__pthread_cond_signal, .-__pthread_cond_signal
-versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
-		  GLIBC_2_3_2)
diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S
deleted file mode 100644
index 6256376923..0000000000
--- a/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S
+++ /dev/null
@@ -1,974 +0,0 @@
-/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <shlib-compat.h>
-#include <lowlevellock.h>
-#include <lowlevelcond.h>
-#include <pthread-errnos.h>
-#include <pthread-pi-defines.h>
-#include <kernel-features.h>
-#include <stap-probe.h>
-
-	.text
-
-/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
-			       const struct timespec *abstime)  */
-	.globl	__pthread_cond_timedwait
-	.type	__pthread_cond_timedwait, @function
-	.align	16
-__pthread_cond_timedwait:
-.LSTARTCODE:
-	cfi_startproc
-#ifdef SHARED
-	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
-			DW.ref.__gcc_personality_v0)
-	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
-#else
-	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
-	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
-#endif
-
-	pushl	%ebp
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebp, 0)
-	pushl	%edi
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%edi, 0)
-	pushl	%esi
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%esi, 0)
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebx, 0)
-
-	movl	20(%esp), %ebx
-	movl	28(%esp), %ebp
-
-	LIBC_PROBE (cond_timedwait, 3, %ebx, 24(%esp), %ebp)
-
-	cmpl	$1000000000, 4(%ebp)
-	movl	$EINVAL, %eax
-	jae	18f
-
-	/* Stack frame:
-
-	   esp + 32
-		    +--------------------------+
-	   esp + 24 | timeout value            |
-		    +--------------------------+
-	   esp + 20 | futex pointer            |
-		    +--------------------------+
-	   esp + 16 | pi-requeued flag         |
-		    +--------------------------+
-	   esp + 12 | old broadcast_seq value  |
-		    +--------------------------+
-	   esp +  4 | old wake_seq value       |
-		    +--------------------------+
-	   esp +  0 | old cancellation mode    |
-		    +--------------------------+
-	*/
-
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-# ifdef PIC
-	LOAD_PIC_REG (cx)
-	cmpl	$0, __have_futex_clock_realtime@GOTOFF(%ecx)
-# else
-	cmpl	$0, __have_futex_clock_realtime
-# endif
-	je	.Lreltmo
-#endif
-
-	/* Get internal lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %edx, (%ebx)
-#else
-	cmpxchgl %edx, cond_lock(%ebx)
-#endif
-	jnz	1f
-
-	/* Store the reference to the mutex.  If there is already a
-	   different value in there this is a bad user bug.  */
-2:	cmpl	$-1, dep_mutex(%ebx)
-	movl	24(%esp), %eax
-	je	17f
-	movl	%eax, dep_mutex(%ebx)
-
-	/* Unlock the mutex.  */
-17:	xorl	%edx, %edx
-	call	__pthread_mutex_unlock_usercnt
-
-	testl	%eax, %eax
-	jne	16f
-
-	addl	$1, total_seq(%ebx)
-	adcl	$0, total_seq+4(%ebx)
-	addl	$1, cond_futex(%ebx)
-	addl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
-
-#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
-# define FRAME_SIZE 24
-#else
-# define FRAME_SIZE 32
-#endif
-	subl	$FRAME_SIZE, %esp
-	cfi_adjust_cfa_offset(FRAME_SIZE)
-	cfi_remember_state
-
-	/* Get and store current wakeup_seq value.  */
-	movl	wakeup_seq(%ebx), %edi
-	movl	wakeup_seq+4(%ebx), %edx
-	movl	broadcast_seq(%ebx), %eax
-	movl	%edi, 4(%esp)
-	movl	%edx, 8(%esp)
-	movl	%eax, 12(%esp)
-
-	/* Reset the pi-requeued flag.  */
-	movl	$0, 16(%esp)
-
-	cmpl	$0, (%ebp)
-	movl	$-ETIMEDOUT, %esi
-	js	6f
-
-8:	movl	cond_futex(%ebx), %edi
-	movl	%edi, 20(%esp)
-
-	/* Unlock.  */
-	LOCK
-#if cond_lock == 0
-	subl	$1, (%ebx)
-#else
-	subl	$1, cond_lock(%ebx)
-#endif
-	jne	3f
-
-.LcleanupSTART:
-4:	call	__pthread_enable_asynccancel
-	movl	%eax, (%esp)
-
-	leal	(%ebp), %esi
-#if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	sete	%cl
-	je	40f
-
-	movl	dep_mutex(%ebx), %edi
-	/* Requeue to a non-robust PI mutex if the PI bit is set and
-	   the robust bit is not set.  */
-	movl	MUTEX_KIND(%edi), %eax
-	andl	$(ROBUST_BIT|PI_BIT), %eax
-	cmpl	$PI_BIT, %eax
-	jne	40f
-
-	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
-	/* The following only works like this because we only support
-	   two clocks, represented using a single bit.  */
-	testl	$1, cond_nwaiters(%ebx)
-	/* XXX Need to implement using sete instead of a jump.  */
-	jne	42f
-	orl	$FUTEX_CLOCK_REALTIME, %ecx
-
-42:	movl	20(%esp), %edx
-	addl	$cond_futex, %ebx
-.Ladd_cond_futex_pi:
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-	subl	$cond_futex, %ebx
-.Lsub_cond_futex_pi:
-	movl	%eax, %esi
-	/* Set the pi-requeued flag only if the kernel has returned 0. The
-	   kernel does not hold the mutex on ETIMEDOUT or any other error.  */
-	cmpl	$0, %eax
-	sete	16(%esp)
-	je	41f
-
-	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
-	   successfully, it has already locked the mutex for us and the
-	   pi_flag (16(%esp)) is set to denote that fact.  However, if another
-	   thread changed the futex value before we entered the wait, the
-	   syscall may return an EAGAIN and the mutex is not locked.  We go
-	   ahead with a success anyway since later we look at the pi_flag to
-	   decide if we got the mutex or not.  The sequence numbers then make
-	   sure that only one of the threads actually wake up.  We retry using
-	   normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
-	   and PI futexes don't mix.
-
-	   Note that we don't check for EAGAIN specifically; we assume that the
-	   only other error the futex function could return is EAGAIN (barring
-	   the ETIMEOUT of course, for the timeout case in futex) since
-	   anything else would mean an error in our function.  It is too
-	   expensive to do that check for every call (which is  quite common in
-	   case of a large number of threads), so it has been skipped.  */
-	cmpl	$-ENOSYS, %eax
-	jne	41f
-	xorl	%ecx, %ecx
-
-40:	subl	$1, %ecx
-	movl	$0, 16(%esp)
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	addl	$FUTEX_WAIT_BITSET, %ecx
-	/* The following only works like this because we only support
-	   two clocks, represented using a single bit.  */
-	testl	$1, cond_nwaiters(%ebx)
-	jne	30f
-	orl	$FUTEX_CLOCK_REALTIME, %ecx
-30:
-	movl	20(%esp), %edx
-	movl	$0xffffffff, %ebp
-	addl	$cond_futex, %ebx
-.Ladd_cond_futex:
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-	subl	$cond_futex, %ebx
-.Lsub_cond_futex:
-	movl	28+FRAME_SIZE(%esp), %ebp
-	movl	%eax, %esi
-
-41:	movl	(%esp), %eax
-	call	__pthread_disable_asynccancel
-.LcleanupEND:
-
-	/* Lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %edx, (%ebx)
-#else
-	cmpxchgl %edx, cond_lock(%ebx)
-#endif
-	jnz	5f
-
-6:	movl	broadcast_seq(%ebx), %eax
-	cmpl	12(%esp), %eax
-	jne	23f
-
-	movl	woken_seq(%ebx), %eax
-	movl	woken_seq+4(%ebx), %ecx
-
-	movl	wakeup_seq(%ebx), %edi
-	movl	wakeup_seq+4(%ebx), %edx
-
-	cmpl	8(%esp), %edx
-	jne	7f
-	cmpl	4(%esp), %edi
-	je	15f
-
-7:	cmpl	%ecx, %edx
-	jne	9f
-	cmp	%eax, %edi
-	jne	9f
-
-15:	cmpl	$-ETIMEDOUT, %esi
-	je	28f
-
-	/* We need to go back to futex_wait.  If we're using requeue_pi, then
-	   release the mutex we had acquired and go back.  */
-	movl	16(%esp), %edx
-	test	%edx, %edx
-	jz	8b
-
-	/* Adjust the mutex values first and then unlock it.  The unlock
-	   should always succeed or else the kernel did not lock the mutex
-	   correctly.  */
-	movl	dep_mutex(%ebx), %eax
-	call	__pthread_mutex_cond_lock_adjust
-	movl	dep_mutex(%ebx), %eax
-	xorl	%edx, %edx
-	call	__pthread_mutex_unlock_usercnt
-	jmp	8b
-
-28:	addl	$1, wakeup_seq(%ebx)
-	adcl	$0, wakeup_seq+4(%ebx)
-	addl	$1, cond_futex(%ebx)
-	movl	$ETIMEDOUT, %esi
-	jmp	14f
-
-23:	xorl	%esi, %esi
-	jmp	24f
-
-9:	xorl	%esi, %esi
-14:	addl	$1, woken_seq(%ebx)
-	adcl	$0, woken_seq+4(%ebx)
-
-24:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
-
-	/* Wake up a thread which wants to destroy the condvar object.  */
-	movl	total_seq(%ebx), %eax
-	andl	total_seq+4(%ebx), %eax
-	cmpl	$0xffffffff, %eax
-	jne	25f
-	movl	cond_nwaiters(%ebx), %eax
-	andl	$~((1 << nwaiters_shift) - 1), %eax
-	jne	25f
-
-	addl	$cond_nwaiters, %ebx
-	movl	$SYS_futex, %eax
-#if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
-	sete	%cl
-	subl	$1, %ecx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	addl	$FUTEX_WAKE, %ecx
-	movl	$1, %edx
-	ENTER_KERNEL
-	subl	$cond_nwaiters, %ebx
-
-25:	LOCK
-#if cond_lock == 0
-	subl	$1, (%ebx)
-#else
-	subl	$1, cond_lock(%ebx)
-#endif
-	jne	10f
-
-11:	movl	24+FRAME_SIZE(%esp), %eax
-	/* With requeue_pi, the mutex lock is held in the kernel.  */
-	movl	16(%esp), %ecx
-	testl	%ecx, %ecx
-	jnz	27f
-
-	call	__pthread_mutex_cond_lock
-26:	addl	$FRAME_SIZE, %esp
-	cfi_adjust_cfa_offset(-FRAME_SIZE)
-
-	/* We return the result of the mutex_lock operation if it failed.  */
-	testl	%eax, %eax
-#ifdef HAVE_CMOV
-	cmovel	%esi, %eax
-#else
-	jne	22f
-	movl	%esi, %eax
-22:
-#endif
-
-18:	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	popl	%esi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%esi)
-	popl	%edi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edi)
-	popl	%ebp
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebp)
-
-	ret
-
-	cfi_restore_state
-
-27:	call	__pthread_mutex_cond_lock_adjust
-	xorl	%eax, %eax
-	jmp	26b
-
-	cfi_adjust_cfa_offset(-FRAME_SIZE);
-	/* Initial locking failed.  */
-1:
-#if cond_lock == 0
-	movl	%ebx, %edx
-#else
-	leal	cond_lock(%ebx), %edx
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_lock_wait
-	jmp	2b
-
-	/* The initial unlocking of the mutex failed.  */
-16:
-	LOCK
-#if cond_lock == 0
-	subl	$1, (%ebx)
-#else
-	subl	$1, cond_lock(%ebx)
-#endif
-	jne	18b
-
-	movl	%eax, %esi
-#if cond_lock == 0
-	movl	%ebx, %eax
-#else
-	leal	cond_lock(%ebx), %eax
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-
-	movl	%esi, %eax
-	jmp	18b
-
-	cfi_adjust_cfa_offset(FRAME_SIZE)
-
-	/* Unlock in loop requires wakeup.  */
-3:
-#if cond_lock == 0
-	movl	%ebx, %eax
-#else
-	leal	cond_lock(%ebx), %eax
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-	jmp	4b
-
-	/* Locking in loop failed.  */
-5:
-#if cond_lock == 0
-	movl	%ebx, %edx
-#else
-	leal	cond_lock(%ebx), %edx
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_lock_wait
-	jmp	6b
-
-	/* Unlock after loop requires wakeup.  */
-10:
-#if cond_lock == 0
-	movl	%ebx, %eax
-#else
-	leal	cond_lock(%ebx), %eax
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-	jmp	11b
-
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-	cfi_adjust_cfa_offset(-FRAME_SIZE)
-.Lreltmo:
-	/* Get internal lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-# if cond_lock == 0
-	cmpxchgl %edx, (%ebx)
-# else
-	cmpxchgl %edx, cond_lock(%ebx)
-# endif
-	jnz	101f
-
-	/* Store the reference to the mutex.  If there is already a
-	   different value in there this is a bad user bug.  */
-102:	cmpl	$-1, dep_mutex(%ebx)
-	movl	24(%esp), %eax
-	je	117f
-	movl	%eax, dep_mutex(%ebx)
-
-	/* Unlock the mutex.  */
-117:	xorl	%edx, %edx
-	call	__pthread_mutex_unlock_usercnt
-
-	testl	%eax, %eax
-	jne	16b
-
-	addl	$1, total_seq(%ebx)
-	adcl	$0, total_seq+4(%ebx)
-	addl	$1, cond_futex(%ebx)
-	addl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
-
-	subl	$FRAME_SIZE, %esp
-	cfi_adjust_cfa_offset(FRAME_SIZE)
-
-	/* Get and store current wakeup_seq value.  */
-	movl	wakeup_seq(%ebx), %edi
-	movl	wakeup_seq+4(%ebx), %edx
-	movl	broadcast_seq(%ebx), %eax
-	movl	%edi, 4(%esp)
-	movl	%edx, 8(%esp)
-	movl	%eax, 12(%esp)
-
-	/* Reset the pi-requeued flag.  */
-	movl	$0, 16(%esp)
-
-	/* Get the current time.  */
-108:	movl	%ebx, %edx
-# ifdef __NR_clock_gettime
-	/* Get the clock number.  */
-	movl	cond_nwaiters(%ebx), %ebx
-	andl	$((1 << nwaiters_shift) - 1), %ebx
-	/* Only clocks 0 and 1 are allowed so far.  Both are handled in the
-	   kernel.  */
-	leal	24(%esp), %ecx
-	movl	$__NR_clock_gettime, %eax
-	ENTER_KERNEL
-	movl	%edx, %ebx
-
-	/* Compute relative timeout.  */
-	movl	(%ebp), %ecx
-	movl	4(%ebp), %edx
-	subl	24(%esp), %ecx
-	subl	28(%esp), %edx
-# else
-	/* Get the current time.  */
-	leal	24(%esp), %ebx
-	xorl	%ecx, %ecx
-	movl	$__NR_gettimeofday, %eax
-	ENTER_KERNEL
-	movl	%edx, %ebx
-
-	/* Compute relative timeout.  */
-	movl	28(%esp), %eax
-	movl	$1000, %edx
-	mul	%edx		/* Milli seconds to nano seconds.  */
-	movl	(%ebp), %ecx
-	movl	4(%ebp), %edx
-	subl	24(%esp), %ecx
-	subl	%eax, %edx
-# endif
-	jns	112f
-	addl	$1000000000, %edx
-	subl	$1, %ecx
-112:	testl	%ecx, %ecx
-	movl	$-ETIMEDOUT, %esi
-	js	106f
-
-	/* Store relative timeout.  */
-121:	movl	%ecx, 24(%esp)
-	movl	%edx, 28(%esp)
-
-	movl	cond_futex(%ebx), %edi
-	movl	%edi, 20(%esp)
-
-	/* Unlock.  */
-	LOCK
-# if cond_lock == 0
-	subl	$1, (%ebx)
-# else
-	subl	$1, cond_lock(%ebx)
-# endif
-	jne	103f
-
-.LcleanupSTART2:
-104:	call	__pthread_enable_asynccancel
-	movl	%eax, (%esp)
-
-	leal	24(%esp), %esi
-# if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-# endif
-	cmpl	$-1, dep_mutex(%ebx)
-	sete	%cl
-	subl	$1, %ecx
-# ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-# else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-# endif
-# if FUTEX_WAIT != 0
-	addl	$FUTEX_WAIT, %ecx
-# endif
-	movl	20(%esp), %edx
-	addl	$cond_futex, %ebx
-.Ladd_cond_futex2:
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-	subl	$cond_futex, %ebx
-.Lsub_cond_futex2:
-	movl	%eax, %esi
-
-141:	movl	(%esp), %eax
-	call	__pthread_disable_asynccancel
-.LcleanupEND2:
-
-
-	/* Lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-# if cond_lock == 0
-	cmpxchgl %edx, (%ebx)
-# else
-	cmpxchgl %edx, cond_lock(%ebx)
-# endif
-	jnz	105f
-
-106:	movl	broadcast_seq(%ebx), %eax
-	cmpl	12(%esp), %eax
-	jne	23b
-
-	movl	woken_seq(%ebx), %eax
-	movl	woken_seq+4(%ebx), %ecx
-
-	movl	wakeup_seq(%ebx), %edi
-	movl	wakeup_seq+4(%ebx), %edx
-
-	cmpl	8(%esp), %edx
-	jne	107f
-	cmpl	4(%esp), %edi
-	je	115f
-
-107:	cmpl	%ecx, %edx
-	jne	9b
-	cmp	%eax, %edi
-	jne	9b
-
-115:	cmpl	$-ETIMEDOUT, %esi
-	je	28b
-
-	jmp	8b
-
-	cfi_adjust_cfa_offset(-FRAME_SIZE)
-	/* Initial locking failed.  */
-101:
-# if cond_lock == 0
-	movl	%ebx, %edx
-# else
-	leal	cond_lock(%ebx), %edx
-# endif
-# if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-# endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-# if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-# endif
-	call	__lll_lock_wait
-	jmp	102b
-
-	cfi_adjust_cfa_offset(FRAME_SIZE)
-
-	/* Unlock in loop requires wakeup.  */
-103:
-# if cond_lock == 0
-	movl	%ebx, %eax
-# else
-	leal	cond_lock(%ebx), %eax
-# endif
-# if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-# endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-# if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-# endif
-	call	__lll_unlock_wake
-	jmp	104b
-
-	/* Locking in loop failed.  */
-105:
-# if cond_lock == 0
-	movl	%ebx, %edx
-# else
-	leal	cond_lock(%ebx), %edx
-# endif
-# if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-# endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-# if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-# endif
-	call	__lll_lock_wait
-	jmp	106b
-#endif
-
-	.size	__pthread_cond_timedwait, .-__pthread_cond_timedwait
-versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
-		  GLIBC_2_3_2)
-
-
-	.type	__condvar_tw_cleanup2, @function
-__condvar_tw_cleanup2:
-	subl	$cond_futex, %ebx
-	.size	__condvar_tw_cleanup2, .-__condvar_tw_cleanup2
-	.type	__condvar_tw_cleanup, @function
-__condvar_tw_cleanup:
-	movl	%eax, %esi
-
-	/* Get internal lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %edx, (%ebx)
-#else
-	cmpxchgl %edx, cond_lock(%ebx)
-#endif
-	jz	1f
-
-#if cond_lock == 0
-	movl	%ebx, %edx
-#else
-	leal	cond_lock(%ebx), %edx
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_lock_wait
-
-1:	movl	broadcast_seq(%ebx), %eax
-	cmpl	12(%esp), %eax
-	jne	3f
-
-	/* We increment the wakeup_seq counter only if it is lower than
-	   total_seq.  If this is not the case the thread was woken and
-	   then canceled.  In this case we ignore the signal.  */
-	movl	total_seq(%ebx), %eax
-	movl	total_seq+4(%ebx), %edi
-	cmpl	wakeup_seq+4(%ebx), %edi
-	jb	6f
-	ja	7f
-	cmpl	wakeup_seq(%ebx), %eax
-	jbe	7f
-
-6:	addl	$1, wakeup_seq(%ebx)
-	adcl	$0, wakeup_seq+4(%ebx)
-	addl	$1, cond_futex(%ebx)
-
-7:	addl	$1, woken_seq(%ebx)
-	adcl	$0, woken_seq+4(%ebx)
-
-3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
-
-	/* Wake up a thread which wants to destroy the condvar object.  */
-	xorl	%edi, %edi
-	movl	total_seq(%ebx), %eax
-	andl	total_seq+4(%ebx), %eax
-	cmpl	$0xffffffff, %eax
-	jne	4f
-	movl	cond_nwaiters(%ebx), %eax
-	andl	$~((1 << nwaiters_shift) - 1), %eax
-	jne	4f
-
-	addl	$cond_nwaiters, %ebx
-	movl	$SYS_futex, %eax
-#if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
-	sete	%cl
-	subl	$1, %ecx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	addl	$FUTEX_WAKE, %ecx
-	movl	$1, %edx
-	ENTER_KERNEL
-	subl	$cond_nwaiters, %ebx
-	movl	$1, %edi
-
-4:	LOCK
-#if cond_lock == 0
-	subl	$1, (%ebx)
-#else
-	subl	$1, cond_lock(%ebx)
-#endif
-	je	2f
-
-#if cond_lock == 0
-	movl	%ebx, %eax
-#else
-	leal	cond_lock(%ebx), %eax
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-
-	/* Wake up all waiters to make sure no signal gets lost.  */
-2:	testl	%edi, %edi
-	jnz	5f
-	addl	$cond_futex, %ebx
-#if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_futex(%ebx)
-	sete	%cl
-	subl	$1, %ecx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	addl	$FUTEX_WAKE, %ecx
-	movl	$SYS_futex, %eax
-	movl	$0x7fffffff, %edx
-	ENTER_KERNEL
-
-	/* Lock the mutex only if we don't own it already.  This only happens
-	   in case of PI mutexes, if we got cancelled after a successful
-	   return of the futex syscall and before disabling async
-	   cancellation.  */
-5:	movl	24+FRAME_SIZE(%esp), %eax
-	movl	MUTEX_KIND(%eax), %ebx
-	andl	$(ROBUST_BIT|PI_BIT), %ebx
-	cmpl	$PI_BIT, %ebx
-	jne	8f
-
-	movl	(%eax), %ebx
-	andl	$TID_MASK, %ebx
-	cmpl	%ebx, %gs:TID
-	jne	8f
-	/* We managed to get the lock.  Fix it up before returning.  */
-	call	__pthread_mutex_cond_lock_adjust
-	jmp	9f
-
-8:	call	__pthread_mutex_cond_lock
-
-9:	movl	%esi, (%esp)
-.LcallUR:
-	call	_Unwind_Resume
-	hlt
-.LENDCODE:
-	cfi_endproc
-	.size	__condvar_tw_cleanup, .-__condvar_tw_cleanup
-
-
-	.section .gcc_except_table,"a",@progbits
-.LexceptSTART:
-	.byte	DW_EH_PE_omit			# @LPStart format (omit)
-	.byte	DW_EH_PE_omit			# @TType format (omit)
-	.byte	DW_EH_PE_sdata4			# call-site format
-						# DW_EH_PE_sdata4
-	.uleb128 .Lcstend-.Lcstbegin
-.Lcstbegin:
-	.long	.LcleanupSTART-.LSTARTCODE
-	.long	.Ladd_cond_futex_pi-.LcleanupSTART
-	.long	__condvar_tw_cleanup-.LSTARTCODE
-	.uleb128  0
-	.long	.Ladd_cond_futex_pi-.LSTARTCODE
-	.long	.Lsub_cond_futex_pi-.Ladd_cond_futex_pi
-	.long	__condvar_tw_cleanup2-.LSTARTCODE
-	.uleb128  0
-	.long	.Lsub_cond_futex_pi-.LSTARTCODE
-	.long	.Ladd_cond_futex-.Lsub_cond_futex_pi
-	.long	__condvar_tw_cleanup-.LSTARTCODE
-	.uleb128  0
-	.long	.Ladd_cond_futex-.LSTARTCODE
-	.long	.Lsub_cond_futex-.Ladd_cond_futex
-	.long	__condvar_tw_cleanup2-.LSTARTCODE
-	.uleb128  0
-	.long	.Lsub_cond_futex-.LSTARTCODE
-	.long	.LcleanupEND-.Lsub_cond_futex
-	.long	__condvar_tw_cleanup-.LSTARTCODE
-	.uleb128  0
-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-	.long	.LcleanupSTART2-.LSTARTCODE
-	.long	.Ladd_cond_futex2-.LcleanupSTART2
-	.long	__condvar_tw_cleanup-.LSTARTCODE
-	.uleb128  0
-	.long	.Ladd_cond_futex2-.LSTARTCODE
-	.long	.Lsub_cond_futex2-.Ladd_cond_futex2
-	.long	__condvar_tw_cleanup2-.LSTARTCODE
-	.uleb128  0
-	.long	.Lsub_cond_futex2-.LSTARTCODE
-	.long	.LcleanupEND2-.Lsub_cond_futex2
-	.long	__condvar_tw_cleanup-.LSTARTCODE
-	.uleb128  0
-#endif
-	.long	.LcallUR-.LSTARTCODE
-	.long	.LENDCODE-.LcallUR
-	.long	0
-	.uleb128  0
-.Lcstend:
-
-
-#ifdef SHARED
-	.hidden DW.ref.__gcc_personality_v0
-	.weak	DW.ref.__gcc_personality_v0
-	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
-	.align	4
-	.type	DW.ref.__gcc_personality_v0, @object
-	.size	DW.ref.__gcc_personality_v0, 4
-DW.ref.__gcc_personality_v0:
-	.long   __gcc_personality_v0
-#endif
diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S
deleted file mode 100644
index 5016718801..0000000000
--- a/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S
+++ /dev/null
@@ -1,642 +0,0 @@
-/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <shlib-compat.h>
-#include <lowlevellock.h>
-#include <lowlevelcond.h>
-#include <tcb-offsets.h>
-#include <pthread-errnos.h>
-#include <pthread-pi-defines.h>
-#include <kernel-features.h>
-#include <stap-probe.h>
-
-
-	.text
-
-/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
-	.globl	__pthread_cond_wait
-	.type	__pthread_cond_wait, @function
-	.align	16
-__pthread_cond_wait:
-.LSTARTCODE:
-	cfi_startproc
-#ifdef SHARED
-	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
-			DW.ref.__gcc_personality_v0)
-	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
-#else
-	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
-	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
-#endif
-
-	pushl	%ebp
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebp, 0)
-	pushl	%edi
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%edi, 0)
-	pushl	%esi
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%esi, 0)
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebx, 0)
-
-	xorl	%esi, %esi
-	movl	20(%esp), %ebx
-
-	LIBC_PROBE (cond_wait, 2, 24(%esp), %ebx)
-
-	/* Get internal lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %edx, (%ebx)
-#else
-	cmpxchgl %edx, cond_lock(%ebx)
-#endif
-	jnz	1f
-
-	/* Store the reference to the mutex.  If there is already a
-	   different value in there this is a bad user bug.  */
-2:	cmpl	$-1, dep_mutex(%ebx)
-	movl	24(%esp), %eax
-	je	15f
-	movl	%eax, dep_mutex(%ebx)
-
-	/* Unlock the mutex.  */
-15:	xorl	%edx, %edx
-	call	__pthread_mutex_unlock_usercnt
-
-	testl	%eax, %eax
-	jne	12f
-
-	addl	$1, total_seq(%ebx)
-	adcl	$0, total_seq+4(%ebx)
-	addl	$1, cond_futex(%ebx)
-	addl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
-
-#define FRAME_SIZE 20
-	subl	$FRAME_SIZE, %esp
-	cfi_adjust_cfa_offset(FRAME_SIZE)
-	cfi_remember_state
-
-	/* Get and store current wakeup_seq value.  */
-	movl	wakeup_seq(%ebx), %edi
-	movl	wakeup_seq+4(%ebx), %edx
-	movl	broadcast_seq(%ebx), %eax
-	movl	%edi, 4(%esp)
-	movl	%edx, 8(%esp)
-	movl	%eax, 12(%esp)
-
-	/* Reset the pi-requeued flag.  */
-8:	movl	$0, 16(%esp)
-	movl	cond_futex(%ebx), %ebp
-
-	/* Unlock.  */
-	LOCK
-#if cond_lock == 0
-	subl	$1, (%ebx)
-#else
-	subl	$1, cond_lock(%ebx)
-#endif
-	jne	3f
-
-.LcleanupSTART:
-4:	call	__pthread_enable_asynccancel
-	movl	%eax, (%esp)
-
-	xorl	%ecx, %ecx
-	cmpl	$-1, dep_mutex(%ebx)
-	sete	%cl
-	je	18f
-
-	movl	dep_mutex(%ebx), %edi
-	/* Requeue to a non-robust PI mutex if the PI bit is set and
-	   the robust bit is not set.  */
-	movl	MUTEX_KIND(%edi), %eax
-	andl	$(ROBUST_BIT|PI_BIT), %eax
-	cmpl	$PI_BIT, %eax
-	jne	18f
-
-	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
-	movl	%ebp, %edx
-	xorl	%esi, %esi
-	addl	$cond_futex, %ebx
-.Ladd_cond_futex_pi:
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-	subl	$cond_futex, %ebx
-.Lsub_cond_futex_pi:
-	/* Set the pi-requeued flag only if the kernel has returned 0. The
-	   kernel does not hold the mutex on error.  */
-	cmpl	$0, %eax
-	sete	16(%esp)
-	je	19f
-
-	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
-	   successfully, it has already locked the mutex for us and the
-	   pi_flag (16(%esp)) is set to denote that fact.  However, if another
-	   thread changed the futex value before we entered the wait, the
-	   syscall may return an EAGAIN and the mutex is not locked.  We go
-	   ahead with a success anyway since later we look at the pi_flag to
-	   decide if we got the mutex or not.  The sequence numbers then make
-	   sure that only one of the threads actually wake up.  We retry using
-	   normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
-	   and PI futexes don't mix.
-
-	   Note that we don't check for EAGAIN specifically; we assume that the
-	   only other error the futex function could return is EAGAIN since
-	   anything else would mean an error in our function.  It is too
-	   expensive to do that check for every call (which is 	quite common in
-	   case of a large number of threads), so it has been skipped.  */
-	cmpl	$-ENOSYS, %eax
-	jne	19f
-	xorl	%ecx, %ecx
-
-18:	subl	$1, %ecx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-#if FUTEX_WAIT != 0
-	addl	$FUTEX_WAIT, %ecx
-#endif
-	movl	%ebp, %edx
-	addl	$cond_futex, %ebx
-.Ladd_cond_futex:
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-	subl	$cond_futex, %ebx
-.Lsub_cond_futex:
-
-19:	movl	(%esp), %eax
-	call	__pthread_disable_asynccancel
-.LcleanupEND:
-
-	/* Lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %edx, (%ebx)
-#else
-	cmpxchgl %edx, cond_lock(%ebx)
-#endif
-	jnz	5f
-
-6:	movl	broadcast_seq(%ebx), %eax
-	cmpl	12(%esp), %eax
-	jne	16f
-
-	movl	woken_seq(%ebx), %eax
-	movl	woken_seq+4(%ebx), %ecx
-
-	movl	wakeup_seq(%ebx), %edi
-	movl	wakeup_seq+4(%ebx), %edx
-
-	cmpl	8(%esp), %edx
-	jne	7f
-	cmpl	4(%esp), %edi
-	je	22f
-
-7:	cmpl	%ecx, %edx
-	jne	9f
-	cmp	%eax, %edi
-	je	22f
-
-9:	addl	$1, woken_seq(%ebx)
-	adcl	$0, woken_seq+4(%ebx)
-
-	/* Unlock */
-16:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
-
-	/* Wake up a thread which wants to destroy the condvar object.  */
-	movl	total_seq(%ebx), %eax
-	andl	total_seq+4(%ebx), %eax
-	cmpl	$0xffffffff, %eax
-	jne	17f
-	movl	cond_nwaiters(%ebx), %eax
-	andl	$~((1 << nwaiters_shift) - 1), %eax
-	jne	17f
-
-	addl	$cond_nwaiters, %ebx
-	movl	$SYS_futex, %eax
-#if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
-	sete	%cl
-	subl	$1, %ecx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	addl	$FUTEX_WAKE, %ecx
-	movl	$1, %edx
-	ENTER_KERNEL
-	subl	$cond_nwaiters, %ebx
-
-17:	LOCK
-#if cond_lock == 0
-	subl	$1, (%ebx)
-#else
-	subl	$1, cond_lock(%ebx)
-#endif
-	jne	10f
-
-	/* With requeue_pi, the mutex lock is held in the kernel.  */
-11:	movl	24+FRAME_SIZE(%esp), %eax
-	movl	16(%esp), %ecx
-	testl	%ecx, %ecx
-	jnz	21f
-
-	call	__pthread_mutex_cond_lock
-20:	addl	$FRAME_SIZE, %esp
-	cfi_adjust_cfa_offset(-FRAME_SIZE);
-
-14:	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	popl	%esi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%esi)
-	popl	%edi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edi)
-	popl	%ebp
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebp)
-
-	/* We return the result of the mutex_lock operation.  */
-	ret
-
-	cfi_restore_state
-
-21:	call	__pthread_mutex_cond_lock_adjust
-	xorl	%eax, %eax
-	jmp	20b
-
-	cfi_adjust_cfa_offset(-FRAME_SIZE);
-
-	/* We need to go back to futex_wait.  If we're using requeue_pi, then
-	   release the mutex we had acquired and go back.  */
-22:	movl	16(%esp), %edx
-	test	%edx, %edx
-	jz	8b
-
-	/* Adjust the mutex values first and then unlock it.  The unlock
-	   should always succeed or else the kernel did not lock the mutex
-	   correctly.  */
-	movl	dep_mutex(%ebx), %eax
-	call    __pthread_mutex_cond_lock_adjust
-	movl	dep_mutex(%ebx), %eax
-	xorl	%edx, %edx
-	call	__pthread_mutex_unlock_usercnt
-	jmp	8b
-
-	/* Initial locking failed.  */
-1:
-#if cond_lock == 0
-	movl	%ebx, %edx
-#else
-	leal	cond_lock(%ebx), %edx
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_lock_wait
-	jmp	2b
-
-	/* The initial unlocking of the mutex failed.  */
-12:
-	LOCK
-#if cond_lock == 0
-	subl	$1, (%ebx)
-#else
-	subl	$1, cond_lock(%ebx)
-#endif
-	jne	14b
-
-	movl	%eax, %esi
-#if cond_lock == 0
-	movl	%ebx, %eax
-#else
-	leal	cond_lock(%ebx), %eax
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-
-	movl	%esi, %eax
-	jmp	14b
-
-	cfi_adjust_cfa_offset(FRAME_SIZE)
-
-	/* Unlock in loop requires wakeup.  */
-3:
-#if cond_lock == 0
-	movl	%ebx, %eax
-#else
-	leal	cond_lock(%ebx), %eax
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-	jmp	4b
-
-	/* Locking in loop failed.  */
-5:
-#if cond_lock == 0
-	movl	%ebx, %edx
-#else
-	leal	cond_lock(%ebx), %edx
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_lock_wait
-	jmp	6b
-
-	/* Unlock after loop requires wakeup.  */
-10:
-#if cond_lock == 0
-	movl	%ebx, %eax
-#else
-	leal	cond_lock(%ebx), %eax
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-	jmp	11b
-
-	.size	__pthread_cond_wait, .-__pthread_cond_wait
-versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
-		  GLIBC_2_3_2)
-
-
-	.type	__condvar_w_cleanup2, @function
-__condvar_w_cleanup2:
-	subl	$cond_futex, %ebx
-	.size	__condvar_w_cleanup2, .-__condvar_w_cleanup2
-.LSbl4:
-	.type	__condvar_w_cleanup, @function
-__condvar_w_cleanup:
-	movl	%eax, %esi
-
-	/* Get internal lock.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %edx, (%ebx)
-#else
-	cmpxchgl %edx, cond_lock(%ebx)
-#endif
-	jz	1f
-
-#if cond_lock == 0
-	movl	%ebx, %edx
-#else
-	leal	cond_lock(%ebx), %edx
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_lock_wait
-
-1:	movl	broadcast_seq(%ebx), %eax
-	cmpl	12(%esp), %eax
-	jne	3f
-
-	/* We increment the wakeup_seq counter only if it is lower than
-	   total_seq.  If this is not the case the thread was woken and
-	   then canceled.  In this case we ignore the signal.  */
-	movl	total_seq(%ebx), %eax
-	movl	total_seq+4(%ebx), %edi
-	cmpl	wakeup_seq+4(%ebx), %edi
-	jb	6f
-	ja	7f
-	cmpl	wakeup_seq(%ebx), %eax
-	jbe	7f
-
-6:	addl	$1, wakeup_seq(%ebx)
-	adcl	$0, wakeup_seq+4(%ebx)
-	addl	$1, cond_futex(%ebx)
-
-7:	addl	$1, woken_seq(%ebx)
-	adcl	$0, woken_seq+4(%ebx)
-
-3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
-
-	/* Wake up a thread which wants to destroy the condvar object.  */
-	xorl	%edi, %edi
-	movl	total_seq(%ebx), %eax
-	andl	total_seq+4(%ebx), %eax
-	cmpl	$0xffffffff, %eax
-	jne	4f
-	movl	cond_nwaiters(%ebx), %eax
-	andl	$~((1 << nwaiters_shift) - 1), %eax
-	jne	4f
-
-	addl	$cond_nwaiters, %ebx
-	movl	$SYS_futex, %eax
-#if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
-	sete	%cl
-	subl	$1, %ecx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	addl	$FUTEX_WAKE, %ecx
-	movl	$1, %edx
-	ENTER_KERNEL
-	subl	$cond_nwaiters, %ebx
-	movl	$1, %edi
-
-4:	LOCK
-#if cond_lock == 0
-	subl	$1, (%ebx)
-#else
-	subl	$1, cond_lock(%ebx)
-#endif
-	je	2f
-
-#if cond_lock == 0
-	movl	%ebx, %eax
-#else
-	leal	cond_lock(%ebx), %eax
-#endif
-#if (LLL_SHARED-LLL_PRIVATE) > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex(%ebx)
-	setne	%cl
-	subl	$1, %ecx
-	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
-#if LLL_PRIVATE != 0
-	addl	$LLL_PRIVATE, %ecx
-#endif
-	call	__lll_unlock_wake
-
-	/* Wake up all waiters to make sure no signal gets lost.  */
-2:	testl	%edi, %edi
-	jnz	5f
-	addl	$cond_futex, %ebx
-#if FUTEX_PRIVATE_FLAG > 255
-	xorl	%ecx, %ecx
-#endif
-	cmpl	$-1, dep_mutex-cond_futex(%ebx)
-	sete	%cl
-	subl	$1, %ecx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	andl	$FUTEX_PRIVATE_FLAG, %ecx
-#else
-	andl	%gs:PRIVATE_FUTEX, %ecx
-#endif
-	addl	$FUTEX_WAKE, %ecx
-	movl	$SYS_futex, %eax
-	movl	$0x7fffffff, %edx
-	ENTER_KERNEL
-
-	/* Lock the mutex only if we don't own it already.  This only happens
-	   in case of PI mutexes, if we got cancelled after a successful
-	   return of the futex syscall and before disabling async
-	   cancellation.  */
-5:	movl	24+FRAME_SIZE(%esp), %eax
-	movl	MUTEX_KIND(%eax), %ebx
-	andl	$(ROBUST_BIT|PI_BIT), %ebx
-	cmpl	$PI_BIT, %ebx
-	jne	8f
-
-	movl	(%eax), %ebx
-	andl	$TID_MASK, %ebx
-	cmpl	%ebx, %gs:TID
-	jne	8f
-	/* We managed to get the lock.  Fix it up before returning.  */
-	call	__pthread_mutex_cond_lock_adjust
-	jmp	9f
-
-8:	call	__pthread_mutex_cond_lock
-
-9:	movl	%esi, (%esp)
-.LcallUR:
-	call	_Unwind_Resume
-	hlt
-.LENDCODE:
-	cfi_endproc
-	.size	__condvar_w_cleanup, .-__condvar_w_cleanup
-
-
-	.section .gcc_except_table,"a",@progbits
-.LexceptSTART:
-	.byte	DW_EH_PE_omit			# @LPStart format (omit)
-	.byte	DW_EH_PE_omit			# @TType format (omit)
-	.byte	DW_EH_PE_sdata4			# call-site format
-						# DW_EH_PE_sdata4
-	.uleb128 .Lcstend-.Lcstbegin
-.Lcstbegin:
-	.long	.LcleanupSTART-.LSTARTCODE
-	.long	.Ladd_cond_futex_pi-.LcleanupSTART
-	.long	__condvar_w_cleanup-.LSTARTCODE
-	.uleb128  0
-	.long	.Ladd_cond_futex_pi-.LSTARTCODE
-	.long	.Lsub_cond_futex_pi-.Ladd_cond_futex_pi
-	.long	__condvar_w_cleanup2-.LSTARTCODE
-	.uleb128  0
-	.long	.Lsub_cond_futex_pi-.LSTARTCODE
-	.long	.Ladd_cond_futex-.Lsub_cond_futex_pi
-	.long	__condvar_w_cleanup-.LSTARTCODE
-	.uleb128  0
-	.long	.Ladd_cond_futex-.LSTARTCODE
-	.long	.Lsub_cond_futex-.Ladd_cond_futex
-	.long	__condvar_w_cleanup2-.LSTARTCODE
-	.uleb128  0
-	.long	.Lsub_cond_futex-.LSTARTCODE
-	.long	.LcleanupEND-.Lsub_cond_futex
-	.long	__condvar_w_cleanup-.LSTARTCODE
-	.uleb128  0
-	.long	.LcallUR-.LSTARTCODE
-	.long	.LENDCODE-.LcallUR
-	.long	0
-	.uleb128  0
-.Lcstend:
-
-#ifdef SHARED
-	.hidden DW.ref.__gcc_personality_v0
-	.weak   DW.ref.__gcc_personality_v0
-	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
-	.align 4
-	.type   DW.ref.__gcc_personality_v0, @object
-	.size   DW.ref.__gcc_personality_v0, 4
-DW.ref.__gcc_personality_v0:
-	.long   __gcc_personality_v0
-#endif