author     Roland McGrath <roland@hack.frob.com>  2014-05-14 13:37:40 -0700
committer  Roland McGrath <roland@hack.frob.com>  2014-05-14 13:37:40 -0700
commit     348f8e8c26178d7e5b7b64c16ab5c1a773916781 (patch)
tree       e155c8e2bbf1f549127d7b9a40ed33f7926e2b1c /sysdeps/unix/sysv/linux/i386/i486
parent     5085af05bdfd7b4656abdd79e7da50022e3e1cad (diff)
Move remaining nptl/sysdeps/unix/sysv/linux/i386/ files.
Diffstat (limited to 'sysdeps/unix/sysv/linux/i386/i486')
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S          |  19
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S               | 466
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S         | 232
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S       | 186
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S     | 241
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S        | 216
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S     | 973
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S          | 641
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S      | 192
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S | 243
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S | 236
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S      | 151
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S      | 183
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/sem_post.S                   | 150
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S              | 327
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S                |  67
-rw-r--r--  sysdeps/unix/sysv/linux/i386/i486/sem_wait.S                   | 343
17 files changed, 4866 insertions(+), 0 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
new file mode 100644
index 0000000000..83e523174f
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
@@ -0,0 +1,19 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "lowlevellock.S"
diff --git a/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S b/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
new file mode 100644
index 0000000000..4ed46fc63e
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
@@ -0,0 +1,466 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
+
+#include <stap-probe.h>
+
+	.text
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+	movl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+	xorl	$(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+# define LOAD_FUTEX_WAKE(reg) \
+	xorl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%gs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+	movl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAKE, reg
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+# define LOAD_FUTEX_WAKE(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAKE, reg
+#endif
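+
+/* Note on the macros above: the caller's private-vs-shared choice
+   arrives in the register as LLL_PRIVATE (0) or LLL_SHARED
+   (FUTEX_PRIVATE_FLAG).  The xorl inverts that bit, so a private
+   caller ends up requesting FUTEX_PRIVATE_FLAG; when
+   __ASSUME_PRIVATE_FUTEX is not defined, the andl against
+   %gs:PRIVATE_FUTEX clears the flag again on kernels that predate
+   private futexes.  */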
+
+	.globl	__lll_lock_wait_private
+	.type	__lll_lock_wait_private,@function
+	.hidden	__lll_lock_wait_private
+	.align	16
+__lll_lock_wait_private:
+	cfi_startproc
+	pushl	%edx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%edx, -8)
+	cfi_offset(%ebx, -12)
+	cfi_offset(%esi, -16)
+
+	movl	$2, %edx
+	movl	%ecx, %ebx
+	xorl	%esi, %esi	/* No timeout.  */
+	LOAD_PRIVATE_FUTEX_WAIT (%ecx)
+
+	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
+	jne 2f
+
+1:	LIBC_PROBE (lll_lock_wait_private, 1, %ebx)
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+2:	movl	%edx, %eax
+	xchgl	%eax, (%ebx)	/* NB:	 lock is implied */
+
+	testl	%eax, %eax
+	jnz	1b
+
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%edx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edx)
+	ret
+	cfi_endproc
+	.size	__lll_lock_wait_private,.-__lll_lock_wait_private
+
+#ifdef NOT_IN_libc
+	.globl	__lll_lock_wait
+	.type	__lll_lock_wait,@function
+	.hidden	__lll_lock_wait
+	.align	16
+__lll_lock_wait:
+	cfi_startproc
+	pushl	%edx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%edx, -8)
+	cfi_offset(%ebx, -12)
+	cfi_offset(%esi, -16)
+
+	movl	%edx, %ebx
+	movl	$2, %edx
+	xorl	%esi, %esi	/* No timeout.  */
+	LOAD_FUTEX_WAIT (%ecx)
+
+	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
+	jne 2f
+
+1:	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+2:	movl	%edx, %eax
+	xchgl	%eax, (%ebx)	/* NB:	 lock is implied */
+
+	testl	%eax, %eax
+	jnz	1b
+
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%edx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edx)
+	ret
+	cfi_endproc
+	.size	__lll_lock_wait,.-__lll_lock_wait
+
+	/*      %ecx: futex
+		%esi: flags
+		%edx: timeout
+		%eax: futex value
+	*/
+	.globl	__lll_timedlock_wait
+	.type	__lll_timedlock_wait,@function
+	.hidden	__lll_timedlock_wait
+	.align	16
+__lll_timedlock_wait:
+	cfi_startproc
+	pushl	%ebp
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebp, 0)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebx, 0)
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+	LOAD_PIC_REG (bx)
+	cmpl	$0, __have_futex_clock_realtime@GOTOFF(%ebx)
+#  else
+	cmpl	$0, __have_futex_clock_realtime
+#  endif
+	je	.Lreltmo
+# endif
+
+	cmpl	$0, (%edx)
+	js	8f
+
+	movl	%ecx, %ebx
+	movl	%esi, %ecx
+	movl	%edx, %esi
+	movl	$0xffffffff, %ebp
+	LOAD_FUTEX_WAIT_ABS (%ecx)
+
+	movl	$2, %edx
+	cmpl	%edx, %eax
+	jne	2f
+
+1:	movl	$SYS_futex, %eax
+	movl	$2, %edx
+	ENTER_KERNEL
+
+2:	xchgl	%edx, (%ebx)	/* NB:   lock is implied */
+
+	testl	%edx, %edx
+	jz	3f
+
+	cmpl	$-ETIMEDOUT, %eax
+	je	4f
+	cmpl	$-EINVAL, %eax
+	jne	1b
+4:	movl	%eax, %edx
+	negl	%edx
+
+3:	movl	%edx, %eax
+7:	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+	ret
+
+8:	movl	$ETIMEDOUT, %eax
+	jmp	7b
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+	/* Check for a valid timeout value.  */
+	cmpl	$1000000000, 4(%edx)
+	jae	3f
+
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%esi, 0)
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%edi, 0)
+
+	/* Stack frame for the timespec and timeval structs.  */
+	subl	$8, %esp
+	cfi_adjust_cfa_offset(8)
+
+	movl	%ecx, %ebp
+	movl	%edx, %edi
+
+	movl	$2, %edx
+	xchgl	%edx, (%ebp)
+
+	test	%edx, %edx
+	je	6f
+
+1:
+	/* Get current time.  */
+	movl	%esp, %ebx
+	xorl	%ecx, %ecx
+	movl	$__NR_gettimeofday, %eax
+	ENTER_KERNEL
+
+	/* Compute relative timeout.  */
+	movl	4(%esp), %eax
+	movl	$1000, %edx
+	mul	%edx		/* Microseconds to nanoseconds.  */
+	movl	(%edi), %ecx
+	movl	4(%edi), %edx
+	subl	(%esp), %ecx
+	subl	%eax, %edx
+	jns	4f
+	addl	$1000000000, %edx
+	subl	$1, %ecx
+4:	testl	%ecx, %ecx
+	js	2f		/* Time is already up.  */
+
+	/* Store relative timeout.  */
+	movl	%ecx, (%esp)
+	movl	%edx, 4(%esp)
+
+	/* Futex call.  */
+	movl	%ebp, %ebx
+	movl	$2, %edx
+	movl	%esp, %esi
+	movl	16(%esp), %ecx
+	LOAD_FUTEX_WAIT (%ecx)
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	/* NB: %edx == 2 */
+	xchgl	%edx, (%ebp)
+
+	testl	%edx, %edx
+	je	6f
+
+	cmpl	$-ETIMEDOUT, %eax
+	jne	1b
+2:	movl	$ETIMEDOUT, %edx
+
+6:	addl	$8, %esp
+	cfi_adjust_cfa_offset(-8)
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+7:	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+	movl	%edx, %eax
+	ret
+
+3:	movl	$EINVAL, %edx
+	jmp	7b
+# endif
+	cfi_endproc
+	.size	__lll_timedlock_wait,.-__lll_timedlock_wait
+#endif
+
+	.globl	__lll_unlock_wake_private
+	.type	__lll_unlock_wake_private,@function
+	.hidden	__lll_unlock_wake_private
+	.align	16
+__lll_unlock_wake_private:
+	cfi_startproc
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ecx
+	cfi_adjust_cfa_offset(4)
+	pushl	%edx
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+	cfi_offset(%ecx, -12)
+	cfi_offset(%edx, -16)
+
+	movl	%eax, %ebx
+	movl	$0, (%eax)
+	LOAD_PRIVATE_FUTEX_WAKE (%ecx)
+	movl	$1, %edx	/* Wake one thread.  */
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	popl	%edx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edx)
+	popl	%ecx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ecx)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+	cfi_endproc
+	.size	__lll_unlock_wake_private,.-__lll_unlock_wake_private
+
+#ifdef NOT_IN_libc
+	.globl	__lll_unlock_wake
+	.type	__lll_unlock_wake,@function
+	.hidden	__lll_unlock_wake
+	.align	16
+__lll_unlock_wake:
+	cfi_startproc
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ecx
+	cfi_adjust_cfa_offset(4)
+	pushl	%edx
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+	cfi_offset(%ecx, -12)
+	cfi_offset(%edx, -16)
+
+	movl	%eax, %ebx
+	movl	$0, (%eax)
+	LOAD_FUTEX_WAKE (%ecx)
+	movl	$1, %edx	/* Wake one thread.  */
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	popl	%edx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edx)
+	popl	%ecx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ecx)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+	cfi_endproc
+	.size	__lll_unlock_wake,.-__lll_unlock_wake
+
+	.globl	__lll_timedwait_tid
+	.type	__lll_timedwait_tid,@function
+	.hidden	__lll_timedwait_tid
+	.align	16
+__lll_timedwait_tid:
+	pushl	%edi
+	pushl	%esi
+	pushl	%ebx
+	pushl	%ebp
+
+	movl	%eax, %ebp
+	movl	%edx, %edi
+	subl	$8, %esp
+
+	/* Get current time.  */
+2:	movl	%esp, %ebx
+	xorl	%ecx, %ecx
+	movl	$__NR_gettimeofday, %eax
+	ENTER_KERNEL
+
+	/* Compute relative timeout.  */
+	movl	4(%esp), %eax
+	movl	$1000, %edx
+	mul	%edx		/* Microseconds to nanoseconds.  */
+	movl	(%edi), %ecx
+	movl	4(%edi), %edx
+	subl	(%esp), %ecx
+	subl	%eax, %edx
+	jns	5f
+	addl	$1000000000, %edx
+	subl	$1, %ecx
+5:	testl	%ecx, %ecx
+	js	6f		/* Time is already up.  */
+
+	movl	%ecx, (%esp)	/* Store relative timeout.  */
+	movl	%edx, 4(%esp)
+
+	movl	(%ebp), %edx
+	testl	%edx, %edx
+	jz	4f
+
+	movl	%esp, %esi
+	/* XXX The kernel so far uses a global futex for the wakeup at
+	   all times.  */
+	xorl	%ecx, %ecx	/* movl $FUTEX_WAIT, %ecx */
+	movl	%ebp, %ebx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	cmpl	$0, (%ebx)
+	jne	1f
+4:	xorl	%eax, %eax
+
+3:	addl	$8, %esp
+	popl	%ebp
+	popl	%ebx
+	popl	%esi
+	popl	%edi
+	ret
+
+1:	cmpl	$-ETIMEDOUT, %eax
+	jne	2b
+6:	movl	$ETIMEDOUT, %eax
+	jmp	3b
+	.size	__lll_timedwait_tid,.-__lll_timedwait_tid
+#endif
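
The locking protocol implemented above is compact enough to restate in C for readers who do not want to trace i386 assembly.  The following is an illustrative sketch, not glibc's actual code: the futex() wrapper and the lll_lock/lll_unlock names are invented here, and the run-time private-flag selection is reduced to the *_PRIVATE futex ops.  The futex word takes three states: 0 (unlocked), 1 (locked, no waiters) and 2 (locked, waiters possible), which is exactly the movl $2 / xchgl dance in __lll_lock_wait_private.

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hypothetical wrapper; the assembly enters the kernel via
       ENTER_KERNEL instead.  */
    static long futex(int *uaddr, int op, int val)
    {
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    static void lll_lock(int *futex_word)
    {
        /* Fast path: uncontended 0 -> 1 transition.  */
        int expected = 0;
        if (__atomic_compare_exchange_n(futex_word, &expected, 1, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            return;

        /* Slow path, the loop in __lll_lock_wait_private: advertise a
           waiter by storing 2, then sleep as long as the word stays 2.  */
        while (__atomic_exchange_n(futex_word, 2, __ATOMIC_ACQUIRE) != 0)
            futex(futex_word, FUTEX_WAIT_PRIVATE, 2);
    }

    static void lll_unlock(int *futex_word)
    {
        /* If the old value was 2, a waiter may be asleep: wake one,
           as __lll_unlock_wake_private does.  */
        if (__atomic_exchange_n(futex_word, 0, __ATOMIC_RELEASE) != 1)
            futex(futex_word, FUTEX_WAKE_PRIVATE, 1);
    }

The exchange to 2 on every retry is what keeps the protocol correct: a waiter can never observe the lock as free without simultaneously re-marking it contended, so the eventual unlock knows a wake may be needed.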
diff --git a/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
new file mode 100644
index 0000000000..1b2933dadc
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
@@ -0,0 +1,232 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <kernel-features.h>
+
+	.text
+
+#define FUTEX_WAITERS		0x80000000
+#define FUTEX_OWNER_DIED	0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
+# endif
+#endif
+
+	.globl	__lll_robust_lock_wait
+	.type	__lll_robust_lock_wait,@function
+	.hidden	__lll_robust_lock_wait
+	.align	16
+__lll_robust_lock_wait:
+	cfi_startproc
+	pushl	%edx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%edx, -8)
+	cfi_offset(%ebx, -12)
+	cfi_offset(%esi, -16)
+
+	movl	%edx, %ebx
+	xorl	%esi, %esi	/* No timeout.  */
+	LOAD_FUTEX_WAIT (%ecx)
+
+4:	movl	%eax, %edx
+	orl	$FUTEX_WAITERS, %edx
+
+	testl	$FUTEX_OWNER_DIED, %eax
+	jnz	3f
+
+	cmpl	%edx, %eax	/* Waiters bit already set?  */
+	je	1f
+
+	LOCK
+	cmpxchgl %edx, (%ebx)
+	jnz	2f
+
+1:	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	movl	(%ebx), %eax
+
+2:	test	%eax, %eax
+	jne	4b
+
+	movl	%gs:TID, %edx
+	orl	$FUTEX_WAITERS, %edx
+	LOCK
+	cmpxchgl %edx, (%ebx)
+	jnz	4b
+	/* NB:	 %eax == 0 */
+
+3:	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%edx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edx)
+	ret
+	cfi_endproc
+	.size	__lll_robust_lock_wait,.-__lll_robust_lock_wait
+
+
+	.globl	__lll_robust_timedlock_wait
+	.type	__lll_robust_timedlock_wait,@function
+	.hidden	__lll_robust_timedlock_wait
+	.align	16
+__lll_robust_timedlock_wait:
+	cfi_startproc
+	/* Check for a valid timeout value.  */
+	cmpl	$1000000000, 4(%edx)
+	jae	3f
+
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebp
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%edi, -8)
+	cfi_offset(%esi, -12)
+	cfi_offset(%ebx, -16)
+	cfi_offset(%ebp, -20)
+
+	/* Stack frame for the timespec and timeval structs.  */
+	subl	$12, %esp
+	cfi_adjust_cfa_offset(12)
+
+	movl	%ecx, %ebp
+	movl	%edx, %edi
+
+1:	movl	%eax, 8(%esp)
+
+	/* Get current time.  */
+	movl	%esp, %ebx
+	xorl	%ecx, %ecx
+	movl	$__NR_gettimeofday, %eax
+	ENTER_KERNEL
+
+	/* Compute relative timeout.  */
+	movl	4(%esp), %eax
+	movl	$1000, %edx
+	mul	%edx		/* Microseconds to nanoseconds.  */
+	movl	(%edi), %ecx
+	movl	4(%edi), %edx
+	subl	(%esp), %ecx
+	subl	%eax, %edx
+	jns	4f
+	addl	$1000000000, %edx
+	subl	$1, %ecx
+4:	testl	%ecx, %ecx
+	js	8f		/* Time is already up.  */
+
+	/* Store relative timeout.  */
+	movl	%ecx, (%esp)
+	movl	%edx, 4(%esp)
+
+	movl	%ebp, %ebx
+
+	movl	8(%esp), %edx
+	movl	%edx, %eax
+	orl	$FUTEX_WAITERS, %edx
+
+	testl	$FUTEX_OWNER_DIED, %eax
+	jnz	6f
+
+	cmpl	%eax, %edx
+	je	2f
+
+	LOCK
+	cmpxchgl %edx, (%ebx)
+	movl	$0, %ecx	/* Must use mov to avoid changing cc.  */
+	jnz	5f
+
+2:
+	/* Futex call.  */
+	movl	%esp, %esi
+	movl	20(%esp), %ecx
+	LOAD_FUTEX_WAIT (%ecx)
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	movl	%eax, %ecx
+
+	movl	(%ebx), %eax
+
+5:	testl	%eax, %eax
+	jne	7f
+
+	movl	%gs:TID, %edx
+	orl	$FUTEX_WAITERS, %edx
+	LOCK
+	cmpxchgl %edx, (%ebx)
+	jnz	7f
+
+6:	addl	$12, %esp
+	cfi_adjust_cfa_offset(-12)
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	ret
+
+3:	movl	$EINVAL, %eax
+	ret
+
+	cfi_adjust_cfa_offset(28)
+	cfi_offset(%edi, -8)
+	cfi_offset(%esi, -12)
+	cfi_offset(%ebx, -16)
+	cfi_offset(%ebp, -20)
+	/* Check whether the time expired.  */
+7:	cmpl	$-ETIMEDOUT, %ecx
+	jne	1b
+
+8:	movl	$ETIMEDOUT, %eax
+	jmp	6b
+	cfi_endproc
+	.size	__lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
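
Robust locks store the owner's TID in the futex word, reserving the top two bits for FUTEX_WAITERS and FUTEX_OWNER_DIED so that a lock whose owner died can be handed to a waiter for state recovery.  A hedged C rendering of the __lll_robust_lock_wait loop follows; the function name and futex() helper are invented, and the run-time private-flag handling is dropped in favor of the plain shared op.

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    #ifndef FUTEX_WAITERS
    # define FUTEX_WAITERS    0x80000000
    # define FUTEX_OWNER_DIED 0x40000000
    #endif

    static long futex(int *uaddr, int op, int val)
    {
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    /* OLDVAL is the nonzero futex value the caller's failed cmpxchg saw;
       TID is the calling thread's kernel ID.  Returns 0 once the lock is
       held, or a value with FUTEX_OWNER_DIED set so the caller can run
       owner-death recovery.  */
    static int robust_lock_wait(int *futex_word, int oldval, pid_t tid)
    {
        while ((oldval & FUTEX_OWNER_DIED) == 0) {
            int withwaiters = (int) (oldval | FUTEX_WAITERS);

            /* Make sure the waiters bit is set before sleeping;
               then wait for the word to change.  */
            if (oldval == withwaiters
                || __atomic_compare_exchange_n(futex_word, &oldval,
                                               withwaiters, 0,
                                               __ATOMIC_ACQUIRE,
                                               __ATOMIC_RELAXED)) {
                futex(futex_word, FUTEX_WAIT, withwaiters);
                oldval = __atomic_load_n(futex_word, __ATOMIC_RELAXED);
            }

            /* If the lock is free now, claim it with our TID while
               keeping the waiters bit, so a later unlock still wakes
               the remaining sleepers.  */
            if (oldval == 0) {
                int expected = 0;
                if (__atomic_compare_exchange_n(futex_word, &expected,
                                                (int) (tid | FUTEX_WAITERS),
                                                0, __ATOMIC_ACQUIRE,
                                                __ATOMIC_RELAXED))
                    return 0;
                oldval = expected;
            }
        }
        return oldval;  /* Previous owner died holding the lock.  */
    }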
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
new file mode 100644
index 0000000000..aa755a1737
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
@@ -0,0 +1,186 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelbarrier.h>
+
+	.text
+
+	.globl	pthread_barrier_wait
+	.type	pthread_barrier_wait,@function
+	.align	16
+pthread_barrier_wait:
+	cfi_startproc
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+
+	movl	8(%esp), %ebx
+
+	/* Get the mutex.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+	cmpxchgl %edx, MUTEX(%ebx)
+	jnz	1f
+
+	/* One less waiter.  If this was the last one needed wake
+	   everybody.  */
+2:	subl	$1, LEFT(%ebx)
+	je	3f
+
+	/* There are more threads to come.  */
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%esi, -12)
+
+#if CURR_EVENT == 0
+	movl	(%ebx), %edx
+#else
+	movl	CURR_EVENT(%ebx), %edx
+#endif
+
+	/* Release the mutex.  */
+	LOCK
+	subl	$1, MUTEX(%ebx)
+	jne	6f
+
+	/* Wait for the remaining threads.  The call will return immediately
+	   if the CURR_EVENT memory has meanwhile been changed.  */
+7:
+#if FUTEX_WAIT == 0
+	movl	PRIVATE(%ebx), %ecx
+#else
+	movl	$FUTEX_WAIT, %ecx
+	orl	PRIVATE(%ebx), %ecx
+#endif
+	xorl	%esi, %esi
+8:	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	/* Don't return on spurious wakeups.  The syscall does not change
+	   any register except %eax so there is no need to reload any of
+	   them.  */
+#if CURR_EVENT == 0
+	cmpl	%edx, (%ebx)
+#else
+	cmpl	%edx, CURR_EVENT(%ebx)
+#endif
+	je	8b
+
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count unlock the object.  */
+	movl	$1, %edx
+	movl	INIT_COUNT(%ebx), %ecx
+	LOCK
+	xaddl	%edx, LEFT(%ebx)
+	subl	$1, %ecx
+	cmpl	%ecx, %edx
+	jne	10f
+
+	/* Release the mutex.  We cannot release the lock before
+	   waking the waiting threads since otherwise a new thread might
+	   arrive and get woken up, too.  */
+	LOCK
+	subl	$1, MUTEX(%ebx)
+	jne	9f
+
+	/* Note: %esi is still zero.  */
+10:	movl	%esi, %eax		/* != PTHREAD_BARRIER_SERIAL_THREAD */
+
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+
+	/* The necessary number of threads arrived.  */
+3:
+#if CURR_EVENT == 0
+	addl	$1, (%ebx)
+#else
+	addl	$1, CURR_EVENT(%ebx)
+#endif
+
+	/* Wake up all waiters.  The count is a signed number in the kernel
+	   so 0x7fffffff is the highest value.  */
+	movl	$0x7fffffff, %edx
+	movl	$FUTEX_WAKE, %ecx
+	orl	PRIVATE(%ebx), %ecx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count unlock the object.  */
+	movl	$1, %edx
+	movl	INIT_COUNT(%ebx), %ecx
+	LOCK
+	xaddl	%edx, LEFT(%ebx)
+	subl	$1, %ecx
+	cmpl	%ecx, %edx
+	jne	5f
+
+	/* Release the mutex.  We cannot release the lock before
+	   waking the waiting threads since otherwise a new thread might
+	   arrive and get woken up, too.  */
+	LOCK
+	subl	$1, MUTEX(%ebx)
+	jne	4f
+
+5:	orl	$-1, %eax		/* == PTHREAD_BARRIER_SERIAL_THREAD */
+
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+1:	movl	PRIVATE(%ebx), %ecx
+	leal	MUTEX(%ebx), %edx
+	xorl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
+	jmp	2b
+
+4:	movl	PRIVATE(%ebx), %ecx
+	leal	MUTEX(%ebx), %eax
+	xorl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
+	jmp	5b
+
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%esi, -12)
+6:	movl	PRIVATE(%ebx), %ecx
+	leal	MUTEX(%ebx), %eax
+	xorl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
+	jmp	7b
+
+9:	movl	PRIVATE(%ebx), %ecx
+	leal	MUTEX(%ebx), %eax
+	xorl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
+	jmp	10b
+	cfi_endproc
+	.size	pthread_barrier_wait,.-pthread_barrier_wait
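
Restated in C, the round structure of the barrier is easier to see: a low-level lock guards the LEFT counter, CURR_EVENT doubles as the futex word naming the current round, and the internal lock is deliberately held across the wake, to be released by whichever thread restores LEFT to INIT_COUNT, so no newcomer can slip into a round that is still draining.  The struct layout and names below are invented for illustration (the real offsets come from lowlevelbarrier.h), and the private-futex selection is fixed to the *_PRIVATE ops.

    #include <limits.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct barrier {
        unsigned lock;        /* MUTEX: guards the fields below.  */
        unsigned curr_event;  /* CURR_EVENT: futex word, one tick per round.  */
        int left;             /* LEFT: arrivals still needed this round.  */
        int init_count;       /* INIT_COUNT: thread count from init time.  */
    };

    static long futex(unsigned *uaddr, int op, unsigned val)
    {
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    /* Deliberately simple lock helpers; see the lowlevellock.S sketch.  */
    static void lll_lock(unsigned *w)
    {
        while (__atomic_exchange_n(w, 2, __ATOMIC_ACQUIRE) != 0)
            futex(w, FUTEX_WAIT_PRIVATE, 2);
    }
    static void lll_unlock(unsigned *w)
    {
        __atomic_store_n(w, 0, __ATOMIC_RELEASE);
        futex(w, FUTEX_WAKE_PRIVATE, 1);
    }

    int barrier_wait(struct barrier *b)
    {
        lll_lock(&b->lock);

        if (--b->left == 0) {
            /* Last arrival: open the barrier and wake everyone.  The
               kernel wake count is signed, hence INT_MAX, matching the
               0x7fffffff in the assembly.  */
            ++b->curr_event;
            futex(&b->curr_event, FUTEX_WAKE_PRIVATE, INT_MAX);

            if (__atomic_add_fetch(&b->left, 1, __ATOMIC_ACQ_REL)
                == b->init_count)
                lll_unlock(&b->lock);
            return -1;  /* PTHREAD_BARRIER_SERIAL_THREAD */
        }

        /* Remember the round, drop the lock, and sleep until the round
           counter moves; the recheck absorbs spurious wakeups just as
           the assembly loop at label 8 does.  */
        unsigned event = b->curr_event;
        lll_unlock(&b->lock);
        do
            futex(&b->curr_event, FUTEX_WAIT_PRIVATE, event);
        while (__atomic_load_n(&b->curr_event, __ATOMIC_ACQUIRE) == event);

        if (__atomic_add_fetch(&b->left, 1, __ATOMIC_ACQ_REL)
            == b->init_count)
            lll_unlock(&b->lock);
        return 0;
    }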
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
new file mode 100644
index 0000000000..60702ce8fe
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
@@ -0,0 +1,241 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <kernel-features.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
+#include <stap-probe.h>
+
+	.text
+
+	/* int pthread_cond_broadcast (pthread_cond_t *cond) */
+	.globl	__pthread_cond_broadcast
+	.type	__pthread_cond_broadcast, @function
+	.align	16
+__pthread_cond_broadcast:
+	cfi_startproc
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebx, 0)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%esi, 0)
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%edi, 0)
+	pushl	%ebp
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebp, 0)
+	cfi_remember_state
+
+	movl	20(%esp), %ebx
+
+	LIBC_PROBE (cond_broadcast, 1, %edx)
+
+	/* Get internal lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jnz	1f
+
+2:	addl	$cond_futex, %ebx
+	movl	total_seq+4-cond_futex(%ebx), %eax
+	movl	total_seq-cond_futex(%ebx), %ebp
+	cmpl	wakeup_seq+4-cond_futex(%ebx), %eax
+	ja	3f
+	jb	4f
+	cmpl	wakeup_seq-cond_futex(%ebx), %ebp
+	jna	4f
+
+	/* Cause all currently waiting threads to recognize they are
+	   woken up.  */
+3:	movl	%ebp, wakeup_seq-cond_futex(%ebx)
+	movl	%eax, wakeup_seq-cond_futex+4(%ebx)
+	movl	%ebp, woken_seq-cond_futex(%ebx)
+	movl	%eax, woken_seq-cond_futex+4(%ebx)
+	addl	%ebp, %ebp
+	addl	$1, broadcast_seq-cond_futex(%ebx)
+	movl	%ebp, (%ebx)
+
+	/* Get the address of the mutex used.  */
+	movl	dep_mutex-cond_futex(%ebx), %edi
+
+	/* Unlock.  */
+	LOCK
+	subl	$1, cond_lock-cond_futex(%ebx)
+	jne	7f
+
+	/* Don't use requeue for pshared condvars: a dep_mutex value of
+	   -1 marks a process-shared condvar.  */
+8:	cmpl	$-1, %edi
+	je	9f
+
+	/* Likewise if the mutex itself is process-shared.  */
+	testl	$PS_BIT, MUTEX_KIND(%edi)
+	jne	9f
+
+	/* Requeue to a non-robust PI mutex if the PI bit is set and
+	   the robust bit is not set.  */
+	movl	MUTEX_KIND(%edi), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	je	81f
+
+	/* Wake up all threads.  */
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %ecx
+#else
+	movl	%gs:PRIVATE_FUTEX, %ecx
+	orl	$FUTEX_CMP_REQUEUE, %ecx
+#endif
+	movl	$SYS_futex, %eax
+	movl	$0x7fffffff, %esi
+	movl	$1, %edx
+	/* Get the address of the futex involved.  */
+# if MUTEX_FUTEX != 0
+	addl	$MUTEX_FUTEX, %edi
+# endif
+/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for sysenter.
+	ENTER_KERNEL  */
+	int	$0x80
+
+	/* For any kind of error, mainly EAGAIN, we try again with
+	   WAKE.  The general test also covers running on old
+	   kernels.  */
+	cmpl	$0xfffff001, %eax
+	jae	9f
+
+6:	xorl	%eax, %eax
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+
+	cfi_restore_state
+
+81:	movl	$(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+	movl	$SYS_futex, %eax
+	movl	$0x7fffffff, %esi
+	movl	$1, %edx
+	/* Get the address of the futex involved.  */
+# if MUTEX_FUTEX != 0
+	addl	$MUTEX_FUTEX, %edi
+# endif
+	int	$0x80
+
+	/* For any kind of error, mainly EAGAIN, we try again with
+	   WAKE.  The general test also covers running on old
+	   kernels.  */
+	cmpl	$0xfffff001, %eax
+	jb	6b
+	jmp	9f
+
+	/* Initial locking failed.  */
+1:
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+	jmp	2b
+
+	.align	16
+	/* Unlock.  */
+4:	LOCK
+	subl	$1, cond_lock-cond_futex(%ebx)
+	je	6b
+
+	/* Unlock in loop requires wakeup.  */
+5:	leal	cond_lock-cond_futex(%ebx), %eax
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_futex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+	jmp	6b
+
+	/* Unlock in loop requires wakeup.  */
+7:	leal	cond_lock-cond_futex(%ebx), %eax
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_futex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+	jmp	8b
+
+9:	/* The futex requeue functionality is not available.  */
+	movl	$0x7fffffff, %edx
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_futex(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	jmp	6b
+	cfi_endproc
+	.size	__pthread_cond_broadcast, .-__pthread_cond_broadcast
+versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast,
+		  GLIBC_2_3_2)
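
The syscall worth dwelling on in the fast path above is FUTEX_CMP_REQUEUE: rather than waking every waiter, only one is woken and the rest are moved onto the mutex's futex, so they queue in the kernel instead of stampeding out of futex_wait only to block on the mutex immediately.  A sketch of that single call, with an invented helper name and the EAGAIN/old-kernel fallback to plain FUTEX_WAKE elided:

    #include <limits.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Wake one waiter on COND_FUTEX and requeue all others onto
       MUTEX_FUTEX, provided *COND_FUTEX still equals FUTEX_VAL
       (otherwise the kernel returns EAGAIN and the caller falls back
       to FUTEX_WAKE).  */
    static long broadcast_requeue(unsigned *cond_futex,
                                  unsigned *mutex_futex,
                                  unsigned futex_val)
    {
        return syscall(SYS_futex, cond_futex, FUTEX_CMP_REQUEUE,
                       1,                        /* wake this many...  */
                       (void *) (long) INT_MAX,  /* ...requeue the rest  */
                       mutex_futex, futex_val);
    }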
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
new file mode 100644
index 0000000000..b086d92dae
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
@@ -0,0 +1,216 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <kernel-features.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
+#include <stap-probe.h>
+
+	.text
+
+	/* int pthread_cond_signal (pthread_cond_t *cond) */
+	.globl	__pthread_cond_signal
+	.type	__pthread_cond_signal, @function
+	.align	16
+__pthread_cond_signal:
+
+	cfi_startproc
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebx, 0)
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%edi, 0)
+	cfi_remember_state
+
+	movl	12(%esp), %edi
+
+	LIBC_PROBE (cond_signal, 1, %edi)
+
+	/* Get internal lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%edi)
+#else
+	cmpxchgl %edx, cond_lock(%edi)
+#endif
+	jnz	1f
+
+2:	leal	cond_futex(%edi), %ebx
+	movl	total_seq+4(%edi), %eax
+	movl	total_seq(%edi), %ecx
+	cmpl	wakeup_seq+4(%edi), %eax
+#if cond_lock != 0
+	/* Must use leal to preserve the flags.  */
+	leal	cond_lock(%edi), %edi
+#endif
+	ja	3f
+	jb	4f
+	cmpl	wakeup_seq-cond_futex(%ebx), %ecx
+	jbe	4f
+
+	/* Bump the wakeup number.  */
+3:	addl	$1, wakeup_seq-cond_futex(%ebx)
+	adcl	$0, wakeup_seq-cond_futex+4(%ebx)
+	addl	$1, (%ebx)
+
+	/* Wake up one thread.  */
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%esi, 0)
+	pushl	%ebp
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebp, 0)
+
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_futex(%ebx)
+	sete	%cl
+	je	8f
+
+	movl	dep_mutex-cond_futex(%ebx), %edx
+	/* Requeue to a non-robust PI mutex if the PI bit is set and
+	   the robust bit is not set.  */
+	movl	MUTEX_KIND(%edx), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	je	9f
+
+8:	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE_OP, %ecx
+	movl	$SYS_futex, %eax
+	movl	$1, %edx
+	movl	$1, %esi
+	movl	$FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp
+	/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
+	   sysenter.
+	ENTER_KERNEL  */
+	int	$0x80
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+
+	/* For any kind of error, we try again with WAKE.
+	   The general test also covers running on old kernels.  */
+	cmpl	$-4095, %eax
+	jae	7f
+
+6:	xorl	%eax, %eax
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+
+	cfi_restore_state
+
+9:	movl	$(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+	movl	$SYS_futex, %eax
+	movl	$1, %edx
+	xorl	%esi, %esi
+	movl	dep_mutex-cond_futex(%ebx), %edi
+	movl	(%ebx), %ebp
+	/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
+	   sysenter.
+	ENTER_KERNEL  */
+	int	$0x80
+	popl	%ebp
+	popl	%esi
+
+	leal	-cond_futex(%ebx), %edi
+
+	/* For any kind of error, we try again with WAKE.
+	   The general test also covers running on old kernels.  */
+	cmpl	$-4095, %eax
+	jb	4f
+
+7:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	orl	$FUTEX_WAKE, %ecx
+
+	movl	$SYS_futex, %eax
+	/* %edx should be 1 already from $FUTEX_WAKE_OP syscall.
+	movl	$1, %edx  */
+	ENTER_KERNEL
+
+	/* Unlock.  Note that at this point %edi always points to
+	   cond_lock.  */
+4:	LOCK
+	subl	$1, (%edi)
+	je	6b
+
+	/* Unlock in loop requires wakeup.  */
+5:	movl	%edi, %eax
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_futex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+	jmp	6b
+
+	/* Initial locking failed.  */
+1:
+#if cond_lock == 0
+	movl	%edi, %edx
+#else
+	leal	cond_lock(%edi), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%edi)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+	jmp	2b
+
+	cfi_endproc
+	.size	__pthread_cond_signal, .-__pthread_cond_signal
+versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
+		  GLIBC_2_3_2)
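
The signal fast path relies on FUTEX_WAKE_OP, which folds two steps into one kernel entry: wake a waiter on the condvar futex, and atomically operate on (here: release) the condvar's internal lock word, waking a lock waiter if there was one.  glibc's FUTEX_OP_CLEAR_WAKE_IF_GT_ONE constant is shorthand for the encoded operation "store 0; wake if the old value was greater than 1".  A hedged sketch with an invented helper name:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long signal_wake_op(unsigned *cond_futex, unsigned *cond_lock)
    {
        /* Wake 1 waiter on cond_futex; then *cond_lock = 0, and if the
           old lock value was > 1 (i.e. it had waiters), wake 1 waiter
           on cond_lock as well.  */
        return syscall(SYS_futex, cond_futex, FUTEX_WAKE_OP,
                       1, (void *) 1L, cond_lock,
                       FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1));
    }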
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
new file mode 100644
index 0000000000..91888d774e
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
@@ -0,0 +1,973 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <pthread-errnos.h>
+#include <pthread-pi-defines.h>
+#include <kernel-features.h>
+#include <stap-probe.h>
+
+	.text
+
+/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
+			       const struct timespec *abstime)  */
+	.globl	__pthread_cond_timedwait
+	.type	__pthread_cond_timedwait, @function
+	.align	16
+__pthread_cond_timedwait:
+.LSTARTCODE:
+	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
+	pushl	%ebp
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebp, 0)
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%edi, 0)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%esi, 0)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebx, 0)
+
+	movl	20(%esp), %ebx
+	movl	28(%esp), %ebp
+
+	LIBC_PROBE (cond_timedwait, 3, %ebx, 24(%esp), %ebp)
+
+	cmpl	$1000000000, 4(%ebp)
+	movl	$EINVAL, %eax
+	jae	18f
+
+	/* Stack frame:
+
+	   esp + 32
+		    +--------------------------+
+	   esp + 24 | timeout value            |
+		    +--------------------------+
+	   esp + 20 | futex pointer            |
+		    +--------------------------+
+	   esp + 16 | pi-requeued flag         |
+		    +--------------------------+
+	   esp + 12 | old broadcast_seq value  |
+		    +--------------------------+
+	   esp +  4 | old wake_seq value       |
+		    +--------------------------+
+	   esp +  0 | old cancellation mode    |
+		    +--------------------------+
+	*/
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+	LOAD_PIC_REG (cx)
+	cmpl	$0, __have_futex_clock_realtime@GOTOFF(%ecx)
+# else
+	cmpl	$0, __have_futex_clock_realtime
+# endif
+	je	.Lreltmo
+#endif
+
+	/* Get internal lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jnz	1f
+
+	/* Store the reference to the mutex.  If there is already a
+	   different value in there this is a bad user bug.  */
+2:	cmpl	$-1, dep_mutex(%ebx)
+	movl	24(%esp), %eax
+	je	17f
+	movl	%eax, dep_mutex(%ebx)
+
+	/* Unlock the mutex.  */
+17:	xorl	%edx, %edx
+	call	__pthread_mutex_unlock_usercnt
+
+	testl	%eax, %eax
+	jne	16f
+
+	addl	$1, total_seq(%ebx)
+	adcl	$0, total_seq+4(%ebx)
+	addl	$1, cond_futex(%ebx)
+	addl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
+
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define FRAME_SIZE 24
+#else
+# define FRAME_SIZE 32
+#endif
+	subl	$FRAME_SIZE, %esp
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+	cfi_remember_state
+
+	/* Get and store current wakeup_seq value.  */
+	movl	wakeup_seq(%ebx), %edi
+	movl	wakeup_seq+4(%ebx), %edx
+	movl	broadcast_seq(%ebx), %eax
+	movl	%edi, 4(%esp)
+	movl	%edx, 8(%esp)
+	movl	%eax, 12(%esp)
+
+	/* Reset the pi-requeued flag.  */
+	movl	$0, 16(%esp)
+
+	cmpl	$0, (%ebp)
+	movl	$-ETIMEDOUT, %esi
+	js	6f
+
+8:	movl	cond_futex(%ebx), %edi
+	movl	%edi, 20(%esp)
+
+	/* Unlock.  */
+	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	jne	3f
+
+.LcleanupSTART:
+4:	call	__pthread_enable_asynccancel
+	movl	%eax, (%esp)
+
+	leal	(%ebp), %esi
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	sete	%cl
+	je	40f
+
+	movl	dep_mutex(%ebx), %edi
+	/* Requeue to a non-robust PI mutex if the PI bit is set and
+	   the robust bit is not set.  */
+	movl	MUTEX_KIND(%edi), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	jne	40f
+
+	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+	/* The following only works like this because we only support
+	   two clocks, represented using a single bit.  */
+	testl	$1, cond_nwaiters(%ebx)
+	/* XXX Need to implement using sete instead of a jump.  */
+	jne	42f
+	orl	$FUTEX_CLOCK_REALTIME, %ecx
+
+42:	movl	20(%esp), %edx
+	addl	$cond_futex, %ebx
+.Ladd_cond_futex_pi:
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	subl	$cond_futex, %ebx
+.Lsub_cond_futex_pi:
+	movl	%eax, %esi
+	/* Set the pi-requeued flag only if the kernel has returned 0. The
+	   kernel does not hold the mutex on ETIMEDOUT or any other error.  */
+	cmpl	$0, %eax
+	sete	16(%esp)
+	je	41f
+
+	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
+	   successfully, it has already locked the mutex for us and the
+	   pi_flag (16(%esp)) is set to denote that fact.  However, if another
+	   thread changed the futex value before we entered the wait, the
+	   syscall may return an EAGAIN and the mutex is not locked.  We go
+	   ahead with a success anyway since later we look at the pi_flag to
+	   decide if we got the mutex or not.  The sequence numbers then make
+	   sure that only one of the threads actually wake up.  We retry using
+	   normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
+	   and PI futexes don't mix.
+
+	   Note that we don't check for EAGAIN specifically; we assume that the
+	   only other error the futex function could return is EAGAIN (barring
+	   ETIMEDOUT of course, for the timeout case in futex) since
+	   anything else would mean an error in our function.  It is too
+	   expensive to do that check for every call (which is quite common in
+	   case of a large number of threads), so it has been skipped.  */
+	cmpl	$-ENOSYS, %eax
+	jne	41f
+	xorl	%ecx, %ecx
+
+40:	subl	$1, %ecx
+	movl	$0, 16(%esp)
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAIT_BITSET, %ecx
+	/* The following only works like this because we only support
+	   two clocks, represented using a single bit.  */
+	testl	$1, cond_nwaiters(%ebx)
+	jne	30f
+	orl	$FUTEX_CLOCK_REALTIME, %ecx
+30:
+	movl	20(%esp), %edx
+	movl	$0xffffffff, %ebp
+	addl	$cond_futex, %ebx
+.Ladd_cond_futex:
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	subl	$cond_futex, %ebx
+.Lsub_cond_futex:
+	movl	28+FRAME_SIZE(%esp), %ebp
+	movl	%eax, %esi
+
+41:	movl	(%esp), %eax
+	call	__pthread_disable_asynccancel
+.LcleanupEND:
+
+	/* Lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jnz	5f
+
+6:	movl	broadcast_seq(%ebx), %eax
+	cmpl	12(%esp), %eax
+	jne	23f
+
+	movl	woken_seq(%ebx), %eax
+	movl	woken_seq+4(%ebx), %ecx
+
+	movl	wakeup_seq(%ebx), %edi
+	movl	wakeup_seq+4(%ebx), %edx
+
+	cmpl	8(%esp), %edx
+	jne	7f
+	cmpl	4(%esp), %edi
+	je	15f
+
+7:	cmpl	%ecx, %edx
+	jne	9f
+	cmp	%eax, %edi
+	jne	9f
+
+15:	cmpl	$-ETIMEDOUT, %esi
+	je	28f
+
+	/* We need to go back to futex_wait.  If we're using requeue_pi, then
+	   release the mutex we had acquired and go back.  */
+	movl	16(%esp), %edx
+	test	%edx, %edx
+	jz	8b
+
+	/* Adjust the mutex values first and then unlock it.  The unlock
+	   should always succeed or else the kernel did not lock the mutex
+	   correctly.  */
+	movl	dep_mutex(%ebx), %eax
+	call	__pthread_mutex_cond_lock_adjust
+	xorl	%edx, %edx
+	call	__pthread_mutex_unlock_usercnt
+	jmp	8b
+
+28:	addl	$1, wakeup_seq(%ebx)
+	adcl	$0, wakeup_seq+4(%ebx)
+	addl	$1, cond_futex(%ebx)
+	movl	$ETIMEDOUT, %esi
+	jmp	14f
+
+23:	xorl	%esi, %esi
+	jmp	24f
+
+9:	xorl	%esi, %esi
+14:	addl	$1, woken_seq(%ebx)
+	adcl	$0, woken_seq+4(%ebx)
+
+24:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	movl	total_seq(%ebx), %eax
+	andl	total_seq+4(%ebx), %eax
+	cmpl	$0xffffffff, %eax
+	jne	25f
+	movl	cond_nwaiters(%ebx), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	25f
+
+	addl	$cond_nwaiters, %ebx
+	movl	$SYS_futex, %eax
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$1, %edx
+	ENTER_KERNEL
+	subl	$cond_nwaiters, %ebx
+
+25:	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	jne	10f
+
+11:	movl	24+FRAME_SIZE(%esp), %eax
+	/* With requeue_pi, the mutex lock is held in the kernel.  */
+	movl	16(%esp), %ecx
+	testl	%ecx, %ecx
+	jnz	27f
+
+	call	__pthread_mutex_cond_lock
+26:	addl	$FRAME_SIZE, %esp
+	cfi_adjust_cfa_offset(-FRAME_SIZE)
+
+	/* We return the result of the mutex_lock operation if it failed.  */
+	testl	%eax, %eax
+#ifdef HAVE_CMOV
+	cmovel	%esi, %eax
+#else
+	jne	22f
+	movl	%esi, %eax
+22:
+#endif
+
+18:	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+
+	ret
+
+	cfi_restore_state
+
+27:	call	__pthread_mutex_cond_lock_adjust
+	xorl	%eax, %eax
+	jmp	26b
+
+	cfi_adjust_cfa_offset(-FRAME_SIZE);
+	/* Initial locking failed.  */
+1:
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+	jmp	2b
+
+	/* The initial unlocking of the mutex failed.  */
+16:
+	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	jne	18b
+
+	movl	%eax, %esi
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+
+	movl	%esi, %eax
+	jmp	18b
+
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+
+	/* Unlock in loop requires wakeup.  */
+3:
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+	jmp	4b
+
+	/* Locking in loop failed.  */
+5:
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+	jmp	6b
+
+	/* Unlock after loop requires wakeup.  */
+10:
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+	jmp	11b
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	cfi_adjust_cfa_offset(-FRAME_SIZE)
+.Lreltmo:
+	/* Get internal lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+# if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+# else
+	cmpxchgl %edx, cond_lock(%ebx)
+# endif
+	jnz	101f
+
+	/* Store the reference to the mutex.  If there is already a
+	   different value in there this is a bad user bug.  */
+102:	cmpl	$-1, dep_mutex(%ebx)
+	movl	24(%esp), %eax
+	je	117f
+	movl	%eax, dep_mutex(%ebx)
+
+	/* Unlock the mutex.  */
+117:	xorl	%edx, %edx
+	call	__pthread_mutex_unlock_usercnt
+
+	testl	%eax, %eax
+	jne	16b
+
+	addl	$1, total_seq(%ebx)
+	adcl	$0, total_seq+4(%ebx)
+	addl	$1, cond_futex(%ebx)
+	addl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
+
+	subl	$FRAME_SIZE, %esp
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+
+	/* Get and store current wakeup_seq value.  */
+	movl	wakeup_seq(%ebx), %edi
+	movl	wakeup_seq+4(%ebx), %edx
+	movl	broadcast_seq(%ebx), %eax
+	movl	%edi, 4(%esp)
+	movl	%edx, 8(%esp)
+	movl	%eax, 12(%esp)
+
+	/* Reset the pi-requeued flag.  */
+	movl	$0, 16(%esp)
+
+	/* Get the current time.  */
+108:	movl	%ebx, %edx
+# ifdef __NR_clock_gettime
+	/* Get the clock number.  */
+	movl	cond_nwaiters(%ebx), %ebx
+	andl	$((1 << nwaiters_shift) - 1), %ebx
+	/* Only clocks 0 and 1 are allowed so far.  Both are handled in the
+	   kernel.  */
+	leal	24(%esp), %ecx
+	movl	$__NR_clock_gettime, %eax
+	ENTER_KERNEL
+	movl	%edx, %ebx
+
+	/* Compute relative timeout.  */
+	movl	(%ebp), %ecx
+	movl	4(%ebp), %edx
+	subl	24(%esp), %ecx
+	subl	28(%esp), %edx
+# else
+	/* Get the current time.  */
+	leal	24(%esp), %ebx
+	xorl	%ecx, %ecx
+	movl	$__NR_gettimeofday, %eax
+	ENTER_KERNEL
+	movl	%edx, %ebx
+
+	/* Compute relative timeout.  */
+	movl	28(%esp), %eax
+	movl	$1000, %edx
+	mul	%edx		/* Microseconds to nanoseconds.  */
+	movl	(%ebp), %ecx
+	movl	4(%ebp), %edx
+	subl	24(%esp), %ecx
+	subl	%eax, %edx
+# endif
+	jns	112f
+	addl	$1000000000, %edx
+	subl	$1, %ecx
+112:	testl	%ecx, %ecx
+	movl	$-ETIMEDOUT, %esi
+	js	106f
+
+	/* Store relative timeout.  */
+121:	movl	%ecx, 24(%esp)
+	movl	%edx, 28(%esp)
+
+	movl	cond_futex(%ebx), %edi
+	movl	%edi, 20(%esp)
+
+	/* Unlock.  */
+	LOCK
+# if cond_lock == 0
+	subl	$1, (%ebx)
+# else
+	subl	$1, cond_lock(%ebx)
+# endif
+	jne	103f
+
+.LcleanupSTART2:
+104:	call	__pthread_enable_asynccancel
+	movl	%eax, (%esp)
+
+	leal	24(%esp), %esi
+# if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+# endif
+	cmpl	$-1, dep_mutex(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+# ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+# else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+# endif
+# if FUTEX_WAIT != 0
+	addl	$FUTEX_WAIT, %ecx
+# endif
+	movl	20(%esp), %edx
+	addl	$cond_futex, %ebx
+.Ladd_cond_futex2:
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	subl	$cond_futex, %ebx
+.Lsub_cond_futex2:
+	movl	%eax, %esi
+
+141:	movl	(%esp), %eax
+	call	__pthread_disable_asynccancel
+.LcleanupEND2:
+
+
+	/* Lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+# if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+# else
+	cmpxchgl %edx, cond_lock(%ebx)
+# endif
+	jnz	105f
+
+106:	movl	broadcast_seq(%ebx), %eax
+	cmpl	12(%esp), %eax
+	jne	23b
+
+	movl	woken_seq(%ebx), %eax
+	movl	woken_seq+4(%ebx), %ecx
+
+	movl	wakeup_seq(%ebx), %edi
+	movl	wakeup_seq+4(%ebx), %edx
+
+	cmpl	8(%esp), %edx
+	jne	107f
+	cmpl	4(%esp), %edi
+	je	115f
+
+107:	cmpl	%ecx, %edx
+	jne	9b
+	cmp	%eax, %edi
+	jne	9b
+
+115:	cmpl	$-ETIMEDOUT, %esi
+	je	28b
+
+	jmp	8b
+
+	cfi_adjust_cfa_offset(-FRAME_SIZE)
+	/* Initial locking failed.  */
+101:
+# if cond_lock == 0
+	movl	%ebx, %edx
+# else
+	leal	cond_lock(%ebx), %edx
+# endif
+# if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+# endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+# if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+# endif
+	call	__lll_lock_wait
+	jmp	102b
+
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+
+	/* Unlock in loop requires wakeup.  */
+103:
+# if cond_lock == 0
+	movl	%ebx, %eax
+# else
+	leal	cond_lock(%ebx), %eax
+# endif
+# if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+# endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+# if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+# endif
+	call	__lll_unlock_wake
+	jmp	104b
+
+	/* Locking in loop failed.  */
+105:
+# if cond_lock == 0
+	movl	%ebx, %edx
+# else
+	leal	cond_lock(%ebx), %edx
+# endif
+# if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+# endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+# if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+# endif
+	call	__lll_lock_wait
+	jmp	106b
+#endif
+
+	.size	__pthread_cond_timedwait, .-__pthread_cond_timedwait
+versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
+		  GLIBC_2_3_2)
+
+
+	.type	__condvar_tw_cleanup2, @function
+__condvar_tw_cleanup2:
+	subl	$cond_futex, %ebx
+	.size	__condvar_tw_cleanup2, .-__condvar_tw_cleanup2
+	.type	__condvar_tw_cleanup, @function
+__condvar_tw_cleanup:
+	movl	%eax, %esi
+
+	/* Get internal lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jz	1f
+
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+
+1:	movl	broadcast_seq(%ebx), %eax
+	cmpl	12(%esp), %eax
+	jne	3f
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case the thread was woken and
+	   then canceled.  In this case we ignore the signal.  */
+	movl	total_seq(%ebx), %eax
+	movl	total_seq+4(%ebx), %edi
+	cmpl	wakeup_seq+4(%ebx), %edi
+	jb	6f
+	ja	7f
+	cmpl	wakeup_seq(%ebx), %eax
+	jbe	7f
+
+6:	addl	$1, wakeup_seq(%ebx)
+	adcl	$0, wakeup_seq+4(%ebx)
+	addl	$1, cond_futex(%ebx)
+
+7:	addl	$1, woken_seq(%ebx)
+	adcl	$0, woken_seq+4(%ebx)
+
+3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	xorl	%edi, %edi
+	movl	total_seq(%ebx), %eax
+	andl	total_seq+4(%ebx), %eax
+	cmpl	$0xffffffff, %eax
+	jne	4f
+	movl	cond_nwaiters(%ebx), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	4f
+
+	addl	$cond_nwaiters, %ebx
+	movl	$SYS_futex, %eax
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$1, %edx
+	ENTER_KERNEL
+	subl	$cond_nwaiters, %ebx
+	movl	$1, %edi
+
+4:	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	je	2f
+
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+
+	/* Wake up all waiters to make sure no signal gets lost.  */
+2:	testl	%edi, %edi
+	jnz	5f
+	addl	$cond_futex, %ebx
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_futex(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$SYS_futex, %eax
+	movl	$0x7fffffff, %edx
+	ENTER_KERNEL
+
+	/* Lock the mutex only if we don't own it already.  This only happens
+	   in case of PI mutexes, if we got cancelled after a successful
+	   return of the futex syscall and before disabling async
+	   cancellation.  */
+5:	movl	24+FRAME_SIZE(%esp), %eax
+	movl	MUTEX_KIND(%eax), %ebx
+	andl	$(ROBUST_BIT|PI_BIT), %ebx
+	cmpl	$PI_BIT, %ebx
+	jne	8f
+
+	movl	(%eax), %ebx
+	andl	$TID_MASK, %ebx
+	cmpl	%ebx, %gs:TID
+	jne	8f
+	/* We managed to get the lock.  Fix it up before returning.  */
+	call	__pthread_mutex_cond_lock_adjust
+	jmp	9f
+
+8:	call	__pthread_mutex_cond_lock
+
+9:	movl	%esi, (%esp)
+.LcallUR:
+	call	_Unwind_Resume
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	__condvar_tw_cleanup, .-__condvar_tw_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format (omit)
+	.byte	DW_EH_PE_omit			# @TType format (omit)
+	.byte	DW_EH_PE_sdata4			# call-site format
+						# DW_EH_PE_sdata4
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.long	.LcleanupSTART-.LSTARTCODE
+	.long	.Ladd_cond_futex_pi-.LcleanupSTART
+	.long	__condvar_tw_cleanup-.LSTARTCODE
+	.uleb128  0
+	.long	.Ladd_cond_futex_pi-.LSTARTCODE
+	.long	.Lsub_cond_futex_pi-.Ladd_cond_futex_pi
+	.long	__condvar_tw_cleanup2-.LSTARTCODE
+	.uleb128  0
+	.long	.Lsub_cond_futex_pi-.LSTARTCODE
+	.long	.Ladd_cond_futex-.Lsub_cond_futex_pi
+	.long	__condvar_tw_cleanup-.LSTARTCODE
+	.uleb128  0
+	.long	.Ladd_cond_futex-.LSTARTCODE
+	.long	.Lsub_cond_futex-.Ladd_cond_futex
+	.long	__condvar_tw_cleanup2-.LSTARTCODE
+	.uleb128  0
+	.long	.Lsub_cond_futex-.LSTARTCODE
+	.long	.LcleanupEND-.Lsub_cond_futex
+	.long	__condvar_tw_cleanup-.LSTARTCODE
+	.uleb128  0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.long	.LcleanupSTART2-.LSTARTCODE
+	.long	.Ladd_cond_futex2-.LcleanupSTART2
+	.long	__condvar_tw_cleanup-.LSTARTCODE
+	.uleb128  0
+	.long	.Ladd_cond_futex2-.LSTARTCODE
+	.long	.Lsub_cond_futex2-.Ladd_cond_futex2
+	.long	__condvar_tw_cleanup2-.LSTARTCODE
+	.uleb128  0
+	.long	.Lsub_cond_futex2-.LSTARTCODE
+	.long	.LcleanupEND2-.Lsub_cond_futex2
+	.long	__condvar_tw_cleanup-.LSTARTCODE
+	.uleb128  0
+#endif
+	.long	.LcallUR-.LSTARTCODE
+	.long	.LENDCODE-.LcallUR
+	.long	0
+	.uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	4
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+	.long   __gcc_personality_v0
+#endif
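
The condition tested twice in the cleanup above (total_seq forced to all-ones and the masked cond_nwaiters count dropping to zero) is the handshake with pthread_cond_destroy.  A minimal C sketch of that step follows; the struct layout, the NWAITERS_SHIFT value and the is_shared field (standing in for the dep_mutex == -1 compare) are assumptions made for illustration, not glibc's code.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#define NWAITERS_SHIFT 1  /* assumed: low bits of nwaiters are flag bits */

struct cond_sketch
{
  unsigned long long total_seq; /* ~0ULL once a destroyer is waiting */
  unsigned int nwaiters;        /* waiter count << NWAITERS_SHIFT */
  int is_shared;                /* stands in for dep_mutex == (void *) -1 */
};

/* If a destroyer is parked on the nwaiters futex and we were the last
   waiter, send one FUTEX_WAKE so pthread_cond_destroy can return.  */
static void
wake_destroyer_if_last (struct cond_sketch *c)
{
  if (c->total_seq != ~0ULL)
    return;                     /* no destroy in progress */
  if ((c->nwaiters & ~((1u << NWAITERS_SHIFT) - 1)) != 0)
    return;                     /* other waiters remain */
  int op = c->is_shared ? FUTEX_WAKE : FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
  syscall (SYS_futex, &c->nwaiters, op, 1, NULL, NULL, 0);
}
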
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
new file mode 100644
index 0000000000..c7e79712b8
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
@@ -0,0 +1,641 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <tcb-offsets.h>
+#include <pthread-errnos.h>
+#include <pthread-pi-defines.h>
+#include <kernel-features.h>
+#include <stap-probe.h>
+
+
+	.text
+
+/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
+	.globl	__pthread_cond_wait
+	.type	__pthread_cond_wait, @function
+	.align	16
+__pthread_cond_wait:
+.LSTARTCODE:
+	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
+	pushl	%ebp
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebp, 0)
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%edi, 0)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%esi, 0)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset(%ebx, 0)
+
+	xorl	%esi, %esi
+	movl	20(%esp), %ebx
+
+	LIBC_PROBE (cond_wait, 2, 24(%esp), %ebx)
+
+	/* Get internal lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jnz	1f
+
+	/* Store the reference to the mutex.  If there is already a
+	   different value in there, this is a bad user bug.  */
+2:	cmpl	$-1, dep_mutex(%ebx)
+	movl	24(%esp), %eax
+	je	15f
+	movl	%eax, dep_mutex(%ebx)
+
+	/* Unlock the mutex.  */
+15:	xorl	%edx, %edx
+	call	__pthread_mutex_unlock_usercnt
+
+	testl	%eax, %eax
+	jne	12f
+
+	addl	$1, total_seq(%ebx)
+	adcl	$0, total_seq+4(%ebx)
+	addl	$1, cond_futex(%ebx)
+	addl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
+
+#define FRAME_SIZE 20
+	subl	$FRAME_SIZE, %esp
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+	cfi_remember_state
+
+	/* Get and store current wakeup_seq value.  */
+	movl	wakeup_seq(%ebx), %edi
+	movl	wakeup_seq+4(%ebx), %edx
+	movl	broadcast_seq(%ebx), %eax
+	movl	%edi, 4(%esp)
+	movl	%edx, 8(%esp)
+	movl	%eax, 12(%esp)
+
+	/* Reset the pi-requeued flag.  */
+8:	movl	$0, 16(%esp)
+	movl	cond_futex(%ebx), %ebp
+
+	/* Unlock.  */
+	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	jne	3f
+
+.LcleanupSTART:
+4:	call	__pthread_enable_asynccancel
+	movl	%eax, (%esp)
+
+	xorl	%ecx, %ecx
+	cmpl	$-1, dep_mutex(%ebx)
+	sete	%cl
+	je	18f
+
+	movl	dep_mutex(%ebx), %edi
+	/* Requeue to a non-robust PI mutex if the PI bit is set and
+	   the robust bit is not set.  */
+	movl	MUTEX_KIND(%edi), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	jne	18f
+
+	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+	movl	%ebp, %edx
+	xorl	%esi, %esi
+	addl	$cond_futex, %ebx
+.Ladd_cond_futex_pi:
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	subl	$cond_futex, %ebx
+.Lsub_cond_futex_pi:
+	/* Set the pi-requeued flag only if the kernel has returned 0. The
+	   kernel does not hold the mutex on error.  */
+	cmpl	$0, %eax
+	sete	16(%esp)
+	je	19f
+
+	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
+	   successfully, it has already locked the mutex for us and the
+	   pi_flag (16(%esp)) is set to denote that fact.  However, if another
+	   thread changed the futex value before we entered the wait, the
+	   syscall may return EAGAIN and the mutex is not locked.  We treat
+	   this as a success anyway, since later we look at the pi_flag to
+	   decide whether we got the mutex.  The sequence numbers then make
+	   sure that only one of the threads actually wakes up.  We retry using
+	   normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
+	   and PI futexes don't mix.
+
+	   Note that we don't check for EAGAIN specifically; we assume that the
+	   only other error the futex function could return is EAGAIN since
+	   anything else would mean an error in our function.  It is too
+	   expensive to do that check for every call (which is quite common in
+	   case of a large number of threads), so it has been skipped.  */
+	cmpl	$-ENOSYS, %eax
+	jne	19f
+	xorl	%ecx, %ecx
+
+18:	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+#if FUTEX_WAIT != 0
+	addl	$FUTEX_WAIT, %ecx
+#endif
+	movl	%ebp, %edx
+	addl	$cond_futex, %ebx
+.Ladd_cond_futex:
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	subl	$cond_futex, %ebx
+.Lsub_cond_futex:
+
+19:	movl	(%esp), %eax
+	call	__pthread_disable_asynccancel
+.LcleanupEND:
+
+	/* Lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jnz	5f
+
+6:	movl	broadcast_seq(%ebx), %eax
+	cmpl	12(%esp), %eax
+	jne	16f
+
+	movl	woken_seq(%ebx), %eax
+	movl	woken_seq+4(%ebx), %ecx
+
+	movl	wakeup_seq(%ebx), %edi
+	movl	wakeup_seq+4(%ebx), %edx
+
+	cmpl	8(%esp), %edx
+	jne	7f
+	cmpl	4(%esp), %edi
+	je	22f
+
+7:	cmpl	%ecx, %edx
+	jne	9f
+	cmpl	%eax, %edi
+	je	22f
+
+9:	addl	$1, woken_seq(%ebx)
+	adcl	$0, woken_seq+4(%ebx)
+
+	/* Unlock */
+16:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	movl	total_seq(%ebx), %eax
+	andl	total_seq+4(%ebx), %eax
+	cmpl	$0xffffffff, %eax
+	jne	17f
+	movl	cond_nwaiters(%ebx), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	17f
+
+	addl	$cond_nwaiters, %ebx
+	movl	$SYS_futex, %eax
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$1, %edx
+	ENTER_KERNEL
+	subl	$cond_nwaiters, %ebx
+
+17:	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	jne	10f
+
+	/* With requeue_pi, the mutex lock is held in the kernel.  */
+11:	movl	24+FRAME_SIZE(%esp), %eax
+	movl	16(%esp), %ecx
+	testl	%ecx, %ecx
+	jnz	21f
+
+	call	__pthread_mutex_cond_lock
+20:	addl	$FRAME_SIZE, %esp
+	cfi_adjust_cfa_offset(-FRAME_SIZE);
+
+14:	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+
+	/* We return the result of the mutex_lock operation.  */
+	ret
+
+	cfi_restore_state
+
+21:	call	__pthread_mutex_cond_lock_adjust
+	xorl	%eax, %eax
+	jmp	20b
+
+	cfi_adjust_cfa_offset(-FRAME_SIZE);
+
+	/* We need to go back to futex_wait.  If we're using requeue_pi, then
+	   release the mutex we had acquired and go back.  */
+22:	movl	16(%esp), %edx
+	testl	%edx, %edx
+	jz	8b
+
+	/* Adjust the mutex values first and then unlock it.  The unlock
+	   should always succeed or else the kernel did not lock the mutex
+	   correctly.  */
+	movl	dep_mutex(%ebx), %eax
+	call	__pthread_mutex_cond_lock_adjust
+	xorl	%edx, %edx
+	call	__pthread_mutex_unlock_usercnt
+	jmp	8b
+
+	/* Initial locking failed.  */
+1:
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+	jmp	2b
+
+	/* The initial unlocking of the mutex failed.  */
+12:
+	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	jne	14b
+
+	movl	%eax, %esi
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+
+	movl	%esi, %eax
+	jmp	14b
+
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+
+	/* Unlock in loop requires wakeup.  */
+3:
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+	jmp	4b
+
+	/* Locking in loop failed.  */
+5:
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+	jmp	6b
+
+	/* Unlock after loop requires wakeup.  */
+10:
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+	jmp	11b
+
+	.size	__pthread_cond_wait, .-__pthread_cond_wait
+versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
+		  GLIBC_2_3_2)
+
+
+	.type	__condvar_w_cleanup2, @function
+__condvar_w_cleanup2:
+	subl	$cond_futex, %ebx
+	.size	__condvar_w_cleanup2, .-__condvar_w_cleanup2
+.LSbl4:
+	.type	__condvar_w_cleanup, @function
+__condvar_w_cleanup:
+	movl	%eax, %esi
+
+	/* Get internal lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, cond_lock(%ebx)
+#endif
+	jz	1f
+
+#if cond_lock == 0
+	movl	%ebx, %edx
+#else
+	leal	cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_lock_wait
+
+1:	movl	broadcast_seq(%ebx), %eax
+	cmpl	12(%esp), %eax
+	jne	3f
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case, the thread was woken and
+	   then cancelled.  In this case we ignore the signal.  */
+	movl	total_seq(%ebx), %eax
+	movl	total_seq+4(%ebx), %edi
+	cmpl	wakeup_seq+4(%ebx), %edi
+	jb	6f
+	ja	7f
+	cmpl	wakeup_seq(%ebx), %eax
+	jbe	7f
+
+6:	addl	$1, wakeup_seq(%ebx)
+	adcl	$0, wakeup_seq+4(%ebx)
+	addl	$1, cond_futex(%ebx)
+
+7:	addl	$1, woken_seq(%ebx)
+	adcl	$0, woken_seq+4(%ebx)
+
+3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%ebx)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	xorl	%edi, %edi
+	movl	total_seq(%ebx), %eax
+	andl	total_seq+4(%ebx), %eax
+	cmpl	$0xffffffff, %eax
+	jne	4f
+	movl	cond_nwaiters(%ebx), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	4f
+
+	addl	$cond_nwaiters, %ebx
+	movl	$SYS_futex, %eax
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_nwaiters(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$1, %edx
+	ENTER_KERNEL
+	subl	$cond_nwaiters, %ebx
+	movl	$1, %edi
+
+4:	LOCK
+#if cond_lock == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, cond_lock(%ebx)
+#endif
+	je	2f
+
+#if cond_lock == 0
+	movl	%ebx, %eax
+#else
+	leal	cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex(%ebx)
+	setne	%cl
+	subl	$1, %ecx
+	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+	addl	$LLL_PRIVATE, %ecx
+#endif
+	call	__lll_unlock_wake
+
+	/* Wake up all waiters to make sure no signal gets lost.  */
+2:	testl	%edi, %edi
+	jnz	5f
+	addl	$cond_futex, %ebx
+#if FUTEX_PRIVATE_FLAG > 255
+	xorl	%ecx, %ecx
+#endif
+	cmpl	$-1, dep_mutex-cond_futex(%ebx)
+	sete	%cl
+	subl	$1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %ecx
+#else
+	andl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$FUTEX_WAKE, %ecx
+	movl	$SYS_futex, %eax
+	movl	$0x7fffffff, %edx
+	ENTER_KERNEL
+
+	/* Lock the mutex only if we don't own it already.  This only happens
+	   in case of PI mutexes, if we got cancelled after a successful
+	   return of the futex syscall and before disabling async
+	   cancellation.  */
+5:	movl	24+FRAME_SIZE(%esp), %eax
+	movl	MUTEX_KIND(%eax), %ebx
+	andl	$(ROBUST_BIT|PI_BIT), %ebx
+	cmpl	$PI_BIT, %ebx
+	jne	8f
+
+	movl	(%eax), %ebx
+	andl	$TID_MASK, %ebx
+	cmpl	%ebx, %gs:TID
+	jne	8f
+	/* We managed to get the lock.  Fix it up before returning.  */
+	call	__pthread_mutex_cond_lock_adjust
+	jmp	9f
+
+8:	call	__pthread_mutex_cond_lock
+
+9:	movl	%esi, (%esp)
+.LcallUR:
+	call	_Unwind_Resume
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	__condvar_w_cleanup, .-__condvar_w_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format (omit)
+	.byte	DW_EH_PE_omit			# @TType format (omit)
+	.byte	DW_EH_PE_sdata4			# call-site format
+						# DW_EH_PE_sdata4
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.long	.LcleanupSTART-.LSTARTCODE
+	.long	.Ladd_cond_futex_pi-.LcleanupSTART
+	.long	__condvar_w_cleanup-.LSTARTCODE
+	.uleb128  0
+	.long	.Ladd_cond_futex_pi-.LSTARTCODE
+	.long	.Lsub_cond_futex_pi-.Ladd_cond_futex_pi
+	.long	__condvar_w_cleanup2-.LSTARTCODE
+	.uleb128  0
+	.long	.Lsub_cond_futex_pi-.LSTARTCODE
+	.long	.Ladd_cond_futex-.Lsub_cond_futex_pi
+	.long	__condvar_w_cleanup-.LSTARTCODE
+	.uleb128  0
+	.long	.Ladd_cond_futex-.LSTARTCODE
+	.long	.Lsub_cond_futex-.Ladd_cond_futex
+	.long	__condvar_w_cleanup2-.LSTARTCODE
+	.uleb128  0
+	.long	.Lsub_cond_futex-.LSTARTCODE
+	.long	.LcleanupEND-.Lsub_cond_futex
+	.long	__condvar_w_cleanup-.LSTARTCODE
+	.uleb128  0
+	.long	.LcallUR-.LSTARTCODE
+	.long	.LENDCODE-.LcallUR
+	.long	0
+	.uleb128  0
+.Lcstend:
+
+#ifdef SHARED
+	.hidden DW.ref.__gcc_personality_v0
+	.weak   DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align 4
+	.type   DW.ref.__gcc_personality_v0, @object
+	.size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+	.long   __gcc_personality_v0
+#endif
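
The decision made at labels 6/7/9/22 above, i.e. whether a waiter may consume a wakeup or has to go back to the futex, is compact but dense in the assembly.  Below is a hedged C rendering with descriptive names in place of the real field offsets; the saved_* values are the snapshots stored at 4(%esp), 8(%esp) and 12(%esp) before the thread went to sleep.

#include <stdbool.h>
#include <stdint.h>

struct condvar_seqs
{
  uint64_t wakeup_seq;          /* signals sent */
  uint64_t woken_seq;           /* signals consumed */
  unsigned int broadcast_seq;
};

static bool
may_consume_wakeup (const struct condvar_seqs *c,
                    uint64_t saved_wakeup_seq,
                    unsigned int saved_broadcast_seq)
{
  if (c->broadcast_seq != saved_broadcast_seq)
    return true;                        /* a broadcast wakes everyone */
  if (c->wakeup_seq == saved_wakeup_seq)
    return false;                       /* nothing sent since we slept */
  return c->wakeup_seq != c->woken_seq; /* an unconsumed signal left?  */
}

On a true result the assembly bumps woken_seq (label 9), consuming the signal; on false it returns to the futex wait (label 22, then 8b).
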
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
new file mode 100644
index 0000000000..a81bc06d7c
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
@@ -0,0 +1,192 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+
+#include <stap-probe.h>
+
+	.text
+
+	.globl	__pthread_rwlock_rdlock
+	.type	__pthread_rwlock_rdlock,@function
+	.align	16
+__pthread_rwlock_rdlock:
+	cfi_startproc
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%esi, -8)
+	cfi_offset(%ebx, -12)
+
+	xorl	%esi, %esi
+	movl	12(%esp), %ebx
+
+	LIBC_PROBE (rdlock_entry, 1, %ebx)
+
+	/* Get the lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, MUTEX(%ebx)
+#endif
+	jnz	1f
+
+2:	movl	WRITER(%ebx), %eax
+	testl	%eax, %eax
+	jne	14f
+	cmpl	$0, WRITERS_QUEUED(%ebx)
+	je	5f
+	cmpb	$0, FLAGS(%ebx)
+	je	5f
+
+3:	addl	$1, READERS_QUEUED(%ebx)
+	je	4f
+
+	movl	READERS_WAKEUP(%ebx), %edx
+
+	LOCK
+#if MUTEX == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, MUTEX(%ebx)
+#endif
+	jne	10f
+
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movzbl	PSHARED(%ebx), %ecx
+	xorl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+	movzbl	PSHARED(%ebx), %ecx
+# if FUTEX_WAIT != 0
+	orl	$FUTEX_WAIT, %ecx
+# endif
+	xorl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$READERS_WAKEUP, %ebx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	subl	$READERS_WAKEUP, %ebx
+
+	/* Reacquire the lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, MUTEX(%ebx)
+#endif
+	jnz	12f
+
+13:	subl	$1, READERS_QUEUED(%ebx)
+	jmp	2b
+
+5:	xorl	%edx, %edx
+	addl	$1, NR_READERS(%ebx)
+	je	8f
+9:	LOCK
+#if MUTEX == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, MUTEX(%ebx)
+#endif
+	jne	6f
+7:
+
+	movl	%edx, %eax
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	ret
+
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%esi, -8)
+	cfi_offset(%ebx, -12)
+1:
+#if MUTEX == 0
+	movl	%ebx, %edx
+#else
+	leal	MUTEX(%ebx), %edx
+#endif
+	movzbl	PSHARED(%ebx), %ecx
+	call	__lll_lock_wait
+	jmp	2b
+
+14:	cmpl	%gs:TID, %eax
+	jne	3b
+	/* Deadlock detected.  */
+	movl	$EDEADLK, %edx
+	jmp	9b
+
+6:
+#if MUTEX == 0
+	movl	%ebx, %eax
+#else
+	leal	MUTEX(%ebx), %eax
+#endif
+	movzbl	PSHARED(%ebx), %ecx
+	call	__lll_unlock_wake
+	jmp	7b
+
+	/* Overflow.  */
+8:	subl	$1, NR_READERS(%ebx)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+	/* Overflow.  */
+4:	subl	$1, READERS_QUEUED(%ebx)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+10:
+#if MUTEX == 0
+	movl	%ebx, %eax
+#else
+	leal	MUTEX(%ebx), %eax
+#endif
+	movzbl	PSHARED(%ebx), %ecx
+	call	__lll_unlock_wake
+	jmp	11b
+
+12:
+#if MUTEX == 0
+	movl	%ebx, %edx
+#else
+	leal	MUTEX(%ebx), %edx
+#endif
+	movzbl	PSHARED(%ebx), %ecx
+	call	__lll_lock_wait
+	jmp	13b
+	cfi_endproc
+	.size	__pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
+
+strong_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)
+hidden_def (__pthread_rwlock_rdlock)
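
The admission test at the top of the function (labels 2, 3, 5 and 14) encodes the whole reader policy.  As a sketch, assuming WRITER holds the owning thread's TID (0 when write-unlocked) and a nonzero FLAGS byte means the rwlock prefers writers:

enum rd_action { RD_ACQUIRE, RD_QUEUE, RD_DEADLK };

static enum rd_action
rdlock_decide (int writer, int writers_queued, int flags, int self_tid)
{
  if (writer != 0)                         /* label 14 */
    return writer == self_tid ? RD_DEADLK : RD_QUEUE;
  if (writers_queued != 0 && flags != 0)   /* writer preference */
    return RD_QUEUE;
  return RD_ACQUIRE;                       /* label 5: ++NR_READERS */
}

RD_QUEUE corresponds to label 3: READERS_QUEUED is bumped and the thread futex-waits on READERS_WAKEUP before retrying.
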
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
new file mode 100644
index 0000000000..bc1001ce9c
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
@@ -0,0 +1,243 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+
+
+	.text
+
+	.globl	pthread_rwlock_timedrdlock
+	.type	pthread_rwlock_timedrdlock,@function
+	.align	16
+pthread_rwlock_timedrdlock:
+	cfi_startproc
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebp
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%esi, -8)
+	cfi_offset(%edi, -12)
+	cfi_offset(%ebx, -16)
+	cfi_offset(%ebp, -20)
+	subl	$8, %esp
+	cfi_adjust_cfa_offset(8)
+
+	movl	28(%esp), %ebp
+	movl	32(%esp), %edi
+
+	/* Get the lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %edx, (%ebp)
+#else
+	cmpxchgl %edx, MUTEX(%ebp)
+#endif
+	jnz	1f
+
+2:	movl	WRITER(%ebp), %eax
+	testl	%eax, %eax
+	jne	14f
+	cmpl	$0, WRITERS_QUEUED(%ebp)
+	je	5f
+	cmpb	$0, FLAGS(%ebp)
+	je	5f
+
+	/* Check the value of the timeout parameter.  */
+3:	cmpl	$1000000000, 4(%edi)
+	jae	19f
+
+	addl	$1, READERS_QUEUED(%ebp)
+	je	4f
+
+	movl	READERS_WAKEUP(%ebp), %esi
+
+	LOCK
+#if MUTEX == 0
+	subl	$1, (%ebp)
+#else
+	subl	$1, MUTEX(%ebp)
+#endif
+	jne	10f
+
+	/* Get current time.  */
+11:	movl	%esp, %ebx
+	xorl	%ecx, %ecx
+	movl	$__NR_gettimeofday, %eax
+	ENTER_KERNEL
+
+	/* Compute relative timeout.  */
+	movl	4(%esp), %eax
+	movl	$1000, %edx
+	mul	%edx		/* Microseconds to nanoseconds.  */
+	movl	(%edi), %ecx
+	movl	4(%edi), %edx
+	subl	(%esp), %ecx
+	subl	%eax, %edx
+	jns	15f
+	addl	$1000000000, %edx
+	subl	$1, %ecx
+15:	testl	%ecx, %ecx
+	js	16f		/* Time is already up.  */
+
+	/* Futex call.  */
+	movl	%ecx, (%esp)	/* Store relative timeout.  */
+	movl	%edx, 4(%esp)
+
+	movl	%esi, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movzbl	PSHARED(%ebp), %ecx
+	xorl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+	movzbl	PSHARED(%ebp), %ecx
+# if FUTEX_WAIT != 0
+	orl	$FUTEX_WAIT, %ecx
+# endif
+	xorl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	movl	%esp, %esi
+	leal	READERS_WAKEUP(%ebp), %ebx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	movl	%eax, %esi
+17:
+
+	/* Reacquire the lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %edx, (%ebp)
+#else
+	cmpxchgl %edx, MUTEX(%ebp)
+#endif
+	jnz	12f
+
+13:	subl	$1, READERS_QUEUED(%ebp)
+	cmpl	$-ETIMEDOUT, %esi
+	jne	2b
+
+18:	movl	$ETIMEDOUT, %edx
+	jmp	9f
+
+
+5:	xorl	%edx, %edx
+	addl	$1, NR_READERS(%ebp)
+	je	8f
+9:	LOCK
+#if MUTEX == 0
+	subl	$1, (%ebp)
+#else
+	subl	$1, MUTEX(%ebp)
+#endif
+	jne	6f
+
+7:	movl	%edx, %eax
+
+	addl	$8, %esp
+	cfi_adjust_cfa_offset(-8)
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	ret
+
+	cfi_adjust_cfa_offset(24)
+	cfi_offset(%esi, -8)
+	cfi_offset(%edi, -12)
+	cfi_offset(%ebx, -16)
+	cfi_offset(%ebp, -20)
+1:
+#if MUTEX == 0
+	movl	%ebp, %edx
+#else
+	leal	MUTEX(%ebp), %edx
+#endif
+	movzbl	PSHARED(%ebp), %ecx
+	call	__lll_lock_wait
+	jmp	2b
+
+14:	cmpl	%gs:TID, %eax
+	jne	3b
+	movl	$EDEADLK, %edx
+	jmp	9b
+
+6:
+#if MUTEX == 0
+	movl	%ebp, %eax
+#else
+	leal	MUTEX(%ebp), %eax
+#endif
+	movzbl	PSHARED(%ebp), %ecx
+	call	__lll_unlock_wake
+	jmp	7b
+
+	/* Overflow.  */
+8:	subl	$1, NR_READERS(%ebp)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+	/* Overflow.  */
+4:	subl	$1, READERS_QUEUED(%ebp)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+10:
+#if MUTEX == 0
+	movl	%ebp, %eax
+#else
+	leal	MUTEX(%ebp), %eax
+#endif
+	movzbl	PSHARED(%ebp), %ecx
+	call	__lll_unlock_wake
+	jmp	11b
+
+12:
+#if MUTEX == 0
+	movl	%ebp, %edx
+#else
+	leal	MUTEX(%ebp), %edx
+#endif
+	movzbl	PSHARED(%ebp), %ecx
+	call	__lll_lock_wait
+	jmp	13b
+
+16:	movl	$-ETIMEDOUT, %esi
+	jmp	17b
+
+19:	movl	$EINVAL, %edx
+	jmp	9b
+	cfi_endproc
+	.size	pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
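
The inline arithmetic between the gettimeofday call and label 15 turns the absolute CLOCK_REALTIME deadline into the relative timeout the futex syscall expects: tv_usec is scaled by 1000 to nanoseconds, subtracted with a borrow into the seconds field, and a negative seconds result means the deadline has passed.  The same computation in C, as an illustrative sketch:

#include <stddef.h>
#include <sys/time.h>
#include <time.h>

/* Returns 0 and fills *rel, or -1 if the deadline already passed
   (the assembly's jump to label 16, which yields ETIMEDOUT).  */
static int
abs_to_rel (const struct timespec *abstime, struct timespec *rel)
{
  struct timeval now;
  gettimeofday (&now, NULL);

  long nsec = abstime->tv_nsec - now.tv_usec * 1000L; /* usec -> nsec */
  time_t sec = abstime->tv_sec - now.tv_sec;
  if (nsec < 0)                 /* borrow, as after the jns 15f */
    {
      nsec += 1000000000L;
      --sec;
    }
  if (sec < 0)
    return -1;                  /* time is already up */
  rel->tv_sec = sec;
  rel->tv_nsec = nsec;
  return 0;
}
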
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
new file mode 100644
index 0000000000..9abba58ca8
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
@@ -0,0 +1,236 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+
+
+	.text
+
+	.globl	pthread_rwlock_timedwrlock
+	.type	pthread_rwlock_timedwrlock,@function
+	.align	16
+pthread_rwlock_timedwrlock:
+	cfi_startproc
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebp
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%esi, -8)
+	cfi_offset(%edi, -12)
+	cfi_offset(%ebx, -16)
+	cfi_offset(%ebp, -20)
+	subl	$8, %esp
+	cfi_adjust_cfa_offset(8)
+
+	movl	28(%esp), %ebp
+	movl	32(%esp), %edi
+
+	/* Get the lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %edx, (%ebp)
+#else
+	cmpxchgl %edx, MUTEX(%ebp)
+#endif
+	jnz	1f
+
+2:	movl	WRITER(%ebp), %eax
+	testl	%eax, %eax
+	jne	14f
+	cmpl	$0, NR_READERS(%ebp)
+	je	5f
+
+	/* Check the value of the timeout parameter.  */
+3:	cmpl	$1000000000, 4(%edi)
+	jae	19f
+
+	addl	$1, WRITERS_QUEUED(%ebp)
+	je	4f
+
+	movl	WRITERS_WAKEUP(%ebp), %esi
+
+	LOCK
+#if MUTEX == 0
+	subl	$1, (%ebp)
+#else
+	subl	$1, MUTEX(%ebp)
+#endif
+	jne	10f
+
+	/* Get current time.  */
+11:	movl	%esp, %ebx
+	xorl	%ecx, %ecx
+	movl	$__NR_gettimeofday, %eax
+	ENTER_KERNEL
+
+	/* Compute relative timeout.  */
+	movl	4(%esp), %eax
+	movl	$1000, %edx
+	mul	%edx		/* Microseconds to nanoseconds.  */
+	movl	(%edi), %ecx
+	movl	4(%edi), %edx
+	subl	(%esp), %ecx
+	subl	%eax, %edx
+	jns	15f
+	addl	$1000000000, %edx
+	subl	$1, %ecx
+15:	testl	%ecx, %ecx
+	js	16f		/* Time is already up.  */
+
+	/* Futex call.  */
+	movl	%ecx, (%esp)	/* Store relative timeout.  */
+	movl	%edx, 4(%esp)
+
+	movl	%esi, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movzbl	PSHARED(%ebp), %ecx
+	xorl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+	movzbl	PSHARED(%ebp), %ecx
+# if FUTEX_WAIT != 0
+	orl	$FUTEX_WAIT, %ecx
+# endif
+	xorl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	movl	%esp, %esi
+	leal	WRITERS_WAKEUP(%ebp), %ebx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	movl	%eax, %esi
+17:
+
+	/* Reacquire the lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %edx, (%ebp)
+#else
+	cmpxchgl %edx, MUTEX(%ebp)
+#endif
+	jnz	12f
+
+13:	subl	$1, WRITERS_QUEUED(%ebp)
+	cmpl	$-ETIMEDOUT, %esi
+	jne	2b
+
+18:	movl	$ETIMEDOUT, %edx
+	jmp	9f
+
+
+5:	xorl	%edx, %edx
+	movl	%gs:TID, %eax
+	movl	%eax, WRITER(%ebp)
+9:	LOCK
+#if MUTEX == 0
+	subl	$1, (%ebp)
+#else
+	subl	$1, MUTEX(%ebp)
+#endif
+	jne	6f
+
+7:	movl	%edx, %eax
+
+	addl	$8, %esp
+	cfi_adjust_cfa_offset(-8)
+	popl	%ebp
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebp)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	ret
+
+	cfi_adjust_cfa_offset(24)
+	cfi_offset(%esi, -8)
+	cfi_offset(%edi, -12)
+	cfi_offset(%ebx, -16)
+	cfi_offset(%ebp, -20)
+1:
+#if MUTEX == 0
+	movl	%ebp, %edx
+#else
+	leal	MUTEX(%ebp), %edx
+#endif
+	movzbl	PSHARED(%ebp), %ecx
+	call	__lll_lock_wait
+	jmp	2b
+
+14:	cmpl	%gs:TID, %eax
+	jne	3b
+20:	movl	$EDEADLK, %edx
+	jmp	9b
+
+6:
+#if MUTEX == 0
+	movl	%ebp, %eax
+#else
+	leal	MUTEX(%ebp), %eax
+#endif
+	movzbl	PSHARED(%ebp), %ecx
+	call	__lll_unlock_wake
+	jmp	7b
+
+	/* Overflow.  */
+4:	subl	$1, WRITERS_QUEUED(%ebp)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+10:
+#if MUTEX == 0
+	movl	%ebp, %eax
+#else
+	leal	MUTEX(%ebp), %eax
+#endif
+	movzbl	PSHARED(%ebp), %ecx
+	call	__lll_unlock_wake
+	jmp	11b
+
+12:
+#if MUTEX == 0
+	movl	%ebp, %edx
+#else
+	leal	MUTEX(%ebp), %edx
+#endif
+	movzbl	PSHARED(%ebp), %ecx
+	call	__lll_lock_wait
+	jmp	13b
+
+16:	movl	$-ETIMEDOUT, %esi
+	jmp	17b
+
+19:	movl	$EINVAL, %edx
+	jmp	9b
+	cfi_endproc
+	.size	pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
new file mode 100644
index 0000000000..738c067780
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
@@ -0,0 +1,151 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <kernel-features.h>
+
+
+	.text
+
+	.globl	__pthread_rwlock_unlock
+	.type	__pthread_rwlock_unlock,@function
+	.align	16
+__pthread_rwlock_unlock:
+	cfi_startproc
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%edi
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+	cfi_offset(%edi, -12)
+
+	movl	12(%esp), %edi
+
+	/* Get the lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %edx, (%edi)
+#else
+	cmpxchgl %edx, MUTEX(%edi)
+#endif
+	jnz	1f
+
+2:	cmpl	$0, WRITER(%edi)
+	jne	5f
+	subl	$1, NR_READERS(%edi)
+	jnz	6f
+
+5:	movl	$0, WRITER(%edi)
+
+	movl	$1, %edx
+	leal	WRITERS_WAKEUP(%edi), %ebx
+	cmpl	$0, WRITERS_QUEUED(%edi)
+	jne	0f
+
+	/* If there are also no readers waiting, there is nothing to do.  */
+	cmpl	$0, READERS_QUEUED(%edi)
+	je	6f
+
+	movl	$0x7fffffff, %edx
+	leal	READERS_WAKEUP(%edi), %ebx
+
+0:	addl	$1, (%ebx)
+	LOCK
+#if MUTEX == 0
+	subl	$1, (%edi)
+#else
+	subl	$1, MUTEX(%edi)
+#endif
+	jne	7f
+
+8:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movzbl	PSHARED(%edi), %ecx
+	xorl	$FUTEX_PRIVATE_FLAG|FUTEX_WAKE, %ecx
+#else
+	movzbl	PSHARED(%edi), %ecx
+	orl	$FUTEX_WAKE, %ecx
+	xorl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	xorl	%eax, %eax
+	popl	%edi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edi)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%ebx, -8)
+	cfi_offset(%edi, -12)
+	.align	16
+6:	LOCK
+#if MUTEX == 0
+	subl	$1, (%edi)
+#else
+	subl	$1, MUTEX(%edi)
+#endif
+	jne	3f
+
+4:	xorl	%eax, %eax
+	popl	%edi
+	popl	%ebx
+	ret
+
+1:
+#if MUTEX == 0
+	movl	%edi, %edx
+#else
+	leal	MUTEX(%edi), %edx
+#endif
+	movzbl	PSHARED(%edi), %ecx
+	call	__lll_lock_wait
+	jmp	2b
+
+3:
+#if MUTEX == 0
+	movl	%edi, %eax
+#else
+	leal	MUTEX(%edi), %eax
+#endif
+	movzbl	PSHARED(%edi), %ecx
+	call	__lll_unlock_wake
+	jmp	4b
+
+7:
+#if MUTEX == 0
+	movl	%edi, %eax
+#else
+	leal	MUTEX(%edi), %eax
+#endif
+	movzbl	PSHARED(%edi), %ecx
+	call	__lll_unlock_wake
+	jmp	8b
+	cfi_endproc
+	.size	__pthread_rwlock_unlock,.-__pthread_rwlock_unlock
+
+strong_alias (__pthread_rwlock_unlock, pthread_rwlock_unlock)
+hidden_def (__pthread_rwlock_unlock)
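
The wake policy at labels 5/0/8 above prefers writers: one queued writer is woken on WRITERS_WAKEUP, and only if none are queued are all readers woken (the 0x7fffffff wake count) on READERS_WAKEUP, after the chosen wakeup counter has been bumped.  A minimal C sketch under assumed field names:

#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct rwlock_sketch
{
  int writers_wakeup, readers_wakeup;   /* futex words */
  int writers_queued, readers_queued;
  int private_flag;                     /* derived from PSHARED */
};

static void
rwlock_unlock_wake (struct rwlock_sketch *rw)
{
  int *futex, nwake;
  if (rw->writers_queued != 0)
    {
      futex = &rw->writers_wakeup;      /* one writer gets the lock */
      nwake = 1;
    }
  else if (rw->readers_queued != 0)
    {
      futex = &rw->readers_wakeup;      /* release the whole read side */
      nwake = INT_MAX;                  /* 0x7fffffff in the assembly */
    }
  else
    return;                             /* nobody waiting (label 6) */
  ++*futex;                             /* bump the wakeup generation */
  syscall (SYS_futex, futex, FUTEX_WAKE | rw->private_flag, nwake,
           NULL, NULL, 0);
}
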
diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
new file mode 100644
index 0000000000..8345cae1ea
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
@@ -0,0 +1,183 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+
+#include <stap-probe.h>
+
+	.text
+
+	.globl	__pthread_rwlock_wrlock
+	.type	__pthread_rwlock_wrlock,@function
+	.align	16
+__pthread_rwlock_wrlock:
+	cfi_startproc
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%esi, -8)
+	cfi_offset(%ebx, -12)
+
+	xorl	%esi, %esi
+	movl	12(%esp), %ebx
+
+	LIBC_PROBE (wrlock_entry, 1, %ebx)
+
+	/* Get the lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, MUTEX(%ebx)
+#endif
+	jnz	1f
+
+2:	movl	WRITER(%ebx), %eax
+	testl	%eax, %eax
+	jne	14f
+	cmpl	$0, NR_READERS(%ebx)
+	je	5f
+
+3:	addl	$1, WRITERS_QUEUED(%ebx)
+	je	4f
+
+	movl	WRITERS_WAKEUP(%ebx), %edx
+
+	LOCK
+#if MUTEX == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, MUTEX(%ebx)
+#endif
+	jne	10f
+
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movzbl	PSHARED(%ebx), %ecx
+	xorl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+	movzbl	PSHARED(%ebx), %ecx
+# if FUTEX_WAIT != 0
+	orl	$FUTEX_WAIT, %ecx
+# endif
+	xorl	%gs:PRIVATE_FUTEX, %ecx
+#endif
+	addl	$WRITERS_WAKEUP, %ebx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	subl	$WRITERS_WAKEUP, %ebx
+
+	/* Reacquire the lock.  */
+	movl	$1, %edx
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, MUTEX(%ebx)
+#endif
+	jnz	12f
+
+13:	subl	$1, WRITERS_QUEUED(%ebx)
+	jmp	2b
+
+5:	xorl	%edx, %edx
+	movl	%gs:TID, %eax
+	movl	%eax, WRITER(%ebx)
+9:	LOCK
+#if MUTEX == 0
+	subl	$1, (%ebx)
+#else
+	subl	$1, MUTEX(%ebx)
+#endif
+	jne	6f
+7:
+
+	movl	%edx, %eax
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	ret
+
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%esi, -8)
+	cfi_offset(%ebx, -12)
+1:
+#if MUTEX == 0
+	movl	%ebx, %edx
+#else
+	leal	MUTEX(%ebx), %edx
+#endif
+	movzbl	PSHARED(%ebx), %ecx
+	call	__lll_lock_wait
+	jmp	2b
+
+14:	cmpl	%gs:TID, %eax
+	jne	3b
+	movl	$EDEADLK, %edx
+	jmp	9b
+
+6:
+#if MUTEX == 0
+	movl	%ebx, %eax
+#else
+	leal	MUTEX(%ebx), %eax
+#endif
+	movzbl	PSHARED(%ebx), %ecx
+	call	__lll_unlock_wake
+	jmp	7b
+
+4:	subl	$1, WRITERS_QUEUED(%ebx)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+10:
+#if MUTEX == 0
+	movl	%ebx, %eax
+#else
+	leal	MUTEX(%ebx), %eax
+#endif
+	movzbl	PSHARED(%ebx), %ecx
+	call	__lll_unlock_wake
+	jmp	11b
+
+12:
+#if MUTEX == 0
+	movl	%ebx, %edx
+#else
+	leal	MUTEX(%ebx), %edx
+#endif
+	movzbl	PSHARED(%ebx), %ecx
+	call	__lll_lock_wait
+	jmp	13b
+	cfi_endproc
+	.size	__pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
+
+strong_alias (__pthread_rwlock_wrlock, pthread_rwlock_wrlock)
+hidden_def (__pthread_rwlock_wrlock)
diff --git a/sysdeps/unix/sysv/linux/i386/i486/sem_post.S b/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
new file mode 100644
index 0000000000..bc091a0926
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
@@ -0,0 +1,150 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+#include <lowlevellock.h>
+
+
+	.text
+
+	.globl	__new_sem_post
+	.type	__new_sem_post,@function
+	.align	16
+__new_sem_post:
+	cfi_startproc
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+
+	movl	8(%esp), %ebx
+
+#if VALUE == 0
+	movl	(%ebx), %eax
+#else
+	movl	VALUE(%ebx), %eax
+#endif
+0:	cmpl	$SEM_VALUE_MAX, %eax
+	je	3f
+	leal	1(%eax), %edx
+	LOCK
+#if VALUE == 0
+	cmpxchgl %edx, (%ebx)
+#else
+	cmpxchgl %edx, VALUE(%ebx)
+#endif
+	jnz	0b
+
+	cmpl	$0, NWAITERS(%ebx)
+	je	2f
+
+	movl	$FUTEX_WAKE, %ecx
+	orl	PRIVATE(%ebx), %ecx
+	movl	$1, %edx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	testl	%eax, %eax
+	js	1f
+
+2:	xorl	%eax, %eax
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+1:
+#ifdef PIC
+	SETUP_PIC_REG(bx)
+#else
+	movl	$4f, %ebx
+4:
+#endif
+	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
+#ifdef NO_TLS_DIRECT_SEG_REFS
+	movl	errno@gotntpoff(%ebx), %edx
+	addl	%gs:0, %edx
+	movl	$EINVAL, (%edx)
+#else
+	movl	errno@gotntpoff(%ebx), %edx
+	movl	$EINVAL, %gs:(%edx)
+#endif
+
+	orl	$-1, %eax
+	popl	%ebx
+	ret
+
+3:
+#ifdef PIC
+	SETUP_PIC_REG(bx)
+#else
+	movl	$5f, %ebx
+5:
+#endif
+	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
+#ifdef NO_TLS_DIRECT_SEG_REFS
+	movl	errno@gotntpoff(%ebx), %edx
+	addl	%gs:0, %edx
+	movl	$EOVERFLOW, (%edx)
+#else
+	movl	errno@gotntpoff(%ebx), %edx
+	movl	$EOVERFLOW, %gs:(%edx)
+#endif
+
+	orl	$-1, %eax
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+	cfi_endproc
+	.size	__new_sem_post,.-__new_sem_post
+	versioned_symbol(libpthread, __new_sem_post, sem_post, GLIBC_2_1)
+#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
+	.global	__old_sem_post
+	.type	__old_sem_post,@function
+__old_sem_post:
+	cfi_startproc
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+
+	movl	8(%esp), %ebx
+	LOCK
+	addl	$1, (%ebx)
+
+	movl	$SYS_futex, %eax
+	movl	$FUTEX_WAKE, %ecx
+	movl	$1, %edx
+	ENTER_KERNEL
+
+	testl	%eax, %eax
+	js	1b
+
+	xorl	%eax, %eax
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+	cfi_endproc
+	.size	__old_sem_post,.-__old_sem_post
+	compat_symbol(libpthread, __old_sem_post, sem_post, GLIBC_2_0)
+#endif
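
The fast path of __new_sem_post is a CAS loop that refuses to step past SEM_VALUE_MAX (label 3 reports EOVERFLOW) and enters the kernel only when NWAITERS is nonzero.  Sketched with C11 atomics and an assumed struct layout; the EINVAL path for a failed futex call (label 1) is left out:

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct sem_sketch
{
  _Atomic int value;
  int nwaiters;
  int private_flag;             /* 0 or FUTEX_PRIVATE_FLAG */
};

static int
sem_post_sketch (struct sem_sketch *s)
{
  int v = atomic_load (&s->value);
  do
    if (v == SEM_VALUE_MAX)
      {
        errno = EOVERFLOW;      /* label 3 in the assembly */
        return -1;
      }
  while (!atomic_compare_exchange_weak (&s->value, &v, v + 1));

  if (s->nwaiters != 0)         /* enter the kernel only if needed */
    syscall (SYS_futex, &s->value, FUTEX_WAKE | s->private_flag, 1,
             NULL, NULL, 0);
  return 0;
}
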
diff --git a/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S b/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
new file mode 100644
index 0000000000..94d052afc0
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
@@ -0,0 +1,327 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+#include <lowlevellock.h>
+
+
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
+
+
+	.text
+
+	.globl	sem_timedwait
+	.type	sem_timedwait,@function
+	.align	16
+sem_timedwait:
+.LSTARTCODE:
+	movl	4(%esp), %ecx
+
+	movl	(%ecx), %eax
+2:	testl	%eax, %eax
+	je	1f
+
+	leal	-1(%eax), %edx
+	LOCK
+	cmpxchgl %edx, (%ecx)
+	jne	2b
+
+	xorl	%eax, %eax
+	ret
+
+	/* Check whether the timeout value is valid.  */
+1:	pushl	%esi
+.Lpush_esi:
+	pushl	%edi
+.Lpush_edi:
+	pushl	%ebx
+.Lpush_ebx:
+	subl	$12, %esp
+.Lsub_esp:
+
+	movl	32(%esp), %edi
+
+	/* Check for invalid nanosecond field.  */
+	cmpl	$1000000000, 4(%edi)
+	movl	$EINVAL, %esi
+	jae	.Lerrno_exit
+
+	LOCK
+	incl	NWAITERS(%ecx)
+
+7:	xorl	%ecx, %ecx
+	movl	%esp, %ebx
+	movl	%ecx, %edx
+	movl	$__NR_gettimeofday, %eax
+	ENTER_KERNEL
+
+	/* Compute relative timeout.  */
+	movl	4(%esp), %eax
+	movl	$1000, %edx
+	mul	%edx		/* Microseconds to nanoseconds.  */
+	movl	(%edi), %ecx
+	movl	4(%edi), %edx
+	subl	(%esp), %ecx
+	subl	%eax, %edx
+	jns	5f
+	addl	$1000000000, %edx
+	subl	$1, %ecx
+5:	testl	%ecx, %ecx
+	movl	$ETIMEDOUT, %esi
+	js	6f		/* Time is already up.  */
+
+	movl	%ecx, (%esp)	/* Store relative timeout.  */
+	movl	%edx, 4(%esp)
+
+.LcleanupSTART:
+	call	__pthread_enable_asynccancel
+	movl	%eax, 8(%esp)
+
+	movl	28(%esp), %ebx	/* Load semaphore address.  */
+#if FUTEX_WAIT == 0
+	movl	PRIVATE(%ebx), %ecx
+#else
+	movl	$FUTEX_WAIT, %ecx
+	orl	PRIVATE(%ebx), %ecx
+#endif
+	movl	%esp, %esi
+	xorl	%edx, %edx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	movl	%eax, %esi
+
+	movl	8(%esp), %eax
+	call	__pthread_disable_asynccancel
+.LcleanupEND:
+
+	testl	%esi, %esi
+	je	9f
+	cmpl	$-EWOULDBLOCK, %esi
+	jne	3f
+
+9:	movl	(%ebx), %eax
+8:	testl	%eax, %eax
+	je	7b
+
+	leal	-1(%eax), %ecx
+	LOCK
+	cmpxchgl %ecx, (%ebx)
+	jne	8b
+
+	xorl	%eax, %eax
+
+	LOCK
+	decl	NWAITERS(%ebx)
+
+10:	addl	$12, %esp
+.Ladd_esp:
+	popl	%ebx
+.Lpop_ebx:
+	popl	%edi
+.Lpop_edi:
+	popl	%esi
+.Lpop_esi:
+	ret
+
+.Lafter_ret:
+3:	negl	%esi
+6:
+	movl	28(%esp), %ebx	/* Load semaphore address.  */
+	LOCK
+	decl	NWAITERS(%ebx)
+.Lerrno_exit:
+#ifdef PIC
+	SETUP_PIC_REG(bx)
+#else
+	movl	$4f, %ebx
+4:
+#endif
+	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
+#ifdef NO_TLS_DIRECT_SEG_REFS
+	movl	errno@gotntpoff(%ebx), %edx
+	addl	%gs:0, %edx
+	movl	%esi, (%edx)
+#else
+	movl	errno@gotntpoff(%ebx), %edx
+	movl	%esi, %gs:(%edx)
+#endif
+
+	orl	$-1, %eax
+	jmp	10b
+	.size	sem_timedwait,.-sem_timedwait
+
+
+	.type	sem_wait_cleanup,@function
+sem_wait_cleanup:
+	LOCK
+	decl	NWAITERS(%ebx)
+	movl	%eax, (%esp)
+.LcallUR:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	.size	sem_wait_cleanup,.-sem_wait_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	0xff				# @LPStart format (omit)
+	.byte	0xff				# @TType format (omit)
+	.byte	0x01				# call-site format
+						# DW_EH_PE_uleb128
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 sem_wait_cleanup-.LSTARTCODE
+	.uleb128  0
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128  0
+.Lcstend:
+
+
+	.section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+	.long	.LENDCIE-.LSTARTCIE		# Length of the CIE.
+.LSTARTCIE:
+	.long	0				# CIE ID.
+	.byte	1				# Version number.
+#ifdef SHARED
+	.string	"zPLR"				# NUL-terminated augmentation
+						# string.
+#else
+	.string	"zPL"				# NUL-terminated augmentation
+						# string.
+#endif
+	.uleb128 1				# Code alignment factor.
+	.sleb128 -4				# Data alignment factor.
+	.byte	8				# Return address register
+						# column.
+#ifdef SHARED
+	.uleb128 7				# Augmentation value length.
+	.byte	0x9b				# Personality: DW_EH_PE_pcrel
+						# + DW_EH_PE_sdata4
+						# + DW_EH_PE_indirect
+	.long	DW.ref.__gcc_personality_v0-.
+	.byte	0x1b				# LSDA Encoding: DW_EH_PE_pcrel
+						# + DW_EH_PE_sdata4.
+	.byte	0x1b				# FDE Encoding: DW_EH_PE_pcrel
+						# + DW_EH_PE_sdata4.
+#else
+	.uleb128 6				# Augmentation value length.
+	.byte	0x0				# Personality: absolute
+	.long	__gcc_personality_v0
+	.byte	0x0				# LSDA Encoding: absolute
+#endif
+	.byte 0x0c				# DW_CFA_def_cfa
+	.uleb128 4
+	.uleb128 4
+	.byte	0x88				# DW_CFA_offset, column 0x10
+	.uleb128 1
+	.align 4
+.LENDCIE:
+
+	.long	.LENDFDE-.LSTARTFDE		# Length of the FDE.
+.LSTARTFDE:
+	.long	.LSTARTFDE-.LSTARTFRAME		# CIE pointer.
+#ifdef SHARED
+	.long	.LSTARTCODE-.			# PC-relative start address
+						# of the code.
+#else
+	.long	.LSTARTCODE			# Start address of the code.
+#endif
+	.long	.LENDCODE-.LSTARTCODE		# Length of the code.
+	.uleb128 4				# Augmentation size
+#ifdef SHARED
+	.long	.LexceptSTART-.
+#else
+	.long	.LexceptSTART
+#endif
+
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lpush_esi-.LSTARTCODE
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 8
+	.byte   0x86				# DW_CFA_offset %esi
+	.uleb128 2
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lpush_edi-.Lpush_esi
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 12
+	.byte   0x87				# DW_CFA_offset %edi
+	.uleb128 3
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lpush_ebx-.Lpush_edi
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 16
+	.byte   0x83				# DW_CFA_offset %ebx
+	.uleb128 4
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lsub_esp-.Lpush_ebx
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 28
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Ladd_esp-.Lsub_esp
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 16
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lpop_ebx-.Ladd_esp
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 12
+	.byte	0xc3				# DW_CFA_restore %ebx
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lpop_edi-.Lpop_ebx
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 8
+	.byte	0xc7				# DW_CFA_restore %edi
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lpop_esi-.Lpop_edi
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 4
+	.byte	0xc6				# DW_CFA_restore %esi
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lafter_ret-.Lpop_esi
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 28
+	.byte   0x86				# DW_CFA_offset %esi
+	.uleb128 2
+	.byte   0x87				# DW_CFA_offset %edi
+	.uleb128 3
+	.byte   0x83				# DW_CFA_offset %ebx
+	.uleb128 4
+	.align	4
+.LENDFDE:
+
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	4
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+	.long	__gcc_personality_v0
+#endif
diff --git a/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S b/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
new file mode 100644
index 0000000000..69611eac5d
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
@@ -0,0 +1,67 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+
+	.text
+
+	.globl	__new_sem_trywait
+	.type	__new_sem_trywait,@function
+	.align	16
+__new_sem_trywait:
+	movl	4(%esp), %ecx
+
+	movl	(%ecx), %eax
+2:	testl	%eax, %eax
+	jz	1f
+
+	leal	-1(%eax), %edx
+	LOCK
+	cmpxchgl %edx, (%ecx)
+	jne	2b
+	xorl	%eax, %eax
+	ret
+
+1:
+#ifdef PIC
+	SETUP_PIC_REG(cx)
+#else
+	movl	$3f, %ecx
+3:
+#endif
+	addl	$_GLOBAL_OFFSET_TABLE_, %ecx
+#ifdef NO_TLS_DIRECT_SEG_REFS
+	movl	errno@gotntpoff(%ecx), %edx
+	addl	%gs:0, %edx
+	movl	$EAGAIN, (%edx)
+#else
+	movl	errno@gotntpoff(%ecx), %edx
+	movl	$EAGAIN, %gs:(%edx)
+#endif
+	orl	$-1, %eax
+	ret
+	.size	__new_sem_trywait,.-__new_sem_trywait
+	versioned_symbol(libpthread, __new_sem_trywait, sem_trywait, GLIBC_2_1)
+#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
+	.global	__old_sem_trywait
+__old_sem_trywait = __new_sem_trywait
+	compat_symbol(libpthread, __old_sem_trywait, sem_trywait, GLIBC_2_0)
+#endif
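
The whole function is a single CAS loop: take a token if the value is nonzero, otherwise fail immediately with EAGAIN.  In C11 atomics the same logic reads:

#include <errno.h>
#include <stdatomic.h>

static int
sem_trywait_sketch (_Atomic int *value)
{
  int v = atomic_load (value);
  while (v != 0)
    if (atomic_compare_exchange_weak (value, &v, v - 1))
      return 0;                 /* took a token without blocking */
  errno = EAGAIN;
  return -1;
}

On failure atomic_compare_exchange_weak reloads v, so the loop re-tests the freshly observed value, matching the jne 2b retry above.
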
diff --git a/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S b/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
new file mode 100644
index 0000000000..14d616fc91
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
@@ -0,0 +1,343 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+#include <lowlevellock.h>
+
+
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
+
+	.text
+
+	.globl	__new_sem_wait
+	.type	__new_sem_wait,@function
+	.align	16
+__new_sem_wait:
+.LSTARTCODE:
+	pushl	%ebx
+.Lpush_ebx:
+	pushl	%esi
+.Lpush_esi:
+	subl	$4, %esp
+.Lsub_esp:
+
+	movl	16(%esp), %ebx
+
+	movl	(%ebx), %eax
+2:	testl	%eax, %eax
+	je	1f
+
+	leal	-1(%eax), %edx
+	LOCK
+	cmpxchgl %edx, (%ebx)
+	jne	2b
+7:	xorl	%eax, %eax
+
+9:	movl	4(%esp), %esi
+	movl	8(%esp), %ebx
+	addl	$12, %esp
+.Ladd_esp:
+	ret
+
+.Lafter_ret:
+1:	LOCK
+	incl	NWAITERS(%ebx)
+
+.LcleanupSTART:
+6:	call	__pthread_enable_asynccancel
+	movl	%eax, (%esp)
+
+#if FUTEX_WAIT == 0
+	movl	PRIVATE(%ebx), %ecx
+#else
+	movl	$FUTEX_WAIT, %ecx
+	orl	PRIVATE(%ebx), %ecx
+#endif
+	xorl	%esi, %esi
+	xorl	%edx, %edx
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+	movl	%eax, %esi
+
+	movl	(%esp), %eax
+	call	__pthread_disable_asynccancel
+.LcleanupEND:
+
+	testl	%esi, %esi
+	je	3f
+	cmpl	$-EWOULDBLOCK, %esi
+	jne	4f
+
+3:
+	movl	(%ebx), %eax
+5:	testl	%eax, %eax
+	je	6b
+
+	leal	-1(%eax), %edx
+	LOCK
+	cmpxchgl %edx, (%ebx)
+	jne	5b
+
+	LOCK
+	decl	NWAITERS(%ebx)
+	jmp	7b
+
+4:	LOCK
+	decl	NWAITERS(%ebx)
+
+	negl	%esi
+#ifdef PIC
+	SETUP_PIC_REG(bx)
+#else
+	movl	$8f, %ebx
+8:
+#endif
+	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
+#ifdef NO_TLS_DIRECT_SEG_REFS
+	movl	errno@gotntpoff(%ebx), %edx
+	addl	%gs:0, %edx
+	movl	%esi, (%edx)
+#else
+	movl	errno@gotntpoff(%ebx), %edx
+	movl	%esi, %gs:(%edx)
+#endif
+	orl	$-1, %eax
+
+	jmp	9b
+	.size	__new_sem_wait,.-__new_sem_wait
+	versioned_symbol(libpthread, __new_sem_wait, sem_wait, GLIBC_2_1)
+
+
+	.type	sem_wait_cleanup,@function
+sem_wait_cleanup:
+	LOCK
+	decl	NWAITERS(%ebx)
+	movl	%eax, (%esp)
+.LcallUR:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	.size	sem_wait_cleanup,.-sem_wait_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	0xff				# @LPStart format (omit)
+	.byte	0xff				# @TType format (omit)
+	.byte	0x01				# call-site format
+						# DW_EH_PE_uleb128
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 sem_wait_cleanup-.LSTARTCODE
+	.uleb128  0
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128  0
+.Lcstend:
+
+
+	.section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+	.long	.LENDCIE-.LSTARTCIE		# Length of the CIE.
+.LSTARTCIE:
+	.long	0				# CIE ID.
+	.byte	1				# Version number.
+#ifdef SHARED
+	.string	"zPLR"				# NUL-terminated augmentation
+						# string.
+#else
+	.string	"zPL"				# NUL-terminated augmentation
+						# string.
+#endif
+	.uleb128 1				# Code alignment factor.
+	.sleb128 -4				# Data alignment factor.
+	.byte	8				# Return address register
+						# column.
+#ifdef SHARED
+	.uleb128 7				# Augmentation value length.
+	.byte	0x9b				# Personality: DW_EH_PE_pcrel
+						# + DW_EH_PE_sdata4
+						# + DW_EH_PE_indirect
+	.long	DW.ref.__gcc_personality_v0-.
+	.byte	0x1b				# LSDA Encoding: DW_EH_PE_pcrel
+						# + DW_EH_PE_sdata4.
+	.byte	0x1b				# FDE Encoding: DW_EH_PE_pcrel
+						# + DW_EH_PE_sdata4.
+#else
+	.uleb128 6				# Augmentation value length.
+	.byte	0x0				# Personality: absolute
+	.long	__gcc_personality_v0
+	.byte	0x0				# LSDA Encoding: absolute
+#endif
+	.byte 0x0c				# DW_CFA_def_cfa
+	.uleb128 4
+	.uleb128 4
+	.byte	0x88				# DW_CFA_offset, column 0x10
+	.uleb128 1
+	.align 4
+.LENDCIE:
+
+	.long	.LENDFDE-.LSTARTFDE		# Length of the FDE.
+.LSTARTFDE:
+	.long	.LSTARTFDE-.LSTARTFRAME		# CIE pointer.
+#ifdef SHARED
+	.long	.LSTARTCODE-.			# PC-relative start address
+						# of the code.
+#else
+	.long	.LSTARTCODE			# Start address of the code.
+#endif
+	.long	.LENDCODE-.LSTARTCODE		# Length of the code.
+	.uleb128 4				# Augmentation size
+#ifdef SHARED
+	.long	.LexceptSTART-.
+#else
+	.long	.LexceptSTART
+#endif
+
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lpush_ebx-.LSTARTCODE
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 8
+	.byte   0x83				# DW_CFA_offset %ebx
+	.uleb128 2
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lpush_esi-.Lpush_ebx
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 12
+	.byte   0x86				# DW_CFA_offset %esi
+	.uleb128 3
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lsub_esp-.Lpush_esi
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 16
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Ladd_esp-.Lsub_esp
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 4
+	.byte	0xc3				# DW_CFA_restore %ebx
+	.byte	0xc6				# DW_CFA_restore %esi
+	.byte	4				# DW_CFA_advance_loc4
+	.long	.Lafter_ret-.Ladd_esp
+	.byte	14				# DW_CFA_def_cfa_offset
+	.uleb128 16
+	.byte   0x83				# DW_CFA_offset %ebx
+	.uleb128 2
+	.byte   0x86				# DW_CFA_offset %esi
+	.uleb128 3
+	.align	4
+.LENDFDE:
+
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	4
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+	.long	__gcc_personality_v0
+#endif
+
+
+#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
+	.section ".text.compat", "ax"
+	.global	__old_sem_wait
+	.type	__old_sem_wait,@function
+	.align	16
+	cfi_startproc
+__old_sem_wait:
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	subl	$4, %esp
+	cfi_adjust_cfa_offset(4)
+
+	movl	16(%esp), %ebx
+	cfi_offset(ebx, -8)
+
+	cfi_offset(esi, -12)
+3:	movl	(%ebx), %eax
+2:	testl	%eax, %eax
+	je	1f
+
+	leal	-1(%eax), %edx
+	LOCK
+	cmpxchgl %edx, (%ebx)
+	jne	2b
+	xorl	%eax, %eax
+
+5:	movl	4(%esp), %esi
+	movl	8(%esp), %ebx
+	addl	$12, %esp
+	cfi_restore(ebx)
+	cfi_restore(esi)
+	cfi_adjust_cfa_offset(-12)
+	ret
+
+	cfi_adjust_cfa_offset(12)
+	cfi_offset(ebx, -8)
+	cfi_offset(esi, -12)
+1:	call	__pthread_enable_asynccancel
+	movl	%eax, (%esp)
+
+	xorl	%esi, %esi
+	movl	$SYS_futex, %eax
+	movl	%esi, %ecx
+	movl	%esi, %edx
+	ENTER_KERNEL
+	movl	%eax, %esi
+
+	movl	(%esp), %eax
+	call	__pthread_disable_asynccancel
+
+	testl	%esi, %esi
+	je	3b
+	cmpl	$-EWOULDBLOCK, %esi
+	je	3b
+	negl	%esi
+#ifdef PIC
+	SETUP_PIC_REG(bx)
+#else
+	movl	$4f, %ebx
+4:
+#endif
+	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
+#ifdef NO_TLS_DIRECT_SEG_REFS
+	movl	errno@gotntpoff(%ebx), %edx
+	addl	%gs:0, %edx
+	movl	%esi, (%edx)
+#else
+	movl	errno@gotntpoff(%ebx), %edx
+	movl	%esi, %gs:(%edx)
+#endif
+	orl	$-1, %eax
+	jmp	5b
+	cfi_endproc
+	.size	__old_sem_wait,.-__old_sem_wait
+	compat_symbol(libpthread, __old_sem_wait, sem_wait, GLIBC_2_0)
+#endif
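
Putting the pieces of __new_sem_wait together: a CAS fast path, then registration in NWAITERS and a futex wait while the value is observed as zero, with 0 and -EWOULDBLOCK syscall results treated as cues to look again and anything else turned into errno.  The C sketch below assumes the field names and omits the asynchronous-cancellation bracketing the assembly performs around ENTER_KERNEL:

#include <errno.h>
#include <stdatomic.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct sem_sketch
{
  _Atomic int value;
  _Atomic int nwaiters;
  int private_flag;             /* 0 or FUTEX_PRIVATE_FLAG */
};

static int
sem_wait_sketch (struct sem_sketch *s)
{
  int v = atomic_load (&s->value);
  while (v != 0)                /* fast path: no kernel entry */
    if (atomic_compare_exchange_weak (&s->value, &v, v - 1))
      return 0;

  atomic_fetch_add (&s->nwaiters, 1);
  for (;;)
    {
      /* Sleep while the value is 0; EWOULDBLOCK means it changed
         before we slept, so simply retry the CAS.  */
      long rc = syscall (SYS_futex, &s->value,
                         FUTEX_WAIT | s->private_flag, 0, NULL, NULL, 0);
      if (rc != 0 && errno != EWOULDBLOCK)
        {
          atomic_fetch_sub (&s->nwaiters, 1);
          return -1;            /* errno set by the syscall */
        }
      v = atomic_load (&s->value);
      while (v != 0)
        if (atomic_compare_exchange_weak (&s->value, &v, v - 1))
          {
            atomic_fetch_sub (&s->nwaiters, 1);
            return 0;
          }
    }
}
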