author     Roland McGrath <roland@hack.frob.com>   2014-05-14 12:33:43 -0700
committer  Roland McGrath <roland@hack.frob.com>   2014-05-14 12:33:43 -0700
commit     c96067bce58414214474bb5b42be86ffbf525670 (patch)
tree       f9513abfd26395bc43c7c62a17256f05230a3ae9 /sysdeps
parent     ec136444ca77f7a64e2c3616fc560b6410a767d2 (diff)
Move remaining nptl/sysdeps/unix/sysv/linux/x86_64/ files.
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/cancellation.S                  117
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S              21
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S              19
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S             21
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevellock.S                  462
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevellock.h                  476
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S            306
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S          160
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S        179
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S           164
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S        840
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S             555
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_once.S                  193
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S         177
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S    274
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S    266
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S         126
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S         165
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c            14
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/sem_post.S                       75
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S                 380
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/sem_trywait.S                    47
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/sem_wait.S                      176
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h                 109
24 files changed, 5322 insertions, 0 deletions
diff --git a/sysdeps/unix/sysv/linux/x86_64/cancellation.S b/sysdeps/unix/sysv/linux/x86_64/cancellation.S
new file mode 100644
index 0000000000..89fda5efeb
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/cancellation.S
@@ -0,0 +1,117 @@
+/* Copyright (C) 2009-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <tcb-offsets.h>
+#include <kernel-features.h>
+#include "lowlevellock.h"
+
+#ifdef IS_IN_libpthread
+# if defined SHARED && !defined NO_HIDDEN
+#  define __pthread_unwind __GI___pthread_unwind
+# endif
+#else
+# ifndef SHARED
+	.weak __pthread_unwind
+# endif
+#endif
+
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%fs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
+# endif
+#endif
+
+/* It is crucial that the functions in this file don't modify registers
+   other than %rax and %r11.  The syscall wrapper code depends on this
+   because it doesn't explicitly save the other registers which hold
+   relevant values.  */
+	.text
+
+	.hidden __pthread_enable_asynccancel
+ENTRY(__pthread_enable_asynccancel)
+	movl	%fs:CANCELHANDLING, %eax
+2:	movl	%eax, %r11d
+	orl	$TCB_CANCELTYPE_BITMASK, %r11d
+	cmpl	%eax, %r11d
+	je	1f
+
+	lock
+	cmpxchgl %r11d, %fs:CANCELHANDLING
+	jnz	2b
+
+	andl	$(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
+	cmpl	$(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
+	je	3f
+
+1:	ret
+
+3:	subq	$8, %rsp
+	cfi_adjust_cfa_offset(8)
+	LP_OP(mov) $TCB_PTHREAD_CANCELED, %fs:RESULT
+	lock
+	orl	$TCB_EXITING_BITMASK, %fs:CANCELHANDLING
+	mov	%fs:CLEANUP_JMP_BUF, %RDI_LP
+#ifdef SHARED
+	call	__pthread_unwind@PLT
+#else
+	call	__pthread_unwind
+#endif
+	hlt
+END(__pthread_enable_asynccancel)
+
+
+	.hidden __pthread_disable_asynccancel
+ENTRY(__pthread_disable_asynccancel)
+	testl	$TCB_CANCELTYPE_BITMASK, %edi
+	jnz	1f
+
+	movl	%fs:CANCELHANDLING, %eax
+2:	movl	%eax, %r11d
+	andl	$~TCB_CANCELTYPE_BITMASK, %r11d
+	lock
+	cmpxchgl %r11d, %fs:CANCELHANDLING
+	jnz	2b
+
+	movl	%r11d, %eax
+3:	andl	$(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
+	cmpl	$TCB_CANCELING_BITMASK, %eax
+	je	4f
+1:	ret
+
+	/* Performance doesn't matter in this loop.  We will
+	   delay until the thread is canceled.  And it is unlikely
+	   that we enter the loop twice.  */
+4:	mov	%fs:0, %RDI_LP
+	movl	$__NR_futex, %eax
+	xorq	%r10, %r10
+	addq	$CANCELHANDLING, %rdi
+	LOAD_PRIVATE_FUTEX_WAIT (%esi)
+	syscall
+	movl	%fs:CANCELHANDLING, %eax
+	jmp	3b
+END(__pthread_disable_asynccancel)
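
For orientation, the two entry points above are the slow paths behind the
cancellation macros: enabling asynchronous cancellation may have to act on a
cancellation that is already pending.  A minimal C sketch of the enable path
follows; the bit values, the helper name, and the simplified pending test are
illustrative stand-ins for the TCB_*_BITMASK constants and the
__pthread_unwind call, not glibc code.

#include <stdatomic.h>

#define CANCELTYPE_BIT 0x01	/* stand-in for TCB_CANCELTYPE_BITMASK */
#define CANCELED_BIT   0x02	/* stand-in for TCB_CANCELED_BITMASK */

static void act_on_pending_cancellation (void)
{
  /* Placeholder: the real code stores PTHREAD_CANCELED into the result
     slot, sets the exiting bit, and calls __pthread_unwind.  */
}

/* Set the async-cancel-type bit with a CAS loop, mirroring the
   cmpxchgl loop in __pthread_enable_asynccancel, and return the old
   flag word (the caller later hands it to the disable function).  */
static int enable_asynccancel (_Atomic int *cancelhandling)
{
  int old = atomic_load (cancelhandling);
  for (;;)
    {
      int new = old | CANCELTYPE_BIT;
      if (new == old)
	return old;		/* Bit already set; nothing to do.  */
      if (atomic_compare_exchange_weak (cancelhandling, &old, new))
	{
	  /* Simplified pending test; the assembly also checks the
	     cancel-state, exiting, and terminated bits.  */
	  if ((new & CANCELED_BIT) != 0)
	    act_on_pending_cancellation ();
	  return old;
	}
      /* CAS failed; OLD was reloaded by the compare-exchange.  */
    }
}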
diff --git a/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S b/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
new file mode 100644
index 0000000000..019e22fc89
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
@@ -0,0 +1,21 @@
+/* Copyright (C) 2009-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define __pthread_enable_asynccancel __libc_enable_asynccancel
+#define __pthread_disable_asynccancel __libc_disable_asynccancel
+#include "cancellation.S"
diff --git a/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
new file mode 100644
index 0000000000..83e523174f
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
@@ -0,0 +1,19 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "lowlevellock.S"
diff --git a/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S b/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
new file mode 100644
index 0000000000..02892effa7
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
@@ -0,0 +1,21 @@
+/* Copyright (C) 2009-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define __pthread_enable_asynccancel __librt_enable_asynccancel
+#define __pthread_disable_asynccancel __librt_disable_asynccancel
+#include "cancellation.S"
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
new file mode 100644
index 0000000000..f2dca070f3
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
@@ -0,0 +1,462 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
+
+#include <stap-probe.h>
+
+	.text
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+	movl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+	xorl	$(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+# define LOAD_FUTEX_WAKE(reg) \
+	xorl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl    %fs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+	movl    %fs:PRIVATE_FUTEX, reg ; \
+	orl     $FUTEX_WAKE, reg
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+# define LOAD_FUTEX_WAKE(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAKE, reg
+#endif
+
+
+	.globl	__lll_lock_wait_private
+	.type	__lll_lock_wait_private,@function
+	.hidden	__lll_lock_wait_private
+	.align	16
+__lll_lock_wait_private:
+	cfi_startproc
+	pushq	%r10
+	cfi_adjust_cfa_offset(8)
+	pushq	%rdx
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%r10, -16)
+	cfi_offset(%rdx, -24)
+	xorq	%r10, %r10	/* No timeout.  */
+	movl	$2, %edx
+	LOAD_PRIVATE_FUTEX_WAIT (%esi)
+
+	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
+	jne	2f
+
+1:	LIBC_PROBE (lll_lock_wait_private, 1, %rdi)
+	movl	$SYS_futex, %eax
+	syscall
+
+2:	movl	%edx, %eax
+	xchgl	%eax, (%rdi)	/* NB:	 lock is implied */
+
+	testl	%eax, %eax
+	jnz	1b
+
+	popq	%rdx
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rdx)
+	popq	%r10
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r10)
+	retq
+	cfi_endproc
+	.size	__lll_lock_wait_private,.-__lll_lock_wait_private
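
This is the slow path of the classic 0/1/2 futex lock (0 = free, 1 = locked,
2 = locked with waiters).  A C sketch of the same loop, assuming Linux and
GCC atomic builtins (the assembly additionally skips the first xchg when the
value it was handed is already 2):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void lll_lock_wait_private (int *futex)
{
  /* Mark the lock contended and sleep while it still reads 2; the
     xchg returning 0 means the previous owner released the lock.  */
  while (__atomic_exchange_n (futex, 2, __ATOMIC_ACQUIRE) != 0)
    syscall (SYS_futex, futex, FUTEX_WAIT_PRIVATE, 2, NULL);
}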
+
+#ifdef NOT_IN_libc
+	.globl	__lll_lock_wait
+	.type	__lll_lock_wait,@function
+	.hidden	__lll_lock_wait
+	.align	16
+__lll_lock_wait:
+	cfi_startproc
+	pushq	%r10
+	cfi_adjust_cfa_offset(8)
+	pushq	%rdx
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%r10, -16)
+	cfi_offset(%rdx, -24)
+	xorq	%r10, %r10	/* No timeout.  */
+	movl	$2, %edx
+	LOAD_FUTEX_WAIT (%esi)
+
+	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
+	jne	2f
+
+1:	LIBC_PROBE (lll_lock_wait, 2, %rdi, %rsi)
+	movl	$SYS_futex, %eax
+	syscall
+
+2:	movl	%edx, %eax
+	xchgl	%eax, (%rdi)	/* NB:	 lock is implied */
+
+	testl	%eax, %eax
+	jnz	1b
+
+	popq	%rdx
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rdx)
+	popq	%r10
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r10)
+	retq
+	cfi_endproc
+	.size	__lll_lock_wait,.-__lll_lock_wait
+
+	/*      %rdi: futex
+		%rsi: flags
+		%rdx: timeout
+		%eax: futex value
+	*/
+	.globl	__lll_timedlock_wait
+	.type	__lll_timedlock_wait,@function
+	.hidden	__lll_timedlock_wait
+	.align	16
+__lll_timedlock_wait:
+	cfi_startproc
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+	cmpl	$0, __have_futex_clock_realtime(%rip)
+#  else
+	cmpl	$0, __have_futex_clock_realtime
+#  endif
+	je	.Lreltmo
+# endif
+
+	cmpq	$0, (%rdx)
+	js	5f
+
+	pushq	%r9
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r9, 0)
+
+	movq	%rdx, %r10
+	movl	$0xffffffff, %r9d
+	LOAD_FUTEX_WAIT_ABS (%esi)
+
+	movl	$2, %edx
+	cmpl	%edx, %eax
+	jne	2f
+
+1:	movl	$SYS_futex, %eax
+	movl	$2, %edx
+	syscall
+
+2:	xchgl	%edx, (%rdi)	/* NB:   lock is implied */
+
+	testl	%edx, %edx
+	jz	3f
+
+	cmpl	$-ETIMEDOUT, %eax
+	je	4f
+	cmpl	$-EINVAL, %eax
+	jne	1b
+4:	movl	%eax, %edx
+	negl	%edx
+
+3:	movl	%edx, %eax
+	popq	%r9
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r9)
+	retq
+
+5:	movl	$ETIMEDOUT, %eax
+	retq
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+	/* Check for a valid timeout value.  */
+	cmpq	$1000000000, 8(%rdx)
+	jae	3f
+
+	pushq	%r8
+	cfi_adjust_cfa_offset(8)
+	pushq	%r9
+	cfi_adjust_cfa_offset(8)
+	pushq	%r12
+	cfi_adjust_cfa_offset(8)
+	pushq	%r13
+	cfi_adjust_cfa_offset(8)
+	pushq	%r14
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%r8, -16)
+	cfi_offset(%r9, -24)
+	cfi_offset(%r12, -32)
+	cfi_offset(%r13, -40)
+	cfi_offset(%r14, -48)
+	pushq	%rsi
+	cfi_adjust_cfa_offset(8)
+
+	/* Stack frame for the timespec and timeval structs.  */
+	subq	$24, %rsp
+	cfi_adjust_cfa_offset(24)
+
+	movq	%rdi, %r12
+	movq	%rdx, %r13
+
+	movl	$2, %edx
+	xchgl	%edx, (%r12)
+
+	testl	%edx, %edx
+	je	6f
+
+1:
+	/* Get current time.  */
+	movq	%rsp, %rdi
+	xorl	%esi, %esi
+	/* This call works because we directly jump to a system call entry
+	   which preserves all the registers.  */
+	call	JUMPTARGET(__gettimeofday)
+
+	/* Compute relative timeout.  */
+	movq	8(%rsp), %rax
+	movl	$1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+	movq	(%r13), %rdi
+	movq	8(%r13), %rsi
+	subq	(%rsp), %rdi
+	subq	%rax, %rsi
+	jns	4f
+	addq	$1000000000, %rsi
+	decq	%rdi
+4:	testq	%rdi, %rdi
+	js	2f		/* Time is already up.  */
+
+	/* Store relative timeout.  */
+	movq	%rdi, (%rsp)
+	movq	%rsi, 8(%rsp)
+
+	/* Futex call.  */
+	movl	$2, %edx
+	movl	$1, %eax
+	movq	%rsp, %r10
+	movl	24(%rsp), %esi
+	LOAD_FUTEX_WAIT (%esi)
+	movq	%r12, %rdi
+	movl	$SYS_futex, %eax
+	syscall
+
+	/* NB: %edx == 2 */
+	xchgl	%edx, (%r12)
+
+	testl	%edx, %edx
+	je	6f
+
+	cmpl	$-ETIMEDOUT, %eax
+	jne	1b
+2:	movl	$ETIMEDOUT, %edx
+
+6:	addq	$32, %rsp
+	cfi_adjust_cfa_offset(-32)
+	popq	%r14
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r14)
+	popq	%r13
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r13)
+	popq	%r12
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r12)
+	popq	%r9
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r9)
+	popq	%r8
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r8)
+	movl	%edx, %eax
+	retq
+
+3:	movl	$EINVAL, %eax
+	retq
+# endif
+	cfi_endproc
+	.size	__lll_timedlock_wait,.-__lll_timedlock_wait
+#endif
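
The .Lreltmo fallback above exists because plain FUTEX_WAIT accepts only a
relative timeout, while the preferred FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME
path passes the absolute deadline straight to the kernel.  The conversion it
performs, as a C sketch:

#include <sys/time.h>
#include <time.h>

/* Fill REL with ABSTIME minus the current time; return -1 if the
   deadline has already passed (the assembly then returns ETIMEDOUT).  */
static int absolute_to_relative (const struct timespec *abstime,
				 struct timespec *rel)
{
  struct timeval now;
  gettimeofday (&now, NULL);
  rel->tv_sec = abstime->tv_sec - now.tv_sec;
  rel->tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;	/* us -> ns */
  if (rel->tv_nsec < 0)
    {
      rel->tv_nsec += 1000000000;
      --rel->tv_sec;
    }
  return rel->tv_sec < 0 ? -1 : 0;
}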
+
+
+	.globl	__lll_unlock_wake_private
+	.type	__lll_unlock_wake_private,@function
+	.hidden	__lll_unlock_wake_private
+	.align	16
+__lll_unlock_wake_private:
+	cfi_startproc
+	pushq	%rsi
+	cfi_adjust_cfa_offset(8)
+	pushq	%rdx
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%rsi, -16)
+	cfi_offset(%rdx, -24)
+
+	movl	$0, (%rdi)
+	LOAD_PRIVATE_FUTEX_WAKE (%esi)
+	movl	$1, %edx	/* Wake one thread.  */
+	movl	$SYS_futex, %eax
+	syscall
+
+	popq	%rdx
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rdx)
+	popq	%rsi
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rsi)
+	retq
+	cfi_endproc
+	.size	__lll_unlock_wake_private,.-__lll_unlock_wake_private
+
+#ifdef NOT_IN_libc
+	.globl	__lll_unlock_wake
+	.type	__lll_unlock_wake,@function
+	.hidden	__lll_unlock_wake
+	.align	16
+__lll_unlock_wake:
+	cfi_startproc
+	pushq	%rsi
+	cfi_adjust_cfa_offset(8)
+	pushq	%rdx
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%rsi, -16)
+	cfi_offset(%rdx, -24)
+
+	movl	$0, (%rdi)
+	LOAD_FUTEX_WAKE (%esi)
+	movl	$1, %edx	/* Wake one thread.  */
+	movl	$SYS_futex, %eax
+	syscall
+
+	popq	%rdx
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rdx)
+	popq	%rsi
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rsi)
+	retq
+	cfi_endproc
+	.size	__lll_unlock_wake,.-__lll_unlock_wake
+
+	.globl	__lll_timedwait_tid
+	.type	__lll_timedwait_tid,@function
+	.hidden	__lll_timedwait_tid
+	.align	16
+__lll_timedwait_tid:
+	cfi_startproc
+	pushq	%r12
+	cfi_adjust_cfa_offset(8)
+	pushq	%r13
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%r12, -16)
+	cfi_offset(%r13, -24)
+
+	movq	%rdi, %r12
+	movq	%rsi, %r13
+
+	subq	$16, %rsp
+	cfi_adjust_cfa_offset(16)
+
+	/* Get current time.  */
+2:	movq	%rsp, %rdi
+	xorl	%esi, %esi
+	/* This call works because we directly jump to a system call entry
+	   which preserves all the registers.  */
+	call	JUMPTARGET(__gettimeofday)
+
+	/* Compute relative timeout.  */
+	movq	8(%rsp), %rax
+	movl	$1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+	movq	(%r13), %rdi
+	movq	8(%r13), %rsi
+	subq	(%rsp), %rdi
+	subq	%rax, %rsi
+	jns	5f
+	addq	$1000000000, %rsi
+	decq	%rdi
+5:	testq	%rdi, %rdi
+	js	6f		/* Time is already up.  */
+
+	movq	%rdi, (%rsp)	/* Store relative timeout.  */
+	movq	%rsi, 8(%rsp)
+
+	movl	(%r12), %edx
+	testl	%edx, %edx
+	jz	4f
+
+	movq	%rsp, %r10
+	/* XXX The kernel so far uses global futex for the wakeup at
+	   all times.  */
+#if FUTEX_WAIT == 0
+	xorl	%esi, %esi
+#else
+	movl	$FUTEX_WAIT, %esi
+#endif
+	movq	%r12, %rdi
+	movl	$SYS_futex, %eax
+	syscall
+
+	cmpl	$0, (%rdi)
+	jne	1f
+4:	xorl	%eax, %eax
+
+8:	addq	$16, %rsp
+	cfi_adjust_cfa_offset(-16)
+	popq	%r13
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r13)
+	popq	%r12
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r12)
+	retq
+
+	cfi_adjust_cfa_offset(32)
+1:	cmpq	$-ETIMEDOUT, %rax
+	jne	2b
+
+6:	movl	$ETIMEDOUT, %eax
+	jmp	8b
+	cfi_endproc
+	.size	__lll_timedwait_tid,.-__lll_timedwait_tid
+#endif
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
new file mode 100644
index 0000000000..c13ca084df
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -0,0 +1,476 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _LOWLEVELLOCK_H
+#define _LOWLEVELLOCK_H	1
+
+#include <stap-probe.h>
+
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+#  ifdef UP
+#   define LOCK_INSTR	/* nothing */
+#  else
+#   define LOCK_INSTR "lock;"
+#  endif
+# endif
+#else
+# ifndef LOCK
+#  ifdef UP
+#   define LOCK
+#  else
+#   define LOCK lock
+#  endif
+# endif
+#endif
+
+#define SYS_futex		__NR_futex
+#define FUTEX_WAIT		0
+#define FUTEX_WAKE		1
+#define FUTEX_CMP_REQUEUE	4
+#define FUTEX_WAKE_OP		5
+#define FUTEX_LOCK_PI		6
+#define FUTEX_UNLOCK_PI		7
+#define FUTEX_TRYLOCK_PI	8
+#define FUTEX_WAIT_BITSET	9
+#define FUTEX_WAKE_BITSET	10
+#define FUTEX_WAIT_REQUEUE_PI	11
+#define FUTEX_CMP_REQUEUE_PI	12
+#define FUTEX_PRIVATE_FLAG	128
+#define FUTEX_CLOCK_REALTIME	256
+
+#define FUTEX_BITSET_MATCH_ANY	0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
+
+/* Values for the 'private' parameter of the locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit is
+   reversed before being passed to the system call.  */
+#define LLL_PRIVATE	0
+#define LLL_SHARED	FUTEX_PRIVATE_FLAG
+
+#ifndef __ASSEMBLER__
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)					      \
+   ? ((private) == 0							      \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))	      \
+      : (fl))								      \
+   : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG);		      \
+	asm ("andl %%fs:%P1, %0" : "+r" (__fl)				      \
+	     : "i" (offsetof (struct pthread, header.private_futex)));	      \
+	__fl | (fl); }))
+# endif
+#endif
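
The apparently backwards LLL_SHARED definition pays off in
__lll_private_flag: the private flag is unconditionally ORed in and then
XORed with the private argument, so LLL_PRIVATE (0) leaves it set while
LLL_SHARED (== FUTEX_PRIVATE_FLAG) cancels it again.  A standalone
self-check of that identity:

#include <assert.h>

#define FUTEX_WAIT		0
#define FUTEX_PRIVATE_FLAG	128
#define LLL_PRIVATE		0
#define LLL_SHARED		FUTEX_PRIVATE_FLAG

int main (void)
{
  /* Private futex: the flag survives the XOR.  */
  assert (((FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ LLL_PRIVATE)
	  == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
  /* Process-shared futex: the XOR strips the flag again.  */
  assert (((FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ LLL_SHARED) == FUTEX_WAIT);
  return 0;
}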
+
+/* Initializer for lock.  */
+#define LLL_LOCK_INITIALIZER		(0)
+#define LLL_LOCK_INITIALIZER_LOCKED	(1)
+#define LLL_LOCK_INITIALIZER_WAITERS	(2)
+
+/* Delay in spinlock loop.  */
+#define BUSY_WAIT_NOP	  asm ("rep; nop")
+
+#define lll_futex_wait(futex, val, private) \
+  lll_futex_timed_wait(futex, val, NULL, private)
+
+
+#define lll_futex_timed_wait(futex, val, timeout, private) \
+  ({									      \
+    register const struct timespec *__to __asm ("r10") = timeout;	      \
+    int __status;							      \
+    register __typeof (val) _val __asm ("edx") = (val);			      \
+    __asm __volatile ("syscall"						      \
+		      : "=a" (__status)					      \
+		      : "0" (SYS_futex), "D" (futex),			      \
+			"S" (__lll_private_flag (FUTEX_WAIT, private)),	      \
+			"d" (_val), "r" (__to)				      \
+		      : "memory", "cc", "r11", "cx");			      \
+    __status;								      \
+  })
+
+
+#define lll_futex_wake(futex, nr, private) \
+  ({									      \
+    int __status;							      \
+    register __typeof (nr) _nr __asm ("edx") = (nr);			      \
+    LIBC_PROBE (lll_futex_wake, 3, futex, nr, private);                       \
+    __asm __volatile ("syscall"						      \
+		      : "=a" (__status)					      \
+		      : "0" (SYS_futex), "D" (futex),			      \
+			"S" (__lll_private_flag (FUTEX_WAKE, private)),	      \
+			"d" (_nr)					      \
+		      : "memory", "cc", "r10", "r11", "cx");		      \
+    __status;								      \
+  })
+
+
+/* NB: in the lll_trylock macro we simply return the value in %eax
+   after the cmpxchg instruction.  In case the operation succeeded this
+   value is zero.  In case the operation failed, the cmpxchg instruction
+   has loaded the current value of the memory word, which is guaranteed
+   to be nonzero.  */
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t"      \
+			   "je 0f\n\t"					      \
+			   "lock; cmpxchgl %2, %1\n\t"			      \
+			   "jmp 1f\n\t"					      \
+			   "0:\tcmpxchgl %2, %1\n\t"			      \
+			   "1:"
+#endif
+
+#define lll_trylock(futex) \
+  ({ int ret;								      \
+     __asm __volatile (__lll_trylock_asm				      \
+		       : "=a" (ret), "=m" (futex)			      \
+		       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
+			 "0" (LLL_LOCK_INITIALIZER)			      \
+		       : "memory");					      \
+     ret; })
+
+#define lll_robust_trylock(futex, id) \
+  ({ int ret;								      \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
+		       : "=a" (ret), "=m" (futex)			      \
+		       : "r" (id), "m" (futex),	"0" (LLL_LOCK_INITIALIZER)    \
+		       : "memory");					      \
+     ret; })
+
+#define lll_cond_trylock(futex) \
+  ({ int ret;								      \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
+		       : "=a" (ret), "=m" (futex)			      \
+		       : "r" (LLL_LOCK_INITIALIZER_WAITERS),		      \
+			 "m" (futex), "0" (LLL_LOCK_INITIALIZER)	      \
+		       : "memory");					      \
+     ret; })
+
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
+			      "jz 24f\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t"   \
+			      "je 0f\n\t"				      \
+			      "lock; cmpxchgl %4, %2\n\t"		      \
+			      "jnz 1f\n\t"				      \
+			      "jmp 24f\n"				      \
+			      "0:\tcmpxchgl %4, %2\n\t"			      \
+			      "jz 24f\n\t"
+#endif
+
+#define lll_lock(futex, private) \
+  (void)								      \
+    ({ int ignore1, ignore2, ignore3;					      \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
+	 __asm __volatile (__lll_lock_asm_start				      \
+			   "1:\tlea %2, %%" RDI_LP "\n"			      \
+			   "2:\tsub $128, %%" RSP_LP "\n"		      \
+			   ".cfi_adjust_cfa_offset 128\n"		      \
+			   "3:\tcallq __lll_lock_wait_private\n"	      \
+			   "4:\tadd $128, %%" RSP_LP "\n"		      \
+			   ".cfi_adjust_cfa_offset -128\n"		      \
+			   "24:"					      \
+			   : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),   \
+			     "=a" (ignore3)				      \
+			   : "0" (1), "m" (futex), "3" (0)		      \
+			   : "cx", "r11", "cc", "memory");		      \
+       else								      \
+	 __asm __volatile (__lll_lock_asm_start				      \
+			   "1:\tlea %2, %%" RDI_LP "\n"			      \
+			   "2:\tsub $128, %%" RSP_LP "\n"		      \
+			   ".cfi_adjust_cfa_offset 128\n"		      \
+			   "3:\tcallq __lll_lock_wait\n"		      \
+			   "4:\tadd $128, %%" RSP_LP "\n"		      \
+			   ".cfi_adjust_cfa_offset -128\n"		      \
+			   "24:"					      \
+			   : "=S" (ignore1), "=D" (ignore2), "=m" (futex),    \
+			     "=a" (ignore3)				      \
+			   : "1" (1), "m" (futex), "3" (0), "0" (private)     \
+			   : "cx", "r11", "cc", "memory");		      \
+    })									      \
+
+#define lll_robust_lock(futex, id, private) \
+  ({ int result, ignore1, ignore2;					      \
+    __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"			      \
+		      "jz 24f\n"					      \
+		      "1:\tlea %2, %%" RDI_LP "\n"			      \
+		      "2:\tsub $128, %%" RSP_LP "\n"			      \
+		      ".cfi_adjust_cfa_offset 128\n"			      \
+		      "3:\tcallq __lll_robust_lock_wait\n"		      \
+		      "4:\tadd $128, %%" RSP_LP "\n"			      \
+		      ".cfi_adjust_cfa_offset -128\n"			      \
+		      "24:"						      \
+		      : "=S" (ignore1), "=D" (ignore2), "=m" (futex),	      \
+			"=a" (result)					      \
+		      : "1" (id), "m" (futex), "3" (0), "0" (private)	      \
+		      : "cx", "r11", "cc", "memory");			      \
+    result; })
+
+#define lll_cond_lock(futex, private) \
+  (void)								      \
+    ({ int ignore1, ignore2, ignore3;					      \
+       __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
+			 "jz 24f\n"					      \
+			 "1:\tlea %2, %%" RDI_LP "\n"			      \
+			 "2:\tsub $128, %%" RSP_LP "\n"			      \
+			 ".cfi_adjust_cfa_offset 128\n"			      \
+			 "3:\tcallq __lll_lock_wait\n"			      \
+			 "4:\tadd $128, %%" RSP_LP "\n"			      \
+			 ".cfi_adjust_cfa_offset -128\n"		      \
+			 "24:"						      \
+			 : "=S" (ignore1), "=D" (ignore2), "=m" (futex),      \
+			   "=a" (ignore3)				      \
+			 : "1" (2), "m" (futex), "3" (0), "0" (private)	      \
+			 : "cx", "r11", "cc", "memory");		      \
+    })
+
+#define lll_robust_cond_lock(futex, id, private) \
+  ({ int result, ignore1, ignore2;					      \
+    __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"			      \
+		      "jz 24f\n"					      \
+		      "1:\tlea %2, %%" RDI_LP "\n"			      \
+		      "2:\tsub $128, %%" RSP_LP "\n"			      \
+		      ".cfi_adjust_cfa_offset 128\n"			      \
+		      "3:\tcallq __lll_robust_lock_wait\n"		      \
+		      "4:\tadd $128, %%" RSP_LP "\n"			      \
+		      ".cfi_adjust_cfa_offset -128\n"			      \
+		      "24:"						      \
+		      : "=S" (ignore1), "=D" (ignore2), "=m" (futex),	      \
+			"=a" (result)					      \
+		      : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0),	      \
+			"0" (private)					      \
+		      : "cx", "r11", "cc", "memory");			      \
+    result; })
+
+#define lll_timedlock(futex, timeout, private) \
+  ({ int result, ignore1, ignore2, ignore3;				      \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t"			      \
+		       "jz 24f\n"					      \
+		       "1:\tlea %4, %%" RDI_LP "\n"			      \
+		       "0:\tmov %8, %%" RDX_LP "\n"			      \
+		       "2:\tsub $128, %%" RSP_LP "\n"			      \
+		       ".cfi_adjust_cfa_offset 128\n"			      \
+		       "3:\tcallq __lll_timedlock_wait\n"		      \
+		       "4:\tadd $128, %%" RSP_LP "\n"			      \
+		       ".cfi_adjust_cfa_offset -128\n"			      \
+		       "24:"						      \
+		       : "=a" (result), "=D" (ignore1), "=S" (ignore2),	      \
+			 "=&d" (ignore3), "=m" (futex)			      \
+		       : "0" (0), "1" (1), "m" (futex), "m" (timeout),	      \
+			 "2" (private)					      \
+		       : "memory", "cx", "cc", "r10", "r11");		      \
+     result; })
+
+extern int __lll_timedlock_elision (int *futex, short *adapt_count,
+					 const struct timespec *timeout,
+					 int private) attribute_hidden;
+
+#define lll_timedlock_elision(futex, adapt_count, timeout, private)	\
+  __lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
+
+#define lll_robust_timedlock(futex, timeout, id, private) \
+  ({ int result, ignore1, ignore2, ignore3;				      \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t"			      \
+		       "jz 24f\n\t"					      \
+		       "1:\tlea %4, %%" RDI_LP "\n"			      \
+		       "0:\tmov %8, %%" RDX_LP "\n"			      \
+		       "2:\tsub $128, %%" RSP_LP "\n"			      \
+		       ".cfi_adjust_cfa_offset 128\n"			      \
+		       "3:\tcallq __lll_robust_timedlock_wait\n"	      \
+		       "4:\tadd $128, %%" RSP_LP "\n"			      \
+		       ".cfi_adjust_cfa_offset -128\n"			      \
+		       "24:"						      \
+		       : "=a" (result), "=D" (ignore1), "=S" (ignore2),       \
+			 "=&d" (ignore3), "=m" (futex)			      \
+		       : "0" (0), "1" (id), "m" (futex), "m" (timeout),	      \
+			 "2" (private)					      \
+		       : "memory", "cx", "cc", "r10", "r11");		      \
+     result; })
+
+#if defined NOT_IN_libc || defined UP
+# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t"		      \
+				"je 24f\n\t"
+#else
+# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+				"je 0f\n\t"				      \
+				"lock; decl %0\n\t"			      \
+				"jne 1f\n\t"				      \
+				"jmp 24f\n\t"				      \
+				"0:\tdecl %0\n\t"			      \
+				"je 24f\n\t"
+#endif
+
+#define lll_unlock(futex, private) \
+  (void)								      \
+    ({ int ignore;							      \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
+	 __asm __volatile (__lll_unlock_asm_start			      \
+			   "1:\tlea %0, %%" RDI_LP "\n"			      \
+			   "2:\tsub $128, %%" RSP_LP "\n"		      \
+			   ".cfi_adjust_cfa_offset 128\n"		      \
+			   "3:\tcallq __lll_unlock_wake_private\n"	      \
+			   "4:\tadd $128, %%" RSP_LP "\n"		      \
+			   ".cfi_adjust_cfa_offset -128\n"		      \
+			   "24:"					      \
+			   : "=m" (futex), "=&D" (ignore)		      \
+			   : "m" (futex)				      \
+			   : "ax", "cx", "r11", "cc", "memory");	      \
+       else								      \
+	 __asm __volatile (__lll_unlock_asm_start			      \
+			   "1:\tlea %0, %%" RDI_LP "\n"			      \
+			   "2:\tsub $128, %%" RSP_LP "\n"		      \
+			   ".cfi_adjust_cfa_offset 128\n"		      \
+			   "3:\tcallq __lll_unlock_wake\n"		      \
+			   "4:\tadd $128, %%" RSP_LP "\n"		      \
+			   ".cfi_adjust_cfa_offset -128\n"		      \
+			   "24:"					      \
+			   : "=m" (futex), "=&D" (ignore)		      \
+			   : "m" (futex), "S" (private)			      \
+			   : "ax", "cx", "r11", "cc", "memory");	      \
+    })
+
+#define lll_robust_unlock(futex, private) \
+  do									      \
+    {									      \
+      int ignore;							      \
+      __asm __volatile (LOCK_INSTR "andl %2, %0\n\t"			      \
+			"je 24f\n\t"					      \
+			"1:\tlea %0, %%" RDI_LP "\n"			      \
+			"2:\tsub $128, %%" RSP_LP "\n"			      \
+			".cfi_adjust_cfa_offset 128\n"			      \
+			"3:\tcallq __lll_unlock_wake\n"			      \
+			"4:\tadd $128, %%" RSP_LP "\n"			      \
+			".cfi_adjust_cfa_offset -128\n"			      \
+			"24:"						      \
+			: "=m" (futex), "=&D" (ignore)			      \
+			: "i" (FUTEX_WAITERS), "m" (futex),		      \
+			  "S" (private)					      \
+			: "ax", "cx", "r11", "cc", "memory");		      \
+    }									      \
+  while (0)
+
+#define lll_robust_dead(futex, private) \
+  do									      \
+    {									      \
+      int ignore;							      \
+      __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t"			      \
+			"syscall"					      \
+			: "=m" (futex), "=a" (ignore)			      \
+			: "D" (&(futex)), "i" (FUTEX_OWNER_DIED),	      \
+			  "S" (__lll_private_flag (FUTEX_WAKE, private)),     \
+			  "1" (__NR_futex), "d" (1)			      \
+			: "cx", "r11", "cc", "memory");			      \
+    }									      \
+  while (0)
+
+/* Returns non-zero if an error occurred, zero on success.  */
+#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val, private) \
+  ({ int __res;								      \
+     register int __nr_move __asm ("r10") = nr_move;			      \
+     register void *__mutex __asm ("r8") = mutex;			      \
+     register int __val __asm ("r9") = val;				      \
+     __asm __volatile ("syscall"					      \
+		       : "=a" (__res)					      \
+		       : "0" (__NR_futex), "D" ((void *) ftx),		      \
+			 "S" (__lll_private_flag (FUTEX_CMP_REQUEUE,	      \
+						  private)), "d" (nr_wake),   \
+			 "r" (__nr_move), "r" (__mutex), "r" (__val)	      \
+		       : "cx", "r11", "cc", "memory");			      \
+     __res < 0; })
+
+#define lll_islocked(futex) \
+  (futex != LLL_LOCK_INITIALIZER)
+
+
+/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
+   wakeup when the clone terminates.  The memory location contains the
+   thread ID while the clone is running and is reset to zero
+   afterwards.
+
+   The macro parameter must not have any side effect.  */
+#define lll_wait_tid(tid) \
+  do {									      \
+    int __ignore;							      \
+    register __typeof (tid) _tid asm ("edx") = (tid);			      \
+    if (_tid != 0)							      \
+      __asm __volatile ("xorq %%r10, %%r10\n\t"				      \
+			"1:\tmovq %2, %%rax\n\t"			      \
+			"syscall\n\t"					      \
+			"cmpl $0, (%%rdi)\n\t"				      \
+			"jne 1b"					      \
+			: "=&a" (__ignore)				      \
+			: "S" (FUTEX_WAIT), "i" (SYS_futex), "D" (&tid),      \
+			  "d" (_tid)					      \
+			: "memory", "cc", "r10", "r11", "cx");		      \
+  } while (0)
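
A C rendering of the same wait (a sketch; error handling omitted): because
the thread was created with CLONE_CHILD_CLEARTID, the kernel stores zero to
the TID word and issues a futex wake when the thread exits, so looping until
the word reads zero amounts to a join.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void wait_tid (int *tidp)
{
  int tid;
  /* FUTEX_WAIT returns immediately if *tidp no longer equals the value
     passed in, so re-reading it each iteration handles both spurious
     wakeups and the real exit notification.  */
  while ((tid = *tidp) != 0)
    syscall (SYS_futex, tidp, FUTEX_WAIT, tid, NULL);
}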
+
+extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
+     attribute_hidden;
+#define lll_timedwait_tid(tid, abstime) \
+  ({									      \
+    int __result = 0;							      \
+    if (tid != 0)							      \
+      {									      \
+	if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)	      \
+	  __result = EINVAL;						      \
+	else								      \
+	  __result = __lll_timedwait_tid (&tid, abstime);		      \
+      }									      \
+    __result; })
+
+extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
+  attribute_hidden;
+
+extern int __lll_unlock_elision (int *lock, int private)
+  attribute_hidden;
+
+extern int __lll_trylock_elision (int *lock, short *adapt_count)
+  attribute_hidden;
+
+#define lll_lock_elision(futex, adapt_count, private) \
+  __lll_lock_elision (&(futex), &(adapt_count), private)
+#define lll_unlock_elision(futex, private) \
+  __lll_unlock_elision (&(futex), private)
+#define lll_trylock_elision(futex, adapt_count) \
+  __lll_trylock_elision (&(futex), &(adapt_count))
+
+#endif  /* !__ASSEMBLER__ */
+
+#endif	/* lowlevellock.h */
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
new file mode 100644
index 0000000000..990b6f9fdb
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
@@ -0,0 +1,306 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <kernel-features.h>
+
+	.text
+
+#define FUTEX_WAITERS		0x80000000
+#define FUTEX_OWNER_DIED	0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+	xorl	$(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+#endif
+
+
+	.globl	__lll_robust_lock_wait
+	.type	__lll_robust_lock_wait,@function
+	.hidden	__lll_robust_lock_wait
+	.align	16
+__lll_robust_lock_wait:
+	cfi_startproc
+	pushq	%r10
+	cfi_adjust_cfa_offset(8)
+	pushq	%rdx
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%r10, -16)
+	cfi_offset(%rdx, -24)
+
+	xorq	%r10, %r10	/* No timeout.  */
+	LOAD_FUTEX_WAIT (%esi)
+
+4:	movl	%eax, %edx
+	orl	$FUTEX_WAITERS, %edx
+
+	testl	$FUTEX_OWNER_DIED, %eax
+	jnz	3f
+
+	cmpl	%edx, %eax
+	je	1f
+
+	LOCK
+	cmpxchgl %edx, (%rdi)
+	jnz	2f
+
+1:	movl	$SYS_futex, %eax
+	syscall
+
+	movl	(%rdi), %eax
+
+2:	testl	%eax, %eax
+	jne	4b
+
+	movl	%fs:TID, %edx
+	orl	$FUTEX_WAITERS, %edx
+	LOCK
+	cmpxchgl %edx, (%rdi)
+	jnz	4b
+	/* NB:	 %rax == 0 */
+
+3:	popq	%rdx
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rdx)
+	popq	%r10
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r10)
+	retq
+	cfi_endproc
+	.size	__lll_robust_lock_wait,.-__lll_robust_lock_wait
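
In C terms (a sketch with an illustrative name): the robust lock word holds
the owner's TID, waiters advertise themselves by setting FUTEX_WAITERS, and
the wait bails out the moment FUTEX_OWNER_DIED appears so the caller can
recover the mutex.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static int robust_lock_wait (int *futex, int tid)
{
  int val = *futex;
  for (;;)
    {
      while (val != 0)
	{
	  if (val & FUTEX_OWNER_DIED)
	    return val;		/* Previous owner died; caller recovers.  */
	  int contended = val | FUTEX_WAITERS;
	  if (val == contended
	      || __sync_bool_compare_and_swap (futex, val, contended))
	    syscall (SYS_futex, futex, FUTEX_WAIT, contended, NULL);
	  val = *futex;
	}
      /* Free: claim it with our TID, keeping FUTEX_WAITERS set so the
	 unlocking thread knows it must wake someone.  */
      if (__sync_bool_compare_and_swap (futex, 0, tid | FUTEX_WAITERS))
	return 0;
      val = *futex;
    }
}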
+
+
+	.globl	__lll_robust_timedlock_wait
+	.type	__lll_robust_timedlock_wait,@function
+	.hidden	__lll_robust_timedlock_wait
+	.align	16
+__lll_robust_timedlock_wait:
+	cfi_startproc
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+	cmpl	$0, __have_futex_clock_realtime(%rip)
+#  else
+	cmpl	$0, __have_futex_clock_realtime
+#  endif
+	je	.Lreltmo
+# endif
+
+	cmpq	$0, (%rdx)
+	js	7f
+
+	pushq	%r9
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r9, 0)
+	movq	%rdx, %r10
+	movl	$0xffffffff, %r9d
+	LOAD_FUTEX_WAIT_ABS (%esi)
+
+1:	testl	$FUTEX_OWNER_DIED, %eax
+	jnz	3f
+
+	movl	%eax, %edx
+	orl	$FUTEX_WAITERS, %edx
+
+	cmpl	%eax, %edx
+	je	5f
+
+	LOCK
+	cmpxchgl %edx, (%rdi)
+	movq	$0, %rcx	/* Must use mov to avoid changing cc.  */
+	jnz	6f
+
+5:	movl	$SYS_futex, %eax
+	syscall
+	movl	%eax, %ecx
+
+	movl	(%rdi), %eax
+
+6:	testl	%eax, %eax
+	jne	2f
+
+	movl	%fs:TID, %edx
+	orl	$FUTEX_WAITERS, %edx
+	LOCK
+	cmpxchgl %edx, (%rdi)
+	jnz	2f
+
+3:	popq	%r9
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r9)
+	retq
+
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r9, 0)
+	/* Check whether the time expired.  */
+2:	cmpl	$-ETIMEDOUT, %ecx
+	je	4f
+	cmpl	$-EINVAL, %ecx
+	jne	1b
+
+4:	movl	%ecx, %eax
+	negl	%eax
+	jmp	3b
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r9)
+
+7:	movl	$ETIMEDOUT, %eax
+	retq
+
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+	/* Check for a valid timeout value.  */
+	cmpq	$1000000000, 8(%rdx)
+	jae	3f
+
+	pushq	%r8
+	cfi_adjust_cfa_offset(8)
+	pushq	%r9
+	cfi_adjust_cfa_offset(8)
+	pushq	%r12
+	cfi_adjust_cfa_offset(8)
+	pushq	%r13
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%r8, -16)
+	cfi_offset(%r9, -24)
+	cfi_offset(%r12, -32)
+	cfi_offset(%r13, -40)
+	pushq	%rsi
+	cfi_adjust_cfa_offset(8)
+
+	/* Stack frame for the timespec and timeval structs.  */
+	subq	$32, %rsp
+	cfi_adjust_cfa_offset(32)
+
+	movq	%rdi, %r12
+	movq	%rdx, %r13
+
+1:	movq	%rax, 16(%rsp)
+
+	/* Get current time.  */
+	movq	%rsp, %rdi
+	xorl	%esi, %esi
+	/* This call works because we directly jump to a system call entry
+	   which preserves all the registers.  */
+	call	JUMPTARGET(__gettimeofday)
+
+	/* Compute relative timeout.  */
+	movq	8(%rsp), %rax
+	movl	$1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+	movq	(%r13), %rdi
+	movq	8(%r13), %rsi
+	subq	(%rsp), %rdi
+	subq	%rax, %rsi
+	jns	4f
+	addq	$1000000000, %rsi
+	decq	%rdi
+4:	testq	%rdi, %rdi
+	js	8f		/* Time is already up.  */
+
+	/* Futex call.  */
+	movq	%rdi, (%rsp)	/* Store relative timeout.  */
+	movq	%rsi, 8(%rsp)
+
+	movq	16(%rsp), %rdx
+	movl	%edx, %eax
+	orl	$FUTEX_WAITERS, %edx
+
+	testl	$FUTEX_OWNER_DIED, %eax
+	jnz	6f
+
+	cmpl	%eax, %edx
+	je	2f
+
+	LOCK
+	cmpxchgl %edx, (%r12)
+	movq	$0, %rcx	/* Must use mov to avoid changing cc.  */
+	jnz	5f
+
+2:	movq	%rsp, %r10
+	movl	32(%rsp), %esi
+	LOAD_FUTEX_WAIT (%esi)
+	movq	%r12, %rdi
+	movl	$SYS_futex, %eax
+	syscall
+	movq	%rax, %rcx
+
+	movl	(%r12), %eax
+
+5:	testl	%eax, %eax
+	jne	7f
+
+	movl	%fs:TID, %edx
+	orl	$FUTEX_WAITERS, %edx
+	LOCK
+	cmpxchgl %edx, (%r12)
+	jnz	7f
+
+6:	addq	$40, %rsp
+	cfi_adjust_cfa_offset(-40)
+	popq	%r13
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r13)
+	popq	%r12
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r12)
+	popq	%r9
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r9)
+	popq	%r8
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r8)
+	retq
+
+3:	movl	$EINVAL, %eax
+	retq
+
+	cfi_adjust_cfa_offset(72)
+	cfi_offset(%r8, -16)
+	cfi_offset(%r9, -24)
+	cfi_offset(%r12, -32)
+	cfi_offset(%r13, -40)
+	/* Check whether the time expired.  */
+7:	cmpl	$-ETIMEDOUT, %ecx
+	jne	1b
+
+8:	movl	$ETIMEDOUT, %eax
+	jmp	6b
+#endif
+	cfi_endproc
+	.size	__lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S b/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
new file mode 100644
index 0000000000..eec17f226f
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
@@ -0,0 +1,160 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelbarrier.h>
+
+
+	.text
+
+	.globl	pthread_barrier_wait
+	.type	pthread_barrier_wait,@function
+	.align	16
+pthread_barrier_wait:
+	/* Get the mutex.  */
+	xorl	%eax, %eax
+	movl	$1, %esi
+	LOCK
+	cmpxchgl %esi, MUTEX(%rdi)
+	jnz	1f
+
+	/* One less waiter.  If this was the last one needed, wake
+	   everybody.  */
+2:	decl	LEFT(%rdi)
+	je	3f
+
+	/* There are more threads to come.  */
+#if CURR_EVENT == 0
+	movl	(%rdi), %edx
+#else
+	movl	CURR_EVENT(%rdi), %edx
+#endif
+
+	/* Release the mutex.  */
+	LOCK
+	decl	MUTEX(%rdi)
+	jne	6f
+
+	/* Wait for the remaining threads.  The call will return immediately
+	   if the CURR_EVENT memory has meanwhile been changed.  */
+7:
+#if FUTEX_WAIT == 0
+	movl	PRIVATE(%rdi), %esi
+#else
+	movl	$FUTEX_WAIT, %esi
+	orl	PRIVATE(%rdi), %esi
+#endif
+	xorq	%r10, %r10
+8:	movl	$SYS_futex, %eax
+	syscall
+
+	/* Don't return on spurious wakeups.  The syscall does not change
+	   any register except %eax so there is no need to reload any of
+	   them.  */
+#if CURR_EVENT == 0
+	cmpl	%edx, (%rdi)
+#else
+	cmpl	%edx, CURR_EVENT(%rdi)
+#endif
+	je	8b
+
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count unlock the object.  */
+	movl	$1, %edx
+	movl	INIT_COUNT(%rdi), %eax
+	LOCK
+	xaddl	%edx, LEFT(%rdi)
+	subl	$1, %eax
+	cmpl	%eax, %edx
+	jne,pt	10f
+
+	/* Release the mutex.  We cannot release the lock before
+	   waking the waiting threads since otherwise a new thread might
+	   arrive and get woken up, too.  */
+	LOCK
+	decl	MUTEX(%rdi)
+	jne	9f
+
+10:	xorl	%eax, %eax		/* != PTHREAD_BARRIER_SERIAL_THREAD */
+
+	retq
+
+	/* The necessary number of threads arrived.  */
+3:
+#if CURR_EVENT == 0
+	incl	(%rdi)
+#else
+	incl	CURR_EVENT(%rdi)
+#endif
+
+	/* Wake up all waiters.  The count is a signed number in the kernel
+	   so 0x7fffffff is the highest value.  */
+	movl	$0x7fffffff, %edx
+	movl	$FUTEX_WAKE, %esi
+	orl	PRIVATE(%rdi), %esi
+	movl	$SYS_futex, %eax
+	syscall
+
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count unlock the object.  */
+	movl	$1, %edx
+	movl	INIT_COUNT(%rdi), %eax
+	LOCK
+	xaddl	%edx, LEFT(%rdi)
+	subl	$1, %eax
+	cmpl	%eax, %edx
+	jne,pt	5f
+
+	/* Release the mutex.  We cannot release the lock before
+	   waking the waiting threads since otherwise a new thread might
+	   arrive and get woken up, too.  */
+	LOCK
+	decl	MUTEX(%rdi)
+	jne	4f
+
+5:	orl	$-1, %eax		/* == PTHREAD_BARRIER_SERIAL_THREAD */
+
+	retq
+
+1:	movl	PRIVATE(%rdi), %esi
+	addq	$MUTEX, %rdi
+	xorl	$LLL_SHARED, %esi
+	callq	__lll_lock_wait
+	subq	$MUTEX, %rdi
+	jmp	2b
+
+4:	movl	PRIVATE(%rdi), %esi
+	addq	$MUTEX, %rdi
+	xorl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
+	jmp	5b
+
+6:	movl	PRIVATE(%rdi), %esi
+	addq	$MUTEX, %rdi
+	xorl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
+	subq	$MUTEX, %rdi
+	jmp	7b
+
+9:	movl	PRIVATE(%rdi), %esi
+	addq	$MUTEX, %rdi
+	xorl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
+	jmp	10b
+	.size	pthread_barrier_wait,.-pthread_barrier_wait
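
The barrier above, restated as a C sketch.  The field names mirror the
lowlevelbarrier.h offsets (MUTEX, LEFT, CURR_EVENT, INIT_COUNT), and the
lll_lock/lll_unlock helpers are simplified stand-ins (the unlock always
issues a wake), not the glibc versions:

#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct barrier { int mutex; int left; int curr_event; int init_count; };

static void lll_lock (int *m)
{
  while (__atomic_exchange_n (m, 2, __ATOMIC_ACQUIRE) != 0)
    syscall (SYS_futex, m, FUTEX_WAIT, 2, NULL);
}

static void lll_unlock (int *m)
{
  __atomic_store_n (m, 0, __ATOMIC_RELEASE);
  syscall (SYS_futex, m, FUTEX_WAKE, 1, NULL);
}

/* Returns nonzero for exactly one ("serial") thread per round.  */
static int barrier_wait (struct barrier *b)
{
  lll_lock (&b->mutex);
  if (--b->left != 0)
    {
      /* More threads to come: remember the round and sleep.  */
      int event = b->curr_event;
      lll_unlock (&b->mutex);
      do	/* Don't return on spurious wakeups.  */
	syscall (SYS_futex, &b->curr_event, FUTEX_WAIT, event, NULL);
      while (b->curr_event == event);
      /* NB: the mutex is still held by the last arriving thread.  */
      if (__sync_add_and_fetch (&b->left, 1) == b->init_count)
	lll_unlock (&b->mutex);	/* Last one out releases it.  */
      return 0;
    }
  /* Last arrival: open the gate, keep the mutex locked until every
     waiter has left so a new round cannot start early.  */
  ++b->curr_event;
  syscall (SYS_futex, &b->curr_event, FUTEX_WAKE, INT_MAX, NULL);
  if (__sync_add_and_fetch (&b->left, 1) == b->init_count)
    lll_unlock (&b->mutex);
  return -1;	/* PTHREAD_BARRIER_SERIAL_THREAD */
}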
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
new file mode 100644
index 0000000000..985e0f1cfa
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
@@ -0,0 +1,179 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <kernel-features.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
+#include <stap-probe.h>
+
+	.text
+
+	/* int pthread_cond_broadcast (pthread_cond_t *cond) */
+	.globl	__pthread_cond_broadcast
+	.type	__pthread_cond_broadcast, @function
+	.align	16
+__pthread_cond_broadcast:
+
+	LIBC_PROBE (cond_broadcast, 1, %rdi)
+
+	/* Get internal lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jnz	1f
+
+2:	addq	$cond_futex, %rdi
+	movq	total_seq-cond_futex(%rdi), %r9
+	cmpq	wakeup_seq-cond_futex(%rdi), %r9
+	jna	4f
+
+	/* Cause all currently waiting threads to recognize they are
+	   woken up.  */
+	movq	%r9, wakeup_seq-cond_futex(%rdi)
+	movq	%r9, woken_seq-cond_futex(%rdi)
+	addq	%r9, %r9
+	movl	%r9d, (%rdi)
+	incl	broadcast_seq-cond_futex(%rdi)
+
+	/* Get the address of the mutex used.  */
+	mov	dep_mutex-cond_futex(%rdi), %R8_LP
+
+	/* Unlock.  */
+	LOCK
+	decl	cond_lock-cond_futex(%rdi)
+	jne	7f
+
+8:	cmp	$-1, %R8_LP
+	je	9f
+
+	/* Do not use requeue for pshared condvars.  */
+	testl	$PS_BIT, MUTEX_KIND(%r8)
+	jne	9f
+
+	/* Requeue to a PI mutex if the PI bit is set.  */
+	movl	MUTEX_KIND(%r8), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	je	81f
+
+	/* Wake up all threads.  */
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %esi
+#else
+	movl	%fs:PRIVATE_FUTEX, %esi
+	orl	$FUTEX_CMP_REQUEUE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	movl	$1, %edx
+	movl	$0x7fffffff, %r10d
+	syscall
+
+	/* For any kind of error, which is mainly EAGAIN, we try again
+	   with WAKE.  The general test also covers running on old
+	   kernels.  */
+	cmpq	$-4095, %rax
+	jae	9f
+
+10:	xorl	%eax, %eax
+	retq
+
+	/* Wake up all threads.  */
+81:	movl	$(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+	movl	$SYS_futex, %eax
+	movl	$1, %edx
+	movl	$0x7fffffff, %r10d
+	syscall
+
+	/* For any kind of error, which is mainly EAGAIN, we try again
+	   with WAKE.  The general test also covers running on old
+	   kernels.  */
+	cmpq	$-4095, %rax
+	jb	10b
+	jmp	9f
+
+	.align	16
+	/* Unlock.  */
+4:	LOCK
+	decl	cond_lock-cond_futex(%rdi)
+	jne	5f
+
+6:	xorl	%eax, %eax
+	retq
+
+	/* Initial locking failed.  */
+1:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+	jmp	2b
+
+	/* Unlock in loop requires wakeup.  */
+5:	addq	$cond_lock-cond_futex, %rdi
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+	jmp	6b
+
+	/* Unlock in loop requires wakeup.  */
+7:	addq	$cond_lock-cond_futex, %rdi
+	cmp	$-1, %R8_LP
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+	subq	$cond_lock-cond_futex, %rdi
+	jmp	8b
+
+9:	/* The futex requeue functionality is not available.  */
+	cmp	$-1, %R8_LP
+	movl	$0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+	jmp	10b
+	.size	__pthread_cond_broadcast, .-__pthread_cond_broadcast
+versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast,
+		  GLIBC_2_3_2)
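
The FUTEX_CMP_REQUEUE in the fast path is the heart of broadcast: it wakes a
single waiter and migrates the rest onto the mutex word, so they are released
one at a time as the mutex changes hands instead of stampeding.  As a hedged
C wrapper (argument order per futex(2); the name is illustrative):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* VAL guards against racing signals: the kernel returns EAGAIN without
   waking or requeuing anything if *cond_futex no longer equals it.  */
static long cond_requeue (int *cond_futex, int *mutex_futex, int val)
{
  return syscall (SYS_futex, cond_futex, FUTEX_CMP_REQUEUE,
		  1 /* nr_wake */, 0x7fffffffL /* nr_requeue */,
		  mutex_futex, val);
}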
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
new file mode 100644
index 0000000000..53d65b6f12
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
@@ -0,0 +1,164 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <pthread-pi-defines.h>
+#include <kernel-features.h>
+#include <pthread-errnos.h>
+#include <stap-probe.h>
+
+
+	.text
+
+	/* int pthread_cond_signal (pthread_cond_t *cond) */
+	.globl	__pthread_cond_signal
+	.type	__pthread_cond_signal, @function
+	.align	16
+__pthread_cond_signal:
+
+	LIBC_PROBE (cond_signal, 1, %rdi)
+
+	/* Get internal lock.  */
+	movq	%rdi, %r8
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jnz	1f
+
+2:	addq	$cond_futex, %rdi
+	movq	total_seq(%r8), %rcx
+	cmpq	wakeup_seq(%r8), %rcx
+	jbe	4f
+
+	/* Bump the wakeup number.  */
+	addq	$1, wakeup_seq(%r8)
+	addl	$1, (%rdi)
+
+	/* Wake up one thread.  */
+	LP_OP(cmp) $-1, dep_mutex(%r8)
+	movl	$FUTEX_WAKE_OP, %esi
+	movl	$1, %edx
+	movl	$SYS_futex, %eax
+	je	8f
+
+	/* Get the address of the mutex used.  */
+	mov     dep_mutex(%r8), %RCX_LP
+	movl	MUTEX_KIND(%rcx), %r11d
+	andl	$(ROBUST_BIT|PI_BIT), %r11d
+	cmpl	$PI_BIT, %r11d
+	je	9f
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), %esi
+#else
+	orl	%fs:PRIVATE_FUTEX, %esi
+#endif
+
+8:	movl	$1, %r10d
+#if cond_lock != 0
+	addq	$cond_lock, %r8
+#endif
+	movl	$FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %r9d
+	syscall
+#if cond_lock != 0
+	subq	$cond_lock, %r8
+#endif
+	/* For any kind of error, we try again with WAKE.
+	   The general test also covers running on old kernels.  */
+	cmpq	$-4095, %rax
+	jae	7f
+
+	xorl	%eax, %eax
+	retq
+
+	/* Wake up one thread and requeue none in the PI Mutex case.  */
+9:	movl	$(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+	movq	%rcx, %r8
+	xorq	%r10, %r10
+	movl	(%rdi), %r9d	// XXX Can this be right?
+	syscall
+
+	leaq	-cond_futex(%rdi), %r8
+
+	/* For any kind of error, we try again with WAKE.
+	   The general test also covers running on old kernels.  */
+	cmpq	$-4095, %rax
+	jb	4f
+
+7:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	andl	$FUTEX_PRIVATE_FLAG, %esi
+#else
+	andl	%fs:PRIVATE_FUTEX, %esi
+#endif
+	orl	$FUTEX_WAKE, %esi
+	movl	$SYS_futex, %eax
+	/* %rdx should be 1 already from $FUTEX_WAKE_OP syscall.
+	movl	$1, %edx  */
+	syscall
+
+	/* Unlock.  */
+4:	LOCK
+#if cond_lock == 0
+	decl	(%r8)
+#else
+	decl	cond_lock(%r8)
+#endif
+	jne	5f
+
+6:	xorl	%eax, %eax
+	retq
+
+	/* Initial locking failed.  */
+1:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+	jmp	2b
+
+	/* Unlock in loop requires wakeup.  */
+5:
+	movq	%r8, %rdi
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+	jmp	6b
+	.size	__pthread_cond_signal, .-__pthread_cond_signal
+versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
+		  GLIBC_2_3_2)
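The fast path above folds the wakeup and the internal-lock release into a
single FUTEX_WAKE_OP call.  What follows is a hedged C sketch of that non-PI
path; the struct layout, the lll_lock/lll_unlock helpers and the name
cond_signal_sketch are illustrative stand-ins, not the code glibc builds
(real glibc takes the field offsets from lowlevelcond.h and __lll_lock_wait
uses a 0/1/2 lock protocol rather than this plain 0/1 one).

/* Hedged C sketch of the non-PI __pthread_cond_signal fast path.  */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

struct cond_sketch {
	unsigned int lock;	/* cond_lock: internal low-level lock */
	unsigned int futex;	/* cond_futex: word the waiters sleep on */
	uint64_t total_seq;	/* waiters that ever started waiting */
	uint64_t wakeup_seq;	/* wakeups ever granted */
};

static long futex(unsigned int *uaddr, int op, unsigned int val,
		  void *timeout_or_val2, unsigned int *uaddr2,
		  unsigned int val3)
{
	return syscall(SYS_futex, uaddr, op, val, timeout_or_val2,
		       uaddr2, val3);
}

static void lll_lock(unsigned int *l)	/* simplified 0/1 protocol */
{
	while (__sync_lock_test_and_set(l, 1) != 0)
		futex(l, FUTEX_WAIT, 1, NULL, NULL, 0);
}

static void lll_unlock(unsigned int *l)
{
	__sync_lock_release(l);
	futex(l, FUTEX_WAKE, 1, NULL, NULL, 0);
}

int cond_signal_sketch(struct cond_sketch *c)
{
	lll_lock(&c->lock);
	if (c->total_seq > c->wakeup_seq) {
		c->wakeup_seq++;
		c->futex++;
		/* One syscall wakes a waiter on c->futex, stores 0 to
		   c->lock, and wakes one lock waiter if the old lock
		   value was > 1 -- FUTEX_OP_CLEAR_WAKE_IF_GT_ONE in the
		   assembly.  The val2 argument (how many to wake on
		   uaddr2) travels in the timeout slot.  */
		if (futex(&c->futex, FUTEX_WAKE_OP, 1, (void *) 1UL,
			  &c->lock,
			  FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1)) >= 0)
			return 0;
		/* Error (e.g. an old kernel): label 7 above falls back
		   to a plain FUTEX_WAKE plus a normal unlock.  */
		futex(&c->futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
	lll_unlock(&c->lock);
	return 0;
}

The design point is that a successful FUTEX_WAKE_OP leaves nothing to
unlock: the kernel has already stored 0 to the lock word and woken a
contender if the old value was greater than 1.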
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
new file mode 100644
index 0000000000..0dc23405b0
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -0,0 +1,840 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
+#include <stap-probe.h>
+
+#include <kernel-features.h>
+
+
+	.text
+
+
+/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
+			       const struct timespec *abstime)  */
+	.globl	__pthread_cond_timedwait
+	.type	__pthread_cond_timedwait, @function
+	.align	16
+__pthread_cond_timedwait:
+.LSTARTCODE:
+	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
+	pushq	%r12
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r12, 0)
+	pushq	%r13
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r13, 0)
+	pushq	%r14
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r14, 0)
+	pushq	%r15
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r15, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define FRAME_SIZE (32+8)
+#else
+# define FRAME_SIZE (48+8)
+#endif
+	subq	$FRAME_SIZE, %rsp
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+	cfi_remember_state
+
+	LIBC_PROBE (cond_timedwait, 3, %rdi, %rsi, %rdx)
+
+	cmpq	$1000000000, 8(%rdx)
+	movl	$EINVAL, %eax
+	jae	48f
+
+	/* Stack frame:
+
+	   rsp + 48
+		    +--------------------------+
+	   rsp + 32 | timeout value            |
+		    +--------------------------+
+	   rsp + 24 | old wakeup_seq value     |
+		    +--------------------------+
+	   rsp + 16 | mutex pointer            |
+		    +--------------------------+
+	   rsp +  8 | condvar pointer          |
+		    +--------------------------+
+	   rsp +  4 | old broadcast_seq value  |
+		    +--------------------------+
+	   rsp +  0 | old cancellation mode    |
+		    +--------------------------+
+	*/
+
+	LP_OP(cmp) $-1, dep_mutex(%rdi)
+
+	/* Prepare structure passed to cancellation handler.  */
+	movq	%rdi, 8(%rsp)
+	movq	%rsi, 16(%rsp)
+	movq	%rdx, %r13
+
+	je	22f
+	mov	%RSI_LP, dep_mutex(%rdi)
+
+22:
+	xorb	%r15b, %r15b
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+	cmpl	$0, __have_futex_clock_realtime(%rip)
+#  else
+	cmpl	$0, __have_futex_clock_realtime
+#  endif
+	je	.Lreltmo
+#endif
+
+	/* Get internal lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jnz	31f
+
+	/* Unlock the mutex.  */
+32:	movq	16(%rsp), %rdi
+	xorl	%esi, %esi
+	callq	__pthread_mutex_unlock_usercnt
+
+	testl	%eax, %eax
+	jne	46f
+
+	movq	8(%rsp), %rdi
+	incq	total_seq(%rdi)
+	incl	cond_futex(%rdi)
+	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Get and store current wakeup_seq value.  */
+	movq	8(%rsp), %rdi
+	movq	wakeup_seq(%rdi), %r9
+	movl	broadcast_seq(%rdi), %edx
+	movq	%r9, 24(%rsp)
+	movl	%edx, 4(%rsp)
+
+	cmpq	$0, (%r13)
+	movq	$-ETIMEDOUT, %r14
+	js	36f
+
+38:	movl	cond_futex(%rdi), %r12d
+
+	/* Unlock.  */
+	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	jne	33f
+
+.LcleanupSTART1:
+34:	callq	__pthread_enable_asynccancel
+	movl	%eax, (%rsp)
+
+	movq	%r13, %r10
+	movl	$FUTEX_WAIT_BITSET, %esi
+	LP_OP(cmp) $-1, dep_mutex(%rdi)
+	je	60f
+
+	mov	dep_mutex(%rdi), %R8_LP
+	/* Requeue to a non-robust PI mutex if the PI bit is set and
+	   the robust bit is not set.  */
+	movl	MUTEX_KIND(%r8), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	jne	61f
+
+	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+	xorl	%eax, %eax
+	/* The following only works like this because we only support
+	   two clocks, represented using a single bit.  */
+	testl	$1, cond_nwaiters(%rdi)
+	movl	$FUTEX_CLOCK_REALTIME, %edx
+	cmove	%edx, %eax
+	orl	%eax, %esi
+	movq	%r12, %rdx
+	addq	$cond_futex, %rdi
+	movl	$SYS_futex, %eax
+	syscall
+
+	cmpl	$0, %eax
+	sete	%r15b
+
+#ifdef __ASSUME_REQUEUE_PI
+	jmp	62f
+#else
+	je	62f
+
+	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
+	   successfully, it has already locked the mutex for us and the
+	   pi_flag (%r15b) is set to denote that fact.  However, if another
+	   thread changed the futex value before we entered the wait, the
+	   syscall may return an EAGAIN and the mutex is not locked.  We go
+	   ahead with a success anyway since later we look at the pi_flag to
+	   decide if we got the mutex or not.  The sequence numbers then make
+	   sure that only one of the threads actually wakes up.  We retry using
+	   normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
+	   and PI futexes don't mix.
+
+	   Note that we don't check for EAGAIN specifically; we assume that the
+	   only other error the futex function could return is EAGAIN (barring
+	   the ETIMEDOUT of course, for the timeout case in futex) since
+	   anything else would mean an error in our function.  It is too
+	   expensive to do that check for every call (which is quite common in
+	   case of a large number of threads), so it has been skipped.  */
+	cmpl    $-ENOSYS, %eax
+	jne     62f
+
+	subq	$cond_futex, %rdi
+#endif
+
+61:	movl	$(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
+60:	xorb	%r15b, %r15b
+	xorl	%eax, %eax
+	/* The following only works like this because we only support
+	   two clocks, represented using a single bit.  */
+	testl	$1, cond_nwaiters(%rdi)
+	movl	$FUTEX_CLOCK_REALTIME, %edx
+	movl	$0xffffffff, %r9d
+	cmove	%edx, %eax
+	orl	%eax, %esi
+	movq	%r12, %rdx
+	addq	$cond_futex, %rdi
+	movl	$SYS_futex, %eax
+	syscall
+62:	movq	%rax, %r14
+
+	movl	(%rsp), %edi
+	callq	__pthread_disable_asynccancel
+.LcleanupEND1:
+
+	/* Lock.  */
+	movq	8(%rsp), %rdi
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jne	35f
+
+36:	movl	broadcast_seq(%rdi), %edx
+
+	movq	woken_seq(%rdi), %rax
+
+	movq	wakeup_seq(%rdi), %r9
+
+	cmpl	4(%rsp), %edx
+	jne	53f
+
+	cmpq	24(%rsp), %r9
+	jbe	45f
+
+	cmpq	%rax, %r9
+	ja	39f
+
+45:	cmpq	$-ETIMEDOUT, %r14
+	je	99f
+
+	/* We need to go back to futex_wait.  If we're using requeue_pi, then
+	   release the mutex we had acquired and go back.  */
+	test	%r15b, %r15b
+	jz	38b
+
+	/* Adjust the mutex values first and then unlock it.  The unlock
+	   should always succeed or else the kernel did not lock the
+	   mutex correctly.  */
+	movq	%r8, %rdi
+	callq	__pthread_mutex_cond_lock_adjust
+	xorl	%esi, %esi
+	callq	__pthread_mutex_unlock_usercnt
+	/* Reload cond_var.  */
+	movq	8(%rsp), %rdi
+	jmp	38b
+
+99:	incq	wakeup_seq(%rdi)
+	incl	cond_futex(%rdi)
+	movl	$ETIMEDOUT, %r14d
+	jmp	44f
+
+53:	xorq	%r14, %r14
+	jmp	54f
+
+39:	xorq	%r14, %r14
+44:	incq	woken_seq(%rdi)
+
+54:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	cmpq	$0xffffffffffffffff, total_seq(%rdi)
+	jne	55f
+	movl	cond_nwaiters(%rdi), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	55f
+
+	addq	$cond_nwaiters, %rdi
+	LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+	subq	$cond_nwaiters, %rdi
+
+55:	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	jne	40f
+
+	/* If requeue_pi is used, the kernel performs the locking of the
+	   mutex.  */
+41:	movq	16(%rsp), %rdi
+	testb	%r15b, %r15b
+	jnz	64f
+
+	callq	__pthread_mutex_cond_lock
+
+63:	testq	%rax, %rax
+	cmoveq	%r14, %rax
+
+48:	addq	$FRAME_SIZE, %rsp
+	cfi_adjust_cfa_offset(-FRAME_SIZE)
+	popq	%r15
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r15)
+	popq	%r14
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r14)
+	popq	%r13
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r13)
+	popq	%r12
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r12)
+
+	retq
+
+	cfi_restore_state
+
+64:	callq	__pthread_mutex_cond_lock_adjust
+	movq	%r14, %rax
+	jmp	48b
+
+	/* Initial locking failed.  */
+31:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+	jmp	32b
+
+	/* Unlock in loop requires wakeup.  */
+33:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+	jmp	34b
+
+	/* Locking in loop failed.  */
+35:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+	jmp	36b
+
+	/* Unlock after loop requires wakeup.  */
+40:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+	jmp	41b
+
+	/* The initial unlocking of the mutex failed.  */
+46:	movq	8(%rsp), %rdi
+	movq	%rax, (%rsp)
+	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	jne	47f
+
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+
+47:	movq	(%rsp), %rax
+	jmp	48b
+
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+	/* Get internal lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+# if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+# else
+	cmpxchgl %esi, cond_lock(%rdi)
+# endif
+	jnz	1f
+
+	/* Unlock the mutex.  */
+2:	movq	16(%rsp), %rdi
+	xorl	%esi, %esi
+	callq	__pthread_mutex_unlock_usercnt
+
+	testl	%eax, %eax
+	jne	46b
+
+	movq	8(%rsp), %rdi
+	incq	total_seq(%rdi)
+	incl	cond_futex(%rdi)
+	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Get and store current wakeup_seq value.  */
+	movq	8(%rsp), %rdi
+	movq	wakeup_seq(%rdi), %r9
+	movl	broadcast_seq(%rdi), %edx
+	movq	%r9, 24(%rsp)
+	movl	%edx, 4(%rsp)
+
+	/* Get the current time.  */
+8:
+# ifdef __NR_clock_gettime
+	/* Get the clock number.  Note that the field in the condvar
+	   structure stores the number minus 1.  */
+	movq	8(%rsp), %rdi
+	movl	cond_nwaiters(%rdi), %edi
+	andl	$((1 << nwaiters_shift) - 1), %edi
+	/* Only clocks 0 and 1 are allowed so far.  Both are handled in the
+	   kernel.  */
+	leaq	32(%rsp), %rsi
+#  ifdef SHARED
+	mov	__vdso_clock_gettime@GOTPCREL(%rip), %RAX_LP
+	mov	(%rax), %RAX_LP
+	PTR_DEMANGLE (%RAX_LP)
+	call	*%rax
+#  else
+	movl	$__NR_clock_gettime, %eax
+	syscall
+#  endif
+
+	/* Compute relative timeout.  */
+	movq	(%r13), %rcx
+	movq	8(%r13), %rdx
+	subq	32(%rsp), %rcx
+	subq	40(%rsp), %rdx
+# else
+	leaq	24(%rsp), %rdi
+	xorl	%esi, %esi
+	/* This call works because we directly jump to a system call entry
+	   which preserves all the registers.  */
+	call	JUMPTARGET(__gettimeofday)
+
+	/* Compute relative timeout.  */
+	movq	40(%rsp), %rax
+	movl	$1000, %edx
+	mul	%rdx		/* Microseconds to nanoseconds.  */
+	movq	(%r13), %rcx
+	movq	8(%r13), %rdx
+	subq	32(%rsp), %rcx
+	subq	%rax, %rdx
+# endif
+	jns	12f
+	addq	$1000000000, %rdx
+	decq	%rcx
+12:	testq	%rcx, %rcx
+	movq	8(%rsp), %rdi
+	movq	$-ETIMEDOUT, %r14
+	js	6f
+
+	/* Store relative timeout.  */
+21:	movq	%rcx, 32(%rsp)
+	movq	%rdx, 40(%rsp)
+
+	movl	cond_futex(%rdi), %r12d
+
+	/* Unlock.  */
+	LOCK
+# if cond_lock == 0
+	decl	(%rdi)
+# else
+	decl	cond_lock(%rdi)
+# endif
+	jne	3f
+
+.LcleanupSTART2:
+4:	callq	__pthread_enable_asynccancel
+	movl	%eax, (%rsp)
+
+	leaq	32(%rsp), %r10
+	LP_OP(cmp) $-1, dep_mutex(%rdi)
+	movq	%r12, %rdx
+# ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAIT, %eax
+	movl	$(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+# else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+#  if FUTEX_WAIT != 0
+	orl	$FUTEX_WAIT, %esi
+#  endif
+# endif
+	addq	$cond_futex, %rdi
+	movl	$SYS_futex, %eax
+	syscall
+	movq	%rax, %r14
+
+	movl	(%rsp), %edi
+	callq	__pthread_disable_asynccancel
+.LcleanupEND2:
+
+	/* Lock.  */
+	movq	8(%rsp), %rdi
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+# if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+# else
+	cmpxchgl %esi, cond_lock(%rdi)
+# endif
+	jne	5f
+
+6:	movl	broadcast_seq(%rdi), %edx
+
+	movq	woken_seq(%rdi), %rax
+
+	movq	wakeup_seq(%rdi), %r9
+
+	cmpl	4(%rsp), %edx
+	jne	53b
+
+	cmpq	24(%rsp), %r9
+	jbe	15f
+
+	cmpq	%rax, %r9
+	ja	39b
+
+15:	cmpq	$-ETIMEDOUT, %r14
+	jne	8b
+
+	jmp	99b
+
+	/* Initial locking failed.  */
+1:
+# if cond_lock != 0
+	addq	$cond_lock, %rdi
+# endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+	jmp	2b
+
+	/* Unlock in loop requires wakeup.  */
+3:
+# if cond_lock != 0
+	addq	$cond_lock, %rdi
+# endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+	jmp	4b
+
+	/* Locking in loop failed.  */
+5:
+# if cond_lock != 0
+	addq	$cond_lock, %rdi
+# endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+# if cond_lock != 0
+	subq	$cond_lock, %rdi
+# endif
+	jmp	6b
+#endif
+	.size	__pthread_cond_timedwait, .-__pthread_cond_timedwait
+versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
+		  GLIBC_2_3_2)
+
+
+	.align	16
+	.type	__condvar_cleanup2, @function
+__condvar_cleanup2:
+	/* Stack frame:
+
+	   rsp + 72
+		    +--------------------------+
+	   rsp + 64 | %r12                     |
+		    +--------------------------+
+	   rsp + 56 | %r13                     |
+		    +--------------------------+
+	   rsp + 48 | %r14                     |
+		    +--------------------------+
+	   rsp + 24 | stored %rax (exception)  |
+		    +--------------------------+
+	   rsp + 16 | mutex pointer            |
+		    +--------------------------+
+	   rsp +  8 | condvar pointer          |
+		    +--------------------------+
+	   rsp +  4 | old broadcast_seq value  |
+		    +--------------------------+
+	   rsp +  0 | old cancellation mode    |
+		    +--------------------------+
+	*/
+
+	movq	%rax, 24(%rsp)
+
+	/* Get internal lock.  */
+	movq	8(%rsp), %rdi
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jz	1f
+
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+
+1:	movl	broadcast_seq(%rdi), %edx
+	cmpl	4(%rsp), %edx
+	jne	3f
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case the thread was woken and
+	   then canceled.  In this case we ignore the signal.  */
+	movq	total_seq(%rdi), %rax
+	cmpq	wakeup_seq(%rdi), %rax
+	jbe	6f
+	incq	wakeup_seq(%rdi)
+	incl	cond_futex(%rdi)
+6:	incq	woken_seq(%rdi)
+
+3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	xorq	%r12, %r12
+	cmpq	$0xffffffffffffffff, total_seq(%rdi)
+	jne	4f
+	movl	cond_nwaiters(%rdi), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	4f
+
+	LP_OP(cmp) $-1, dep_mutex(%rdi)
+	leaq	cond_nwaiters(%rdi), %rdi
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+	subq	$cond_nwaiters, %rdi
+	movl	$1, %r12d
+
+4:	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	je	2f
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+
+	/* Wake up all waiters to make sure no signal gets lost.  */
+2:	testq	%r12, %r12
+	jnz	5f
+	addq	$cond_futex, %rdi
+	LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
+	movl	$0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+
+	/* Lock the mutex only if we don't own it already.  This only happens
+	   in case of PI mutexes, if we got canceled after a successful
+	   return of the futex syscall and before disabling async
+	   cancellation.  */
+5:	movq	16(%rsp), %rdi
+	movl	MUTEX_KIND(%rdi), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	jne	7f
+
+	movl	(%rdi), %eax
+	andl	$TID_MASK, %eax
+	cmpl	%eax, %fs:TID
+	jne	7f
+	/* We managed to get the lock.  Fix it up before returning.  */
+	callq	__pthread_mutex_cond_lock_adjust
+	jmp	8f
+
+7:	callq	__pthread_mutex_cond_lock
+
+8:	movq	24(%rsp), %rdi
+	movq	FRAME_SIZE(%rsp), %r15
+	movq	FRAME_SIZE+8(%rsp), %r14
+	movq	FRAME_SIZE+16(%rsp), %r13
+	movq	FRAME_SIZE+24(%rsp), %r12
+.LcallUR:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	__condvar_cleanup2, .-__condvar_cleanup2
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format
+	.byte	DW_EH_PE_omit			# @TType format
+	.byte	DW_EH_PE_uleb128		# call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART1-.LSTARTCODE
+	.uleb128 .LcleanupEND1-.LcleanupSTART1
+	.uleb128 __condvar_cleanup2-.LSTARTCODE
+	.uleb128  0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.uleb128 .LcleanupSTART2-.LSTARTCODE
+	.uleb128 .LcleanupEND2-.LcleanupSTART2
+	.uleb128 __condvar_cleanup2-.LSTARTCODE
+	.uleb128  0
+#endif
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	LP_SIZE
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, LP_SIZE
+DW.ref.__gcc_personality_v0:
+	ASM_ADDR __gcc_personality_v0
+#endif
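pthread_cond_timedwait above takes one of two timeout paths: on kernels
with FUTEX_CLOCK_REALTIME the caller's absolute timespec goes straight to
FUTEX_WAIT_BITSET, while the .Lreltmo fallback first converts it to a
relative timeout.  A minimal sketch of that conversion, assuming plain
POSIX gettimeofday in place of the vDSO clock_gettime path (abs_to_rel is a
hypothetical name); the multiply by 1000 turns tv_usec's microseconds into
nanoseconds:

#include <errno.h>
#include <stddef.h>
#include <sys/time.h>
#include <time.h>

static int abs_to_rel(const struct timespec *abstime, struct timespec *rel)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	rel->tv_sec = abstime->tv_sec - now.tv_sec;
	/* tv_usec is in microseconds; scale to nanoseconds (the
	   "mul $1000" in .Lreltmo).  */
	rel->tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
	if (rel->tv_nsec < 0) {		/* borrow from the seconds */
		rel->tv_nsec += 1000000000;
		rel->tv_sec--;
	}
	/* A negative second count means the deadline already passed
	   (the "js" test before storing the relative timeout).  */
	return rel->tv_sec < 0 ? ETIMEDOUT : 0;
}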
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
new file mode 100644
index 0000000000..0e61d0aa24
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
@@ -0,0 +1,555 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <tcb-offsets.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
+#include <stap-probe.h>
+
+#include <kernel-features.h>
+
+
+	.text
+
+/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
+	.globl	__pthread_cond_wait
+	.type	__pthread_cond_wait, @function
+	.align	16
+__pthread_cond_wait:
+.LSTARTCODE:
+	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
+#define FRAME_SIZE (32+8)
+	leaq	-FRAME_SIZE(%rsp), %rsp
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+
+	/* Stack frame:
+
+	   rsp + 32
+		    +--------------------------+
+	   rsp + 24 | old wakeup_seq value     |
+		    +--------------------------+
+	   rsp + 16 | mutex pointer            |
+		    +--------------------------+
+	   rsp +  8 | condvar pointer          |
+		    +--------------------------+
+	   rsp +  4 | old broadcast_seq value  |
+		    +--------------------------+
+	   rsp +  0 | old cancellation mode    |
+		    +--------------------------+
+	*/
+
+	LIBC_PROBE (cond_wait, 2, %rdi, %rsi)
+
+	LP_OP(cmp) $-1, dep_mutex(%rdi)
+
+	/* Prepare structure passed to cancellation handler.  */
+	movq	%rdi, 8(%rsp)
+	movq	%rsi, 16(%rsp)
+
+	je	15f
+	mov	%RSI_LP, dep_mutex(%rdi)
+
+	/* Get internal lock.  */
+15:	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jne	1f
+
+	/* Unlock the mutex.  */
+2:	movq	16(%rsp), %rdi
+	xorl	%esi, %esi
+	callq	__pthread_mutex_unlock_usercnt
+
+	testl	%eax, %eax
+	jne	12f
+
+	movq	8(%rsp), %rdi
+	incq	total_seq(%rdi)
+	incl	cond_futex(%rdi)
+	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Get and store current wakeup_seq value.  */
+	movq	8(%rsp), %rdi
+	movq	wakeup_seq(%rdi), %r9
+	movl	broadcast_seq(%rdi), %edx
+	movq	%r9, 24(%rsp)
+	movl	%edx, 4(%rsp)
+
+	/* Unlock.  */
+8:	movl	cond_futex(%rdi), %edx
+	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	jne	3f
+
+.LcleanupSTART:
+4:	callq	__pthread_enable_asynccancel
+	movl	%eax, (%rsp)
+
+	xorq	%r10, %r10
+	LP_OP(cmp) $-1, dep_mutex(%rdi)
+	leaq	cond_futex(%rdi), %rdi
+	movl	$FUTEX_WAIT, %esi
+	je	60f
+
+	mov	dep_mutex-cond_futex(%rdi), %R8_LP
+	/* Requeue to a non-robust PI mutex if the PI bit is set and
+	   the robust bit is not set.  */
+	movl	MUTEX_KIND(%r8), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	jne	61f
+
+	movl	$(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+	movl	$SYS_futex, %eax
+	syscall
+
+	cmpl	$0, %eax
+	sete	%r8b
+
+#ifdef __ASSUME_REQUEUE_PI
+	jmp	62f
+#else
+	je	62f
+
+	/* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
+	   successfully, it has already locked the mutex for us and the
+	   pi_flag (%r8b) is set to denote that fact.  However, if another
+	   thread changed the futex value before we entered the wait, the
+	   syscall may return an EAGAIN and the mutex is not locked.  We go
+	   ahead with a success anyway since later we look at the pi_flag to
+	   decide if we got the mutex or not.  The sequence numbers then make
+	   sure that only one of the threads actually wakes up.  We retry using
+	   normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal
+	   and PI futexes don't mix.
+
+	   Note that we don't check for EAGAIN specifically; we assume that the
+	   only other error the futex function could return is EAGAIN since
+	   anything else would mean an error in our function.  It is too
+	   expensive to do that check for every call (which is quite common in
+	   case of a large number of threads), so it has been skipped.  */
+	cmpl	$-ENOSYS, %eax
+	jne	62f
+
+# ifndef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAIT, %esi
+# endif
+#endif
+
+61:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+#else
+	orl	%fs:PRIVATE_FUTEX, %esi
+#endif
+60:	xorb	%r8b, %r8b
+	movl	$SYS_futex, %eax
+	syscall
+
+62:	movl	(%rsp), %edi
+	callq	__pthread_disable_asynccancel
+.LcleanupEND:
+
+	/* Lock.  */
+	movq	8(%rsp), %rdi
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jnz	5f
+
+6:	movl	broadcast_seq(%rdi), %edx
+
+	movq	woken_seq(%rdi), %rax
+
+	movq	wakeup_seq(%rdi), %r9
+
+	cmpl	4(%rsp), %edx
+	jne	16f
+
+	cmpq	24(%rsp), %r9
+	jbe	19f
+
+	cmpq	%rax, %r9
+	jna	19f
+
+	incq	woken_seq(%rdi)
+
+	/* Unlock */
+16:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	cmpq	$0xffffffffffffffff, total_seq(%rdi)
+	jne	17f
+	movl	cond_nwaiters(%rdi), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	17f
+
+	addq	$cond_nwaiters, %rdi
+	LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+	subq	$cond_nwaiters, %rdi
+
+17:	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	jne	10f
+
+	/* If requeue_pi is used, the kernel performs the locking of the
+	   mutex.  */
+11:	movq	16(%rsp), %rdi
+	testb	%r8b, %r8b
+	jnz	18f
+
+	callq	__pthread_mutex_cond_lock
+
+14:	leaq	FRAME_SIZE(%rsp), %rsp
+	cfi_adjust_cfa_offset(-FRAME_SIZE)
+
+	/* We return the result of the mutex_lock operation.  */
+	retq
+
+	cfi_adjust_cfa_offset(FRAME_SIZE)
+
+18:	callq	__pthread_mutex_cond_lock_adjust
+	xorl	%eax, %eax
+	jmp	14b
+
+	/* We need to go back to futex_wait.  If we're using requeue_pi, then
+	   release the mutex we had acquired and go back.  */
+19:	testb	%r8b, %r8b
+	jz	8b
+
+	/* Adjust the mutex values first and then unlock it.  The unlock
+	   should always succeed or else the kernel did not lock the mutex
+	   correctly.  */
+	movq	16(%rsp), %rdi
+	callq	__pthread_mutex_cond_lock_adjust
+	movq	%rdi, %r8
+	xorl	%esi, %esi
+	callq	__pthread_mutex_unlock_usercnt
+	/* Reload cond_var.  */
+	movq	8(%rsp), %rdi
+	jmp	8b
+
+	/* Initial locking failed.  */
+1:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+	jmp	2b
+
+	/* Unlock in loop requires wakeup.  */
+3:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	/* The call preserves %rdx.  */
+	callq	__lll_unlock_wake
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+	jmp	4b
+
+	/* Locking in loop failed.  */
+5:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+	jmp	6b
+
+	/* Unlock after loop requires wakeup.  */
+10:
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+	jmp	11b
+
+	/* The initial unlocking of the mutex failed.  */
+12:	movq	%rax, %r10
+	movq	8(%rsp), %rdi
+	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	je	13f
+
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+
+13:	movq	%r10, %rax
+	jmp	14b
+
+	.size	__pthread_cond_wait, .-__pthread_cond_wait
+versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
+		  GLIBC_2_3_2)
+
+
+	.align	16
+	.type	__condvar_cleanup1, @function
+	.globl	__condvar_cleanup1
+	.hidden	__condvar_cleanup1
+__condvar_cleanup1:
+	/* Stack frame:
+
+	   rsp + 32
+		    +--------------------------+
+	   rsp + 24 | stored %rax (exception)  |
+		    +--------------------------+
+	   rsp + 16 | mutex pointer            |
+		    +--------------------------+
+	   rsp +  8 | condvar pointer          |
+		    +--------------------------+
+	   rsp +  4 | old broadcast_seq value  |
+		    +--------------------------+
+	   rsp +  0 | old cancellation mode    |
+		    +--------------------------+
+	*/
+
+	movq	%rax, 24(%rsp)
+
+	/* Get internal lock.  */
+	movq	8(%rsp), %rdi
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jz	1f
+
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+
+1:	movl	broadcast_seq(%rdi), %edx
+	cmpl	4(%rsp), %edx
+	jne	3f
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case the thread was woken and
+	   then canceled.  In this case we ignore the signal.  */
+	movq	total_seq(%rdi), %rax
+	cmpq	wakeup_seq(%rdi), %rax
+	jbe	6f
+	incq	wakeup_seq(%rdi)
+	incl	cond_futex(%rdi)
+6:	incq	woken_seq(%rdi)
+
+3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	xorl	%ecx, %ecx
+	cmpq	$0xffffffffffffffff, total_seq(%rdi)
+	jne	4f
+	movl	cond_nwaiters(%rdi), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	4f
+
+	LP_OP(cmp) $-1, dep_mutex(%rdi)
+	leaq	cond_nwaiters(%rdi), %rdi
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+	subq	$cond_nwaiters, %rdi
+	movl	$1, %ecx
+
+4:	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	je	2f
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	/* The call preserves %rcx.  */
+	callq	__lll_unlock_wake
+
+	/* Wake up all waiters to make sure no signal gets lost.  */
+2:	testl	%ecx, %ecx
+	jnz	5f
+	addq	$cond_futex, %rdi
+	LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
+	movl	$0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+
+	/* Lock the mutex only if we don't own it already.  This only happens
+	   in case of PI mutexes, if we got canceled after a successful
+	   return of the futex syscall and before disabling async
+	   cancellation.  */
+5:	movq	16(%rsp), %rdi
+	movl	MUTEX_KIND(%rdi), %eax
+	andl	$(ROBUST_BIT|PI_BIT), %eax
+	cmpl	$PI_BIT, %eax
+	jne	7f
+
+	movl	(%rdi), %eax
+	andl	$TID_MASK, %eax
+	cmpl	%eax, %fs:TID
+	jne	7f
+	/* We managed to get the lock.  Fix it up before returning.  */
+	callq	__pthread_mutex_cond_lock_adjust
+	jmp	8f
+
+
+7:	callq	__pthread_mutex_cond_lock
+
+8:	movq	24(%rsp), %rdi
+.LcallUR:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	__condvar_cleanup1, .-__condvar_cleanup1
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format
+	.byte	DW_EH_PE_omit			# @TType format
+	.byte	DW_EH_PE_uleb128		# call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 __condvar_cleanup1-.LSTARTCODE
+	.uleb128 0
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128 0
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	LP_SIZE
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, LP_SIZE
+DW.ref.__gcc_personality_v0:
+	ASM_ADDR __gcc_personality_v0
+#endif
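The wait loop above is coordinated purely through the condvar's sequence
counters; no payload travels through the futex word itself.  In rough
pseudo-C, this is the classification done at label 6 once the futex call
returns (classify_wakeup and the entry_* names are illustrative, not glibc
code; the entry_* values are the ones saved on the stack at offsets 24 and
4 before blocking):

enum wakeup { WAKEUP_BROADCAST, WAKEUP_SIGNAL, WAKEUP_SPURIOUS };

static enum wakeup classify_wakeup(unsigned int broadcast_seq,
				   unsigned long long wakeup_seq,
				   unsigned long long woken_seq,
				   unsigned int entry_broadcast_seq,
				   unsigned long long entry_wakeup_seq)
{
	if (broadcast_seq != entry_broadcast_seq)
		return WAKEUP_BROADCAST;	/* label 16: leave without
						   consuming a signal */
	if (wakeup_seq > entry_wakeup_seq && wakeup_seq > woken_seq)
		return WAKEUP_SIGNAL;		/* consume it: incq woken_seq */
	return WAKEUP_SPURIOUS;			/* label 19: back to futex_wait */
}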
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_once.S b/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
new file mode 100644
index 0000000000..2cbe2fae62
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
@@ -0,0 +1,193 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <kernel-features.h>
+#include <tcb-offsets.h>
+#include <lowlevellock.h>
+
+
+	.comm	__fork_generation, 4, 4
+
+	.text
+
+
+	.globl	__pthread_once
+	.type	__pthread_once,@function
+	.align	16
+__pthread_once:
+.LSTARTCODE:
+	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+	testl	$2, (%rdi)
+	jz	1f
+	xorl	%eax, %eax
+	retq
+
+	/* Preserve the function pointer.  */
+1:	pushq	%rsi
+	cfi_adjust_cfa_offset(8)
+	xorq	%r10, %r10
+
+	/* Not yet initialized or initialization in progress.
+	   Get the fork generation counter now.  */
+6:	movl	(%rdi), %eax
+
+5:	movl	%eax, %edx
+
+	testl	$2, %eax
+	jnz	4f
+
+	andl	$3, %edx
+	orl	__fork_generation(%rip), %edx
+	orl	$1, %edx
+
+	LOCK
+	cmpxchgl %edx, (%rdi)
+	jnz	5b
+
+	/* Check whether another thread already runs the initializer.  */
+	testl	$1, %eax
+	jz	3f	/* No -> do it.  */
+
+	/* Check whether the initializer execution was interrupted
+	   by a fork.  */
+	xorl	%edx, %eax
+	testl	$0xfffffffc, %eax
+	jnz	3f	/* Different fork generation -> run initializer.  */
+
+	/* Somebody else got here first.  Wait.  */
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAIT|FUTEX_PRIVATE_FLAG, %esi
+#else
+# if FUTEX_WAIT == 0
+	movl	%fs:PRIVATE_FUTEX, %esi
+# else
+	movl	$FUTEX_WAIT, %esi
+	orl	%fs:PRIVATE_FUTEX, %esi
+# endif
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+	jmp	6b
+
+	/* Preserve the pointer to the control variable.  */
+3:	pushq	%rdi
+	cfi_adjust_cfa_offset(8)
+	pushq	%rdi
+	cfi_adjust_cfa_offset(8)
+
+.LcleanupSTART:
+	callq	*16(%rsp)
+.LcleanupEND:
+
+	/* Get the control variable address back.  */
+	popq	%rdi
+	cfi_adjust_cfa_offset(-8)
+
+	/* Successful run of the initializer.  Signal that we are done.  */
+	LOCK
+	incl	(%rdi)
+
+	addq	$8, %rsp
+	cfi_adjust_cfa_offset(-8)
+
+	/* Wake up all other threads.  */
+	movl	$0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %esi
+#else
+	movl	$FUTEX_WAKE, %esi
+	orl	%fs:PRIVATE_FUTEX, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+
+4:	addq	$8, %rsp
+	cfi_adjust_cfa_offset(-8)
+	xorl	%eax, %eax
+	retq
+	.size	__pthread_once,.-__pthread_once
+
+
+hidden_def (__pthread_once)
+strong_alias (__pthread_once, pthread_once)
+
+
+	.type	clear_once_control,@function
+	.align	16
+clear_once_control:
+	cfi_adjust_cfa_offset(3 * 8)
+	movq	(%rsp), %rdi
+	movq	%rax, %r8
+	movl	$0, (%rdi)
+
+	movl	$0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %esi
+#else
+	movl	$FUTEX_WAKE, %esi
+	orl	%fs:PRIVATE_FUTEX, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+
+	movq	%r8, %rdi
+.LcallUR:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	clear_once_control,.-clear_once_control
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format
+	.byte	DW_EH_PE_omit			# @TType format
+	.byte	DW_EH_PE_uleb128		# call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 clear_once_control-.LSTARTCODE
+	.uleb128  0
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	LP_SIZE
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, LP_SIZE
+DW.ref.__gcc_personality_v0:
+	ASM_ADDR __gcc_personality_v0
+#endif
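The once-control word above packs three things into 32 bits: bit 1 means
the initializer has completed, bit 0 means one is currently running, and
the remaining bits carry the fork generation so that an initializer
interrupted by fork() is re-run.  A hedged C rendering of that state
machine, with the cancellation cleanup (clear_once_control) omitted;
pthread_once_sketch is a hypothetical name and fork_generation stands in
for the __fork_generation global defined via .comm above:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned int fork_generation;	/* bumped by the fork handler */

static long futex_word(int *uaddr, int op, int val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

int pthread_once_sketch(int *once, void (*init_routine)(void))
{
	for (;;) {
		int val = *once;

		if (val & 2)
			return 0;	/* already initialized */

		int newval = (val & 3) | fork_generation | 1;
		if (!__sync_bool_compare_and_swap(once, val, newval))
			continue;	/* lost the race: reread */

		if ((val & 1) != 0 && ((val ^ newval) & ~3u) == 0) {
			/* Another thread runs the initializer in our
			   generation: sleep until it finishes.  */
			futex_word(once, FUTEX_WAIT, newval);
			continue;
		}

		init_routine();

		/* The low bits are 01 here; incrementing yields 10,
		   i.e. "done", then wake all waiters.  */
		__sync_fetch_and_add(once, 1);
		futex_word(once, FUTEX_WAKE, 0x7fffffff);
		return 0;
	}
}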
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
new file mode 100644
index 0000000000..3bbb4c7f62
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
@@ -0,0 +1,177 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <stap-probe.h>
+
+	.text
+
+	.globl	__pthread_rwlock_rdlock
+	.type	__pthread_rwlock_rdlock,@function
+	.align	16
+__pthread_rwlock_rdlock:
+	cfi_startproc
+
+	LIBC_PROBE (rdlock_entry, 1, %rdi)
+
+	xorq	%r10, %r10
+
+	/* Get the lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, MUTEX(%rdi)
+#endif
+	jnz	1f
+
+2:	movl	WRITER(%rdi), %eax
+	testl	%eax, %eax
+	jne	14f
+	cmpl	$0, WRITERS_QUEUED(%rdi)
+	je	5f
+	cmpl	$0, FLAGS(%rdi)
+	je	5f
+
+3:	incl	READERS_QUEUED(%rdi)
+	je	4f
+
+	movl	READERS_WAKEUP(%rdi), %edx
+
+	LOCK
+#if MUTEX == 0
+	decl	(%rdi)
+#else
+	decl	MUTEX(%rdi)
+#endif
+	jne	10f
+
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+	xorl	PSHARED(%rdi), %esi
+#else
+# if FUTEX_WAIT == 0
+	movl	PSHARED(%rdi), %esi
+# else
+	movl	$FUTEX_WAIT, %esi
+	orl	PSHARED(%rdi), %esi
+# endif
+	xorl	%fs:PRIVATE_FUTEX, %esi
+#endif
+	addq	$READERS_WAKEUP, %rdi
+	movl	$SYS_futex, %eax
+	syscall
+
+	subq	$READERS_WAKEUP, %rdi
+
+	/* Reacquire the lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, MUTEX(%rdi)
+#endif
+	jnz	12f
+
+13:	decl	READERS_QUEUED(%rdi)
+	jmp	2b
+
+5:	xorl	%edx, %edx
+	incl	NR_READERS(%rdi)
+	je	8f
+9:	LOCK
+#if MUTEX == 0
+	decl	(%rdi)
+#else
+	decl	MUTEX(%rdi)
+#endif
+	jne	6f
+7:
+
+	movq	%rdx, %rax
+	retq
+
+1:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_lock_wait
+#if MUTEX != 0
+	subq	$MUTEX, %rdi
+#endif
+	jmp	2b
+
+14:	cmpl	%fs:TID, %eax
+	jne	3b
+	/* Deadlock detected.  */
+	movl	$EDEADLK, %edx
+	jmp	9b
+
+6:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_unlock_wake
+#if MUTEX != 0
+	subq	$MUTEX, %rdi
+#endif
+	jmp	7b
+
+	/* Overflow.  */
+8:	decl	NR_READERS(%rdi)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+	/* Overflow.  */
+4:	decl	READERS_QUEUED(%rdi)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+10:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_unlock_wake
+#if MUTEX != 0
+	subq	$MUTEX, %rdi
+#endif
+	jmp	11b
+
+12:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_lock_wait
+#if MUTEX != 0
+	subq	$MUTEX, %rdi
+#endif
+	jmp	13b
+	cfi_endproc
+	.size	__pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
+
+strong_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)
+hidden_def (__pthread_rwlock_rdlock)
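Stripped of the locking and futex plumbing, the decision made at label 2
above reduces to a few comparisons.  A pseudo-C restatement follows
(rdlock_try and the struct are illustrative; real glibc takes the field
offsets from lowlevelrwlock.h), assuming the caller already holds the
internal MUTEX lock:

#include <errno.h>

struct rwlock_sketch {
	unsigned int nr_readers;	/* NR_READERS */
	unsigned int readers_queued;	/* READERS_QUEUED */
	unsigned int writers_queued;	/* WRITERS_QUEUED */
	int writer;			/* WRITER: TID of owner, 0 if none */
	unsigned int flags;		/* FLAGS: nonzero -> prefer writers */
};

/* Returns 0 when the read lock was taken, EBUSY when the caller must
   queue on READERS_WAKEUP and futex-wait, and EDEADLK/EAGAIN for the
   error paths (labels 14 and 8).  */
static int rdlock_try(struct rwlock_sketch *rw, int self_tid)
{
	if (rw->writer != 0)
		return rw->writer == self_tid ? EDEADLK : EBUSY;
	if (rw->writers_queued != 0 && rw->flags != 0)
		return EBUSY;		/* writer preference: queue behind them */
	if (++rw->nr_readers == 0) {	/* counter overflow, label 8 */
		--rw->nr_readers;
		return EAGAIN;
	}
	return 0;
}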
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
new file mode 100644
index 0000000000..40bcc04a9d
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
@@ -0,0 +1,274 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+
+	.text
+
+	.globl	pthread_rwlock_timedrdlock
+	.type	pthread_rwlock_timedrdlock,@function
+	.align	16
+pthread_rwlock_timedrdlock:
+	cfi_startproc
+	pushq	%r12
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r12, 0)
+	pushq	%r13
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r13, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define VALREG	%edx
+#else
+	pushq	%r14
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r14, 0)
+
+	subq	$16, %rsp
+	cfi_adjust_cfa_offset(16)
+# define VALREG %r14d
+#endif
+
+	movq	%rdi, %r12
+	movq	%rsi, %r13
+
+	/* Get the lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, MUTEX(%rdi)
+#endif
+	jnz	1f
+
+2:	movl	WRITER(%r12), %eax
+	testl	%eax, %eax
+	jne	14f
+	cmpl	$0, WRITERS_QUEUED(%r12)
+	je	5f
+	cmpl	$0, FLAGS(%r12)
+	je	5f
+
+	/* Check the value of the timeout parameter.  */
+3:	cmpq	$1000000000, 8(%r13)
+	jae	19f
+
+	incl	READERS_QUEUED(%r12)
+	je	4f
+
+	movl	READERS_WAKEUP(%r12), VALREG
+
+	/* Unlock.  */
+	LOCK
+#if MUTEX == 0
+	decl	(%r12)
+#else
+	decl	MUTEX(%r12)
+#endif
+	jne	10f
+
+11:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+	cmpl	$0, __have_futex_clock_realtime(%rip)
+#  else
+	cmpl	$0, __have_futex_clock_realtime
+#  endif
+	je	.Lreltmo
+#endif
+
+	cmpq	$0, (%r13)
+	js	16f		/* Time is already up.  */
+
+	movl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+	xorl	PSHARED(%r12), %esi
+	movq	%r13, %r10
+	movl	$0xffffffff, %r9d
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	movl	%r14d, %edx
+#endif
+21:	leaq	READERS_WAKEUP(%r12), %rdi
+	movl	$SYS_futex, %eax
+	syscall
+	movq	%rax, %rdx
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.subsection 2
+.Lreltmo:
+	/* Get current time.  */
+	movq	%rsp, %rdi
+	xorl	%esi, %esi
+	/* This call works because we directly jump to a system call entry
+	   which preserves all the registers.  */
+	call	JUMPTARGET(__gettimeofday)
+
+	/* Compute relative timeout.  */
+	movq	8(%rsp), %rax
+	movl	$1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+	movq	(%r13), %rcx
+	movq	8(%r13), %rdi
+	subq	(%rsp), %rcx
+	subq	%rax, %rdi
+	jns	15f
+	addq	$1000000000, %rdi
+	decq	%rcx
+15:	testq	%rcx, %rcx
+	js	16f		/* Time is already up.  */
+
+	/* Futex call.  */
+	movq	%rcx, (%rsp)	/* Store relative timeout.  */
+	movq	%rdi, 8(%rsp)
+
+# ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+	xorl	PSHARED(%r12), %esi
+# else
+#  if FUTEX_WAIT == 0
+	movl	PSHARED(%r12), %esi
+#  else
+	movl	$FUTEX_WAIT, %esi
+	orl	PSHARED(%r12), %esi
+#  endif
+	xorl	%fs:PRIVATE_FUTEX, %esi
+# endif
+	movq	%rsp, %r10
+	movl	%r14d, %edx
+
+	jmp	21b
+	.previous
+#endif
+
+17:	/* Reacquire the lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %esi, (%r12)
+#else
+	cmpxchgl %esi, MUTEX(%r12)
+#endif
+	jnz	12f
+
+13:	decl	READERS_QUEUED(%r12)
+	cmpq	$-ETIMEDOUT, %rdx
+	jne	2b
+
+18:	movl	$ETIMEDOUT, %edx
+	jmp	9f
+
+
+5:	xorl	%edx, %edx
+	incl	NR_READERS(%r12)
+	je	8f
+9:	LOCK
+#if MUTEX == 0
+	decl	(%r12)
+#else
+	decl	MUTEX(%r12)
+#endif
+	jne	6f
+
+7:	movq	%rdx, %rax
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	addq	$16, %rsp
+	cfi_adjust_cfa_offset(-16)
+	popq	%r14
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r14)
+#endif
+	popq	%r13
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r13)
+	popq	%r12
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r12)
+	retq
+
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+	cfi_adjust_cfa_offset(16)
+	cfi_rel_offset(%r12, 8)
+	cfi_rel_offset(%r13, 0)
+#else
+	cfi_adjust_cfa_offset(40)
+	cfi_offset(%r12, -16)
+	cfi_offset(%r13, -24)
+	cfi_offset(%r14, -32)
+#endif
+1:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_lock_wait
+	jmp	2b
+
+14:	cmpl	%fs:TID, %eax
+	jne	3b
+	movl	$EDEADLK, %edx
+	jmp	9b
+
+6:	movl	PSHARED(%r12), %esi
+#if MUTEX == 0
+	movq	%r12, %rdi
+#else
+	leaq	MUTEX(%r12), %rdi
+#endif
+	callq	__lll_unlock_wake
+	jmp	7b
+
+	/* Overflow.  */
+8:	decl	NR_READERS(%r12)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+	/* Overflow.  */
+4:	decl	READERS_QUEUED(%r12)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+10:	movl	PSHARED(%r12), %esi
+#if MUTEX == 0
+	movq	%r12, %rdi
+#else
+	leaq	MUTEX(%r12), %rdi
+#endif
+	callq	__lll_unlock_wake
+	jmp	11b
+
+12:	movl	PSHARED(%r12), %esi
+#if MUTEX == 0
+	movq	%r12, %rdi
+#else
+	leaq	MUTEX(%r12), %rdi
+#endif
+	callq	__lll_lock_wait
+	jmp	13b
+
+16:	movq	$-ETIMEDOUT, %rdx
+	jmp	17b
+
+19:	movl	$EINVAL, %edx
+	jmp	9b
+	cfi_endproc
+	.size	pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
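Two small checks bracket the timed path above: tv_nsec is validated with an
unsigned comparison, so a negative value fails the jae just like a
too-large one (label 19), and a negative tv_sec short-circuits to
ETIMEDOUT before any futex call is made (label 16).  As a sketch
(validate_abstime is a hypothetical name):

#include <errno.h>
#include <stdint.h>
#include <time.h>

static int validate_abstime(const struct timespec *abstime)
{
	/* "cmpq $1000000000, 8(%r13); jae 19f": jae is an unsigned
	   comparison, so negative tv_nsec is rejected as well.  */
	if ((uint64_t) abstime->tv_nsec >= 1000000000)
		return EINVAL;
	/* "cmpq $0, (%r13); js 16f": the deadline already passed.  */
	if (abstime->tv_sec < 0)
		return ETIMEDOUT;
	return 0;
}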
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
new file mode 100644
index 0000000000..f57ef5238c
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
@@ -0,0 +1,266 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+
+	.text
+
+	.globl	pthread_rwlock_timedwrlock
+	.type	pthread_rwlock_timedwrlock,@function
+	.align	16
+pthread_rwlock_timedwrlock:
+	cfi_startproc
+	pushq	%r12
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r12, 0)
+	pushq	%r13
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r13, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define VALREG	%edx
+#else
+	pushq	%r14
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r14, 0)
+
+	subq	$16, %rsp
+	cfi_adjust_cfa_offset(16)
+# define VALREG %r14d
+#endif
+
+	movq	%rdi, %r12
+	movq	%rsi, %r13
+
+	/* Get the lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, MUTEX(%rdi)
+#endif
+	jnz	1f
+
+2:	movl	WRITER(%r12), %eax
+	testl	%eax, %eax
+	jne	14f
+	cmpl	$0, NR_READERS(%r12)
+	je	5f
+
+	/* Check the value of the timeout parameter.  */
+3:	cmpq	$1000000000, 8(%r13)
+	jae	19f
+
+	incl	WRITERS_QUEUED(%r12)
+	je	4f
+
+	movl	WRITERS_WAKEUP(%r12), VALREG
+
+	LOCK
+#if MUTEX == 0
+	decl	(%r12)
+#else
+	decl	MUTEX(%r12)
+#endif
+	jne	10f
+
+11:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+	cmpl	$0, __have_futex_clock_realtime(%rip)
+#  else
+	cmpl	$0, __have_futex_clock_realtime
+#  endif
+	je	.Lreltmo
+#endif
+
+	cmpq	$0, (%r13)
+	js	16f		/* Time is already up.  */
+
+	movl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+	xorl	PSHARED(%r12), %esi
+	movq	%r13, %r10
+	movl	$0xffffffff, %r9d
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	movl	%r14d, %edx
+#endif
+21:	leaq	WRITERS_WAKEUP(%r12), %rdi
+	movl	$SYS_futex, %eax
+	syscall
+	movq	%rax, %rdx
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.subsection 2
+.Lreltmo:
+	/* Get current time.  */
+	movq	%rsp, %rdi
+	xorl	%esi, %esi
+	/* This call works because we directly jump to a system call entry
+	   which preserves all the registers.  */
+	call	JUMPTARGET(__gettimeofday)
+
+	/* Compute relative timeout.  */
+	movq	8(%rsp), %rax
+	movl	$1000, %edi
+	mul	%rdi		/* Microseconds to nanoseconds.  */
+	movq	(%r13), %rcx
+	movq	8(%r13), %rdi
+	subq	(%rsp), %rcx
+	subq	%rax, %rdi
+	jns	15f
+	addq	$1000000000, %rdi
+	decq	%rcx
+15:	testq	%rcx, %rcx
+	js	16f		/* Time is already up.  */
+
+	/* Futex call.  */
+	movq	%rcx, (%rsp)	/* Store relative timeout.  */
+	movq	%rdi, 8(%rsp)
+
+# ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+	xorl	PSHARED(%r12), %esi
+# else
+#  if FUTEX_WAIT == 0
+	movl	PSHARED(%r12), %esi
+#  else
+	movl	$FUTEX_WAIT, %esi
+	orl	PSHARED(%r12), %esi
+#  endif
+	xorl	%fs:PRIVATE_FUTEX, %esi
+# endif
+	movq	%rsp, %r10
+	movl	%r14d, %edx
+
+	jmp	21b
+	.previous
+#endif
+
+17:	/* Reacquire the lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %esi, (%r12)
+#else
+	cmpxchgl %esi, MUTEX(%r12)
+#endif
+	jnz	12f
+
+13:	decl	WRITERS_QUEUED(%r12)
+	cmpq	$-ETIMEDOUT, %rdx
+	jne	2b
+
+18:	movl	$ETIMEDOUT, %edx
+	jmp	9f
+
+
+5:	xorl	%edx, %edx
+	movl	%fs:TID, %eax
+	movl	%eax, WRITER(%r12)
+9:	LOCK
+#if MUTEX == 0
+	decl	(%r12)
+#else
+	decl	MUTEX(%r12)
+#endif
+	jne	6f
+
+7:	movq	%rdx, %rax
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	addq	$16, %rsp
+	cfi_adjust_cfa_offset(-16)
+	popq	%r14
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r14)
+#endif
+	popq	%r13
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r13)
+	popq	%r12
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r12)
+	retq
+
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+	cfi_adjust_cfa_offset(16)
+	cfi_rel_offset(%r12, 8)
+	cfi_rel_offset(%r13, 0)
+#else
+	cfi_adjust_cfa_offset(40)
+	cfi_offset(%r12, -16)
+	cfi_offset(%r13, -24)
+	cfi_offset(%r14, -32)
+#endif
+1:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_lock_wait
+	jmp	2b
+
+14:	cmpl	%fs:TID, %eax
+	jne	3b
+20:	movl	$EDEADLK, %edx
+	jmp	9b
+
+6:	movl	PSHARED(%r12), %esi
+#if MUTEX == 0
+	movq	%r12, %rdi
+#else
+	leaq	MUTEX(%r12), %rdi
+#endif
+	callq	__lll_unlock_wake
+	jmp	7b
+
+	/* Overflow.  */
+4:	decl	WRITERS_QUEUED(%r12)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+10:	movl	PSHARED(%r12), %esi
+#if MUTEX == 0
+	movq	%r12, %rdi
+#else
+	leaq	MUTEX(%r12), %rdi
+#endif
+	callq	__lll_unlock_wake
+	jmp	11b
+
+12:	movl	PSHARED(%r12), %esi
+#if MUTEX == 0
+	movq	%r12, %rdi
+#else
+	leaq	MUTEX(%r12), %rdi
+#endif
+	callq	__lll_lock_wait
+	jmp	13b
+
+16:	movq	$-ETIMEDOUT, %rdx
+	jmp	17b
+
+19:	movl	$EINVAL, %edx
+	jmp	9b
+	cfi_endproc
+	.size	pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
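The timed write-lock shares its retry shape with the timed read-lock
above: only a raw -ETIMEDOUT from the futex ends the loop, and only after
the queued count has been unwound under the internal lock (labels 17, 13
and 18); any other return (a genuine wakeup, EINTR, or EAGAIN from a
changed futex word) goes back to re-test the lock state at label 2.  A
pseudo-C outline of that shape, with both function-pointer parameters as
illustrative placeholders:

#include <errno.h>

static int timed_lock_loop(long (*wait_on_futex)(void),
			   int (*try_lock)(void))	/* labels 2..5 */
{
	for (;;) {
		int err = try_lock();	/* 0, EBUSY, EDEADLK, EAGAIN, EINVAL */
		if (err != EBUSY)
			return err;	/* acquired, or a hard error */
		long ret = wait_on_futex();	/* raw -errno or 0 */
		/* Label 13: unqueue under the lock, then map the
		   timeout to a positive errno (label 18).  */
		if (ret == -ETIMEDOUT)
			return ETIMEDOUT;
	}
}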
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
new file mode 100644
index 0000000000..d779f7b759
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
@@ -0,0 +1,126 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <kernel-features.h>
+
+
+	.text
+
+	.globl	__pthread_rwlock_unlock
+	.type	__pthread_rwlock_unlock,@function
+	.align	16
+__pthread_rwlock_unlock:
+	cfi_startproc
+	/* Get the lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, MUTEX(%rdi)
+#endif
+	jnz	1f
+
+2:	cmpl	$0, WRITER(%rdi)
+	jne	5f
+	decl	NR_READERS(%rdi)
+	jnz	6f
+
+5:	movl	$0, WRITER(%rdi)
+
+	movl	$1, %edx
+	leaq	WRITERS_WAKEUP(%rdi), %r10
+	cmpl	$0, WRITERS_QUEUED(%rdi)
+	jne	0f
+
+	/* If no readers are waiting either, there is nothing to do.  */
+	cmpl	$0, READERS_QUEUED(%rdi)
+	je	6f
+
+	movl	$0x7fffffff, %edx
+	leaq	READERS_WAKEUP(%rdi), %r10
+
+0:	incl	(%r10)
+	LOCK
+#if MUTEX == 0
+	decl	(%rdi)
+#else
+	decl	MUTEX(%rdi)
+#endif
+	jne	7f
+
+8:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_PRIVATE_FLAG|FUTEX_WAKE, %esi
+	xorl	PSHARED(%rdi), %esi
+#else
+	movl	$FUTEX_WAKE, %esi
+	orl	PSHARED(%rdi), %esi
+	xorl	%fs:PRIVATE_FUTEX, %esi
+#endif
+	movl	$SYS_futex, %eax
+	movq	%r10, %rdi
+	syscall
+
+	xorl	%eax, %eax
+	retq
+
+	.align	16
+6:	LOCK
+#if MUTEX == 0
+	decl	(%rdi)
+#else
+	decl	MUTEX(%rdi)
+#endif
+	jne	3f
+
+4:	xorl	%eax, %eax
+	retq
+
+1:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_lock_wait
+#if MUTEX != 0
+	subq	$MUTEX, %rdi
+#endif
+	jmp	2b
+
+3:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_unlock_wake
+	jmp	4b
+
+7:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_unlock_wake
+	jmp	8b
+	cfi_endproc
+	.size	__pthread_rwlock_unlock,.-__pthread_rwlock_unlock
+
+strong_alias (__pthread_rwlock_unlock, pthread_rwlock_unlock)
+hidden_def (__pthread_rwlock_unlock)
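
The wake policy above prefers writers: if any writer is queued, exactly one is released on WRITERS_WAKEUP; otherwise all queued readers (the 0x7fffffff, i.e. INT_MAX, in %edx) are released on READERS_WAKEUP, after label 0 bumps the futex word so late waiters revalidate. A hedged C rendering, with illustrative field names and the private-futex flag selection omitted:

#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct rw_sketch
{
  int writer, nr_readers;
  unsigned int writers_queued, readers_queued;
  unsigned int writers_wakeup, readers_wakeup;
};

static void
rwlock_unlock_wake_sketch (struct rw_sketch *rw)
{
  unsigned int *addr;
  int nwake;
  if (rw->writers_queued != 0)
    {
      addr = &rw->writers_wakeup;	/* wake exactly one writer */
      nwake = 1;
    }
  else if (rw->readers_queued != 0)
    {
      addr = &rw->readers_wakeup;	/* wake all readers: $0x7fffffff */
      nwake = INT_MAX;
    }
  else
    return;				/* nobody queued: nothing to do */
  ++*addr;	/* label 0: bump the futex word so waiters revalidate */
  /* The real code ORs the private-futex flag from PSHARED into the op.  */
  syscall (SYS_futex, addr, FUTEX_WAKE, nwake, NULL, NULL, 0);
}
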
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
new file mode 100644
index 0000000000..e444def525
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
@@ -0,0 +1,165 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <stap-probe.h>
+
+	.text
+
+	.globl	__pthread_rwlock_wrlock
+	.type	__pthread_rwlock_wrlock,@function
+	.align	16
+__pthread_rwlock_wrlock:
+	cfi_startproc
+
+	LIBC_PROBE (wrlock_entry, 1, %rdi)
+
+	xorq	%r10, %r10
+
+	/* Get the lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, MUTEX(%rdi)
+#endif
+	jnz	1f
+
+2:	movl	WRITER(%rdi), %eax
+	testl	%eax, %eax
+	jne	14f
+	cmpl	$0, NR_READERS(%rdi)
+	je	5f
+
+3:	incl	WRITERS_QUEUED(%rdi)
+	je	4f
+
+	movl	WRITERS_WAKEUP(%rdi), %edx
+
+	LOCK
+#if MUTEX == 0
+	decl	(%rdi)
+#else
+	decl	MUTEX(%rdi)
+#endif
+	jne	10f
+
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+	xorl	PSHARED(%rdi), %esi
+#else
+# if FUTEX_WAIT == 0
+	movl	PSHARED(%rdi), %esi
+# else
+	movl	$FUTEX_WAIT, %esi
+	orl	PSHARED(%rdi), %esi
+# endif
+	xorl	%fs:PRIVATE_FUTEX, %esi
+#endif
+	addq	$WRITERS_WAKEUP, %rdi
+	movl	$SYS_futex, %eax
+	syscall
+
+	subq	$WRITERS_WAKEUP, %rdi
+
+	/* Reacquire the lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if MUTEX == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, MUTEX(%rdi)
+#endif
+	jnz	12f
+
+13:	decl	WRITERS_QUEUED(%rdi)
+	jmp	2b
+
+5:	xorl	%edx, %edx
+	movl	%fs:TID, %eax
+	movl	%eax, WRITER(%rdi)
+9:	LOCK
+#if MUTEX == 0
+	decl	(%rdi)
+#else
+	decl	MUTEX(%rdi)
+#endif
+	jne	6f
+7:
+
+	movq	%rdx, %rax
+	retq
+
+1:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_lock_wait
+#if MUTEX != 0
+	subq	$MUTEX, %rdi
+#endif
+	jmp	2b
+
+14:	cmpl	%fs:TID, %eax
+	jne	3b
+	movl	$EDEADLK, %edx
+	jmp	9b
+
+6:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_unlock_wake
+	jmp	7b
+
+4:	decl	WRITERS_QUEUED(%rdi)
+	movl	$EAGAIN, %edx
+	jmp	9b
+
+10:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_unlock_wake
+#if MUTEX != 0
+	subq	$MUTEX, %rdi
+#endif
+	jmp	11b
+
+12:	movl	PSHARED(%rdi), %esi
+#if MUTEX != 0
+	addq	$MUTEX, %rdi
+#endif
+	callq	__lll_lock_wait
+#if MUTEX != 0
+	subq	$MUTEX, %rdi
+#endif
+	jmp	13b
+	cfi_endproc
+	.size	__pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
+
+strong_alias (__pthread_rwlock_wrlock, pthread_rwlock_wrlock)
+hidden_def (__pthread_rwlock_wrlock)
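
Stripped of the probe and PSHARED details, the loop above is: take the internal low-level lock, claim WRITER if the rwlock is free, otherwise queue on WRITERS_WAKEUP and futex-wait until woken, then retry. A rough C sketch under those assumptions (the lll_*_sketch helpers spin instead of futex-waiting, and the EAGAIN overflow check at label 4 is omitted):

#include <errno.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

struct rw_sketch
{
  atomic_int lock;			/* the MUTEX word */
  int writer, nr_readers;
  unsigned int writers_queued;
  atomic_uint writers_wakeup;
};
extern int my_tid (void);		/* plays the role of %fs:TID */

static void
lll_lock_sketch (atomic_int *l)
{
  int zero = 0;
  while (!atomic_compare_exchange_weak (l, &zero, 1))
    zero = 0;				/* the real lll_lock futex-waits */
}

static void
lll_unlock_sketch (atomic_int *l)
{
  atomic_store (l, 0);			/* the real lll_unlock wakes waiters */
}

static int
wrlock_sketch (struct rw_sketch *rw)
{
  lll_lock_sketch (&rw->lock);		/* the LOCK cmpxchg fast path */
  for (;;)
    {
      if (rw->writer == 0 && rw->nr_readers == 0)
	{
	  rw->writer = my_tid ();	/* label 5: lock is ours */
	  lll_unlock_sketch (&rw->lock);
	  return 0;
	}
      if (rw->writer == my_tid ())	/* label 14 */
	{
	  lll_unlock_sketch (&rw->lock);
	  return EDEADLK;
	}
      ++rw->writers_queued;		/* label 3 */
      unsigned int val = atomic_load (&rw->writers_wakeup);
      lll_unlock_sketch (&rw->lock);
      syscall (SYS_futex, &rw->writers_wakeup, FUTEX_WAIT, val,
	       NULL, NULL, 0);		/* label 11 */
      lll_lock_sketch (&rw->lock);	/* labels 12/13: reacquire, retry */
      --rw->writers_queued;
    }
}
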
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c b/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c
new file mode 100644
index 0000000000..a7bbe38156
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c
@@ -0,0 +1,14 @@
+#include <tls.h>
+
+#define RESET_VGETCPU_CACHE() \
+  do {			      \
+    asm volatile ("movl %0, %%fs:%P1\n\t"				      \
+		  "movl %0, %%fs:%P2"					      \
+		  :							      \
+		  : "ir" (0), "i" (offsetof (struct pthread,		      \
+					     header.vgetcpu_cache[0])),	      \
+		    "i" (offsetof (struct pthread,			      \
+				   header.vgetcpu_cache[1])));		\
+  } while (0)
+
+#include <sysdeps/unix/sysv/linux/pthread_setaffinity.c>
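
This wrapper only overrides RESET_VGETCPU_CACHE before pulling in the generic implementation: once the affinity mask changes, the per-thread vgetcpu cache in the TCB header must be zeroed so a later sched_getcpu cannot report a stale CPU. A portable-looking sketch of the same reset, using an illustrative stand-in for the glibc-internal TCB layout (the real macro stores through %fs, so no TCB pointer is needed):

/* Illustrative stand-ins; the real struct pthread lives in nptl.  */
struct tcbhead_sketch { unsigned long vgetcpu_cache[2]; };
struct pthread_sketch { struct tcbhead_sketch header; };

static inline void
reset_vgetcpu_cache_sketch (struct pthread_sketch *self)
{
  /* Same effect as the two %fs-relative stores in the macro above:
     invalidate both cached words so the next lookup recomputes.  */
  self->header.vgetcpu_cache[0] = 0;
  self->header.vgetcpu_cache[1] = 0;
}
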
diff --git a/sysdeps/unix/sysv/linux/x86_64/sem_post.S b/sysdeps/unix/sysv/linux/x86_64/sem_post.S
new file mode 100644
index 0000000000..1c11600468
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/sem_post.S
@@ -0,0 +1,75 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+
+
+	.text
+
+	.globl	sem_post
+	.type	sem_post,@function
+	.align	16
+sem_post:
+#if VALUE == 0
+	movl	(%rdi), %eax
+#else
+	movl	VALUE(%rdi), %eax
+#endif
+0:	cmpl	$SEM_VALUE_MAX, %eax
+	je	3f
+	leal	1(%rax), %esi
+	LOCK
+#if VALUE == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, VALUE(%rdi)
+#endif
+	jnz	0b
+
+	LP_OP(cmp) $0, NWAITERS(%rdi)
+	je	2f
+
+	movl	$SYS_futex, %eax
+	movl	$FUTEX_WAKE, %esi
+	orl	PRIVATE(%rdi), %esi
+	movl	$1, %edx
+	syscall
+
+	testq	%rax, %rax
+	js	1f
+
+2:	xorl	%eax, %eax
+	retq
+
+1:
+	movl	$EINVAL, %eax
+	jmp	4f
+
+3:
+	movl	$EOVERFLOW, %eax
+
+4:
+	movq	errno@gottpoff(%rip), %rdx
+	movl	%eax, %fs:(%rdx)
+	orl	$-1, %eax
+	retq
+	.size	sem_post,.-sem_post
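
sem_post above is a CAS increment guarded by SEM_VALUE_MAX, followed by a FUTEX_WAKE of one waiter only when NWAITERS is nonzero. A hedged C equivalent, with the stored PRIVATE flag folded away and field names loosely following the structsem.h offsets:

#include <errno.h>
#include <limits.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

struct sem_sketch { atomic_uint value; unsigned long nwaiters; };

static int
sem_post_sketch (struct sem_sketch *sem)
{
  unsigned int v = atomic_load (&sem->value);
  do
    {
      if (v == SEM_VALUE_MAX)		/* label 3 */
	{
	  errno = EOVERFLOW;
	  return -1;
	}
    }
  while (!atomic_compare_exchange_weak (&sem->value, &v, v + 1));

  /* LP_OP(cmp): only enter the kernel if somebody is waiting.  */
  if (sem->nwaiters != 0
      && syscall (SYS_futex, &sem->value, FUTEX_WAKE, 1,
		  NULL, NULL, 0) < 0)
    {
      errno = EINVAL;			/* label 1 */
      return -1;
    }
  return 0;
}
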
diff --git a/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S b/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
new file mode 100644
index 0000000000..880610e682
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
@@ -0,0 +1,380 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+
+	.text
+
+	.globl	sem_timedwait
+	.type	sem_timedwait,@function
+	.align	16
+sem_timedwait:
+.LSTARTCODE:
+	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+#if VALUE == 0
+	movl	(%rdi), %eax
+#else
+	movl	VALUE(%rdi), %eax
+#endif
+2:	testl	%eax, %eax
+	je	1f
+
+	leaq	-1(%rax), %rdx
+	LOCK
+#if VALUE == 0
+	cmpxchgl %edx, (%rdi)
+#else
+	cmpxchgl %edx, VALUE(%rdi)
+#endif
+	jne	2b
+
+	xorl	%eax, %eax
+	retq
+
+	/* Check whether the timeout value is valid.  */
+1:	cmpq	$1000000000, 8(%rsi)
+	jae	6f
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+	cmpl	$0, __have_futex_clock_realtime(%rip)
+# else
+	cmpl	$0, __have_futex_clock_realtime
+# endif
+	je	.Lreltmo
+#endif
+
+	cmpq	$0, (%rsi)
+	js	16f
+
+	/* This push is only needed to store the sem_t pointer for the
+	   exception handler.  */
+	pushq	%rdi
+	cfi_adjust_cfa_offset(8)
+
+	movq	%rsi, %r10
+
+	LOCK
+	LP_OP(add) $1, NWAITERS(%rdi)
+
+.LcleanupSTART:
+13:	call	__pthread_enable_asynccancel
+	movl	%eax, %r8d
+
+#if VALUE != 0
+	leaq	VALUE(%rdi), %rdi
+#endif
+	movl	$0xffffffff, %r9d
+	movl	$FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+	orl	PRIVATE(%rdi), %esi
+	movl	$SYS_futex, %eax
+	xorl	%edx, %edx
+	syscall
+	movq	%rax, %r9
+#if VALUE != 0
+	leaq	-VALUE(%rdi), %rdi
+#endif
+
+	xchgq	%r8, %rdi
+	call	__pthread_disable_asynccancel
+.LcleanupEND:
+	movq	%r8, %rdi
+
+	testq	%r9, %r9
+	je	11f
+	cmpq	$-EWOULDBLOCK, %r9
+	jne	3f
+
+11:
+#if VALUE == 0
+	movl	(%rdi), %eax
+#else
+	movl	VALUE(%rdi), %eax
+#endif
+14:	testl	%eax, %eax
+	je	13b
+
+	leaq	-1(%rax), %rcx
+	LOCK
+#if VALUE == 0
+	cmpxchgl %ecx, (%rdi)
+#else
+	cmpxchgl %ecx, VALUE(%rdi)
+#endif
+	jne	14b
+
+	xorl	%eax, %eax
+
+15:	LOCK
+	LP_OP(sub) $1, NWAITERS(%rdi)
+
+	leaq	8(%rsp), %rsp
+	cfi_adjust_cfa_offset(-8)
+	retq
+
+	cfi_adjust_cfa_offset(8)
+3:	negq	%r9
+	movq	errno@gottpoff(%rip), %rdx
+	movl	%r9d, %fs:(%rdx)
+
+	orl	$-1, %eax
+	jmp	15b
+
+	cfi_adjust_cfa_offset(-8)
+6:
+	movq	errno@gottpoff(%rip), %rdx
+	movl	$EINVAL, %fs:(%rdx)
+
+	orl	$-1, %eax
+
+	retq
+
+16:
+	movq	errno@gottpoff(%rip), %rdx
+	movl	$ETIMEDOUT, %fs:(%rdx)
+
+	orl	$-1, %eax
+
+	retq
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+	pushq	%r12
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r12, 0)
+	pushq	%r13
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r13, 0)
+	pushq	%r14
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r14, 0)
+
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define STACKFRAME 8
+#else
+# define STACKFRAME 24
+#endif
+	subq	$STACKFRAME, %rsp
+	cfi_adjust_cfa_offset(STACKFRAME)
+
+	movq	%rdi, %r12
+	movq	%rsi, %r13
+
+	LOCK
+	LP_OP(add) $1, NWAITERS(%r12)
+
+7:	xorl	%esi, %esi
+	movq	%rsp,%rdi
+	/* This call works because we directly jump to a system call entry
+	   which preserves all the registers.  */
+	call	JUMPTARGET(__gettimeofday)
+
+	/* Compute relative timeout.  */
+	movq	8(%rsp), %rax
+	movl	$1000, %edi
+	mul	%rdi		/* Micro seconds to nano seconds.  */
+	movq	(%r13), %rdi
+	movq	8(%r13), %rsi
+	subq	(%rsp), %rdi
+	subq	%rax, %rsi
+	jns	5f
+	addq	$1000000000, %rsi
+	decq	%rdi
+5:	testq	%rdi, %rdi
+	movl	$ETIMEDOUT, %r14d
+	js	36f		/* Time is already up.  */
+
+	movq	%rdi, (%rsp)	/* Store relative timeout.  */
+	movq	%rsi, 8(%rsp)
+
+.LcleanupSTART2:
+	call	__pthread_enable_asynccancel
+	movl	%eax, 16(%rsp)
+
+	movq	%rsp, %r10
+# if VALUE == 0
+	movq	%r12, %rdi
+# else
+	leaq	VALUE(%r12), %rdi
+# endif
+# if FUTEX_WAIT == 0
+	movl	PRIVATE(%rdi), %esi
+# else
+	movl	$FUTEX_WAIT, %esi
+	orl	PRIVATE(%rdi), %esi
+# endif
+	movl	$SYS_futex, %eax
+	xorl	%edx, %edx
+	syscall
+	movq	%rax, %r14
+
+	movl	16(%rsp), %edi
+	call	__pthread_disable_asynccancel
+.LcleanupEND2:
+
+	testq	%r14, %r14
+	je	9f
+	cmpq	$-EWOULDBLOCK, %r14
+	jne	33f
+
+9:
+# if VALUE == 0
+	movl	(%r12), %eax
+# else
+	movl	VALUE(%r12), %eax
+# endif
+8:	testl	%eax, %eax
+	je	7b
+
+	leaq	-1(%rax), %rcx
+	LOCK
+# if VALUE == 0
+	cmpxchgl %ecx, (%r12)
+# else
+	cmpxchgl %ecx, VALUE(%r12)
+# endif
+	jne	8b
+
+	xorl	%eax, %eax
+
+45:	LOCK
+	LP_OP(sub) $1, NWAITERS(%r12)
+
+	addq	$STACKFRAME, %rsp
+	cfi_adjust_cfa_offset(-STACKFRAME)
+	popq	%r14
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r14)
+	popq	%r13
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r13)
+	popq	%r12
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r12)
+	retq
+
+	cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
+	cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
+	cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
+	cfi_rel_offset(%r14, STACKFRAME)
+33:	negq	%r14
+36:
+	movq	errno@gottpoff(%rip), %rdx
+	movl	%r14d, %fs:(%rdx)
+
+	orl	$-1, %eax
+	jmp	45b
+#endif
+	cfi_endproc
+	.size	sem_timedwait,.-sem_timedwait
+
+
+	.type	sem_timedwait_cleanup,@function
+sem_timedwait_cleanup:
+	cfi_startproc
+	cfi_adjust_cfa_offset(8)
+
+	movq	(%rsp), %rdi
+	LOCK
+	LP_OP(sub) $1, NWAITERS(%rdi)
+	movq	%rax, %rdi
+.LcallUR:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	sem_timedwait_cleanup,.-sem_timedwait_cleanup
+
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.type	sem_timedwait_cleanup2,@function
+sem_timedwait_cleanup2:
+	cfi_startproc
+	cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
+	cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
+	cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
+	cfi_rel_offset(%r14, STACKFRAME)
+
+	LOCK
+	LP_OP(sub) $1, NWAITERS(%r12)
+	movq	%rax, %rdi
+	movq	STACKFRAME(%rsp), %r14
+	movq	STACKFRAME+8(%rsp), %r13
+	movq	STACKFRAME+16(%rsp), %r12
+.LcallUR2:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE2:
+	cfi_endproc
+	.size	sem_timedwait_cleanup2,.-sem_timedwait_cleanup2
+#endif
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format
+	.byte	DW_EH_PE_omit			# @TType format
+	.byte	DW_EH_PE_uleb128		# call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 sem_timedwait_cleanup-.LSTARTCODE
+	.uleb128  0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.uleb128 .LcleanupSTART2-.LSTARTCODE
+	.uleb128 .LcleanupEND2-.LcleanupSTART2
+	.uleb128 sem_timedwait_cleanup2-.LSTARTCODE
+	.uleb128  0
+#endif
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128  0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.uleb128 .LcallUR2-.LSTARTCODE
+	.uleb128 .LENDCODE2-.LcallUR2
+	.uleb128 0
+	.uleb128  0
+#endif
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	LP_SIZE
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, LP_SIZE
+DW.ref.__gcc_personality_v0:
+	ASM_ADDR __gcc_personality_v0
+#endif
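
The preferred path above hands the caller's absolute timespec straight to the kernel via FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME with the all-ones bitset in %r9d; only the .Lreltmo fallback computes a relative timeout by hand from __gettimeofday. A minimal C sketch of the absolute path, with the cancellation bracketing and NWAITERS bookkeeping omitted:

#include <errno.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

struct sem_sketch { atomic_uint value; };

static int
sem_timedwait_sketch (struct sem_sketch *sem, const struct timespec *abs)
{
  if (abs->tv_nsec < 0 || abs->tv_nsec >= 1000000000)	/* label 6 */
    {
      errno = EINVAL;
      return -1;
    }
  if (abs->tv_sec < 0)		/* the js 16f check: time already up */
    {
      errno = ETIMEDOUT;
      return -1;
    }
  for (;;)
    {
      unsigned int v = atomic_load (&sem->value);
      while (v != 0)		/* labels 2 and 14: try to take a token */
	if (atomic_compare_exchange_weak (&sem->value, &v, v - 1))
	  return 0;
      /* Absolute timeout, all-ones bitset (the $0xffffffff in %r9d).  */
      if (syscall (SYS_futex, &sem->value,
		   FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
		   0, abs, NULL, 0xffffffff) != 0
	  && errno != EAGAIN)	/* EAGAIN == EWOULDBLOCK on Linux */
	return -1;		/* errno is ETIMEDOUT, EINTR, ...  */
    }
}
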
diff --git a/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S b/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
new file mode 100644
index 0000000000..1893a34737
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
@@ -0,0 +1,47 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+
+	.text
+
+	.globl	sem_trywait
+	.type	sem_trywait,@function
+	.align	16
+sem_trywait:
+	movl	(%rdi), %eax
+2:	testl	%eax, %eax
+	jz	1f
+
+	leal	-1(%rax), %edx
+	LOCK
+	cmpxchgl %edx, (%rdi)
+	jne	2b
+
+	xorl	%eax, %eax
+	retq
+
+1:
+	movq	errno@gottpoff(%rip), %rdx
+	movl	$EAGAIN, %fs:(%rdx)
+	orl	$-1, %eax
+	retq
+	.size	sem_trywait,.-sem_trywait
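
sem_trywait is the pure lock-free take: loop on cmpxchg while the value is nonzero, fail with EAGAIN once it reaches zero. In C, assuming the semaphore value sits at offset 0 as it does here:

#include <errno.h>
#include <stdatomic.h>

static int
sem_trywait_sketch (atomic_uint *value)
{
  unsigned int v = atomic_load (value);
  while (v != 0)			/* label 2: retry while nonzero */
    if (atomic_compare_exchange_weak (value, &v, v - 1))
      return 0;				/* took one token */
  errno = EAGAIN;			/* label 1 */
  return -1;
}
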
diff --git a/sysdeps/unix/sysv/linux/x86_64/sem_wait.S b/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
new file mode 100644
index 0000000000..8f4d0686ec
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
@@ -0,0 +1,176 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+
+
+	.text
+
+	.globl	sem_wait
+	.type	sem_wait,@function
+	.align	16
+sem_wait:
+.LSTARTCODE:
+	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
+#if VALUE == 0
+	movl	(%rdi), %eax
+#else
+	movl	VALUE(%rdi), %eax
+#endif
+2:	testl	%eax, %eax
+	je	1f
+
+	leal	-1(%rax), %edx
+	LOCK
+#if VALUE == 0
+	cmpxchgl %edx, (%rdi)
+#else
+	cmpxchgl %edx, VALUE(%rdi)
+#endif
+	jne	2b
+
+	xorl	%eax, %eax
+	retq
+
+	/* This push is only needed to store the sem_t pointer for the
+	   exception handler.  */
+1:	pushq	%rdi
+	cfi_adjust_cfa_offset(8)
+
+	LOCK
+	LP_OP(add) $1, NWAITERS(%rdi)
+
+.LcleanupSTART:
+6:	call	__pthread_enable_asynccancel
+	movl	%eax, %r8d
+
+	xorq	%r10, %r10
+	movl	$SYS_futex, %eax
+#if FUTEX_WAIT == 0
+	movl	PRIVATE(%rdi), %esi
+#else
+	movl	$FUTEX_WAIT, %esi
+	orl	PRIVATE(%rdi), %esi
+#endif
+	xorl	%edx, %edx
+	syscall
+	movq	%rax, %rcx
+
+	xchgq	%r8, %rdi
+	call	__pthread_disable_asynccancel
+.LcleanupEND:
+	movq	%r8, %rdi
+
+	testq	%rcx, %rcx
+	je	3f
+	cmpq	$-EWOULDBLOCK, %rcx
+	jne	4f
+
+3:
+#if VALUE == 0
+	movl	(%rdi), %eax
+#else
+	movl	VALUE(%rdi), %eax
+#endif
+5:	testl	%eax, %eax
+	je	6b
+
+	leal	-1(%rax), %edx
+	LOCK
+#if VALUE == 0
+	cmpxchgl %edx, (%rdi)
+#else
+	cmpxchgl %edx, VALUE(%rdi)
+#endif
+	jne	5b
+
+	xorl	%eax, %eax
+
+9:	LOCK
+	LP_OP(sub) $1, NWAITERS(%rdi)
+
+	leaq	8(%rsp), %rsp
+	cfi_adjust_cfa_offset(-8)
+
+	retq
+
+	cfi_adjust_cfa_offset(8)
+4:	negq	%rcx
+	movq	errno@gottpoff(%rip), %rdx
+	movl	%ecx, %fs:(%rdx)
+	orl	$-1, %eax
+
+	jmp	9b
+	.size	sem_wait,.-sem_wait
+
+
+	.type	sem_wait_cleanup,@function
+sem_wait_cleanup:
+	movq	(%rsp), %rdi
+	LOCK
+	LP_OP(sub) $1, NWAITERS(%rdi)
+	movq	%rax, %rdi
+.LcallUR:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	sem_wait_cleanup,.-sem_wait_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format
+	.byte	DW_EH_PE_omit			# @TType format
+	.byte	DW_EH_PE_uleb128		# call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 sem_wait_cleanup-.LSTARTCODE
+	.uleb128  0
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	LP_SIZE
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, LP_SIZE
+DW.ref.__gcc_personality_v0:
+	ASM_ADDR __gcc_personality_v0
+#endif
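
sem_wait combines the same CAS take with a FUTEX_WAIT on value 0, advertising itself in NWAITERS so sem_post knows a wake is needed; the .gcc_except_table entry routes cancellation through sem_wait_cleanup to undo that count. A hedged C sketch without the async-cancel bracketing:

#include <errno.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

struct sem_sketch { atomic_uint value; atomic_ulong nwaiters; };

static int
sem_wait_sketch (struct sem_sketch *sem)
{
  unsigned int v = atomic_load (&sem->value);
  while (v != 0)			/* label 2: fast path */
    if (atomic_compare_exchange_weak (&sem->value, &v, v - 1))
      return 0;

  atomic_fetch_add (&sem->nwaiters, 1);	/* LP_OP(add) */
  int result = 0;
  for (;;)
    {
      if (syscall (SYS_futex, &sem->value, FUTEX_WAIT, 0,
		   NULL, NULL, 0) != 0
	  && errno != EAGAIN)		/* label 4: a real error */
	{
	  result = -1;
	  break;
	}
      v = atomic_load (&sem->value);	/* labels 3 and 5: retry take */
      while (v != 0)
	if (atomic_compare_exchange_weak (&sem->value, &v, v - 1))
	  goto out;
    }
out:
  atomic_fetch_sub (&sem->nwaiters, 1);	/* label 9 */
  return result;
}
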
diff --git a/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h b/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h
new file mode 100644
index 0000000000..83cd25fe8d
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h
@@ -0,0 +1,109 @@
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <tls.h>
+#ifndef __ASSEMBLER__
+# include <nptl/pthreadP.h>
+#endif
+
+#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
+
+/* The code to disable cancellation depends on the fact that the called
+   functions are special.  They don't modify registers other than %rax
+   and %r11 if they return.  Therefore we don't have to preserve other
+   registers around these calls.  */
+# undef PSEUDO
+# define PSEUDO(name, syscall_name, args)				      \
+  .text;								      \
+  ENTRY (name)								      \
+    SINGLE_THREAD_P;							      \
+    jne L(pseudo_cancel);						      \
+  .type __##syscall_name##_nocancel,@function;				      \
+  .globl __##syscall_name##_nocancel;					      \
+  __##syscall_name##_nocancel:						      \
+    DO_CALL (syscall_name, args);					      \
+    cmpq $-4095, %rax;							      \
+    jae SYSCALL_ERROR_LABEL;						      \
+    ret;								      \
+  .size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel;	      \
+  L(pseudo_cancel):							      \
+    /* We always have to align the stack before calling a function.  */	      \
+    subq $8, %rsp; cfi_adjust_cfa_offset (8);				      \
+    CENABLE								      \
+    /* The return value from CENABLE is argument for CDISABLE.  */	      \
+    movq %rax, (%rsp);							      \
+    DO_CALL (syscall_name, args);					      \
+    movq (%rsp), %rdi;							      \
+    /* Save %rax since it's the error code from the syscall.  */	      \
+    movq %rax, %rdx;							      \
+    CDISABLE								      \
+    movq %rdx, %rax;							      \
+    addq $8,%rsp; cfi_adjust_cfa_offset (-8);				      \
+    cmpq $-4095, %rax;							      \
+    jae SYSCALL_ERROR_LABEL
+
+
+# ifdef IS_IN_libpthread
+#  define CENABLE	call __pthread_enable_asynccancel;
+#  define CDISABLE	call __pthread_disable_asynccancel;
+#  define __local_multiple_threads __pthread_multiple_threads
+# elif !defined NOT_IN_libc
+#  define CENABLE	call __libc_enable_asynccancel;
+#  define CDISABLE	call __libc_disable_asynccancel;
+#  define __local_multiple_threads __libc_multiple_threads
+# elif defined IS_IN_librt
+#  define CENABLE	call __librt_enable_asynccancel;
+#  define CDISABLE	call __librt_disable_asynccancel;
+# else
+#  error Unsupported library
+# endif
+
+# if defined IS_IN_libpthread || !defined NOT_IN_libc
+#  ifndef __ASSEMBLER__
+extern int __local_multiple_threads attribute_hidden;
+#   define SINGLE_THREAD_P \
+  __builtin_expect (__local_multiple_threads == 0, 1)
+#  else
+#   define SINGLE_THREAD_P cmpl $0, __local_multiple_threads(%rip)
+#  endif
+
+# else
+
+#  ifndef __ASSEMBLER__
+#   define SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+				   header.multiple_threads) == 0, 1)
+#  else
+#   define SINGLE_THREAD_P cmpl $0, %fs:MULTIPLE_THREADS_OFFSET
+#  endif
+
+# endif
+
+#elif !defined __ASSEMBLER__
+
+# define SINGLE_THREAD_P (1)
+# define NO_CANCELLATION 1
+
+#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+				   header.multiple_threads) == 0, 1)
+#endif
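
In C terms, the PSEUDO macro above emits two entry points per syscall: a bare __xxx_nocancel body, and a cancellable wrapper that brackets the syscall with CENABLE/CDISABLE only when SINGLE_THREAD_P fails. A hedged sketch of that shape (the extern declarations mirror names used above; multiple_threads stands in for the per-library flag):

#include <sys/syscall.h>
#include <unistd.h>

extern int __pthread_enable_asynccancel (void);	 /* CENABLE */
extern void __pthread_disable_asynccancel (int); /* CDISABLE */
extern int multiple_threads;	/* stands in for SINGLE_THREAD_P */

static long
cancellable_syscall_sketch (long nr, long a1, long a2, long a3)
{
  if (!multiple_threads)	/* SINGLE_THREAD_P: skip the dance */
    return syscall (nr, a1, a2, a3);	/* the __xxx_nocancel body */

  int old = __pthread_enable_asynccancel ();	/* CENABLE */
  long ret = syscall (nr, a1, a2, a3);		/* DO_CALL */
  __pthread_disable_asynccancel (old);		/* CDISABLE */
  return ret;	/* the asm checks $-4095 itself; syscall() sets errno */
}
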