author     Adhemerval Zanella <adhemerval.zanella@linaro.org>	2019-02-21 13:33:54 -0300
committer  Adhemerval Zanella <adhemerval.zanella@linaro.org>	2019-05-14 08:48:02 -0300
commit     c50e1c263ec15e98da3235e663049156fd1afcfa (patch)
tree       47d22df898622b32fdc262f830978f29d6fa800e /sysdeps
parent     959aff9fa22c45e0fa11cd88c9f8ea10bd9ba494 (diff)
x86: Remove arch-specific low level lock implementation
This patch removes the arch-specific x86 assembly implementation for
low-level locking and consolidates the 64-bit and 32-bit variants into
a single implementation.

Unlike other architectures, the x86 lll_trylock, lll_lock, and
lll_unlock implement a single-thread optimization that avoids the
atomic operation, using a plain cmpxchgl instead.  This patch
implements the optimization in a generic way with the new
single-thread.h definitions, while keeping the previous semantics.
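
Condensed, the resulting lll_lock is roughly the following (the complete
version is the new sysdeps/unix/sysv/linux/x86/lowlevellock.h added by
this patch; SINGLE_THREAD_P comes from single-thread.h, and outside libc
the header forces the atomic path instead):

	/* Plain (non lock-prefixed) cmpxchgl; safe only while the
	   process is single-threaded.  */
	static inline int
	__lll_cas_lock (int *futex)
	{
	  int ret;
	  asm volatile ("cmpxchgl %2, %1"
			: "=a" (ret), "=m" (*futex)
			: "r" (1), "m" (*futex), "0" (0)
			: "memory");
	  return ret;
	}

	#define lll_lock(lock, private)			\
	  ((void)					\
	   ({						\
	     if (SINGLE_THREAD_P)			\
	       __lll_cas_lock (&(lock));		\
	     else					\
	       __lll_lock (&(lock), private);		\
	   }))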

The lll_cond_trylock, lll_cond_lock, and lll_timedlock just use
atomic operations plus calls to lll_lock_wait*.
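
For context (not part of this patch), the generic lll_cond_lock in
sysdeps/nptl/lowlevellock.h is roughly:

	/* Acquire the lock, setting it to 2 (acquired, possibly with
	   waiters) so that the matching unlock always wakes a waiter.  */
	#define __lll_cond_lock(futex, private)				\
	  ((void)							\
	   ({								\
	     int *__futex = (futex);					\
	     if (__glibc_unlikely (atomic_exchange_acq (__futex, 2) != 0)) \
	       __lll_lock_wait (__futex, private);			\
	   }))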

For __lll_lock_wait_private and __lll_lock_wait, the generic
implementation is used instead; there is no indication that an assembly
implementation is required performance-wise.
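
For context, the generic wait loop in nptl/lowlevellock.c is plain C,
roughly:

	void
	__lll_lock_wait (int *futex, int private)
	{
	  /* If the lock is already marked contended, block first.  */
	  if (*futex == 2)
	    lll_futex_wait (futex, 2, private);

	  /* Mark the lock contended and keep blocking until the
	     previous value was 0, i.e. we acquired the lock.  */
	  while (atomic_exchange_acq (futex, 2) != 0)
	    lll_futex_wait (futex, 2, private);
	}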

Checked on x86_64-linux-gnu and i686-linux-gnu.

	* sysdeps/nptl/lowlevellock.h (__lll_trylock): New macro.
	(lll_trylock): Call __lll_trylock.
	* sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S: Remove file.
	* sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/x86/lowlevellock.h: New file.
	* sysdeps/unix/sysv/linux/x86_64/cancellation.S: Include
	lowlevellock-futex.h.
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/nptl/lowlevellock.h                            4
-rw-r--r--  sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S      19
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c      1
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevellock.S          268
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevellock.h          240
-rw-r--r--  sysdeps/unix/sysv/linux/x86/lowlevellock.h           110
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/cancellation.S          2
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S    19
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c    1
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevellock.S        233
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevellock.h        243
11 files changed, 114 insertions, 1026 deletions
diff --git a/sysdeps/nptl/lowlevellock.h b/sysdeps/nptl/lowlevellock.h
index 6f017afdd5..e905829ee4 100644
--- a/sysdeps/nptl/lowlevellock.h
+++ b/sysdeps/nptl/lowlevellock.h
@@ -63,8 +63,10 @@
 /* If LOCK is 0 (not acquired), set to 1 (acquired with no waiters) and return
    0.  Otherwise leave lock unchanged and return non-zero to indicate that the
    lock was not acquired.  */
+#define __lll_trylock(lock)	\
+  __glibc_unlikely (atomic_compare_and_exchange_bool_acq ((lock), 1, 0))
 #define lll_trylock(lock)	\
-  __glibc_unlikely (atomic_compare_and_exchange_bool_acq (&(lock), 1, 0))
+   __lll_trylock (&(lock))
 
 /* If LOCK is 0 (not acquired), set to 2 (acquired, possibly with waiters) and
    return 0.  Otherwise leave lock unchanged and return non-zero to indicate
diff --git a/sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S
deleted file mode 100644
index 32025277f3..0000000000
--- a/sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include "lowlevellock.S"
diff --git a/sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c b/sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c
deleted file mode 100644
index f6875b8f89..0000000000
--- a/sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c
+++ /dev/null
@@ -1 +0,0 @@
-/* __lll_timedlock_wait is in lowlevellock.S.  */
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.S b/sysdeps/unix/sysv/linux/i386/lowlevellock.S
deleted file mode 100644
index a9898e94fd..0000000000
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.S
+++ /dev/null
@@ -1,268 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <pthread-errnos.h>
-#include <kernel-features.h>
-#include <lowlevellock.h>
-
-#include <stap-probe.h>
-
-	.text
-
-#define LOAD_PRIVATE_FUTEX_WAIT(reg) \
-	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#define LOAD_PRIVATE_FUTEX_WAKE(reg) \
-	movl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
-#define LOAD_FUTEX_WAIT(reg) \
-	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#define LOAD_FUTEX_WAIT_ABS(reg) \
-	xorl	$(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
-#define LOAD_FUTEX_WAKE(reg) \
-	xorl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
-
-	.globl	__lll_lock_wait_private
-	.type	__lll_lock_wait_private,@function
-	.hidden	__lll_lock_wait_private
-	.align	16
-__lll_lock_wait_private:
-	cfi_startproc
-	pushl	%edx
-	cfi_adjust_cfa_offset(4)
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	pushl	%esi
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%edx, -8)
-	cfi_offset(%ebx, -12)
-	cfi_offset(%esi, -16)
-
-	movl	$2, %edx
-	movl	%ecx, %ebx
-	xorl	%esi, %esi	/* No timeout.  */
-	LOAD_PRIVATE_FUTEX_WAIT (%ecx)
-
-	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
-	jne 2f
-
-1:	LIBC_PROBE (lll_lock_wait_private, 1, %ebx)
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-2:	movl	%edx, %eax
-	xchgl	%eax, (%ebx)	/* NB:	 lock is implied */
-
-	testl	%eax, %eax
-	jnz	1b
-
-	popl	%esi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%esi)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	popl	%edx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edx)
-	ret
-	cfi_endproc
-	.size	__lll_lock_wait_private,.-__lll_lock_wait_private
-
-#if !IS_IN (libc)
-	.globl	__lll_lock_wait
-	.type	__lll_lock_wait,@function
-	.hidden	__lll_lock_wait
-	.align	16
-__lll_lock_wait:
-	cfi_startproc
-	pushl	%edx
-	cfi_adjust_cfa_offset(4)
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	pushl	%esi
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%edx, -8)
-	cfi_offset(%ebx, -12)
-	cfi_offset(%esi, -16)
-
-	movl	%edx, %ebx
-	movl	$2, %edx
-	xorl	%esi, %esi	/* No timeout.  */
-	LOAD_FUTEX_WAIT (%ecx)
-
-	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
-	jne 2f
-
-1:	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-2:	movl	%edx, %eax
-	xchgl	%eax, (%ebx)	/* NB:	 lock is implied */
-
-	testl	%eax, %eax
-	jnz	1b
-
-	popl	%esi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%esi)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	popl	%edx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edx)
-	ret
-	cfi_endproc
-	.size	__lll_lock_wait,.-__lll_lock_wait
-
-	/*      %ecx: futex
-		%esi: flags
-		%edx: timeout
-		%eax: futex value
-	*/
-	.globl	__lll_timedlock_wait
-	.type	__lll_timedlock_wait,@function
-	.hidden	__lll_timedlock_wait
-	.align	16
-__lll_timedlock_wait:
-	cfi_startproc
-	pushl	%ebp
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebp, 0)
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebx, 0)
-
-	cmpl	$0, (%edx)
-	js	8f
-
-	movl	%ecx, %ebx
-	movl	%esi, %ecx
-	movl	%edx, %esi
-	movl	$0xffffffff, %ebp
-	LOAD_FUTEX_WAIT_ABS (%ecx)
-
-	movl	$2, %edx
-	cmpl	%edx, %eax
-	jne	2f
-
-1:	movl	$SYS_futex, %eax
-	movl	$2, %edx
-	ENTER_KERNEL
-
-2:	xchgl	%edx, (%ebx)	/* NB:   lock is implied */
-
-	testl	%edx, %edx
-	jz	3f
-
-	cmpl	$-ETIMEDOUT, %eax
-	je	4f
-	cmpl	$-EINVAL, %eax
-	jne	1b
-4:	movl	%eax, %edx
-	negl	%edx
-
-3:	movl	%edx, %eax
-7:	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	popl	%ebp
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebp)
-	ret
-
-8:	movl	$ETIMEDOUT, %eax
-	jmp	7b
-
-	cfi_endproc
-	.size	__lll_timedlock_wait,.-__lll_timedlock_wait
-#endif
-
-	.globl	__lll_unlock_wake_private
-	.type	__lll_unlock_wake_private,@function
-	.hidden	__lll_unlock_wake_private
-	.align	16
-__lll_unlock_wake_private:
-	cfi_startproc
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	pushl	%ecx
-	cfi_adjust_cfa_offset(4)
-	pushl	%edx
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%ebx, -8)
-	cfi_offset(%ecx, -12)
-	cfi_offset(%edx, -16)
-
-	movl	%eax, %ebx
-	movl	$0, (%eax)
-	LOAD_PRIVATE_FUTEX_WAKE (%ecx)
-	movl	$1, %edx	/* Wake one thread.  */
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-	popl	%edx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edx)
-	popl	%ecx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ecx)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	ret
-	cfi_endproc
-	.size	__lll_unlock_wake_private,.-__lll_unlock_wake_private
-
-#if !IS_IN (libc)
-	.globl	__lll_unlock_wake
-	.type	__lll_unlock_wake,@function
-	.hidden	__lll_unlock_wake
-	.align	16
-__lll_unlock_wake:
-	cfi_startproc
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	pushl	%ecx
-	cfi_adjust_cfa_offset(4)
-	pushl	%edx
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%ebx, -8)
-	cfi_offset(%ecx, -12)
-	cfi_offset(%edx, -16)
-
-	movl	%eax, %ebx
-	movl	$0, (%eax)
-	LOAD_FUTEX_WAKE (%ecx)
-	movl	$1, %edx	/* Wake one thread.  */
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-	popl	%edx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edx)
-	popl	%ecx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ecx)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	ret
-	cfi_endproc
-	.size	__lll_unlock_wake,.-__lll_unlock_wake
-#endif
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
deleted file mode 100644
index 94dccc4ce7..0000000000
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#ifndef _LOWLEVELLOCK_H
-#define _LOWLEVELLOCK_H	1
-
-#ifndef __ASSEMBLER__
-# include <time.h>
-# include <sys/param.h>
-# include <bits/pthreadtypes.h>
-# include <kernel-features.h>
-/* <tcb-offsets.h> is generated from tcb-offsets.sym to define offsets
-   and sizes of types in <tls.h> as well as <pthread.h> which includes
-   <lowlevellock.h> via nptl/descr.h.  Don't include <tcb-offsets.h>
-   when generating <tcb-offsets.h> to avoid circular dependency which
-   may lead to build hang on a many-core machine.  */
-# ifndef GEN_AS_CONST_HEADERS
-#  include <tcb-offsets.h>
-# endif
-
-# ifndef LOCK_INSTR
-#  ifdef UP
-#   define LOCK_INSTR	/* nothing */
-#  else
-#   define LOCK_INSTR "lock;"
-#  endif
-# endif
-#else
-# ifndef LOCK
-#  ifdef UP
-#   define LOCK
-#  else
-#   define LOCK lock
-#  endif
-# endif
-#endif
-
-#include <lowlevellock-futex.h>
-
-/* XXX Remove when no assembler code uses futexes anymore.  */
-#define SYS_futex		__NR_futex
-
-#ifndef __ASSEMBLER__
-
-/* Initializer for compatibility lock.  */
-#define LLL_LOCK_INITIALIZER		(0)
-#define LLL_LOCK_INITIALIZER_LOCKED	(1)
-#define LLL_LOCK_INITIALIZER_WAITERS	(2)
-
-
-/* NB: in the lll_trylock macro we simply return the value in %eax
-   after the cmpxchg instruction.  In case the operation succeded this
-   value is zero.  In case the operation failed, the cmpxchg instruction
-   has loaded the current value of the memory work which is guaranteed
-   to be nonzero.  */
-#if !IS_IN (libc) || defined UP
-# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
-#else
-# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \
-			   "je 0f\n\t"					      \
-			   "lock\n"					      \
-			   "0:\tcmpxchgl %2, %1"
-#endif
-
-#define lll_trylock(futex) \
-  ({ int ret;								      \
-     __asm __volatile (__lll_trylock_asm				      \
-		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
-			 "0" (LLL_LOCK_INITIALIZER),			      \
-			 "i" (MULTIPLE_THREADS_OFFSET)			      \
-		       : "memory");					      \
-     ret; })
-
-
-#define lll_cond_trylock(futex) \
-  ({ int ret;								      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
-		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_LOCK_INITIALIZER_WAITERS),		      \
-			 "m" (futex), "0" (LLL_LOCK_INITIALIZER)	      \
-		       : "memory");					      \
-     ret; })
-
-#if !IS_IN (libc) || defined UP
-# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t"
-#else
-# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t"			      \
-			      "je 0f\n\t"				      \
-			      "lock\n"					      \
-			      "0:\tcmpxchgl %1, %2\n\t"
-#endif
-
-#define lll_lock(futex, private) \
-  (void)								      \
-    ({ int ignore1, ignore2;						      \
-       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm __volatile (__lll_lock_asm_start				      \
-			   "jz 18f\n\t"				      \
-			   "1:\tleal %2, %%ecx\n"			      \
-			   "2:\tcall __lll_lock_wait_private\n" 	      \
-			   "18:"					      \
-			   : "=a" (ignore1), "=c" (ignore2), "=m" (futex)     \
-			   : "0" (0), "1" (1), "m" (futex),		      \
-			     "i" (MULTIPLE_THREADS_OFFSET)		      \
-			   : "memory");					      \
-       else								      \
-	 {								      \
-	   int ignore3;							      \
-	   __asm __volatile (__lll_lock_asm_start			      \
-			     "jz 18f\n\t"			 	      \
-			     "1:\tleal %2, %%edx\n"			      \
-			     "0:\tmovl %8, %%ecx\n"			      \
-			     "2:\tcall __lll_lock_wait\n"		      \
-			     "18:"					      \
-			     : "=a" (ignore1), "=c" (ignore2),		      \
-			       "=m" (futex), "=&d" (ignore3) 		      \
-			     : "1" (1), "m" (futex),			      \
-			       "i" (MULTIPLE_THREADS_OFFSET), "0" (0),	      \
-			       "g" ((int) (private))			      \
-			     : "memory");				      \
-	 }								      \
-    })
-
-
-/* Special version of lll_lock which causes the unlock function to
-   always wakeup waiters.  */
-#define lll_cond_lock(futex, private) \
-  (void)								      \
-    ({ int ignore1, ignore2, ignore3;					      \
-       __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
-			 "jz 18f\n\t"					      \
-			 "1:\tleal %2, %%edx\n"				      \
-			 "0:\tmovl %7, %%ecx\n"				      \
-			 "2:\tcall __lll_lock_wait\n"			      \
-			 "18:"						      \
-			 : "=a" (ignore1), "=c" (ignore2), "=m" (futex),      \
-			   "=&d" (ignore3)				      \
-			 : "0" (0), "1" (2), "m" (futex), "g" ((int) (private))\
-			 : "memory");					      \
-    })
-
-
-#define lll_timedlock(futex, timeout, private) \
-  ({ int result, ignore1, ignore2, ignore3;				      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"			      \
-		       "jz 18f\n\t"					      \
-		       "1:\tleal %3, %%ecx\n"				      \
-		       "0:\tmovl %8, %%edx\n"				      \
-		       "2:\tcall __lll_timedlock_wait\n"		      \
-		       "18:"						      \
-		       : "=a" (result), "=c" (ignore1), "=&d" (ignore2),      \
-			 "=m" (futex), "=S" (ignore3)			      \
-		       : "0" (0), "1" (1), "m" (futex), "m" (timeout),	      \
-			 "4" ((int) (private))				      \
-		       : "memory");					      \
-     result; })
-
-extern int __lll_timedlock_elision (int *futex, short *adapt_count,
-					 const struct timespec *timeout,
-					 int private) attribute_hidden;
-
-#define lll_timedlock_elision(futex, adapt_count, timeout, private)	\
-  __lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
-
-#if !IS_IN (libc) || defined UP
-# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
-#else
-# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t"			      \
-			  "je 0f\n\t"					      \
-			  "lock\n"					      \
-			  "0:\tsubl $1,%0\n\t"
-#endif
-
-#define lll_unlock(futex, private) \
-  (void)								      \
-    ({ int ignore;							      \
-       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm __volatile (__lll_unlock_asm				      \
-			   "je 18f\n\t"					      \
-			   "1:\tleal %0, %%eax\n"			      \
-			   "2:\tcall __lll_unlock_wake_private\n"	      \
-			   "18:"					      \
-			   : "=m" (futex), "=&a" (ignore)		      \
-			   : "m" (futex), "i" (MULTIPLE_THREADS_OFFSET)	      \
-			   : "memory");					      \
-       else								      \
-	 {								      \
-	   int ignore2;							      \
-	   __asm __volatile (__lll_unlock_asm				      \
-			     "je 18f\n\t"				      \
-			     "1:\tleal %0, %%eax\n"			      \
-			     "0:\tmovl %5, %%ecx\n"			      \
-			     "2:\tcall __lll_unlock_wake\n"		      \
-			     "18:"					      \
-			     : "=m" (futex), "=&a" (ignore), "=&c" (ignore2)  \
-			     : "i" (MULTIPLE_THREADS_OFFSET), "m" (futex),    \
-			       "g" ((int) (private))			      \
-			     : "memory");				      \
-	 }								      \
-    })
-
-
-#define lll_islocked(futex) \
-  (futex != LLL_LOCK_INITIALIZER)
-
-extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
-  attribute_hidden;
-
-extern int __lll_unlock_elision(int *lock, int private)
-  attribute_hidden;
-
-extern int __lll_trylock_elision(int *lock, short *adapt_count)
-  attribute_hidden;
-
-#define lll_lock_elision(futex, adapt_count, private) \
-  __lll_lock_elision (&(futex), &(adapt_count), private)
-#define lll_unlock_elision(futex, adapt_count, private) \
-  __lll_unlock_elision (&(futex), private)
-#define lll_trylock_elision(futex, adapt_count) \
-  __lll_trylock_elision(&(futex), &(adapt_count))
-
-#endif  /* !__ASSEMBLER__ */
-
-#endif	/* lowlevellock.h */
diff --git a/sysdeps/unix/sysv/linux/x86/lowlevellock.h b/sysdeps/unix/sysv/linux/x86/lowlevellock.h
new file mode 100644
index 0000000000..8a78dcff47
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/x86/lowlevellock.h
@@ -0,0 +1,110 @@
+/* Low-level lock implementation, x86 version.
+   Copyright (C) 2019 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _X86_64_LOWLEVELLOCK_H
+#define _X86_64_LOWLEVELLOCK_H	1
+
+#ifndef __ASSEMBLER__
+#include <sysdeps/nptl/lowlevellock.h>
+#include <single-thread.h>
+
+/* The lll_trylock, lll_lock, and lll_unlock implements a single-thread
+   optimization using the cmpxchgl instruction.  It checks if the process
+   is single thread and avoid a more expensive atomic instruction.  */
+
+/* The single-thread optimization only works for libc itself, we need
+   atomicity for libpthread in case of shared futexes.  */
+#if !IS_IN(libc)
+# define is_single_thread 0
+#else
+# define is_single_thread SINGLE_THREAD_P
+#endif
+
+/* In the __lllc_as we simply return the value in %eax after the cmpxchg
+   instruction.  In case the operation succeeded this value is zero.  In case
+   the operation failed, the cmpxchg instruction has loaded the current value
+   of the memory work which is guaranteed to be nonzero.  */
+static inline int
+__attribute__ ((always_inline))
+__lll_cas_lock (int *futex)
+{
+  int ret;
+  asm volatile ("cmpxchgl %2, %1"
+		: "=a" (ret), "=m" (*futex)
+		: "r" (1), "m" (*futex), "0" (0)
+		: "memory");
+  return ret;
+}
+
+#undef lll_trylock
+#define lll_trylock(lock)						     \
+  ({									     \
+    int __ret;								     \
+    if (is_single_thread)						     \
+      __ret = __lll_cas_lock (&(lock));					     \
+    else								     \
+      __ret = __lll_trylock (&(lock));					     \
+    __ret;								     \
+  })
+
+#undef lll_lock
+#define lll_lock(lock, private)						     \
+  ((void)								     \
+   ({									     \
+     if (is_single_thread)						     \
+       __lll_cas_lock (&(lock));					     \
+     else								     \
+       __lll_lock (&(lock), private);					     \
+   }))
+
+#undef lll_unlock
+#define lll_unlock(lock, private)					     \
+  ((void)								     \
+   ({									     \
+     if (is_single_thread)						     \
+       (lock)--;							     \
+     else								     \
+       __lll_unlock (&(lock), private);					     \
+   }))
+
+extern int __lll_timedlock_elision (int *futex, short *adapt_count,
+					 const struct timespec *timeout,
+					 int private) attribute_hidden;
+
+#define lll_timedlock_elision(futex, adapt_count, timeout, private)	\
+  __lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
+
+extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
+  attribute_hidden;
+
+extern int __lll_unlock_elision (int *lock, int private)
+  attribute_hidden;
+
+extern int __lll_trylock_elision (int *lock, short *adapt_count)
+  attribute_hidden;
+
+#define lll_lock_elision(futex, adapt_count, private) \
+  __lll_lock_elision (&(futex), &(adapt_count), private)
+#define lll_unlock_elision(futex, adapt_count, private) \
+  __lll_unlock_elision (&(futex), private)
+#define lll_trylock_elision(futex, adapt_count) \
+  __lll_trylock_elision (&(futex), &(adapt_count))
+
+#endif  /* !__ASSEMBLER__ */
+
+#endif	/* lowlevellock.h */
diff --git a/sysdeps/unix/sysv/linux/x86_64/cancellation.S b/sysdeps/unix/sysv/linux/x86_64/cancellation.S
index 7d169d9aca..bb4910764a 100644
--- a/sysdeps/unix/sysv/linux/x86_64/cancellation.S
+++ b/sysdeps/unix/sysv/linux/x86_64/cancellation.S
@@ -19,7 +19,7 @@
 #include <sysdep.h>
 #include <tcb-offsets.h>
 #include <kernel-features.h>
-#include "lowlevellock.h"
+#include <lowlevellock-futex.h>
 
 #define PTHREAD_UNWIND JUMPTARGET(__pthread_unwind)
 #if IS_IN (libpthread)
diff --git a/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
deleted file mode 100644
index 32025277f3..0000000000
--- a/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include "lowlevellock.S"
diff --git a/sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c b/sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c
deleted file mode 100644
index f6875b8f89..0000000000
--- a/sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c
+++ /dev/null
@@ -1 +0,0 @@
-/* __lll_timedlock_wait is in lowlevellock.S.  */
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
deleted file mode 100644
index 00ba88d5d2..0000000000
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
+++ /dev/null
@@ -1,233 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <pthread-errnos.h>
-#include <kernel-features.h>
-#include <lowlevellock.h>
-
-#include <stap-probe.h>
-
-	.text
-
-#define LOAD_PRIVATE_FUTEX_WAIT(reg) \
-	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#define LOAD_PRIVATE_FUTEX_WAKE(reg) \
-	movl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
-#define LOAD_FUTEX_WAIT(reg) \
-	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#define LOAD_FUTEX_WAIT_ABS(reg) \
-	xorl	$(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
-#define LOAD_FUTEX_WAKE(reg) \
-	xorl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
-
-
-	.globl	__lll_lock_wait_private
-	.type	__lll_lock_wait_private,@function
-	.hidden	__lll_lock_wait_private
-	.align	16
-__lll_lock_wait_private:
-	cfi_startproc
-	pushq	%r10
-	cfi_adjust_cfa_offset(8)
-	pushq	%rdx
-	cfi_adjust_cfa_offset(8)
-	cfi_offset(%r10, -16)
-	cfi_offset(%rdx, -24)
-	xorq	%r10, %r10	/* No timeout.  */
-	movl	$2, %edx
-	LOAD_PRIVATE_FUTEX_WAIT (%esi)
-
-	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
-	jne	2f
-
-1:	LIBC_PROBE (lll_lock_wait_private, 1, %rdi)
-	movl	$SYS_futex, %eax
-	syscall
-
-2:	movl	%edx, %eax
-	xchgl	%eax, (%rdi)	/* NB:	 lock is implied */
-
-	testl	%eax, %eax
-	jnz	1b
-
-	popq	%rdx
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%rdx)
-	popq	%r10
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%r10)
-	retq
-	cfi_endproc
-	.size	__lll_lock_wait_private,.-__lll_lock_wait_private
-
-#if !IS_IN (libc)
-	.globl	__lll_lock_wait
-	.type	__lll_lock_wait,@function
-	.hidden	__lll_lock_wait
-	.align	16
-__lll_lock_wait:
-	cfi_startproc
-	pushq	%r10
-	cfi_adjust_cfa_offset(8)
-	pushq	%rdx
-	cfi_adjust_cfa_offset(8)
-	cfi_offset(%r10, -16)
-	cfi_offset(%rdx, -24)
-	xorq	%r10, %r10	/* No timeout.  */
-	movl	$2, %edx
-	LOAD_FUTEX_WAIT (%esi)
-
-	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
-	jne	2f
-
-1:	LIBC_PROBE (lll_lock_wait, 2, %rdi, %rsi)
-	movl	$SYS_futex, %eax
-	syscall
-
-2:	movl	%edx, %eax
-	xchgl	%eax, (%rdi)	/* NB:	 lock is implied */
-
-	testl	%eax, %eax
-	jnz	1b
-
-	popq	%rdx
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%rdx)
-	popq	%r10
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%r10)
-	retq
-	cfi_endproc
-	.size	__lll_lock_wait,.-__lll_lock_wait
-
-	/*      %rdi: futex
-		%rsi: flags
-		%rdx: timeout
-		%eax: futex value
-	*/
-	.globl	__lll_timedlock_wait
-	.type	__lll_timedlock_wait,@function
-	.hidden	__lll_timedlock_wait
-	.align	16
-__lll_timedlock_wait:
-	cfi_startproc
-	cmpq	$0, (%rdx)
-	js	5f
-
-	pushq	%r9
-	cfi_adjust_cfa_offset(8)
-	cfi_rel_offset(%r9, 0)
-
-	movq	%rdx, %r10
-	movl	$0xffffffff, %r9d
-	LOAD_FUTEX_WAIT_ABS (%esi)
-
-	movl	$2, %edx
-	cmpl	%edx, %eax
-	jne	2f
-
-1:	movl	$SYS_futex, %eax
-	movl	$2, %edx
-	syscall
-
-2:	xchgl	%edx, (%rdi)	/* NB:   lock is implied */
-
-	testl	%edx, %edx
-	jz	3f
-
-	cmpl	$-ETIMEDOUT, %eax
-	je	4f
-	cmpl	$-EINVAL, %eax
-	jne	1b
-4:	movl	%eax, %edx
-	negl	%edx
-
-3:	movl	%edx, %eax
-	popq	%r9
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%r9)
-	retq
-
-5:	movl	$ETIMEDOUT, %eax
-	retq
-
-	cfi_endproc
-	.size	__lll_timedlock_wait,.-__lll_timedlock_wait
-#endif
-
-
-	.globl	__lll_unlock_wake_private
-	.type	__lll_unlock_wake_private,@function
-	.hidden	__lll_unlock_wake_private
-	.align	16
-__lll_unlock_wake_private:
-	cfi_startproc
-	pushq	%rsi
-	cfi_adjust_cfa_offset(8)
-	pushq	%rdx
-	cfi_adjust_cfa_offset(8)
-	cfi_offset(%rsi, -16)
-	cfi_offset(%rdx, -24)
-
-	movl	$0, (%rdi)
-	LOAD_PRIVATE_FUTEX_WAKE (%esi)
-	movl	$1, %edx	/* Wake one thread.  */
-	movl	$SYS_futex, %eax
-	syscall
-
-	popq	%rdx
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%rdx)
-	popq	%rsi
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%rsi)
-	retq
-	cfi_endproc
-	.size	__lll_unlock_wake_private,.-__lll_unlock_wake_private
-
-#if !IS_IN (libc)
-	.globl	__lll_unlock_wake
-	.type	__lll_unlock_wake,@function
-	.hidden	__lll_unlock_wake
-	.align	16
-__lll_unlock_wake:
-	cfi_startproc
-	pushq	%rsi
-	cfi_adjust_cfa_offset(8)
-	pushq	%rdx
-	cfi_adjust_cfa_offset(8)
-	cfi_offset(%rsi, -16)
-	cfi_offset(%rdx, -24)
-
-	movl	$0, (%rdi)
-	LOAD_FUTEX_WAKE (%esi)
-	movl	$1, %edx	/* Wake one thread.  */
-	movl	$SYS_futex, %eax
-	syscall
-
-	popq	%rdx
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%rdx)
-	popq	%rsi
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%rsi)
-	retq
-	cfi_endproc
-	.size	__lll_unlock_wake,.-__lll_unlock_wake
-#endif
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
deleted file mode 100644
index 8cbc1caa5b..0000000000
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ /dev/null
@@ -1,243 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#ifndef _LOWLEVELLOCK_H
-#define _LOWLEVELLOCK_H	1
-
-#ifndef __ASSEMBLER__
-# include <time.h>
-# include <sys/param.h>
-# include <bits/pthreadtypes.h>
-# include <kernel-features.h>
-
-# ifndef LOCK_INSTR
-#  ifdef UP
-#   define LOCK_INSTR	/* nothing */
-#  else
-#   define LOCK_INSTR "lock;"
-#  endif
-# endif
-#else
-# ifndef LOCK
-#  ifdef UP
-#   define LOCK
-#  else
-#   define LOCK lock
-#  endif
-# endif
-#endif
-
-#include <lowlevellock-futex.h>
-
-/* XXX Remove when no assembler code uses futexes anymore.  */
-#define SYS_futex		__NR_futex
-
-#ifndef __ASSEMBLER__
-
-/* Initializer for lock.  */
-#define LLL_LOCK_INITIALIZER		(0)
-#define LLL_LOCK_INITIALIZER_LOCKED	(1)
-#define LLL_LOCK_INITIALIZER_WAITERS	(2)
-
-
-/* NB: in the lll_trylock macro we simply return the value in %eax
-   after the cmpxchg instruction.  In case the operation succeded this
-   value is zero.  In case the operation failed, the cmpxchg instruction
-   has loaded the current value of the memory work which is guaranteed
-   to be nonzero.  */
-#if !IS_IN (libc) || defined UP
-# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
-#else
-# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t"      \
-			   "je 0f\n\t"					      \
-			   "lock; cmpxchgl %2, %1\n\t"			      \
-			   "jmp 1f\n\t"					      \
-			   "0:\tcmpxchgl %2, %1\n\t"			      \
-			   "1:"
-#endif
-
-#define lll_trylock(futex) \
-  ({ int ret;								      \
-     __asm __volatile (__lll_trylock_asm				      \
-		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
-			 "0" (LLL_LOCK_INITIALIZER)			      \
-		       : "memory");					      \
-     ret; })
-
-#define lll_cond_trylock(futex) \
-  ({ int ret;								      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
-		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_LOCK_INITIALIZER_WAITERS),		      \
-			 "m" (futex), "0" (LLL_LOCK_INITIALIZER)	      \
-		       : "memory");					      \
-     ret; })
-
-#if !IS_IN (libc) || defined UP
-# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
-			      "jz 24f\n\t"
-#else
-# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t"   \
-			      "je 0f\n\t"				      \
-			      "lock; cmpxchgl %4, %2\n\t"		      \
-			      "jnz 1f\n\t"				      \
-			      "jmp 24f\n"				      \
-			      "0:\tcmpxchgl %4, %2\n\t"			      \
-			      "jz 24f\n\t"
-#endif
-
-#define lll_lock(futex, private) \
-  (void)								      \
-    ({ int ignore1, ignore2, ignore3;					      \
-       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm __volatile (__lll_lock_asm_start				      \
-			   "1:\tlea %2, %%" RDI_LP "\n"			      \
-			   "2:\tsub $128, %%" RSP_LP "\n"		      \
-			   ".cfi_adjust_cfa_offset 128\n"		      \
-			   "3:\tcallq __lll_lock_wait_private\n"	      \
-			   "4:\tadd $128, %%" RSP_LP "\n"		      \
-			   ".cfi_adjust_cfa_offset -128\n"		      \
-			   "24:"					      \
-			   : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),   \
-			     "=a" (ignore3)				      \
-			   : "0" (1), "m" (futex), "3" (0)		      \
-			   : "cx", "r11", "cc", "memory");		      \
-       else								      \
-	 __asm __volatile (__lll_lock_asm_start				      \
-			   "1:\tlea %2, %%" RDI_LP "\n"			      \
-			   "2:\tsub $128, %%" RSP_LP "\n"		      \
-			   ".cfi_adjust_cfa_offset 128\n"		      \
-			   "3:\tcallq __lll_lock_wait\n"		      \
-			   "4:\tadd $128, %%" RSP_LP "\n"		      \
-			   ".cfi_adjust_cfa_offset -128\n"		      \
-			   "24:"					      \
-			   : "=S" (ignore1), "=D" (ignore2), "=m" (futex),    \
-			     "=a" (ignore3)				      \
-			   : "1" (1), "m" (futex), "3" (0), "0" (private)     \
-			   : "cx", "r11", "cc", "memory");		      \
-    })									      \
-
-#define lll_cond_lock(futex, private) \
-  (void)								      \
-    ({ int ignore1, ignore2, ignore3;					      \
-       __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
-			 "jz 24f\n"					      \
-			 "1:\tlea %2, %%" RDI_LP "\n"			      \
-			 "2:\tsub $128, %%" RSP_LP "\n"			      \
-			 ".cfi_adjust_cfa_offset 128\n"			      \
-			 "3:\tcallq __lll_lock_wait\n"			      \
-			 "4:\tadd $128, %%" RSP_LP "\n"			      \
-			 ".cfi_adjust_cfa_offset -128\n"		      \
-			 "24:"						      \
-			 : "=S" (ignore1), "=D" (ignore2), "=m" (futex),      \
-			   "=a" (ignore3)				      \
-			 : "1" (2), "m" (futex), "3" (0), "0" (private)	      \
-			 : "cx", "r11", "cc", "memory");		      \
-    })
-
-#define lll_timedlock(futex, timeout, private) \
-  ({ int result, ignore1, ignore2, ignore3;				      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t"			      \
-		       "jz 24f\n"					      \
-		       "1:\tlea %4, %%" RDI_LP "\n"			      \
-		       "0:\tmov %8, %%" RDX_LP "\n"			      \
-		       "2:\tsub $128, %%" RSP_LP "\n"			      \
-		       ".cfi_adjust_cfa_offset 128\n"			      \
-		       "3:\tcallq __lll_timedlock_wait\n"		      \
-		       "4:\tadd $128, %%" RSP_LP "\n"			      \
-		       ".cfi_adjust_cfa_offset -128\n"			      \
-		       "24:"						      \
-		       : "=a" (result), "=D" (ignore1), "=S" (ignore2),	      \
-			 "=&d" (ignore3), "=m" (futex)			      \
-		       : "0" (0), "1" (1), "m" (futex), "m" (timeout),	      \
-			 "2" (private)					      \
-		       : "memory", "cx", "cc", "r10", "r11");		      \
-     result; })
-
-extern int __lll_timedlock_elision (int *futex, short *adapt_count,
-					 const struct timespec *timeout,
-					 int private) attribute_hidden;
-
-#define lll_timedlock_elision(futex, adapt_count, timeout, private)	\
-  __lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
-
-#if !IS_IN (libc) || defined UP
-# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t"		      \
-				"je 24f\n\t"
-#else
-# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
-				"je 0f\n\t"				      \
-				"lock; decl %0\n\t"			      \
-				"jne 1f\n\t"				      \
-				"jmp 24f\n\t"				      \
-				"0:\tdecl %0\n\t"			      \
-				"je 24f\n\t"
-#endif
-
-#define lll_unlock(futex, private) \
-  (void)								      \
-    ({ int ignore;							      \
-       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm __volatile (__lll_unlock_asm_start			      \
-			   "1:\tlea %0, %%" RDI_LP "\n"			      \
-			   "2:\tsub $128, %%" RSP_LP "\n"		      \
-			   ".cfi_adjust_cfa_offset 128\n"		      \
-			   "3:\tcallq __lll_unlock_wake_private\n"	      \
-			   "4:\tadd $128, %%" RSP_LP "\n"		      \
-			   ".cfi_adjust_cfa_offset -128\n"		      \
-			   "24:"					      \
-			   : "=m" (futex), "=&D" (ignore)		      \
-			   : "m" (futex)				      \
-			   : "ax", "cx", "r11", "cc", "memory");	      \
-       else								      \
-	 __asm __volatile (__lll_unlock_asm_start			      \
-			   "1:\tlea %0, %%" RDI_LP "\n"			      \
-			   "2:\tsub $128, %%" RSP_LP "\n"		      \
-			   ".cfi_adjust_cfa_offset 128\n"		      \
-			   "3:\tcallq __lll_unlock_wake\n"		      \
-			   "4:\tadd $128, %%" RSP_LP "\n"		      \
-			   ".cfi_adjust_cfa_offset -128\n"		      \
-			   "24:"					      \
-			   : "=m" (futex), "=&D" (ignore)		      \
-			   : "m" (futex), "S" (private)			      \
-			   : "ax", "cx", "r11", "cc", "memory");	      \
-    })
-
-#define lll_islocked(futex) \
-  (futex != LLL_LOCK_INITIALIZER)
-
-extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
-  attribute_hidden;
-
-extern int __lll_unlock_elision (int *lock, int private)
-  attribute_hidden;
-
-extern int __lll_trylock_elision (int *lock, short *adapt_count)
-  attribute_hidden;
-
-#define lll_lock_elision(futex, adapt_count, private) \
-  __lll_lock_elision (&(futex), &(adapt_count), private)
-#define lll_unlock_elision(futex, adapt_count, private) \
-  __lll_unlock_elision (&(futex), private)
-#define lll_trylock_elision(futex, adapt_count) \
-  __lll_trylock_elision (&(futex), &(adapt_count))
-
-#endif  /* !__ASSEMBLER__ */
-
-#endif	/* lowlevellock.h */