author     Adhemerval Zanella <adhemerval.zanella@linaro.org>  2019-02-21 13:33:54 -0300
committer  Adhemerval Zanella <adhemerval.zanella@linaro.org>  2019-05-14 08:48:02 -0300
commit     c50e1c263ec15e98da3235e663049156fd1afcfa (patch)
tree       47d22df898622b32fdc262f830978f29d6fa800e /sysdeps/unix/sysv/linux/i386
parent     959aff9fa22c45e0fa11cd88c9f8ea10bd9ba494 (diff)
x86: Remove arch-specific low level lock implementation
This patch removes the arch-specific x86 assembly implementation of the
low level locks and consolidates the 64-bit and 32-bit variants into a
single implementation.

Unlike other architectures, the x86 lll_trylock, lll_lock, and lll_unlock
implement a single-thread optimization that avoids the atomic operation,
using a plain cmpxchgl instead.  This patch implements the same
optimization generically through the new single-thread.h definitions,
while keeping the previous semantics.
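
A rough, self-contained C sketch of that shortcut (illustrative only, not
the actual glibc code; is_single_threaded stands in for the single-thread.h
predicate):

  /* Try to take the lock: skip the atomic CAS when the process is known
     to run a single thread, otherwise fall back to a real CAS.  Returns 0
     on success, nonzero if the lock was already taken.  */
  static inline int
  sketch_trylock (int *futex, int is_single_threaded)
  {
    int expected = 0;
    if (is_single_threaded)
      {
        /* No other thread can race with us; plain loads/stores suffice.  */
        if (*futex != 0)
          return 1;
        *futex = 1;
        return 0;
      }
    /* Multi-threaded: a real atomic compare-and-swap is required.  */
    return !__atomic_compare_exchange_n (futex, &expected, 1, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  }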

The lll_cond_trylock, lll_cond_lock, and lll_timedlock just use
atomic operations plus calls to lll_lock_wait*.
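
For example, a hedged sketch of the cond variant (again illustrative;
lll_lock_wait_slow is a placeholder for glibc's __lll_lock_wait slow path):

  /* "cond" locking sets the futex straight to 2 ("locked, possible
     waiters") so that the matching unlock always issues a wake, and
     blocks in the slow path on contention.  */
  extern void lll_lock_wait_slow (int *futex);  /* placeholder slow path */

  static inline void
  sketch_cond_lock (int *futex)
  {
    int expected = 0;
    if (!__atomic_compare_exchange_n (futex, &expected, 2, 0,
                                      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
      lll_lock_wait_slow (futex);
  }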

For __lll_lock_wait_private and __lll_lock_wait the generic implementation
is used; there is no indication that an assembly implementation is
required performance-wise.
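
For reference, the generic wait loop amounts to the following hedged C
rendering (the real code lives in nptl/lowlevellock.c and uses the
glibc-internal atomic and futex wrappers; the raw syscall here is only for
illustration).  The removed i386 assembly below implements the same loop:

  #include <linux/futex.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* Slow path: mark the futex "locked with possible waiters" (2) and
     sleep in FUTEX_WAIT until the exchange observes 0 (lock was free).  */
  void
  lll_lock_wait_slow (int *futex)
  {
    while (__atomic_exchange_n (futex, 2, __ATOMIC_ACQUIRE) != 0)
      syscall (SYS_futex, futex, FUTEX_WAIT_PRIVATE, 2, NULL, NULL, 0);
  }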

Checked on x86_64-linux-gnu and i686-linux-gnu.

	* sysdeps/nptl/lowlevellock.h (__lll_trylock): New macro.
	(lll_trylock): Call __lll_trylock.
	* sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S: Remove file.
	* sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lll_timedlock_wait.c: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/x86/lowlevellock.h: New file.
	* sysdeps/unix/sysv/linux/x86_64/cancellation.S: Include
	lowlevellock-futex.h.
Diffstat (limited to 'sysdeps/unix/sysv/linux/i386')
-rw-r--r--  sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S     19
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c     1
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevellock.S          268
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevellock.h          240
4 files changed, 0 insertions, 528 deletions
diff --git a/sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S
deleted file mode 100644
index 32025277f3..0000000000
--- a/sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include "lowlevellock.S"
diff --git a/sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c b/sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c
deleted file mode 100644
index f6875b8f89..0000000000
--- a/sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c
+++ /dev/null
@@ -1 +0,0 @@
-/* __lll_timedlock_wait is in lowlevellock.S.  */
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.S b/sysdeps/unix/sysv/linux/i386/lowlevellock.S
deleted file mode 100644
index a9898e94fd..0000000000
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.S
+++ /dev/null
@@ -1,268 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <pthread-errnos.h>
-#include <kernel-features.h>
-#include <lowlevellock.h>
-
-#include <stap-probe.h>
-
-	.text
-
-#define LOAD_PRIVATE_FUTEX_WAIT(reg) \
-	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#define LOAD_PRIVATE_FUTEX_WAKE(reg) \
-	movl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
-#define LOAD_FUTEX_WAIT(reg) \
-	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#define LOAD_FUTEX_WAIT_ABS(reg) \
-	xorl	$(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
-#define LOAD_FUTEX_WAKE(reg) \
-	xorl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
-
-	.globl	__lll_lock_wait_private
-	.type	__lll_lock_wait_private,@function
-	.hidden	__lll_lock_wait_private
-	.align	16
-__lll_lock_wait_private:
-	cfi_startproc
-	pushl	%edx
-	cfi_adjust_cfa_offset(4)
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	pushl	%esi
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%edx, -8)
-	cfi_offset(%ebx, -12)
-	cfi_offset(%esi, -16)
-
-	movl	$2, %edx
-	movl	%ecx, %ebx
-	xorl	%esi, %esi	/* No timeout.  */
-	LOAD_PRIVATE_FUTEX_WAIT (%ecx)
-
-	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
-	jne 2f
-
-1:	LIBC_PROBE (lll_lock_wait_private, 1, %ebx)
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-2:	movl	%edx, %eax
-	xchgl	%eax, (%ebx)	/* NB:	 lock is implied */
-
-	testl	%eax, %eax
-	jnz	1b
-
-	popl	%esi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%esi)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	popl	%edx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edx)
-	ret
-	cfi_endproc
-	.size	__lll_lock_wait_private,.-__lll_lock_wait_private
-
-#if !IS_IN (libc)
-	.globl	__lll_lock_wait
-	.type	__lll_lock_wait,@function
-	.hidden	__lll_lock_wait
-	.align	16
-__lll_lock_wait:
-	cfi_startproc
-	pushl	%edx
-	cfi_adjust_cfa_offset(4)
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	pushl	%esi
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%edx, -8)
-	cfi_offset(%ebx, -12)
-	cfi_offset(%esi, -16)
-
-	movl	%edx, %ebx
-	movl	$2, %edx
-	xorl	%esi, %esi	/* No timeout.  */
-	LOAD_FUTEX_WAIT (%ecx)
-
-	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
-	jne 2f
-
-1:	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-2:	movl	%edx, %eax
-	xchgl	%eax, (%ebx)	/* NB:	 lock is implied */
-
-	testl	%eax, %eax
-	jnz	1b
-
-	popl	%esi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%esi)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	popl	%edx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edx)
-	ret
-	cfi_endproc
-	.size	__lll_lock_wait,.-__lll_lock_wait
-
-	/*      %ecx: futex
-		%esi: flags
-		%edx: timeout
-		%eax: futex value
-	*/
-	.globl	__lll_timedlock_wait
-	.type	__lll_timedlock_wait,@function
-	.hidden	__lll_timedlock_wait
-	.align	16
-__lll_timedlock_wait:
-	cfi_startproc
-	pushl	%ebp
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebp, 0)
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	cfi_rel_offset(%ebx, 0)
-
-	cmpl	$0, (%edx)
-	js	8f
-
-	movl	%ecx, %ebx
-	movl	%esi, %ecx
-	movl	%edx, %esi
-	movl	$0xffffffff, %ebp
-	LOAD_FUTEX_WAIT_ABS (%ecx)
-
-	movl	$2, %edx
-	cmpl	%edx, %eax
-	jne	2f
-
-1:	movl	$SYS_futex, %eax
-	movl	$2, %edx
-	ENTER_KERNEL
-
-2:	xchgl	%edx, (%ebx)	/* NB:   lock is implied */
-
-	testl	%edx, %edx
-	jz	3f
-
-	cmpl	$-ETIMEDOUT, %eax
-	je	4f
-	cmpl	$-EINVAL, %eax
-	jne	1b
-4:	movl	%eax, %edx
-	negl	%edx
-
-3:	movl	%edx, %eax
-7:	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	popl	%ebp
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebp)
-	ret
-
-8:	movl	$ETIMEDOUT, %eax
-	jmp	7b
-
-	cfi_endproc
-	.size	__lll_timedlock_wait,.-__lll_timedlock_wait
-#endif
-
-	.globl	__lll_unlock_wake_private
-	.type	__lll_unlock_wake_private,@function
-	.hidden	__lll_unlock_wake_private
-	.align	16
-__lll_unlock_wake_private:
-	cfi_startproc
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	pushl	%ecx
-	cfi_adjust_cfa_offset(4)
-	pushl	%edx
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%ebx, -8)
-	cfi_offset(%ecx, -12)
-	cfi_offset(%edx, -16)
-
-	movl	%eax, %ebx
-	movl	$0, (%eax)
-	LOAD_PRIVATE_FUTEX_WAKE (%ecx)
-	movl	$1, %edx	/* Wake one thread.  */
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-	popl	%edx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edx)
-	popl	%ecx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ecx)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	ret
-	cfi_endproc
-	.size	__lll_unlock_wake_private,.-__lll_unlock_wake_private
-
-#if !IS_IN (libc)
-	.globl	__lll_unlock_wake
-	.type	__lll_unlock_wake,@function
-	.hidden	__lll_unlock_wake
-	.align	16
-__lll_unlock_wake:
-	cfi_startproc
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	pushl	%ecx
-	cfi_adjust_cfa_offset(4)
-	pushl	%edx
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%ebx, -8)
-	cfi_offset(%ecx, -12)
-	cfi_offset(%edx, -16)
-
-	movl	%eax, %ebx
-	movl	$0, (%eax)
-	LOAD_FUTEX_WAKE (%ecx)
-	movl	$1, %edx	/* Wake one thread.  */
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-	popl	%edx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%edx)
-	popl	%ecx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ecx)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	ret
-	cfi_endproc
-	.size	__lll_unlock_wake,.-__lll_unlock_wake
-#endif
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
deleted file mode 100644
index 94dccc4ce7..0000000000
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#ifndef _LOWLEVELLOCK_H
-#define _LOWLEVELLOCK_H	1
-
-#ifndef __ASSEMBLER__
-# include <time.h>
-# include <sys/param.h>
-# include <bits/pthreadtypes.h>
-# include <kernel-features.h>
-/* <tcb-offsets.h> is generated from tcb-offsets.sym to define offsets
-   and sizes of types in <tls.h> as well as <pthread.h> which includes
-   <lowlevellock.h> via nptl/descr.h.  Don't include <tcb-offsets.h>
-   when generating <tcb-offsets.h> to avoid circular dependency which
-   may lead to build hang on a many-core machine.  */
-# ifndef GEN_AS_CONST_HEADERS
-#  include <tcb-offsets.h>
-# endif
-
-# ifndef LOCK_INSTR
-#  ifdef UP
-#   define LOCK_INSTR	/* nothing */
-#  else
-#   define LOCK_INSTR "lock;"
-#  endif
-# endif
-#else
-# ifndef LOCK
-#  ifdef UP
-#   define LOCK
-#  else
-#   define LOCK lock
-#  endif
-# endif
-#endif
-
-#include <lowlevellock-futex.h>
-
-/* XXX Remove when no assembler code uses futexes anymore.  */
-#define SYS_futex		__NR_futex
-
-#ifndef __ASSEMBLER__
-
-/* Initializer for compatibility lock.  */
-#define LLL_LOCK_INITIALIZER		(0)
-#define LLL_LOCK_INITIALIZER_LOCKED	(1)
-#define LLL_LOCK_INITIALIZER_WAITERS	(2)
-
-
-/* NB: in the lll_trylock macro we simply return the value in %eax
-   after the cmpxchg instruction.  In case the operation succeded this
-   value is zero.  In case the operation failed, the cmpxchg instruction
-   has loaded the current value of the memory work which is guaranteed
-   to be nonzero.  */
-#if !IS_IN (libc) || defined UP
-# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
-#else
-# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \
-			   "je 0f\n\t"					      \
-			   "lock\n"					      \
-			   "0:\tcmpxchgl %2, %1"
-#endif
-
-#define lll_trylock(futex) \
-  ({ int ret;								      \
-     __asm __volatile (__lll_trylock_asm				      \
-		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
-			 "0" (LLL_LOCK_INITIALIZER),			      \
-			 "i" (MULTIPLE_THREADS_OFFSET)			      \
-		       : "memory");					      \
-     ret; })
-
-
-#define lll_cond_trylock(futex) \
-  ({ int ret;								      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
-		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_LOCK_INITIALIZER_WAITERS),		      \
-			 "m" (futex), "0" (LLL_LOCK_INITIALIZER)	      \
-		       : "memory");					      \
-     ret; })
-
-#if !IS_IN (libc) || defined UP
-# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t"
-#else
-# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t"			      \
-			      "je 0f\n\t"				      \
-			      "lock\n"					      \
-			      "0:\tcmpxchgl %1, %2\n\t"
-#endif
-
-#define lll_lock(futex, private) \
-  (void)								      \
-    ({ int ignore1, ignore2;						      \
-       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm __volatile (__lll_lock_asm_start				      \
-			   "jz 18f\n\t"				      \
-			   "1:\tleal %2, %%ecx\n"			      \
-			   "2:\tcall __lll_lock_wait_private\n" 	      \
-			   "18:"					      \
-			   : "=a" (ignore1), "=c" (ignore2), "=m" (futex)     \
-			   : "0" (0), "1" (1), "m" (futex),		      \
-			     "i" (MULTIPLE_THREADS_OFFSET)		      \
-			   : "memory");					      \
-       else								      \
-	 {								      \
-	   int ignore3;							      \
-	   __asm __volatile (__lll_lock_asm_start			      \
-			     "jz 18f\n\t"			 	      \
-			     "1:\tleal %2, %%edx\n"			      \
-			     "0:\tmovl %8, %%ecx\n"			      \
-			     "2:\tcall __lll_lock_wait\n"		      \
-			     "18:"					      \
-			     : "=a" (ignore1), "=c" (ignore2),		      \
-			       "=m" (futex), "=&d" (ignore3) 		      \
-			     : "1" (1), "m" (futex),			      \
-			       "i" (MULTIPLE_THREADS_OFFSET), "0" (0),	      \
-			       "g" ((int) (private))			      \
-			     : "memory");				      \
-	 }								      \
-    })
-
-
-/* Special version of lll_lock which causes the unlock function to
-   always wakeup waiters.  */
-#define lll_cond_lock(futex, private) \
-  (void)								      \
-    ({ int ignore1, ignore2, ignore3;					      \
-       __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
-			 "jz 18f\n\t"					      \
-			 "1:\tleal %2, %%edx\n"				      \
-			 "0:\tmovl %7, %%ecx\n"				      \
-			 "2:\tcall __lll_lock_wait\n"			      \
-			 "18:"						      \
-			 : "=a" (ignore1), "=c" (ignore2), "=m" (futex),      \
-			   "=&d" (ignore3)				      \
-			 : "0" (0), "1" (2), "m" (futex), "g" ((int) (private))\
-			 : "memory");					      \
-    })
-
-
-#define lll_timedlock(futex, timeout, private) \
-  ({ int result, ignore1, ignore2, ignore3;				      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"			      \
-		       "jz 18f\n\t"					      \
-		       "1:\tleal %3, %%ecx\n"				      \
-		       "0:\tmovl %8, %%edx\n"				      \
-		       "2:\tcall __lll_timedlock_wait\n"		      \
-		       "18:"						      \
-		       : "=a" (result), "=c" (ignore1), "=&d" (ignore2),      \
-			 "=m" (futex), "=S" (ignore3)			      \
-		       : "0" (0), "1" (1), "m" (futex), "m" (timeout),	      \
-			 "4" ((int) (private))				      \
-		       : "memory");					      \
-     result; })
-
-extern int __lll_timedlock_elision (int *futex, short *adapt_count,
-					 const struct timespec *timeout,
-					 int private) attribute_hidden;
-
-#define lll_timedlock_elision(futex, adapt_count, timeout, private)	\
-  __lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
-
-#if !IS_IN (libc) || defined UP
-# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
-#else
-# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t"			      \
-			  "je 0f\n\t"					      \
-			  "lock\n"					      \
-			  "0:\tsubl $1,%0\n\t"
-#endif
-
-#define lll_unlock(futex, private) \
-  (void)								      \
-    ({ int ignore;							      \
-       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm __volatile (__lll_unlock_asm				      \
-			   "je 18f\n\t"					      \
-			   "1:\tleal %0, %%eax\n"			      \
-			   "2:\tcall __lll_unlock_wake_private\n"	      \
-			   "18:"					      \
-			   : "=m" (futex), "=&a" (ignore)		      \
-			   : "m" (futex), "i" (MULTIPLE_THREADS_OFFSET)	      \
-			   : "memory");					      \
-       else								      \
-	 {								      \
-	   int ignore2;							      \
-	   __asm __volatile (__lll_unlock_asm				      \
-			     "je 18f\n\t"				      \
-			     "1:\tleal %0, %%eax\n"			      \
-			     "0:\tmovl %5, %%ecx\n"			      \
-			     "2:\tcall __lll_unlock_wake\n"		      \
-			     "18:"					      \
-			     : "=m" (futex), "=&a" (ignore), "=&c" (ignore2)  \
-			     : "i" (MULTIPLE_THREADS_OFFSET), "m" (futex),    \
-			       "g" ((int) (private))			      \
-			     : "memory");				      \
-	 }								      \
-    })
-
-
-#define lll_islocked(futex) \
-  (futex != LLL_LOCK_INITIALIZER)
-
-extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
-  attribute_hidden;
-
-extern int __lll_unlock_elision(int *lock, int private)
-  attribute_hidden;
-
-extern int __lll_trylock_elision(int *lock, short *adapt_count)
-  attribute_hidden;
-
-#define lll_lock_elision(futex, adapt_count, private) \
-  __lll_lock_elision (&(futex), &(adapt_count), private)
-#define lll_unlock_elision(futex, adapt_count, private) \
-  __lll_unlock_elision (&(futex), private)
-#define lll_trylock_elision(futex, adapt_count) \
-  __lll_trylock_elision(&(futex), &(adapt_count))
-
-#endif  /* !__ASSEMBLER__ */
-
-#endif	/* lowlevellock.h */