author     Torvald Riegel <triegel@redhat.com>	2014-12-17 19:09:04 +0100
committer  Torvald Riegel <triegel@redhat.com>	2014-12-19 15:27:35 +0100
commit     a4a43a907c1e7b610715848f5a69e937a9f22903
tree       9ca4f61d197116e53bd48c71aba8cb899e84faa5
parent     76f71081cd3fe355b9c18d1fc5e87643c788cfac
i386: Move futex functions from lowlevellock.h to lowlevellock-futex.h.
Diffstat (limited to 'sysdeps/unix/sysv')
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevellock-futex.h  137
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevellock.h        130
2 files changed, 157 insertions(+), 110 deletions(-)
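
For orientation: the macros this commit moves are thin wrappers around the
Linux futex system call.  The sketch below is an editorial illustration, not
part of the patch; the names futex_word, wait_for_signal and post_signal are
made up.  It shows the same FUTEX_WAIT/FUTEX_WAKE pattern via syscall(), so
the i386 inline-assembly versions in the diff can be read against it.

/* Sketch only: the wait/wake pattern that lll_futex_wait and
   lll_futex_wake encapsulate, written against the raw futex syscall.  */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static atomic_int futex_word;   /* 0 = not signalled, 1 = signalled */

static void
wait_for_signal (void)
{
  /* FUTEX_WAIT blocks only while the word still equals the expected value
     (0); a spurious or racing return is handled by re-checking the word.  */
  while (atomic_load (&futex_word) == 0)
    syscall (SYS_futex, &futex_word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
             0, NULL, NULL, 0);
}

static void
post_signal (void)
{
  atomic_store (&futex_word, 1);
  /* FUTEX_WAKE wakes at most the given number of waiters (here one).  */
  syscall (SYS_futex, &futex_word, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
           1, NULL, NULL, 0);
}
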
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock-futex.h b/sysdeps/unix/sysv/linux/i386/lowlevellock-futex.h
new file mode 100644
index 0000000000..d7f8728689
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/lowlevellock-futex.h
@@ -0,0 +1,137 @@
+/* Low-level locking access to futex facilities.  Linux/i386 version.
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _LOWLEVELLOCK_FUTEX_H
+#define _LOWLEVELLOCK_FUTEX_H	1
+
+#define FUTEX_WAIT		0
+#define FUTEX_WAKE		1
+#define FUTEX_CMP_REQUEUE	4
+#define FUTEX_WAKE_OP		5
+#define FUTEX_LOCK_PI		6
+#define FUTEX_UNLOCK_PI		7
+#define FUTEX_TRYLOCK_PI	8
+#define FUTEX_WAIT_BITSET	9
+#define FUTEX_WAKE_BITSET	10
+#define FUTEX_WAIT_REQUEUE_PI	11
+#define FUTEX_CMP_REQUEUE_PI	12
+#define FUTEX_PRIVATE_FLAG	128
+#define FUTEX_CLOCK_REALTIME	256
+
+#define FUTEX_BITSET_MATCH_ANY	0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE	0
+#define LLL_SHARED	FUTEX_PRIVATE_FLAG
+
+
+#if IS_IN (libc) || IS_IN (rtld)
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)					      \
+   ? ((private) == 0							      \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))	      \
+      : (fl))								      \
+   : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG);		      \
+	asm ("andl %%gs:%P1, %0" : "+r" (__fl)				      \
+	     : "i" (offsetof (struct pthread, header.private_futex)));	      \
+	__fl | (fl); }))
+# endif
+#endif
+
+
+#ifndef __ASSEMBLER__
+
+/* To avoid naming conflicts with lowlevellock.h, use a different prefix
+   here.  */
+#ifdef PIC
+# define LLLF_EBX_LOAD	"xchgl %2, %%ebx\n"
+# define LLLF_EBX_REG	"D"
+#else
+# define LLLF_EBX_LOAD
+# define LLLF_EBX_REG	"b"
+#endif
+
+#ifdef I386_USE_SYSENTER
+# ifdef SHARED
+#  define LLLF_ENTER_KERNEL	"call *%%gs:%P6\n\t"
+# else
+#  define LLLF_ENTER_KERNEL	"call *_dl_sysinfo\n\t"
+# endif
+#else
+# define LLLF_ENTER_KERNEL	"int $0x80\n\t"
+#endif
+
+
+#define lll_futex_wait(futex, val, private) \
+  lll_futex_timed_wait (futex, val, NULL, private)
+
+
+#define lll_futex_timed_wait(futex, val, timeout, private) \
+  ({									      \
+    int __status;							      \
+    register __typeof (val) _val asm ("edx") = (val);			      \
+    __asm __volatile (LLLF_EBX_LOAD					      \
+		      LLLF_ENTER_KERNEL					      \
+		      LLLF_EBX_LOAD					      \
+		      : "=a" (__status)					      \
+		      : "0" (SYS_futex), LLLF_EBX_REG (futex), "S" (timeout),  \
+			"c" (__lll_private_flag (FUTEX_WAIT, private)),	      \
+			"d" (_val), "i" (offsetof (tcbhead_t, sysinfo))	      \
+		      : "memory");					      \
+    __status;								      \
+  })
+
+
+#define lll_futex_wake(futex, nr, private) \
+  ({									      \
+    int __status;							      \
+    register __typeof (nr) _nr asm ("edx") = (nr);			      \
+    LIBC_PROBE (lll_futex_wake, 3, futex, nr, private);                       \
+    __asm __volatile (LLLF_EBX_LOAD					      \
+		      LLLF_ENTER_KERNEL					      \
+		      LLLF_EBX_LOAD					      \
+		      : "=a" (__status)					      \
+		      : "0" (SYS_futex), LLLF_EBX_REG (futex),		      \
+			"c" (__lll_private_flag (FUTEX_WAKE, private)),	      \
+			"d" (_nr),					      \
+			"i" (0) /* phony, to align next arg's number */,      \
+			"i" (offsetof (tcbhead_t, sysinfo)));		      \
+    __status;								      \
+  })
+
+
+#endif  /* !__ASSEMBLER__ */
+
+#endif	/* lowlevellock-futex.h */
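
The comment on LLL_PRIVATE/LLL_SHARED in the new header says the definition
"seems to be backwards".  The following standalone check is an editorial
sketch, not part of the patch (private_flag is a made-up name); it re-derives
the __ASSUME_PRIVATE_FUTEX case of __lll_private_flag to show why XOR-ing the
'private' argument into an already-set FUTEX_PRIVATE_FLAG bit yields a private
operation for LLL_PRIVATE (0) and a shared one for LLL_SHARED (128).

/* Editorial sketch: the bit reversal performed by __lll_private_flag.  */
#include <assert.h>

#define FUTEX_WAIT              0
#define FUTEX_WAKE              1
#define FUTEX_PRIVATE_FLAG      128
#define LLL_PRIVATE             0
#define LLL_SHARED              FUTEX_PRIVATE_FLAG
#define private_flag(fl, priv)  (((fl) | FUTEX_PRIVATE_FLAG) ^ (priv))

int
main (void)
{
  /* LLL_PRIVATE leaves FUTEX_PRIVATE_FLAG set: a private futex op.  */
  assert (private_flag (FUTEX_WAIT, LLL_PRIVATE)
          == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
  /* LLL_SHARED cancels the bit again: a shared futex op.  */
  assert (private_flag (FUTEX_WAKE, LLL_SHARED) == FUTEX_WAKE);
  return 0;
}
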
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index 1032f4b666..c3528a8830 100644
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -45,57 +45,10 @@
 # endif
 #endif
 
+#include <lowlevellock-futex.h>
+
+/* XXX Remove when no assembler code uses futexes anymore.  */
 #define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_CMP_REQUEUE	4
-#define FUTEX_WAKE_OP		5
-#define FUTEX_LOCK_PI		6
-#define FUTEX_UNLOCK_PI		7
-#define FUTEX_TRYLOCK_PI	8
-#define FUTEX_WAIT_BITSET	9
-#define FUTEX_WAKE_BITSET	10
-#define FUTEX_WAIT_REQUEUE_PI	11
-#define FUTEX_CMP_REQUEUE_PI	12
-#define FUTEX_PRIVATE_FLAG	128
-#define FUTEX_CLOCK_REALTIME	256
-
-#define FUTEX_BITSET_MATCH_ANY	0xffffffff
-
-#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
-
-/* Values for 'private' parameter of locking macros.  Yes, the
-   definition seems to be backwards.  But it is not.  The bit will be
-   reversed before passing to the system call.  */
-#define LLL_PRIVATE	0
-#define LLL_SHARED	FUTEX_PRIVATE_FLAG
-
-
-#if IS_IN (libc) || IS_IN (rtld)
-/* In libc.so or ld.so all futexes are private.  */
-# ifdef __ASSUME_PRIVATE_FUTEX
-#  define __lll_private_flag(fl, private) \
-  ((fl) | FUTEX_PRIVATE_FLAG)
-# else
-#  define __lll_private_flag(fl, private) \
-  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
-# endif
-#else
-# ifdef __ASSUME_PRIVATE_FUTEX
-#  define __lll_private_flag(fl, private) \
-  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
-# else
-#  define __lll_private_flag(fl, private) \
-  (__builtin_constant_p (private)					      \
-   ? ((private) == 0							      \
-      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))	      \
-      : (fl))								      \
-   : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG);		      \
-	asm ("andl %%gs:%P1, %0" : "+r" (__fl)				      \
-	     : "i" (offsetof (struct pthread, header.private_futex)));	      \
-	__fl | (fl); }))
-# endif
-#endif
 
 #ifndef __ASSEMBLER__
 
@@ -126,43 +79,6 @@
 /* Delay in spinlock loop.  */
 #define BUSY_WAIT_NOP	asm ("rep; nop")
 
-#define lll_futex_wait(futex, val, private) \
-  lll_futex_timed_wait (futex, val, NULL, private)
-
-
-#define lll_futex_timed_wait(futex, val, timeout, private) \
-  ({									      \
-    int __status;							      \
-    register __typeof (val) _val asm ("edx") = (val);			      \
-    __asm __volatile (LLL_EBX_LOAD					      \
-		      LLL_ENTER_KERNEL					      \
-		      LLL_EBX_LOAD					      \
-		      : "=a" (__status)					      \
-		      : "0" (SYS_futex), LLL_EBX_REG (futex), "S" (timeout),  \
-			"c" (__lll_private_flag (FUTEX_WAIT, private)),	      \
-			"d" (_val), "i" (offsetof (tcbhead_t, sysinfo))	      \
-		      : "memory");					      \
-    __status;								      \
-  })
-
-
-#define lll_futex_wake(futex, nr, private) \
-  ({									      \
-    int __status;							      \
-    register __typeof (nr) _nr asm ("edx") = (nr);			      \
-    LIBC_PROBE (lll_futex_wake, 3, futex, nr, private);                       \
-    __asm __volatile (LLL_EBX_LOAD					      \
-		      LLL_ENTER_KERNEL					      \
-		      LLL_EBX_LOAD					      \
-		      : "=a" (__status)					      \
-		      : "0" (SYS_futex), LLL_EBX_REG (futex),		      \
-			"c" (__lll_private_flag (FUTEX_WAKE, private)),	      \
-			"d" (_nr),					      \
-			"i" (0) /* phony, to align next arg's number */,      \
-			"i" (offsetof (tcbhead_t, sysinfo)));		      \
-    __status;								      \
-  })
-
 
 /* NB: in the lll_trylock macro we simply return the value in %eax
    after the cmpxchg instruction.  In case the operation succeded this
@@ -381,43 +297,37 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
   (futex != LLL_LOCK_INITIALIZER)
 
 /* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
-   wakeup when the clone terminates.  The memory location contains the
-   thread ID while the clone is running and is reset to zero
-   afterwards.
-
-   The macro parameter must not have any side effect.  */
+   wake-up when the clone terminates.  The memory location contains the
+   thread ID while the clone is running and is reset to zero by the kernel
+   afterwards.  The kernel up to version 3.16.3 does not use the private futex
+   operations for futex wake-up when the clone terminates.  */
 #define lll_wait_tid(tid) \
-  do {									      \
-    int __ignore;							      \
-    register __typeof (tid) _tid asm ("edx") = (tid);			      \
-    if (_tid != 0)							      \
-      __asm __volatile (LLL_EBX_LOAD					      \
-			"1:\tmovl %1, %%eax\n\t"			      \
-			LLL_ENTER_KERNEL				      \
-			"cmpl $0, (%%ebx)\n\t"				      \
-			"jne 1b\n\t"					      \
-			LLL_EBX_LOAD					      \
-			: "=&a" (__ignore)				      \
-			: "i" (SYS_futex), LLL_EBX_REG (&tid), "S" (0),	      \
-			  "c" (FUTEX_WAIT), "d" (_tid),			      \
-			  "i" (offsetof (tcbhead_t, sysinfo))		      \
-			: "memory");					      \
+  do {					\
+    __typeof (tid) __tid;		\
+    while ((__tid = (tid)) != 0)	\
+      lll_futex_wait (&(tid), __tid, LLL_SHARED);\
   } while (0)
 
 extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
      __attribute__ ((regparm (2))) attribute_hidden;
+
+/* As lll_wait_tid, but with a timeout.  If the timeout occurs then return
+   ETIMEDOUT.  If ABSTIME is invalid, return EINVAL.
+   XXX Note that this differs from the generic version in that we do the
+   error checking here and not in __lll_timedwait_tid.  */
 #define lll_timedwait_tid(tid, abstime) \
   ({									      \
     int __result = 0;							      \
-    if (tid != 0)							      \
+    if ((tid) != 0)							      \
       {									      \
-	if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)	      \
+	if ((abstime)->tv_nsec < 0 || (abstime)->tv_nsec >= 1000000000)	      \
 	  __result = EINVAL;						      \
 	else								      \
-	  __result = __lll_timedwait_tid (&tid, abstime);		      \
+	  __result = __lll_timedwait_tid (&(tid), (abstime));		      \
       }									      \
     __result; })
 
+
 extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
   attribute_hidden;
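
The rewritten lll_wait_tid in the last hunk loops on lll_futex_wait with
LLL_SHARED because, per the new comment, kernels up to 3.16.3 do not use the
private futex operations for the CLONE_CHILD_CLEARTID wake-up.  The sketch
below is an editorial illustration of that handshake, not part of the patch;
wait_for_thread_exit is a made-up helper name.

/* The kernel stores the child's TID in the word, clears it when the thread
   exits, and then performs a (non-private) FUTEX_WAKE on it.  */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
wait_for_thread_exit (volatile int *tidp)
{
  int tid;
  /* Sleep while the word still holds the TID; FUTEX_WAIT returns
     immediately (EAGAIN) if the kernel has already cleared the word.  */
  while ((tid = *tidp) != 0)
    syscall (SYS_futex, tidp, FUTEX_WAIT, tid, NULL, NULL, 0);
}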