Diffstat (limited to 'nptl')
-rw-r--r--  nptl/pthread_mutex_init.c      | 13
-rw-r--r--  nptl/pthread_mutex_lock.c      | 24
-rw-r--r--  nptl/pthread_mutex_timedlock.c | 60
-rw-r--r--  nptl/pthread_mutex_trylock.c   |  8
-rw-r--r--  nptl/pthread_mutex_unlock.c    |  6
5 files changed, 42 insertions(+), 69 deletions(-)
diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
index fe4eeee37c..20800b80f5 100644
--- a/nptl/pthread_mutex_init.c
+++ b/nptl/pthread_mutex_init.c
@@ -24,6 +24,7 @@
 #include "pthreadP.h"
 #include <atomic.h>
 #include <pthread-offsets.h>
+#include <futex-internal.h>
 
 #include <stap-probe.h>
 
@@ -37,19 +38,13 @@ static const struct pthread_mutexattr default_mutexattr =
 static bool
 prio_inherit_missing (void)
 {
-#ifdef __NR_futex
   static int tpi_supported;
-  if (__glibc_unlikely (tpi_supported == 0))
+  if (__glibc_unlikely (atomic_load_relaxed (&tpi_supported) == 0))
     {
-      int lock = 0;
-      INTERNAL_SYSCALL_DECL (err);
-      int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK_PI, 0, 0);
-      assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
-      tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
+      int e = futex_unlock_pi (&(unsigned int){0}, 0);
+      atomic_store_relaxed (&tpi_supported, e == ENOSYS ? -1 : 1);
     }
   return __glibc_unlikely (tpi_supported < 0);
-#endif
-  return true;
 }
 
 int
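
The prio_inherit_missing() probe above relies on futex_unlock_pi() returning a plain errno value: FUTEX_UNLOCK_PI on a dummy futex word always fails, and a failure of ENOSYS means the kernel lacks PI futex support; the result is cached in tpi_supported with relaxed atomics. As a rough standalone illustration of that probe idiom (using a raw syscall rather than the glibc-internal wrapper, so the details here are an assumption), something like the following behaves similarly:

#include <errno.h>
#include <linux/futex.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical stand-alone sketch of the same probe: FUTEX_UNLOCK_PI on a
   futex word we do not own always fails; ENOSYS indicates that the kernel
   has no PI futex support at all.  Not the glibc-internal implementation.  */
static bool
pi_futex_supported (void)
{
  unsigned int dummy = 0;
  long ret = syscall (SYS_futex, &dummy, FUTEX_UNLOCK_PI, 0, NULL);
  return !(ret == -1 && errno == ENOSYS);
}
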
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index ace436d5a6..05bba50666 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -24,7 +24,7 @@
 #include <not-cancel.h>
 #include "pthreadP.h"
 #include <atomic.h>
-#include <lowlevellock.h>
+#include <futex-internal.h>
 #include <stap-probe.h>
 
 #ifndef lll_lock_elision
@@ -416,21 +416,16 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	    int private = (robust
 			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
 			   : PTHREAD_MUTEX_PSHARED (mutex));
-	    INTERNAL_SYSCALL_DECL (__err);
-	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-				      __lll_private_flag (FUTEX_LOCK_PI,
-							  private), 1, 0);
-
-	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
-		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
-		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
+	    int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
+				   NULL, private);
+	    if (e == ESRCH || e == EDEADLK)
 	      {
-		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
+		assert (e != EDEADLK
 			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
 			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
 		/* ESRCH can happen only for non-robust PI mutexes where
 		   the owner of the lock died.  */
-		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);
+		assert (e != ESRCH || !robust);
 
 		/* Delay the thread indefinitely.  */
 		while (1)
@@ -479,11 +474,8 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
 	    /* This mutex is now not recoverable.  */
 	    mutex->__data.__count = 0;
 
-	    INTERNAL_SYSCALL_DECL (__err);
-	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-			      __lll_private_flag (FUTEX_UNLOCK_PI,
-						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
-			      0, 0);
+	    futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+			     PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 
 	    /* To the kernel, this will be visible after the kernel has
 	       acquired the mutex in the syscall.  */
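
Both hunks above lean on the futex-internal.h helpers returning 0 or a positive error code directly, which is what lets the INTERNAL_SYSCALL_DECL / INTERNAL_SYSCALL_ERRNO bookkeeping disappear. A minimal sketch of that assumed calling convention (a hypothetical errno-returning shim, not the actual glibc wrapper, which also folds in the private-futex flag handling) could look like:

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical shim with the assumed futex_lock_pi convention: block until
   the PI futex is acquired (or abstime expires) and return 0 on success or
   a positive errno value on failure.  'private' is ignored here; the real
   wrapper would use it to select the _PRIVATE futex operation.  */
static int
futex_lock_pi_sketch (unsigned int *futex_word,
                      const struct timespec *abstime, int private)
{
  (void) private;
  long ret = syscall (SYS_futex, futex_word, FUTEX_LOCK_PI, 0, abstime);
  return ret == 0 ? 0 : errno;
}

With this convention a caller can test e == ESRCH || e == EDEADLK directly, as the new code in __pthread_mutex_lock_full does.
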
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 76b93bddc6..eb4baae93b 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -25,6 +25,7 @@
 #include <atomic.h>
 #include <lowlevellock.h>
 #include <not-cancel.h>
+#include <futex-internal.h>
 
 #include <stap-probe.h>
 
@@ -377,39 +378,29 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
 	    int private = (robust
 			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
 			   : PTHREAD_MUTEX_PSHARED (mutex));
-	    INTERNAL_SYSCALL_DECL (__err);
-
-	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-				      __lll_private_flag (FUTEX_LOCK_PI,
-							  private), 1,
-				      abstime);
-	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
+	    int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
+				   abstime, private);
+	    if (e == ETIMEDOUT)
+	      return ETIMEDOUT;
+	    else if (e == ESRCH || e == EDEADLK)
 	      {
-		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
-		  return ETIMEDOUT;
-
-		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
-		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
-		  {
-		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
-			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
-				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
-		    /* ESRCH can happen only for non-robust PI mutexes where
-		       the owner of the lock died.  */
-		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
-			    || !robust);
-
-		    /* Delay the thread until the timeout is reached.
-		       Then return ETIMEDOUT.  */
-		    do
-		      e = lll_timedwait (&(int){0}, 0, clockid, abstime,
-					 private);
-		    while (e != ETIMEDOUT);
-		    return ETIMEDOUT;
-		  }
-
-		return INTERNAL_SYSCALL_ERRNO (e, __err);
+		assert (e != EDEADLK
+			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+			   && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+		/* ESRCH can happen only for non-robust PI mutexes where
+		   the owner of the lock died.  */
+		assert (e != ESRCH || !robust);
+
+		/* Delay the thread until the timeout is reached. Then return
+		   ETIMEDOUT.  */
+		do
+		  e = lll_timedwait (&(int){0}, 0, clockid, abstime,
+				     private);
+		while (e != ETIMEDOUT);
+		return ETIMEDOUT;
 	      }
+	    else if (e != 0)
+	      return e;
 
 	    oldval = mutex->__data.__lock;
 
@@ -447,11 +438,8 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
 	    /* This mutex is now not recoverable.  */
 	    mutex->__data.__count = 0;
 
-	    INTERNAL_SYSCALL_DECL (__err);
-	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-			      __lll_private_flag (FUTEX_UNLOCK_PI,
-						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
-			      0, 0);
+	    futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+			     PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 
 	    /* To the kernel, this will be visible after the kernel has
 	       acquired the mutex in the syscall.  */
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index 87e87c013a..d24bb58a8b 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -21,6 +21,7 @@
 #include <stdlib.h>
 #include "pthreadP.h"
 #include <lowlevellock.h>
+#include <futex-internal.h>
 
 #ifndef lll_trylock_elision
 #define lll_trylock_elision(a,t) lll_trylock(a)
@@ -346,11 +347,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 	    /* This mutex is now not recoverable.  */
 	    mutex->__data.__count = 0;
 
-	    INTERNAL_SYSCALL_DECL (__err);
-	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-			      __lll_private_flag (FUTEX_UNLOCK_PI,
-						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
-			      0, 0);
+	    futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+			     PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 
 	    /* To the kernel, this will be visible after the kernel has
 	       acquired the mutex in the syscall.  */
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index 71038f92e4..53f8b868e4 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -22,6 +22,7 @@
 #include "pthreadP.h"
 #include <lowlevellock.h>
 #include <stap-probe.h>
+#include <futex-internal.h>
 
 #ifndef lll_unlock_elision
 #define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
@@ -277,9 +278,8 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
 	  if (((l & FUTEX_WAITERS) != 0)
 	      || (l != THREAD_GETMEM (THREAD_SELF, tid)))
 	    {
-	      INTERNAL_SYSCALL_DECL (__err);
-	      INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
-				__lll_private_flag (FUTEX_UNLOCK_PI, private));
+	      futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+			       private);
 	      break;
 	    }
 	}
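
The trylock and unlock hunks use the same pattern on the release side: futex_unlock_pi() replaces the FUTEX_UNLOCK_PI syscall and its error-variable boilerplate. Under the same assumptions as the lock sketch above, the release helper would reduce to roughly:

#include <errno.h>
#include <linux/futex.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical shim with the assumed futex_unlock_pi convention: wake the
   next waiter on a PI futex and return 0 or a positive errno value.  The
   real glibc wrapper also applies the private-futex flag.  */
static int
futex_unlock_pi_sketch (unsigned int *futex_word, int private)
{
  (void) private;
  long ret = syscall (SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0, NULL);
  return ret == 0 ? 0 : errno;
}
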