path: root/nptl/pthread_mutex_lock.c
author    Ulrich Drepper <drepper@redhat.com>  2006-02-15 17:20:33 +0000
committer Ulrich Drepper <drepper@redhat.com>  2006-02-15 17:20:33 +0000
commit    683040c3b2c7dc2bc08045202a0a0b46d143b3cb
tree      fcd73510f2bcfcdf57078331d800db4217db759b  /nptl/pthread_mutex_lock.c
parent    b007ce7cc6971e4bd4cd91c558efd3d4603d941d
* sysdeps/unix/sysv/linux/not-cancel.h (__openat_not_cancel,
	__openat64_not_cancel): Remove prototypes.
	(__openat_nocancel, __openat64_nocancel): New prototypes or defines.
	(openat_not_cancel, openat_not_cancel_3, openat64_not_cancel,
	openat64_not_cancel_3): Use them.
Diffstat (limited to 'nptl/pthread_mutex_lock.c')
-rw-r--r--  nptl/pthread_mutex_lock.c | 113
1 file changed, 69 insertions(+), 44 deletions(-)
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index 420711a4d4..dd22567c71 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -27,6 +27,7 @@
 #ifndef LLL_MUTEX_LOCK
 # define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
 # define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
 #endif
 
 
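The new LLL_ROBUST_MUTEX_LOCK macro extends the existing indirection: a file that includes this one can substitute a different low-level primitive by defining the macros before the #include. A sketch of that consumer pattern, modeled on nptl/pthread_mutex_cond_lock.c from the same tree (the primitive names here are assumptions, not part of this commit; the fragment compiles only inside the glibc tree and is shown purely to illustrate the indirection):

  /* Hypothetical consumer: reuse the body of __pthread_mutex_lock
     with condvar-specific low-level primitives.  */
  #define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock (mutex)
  #define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock (mutex)
  #define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
    lll_robust_mutex_cond_lock (mutex, id)
  #define __pthread_mutex_lock __pthread_mutex_cond_lock
  #define NO_INCR   /* see the NO_INCR branch in the hunk below */
  #include "pthread_mutex_lock.c"
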
@@ -36,6 +37,7 @@ __pthread_mutex_lock (mutex)
 {
   assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
 
+  int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
 
   int retval = 0;
@@ -107,60 +109,83 @@ __pthread_mutex_lock (mutex)
       break;
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-      /* Check whether we already hold the mutex.  */
-      if (abs (mutex->__data.__owner) == id)
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+      oldval = mutex->__data.__lock;
+      do
 	{
-	  /* Just bump the counter.  */
-	  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
-	    /* Overflow of the counter.  */
-	    return EAGAIN;
-
-	  ++mutex->__data.__count;
-
-	  return 0;
-	}
-
-      /* We have to get the mutex.  */
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+	  if ((oldval & FUTEX_OWNER_DIED) != 0)
+	    {
+	      /* The previous owner died.  Try locking the mutex.  */
+	      int newval;
+	      while ((newval
+		      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+							     id, oldval))
+		     != oldval)
+		{
+		  if ((newval & FUTEX_OWNER_DIED) == 0)
+		    goto normal;
+		  oldval = newval;
+		}
 
-      mutex->__data.__count = 1;
+	      /* We got the mutex.  */
+	      mutex->__data.__count = 1;
+	      /* But it is inconsistent unless marked otherwise.  */
+	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+	      ENQUEUE_MUTEX (mutex);
+
+	      /* Note that we deliberately exit here.  If we fall
+		 through to the end of the function __nusers would be
+		 incremented which is not correct because the old
+		 owner has to be discounted.  If we are not supposed
+		 to increment __nusers we actually have to decrement
+		 it here.  */
+#ifdef NO_INCR
+	      --mutex->__data.__nusers;
+#endif
 
-      goto robust;
+	      return EOWNERDEAD;
+	    }
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-      /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (abs (mutex->__data.__owner) == id, 0))
-	return EDEADLK;
+	normal:
+	  /* Check whether we already hold the mutex.  */
+	  if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
+				== id, 0))
+	    {
+	      if (mutex->__data.__kind
+		  == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
+		return EDEADLK;
 
-      /* FALLTHROUGH */
+	      if (mutex->__data.__kind
+		  == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+		{
+		  /* Just bump the counter.  */
+		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+		    /* Overflow of the counter.  */
+		    return EAGAIN;
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+		  ++mutex->__data.__count;
 
-    robust:
-      if (__builtin_expect (mutex->__data.__owner
-			    == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
-	{
-	  /* This mutex is now not recoverable.  */
-	  mutex->__data.__count = 0;
-	  lll_mutex_unlock (mutex->__data.__lock);
-	  return ENOTRECOVERABLE;
-	}
+		  return 0;
+		}
+	    }
 
-      /* This mutex is either healthy or we can try to recover it.  */
-      assert (mutex->__data.__owner == 0
-	      || mutex->__data.__owner == PTHREAD_MUTEX_OWNERDEAD);
+	  oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);
 
-      if (__builtin_expect (mutex->__data.__owner
-			    == PTHREAD_MUTEX_OWNERDEAD, 0))
-	{
-	  retval = EOWNERDEAD;
-	  /* We signal ownership of a not yet recovered robust mutex
-	     by storing the negative thread ID.  */
-	  id = -id;
+	  if (__builtin_expect (mutex->__data.__owner
+				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+	    {
+	      /* This mutex is now not recoverable.  */
+	      mutex->__data.__count = 0;
+	      lll_mutex_unlock (mutex->__data.__lock);
+	      return ENOTRECOVERABLE;
+	    }
 	}
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
 
+      mutex->__data.__count = 1;
       ENQUEUE_MUTEX (mutex);
       break;
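
The heart of the new code is the takeover loop for a mutex whose owner died: the kernel sets FUTEX_OWNER_DIED in the lock word when a thread on the robust list exits while holding the lock, and the new locker compare-and-swaps its own TID in, returning EOWNERDEAD so the caller can repair the protected state. Below is a minimal, self-contained C11 sketch of just that loop. It is not the glibc implementation: the constants and the try_recover_dead_owner helper are simplified stand-ins, the real lock word also carries a waiters bit, and contention is handled in the kernel via lll_robust_mutex_lock rather than by spinning.

  #include <errno.h>
  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Simplified robust lock word: the low bits hold the owner's TID;
     the kernel sets FUTEX_OWNER_DIED when the owner exits without
     unlocking.  */
  #define FUTEX_OWNER_DIED 0x40000000u
  #define FUTEX_TID_MASK   0x3fffffffu

  /* Hypothetical helper: try to take over a dead owner's lock.
     Returns EOWNERDEAD on takeover, or 0 if the owner-died bit
     vanished (another thread recovered the mutex first), in which
     case the caller must fall back to the normal locking path.  */
  static int
  try_recover_dead_owner (_Atomic uint32_t *lock, uint32_t tid)
  {
    uint32_t oldval = atomic_load (lock);
    while ((oldval & FUTEX_OWNER_DIED) != 0)
      {
        /* Install our TID, clearing FUTEX_OWNER_DIED.  On failure
           the compare-exchange reloads oldval and we re-test the
           bit, mirroring the CAS loop in the hunk above.  */
        if (atomic_compare_exchange_weak (lock, &oldval, tid))
          return EOWNERDEAD;  /* locked, but state needs recovery */
      }
    return 0;
  }

  int
  main (void)
  {
    /* Pretend thread 42 died while holding the lock.  */
    _Atomic uint32_t lock = 42u | FUTEX_OWNER_DIED;
    int res = try_recover_dead_owner (&lock, 7);
    printf ("result=%s owner=%u\n",
            res == EOWNERDEAD ? "EOWNERDEAD" : "fall back",
            (unsigned) (atomic_load (&lock) & FUTEX_TID_MASK));
    return 0;
  }

Built with a C11 toolchain (e.g. gcc -std=c11), the demo prints "result=EOWNERDEAD owner=7". On EOWNERDEAD the patched code likewise sets __count to 1, marks __owner as PTHREAD_MUTEX_INCONSISTENT, and ENQUEUEs the mutex on the thread's robust list, so a later pthread_mutex_consistent call (or unlock) can resolve the inconsistent state.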