Diffstat (limited to 'nptl/pthread_cond_destroy.c')
-rw-r--r--  nptl/pthread_cond_destroy.c  82
1 file changed, 29 insertions, 53 deletions
diff --git a/nptl/pthread_cond_destroy.c b/nptl/pthread_cond_destroy.c
index 1acd8042d8..5845c6a7ad 100644
--- a/nptl/pthread_cond_destroy.c
+++ b/nptl/pthread_cond_destroy.c
@@ -20,66 +20,42 @@
 #include <shlib-compat.h>
 #include "pthreadP.h"
 #include <stap-probe.h>
-
-
+#include <atomic.h>
+#include <futex-internal.h>
+
+#include "pthread_cond_common.c"
+
+/* See __pthread_cond_wait for a high-level description of the algorithm.
+
+   A correct program must make sure that no waiters are blocked on the condvar
+   when it is destroyed, and that there are no concurrent signals or
+   broadcasts.  To wake waiters reliably, the program must signal or
+   broadcast while holding the mutex or after having held the mutex.  It must
+   also ensure that no signal or broadcast is still pending to unblock
+   waiters; IOW, because waiters can wake up spuriously, the program must
+   effectively ensure that destruction happens after the execution of those
+   signal or broadcast calls.
+   Thus, we can assume that all waiters that are still accessing the condvar
+   have been woken.  We wait until they have confirmed to have woken up by
+   decrementing __wrefs.  */
 int
 __pthread_cond_destroy (pthread_cond_t *cond)
 {
-  int pshared = (cond->__data.__mutex == (void *) ~0l)
-		? LLL_SHARED : LLL_PRIVATE;
-
   LIBC_PROBE (cond_destroy, 1, cond);
 
-  /* Make sure we are alone.  */
-  lll_lock (cond->__data.__lock, pshared);
-
-  if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
-    {
-      /* If there are still some waiters which have not been
-	 woken up, this is an application bug.  */
-      lll_unlock (cond->__data.__lock, pshared);
-      return EBUSY;
-    }
-
-  /* Tell pthread_cond_*wait that this condvar is being destroyed.  */
-  cond->__data.__total_seq = -1ULL;
-
-  /* If there are waiters which have been already signalled or
-     broadcasted, but still are using the pthread_cond_t structure,
-     pthread_cond_destroy needs to wait for them.  */
-  unsigned int nwaiters = cond->__data.__nwaiters;
-
-  if (nwaiters >= (1 << COND_NWAITERS_SHIFT))
+  /* Set the wake request flag.  We could also spin, but destruction that is
+     concurrent with still-active waiters is probably neither common nor
+     performance critical.  Acquire MO to synchronize with waiters confirming
+     that they finished.  */
+  unsigned int wrefs = atomic_fetch_or_acquire (&cond->__data.__wrefs, 4);
+  int private = __condvar_get_private (wrefs);
+  while (wrefs >> 3 != 0)
     {
-      /* Wake everybody on the associated mutex in case there are
-	 threads that have been requeued to it.
-	 Without this, pthread_cond_destroy could block potentially
-	 for a long time or forever, as it would depend on other
-	 thread's using the mutex.
-	 When all threads waiting on the mutex are woken up, pthread_cond_wait
-	 only waits for threads to acquire and release the internal
-	 condvar lock.  */
-      if (cond->__data.__mutex != NULL
-	  && cond->__data.__mutex != (void *) ~0l)
-	{
-	  pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
-	  lll_futex_wake (&mut->__data.__lock, INT_MAX,
-			  PTHREAD_MUTEX_PSHARED (mut));
-	}
-
-      do
-	{
-	  lll_unlock (cond->__data.__lock, pshared);
-
-	  lll_futex_wait (&cond->__data.__nwaiters, nwaiters, pshared);
-
-	  lll_lock (cond->__data.__lock, pshared);
-
-	  nwaiters = cond->__data.__nwaiters;
-	}
-      while (nwaiters >= (1 << COND_NWAITERS_SHIFT));
+      futex_wait_simple (&cond->__data.__wrefs, wrefs, private);
+      /* See above.  */
+      wrefs = atomic_load_acquire (&cond->__data.__wrefs);
     }
-
+  /* The memory the condvar occupies can now be reused.  */
   return 0;
 }
 versioned_symbol (libpthread, __pthread_cond_destroy,
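
For reference, a minimal usage sketch of the contract described in the new header
comment: waiters must be woken by a signal or broadcast issued while holding (or
after having held) the mutex, and destruction may only happen after those calls
have executed.  The identifiers below (lock, cond, done, waiter, shut_down) are
illustrative only and are not part of this patch.

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;	/* Predicate the waiters block on.  */

static void *
waiter (void *arg)
{
  pthread_mutex_lock (&lock);
  while (!done)
    pthread_cond_wait (&cond, &lock);
  pthread_mutex_unlock (&lock);
  return arg;
}

static void
shut_down (void)
{
  /* Broadcast while holding the mutex; after this, no further signals or
     broadcasts may be issued on COND.  */
  pthread_mutex_lock (&lock);
  done = 1;
  pthread_cond_broadcast (&cond);
  pthread_mutex_unlock (&lock);

  /* All waiters have been woken (the broadcast has executed), so destruction
     is permitted.  Some waiters may still be leaving pthread_cond_wait; the
     new destroy implementation above waits for them via __wrefs before it
     returns.  */
  pthread_cond_destroy (&cond);
}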
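The loop in the new __pthread_cond_destroy relies on a reference count kept in
__wrefs: waiters hold a reference (in the bits above bit 2) while they may still
access the condvar, and bit 2 is the wake-request flag set by the fetch-or above.
The following is an illustrative C11-atomics model of that hand-shake, not glibc
code; the real waiter side lives in pthread_cond_wait.c of the same commit and
uses futex_wake/futex_wait rather than the yield loop shown here.

#include <stdatomic.h>
#include <sched.h>

/* Models cond->__data.__wrefs: waiter references in bits 3 and up, bit 2 is
   the wake-request flag, bits 0-1 hold flags (assumed layout).  */
static _Atomic unsigned int wrefs;

/* Waiter side: take a reference before possibly blocking ...  */
static void
waiter_enter (void)
{
  atomic_fetch_add_explicit (&wrefs, 8, memory_order_relaxed);
}

/* ... and drop it once the condvar will not be accessed anymore.  Release MO
   pairs with the destroyer's acquire operations.  */
static void
waiter_confirm_wakeup (void)
{
  unsigned int prev = atomic_fetch_sub_explicit (&wrefs, 8,
						 memory_order_release);
  if ((prev & 4) != 0 && (prev >> 3) == 1)
    {
      /* Last reference and destruction pending: the real implementation
	 issues a futex_wake on __wrefs here.  */
    }
}

/* Destroyer side, mirroring the new __pthread_cond_destroy: set the
   wake-request flag, then wait until no references remain.  */
static void
destroyer_wait (void)
{
  unsigned int w = atomic_fetch_or_explicit (&wrefs, 4,
					     memory_order_acquire);
  while (w >> 3 != 0)
    {
      sched_yield ();	/* futex_wait_simple (&__wrefs, w, private) in glibc.  */
      w = atomic_load_explicit (&wrefs, memory_order_acquire);
    }
}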