author     Samuel Thibault <samuel.thibault@ens-lyon.org>  2020-06-01 17:27:48 +0000
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>  2020-06-01 17:38:31 +0000
commit     8081702460726304af496be52234385094392a6f
tree       55606e2a42e6342aaed360c27c17e9675c9c478e
parent     a3e589d1f68d43d4c3f67d59497862875c2d5afc
htl: Make pthread_cond_destroy wait for threads to be woken
This allows the storage to be reused after pthread_cond_destroy returns; the
protocol is sketched below the ChangeLog entries.

* sysdeps/htl/bits/types/struct___pthread_cond.h (__pthread_cond):
Replace unused struct __pthread_condimpl *__impl field with unsigned int
__wrefs.
(__PTHREAD_COND_INITIALIZER): Update accordingly.
* sysdeps/htl/pt-cond-timedwait.c (__pthread_cond_timedwait_internal):
Register as waiter in __wrefs field. On unregistering, wake any pending
pthread_cond_destroy.
* sysdeps/htl/pt-cond-destroy.c (__pthread_cond_destroy): Register wake
request in __wrefs.
* nptl/Makefile (tests): Move tst-cond20 tst-cond21 to...
* sysdeps/pthread/Makefile (tests): ... here.
* nptl/tst-cond20.c nptl/tst-cond21.c: Move to...
* sysdeps/pthread/tst-cond20.c sysdeps/pthread/tst-cond21.c: ... here.
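
As a minimal, self-contained model of that protocol (hypothetical names, C11
atomics, and a yield loop standing in for gsync_wait/__gsync_wake; an
illustration only, not the glibc code): bit 0 of the counter is the
pthread_cond_destroy wake-request flag, and each blocked waiter accounts
for 2.

#include <sched.h>
#include <stdatomic.h>

static _Atomic unsigned int wrefs;	/* models cond->__wrefs */

static void
waiter_enter (void)
{
  /* Register before blocking.  Relaxed MO suffices: only the decrement
     needs to synchronize with a pending pthread_cond_destroy.  */
  atomic_fetch_add_explicit (&wrefs, 2, memory_order_relaxed);
}

static void
waiter_exit (void)
{
  /* Deregister with release MO.  */
  if (atomic_fetch_sub_explicit (&wrefs, 2, memory_order_release) == 3)
    {
      /* Previous value 3: we were the last waiter and destruction is
	 pending.  The real code calls __gsync_wake here; this model's
	 destroyer simply spins, so nothing more is needed.  */
    }
}

static void
destroy (void)
{
  /* Announce destruction, then wait for the waiter count to drop to zero;
     the real code blocks in gsync_wait instead of yielding.  */
  unsigned int w = atomic_fetch_or_explicit (&wrefs, 1, memory_order_acquire);
  while (w >> 1 != 0)
    {
      sched_yield ();
      w = atomic_load_explicit (&wrefs, memory_order_acquire);
    }
  /* The condvar's storage can now be reused.  */
}

Counting waiters in units of 2 keeps the wake-request flag in bit 0 out of
the waiter count, so a single atomic word serves both purposes.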
Diffstat (limited to 'sysdeps/htl')
-rw-r--r--  sysdeps/htl/bits/types/struct___pthread_cond.h |  4
-rw-r--r--  sysdeps/htl/pt-cond-destroy.c                  | 18
-rw-r--r--  sysdeps/htl/pt-cond-timedwait.c                | 11
3 files changed, 28 insertions, 5 deletions
diff --git a/sysdeps/htl/bits/types/struct___pthread_cond.h b/sysdeps/htl/bits/types/struct___pthread_cond.h
index 150a37c4c9..c040b171ac 100644
--- a/sysdeps/htl/bits/types/struct___pthread_cond.h
+++ b/sysdeps/htl/bits/types/struct___pthread_cond.h
@@ -27,12 +27,12 @@ struct __pthread_cond
   __pthread_spinlock_t __lock;
   struct __pthread *__queue;
   struct __pthread_condattr *__attr;
-  struct __pthread_condimpl *__impl;
+  unsigned int __wrefs;
   void *__data;
 };
 
 /* Initializer for a condition variable.  */
 #define __PTHREAD_COND_INITIALIZER \
-  { __PTHREAD_SPIN_LOCK_INITIALIZER, NULL, NULL, NULL, NULL }
+  { __PTHREAD_SPIN_LOCK_INITIALIZER, NULL, NULL, 0, NULL }
 
 #endif /* bits/types/struct___pthread_cond.h */
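
The encoding of the new field, spelled out with hypothetical helper macros
(not part of the patch), for reference:

/* Hypothetical helpers documenting how __wrefs is used by this patch.  */
#define COND_DESTROY_REQUESTED(wrefs)  (((wrefs) & 1) != 0)  /* bit 0 */
#define COND_WAITER_COUNT(wrefs)       ((wrefs) >> 1)        /* upper bits */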
diff --git a/sysdeps/htl/pt-cond-destroy.c b/sysdeps/htl/pt-cond-destroy.c
index 0664f3f6cc..722516a8e2 100644
--- a/sysdeps/htl/pt-cond-destroy.c
+++ b/sysdeps/htl/pt-cond-destroy.c
@@ -22,14 +22,26 @@
 int
 __pthread_cond_destroy (pthread_cond_t *cond)
 {
-  int ret = 0;
+  /* Set the wake request flag. */
+  unsigned int wrefs = atomic_fetch_or_acquire (&cond->__wrefs, 1);
 
   __pthread_spin_wait (&cond->__lock);
   if (cond->__queue)
-    ret = EBUSY;
+    {
+      __pthread_spin_unlock (&cond->__lock);
+      return EBUSY;
+    }
   __pthread_spin_unlock (&cond->__lock);
 
-  return ret;
+  while (wrefs >> 1 != 0)
+    {
+      gsync_wait (__mach_task_self (), (vm_offset_t) &cond->__wrefs, wrefs,
+		  0, 0, 0);
+      wrefs = atomic_load_acquire (&cond->__wrefs);
+    }
+  /* The memory the condvar occupies can now be reused.  */
+
+  return 0;
 }
 
 weak_alias (__pthread_cond_destroy, pthread_cond_destroy);
diff --git a/sysdeps/htl/pt-cond-timedwait.c b/sysdeps/htl/pt-cond-timedwait.c
index a0ced9a074..c05944d16d 100644
--- a/sysdeps/htl/pt-cond-timedwait.c
+++ b/sysdeps/htl/pt-cond-timedwait.c
@@ -144,6 +144,10 @@ __pthread_cond_timedwait_internal (pthread_cond_t *cond,
   /* Release MUTEX before blocking.  */
   __pthread_mutex_unlock (mutex);
 
+  /* Increase the waiter reference count.  Relaxed MO is sufficient because
+     we only need to synchronize when decrementing the reference count.  */
+  atomic_fetch_add_relaxed (&cond->__wrefs, 2);
+
   /* Block the thread.  */
   if (abstime != NULL)
     err = __pthread_timedblock (self, abstime, clock_id);
@@ -178,6 +182,13 @@ __pthread_cond_timedwait_internal (pthread_cond_t *cond,
     }
   __pthread_spin_unlock (&cond->__lock);
 
+  /* If destruction is pending (i.e., the wake-request flag is nonzero) and we
+     are the last waiter (prior value of __wrefs was 1 << 1), then wake any
+     threads waiting in pthread_cond_destroy.  Release MO to synchronize with
+     these threads.  Don't bother clearing the wake-up request flag.  */
+  if ((atomic_fetch_add_release (&cond->__wrefs, -2)) == 3)
+    __gsync_wake (__mach_task_self (), (vm_offset_t) &cond->__wrefs, 0, 0);
+
   if (drain)
     __pthread_block (self);
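
At the API level, the change makes the POSIX pattern of destroying a
condition variable immediately after waking all of its waiters, then freeing
or reusing its storage, reliable on htl.  A hypothetical example in the
spirit of tst-cond20/tst-cond21 (not taken from the patch):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t *cond;	/* heap-allocated so it can be freed */
static int ready;

static void *
waiter (void *arg)
{
  (void) arg;
  pthread_mutex_lock (&lock);
  while (!ready)
    pthread_cond_wait (cond, &lock);
  pthread_mutex_unlock (&lock);
  return NULL;
}

int
main (void)
{
  pthread_t thr;

  cond = malloc (sizeof *cond);
  pthread_cond_init (cond, NULL);
  pthread_create (&thr, NULL, waiter, NULL);

  pthread_mutex_lock (&lock);
  ready = 1;
  pthread_cond_broadcast (cond);
  pthread_mutex_unlock (&lock);

  /* With this patch, pthread_cond_destroy waits until the woken waiter has
     actually left the condvar, so freeing the storage right away is safe.  */
  pthread_cond_destroy (cond);
  free (cond);

  pthread_join (thr, NULL);
  return 0;
}

Before this change, the woken waiter could still be accessing the condvar's
storage when free runs; with the __wrefs handshake, pthread_cond_destroy does
not return until the last waiter has deregistered.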