author    Samuel Thibault <samuel.thibault@ens-lyon.org>  2020-12-13 15:14:40 +0000
committer Samuel Thibault <samuel.thibault@ens-lyon.org>  2020-12-16 01:58:33 +0100
commit    c8f9421298f5f973b31a7cbbc76e61b06eca03bc (patch)
tree      86bf4c058624d76b6e35fd44f1f506c3ad9b3d94 /sysdeps/htl
parent    644d98ec4d8405e9b721ecb715483ea1983e116f (diff)
htl: Add pshared semaphore support

The implementation is very close to the nptl implementation, differing
only in the futex interface (gsync on the Hurd).  This fixes part of
BZ #25521.
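
For orientation: the patch drops htl's queue-based semaphore in favor of
the nptl-style atomic layout.  The actual htl definition of struct new_sem
lives in pt-internal.h, which is outside this diffstat, so the sketch below
is an assumption modeled on nptl's internaltypes.h and on the fields this
diff accesses (data, value, nwaiters, pshared):

    /* Assumed internal layout (hypothetical sketch, not from this diff).  */
    struct new_sem
    {
    #if __HAVE_64B_ATOMICS
      /* Value in the low 32 bits, waiter count in the high 32 bits
         (SEM_NWAITERS_SHIFT == 32).  */
      uint64_t data;
    #else
      /* Value shifted left by SEM_VALUE_SHIFT; the low bit
         (SEM_NWAITERS_MASK) flags that waiters may be present.  */
      unsigned int value;
      unsigned int nwaiters;
    #endif
      int pshared;		/* Nonzero for process-shared semaphores.  */
    };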
Diffstat (limited to 'sysdeps/htl')
-rw-r--r--  sysdeps/htl/bits/semaphore.h |  20
-rw-r--r--  sysdeps/htl/sem-destroy.c    |  10
-rw-r--r--  sysdeps/htl/sem-getvalue.c   |  10
-rw-r--r--  sysdeps/htl/sem-init.c       |  10
-rw-r--r--  sysdeps/htl/sem-post.c       |  54
-rw-r--r--  sysdeps/htl/sem-timedwait.c  | 263
-rw-r--r--  sysdeps/htl/sem-trywait.c    |  15
-rw-r--r--  sysdeps/htl/sem-waitfast.c   |  55
8 files changed, 253 insertions(+), 184 deletions(-)
diff --git a/sysdeps/htl/bits/semaphore.h b/sysdeps/htl/bits/semaphore.h
index 8611bac5ce..77a2be13d3 100644
--- a/sysdeps/htl/bits/semaphore.h
+++ b/sysdeps/htl/bits/semaphore.h
@@ -27,21 +27,15 @@
 #include <bits/pthread.h>
 
 /* User visible part of a semaphore.  */
-struct __semaphore
-{
-  __pthread_spinlock_t __lock;
-  struct __pthread *__queue;
-  int __pshared;
-  int __value;
-  void *__data;
-};
 
-typedef struct __semaphore sem_t;
+#define __SIZEOF_SEM_T	20
 
-#define SEM_FAILED ((void *) 0)
+typedef union
+{
+  char __size[__SIZEOF_SEM_T];
+  long int __align;
+} sem_t;
 
-/* Initializer for a semaphore.  */
-#define __SEMAPHORE_INITIALIZER(pshared, value) \
-  { __PTHREAD_SPIN_LOCK_INITIALIZER, NULL, (pshared), (value), NULL }
+#define SEM_FAILED ((void *) 0)
 
 #endif /* bits/semaphore.h */
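
With this change sem_t becomes an opaque, fixed-size type as on nptl, so
the internal layout can evolve without breaking the ABI.  The internal
struct has to fit inside it; a compile-time check along these lines
(hypothetical, not part of this patch) would catch a mismatch:

    /* Hypothetical sanity check against the opaque size above.  */
    _Static_assert (sizeof (struct new_sem) <= __SIZEOF_SEM_T,
                    "struct new_sem does not fit in sem_t");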
diff --git a/sysdeps/htl/sem-destroy.c b/sysdeps/htl/sem-destroy.c
index 4caa004444..ebfeb2a0e6 100644
--- a/sysdeps/htl/sem-destroy.c
+++ b/sysdeps/htl/sem-destroy.c
@@ -24,7 +24,15 @@
 int
 __sem_destroy (sem_t *sem)
 {
-  if (sem->__queue)
+  struct new_sem *isem = (struct new_sem *) sem;
+  if (
+#if __HAVE_64B_ATOMICS
+      atomic_load_relaxed (&isem->data) >> SEM_NWAITERS_SHIFT
+#else
+      atomic_load_relaxed (&isem->value) & SEM_NWAITERS_MASK
+      || isem->nwaiters
+#endif
+      )
     /* There are threads waiting on *SEM.  */
     {
       errno = EBUSY;
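
The busy check now reads the waiter accounting without taking a lock: the
high half of data on 64-bit-atomics targets, or the waiter bit plus the
separate nwaiters count on 32-bit targets.  A decoding sketch for the
64-bit case, assuming the nptl constants (SEM_NWAITERS_SHIFT == 32,
SEM_VALUE_MASK == 0xffffffff):

    uint64_t d = atomic_load_relaxed (&isem->data);
    unsigned int nwaiters = d >> SEM_NWAITERS_SHIFT;	/* High half.  */
    unsigned int value = d & SEM_VALUE_MASK;		/* Low half.  */
    /* Destroying with nwaiters != 0 fails with EBUSY above.  */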
diff --git a/sysdeps/htl/sem-getvalue.c b/sysdeps/htl/sem-getvalue.c
index 2d72a63824..728f763f9e 100644
--- a/sysdeps/htl/sem-getvalue.c
+++ b/sysdeps/htl/sem-getvalue.c
@@ -22,9 +22,13 @@
 int
 __sem_getvalue (sem_t *restrict sem, int *restrict value)
 {
-  __pthread_spin_wait (&sem->__lock);
-  *value = sem->__value;
-  __pthread_spin_unlock (&sem->__lock);
+  struct new_sem *isem = (struct new_sem *) sem;
+
+#if __HAVE_64B_ATOMICS
+  *value = atomic_load_relaxed (&isem->data) & SEM_VALUE_MASK;
+#else
+  *value = atomic_load_relaxed (&isem->value) >> SEM_VALUE_SHIFT;
+#endif
 
   return 0;
 }
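
Both branches recover the same logical count from different encodings.
For instance, on a 32-bit target with the nptl constants assumed here
(SEM_VALUE_SHIFT == 1, SEM_NWAITERS_MASK == 1), a semaphore holding
value 3 with waiters present stores 7:

    unsigned int v = (3 << SEM_VALUE_SHIFT) | SEM_NWAITERS_MASK;  /* == 7 */
    int value = v >> SEM_VALUE_SHIFT;	/* == 3; the waiter bit drops out.  */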
diff --git a/sysdeps/htl/sem-init.c b/sysdeps/htl/sem-init.c
index 2be6ab449b..196846d311 100644
--- a/sysdeps/htl/sem-init.c
+++ b/sysdeps/htl/sem-init.c
@@ -24,12 +24,6 @@
 int
 __sem_init (sem_t *sem, int pshared, unsigned value)
 {
-  if (pshared != 0)
-    {
-      errno = EOPNOTSUPP;
-      return -1;
-    }
-
 #ifdef SEM_VALUE_MAX
   if (value > SEM_VALUE_MAX)
     {
@@ -38,7 +32,9 @@ __sem_init (sem_t *sem, int pshared, unsigned value)
     }
 #endif
 
-  *sem = (sem_t) __SEMAPHORE_INITIALIZER (pshared, value);
+  struct new_sem *isem = (struct new_sem *) sem;
+
+  *isem = (struct new_sem) __SEMAPHORE_INITIALIZER (value, pshared);
   return 0;
 }
 
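Note that __SEMAPHORE_INITIALIZER's argument order flips from
(pshared, value) to (value, pshared), matching nptl, and the macro moves
out of the public header.  The user-visible change is that pshared
semaphores now initialize instead of failing; a minimal sketch:

    sem_t s;
    /* Before this patch, htl failed this call with EOPNOTSUPP; with it,
       the call succeeds and waiters use shared gsync wakeups.  */
    if (sem_init (&s, /* pshared */ 1, /* value */ 0) != 0)
      perror ("sem_init");
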
diff --git a/sysdeps/htl/sem-post.c b/sysdeps/htl/sem-post.c
index 720b73a059..83a3279c84 100644
--- a/sysdeps/htl/sem-post.c
+++ b/sysdeps/htl/sem-post.c
@@ -19,48 +19,50 @@
 #include <semaphore.h>
 #include <assert.h>
 
+#include <hurdlock.h>
+
 #include <pt-internal.h>
 
 int
 __sem_post (sem_t *sem)
 {
-  struct __pthread *wakeup;
+  struct new_sem *isem = (struct new_sem *) sem;
+  int flags = isem->pshared ? GSYNC_SHARED : 0;
+
+#if __HAVE_64B_ATOMICS
+  uint64_t d = atomic_load_relaxed (&isem->data);
 
-  __pthread_spin_wait (&sem->__lock);
-  if (sem->__value > 0)
-    /* Do a quick up.  */
+  do
     {
-      if (sem->__value == SEM_VALUE_MAX)
+      if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)
 	{
-	  __pthread_spin_unlock (&sem->__lock);
 	  errno = EOVERFLOW;
 	  return -1;
 	}
-
-      assert (sem->__queue == NULL);
-      sem->__value++;
-      __pthread_spin_unlock (&sem->__lock);
-      return 0;
     }
+  while (!atomic_compare_exchange_weak_release (&isem->data, &d, d + 1));
 
-  if (sem->__queue == NULL)
-    /* No one waiting.  */
+  if ((d >> SEM_NWAITERS_SHIFT) != 0)
+    /* Wake one waiter.  */
+    __lll_wake (((unsigned int *) &isem->data) + SEM_VALUE_OFFSET, flags);
+#else
+  unsigned int v = atomic_load_relaxed (&isem->value);
+
+  do
     {
-      sem->__value = 1;
-      __pthread_spin_unlock (&sem->__lock);
-      return 0;
+      if ((v >> SEM_VALUE_SHIFT) == SEM_VALUE_MAX)
+	{
+	  errno = EOVERFLOW;
+	  return -1;
+	}
     }
+  while (!atomic_compare_exchange_weak_release
+	  (&isem->value, &v, v + (1 << SEM_VALUE_SHIFT)));
 
-  /* Wake someone up.  */
-
-  /* First dequeue someone.  */
-  wakeup = sem->__queue;
-  __pthread_dequeue (wakeup);
-
-  /* Then drop the lock and transfer control.  */
-  __pthread_spin_unlock (&sem->__lock);
-
-  __pthread_wakeup (wakeup);
+  if ((v & SEM_NWAITERS_MASK) != 0)
+    /* Wake one waiter.  */
+    __lll_wake (&isem->value, flags);
+#endif
 
   return 0;
 }
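
Posting is now a lock-free CAS loop: increment the value with release
ordering (failing with EOVERFLOW at SEM_VALUE_MAX), then wake one waiter
through gsync when the waiter count (64-bit) or waiter bit (32-bit) is
set, passing GSYNC_SHARED so the kernel matches waiters in other tasks
for pshared semaphores.  A minimal cross-process usage sketch
(hypothetical, error handling elided):

    #include <semaphore.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main (void)
    {
      sem_t *s = mmap (NULL, sizeof (sem_t), PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
      sem_init (s, 1, 0);	/* pshared == 1 now works on htl.  */
      if (fork () == 0)
        {
          sem_wait (s);		/* Child blocks on the shared gsync.  */
          _exit (0);
        }
      sem_post (s);		/* Parent wakes the child.  */
      return 0;
    }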
diff --git a/sysdeps/htl/sem-timedwait.c b/sysdeps/htl/sem-timedwait.c
index 5095d49b28..4afccd88fc 100644
--- a/sysdeps/htl/sem-timedwait.c
+++ b/sysdeps/htl/sem-timedwait.c
@@ -20,37 +20,27 @@
 #include <errno.h>
 #include <assert.h>
 #include <time.h>
+#include <hurdlock.h>
+#include <hurd/hurd.h>
+#include <sysdep-cancel.h>
 
 #include <pt-internal.h>
 
-struct cancel_ctx
-{
-  struct __pthread *wakeup;
-  sem_t *sem;
-  int cancel_wake;
-};
+#if !__HAVE_64B_ATOMICS
+static void
+__sem_wait_32_finish (struct new_sem *isem);
+#endif
 
 static void
-cancel_hook (void *arg)
+__sem_wait_cleanup (void *arg)
 {
-  struct cancel_ctx *ctx = arg;
-  struct __pthread *wakeup = ctx->wakeup;
-  sem_t *sem = ctx->sem;
-  int unblock;
-
-  __pthread_spin_wait (&sem->__lock);
-  /* The thread only needs to be awaken if it's blocking or about to block.
-     If it was already unblocked, it's not queued any more.  */
-  unblock = wakeup->prevp != NULL;
-  if (unblock)
-    {
-      __pthread_dequeue (wakeup);
-      ctx->cancel_wake = 1;
-    }
-  __pthread_spin_unlock (&sem->__lock);
+  struct new_sem *isem = arg;
 
-  if (unblock)
-    __pthread_wakeup (wakeup);
+#if __HAVE_64B_ATOMICS
+  atomic_fetch_add_relaxed (&isem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
+#else
+  __sem_wait_32_finish (isem);
+#endif
 }
 
 int
@@ -58,123 +48,148 @@ __sem_timedwait_internal (sem_t *restrict sem,
 			  clockid_t clock_id,
 			  const struct timespec *restrict timeout)
 {
-  error_t err;
-  int cancelled, oldtype, drain;
-  int ret = 0;
-
-  struct __pthread *self = _pthread_self ();
-  struct cancel_ctx ctx;
-  ctx.wakeup = self;
-  ctx.sem = sem;
-  ctx.cancel_wake = 0;
-
-  /* Test for a pending cancellation request, switch to deferred mode for
-     safer resource handling, and prepare the hook to call in case we're
-     cancelled while blocking.  Once CANCEL_LOCK is released, the cancellation
-     hook can be called by another thread at any time.  Whatever happens,
-     this function must exit with MUTEX locked.
-
-     This function contains inline implementations of pthread_testcancel and
-     pthread_setcanceltype to reduce locking overhead.  */
-  __pthread_mutex_lock (&self->cancel_lock);
-  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
-      && self->cancel_pending;
-
-  if (cancelled)
-    {
-      __pthread_mutex_unlock (&self->cancel_lock);
-      __pthread_exit (PTHREAD_CANCELED);
-    }
+  struct new_sem *isem = (struct new_sem *) sem;
+  int err, ret = 0;
+  int flags = isem->pshared ? GSYNC_SHARED : 0;
 
-  self->cancel_hook = cancel_hook;
-  self->cancel_hook_arg = &ctx;
-  oldtype = self->cancel_type;
+  __pthread_testcancel ();
 
-  if (oldtype != PTHREAD_CANCEL_DEFERRED)
-    self->cancel_type = PTHREAD_CANCEL_DEFERRED;
+  if (__sem_waitfast (isem, 0) == 0)
+    return 0;
 
-  /* Add ourselves to the list of waiters.  This is done while setting
-     the cancellation hook to simplify the cancellation procedure, i.e.
-     if the thread is queued, it can be cancelled, otherwise it is
-     already unblocked, progressing on the return path.  */
-  __pthread_spin_wait (&sem->__lock);
-  if (sem->__value > 0)
-    /* Successful down.  */
-    {
-      sem->__value--;
-      __pthread_spin_unlock (&sem->__lock);
-      goto out_locked;
-    }
+  int cancel_oldtype = LIBC_CANCEL_ASYNC ();
 
-  if (timeout != NULL && ! valid_nanoseconds (timeout->tv_nsec))
-    {
-      errno = EINVAL;
-      ret = -1;
-      __pthread_spin_unlock (&sem->__lock);
-      goto out_locked;
-    }
+#if __HAVE_64B_ATOMICS
+  uint64_t d = atomic_fetch_add_relaxed (&isem->data,
+		 (uint64_t) 1 << SEM_NWAITERS_SHIFT);
+
+  pthread_cleanup_push (__sem_wait_cleanup, isem);
 
-  /* Add ourselves to the queue.  */
-  __pthread_enqueue (&sem->__queue, self);
-  __pthread_spin_unlock (&sem->__lock);
-
-  __pthread_mutex_unlock (&self->cancel_lock);
-
-  /* Block the thread.  */
-  if (timeout != NULL)
-    err = __pthread_timedblock_intr (self, timeout, clock_id);
-  else
-    err = __pthread_block_intr (self);
-
-  __pthread_spin_wait (&sem->__lock);
-  if (self->prevp == NULL)
-    /* Another thread removed us from the queue, which means a wakeup message
-       has been sent.  It was either consumed while we were blocking, or
-       queued after we timed out and before we acquired the semaphore lock, in
-       which case the message queue must be drained.  */
-    drain = err ? 1 : 0;
-  else
+  for (;;)
     {
-      /* We're still in the queue.  Noone attempted to wake us up, i.e. we
-         timed out.  */
-      __pthread_dequeue (self);
-      drain = 0;
+      if ((d & SEM_VALUE_MASK) == 0)
+	{
+	  /* No token, sleep.  */
+	  if (timeout)
+	    err = __lll_abstimed_wait_intr (
+		      ((unsigned int *) &isem->data) + SEM_VALUE_OFFSET,
+		      0, timeout, flags, clock_id);
+	  else
+	    err = __lll_wait_intr (
+		      ((unsigned int *) &isem->data) + SEM_VALUE_OFFSET,
+		      0, flags);
+
+	  if (err != 0)
+	    {
+	      /* Error, interruption or timeout, abort.  */
+	      if (err == KERN_TIMEDOUT)
+		err = ETIMEDOUT;
+	      if (err == KERN_INTERRUPTED)
+		err = EINTR;
+	      ret = __hurd_fail (err);
+	      __sem_wait_cleanup (isem);
+	      break;
+	    }
+
+	  /* Token changed.  */
+	  d = atomic_load_relaxed (&isem->data);
+	}
+      else
+	{
+	  /* Try to acquire and dequeue.  */
+	  if (atomic_compare_exchange_weak_acquire (&isem->data,
+	      &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
+	    {
+	      /* Success.  */
+	      ret = 0;
+	      break;
+	    }
+	}
     }
-  __pthread_spin_unlock (&sem->__lock);
 
-  if (drain)
-    __pthread_block (self);
+  pthread_cleanup_pop (0);
+#else
+  unsigned int v;
+
+  atomic_fetch_add_acquire (&isem->nwaiters, 1);
 
-  if (err)
+  pthread_cleanup_push (__sem_wait_cleanup, isem);
+
+  v = atomic_load_relaxed (&isem->value);
+  do
     {
-      assert (err == ETIMEDOUT || err == EINTR);
-      errno = err;
-      ret = -1;
+      do
+	{
+	  do
+	    {
+	      if ((v & SEM_NWAITERS_MASK) != 0)
+		break;
+	    }
+	  while (!atomic_compare_exchange_weak_release (&isem->value,
+	      &v, v | SEM_NWAITERS_MASK));
+
+	  if ((v >> SEM_VALUE_SHIFT) == 0)
+	    {
+	      /* No token, sleep.  */
+	      if (timeout)
+		err = __lll_abstimed_wait_intr (&isem->value,
+			  SEM_NWAITERS_MASK, timeout, flags, clock_id);
+	      else
+		err = __lll_wait_intr (&isem->value,
+			  SEM_NWAITERS_MASK, flags);
+
+	      if (err != 0)
+		{
+		  /* Error, interruption or timeout, abort.  */
+		  if (err == KERN_TIMEDOUT)
+		    err = ETIMEDOUT;
+		  if (err == KERN_INTERRUPTED)
+		    err = EINTR;
+		  ret = __hurd_fail (err);
+		  goto error;
+		}
+
+	      /* Token changed.  */
+	      v = atomic_load_relaxed (&isem->value);
+	    }
+	}
+      while ((v >> SEM_VALUE_SHIFT) == 0);
     }
+  while (!atomic_compare_exchange_weak_acquire (&isem->value,
+	  &v, v - (1 << SEM_VALUE_SHIFT)));
 
-  /* We're almost done.  Remove the unblock hook, restore the previous
-     cancellation type, and check for a pending cancellation request.  */
-  __pthread_mutex_lock (&self->cancel_lock);
-out_locked:
-  self->cancel_hook = NULL;
-  self->cancel_hook_arg = NULL;
-  self->cancel_type = oldtype;
-  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
-      && self->cancel_pending;
-  __pthread_mutex_unlock (&self->cancel_lock);
-
-  if (cancelled)
-    {
-      if (ret == 0 && ctx.cancel_wake == 0)
-	/* We were cancelled while waking up with a token, put it back.  */
-	__sem_post (sem);
+error:
+  pthread_cleanup_pop (0);
 
-      __pthread_exit (PTHREAD_CANCELED);
-    }
+  __sem_wait_32_finish (isem);
+#endif
+
+  LIBC_CANCEL_RESET (cancel_oldtype);
 
   return ret;
 }
 
+#if !__HAVE_64B_ATOMICS
+/* Stop being a registered waiter (non-64b-atomics code only).  */
+static void
+__sem_wait_32_finish (struct new_sem *isem)
+{
+  unsigned int wguess = atomic_load_relaxed (&isem->nwaiters);
+  if (wguess == 1)
+    atomic_fetch_and_acquire (&isem->value, ~SEM_NWAITERS_MASK);
+
+  unsigned int wfinal = atomic_fetch_add_release (&isem->nwaiters, -1);
+  if (wfinal > 1 && wguess == 1)
+    {
+      unsigned int v = atomic_fetch_or_relaxed (&isem->value,
+						SEM_NWAITERS_MASK);
+      v >>= SEM_VALUE_SHIFT;
+      while (v--)
+	__lll_wake (&isem->value, isem->pshared ? GSYNC_SHARED : 0);
+    }
+}
+#endif
+
 int
 __sem_clockwait (sem_t *sem, clockid_t clockid,
 		 const struct timespec *restrict timeout)
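
The slow path registers as a waiter first, then blocks on the value word
via gsync, with a cleanup handler keeping the waiter accounting correct
on cancellation.  __sem_wait_32_finish handles the racy last-waiter case:
the presumed last waiter clears SEM_NWAITERS_MASK before decrementing
nwaiters, and if another waiter slipped in meanwhile it restores the bit
and wakes value-many threads so no wakeup is lost.  A usage sketch of the
public interface built on __sem_timedwait_internal (assuming an already
initialized semaphore s):

    struct timespec ts;
    clock_gettime (CLOCK_MONOTONIC, &ts);
    ts.tv_sec += 1;			/* Absolute deadline, 1s from now.  */
    if (sem_clockwait (&s, CLOCK_MONOTONIC, &ts) != 0
        && errno == ETIMEDOUT)
      puts ("timed out");		/* EINTR is possible as well.  */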
diff --git a/sysdeps/htl/sem-trywait.c b/sysdeps/htl/sem-trywait.c
index 6a0633bfef..b9301963ab 100644
--- a/sysdeps/htl/sem-trywait.c
+++ b/sysdeps/htl/sem-trywait.c
@@ -24,18 +24,13 @@
 int
 __sem_trywait (sem_t *sem)
 {
-  __pthread_spin_wait (&sem->__lock);
-  if (sem->__value > 0)
-    /* Successful down.  */
-    {
-      sem->__value--;
-      __pthread_spin_unlock (&sem->__lock);
-      return 0;
-    }
-  __pthread_spin_unlock (&sem->__lock);
+  struct new_sem *isem = (struct new_sem *) sem;
+
+  if (__sem_waitfast (isem, 1) == 0)
+    return 0;
 
   errno = EAGAIN;
   return -1;
 }
 
-strong_alias (__sem_trywait, sem_trywait);
+weak_alias (__sem_trywait, sem_trywait);
diff --git a/sysdeps/htl/sem-waitfast.c b/sysdeps/htl/sem-waitfast.c
new file mode 100644
index 0000000000..7ece73da26
--- /dev/null
+++ b/sysdeps/htl/sem-waitfast.c
@@ -0,0 +1,55 @@
+/* Lock a semaphore if it does not require blocking.  Generic version.
+   Copyright (C) 2005-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library;  if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <semaphore.h>
+#include <errno.h>
+
+#include <pt-internal.h>
+
+int
+__sem_waitfast (struct new_sem *isem, int definitive_result)
+{
+#if __HAVE_64B_ATOMICS
+  uint64_t d = atomic_load_relaxed (&isem->data);
+
+  do
+    {
+      if ((d & SEM_VALUE_MASK) == 0)
+	break;
+      if (atomic_compare_exchange_weak_acquire (&isem->data, &d, d - 1))
+	/* Successful down.  */
+	return 0;
+    }
+  while (definitive_result);
+  return -1;
+#else
+  unsigned int v = atomic_load_relaxed (&isem->value);
+
+  do
+    {
+      if ((v >> SEM_VALUE_SHIFT) == 0)
+	break;
+      if (atomic_compare_exchange_weak_acquire (&isem->value,
+	    &v, v - (1 << SEM_VALUE_SHIFT)))
+	/* Successful down.  */
+	return 0;
+    }
+  while (definitive_result);
+  return -1;
+#endif
+}