author    Ulrich Drepper <drepper@redhat.com>    2000-01-05 02:09:12 +0000
committer Ulrich Drepper <drepper@redhat.com>    2000-01-05 02:09:12 +0000
commit    1d2fc9b3c59d0e83e04139ddf633731264b76ea2 (patch)
tree      c738cf2a40851dc25be2c252ba5dbb7f335b5e14 /linuxthreads/condvar.c
parent    f19f2b34439145daf300bf12789bbc61c8d4db28 (diff)
Redesigned how cancellation unblocks a thread from internal cancellation points (sem_wait, pthread_join, pthread_cond_{wait,timedwait}). Cancellation won't eat a signal in any of these functions (*required* by POSIX and Single Unix Spec!).
2000-01-03  Kaz Kylheku  <kaz@ashi.footprints.net>

	Redesigned how cancellation unblocks a thread from internal
	cancellation points (sem_wait, pthread_join,
	pthread_cond_{wait,timedwait}).
	Cancellation won't eat a signal in any of these functions
	(*required* by POSIX and Single Unix Spec!).
	* condvar.c: spontaneous wakeup on pthread_cond_timedwait won't eat a
	simultaneous condition variable signal (not required by POSIX
	or Single Unix Spec, but nice).
	* spinlock.c: __pthread_lock queues back any received restarts
	that don't belong to it instead of assuming ownership of lock
	upon any restart; fastlock can no longer be acquired by two threads
	simultaneously.
	* restart.h: restarts queue even on kernels that don't have
	queued real time signals (2.0, early 2.1), thanks to atomic counter,
	avoiding a rare race condition in pthread_cond_timedwait.
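To make the restart-counting scheme above concrete, here is a minimal, hypothetical sketch (not the glibc code) of how an atomic per-thread counter lets restarts effectively queue even when the restart signal itself does not: the signalling side records the restart before sending the signal, and the waiter consumes any recorded restart before committing to sleep. The names resume_count, post_restart and try_consume_restart are invented for this illustration; the real code works on the thread descriptor's p_resume_count field together with the restart machinery in restart.h and spinlock.c.

#include <stdatomic.h>
#include <stdbool.h>

/* One counter per waiting thread: a positive value means a restart has
   been posted but not yet consumed. */
static _Atomic int resume_count;

/* Signaller side: record the restart before delivering the (non-queuing)
   restart signal, so it cannot be lost if the waiter is not asleep yet. */
static void post_restart(void)
{
  atomic_fetch_add(&resume_count, 1);
  /* ... then send __pthread_sig_restart to the target thread ... */
}

/* Waiter side, called before committing to sleep: consume a pending
   restart if one was posted.  Returns true if the waiter must not sleep,
   because its wakeup has already happened. */
static bool try_consume_restart(void)
{
  int pending = atomic_load(&resume_count);
  while (pending > 0) {
    if (atomic_compare_exchange_weak(&resume_count, &pending, pending - 1))
      return true;   /* restart consumed; skip the sleep */
  }
  return false;      /* nothing pending; safe to block */
}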
Diffstat (limited to 'linuxthreads/condvar.c')
-rw-r--r--  linuxthreads/condvar.c  324
1 file changed, 278 insertions, 46 deletions
diff --git a/linuxthreads/condvar.c b/linuxthreads/condvar.c
index 2ea7513c68..87a93a9115 100644
--- a/linuxthreads/condvar.c
+++ b/linuxthreads/condvar.c
@@ -25,6 +25,22 @@
 #include "queue.h"
 #include "restart.h"
 
+static int pthread_cond_timedwait_relative_old(pthread_cond_t *,
+    pthread_mutex_t *, const struct timespec *);
+
+static int pthread_cond_timedwait_relative_new(pthread_cond_t *,
+    pthread_mutex_t *, const struct timespec *);
+
+static int (*pthread_cond_tw_rel)(pthread_cond_t *, pthread_mutex_t *,
+    const struct timespec *) = pthread_cond_timedwait_relative_old;
+
+/* initialize this module */
+void __pthread_init_condvar(int rt_sig_available)
+{
+  if (rt_sig_available)
+    pthread_cond_tw_rel = pthread_cond_timedwait_relative_new;
+}
+
 int pthread_cond_init(pthread_cond_t *cond,
                       const pthread_condattr_t *cond_attr)
 {
@@ -39,54 +55,125 @@ int pthread_cond_destroy(pthread_cond_t *cond)
   return 0;
 }
 
+/* Function called by pthread_cancel to remove the thread from
+   waiting on a condition variable queue. */
+
+static int cond_extricate_func(void *obj, pthread_descr th)
+{
+  volatile pthread_descr self = thread_self();
+  pthread_cond_t *cond = obj;
+  int did_remove = 0;
+
+  __pthread_lock(&cond->__c_lock, self);
+  did_remove = remove_from_queue(&cond->__c_waiting, th);
+  __pthread_unlock(&cond->__c_lock);
+
+  return did_remove;
+}
+
 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
   volatile pthread_descr self = thread_self();
+  pthread_extricate_if extr;
+  int already_canceled = 0;
+
+  /* Set up extrication interface */
+  extr.pu_object = cond;
+  extr.pu_extricate_func = cond_extricate_func;
+
+  /* Register extrication interface */
+  __pthread_set_own_extricate_if(self, &extr);
+
+  /* Atomically enqueue thread for waiting, but only if it is not
+     canceled. If the thread is canceled, then it will fall through the
+     suspend call below, and then call pthread_exit without
+     having to worry about whether it is still on the condition variable queue.
+     This depends on pthread_cancel setting p_canceled before calling the
+     extricate function. */
 
   __pthread_lock(&cond->__c_lock, self);
-  enqueue(&cond->__c_waiting, self);
+  if (!(THREAD_GETMEM(self, p_canceled)
+      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+    enqueue(&cond->__c_waiting, self);
+  else
+    already_canceled = 1;
   __pthread_unlock(&cond->__c_lock);
+
+  if (already_canceled) {
+    __pthread_set_own_extricate_if(self, 0);
+    pthread_exit(PTHREAD_CANCELED);
+  }
+
   pthread_mutex_unlock(mutex);
-  suspend_with_cancellation(self);
-  pthread_mutex_lock(mutex);
-  /* This is a cancellation point */
-  if (THREAD_GETMEM(self, p_canceled)
+
+  suspend(self);
+  __pthread_set_own_extricate_if(self, 0);
+
+  /* Check for cancellation again, to provide correct cancellation
+     point behavior */
+
+  if (THREAD_GETMEM(self, p_woken_by_cancel)
       && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
-    /* Remove ourselves from the waiting queue if we're still on it */
-    __pthread_lock(&cond->__c_lock, self);
-    remove_from_queue(&cond->__c_waiting, self);
-    __pthread_unlock(&cond->__c_lock);
+    THREAD_SETMEM(self, p_woken_by_cancel, 0);
+    pthread_mutex_lock(mutex);
     pthread_exit(PTHREAD_CANCELED);
   }
+
+  pthread_mutex_lock(mutex);
   return 0;
 }
 
+/* The following function is used on kernels that don't have rt signals.
+   SIGUSR1 is used as the restart signal. The different code is needed
+   because that ordinary signal does not queue. */
+
 static int
-pthread_cond_timedwait_relative(pthread_cond_t *cond,
+pthread_cond_timedwait_relative_old(pthread_cond_t *cond,
 				pthread_mutex_t *mutex,
 				const struct timespec * reltime)
 {
   volatile pthread_descr self = thread_self();
   sigset_t unblock, initial_mask;
-  int retsleep;
+  int retsleep, already_canceled, was_signalled;
   sigjmp_buf jmpbuf;
+  pthread_extricate_if extr;
+
+requeue_and_wait_again:
 
-  /* Wait on the condition */
+  retsleep = 0;
+  already_canceled = 0;
+  was_signalled = 0;
+
+  /* Set up extrication interface */
+  extr.pu_object = cond;
+  extr.pu_extricate_func = cond_extricate_func;
+
+  /* Register extrication interface */
+  __pthread_set_own_extricate_if(self, &extr);
+
+  /* Enqueue to wait on the condition and check for cancellation. */
   __pthread_lock(&cond->__c_lock, self);
-  enqueue(&cond->__c_waiting, self);
+  if (!(THREAD_GETMEM(self, p_canceled)
+      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+    enqueue(&cond->__c_waiting, self);
+  else
+    already_canceled = 1;
   __pthread_unlock(&cond->__c_lock);
+
+  if (already_canceled) {
+    __pthread_set_own_extricate_if(self, 0);
+    pthread_exit(PTHREAD_CANCELED);
+  }
+
   pthread_mutex_unlock(mutex);
- continue_waiting:
-  /* Set up a longjmp handler for the restart and cancel signals */
-  if (sigsetjmp(jmpbuf, 1) == 0) {
-    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
-    THREAD_SETMEM(self, p_cancel_jmp, &jmpbuf);
-    THREAD_SETMEM(self, p_signal, 0);
-    /* Check for cancellation */
-    if (THREAD_GETMEM(self, p_canceled)
-	&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
-      retsleep = -1;
-    } else {
+
+  if (atomic_decrement(&self->p_resume_count) == 0) {
+    /* Set up a longjmp handler for the restart signal, unblock
+       the signal and sleep. */
+
+    if (sigsetjmp(jmpbuf, 1) == 0) {
+      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
+      THREAD_SETMEM(self, p_signal, 0);
       /* Unblock the restart signal */
       sigemptyset(&unblock);
       sigaddset(&unblock, __pthread_sig_restart);
@@ -95,37 +182,180 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
       retsleep = __libc_nanosleep(reltime, NULL);
       /* Block the restart signal again */
       sigprocmask(SIG_SETMASK, &initial_mask, NULL);
+      was_signalled = 0;
+    } else {
+      retsleep = -1;
+      was_signalled = 1;
     }
-  } else {
-    retsleep = -1;
+    THREAD_SETMEM(self, p_signal_jmp, NULL);
   }
-  THREAD_SETMEM(self, p_signal_jmp, NULL);
-  THREAD_SETMEM(self, p_cancel_jmp, NULL);
-  /* Here, either the condition was signaled (self->p_signal != 0)
-                   or we got canceled (self->p_canceled != 0)
-                   or the timeout occurred (retsleep == 0)
-                   or another interrupt occurred (retsleep == -1) */
-  /* This is a cancellation point */
-  if (THREAD_GETMEM(self, p_canceled)
+
+  /* Now was_signalled is true if we exited the above code
+     due to the delivery of a restart signal.  In that case,
+     we know we have been dequeued and resumed and that the
+     resume count is balanced.  Otherwise, there are some
+     cases to consider. First, try to bump up the resume count
+     back to zero. If it goes to 1, it means restart() was
+     invoked on this thread. The signal must be consumed
+     and the count bumped down and everything is cool.
+     Otherwise, no restart was delivered yet, so we remove
+     the thread from the queue. If this succeeds, it's a clear
+     case of timeout. If we fail to remove from the queue, then we
+     must wait for a restart. */
+
+  if (!was_signalled) {
+    if (atomic_increment(&self->p_resume_count) != -1) {
+      __pthread_wait_for_restart_signal(self);
+      atomic_decrement(&self->p_resume_count); /* should be zero now! */
+    } else {
+      int was_on_queue;
+      __pthread_lock(&cond->__c_lock, self);
+      was_on_queue = remove_from_queue(&cond->__c_waiting, self);
+      __pthread_unlock(&cond->__c_lock);
+
+      if (was_on_queue) {
+	__pthread_set_own_extricate_if(self, 0);
+	pthread_mutex_lock(mutex);
+
+	if (retsleep == 0)
+	  return ETIMEDOUT;
+	/* Woken by a signal: resume waiting as
+	   required by Single Unix Specification. */
+	goto requeue_and_wait_again;
+      }
+
+      suspend(self);
+    }
+  }
+
+  __pthread_set_own_extricate_if(self, 0);
+
+  /* The remaining logic is the same as in other cancellable waits,
+     such as pthread_join, sem_wait or pthread_cond_wait. */
+
+  if (THREAD_GETMEM(self, p_woken_by_cancel)
       && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
-    __pthread_lock(&cond->__c_lock, self);
-    remove_from_queue(&cond->__c_waiting, self);
-    __pthread_unlock(&cond->__c_lock);
+    THREAD_SETMEM(self, p_woken_by_cancel, 0);
     pthread_mutex_lock(mutex);
     pthread_exit(PTHREAD_CANCELED);
   }
-  /* If not signaled: also remove ourselves and return an error code, but
-     only if the timeout has elapsed.  If not, just continue waiting. */
-  if (THREAD_GETMEM(self, p_signal) == 0) {
-    if (retsleep != 0)
-      goto continue_waiting;
+
+  pthread_mutex_lock(mutex);
+  return 0;
+}
+
+/* The following function is used on new (late 2.1 and 2.2 and higher) kernels
+   that have rt signals which queue. */
+
+static int
+pthread_cond_timedwait_relative_new(pthread_cond_t *cond,
+				pthread_mutex_t *mutex,
+				const struct timespec * reltime)
+{
+  volatile pthread_descr self = thread_self();
+  sigset_t unblock, initial_mask;
+  int retsleep, already_canceled, was_signalled;
+  sigjmp_buf jmpbuf;
+  pthread_extricate_if extr;
+
+ requeue_and_wait_again:
+
+  retsleep = 0;
+  already_canceled = 0;
+  was_signalled = 0;
+
+  /* Set up extrication interface */
+  extr.pu_object = cond;
+  extr.pu_extricate_func = cond_extricate_func;
+
+  /* Register extrication interface */
+  __pthread_set_own_extricate_if(self, &extr);
+
+  /* Enqueue to wait on the condition and check for cancellation. */
+  __pthread_lock(&cond->__c_lock, self);
+  if (!(THREAD_GETMEM(self, p_canceled)
+      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+    enqueue(&cond->__c_waiting, self);
+  else
+    already_canceled = 1;
+  __pthread_unlock(&cond->__c_lock);
+
+  if (already_canceled) {
+    __pthread_set_own_extricate_if(self, 0);
+    pthread_exit(PTHREAD_CANCELED);
+  }
+
+  pthread_mutex_unlock(mutex);
+
+  /* Set up a longjmp handler for the restart signal, unblock
+     the signal and sleep. */
+
+  if (sigsetjmp(jmpbuf, 1) == 0) {
+    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
+    THREAD_SETMEM(self, p_signal, 0);
+    /* Unblock the restart signal */
+    sigemptyset(&unblock);
+    sigaddset(&unblock, __pthread_sig_restart);
+    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
+    /* Sleep for the required duration */
+    retsleep = __libc_nanosleep(reltime, NULL);
+    /* Block the restart signal again */
+    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
+    was_signalled = 0;
+  } else {
+    retsleep = -1;
+    was_signalled = 1;
+  }
+  THREAD_SETMEM(self, p_signal_jmp, NULL);
+
+  /* Now was_signalled is true if we exited the above code
+     due to the delivery of a restart signal.  In that case,
+     everything is cool. We have been removed from the queue
+     by the other thread, and consumed its signal.
+
+     Otherwise this thread woke up spontaneously, or due to a signal other
+     than restart. The next thing to do is to try to remove the thread
+     from the queue. This may fail due to a race against another thread
+     trying to do the same. In the failed case, we know we were signalled,
+     and we may also have to consume a restart signal. */
+
+  if (!was_signalled) {
+    int was_on_queue;
+
+    /* __pthread_lock will queue back any spurious restarts that
+       may happen to it. */
+
     __pthread_lock(&cond->__c_lock, self);
-    remove_from_queue(&cond->__c_waiting, self);
+    was_on_queue = remove_from_queue(&cond->__c_waiting, self);
     __pthread_unlock(&cond->__c_lock);
+
+    if (was_on_queue) {
+      __pthread_set_own_extricate_if(self, 0);
+      pthread_mutex_lock(mutex);
+
+      if (retsleep == 0)
+	return ETIMEDOUT;
+      /* Woken by a signal: resume waiting as
+	 required by Single Unix Specification. */
+      goto requeue_and_wait_again;
+    }
+
+    /* Eat the outstanding restart() from the signaller */
+    suspend(self);
+  }
+
+  __pthread_set_own_extricate_if(self, 0);
+
+  /* The remaining logic is the same as in other cancellable waits,
+     such as pthread_join, sem_wait or pthread_cond_wait. */
+
+  if (THREAD_GETMEM(self, p_woken_by_cancel)
+      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
+    THREAD_SETMEM(self, p_woken_by_cancel, 0);
     pthread_mutex_lock(mutex);
-    return ETIMEDOUT;
+    pthread_exit(PTHREAD_CANCELED);
   }
-  /* Otherwise, return normally */
+
   pthread_mutex_lock(mutex);
   return 0;
 }
@@ -144,7 +374,9 @@ int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
     reltime.tv_sec -= 1;
   }
   if (reltime.tv_sec < 0) return ETIMEDOUT;
-  return pthread_cond_timedwait_relative(cond, mutex, &reltime);
+
+  /* Indirect call through pointer! */
+  return pthread_cond_tw_rel(cond, mutex, &reltime);
 }
 
 int pthread_cond_signal(pthread_cond_t *cond)
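
For context, a hedged caller-side sketch (not part of this patch) of how pthread_cond_timedwait is normally used: the caller passes an absolute deadline, which pthread_cond_timedwait converts to a relative sleep as in the last hunk above, and re-checks its predicate in a loop, which is what makes the spurious-wakeup and requeue behaviour described earlier invisible to applications. The names lock, cond, ready and wait_for_ready are invented for this example.

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready;

/* Wait until `ready` becomes nonzero or the absolute deadline passes.
   Returns 0 on success, ETIMEDOUT if the deadline expired first. */
static int wait_for_ready(const struct timespec *abstime)
{
  int err = 0;
  pthread_mutex_lock(&lock);
  while (!ready && err == 0)
    err = pthread_cond_timedwait(&cond, &lock, abstime);
  pthread_mutex_unlock(&lock);
  return ready ? 0 : err;
}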