author     Ulrich Drepper <drepper@redhat.com>  2000-01-05 02:09:12 +0000
committer  Ulrich Drepper <drepper@redhat.com>  2000-01-05 02:09:12 +0000
commit     1d2fc9b3c59d0e83e04139ddf633731264b76ea2 (patch)
tree       c738cf2a40851dc25be2c252ba5dbb7f335b5e14 /linuxthreads/spinlock.c
parent     f19f2b34439145daf300bf12789bbc61c8d4db28 (diff)
Redesigned how cancellation unblocks a thread from internal cancellation points (sem_wait, pthread_join, pthread_cond_{wait,timedwait}). Cancellation won't eat a signal in any of these functions (*required* by POSIX and Single Unix Spec!).
2000-01-03  Kaz Kylheku  <kaz@ashi.footprints.net>

	Redesigned how cancellation unblocks a thread from internal
	cancellation points (sem_wait, pthread_join,
	pthread_cond_{wait,timedwait}).
	Cancellation won't eat a signal in any of these functions
	(*required* by POSIX and Single Unix Spec!).
	* condvar.c: spontaneous wakeup on pthread_cond_timedwait won't eat a
	simultaneous condition variable signal (not required by POSIX
	or Single Unix Spec, but nice).
	* spinlock.c: __pthread_lock queues back any received restarts
	that don't belong to it instead of assuming ownership of lock
	upon any restart; fastlock can no longer be acquired by two threads
	simultaneously.
	* restart.h: restarts queue even on kernels that don't have
	queued real time signals (2.0, early 2.1), thanks to atomic counter,
	avoiding a rare race condition in pthread_cond_timedwait.
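
The restart.h change is what makes the spinlock.c loop in the diff below safe on kernels without queued real-time signals: each thread keeps an atomic resume counter, so a restart() delivered before the matching suspend() is banked rather than lost. Below is a minimal illustrative sketch of that counter idea. All names here (lw_thread, lw_init, lw_restart, lw_suspend, RESTART_SIG) are hypothetical stand-ins, not glibc internals; the real primitives live in restart.h and pthread.c.

/* Illustrative sketch only -- hypothetical names, not glibc code.
   Invariant: resume_count == restarts banked minus suspends waiting. */
#include <signal.h>
#include <stdatomic.h>
#include <sys/types.h>

#define RESTART_SIG SIGUSR1          /* stand-in for __pthread_sig_restart */

typedef struct {
  atomic_int resume_count;           /* restarts banked ahead of suspends */
  pid_t pid;                         /* kernel pid of the clone()d thread */
} lw_thread;

static void restart_handler(int sig) { (void) sig; /* just wake sigsuspend */ }

/* Run once per thread: catch RESTART_SIG but keep it blocked, so a signal
   arriving between the counter check and sigsuspend() stays pending
   instead of being dropped. */
static void lw_init(void)
{
  struct sigaction sa;
  sigset_t set;
  sa.sa_handler = restart_handler;
  sa.sa_flags = 0;
  sigemptyset(&sa.sa_mask);
  sigaction(RESTART_SIG, &sa, NULL);
  sigemptyset(&set);
  sigaddset(&set, RESTART_SIG);
  sigprocmask(SIG_BLOCK, &set, NULL);
}

static void lw_restart(lw_thread *th)
{
  /* Bank the wakeup first; send the signal only if the target is asleep
     (or committed to sleeping), i.e. the counter was negative. */
  if (atomic_fetch_add(&th->resume_count, 1) < 0)
    kill(th->pid, RESTART_SIG);
}

static void lw_suspend(lw_thread *self)
{
  /* Consume one banked restart; sleep only if none was pending.  (A
     production version would loop until a real restart arrives, since
     sigsuspend can also return on unrelated caught signals.) */
  if (atomic_fetch_sub(&self->resume_count, 1) <= 0) {
    sigset_t wait_mask;
    sigprocmask(SIG_SETMASK, NULL, &wait_mask);
    sigdelset(&wait_mask, RESTART_SIG);
    sigsuspend(&wait_mask);          /* atomically unblock and wait */
  }
}

With this, two restarts sent before a thread suspends are both consumed by its next two suspends. That is the "queuing" the ChangeLog refers to, and it is what closes the rare pthread_cond_timedwait race on 2.0 and early 2.1 kernels.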
Diffstat (limited to 'linuxthreads/spinlock.c')
-rw-r--r--  linuxthreads/spinlock.c  24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
index ce6ff9e310..b1a99d9753 100644
--- a/linuxthreads/spinlock.c
+++ b/linuxthreads/spinlock.c
@@ -40,6 +40,7 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
 				      pthread_descr self)
 {
   long oldstatus, newstatus;
+  int spurious_wakeup_count = 0;
 
   do {
     oldstatus = lock->__status;
@@ -56,7 +57,28 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
     }
   } while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
                              &lock->__spinlock));
-  if (oldstatus != 0) suspend(self);
+
+  /* Suspend with guard against spurious wakeup. 
+     This can happen in pthread_cond_timedwait_relative, when the thread
+     wakes up due to timeout and is still on the condvar queue, and then
+     locks the queue to remove itself. At that point it may still be on the
+     queue, and may be resumed by a condition signal. */
+
+  if (oldstatus != 0) {
+    for (;;) {
+      suspend(self);
+      if (self->p_nextlock != NULL) {
+	/* Count resumes that don't belong to us. */
+	spurious_wakeup_count++;
+	continue;
+      }
+      break;
+    }
+  }
+
+  /* Put back any resumes we caught that don't belong to us. */
+  while (spurious_wakeup_count--)
+    restart(self);
 }
 
 void internal_function __pthread_unlock(struct _pthread_fastlock * lock)
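
For illustration, here is how the loop added above composes with suspend()/restart()-style primitives, written against the hypothetical lw_* sketch after the ChangeLog (this mirrors the patched __pthread_lock, it is not glibc code). A thread woken while still queued on the lock knows the resume was aimed at something else, counts it, and re-posts it to itself once it really owns the lock.

/* Hypothetical mirror of the __pthread_lock loop in the diff above.
   queued_on points to the thread's wait-queue link, which becomes NULL
   once the unlocker dequeues it (like self->p_nextlock in the patch). */
static void lw_lock_wait(lw_thread *self, void *volatile *queued_on)
{
  int spurious_wakeup_count = 0;

  for (;;) {
    lw_suspend(self);
    if (*queued_on != NULL) {
      /* Still on the wait queue: this resume belonged to another
         event (e.g. a condition signal aimed at us).  Remember it. */
      spurious_wakeup_count++;
      continue;
    }
    break;                           /* dequeued: the lock is ours */
  }

  /* Hand back the wakeups we swallowed while waiting for the lock. */
  while (spurious_wakeup_count--)
    lw_restart(self);
}

Because the re-posted restarts are banked by the atomic counter, whatever wait the thread performs next (for instance back in pthread_cond_timedwait) still observes them, so no signal is eaten.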