path: root/linuxthreads/cancel.c
author    Ulrich Drepper <drepper@redhat.com>  2000-01-05 02:09:12 +0000
committer Ulrich Drepper <drepper@redhat.com>  2000-01-05 02:09:12 +0000
commit    1d2fc9b3c59d0e83e04139ddf633731264b76ea2 (patch)
tree      c738cf2a40851dc25be2c252ba5dbb7f335b5e14 /linuxthreads/cancel.c
parent    f19f2b34439145daf300bf12789bbc61c8d4db28 (diff)
Redesigned how cancellation unblocks a thread from internal cancellation points (sem_wait, pthread_join, pthread_cond_{wait,timedwait}). Cancellation won't eat a signal in any of these functions (*required* by POSIX and Single Unix Spec!).
2000-01-03  Kaz Kylheku  <kaz@ashi.footprints.net>

	Redesigned how cancellation unblocks a thread from internal
	cancellation points (sem_wait, pthread_join,
	pthread_cond_{wait,timedwait}).
	Cancellation won't eat a signal in any of these functions
	(*required* by POSIX and Single Unix Spec!).
	* condvar.c: spontaneous wakeup on pthread_cond_timedwait won't eat a
	simultaneous condition variable signal (not required by POSIX
	or Single Unix Spec, but nice).
	* spinlock.c: __pthread_lock queues back any received restarts
	that don't belong to it instead of assuming ownership of lock
	upon any restart; fastlock can no longer be acquired by two threads
	simultaneously.
	* restart.h: restarts queue even on kernels that don't have
	queued real time signals (2.0, early 2.1), thanks to atomic counter,
	avoiding a rare race condition in pthread_cond_timedwait.
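
The ChangeLog refers to an extrication interface that a blocking primitive registers before it suspends; the diff below only shows the pthread_cancel() side, which calls pu_extricate_func through p_extricate. A minimal sketch of that interface, assuming simplified types: the field names pu_object and pu_extricate_func come from the diff, while the struct layout, the wait queue, and the semaphore callback are illustrative rather than the actual linuxthreads declarations.

/* Sketch only: pu_object and pu_extricate_func match the names used in the
   diff below; the rest is an assumed simplification of the declarations in
   linuxthreads internals.h. */
struct waiter;                                 /* stands in for pthread_descr */

typedef struct pthread_extricate_if {
  void *pu_object;                             /* object the thread waits on  */
  int (*pu_extricate_func)(void *obj, struct waiter *th);
                                 /* returns 1 if it dequeued TH, 0 otherwise  */
} pthread_extricate_if;

/* Hypothetical wait queue and extrication callback for a semaphore waiter.
   pthread_cancel() calls the callback with the handle lock held; it removes
   TH from the wait queue and reports whether TH was actually queued there. */
struct sketch_sem { struct waiter *waiting; };
static int remove_from_queue(struct waiter **q, struct waiter *th); /* hypothetical */

static int sem_extricate_func(void *obj, struct waiter *th)
{
  struct sketch_sem *sem = obj;
  return remove_from_queue(&sem->waiting, th);
}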
Diffstat (limited to 'linuxthreads/cancel.c')
-rw-r--r--  linuxthreads/cancel.c  44
1 file changed, 41 insertions(+), 3 deletions(-)
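
The restart.h change noted in the ChangeLog is what makes the dorestart path in the diff below reliable: restarts are counted, so one that arrives before the target thread actually suspends is not lost, even on kernels without queued real-time signals. A rough sketch of that counting idea, extending the simplified descriptor from the sketch above; the field name p_resume_count and the SIGUSR1 stand-in are assumptions, and the sketch ignores both signal-handler setup and the window between the decrement and sigsuspend(), which the real code has to handle.

#include <signal.h>
#include <stdatomic.h>
#include <sys/types.h>

/* Assumed, simplified thread descriptor; the real pthread_descr and the
   __pthread_sig_restart signal number live in linuxthreads internals. */
struct waiter {
  atomic_int p_resume_count;   /* assumed name; counts queued restarts        */
  pid_t      p_pid;
  sigset_t   p_suspend_mask;   /* mask with the restart signal unblocked      */
  int        p_canceled;       /* flags used by pthread_cancel() in the diff  */
  int        p_woken_by_cancel;
};

#define RESTART_SIGNAL SIGUSR1 /* stand-in for __pthread_sig_restart */

/* A restart that arrives before the waiter has suspended leaves the counter
   positive, so the waiter simply does not sleep: restarts effectively queue
   even without queued real-time signals. */
static void restart(struct waiter *th)
{
  if (atomic_fetch_add(&th->p_resume_count, 1) < 0)    /* waiter is suspended */
    kill(th->p_pid, RESTART_SIGNAL);
}

static void suspend(struct waiter *self)
{
  if (atomic_fetch_sub(&self->p_resume_count, 1) <= 0) /* nothing queued yet  */
    sigsuspend(&self->p_suspend_mask);       /* sleep until the restart signal */
}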
diff --git a/linuxthreads/cancel.c b/linuxthreads/cancel.c
index c45cac97a3..8fd8c1e60f 100644
--- a/linuxthreads/cancel.c
+++ b/linuxthreads/cancel.c
@@ -52,16 +52,54 @@ int pthread_cancel(pthread_t thread)
 {
   pthread_handle handle = thread_handle(thread);
   int pid;
+  int dorestart = 0;
+  pthread_descr th;
+  pthread_extricate_if *pextricate;
 
   __pthread_lock(&handle->h_lock, NULL);
   if (invalid_handle(handle, thread)) {
     __pthread_unlock(&handle->h_lock);
     return ESRCH;
   }
-  handle->h_descr->p_canceled = 1;
-  pid = handle->h_descr->p_pid;
+
+  th = handle->h_descr;
+
+  if (th->p_canceled) {
+    __pthread_unlock(&handle->h_lock);
+    return 0;
+  }
+
+  pextricate = th->p_extricate;
+  th->p_canceled = 1;
+  pid = th->p_pid;
+
+  /* If the thread has registered an extrication interface, then
+     invoke the interface. If it returns 1, then we succeeded in
+     dequeuing the thread from whatever waiting object it was enqueued
+     with. In that case, it is our responsibility to wake it up. 
+     And also to set the p_woken_by_cancel flag so the woken thread
+     can tell that it was woken by cancellation. */
+
+  if (pextricate != NULL) {
+    dorestart = pextricate->pu_extricate_func(pextricate->pu_object, th);
+    th->p_woken_by_cancel = dorestart;
+  }
+
   __pthread_unlock(&handle->h_lock);
-   kill(pid, __pthread_sig_cancel);
+
+  /* If the thread has suspended or is about to, then we unblock it by
+     issuing a restart, instead of a cancel signal. Otherwise we send
+     the cancel signal to unblock the thread from a cancellation point,
+     or to initiate asynchronous cancellation. The restart is needed so
+     we have proper accounting of restarts; suspend decrements the thread's
+     resume count, and restart() increments it.  This also means that suspend's
+     handling of the cancel signal is obsolete. */
+
+  if (dorestart)
+    restart(th);
+  else 
+    kill(pid, __pthread_sig_cancel);
+
   return 0;
 }
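
For context, a sketch of how a cancellation point such as sem_wait() might pair with the pthread_cancel() path above, under the same assumptions as the earlier sketches. The registration hook, the queue helper, and the omitted locking are all hypothetical; the real waiter code in linuxthreads takes the descriptor's spinlock around these steps and differs in detail.

#include <pthread.h>

/* Hypothetical helpers standing in for the real registration and queueing
   routines. */
static void set_own_extricate_if(struct waiter *self, pthread_extricate_if *peif);
static void enqueue(struct waiter **q, struct waiter *th);

static int sketch_sem_wait(struct sketch_sem *sem, struct waiter *self)
{
  pthread_extricate_if extr = { sem, sem_extricate_func };

  /* Register the extrication interface, queue ourselves, then suspend.  If
     pthread_cancel() dequeues us through the callback, it also sets
     p_woken_by_cancel and wakes us with restart() rather than a signal. */
  set_own_extricate_if(self, &extr);
  enqueue(&sem->waiting, self);
  suspend(self);
  set_own_extricate_if(self, NULL);

  /* Act on a deferred cancellation that woke us; the real sem_wait also
     loops and re-checks the semaphore count for ordinary wakeups, which
     this sketch leaves out. */
  if (self->p_canceled && self->p_woken_by_cancel) {
    self->p_woken_by_cancel = 0;
    pthread_exit(PTHREAD_CANCELED);
  }
  return 0;
}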