-rw-r--r--   linuxthreads/ChangeLog                 | 13
-rw-r--r--   linuxthreads/condvar.c                 |  6
-rw-r--r--   linuxthreads/internals.h               |  1
-rw-r--r--   linuxthreads/pthread.c                 |  8
-rw-r--r--   linuxthreads/semaphore.c               | 62
-rw-r--r--   linuxthreads/sysdeps/pthread/pthread.h |  4
6 files changed, 74 insertions(+), 20 deletions(-)
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index 880de6a0b9..b0ae0eea25 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,5 +1,18 @@
 2000-07-25  Ulrich Drepper  <drepper@redhat.com>
 
+	* internals.h (struct __pthread_descr_struct): Add p_sem_avail.
+	* semaphore.c: Handle spurious wakeups.
+
+	* sysdeps/pthread/pthread.h: Add back PTHREAD_MUTEX_FAST_NP as an alias
+	for PTHREAD_MUTEX_ADAPTIVE_NP for source code compatibility.
+
+	* pthread.c (__pthread_set_own_extricate): Use THREAD_GETMEM.
+	(__pthread_wait_for_restart): Likewise.
+
+	* condvar.c (pthread_cond_wait): Also check whether thread is
+	cancelable before aborting loop.
+	(pthread_cond_timedwait): Likewise.
+
 	* signals.c (pthread_sighandler): Remove special code to restore
 	%gs on x86.
 	(pthread_sighandler_t): Likewise.
diff --git a/linuxthreads/condvar.c b/linuxthreads/condvar.c
index ae1cef1ea9..f9c46a3316 100644
--- a/linuxthreads/condvar.c
+++ b/linuxthreads/condvar.c
@@ -103,7 +103,8 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
     {
       suspend(self);
       if (THREAD_GETMEM(self, p_condvar_avail) == 0
-          && THREAD_GETMEM(self, p_woken_by_cancel) == 0)
+          && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+              || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
         {
           /* Count resumes that don't belong to us. */
           spurious_wakeup_count++;
@@ -196,7 +197,8 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
         }
 
       if (THREAD_GETMEM(self, p_condvar_avail) == 0
-          && THREAD_GETMEM(self, p_woken_by_cancel) == 0)
+          && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+              || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
         {
           /* Count resumes that don't belong to us. */
           spurious_wakeup_count++;
diff --git a/linuxthreads/internals.h b/linuxthreads/internals.h
index 2ed49295ee..93ec93620c 100644
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -168,6 +168,7 @@ struct _pthread_descr_struct {
                                            called on thread */
   char p_woken_by_cancel;       /* cancellation performed wakeup */
   char p_condvar_avail;         /* flag if conditional variable became avail */
+  char p_sem_avail;             /* flag if semaphore became available */
   pthread_extricate_if *p_extricate; /* See above */
   pthread_readlock_info *p_readlock_list;  /* List of readlock info structs */
   pthread_readlock_info *p_readlock_free;  /* Free list of structs */
diff --git a/linuxthreads/pthread.c b/linuxthreads/pthread.c
index 3a669c8dec..7195b2dedc 100644
--- a/linuxthreads/pthread.c
+++ b/linuxthreads/pthread.c
@@ -868,9 +868,9 @@ weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
 
 void __pthread_set_own_extricate_if(pthread_descr self, pthread_extricate_if *peif)
 {
-  __pthread_lock(self->p_lock, self);
+  __pthread_lock(THREAD_GETMEM(self, p_lock), self);
   THREAD_SETMEM(self, p_extricate, peif);
-  __pthread_unlock(self->p_lock);
+  __pthread_unlock(THREAD_GETMEM (self, p_lock));
 }
 
 /* Primitives for controlling thread execution */
@@ -881,10 +881,10 @@ void __pthread_wait_for_restart_signal(pthread_descr self)
 
   sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
   sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
+  THREAD_SETMEM(self, p_signal, 0);
   do {
-    self->p_signal = 0;
     sigsuspend(&mask);                   /* Wait for signal */
-  } while (self->p_signal !=__pthread_sig_restart );
+  } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);
 }
 
 #if !__ASSUME_REALTIME_SIGNALS
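The condvar.c hunks above tighten the library's wake-up filter: a resume is only treated as a cancellation wake-up when p_woken_by_cancel is set and cancellation is actually enabled; anything else counts as spurious and the thread goes back to sleep. Callers of the public API need the matching discipline, because POSIX allows pthread_cond_wait() to return without a corresponding signal. A minimal, self-contained sketch of that predicate-recheck loop follows (ordinary application code, not part of this patch; the waiter/signaller names are illustrative; builds with cc -pthread):

/* Application-level counterpart of the spurious-wakeup filtering done in
   condvar.c: the predicate ("ready") is re-checked in a loop, so a wakeup
   without a real state change simply sends the thread back to waiting. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready;                       /* protected by "lock" */

static void *waiter(void *arg)
{
  (void) arg;
  pthread_mutex_lock(&lock);
  while (!ready)                        /* tolerate spurious wakeups */
    pthread_cond_wait(&cond, &lock);
  pthread_mutex_unlock(&lock);
  puts("waiter: predicate is true");
  return NULL;
}

static void *signaller(void *arg)
{
  (void) arg;
  pthread_mutex_lock(&lock);
  ready = 1;                            /* make the predicate true ... */
  pthread_cond_signal(&cond);           /* ... then wake the waiter */
  pthread_mutex_unlock(&lock);
  return NULL;
}

int main(void)
{
  pthread_t t1, t2;
  pthread_create(&t1, NULL, waiter, NULL);
  pthread_create(&t2, NULL, signaller, NULL);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  return 0;
}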
diff --git a/linuxthreads/semaphore.c b/linuxthreads/semaphore.c
index a772ea5091..e5afce43eb 100644
--- a/linuxthreads/semaphore.c
+++ b/linuxthreads/semaphore.c
@@ -60,6 +60,7 @@ int __new_sem_wait(sem_t * sem)
   volatile pthread_descr self = thread_self();
   pthread_extricate_if extr;
   int already_canceled = 0;
+  int spurious_wakeup_count;
 
   /* Set up extrication interface */
   extr.pu_object = sem;
@@ -72,6 +73,7 @@ int __new_sem_wait(sem_t * sem)
     return 0;
   }
   /* Register extrication interface */
+  THREAD_SETMEM(self, p_sem_avail, 0);
   __pthread_set_own_extricate_if(self, &extr);
   /* Enqueue only if not already cancelled. */
   if (!(THREAD_GETMEM(self, p_canceled)
@@ -87,7 +89,20 @@ int __new_sem_wait(sem_t * sem)
   }
 
   /* Wait for sem_post or cancellation, or fall through if already canceled */
-  suspend(self);
+  spurious_wakeup_count = 0;
+  while (1)
+    {
+      suspend(self);
+      if (THREAD_GETMEM(self, p_sem_avail) == 0
+          && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+              || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
+        {
+          /* Count resumes that don't belong to us. */
+          spurious_wakeup_count++;
+          continue;
+        }
+      break;
+    }
   __pthread_set_own_extricate_if(self, 0);
 
   /* Terminate only if the wakeup came from cancellation. */
@@ -138,6 +153,8 @@ int __new_sem_post(sem_t * sem)
     } else {
       th = dequeue(&sem->__sem_waiting);
       __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      th->p_sem_avail = 1;
+      WRITE_MEMORY_BARRIER();
       restart(th);
     }
   } else {
@@ -195,6 +212,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
   pthread_descr self = thread_self();
   pthread_extricate_if extr;
   int already_canceled = 0;
+  int spurious_wakeup_count;
 
   __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
@@ -215,6 +233,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
   extr.pu_extricate_func = new_sem_extricate_func;
 
   /* Register extrication interface */
+  THREAD_SETMEM(self, p_sem_avail, 0);
   __pthread_set_own_extricate_if(self, &extr);
   /* Enqueue only if not already cancelled. */
   if (!(THREAD_GETMEM(self, p_canceled)
@@ -229,24 +248,39 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
     pthread_exit(PTHREAD_CANCELED);
   }
 
-  if (timedsuspend(self, abstime) == 0) {
-    int was_on_queue;
+  spurious_wakeup_count = 0;
+  while (1)
+    {
+      if (timedsuspend(self, abstime) == 0) {
+        int was_on_queue;
+
+        /* __pthread_lock will queue back any spurious restarts that
+           may happen to it. */
 
-    /* __pthread_lock will queue back any spurious restarts that
-       may happen to it. */
+        __pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
+        was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
+        __pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
 
-    __pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
-    was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
-    __pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
+        if (was_on_queue) {
+          __pthread_set_own_extricate_if(self, 0);
+          return ETIMEDOUT;
+        }
 
-    if (was_on_queue) {
-      __pthread_set_own_extricate_if(self, 0);
-      return ETIMEDOUT;
+        /* Eat the outstanding restart() from the signaller */
+        suspend(self);
+      }
+
+      if (THREAD_GETMEM(self, p_sem_avail) == 0
+          && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
+              || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
+        {
+          /* Count resumes that don't belong to us. */
+          spurious_wakeup_count++;
+          continue;
+        }
+      break;
     }
-    /* Eat the outstanding restart() from the signaller */
-    suspend(self);
-  }
   __pthread_set_own_extricate_if(self, 0);
 
   /* Terminate only if the wakeup came from cancellation. */
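The semaphore.c changes apply the same idea to semaphores: __new_sem_wait() and sem_timedwait() now loop around suspend()/timedsuspend(), and a wake-up is only accepted when __new_sem_post() has set p_sem_avail (published before restart() behind WRITE_MEMORY_BARRIER()) or the thread was woken for a cancellation it is allowed to act on. Nothing changes for callers. A rough usage sketch of the public interface follows; it assumes the POSIX-documented behaviour that sem_timedwait() fails with errno set to ETIMEDOUT on timeout and is not code from this patch:

/* One producer posts the semaphore; the main thread waits with a deadline. */
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static sem_t sem;

static void *producer(void *arg)
{
  (void) arg;
  sleep(1);                     /* pretend to do some work */
  sem_post(&sem);               /* wake exactly one waiter */
  return NULL;
}

int main(void)
{
  pthread_t th;
  struct timespec deadline;

  sem_init(&sem, 0, 0);         /* process-private, initial value 0 */
  pthread_create(&th, NULL, producer, NULL);

  clock_gettime(CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 5;         /* give up after five seconds */

  if (sem_timedwait(&sem, &deadline) == 0)
    puts("got the semaphore");
  else if (errno == ETIMEDOUT)
    puts("timed out");

  pthread_join(th, NULL);
  sem_destroy(&sem);
  return 0;
}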
diff --git a/linuxthreads/sysdeps/pthread/pthread.h b/linuxthreads/sysdeps/pthread/pthread.h
index dd27ae0678..b017007f80 100644
--- a/linuxthreads/sysdeps/pthread/pthread.h
+++ b/linuxthreads/sysdeps/pthread/pthread.h
@@ -88,6 +88,10 @@ enum
   PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP,
   PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL
 #endif
+#ifdef __USE_GNU
+  /* For compatibility.  */
+  , PTHREAD_MUTEX_FAST_NP = PTHREAD_MUTEX_ADAPTIVE_NP
+#endif
 };
 
 enum
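The pthread.h hunk restores PTHREAD_MUTEX_FAST_NP, guarded by __USE_GNU, as an alias for PTHREAD_MUTEX_ADAPTIVE_NP so that older sources which still spell the constant the old way keep compiling. A hedged, illustrative example of the kind of code this keeps working (any program that passes the old name to pthread_mutexattr_settype()):

/* Illustrative only: code that names PTHREAD_MUTEX_FAST_NP compiles again
   because the constant is re-exposed under __USE_GNU (in this patch as an
   alias for PTHREAD_MUTEX_ADAPTIVE_NP). */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main(void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t mutex;

  pthread_mutexattr_init(&attr);
  /* Old spelling of the mutex kind. */
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_FAST_NP);
  pthread_mutex_init(&mutex, &attr);
  pthread_mutexattr_destroy(&attr);

  pthread_mutex_lock(&mutex);
  puts("locked a PTHREAD_MUTEX_FAST_NP mutex");
  pthread_mutex_unlock(&mutex);
  pthread_mutex_destroy(&mutex);
  return 0;
}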