diff options
Diffstat (limited to 'linuxthreads')
-rw-r--r--  linuxthreads/cancel.c    |  2
-rw-r--r--  linuxthreads/condvar.c   | 14
-rw-r--r--  linuxthreads/join.c      |  8
-rw-r--r--  linuxthreads/manager.c   |  6
-rw-r--r--  linuxthreads/mutex.c     |  6
-rw-r--r--  linuxthreads/pthread.c   |  4
-rw-r--r--  linuxthreads/rwlock.c    | 17
-rw-r--r--  linuxthreads/semaphore.c | 11
-rw-r--r--  linuxthreads/signals.c   |  2
-rw-r--r--  linuxthreads/spinlock.c  | 20
10 files changed, 40 insertions, 50 deletions
diff --git a/linuxthreads/cancel.c b/linuxthreads/cancel.c index 3ff595418b..c45cac97a3 100644 --- a/linuxthreads/cancel.c +++ b/linuxthreads/cancel.c @@ -53,7 +53,7 @@ int pthread_cancel(pthread_t thread) pthread_handle handle = thread_handle(thread); int pid; - __pthread_lock(&handle->h_lock); + __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, thread)) { __pthread_unlock(&handle->h_lock); return ESRCH; diff --git a/linuxthreads/condvar.c b/linuxthreads/condvar.c index b880a38a73..cd22a241af 100644 --- a/linuxthreads/condvar.c +++ b/linuxthreads/condvar.c @@ -43,7 +43,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) { volatile pthread_descr self = thread_self(); - __pthread_lock(&cond->__c_lock); + __pthread_lock(&cond->__c_lock, self); enqueue(&cond->__c_waiting, self); __pthread_unlock(&cond->__c_lock); pthread_mutex_unlock(mutex); @@ -53,7 +53,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) if (THREAD_GETMEM(self, p_canceled) && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) { /* Remove ourselves from the waiting queue if we're still on it */ - __pthread_lock(&cond->__c_lock); + __pthread_lock(&cond->__c_lock, self); remove_from_queue(&cond->__c_waiting, self); __pthread_unlock(&cond->__c_lock); pthread_exit(PTHREAD_CANCELED); @@ -72,7 +72,7 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond, sigjmp_buf jmpbuf; /* Wait on the condition */ - __pthread_lock(&cond->__c_lock); + __pthread_lock(&cond->__c_lock, self); enqueue(&cond->__c_waiting, self); __pthread_unlock(&cond->__c_lock); pthread_mutex_unlock(mutex); @@ -107,7 +107,7 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond, /* This is a cancellation point */ if (THREAD_GETMEM(self, p_canceled) && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) { - __pthread_lock(&cond->__c_lock); + __pthread_lock(&cond->__c_lock, self); remove_from_queue(&cond->__c_waiting, self); __pthread_unlock(&cond->__c_lock); 
pthread_mutex_lock(mutex); @@ -115,7 +115,7 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond, } /* If not signaled: also remove ourselves and return an error code */ if (THREAD_GETMEM(self, p_signal) == 0) { - __pthread_lock(&cond->__c_lock); + __pthread_lock(&cond->__c_lock, self); remove_from_queue(&cond->__c_waiting, self); __pthread_unlock(&cond->__c_lock); pthread_mutex_lock(mutex); @@ -147,7 +147,7 @@ int pthread_cond_signal(pthread_cond_t *cond) { pthread_descr th; - __pthread_lock(&cond->__c_lock); + __pthread_lock(&cond->__c_lock, NULL); th = dequeue(&cond->__c_waiting); __pthread_unlock(&cond->__c_lock); if (th != NULL) restart(th); @@ -158,7 +158,7 @@ int pthread_cond_broadcast(pthread_cond_t *cond) { pthread_descr tosignal, th; - __pthread_lock(&cond->__c_lock); + __pthread_lock(&cond->__c_lock, NULL); /* Copy the current state of the waiting queue and empty it */ tosignal = cond->__c_waiting; cond->__c_waiting = NULL; diff --git a/linuxthreads/join.c b/linuxthreads/join.c index 482f0d1dcc..42eb033746 100644 --- a/linuxthreads/join.c +++ b/linuxthreads/join.c @@ -35,7 +35,7 @@ void pthread_exit(void * retval) __pthread_perform_cleanup(); __pthread_destroy_specifics(); /* Store return value */ - __pthread_lock(THREAD_GETMEM(self, p_lock)); + __pthread_lock(THREAD_GETMEM(self, p_lock), self); THREAD_SETMEM(self, p_retval, retval); /* Say that we've terminated */ THREAD_SETMEM(self, p_terminated, 1); @@ -65,7 +65,7 @@ int pthread_join(pthread_t thread_id, void ** thread_return) pthread_handle handle = thread_handle(thread_id); pthread_descr th; - __pthread_lock(&handle->h_lock); + __pthread_lock(&handle->h_lock, self); if (invalid_handle(handle, thread_id)) { __pthread_unlock(&handle->h_lock); return ESRCH; @@ -91,7 +91,7 @@ int pthread_join(pthread_t thread_id, void ** thread_return) th->p_joining = NULL; pthread_exit(PTHREAD_CANCELED); } - __pthread_lock(&handle->h_lock); + __pthread_lock(&handle->h_lock, self); } /* Get return value */ if 
(thread_return != NULL) *thread_return = th->p_retval; @@ -114,7 +114,7 @@ int pthread_detach(pthread_t thread_id) pthread_handle handle = thread_handle(thread_id); pthread_descr th; - __pthread_lock(&handle->h_lock); + __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, thread_id)) { __pthread_unlock(&handle->h_lock); return ESRCH; diff --git a/linuxthreads/manager.c b/linuxthreads/manager.c index b6107da1fb..eafff3f4a8 100644 --- a/linuxthreads/manager.c +++ b/linuxthreads/manager.c @@ -417,7 +417,7 @@ static void pthread_free(pthread_descr th) ASSERT(th->p_exited); /* Make the handle invalid */ handle = thread_handle(th->p_tid); - __pthread_lock(&handle->h_lock); + __pthread_lock(&handle->h_lock, NULL); handle->h_descr = NULL; handle->h_bottom = (char *)(-1L); __pthread_unlock(&handle->h_lock); @@ -452,7 +452,7 @@ static void pthread_exited(pid_t pid) th->p_nextlive->p_prevlive = th->p_prevlive; th->p_prevlive->p_nextlive = th->p_nextlive; /* Mark thread as exited, and if detached, free its resources */ - __pthread_lock(th->p_lock); + __pthread_lock(th->p_lock, NULL); th->p_exited = 1; detached = th->p_detached; __pthread_unlock(th->p_lock); @@ -494,7 +494,7 @@ static void pthread_handle_free(pthread_t th_id) pthread_handle handle = thread_handle(th_id); pthread_descr th; - __pthread_lock(&handle->h_lock); + __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, th_id)) { /* pthread_reap_children has deallocated the thread already, nothing needs to be done */ diff --git a/linuxthreads/mutex.c b/linuxthreads/mutex.c index d3ef78cfbe..7e5271b5ea 100644 --- a/linuxthreads/mutex.c +++ b/linuxthreads/mutex.c @@ -81,7 +81,7 @@ int __pthread_mutex_lock(pthread_mutex_t * mutex) switch(mutex->__m_kind) { case PTHREAD_MUTEX_FAST_NP: - __pthread_lock(&mutex->__m_lock); + __pthread_lock(&mutex->__m_lock, NULL); return 0; case PTHREAD_MUTEX_RECURSIVE_NP: self = thread_self(); @@ -89,14 +89,14 @@ int __pthread_mutex_lock(pthread_mutex_t * mutex) 
mutex->__m_count++; return 0; } - __pthread_lock(&mutex->__m_lock); + __pthread_lock(&mutex->__m_lock, self); mutex->__m_owner = self; mutex->__m_count = 0; return 0; case PTHREAD_MUTEX_ERRORCHECK_NP: self = thread_self(); if (mutex->__m_owner == self) return EDEADLK; - __pthread_lock(&mutex->__m_lock); + __pthread_lock(&mutex->__m_lock, self); mutex->__m_owner = self; return 0; default: diff --git a/linuxthreads/pthread.c b/linuxthreads/pthread.c index d0c66d0329..bd4ea5a8c2 100644 --- a/linuxthreads/pthread.c +++ b/linuxthreads/pthread.c @@ -394,7 +394,7 @@ int pthread_setschedparam(pthread_t thread, int policy, pthread_handle handle = thread_handle(thread); pthread_descr th; - __pthread_lock(&handle->h_lock); + __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, thread)) { __pthread_unlock(&handle->h_lock); return ESRCH; @@ -417,7 +417,7 @@ int pthread_getschedparam(pthread_t thread, int *policy, pthread_handle handle = thread_handle(thread); int pid, pol; - __pthread_lock(&handle->h_lock); + __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, thread)) { __pthread_unlock(&handle->h_lock); return ESRCH; diff --git a/linuxthreads/rwlock.c b/linuxthreads/rwlock.c index bff4e38fa8..1fb18a3f9e 100644 --- a/linuxthreads/rwlock.c +++ b/linuxthreads/rwlock.c @@ -57,7 +57,7 @@ pthread_rwlock_destroy (pthread_rwlock_t *rwlock) int readers; _pthread_descr writer; - __pthread_lock (&rwlock->__rw_lock); + __pthread_lock (&rwlock->__rw_lock, NULL); readers = rwlock->__rw_readers; writer = rwlock->__rw_writer; __pthread_unlock (&rwlock->__rw_lock); @@ -72,11 +72,11 @@ pthread_rwlock_destroy (pthread_rwlock_t *rwlock) int pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) { - pthread_descr self; + pthread_descr self = NULL; while (1) { - __pthread_lock (&rwlock->__rw_lock); + __pthread_lock (&rwlock->__rw_lock, self); if (rwlock->__rw_writer == NULL || (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP && rwlock->__rw_readers != 0)) @@ -84,7 +84,8 
@@ pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) break; /* Suspend ourselves, then try again */ - self = thread_self (); + if (self == NULL) + self = thread_self (); enqueue (&rwlock->__rw_read_waiting, self); __pthread_unlock (&rwlock->__rw_lock); suspend (self); /* This is not a cancellation point */ @@ -102,7 +103,7 @@ pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) { int result = EBUSY; - __pthread_lock (&rwlock->__rw_lock); + __pthread_lock (&rwlock->__rw_lock, NULL); if (rwlock->__rw_writer == NULL || (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP && rwlock->__rw_readers != 0)) @@ -123,7 +124,7 @@ pthread_rwlock_wrlock (pthread_rwlock_t *rwlock) while(1) { - __pthread_lock (&rwlock->__rw_lock); + __pthread_lock (&rwlock->__rw_lock, self); if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL) { rwlock->__rw_writer = self; @@ -144,7 +145,7 @@ pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) { int result = EBUSY; - __pthread_lock (&rwlock->__rw_lock); + __pthread_lock (&rwlock->__rw_lock, NULL); if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL) { rwlock->__rw_writer = thread_self (); @@ -162,7 +163,7 @@ pthread_rwlock_unlock (pthread_rwlock_t *rwlock) pthread_descr torestart; pthread_descr th; - __pthread_lock (&rwlock->__rw_lock); + __pthread_lock (&rwlock->__rw_lock, NULL); if (rwlock->__rw_writer != NULL) { /* Unlocking a write lock. 
*/ diff --git a/linuxthreads/semaphore.c b/linuxthreads/semaphore.c index af5f115a16..cb23a71a78 100644 --- a/linuxthreads/semaphore.c +++ b/linuxthreads/semaphore.c @@ -40,15 +40,14 @@ int sem_init(sem_t *sem, int pshared, unsigned int value) int sem_wait(sem_t * sem) { - volatile pthread_descr self; + volatile pthread_descr self = thread_self(); - __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock); + __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock, self); if (sem->sem_value > 0) { sem->sem_value--; __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock); return 0; } - self = thread_self(); enqueue(&sem->sem_waiting, self); /* Wait for sem_post or cancellation */ __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock); @@ -57,7 +56,7 @@ int sem_wait(sem_t * sem) if (THREAD_GETMEM(self, p_canceled) && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) { /* Remove ourselves from the waiting list if we're still on it */ - __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock); + __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock, self); remove_from_queue(&sem->sem_waiting, self); __pthread_unlock((struct _pthread_fastlock *) &sem->sem_lock); pthread_exit(PTHREAD_CANCELED); @@ -70,7 +69,7 @@ int sem_trywait(sem_t * sem) { int retval; - __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock); + __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock, NULL); if (sem->sem_value == 0) { errno = EAGAIN; retval = -1; @@ -88,7 +87,7 @@ int sem_post(sem_t * sem) struct pthread_request request; if (THREAD_GETMEM(self, p_in_sighandler) == NULL) { - __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock); + __pthread_lock((struct _pthread_fastlock *) &sem->sem_lock, self); if (sem->sem_waiting == NULL) { if (sem->sem_value >= SEM_VALUE_MAX) { /* Overflow */ diff --git a/linuxthreads/signals.c b/linuxthreads/signals.c index 5444ef73f3..e833778d53 100644 --- a/linuxthreads/signals.c +++ b/linuxthreads/signals.c @@ 
-53,7 +53,7 @@ int pthread_kill(pthread_t thread, int signo) pthread_handle handle = thread_handle(thread); int pid; - __pthread_lock(&handle->h_lock); + __pthread_lock(&handle->h_lock, NULL); if (invalid_handle(handle, thread)) { __pthread_unlock(&handle->h_lock); return ESRCH; diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c index 172cb7afe4..00a8691e5b 100644 --- a/linuxthreads/spinlock.c +++ b/linuxthreads/spinlock.c @@ -36,17 +36,18 @@ This is safe because there are no concurrent __pthread_unlock operations -- only the thread that locked the mutex can unlock it. */ -void __pthread_lock(struct _pthread_fastlock * lock) +void internal_function __pthread_lock(struct _pthread_fastlock * lock, + pthread_descr self) { long oldstatus, newstatus; - pthread_descr self = NULL; do { oldstatus = lock->__status; if (oldstatus == 0) { newstatus = 1; } else { - self = thread_self(); + if (self == NULL) + self = thread_self(); newstatus = (long) self; } if (self != NULL) @@ -56,18 +57,7 @@ void __pthread_lock(struct _pthread_fastlock * lock) if (oldstatus != 0) suspend(self); } -int __pthread_trylock(struct _pthread_fastlock * lock) -{ - long oldstatus; - - do { - oldstatus = lock->__status; - if (oldstatus != 0) return EBUSY; - } while(! compare_and_swap(&lock->__status, 0, 1, &lock->__spinlock)); - return 0; -} - -void __pthread_unlock(struct _pthread_fastlock * lock) +void internal_function __pthread_unlock(struct _pthread_fastlock * lock) { long oldstatus; pthread_descr thr, * ptr, * maxptr; |