author    Ulrich Drepper <drepper@redhat.com>  2000-04-13 05:57:21 +0000
committer Ulrich Drepper <drepper@redhat.com>  2000-04-13 05:57:21 +0000
commit    d8d914df6806c6057b20c7311cad0bc2ac201c03 (patch)
tree      6d2512373ef92b0abbebd4e0d0761cdd9715ea0b /linuxthreads/semaphore.c
parent    b3ae0650bcff54f12d87f878000d4c488b365bf7 (diff)
Update.
	* sysdeps/pthread/pthread.h: Add prototypes for pthread_spin_init,
	pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock,
	and pthread_spin_unlock.
	* sysdeps/pthread/bits/pthreadtypes.h: Change struct _pthread_fastlock
	into pthread_spinlock_t.  Change all uses.
	* spinlock.c: Implement pthread_spin_lock.
	Rename __pthread_unlock to __pthread_spin_unlock and define weak
	alias for real name.
	Define pthread_spin_trylock, pthread_spin_init, and
	pthread_spin_destroy.
	Change all uses of _pthread_fastlock to pthread_spinlock_t.
	* spinlock.h: Rename __pthread_unlock to __pthread_spin_unlock.
	Change all uses of _pthread_fastlock to pthread_spinlock_t.
	* Versions [libpthread] (GLIBC_2.2): Add pthread_spin_init,
	pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock,
	and pthread_spin_unlock.
	* cancel.c: Use __pthread_spin_unlock instead of __pthread_unlock.
	Change all uses of _pthread_fastlock to pthread_spinlock_t.
	* condvar.c: Likewise.
	* internals.h: Likewise.
	* join.c: Likewise.
	* manager.c: Likewise.
	* mutex.c: Likewise.
	* pthread.c: Likewise.
	* rwlock.c: Likewise.
	* semaphore.c: Likewise.
	* signals.c: Likewise.
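
For reference, a minimal sketch of how the pthread_spin_* interface added
above is intended to be used (assumes a libc exporting the GLIBC_2.2
prototypes from <pthread.h>; error checking omitted for brevity):

	#include <pthread.h>

	static pthread_spinlock_t lock;
	static int counter;

	void
	example (void)
	{
	  /* Initialize a process-private spin lock.  */
	  pthread_spin_init (&lock, PTHREAD_PROCESS_PRIVATE);

	  /* Busy-wait until the lock is acquired, touch the shared
	     data, then release the lock.  */
	  pthread_spin_lock (&lock);
	  counter++;
	  pthread_spin_unlock (&lock);

	  /* pthread_spin_trylock returns 0 on success and EBUSY if the
	     lock is already held.  */
	  if (pthread_spin_trylock (&lock) == 0)
	    {
	      counter++;
	      pthread_spin_unlock (&lock);
	    }

	  pthread_spin_destroy (&lock);
	}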
Diffstat (limited to 'linuxthreads/semaphore.c')
-rw-r--r--  linuxthreads/semaphore.c | 36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/linuxthreads/semaphore.c b/linuxthreads/semaphore.c
index 3344d0d859..7775b5a7d3 100644
--- a/linuxthreads/semaphore.c
+++ b/linuxthreads/semaphore.c
@@ -33,7 +33,7 @@ int __new_sem_init(sem_t *sem, int pshared, unsigned int value)
     errno = ENOSYS;
     return -1;
   }
-  __pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_init_lock((pthread_spinlock_t *) &sem->__sem_lock);
   sem->__sem_value = value;
   sem->__sem_waiting = NULL;
   return 0;
@@ -48,9 +48,9 @@ static int new_sem_extricate_func(void *obj, pthread_descr th)
   sem_t *sem = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
   did_remove = remove_from_queue(&sem->__sem_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
 
   return did_remove;
 }
@@ -65,10 +65,10 @@ int __new_sem_wait(sem_t * sem)
   extr.pu_object = sem;
   extr.pu_extricate_func = new_sem_extricate_func;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     sem->__sem_value--;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
     return 0;
   }
   /* Register extrication interface */
@@ -79,7 +79,7 @@ int __new_sem_wait(sem_t * sem)
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -106,7 +106,7 @@ int __new_sem_trywait(sem_t * sem)
 {
   int retval;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
+  __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, NULL);
   if (sem->__sem_value == 0) {
     errno = EAGAIN;
     retval = -1;
@@ -114,7 +114,7 @@ int __new_sem_trywait(sem_t * sem)
     sem->__sem_value--;
     retval = 0;
   }
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
   return retval;
 }
 
@@ -125,19 +125,19 @@ int __new_sem_post(sem_t * sem)
   struct pthread_request request;
 
   if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
-    __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+    __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
     if (sem->__sem_waiting == NULL) {
       if (sem->__sem_value >= SEM_VALUE_MAX) {
         /* Overflow */
         errno = ERANGE;
-        __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+        __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
         return -1;
       }
       sem->__sem_value++;
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
     } else {
       th = dequeue(&sem->__sem_waiting);
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
       restart(th);
     }
   } else {
@@ -200,17 +200,17 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
   sigset_t unblock;
   sigset_t initial_mask;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     --sem->__sem_value;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
     return 0;
   }
 
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
     /* The standard requires that if the function would block and the
        time value is illegal, the function returns with an error.  */
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
     return EINVAL;
   }
 
@@ -226,7 +226,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -288,9 +288,9 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
     /* __pthread_lock will queue back any spurious restarts that
        may happen to it. */
 
-    __pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
+    __pthread_lock((pthread_spinlock_t *)&sem->__sem_lock, self);
     was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
-    __pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
+    __pthread_spin_unlock((pthread_spinlock_t *)&sem->__sem_lock);
 
     if (was_on_queue) {
       __pthread_set_own_extricate_if(self, 0);