Diffstat (limited to 'linuxthreads/semaphore.c')
-rw-r--r--  linuxthreads/semaphore.c  36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/linuxthreads/semaphore.c b/linuxthreads/semaphore.c
index bb681b3621..d3a1450978 100644
--- a/linuxthreads/semaphore.c
+++ b/linuxthreads/semaphore.c
@@ -33,7 +33,7 @@ int __new_sem_init(sem_t *sem, int pshared, unsigned int value)
     errno = ENOSYS;
     return -1;
   }
-  __pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_init_lock(&sem->__sem_lock);
   sem->__sem_value = value;
   sem->__sem_waiting = NULL;
   return 0;
@@ -48,9 +48,9 @@ static int new_sem_extricate_func(void *obj, pthread_descr th)
   sem_t *sem = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   did_remove = remove_from_queue(&sem->__sem_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   return did_remove;
 }
@@ -66,10 +66,10 @@ int __new_sem_wait(sem_t * sem)
   extr.pu_object = sem;
   extr.pu_extricate_func = new_sem_extricate_func;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     sem->__sem_value--;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return 0;
   }
   /* Register extrication interface */
@@ -81,7 +81,7 @@ int __new_sem_wait(sem_t * sem)
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -121,7 +121,7 @@ int __new_sem_trywait(sem_t * sem)
 {
   int retval;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
+  __pthread_lock(&sem->__sem_lock, NULL);
   if (sem->__sem_value == 0) {
     errno = EAGAIN;
     retval = -1;
@@ -129,7 +129,7 @@ int __new_sem_trywait(sem_t * sem)
     sem->__sem_value--;
     retval = 0;
   }
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
   return retval;
 }
 
@@ -140,19 +140,19 @@ int __new_sem_post(sem_t * sem)
   struct pthread_request request;
 
   if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
-    __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+    __pthread_lock(&sem->__sem_lock, self);
     if (sem->__sem_waiting == NULL) {
       if (sem->__sem_value >= SEM_VALUE_MAX) {
         /* Overflow */
         errno = ERANGE;
-        __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+        __pthread_unlock(&sem->__sem_lock);
         return -1;
       }
       sem->__sem_value++;
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
     } else {
       th = dequeue(&sem->__sem_waiting);
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
       th->p_sem_avail = 1;
       WRITE_MEMORY_BARRIER();
       restart(th);
@@ -214,17 +214,17 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
   int already_canceled = 0;
   int spurious_wakeup_count;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     --sem->__sem_value;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return 0;
   }
 
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
     /* The standard requires that if the function would block and the
        time value is illegal, the function returns with an error.  */
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return EINVAL;
   }
 
@@ -241,7 +241,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -257,9 +257,9 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
 	/* __pthread_lock will queue back any spurious restarts that
 	   may happen to it. */
 
-	__pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
+	__pthread_lock(&sem->__sem_lock, self);
 	was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
-	__pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
+	__pthread_unlock(&sem->__sem_lock);
 
 	if (was_on_queue) {
 	  __pthread_set_own_extricate_if(self, 0);
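
Every hunk above removes the same redundant cast: &sem->__sem_lock is handed
directly to __pthread_init_lock/__pthread_lock/__pthread_unlock, which only
works if __sem_lock is already declared as a struct _pthread_fastlock. The
sketch below shows the sem_t layout that the diff implies; the field names
inside struct _pthread_fastlock and the _pthread_descr_struct forward
declaration are illustrative assumptions, not copied from this tree (the real
definitions live in linuxthreads' bits/pthreadtypes.h and bits/semaphore.h).

/* Sketch only: the semaphore layout implied by the diff above.  */
struct _pthread_fastlock
{
  long int __status;            /* "free" or head of the waiting queue (assumed) */
  int __spinlock;               /* used by the compare-and-swap emulation (assumed) */
};

struct _pthread_descr_struct;   /* thread descriptor, opaque here */

typedef struct
{
  struct _pthread_fastlock __sem_lock;           /* already the lock type itself */
  int __sem_value;                               /* current semaphore count */
  struct _pthread_descr_struct *__sem_waiting;   /* queue of blocked threads */
} sem_t;

/* Hence &sem->__sem_lock already has type `struct _pthread_fastlock *' and
   needs no conversion before __pthread_lock()/__pthread_unlock().  */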