about summary refs log tree commit diff
path: root/linuxthreads
diff options
context:
space:
mode:
Diffstat (limited to 'linuxthreads')
-rw-r--r--  linuxthreads/ChangeLog | 5
-rw-r--r--  linuxthreads/spinlock.c | 6
2 files changed, 8 insertions, 3 deletions
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index 9928956ec0..323de8ec51 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,3 +1,8 @@
+2000-07-04  Ulrich Drepper  <drepper@redhat.com>
+
+	* spinlock.c (__pthread_unlock): Properly place write barrier.
+	Patch by Kaz Kylheku <kaz@ashi.footprints.net>.
+
 2000-07-03  Ulrich Drepper  <drepper@redhat.com>
 
 	* spinlock.c: Replace fast spinlocks by adaptive spinlocks which are
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
index 02ab9a9613..a63c6535c9 100644
--- a/linuxthreads/spinlock.c
+++ b/linuxthreads/spinlock.c
@@ -34,7 +34,7 @@
 
    (status & 1) == 1: spinlock is taken and (status & ~1L) is a
                       pointer to the first waiting thread; other
-		      waiting threads are linked via the p_nextlock 
+		      waiting threads are linked via the p_nextlock
 		      field.
    (status & 1) == 0: same as above, but spinlock is not taken.
 
@@ -149,8 +149,8 @@ int __pthread_unlock(struct _pthread_fastlock * lock)
 #endif
 #if !defined HAS_COMPARE_AND_SWAP
   {
-    lock->__spinlock = 0;
     WRITE_MEMORY_BARRIER();
+    lock->__spinlock = 0;
     return 0;
   }
 #endif
@@ -160,7 +160,7 @@ again:
   oldstatus = lock->__status;
 
   while ((oldstatus = lock->__status) == 1) {
-    if (__compare_and_swap_with_release_semantics(&lock->__status, 
+    if (__compare_and_swap_with_release_semantics(&lock->__status,
 	oldstatus, 0))
       return 0;
   }