about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog7
-rw-r--r--sysdeps/hppa/nptl/pthread_spin_init.c24
-rw-r--r--sysdeps/hppa/nptl/pthread_spin_unlock.c24
3 files changed, 47 insertions, 8 deletions
diff --git a/ChangeLog b/ChangeLog
index 72614feb82..acaf7a87a0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2016-01-06  John David Anglin  <dave.anglin@bell.net>
+
+	* sysdeps/hppa/nptl/pthread_spin_init.c (pthread_spin_init): Replace
+	asm stw with atomic_exchange_rel.  Add explanatory comment.
+	* sysdeps/hppa/nptl/pthread_spin_unlock.c (pthread_spin_unlock):
+	Likewise.
+
 2016-01-05  H.J. Lu  <hongjiu.lu@intel.com>
 
 	[BZ #19122]
diff --git a/sysdeps/hppa/nptl/pthread_spin_init.c b/sysdeps/hppa/nptl/pthread_spin_init.c
index 729d53f26f..2df0376d8b 100644
--- a/sysdeps/hppa/nptl/pthread_spin_init.c
+++ b/sysdeps/hppa/nptl/pthread_spin_init.c
@@ -20,9 +20,25 @@
 int
 pthread_spin_init (pthread_spinlock_t *lock, int pshared)
 {
-  int tmp = 0;
-  /* This should be a memory barrier to newer compilers */
-  __asm__ __volatile__ ("stw,ma %1,0(%0)"
-                        : : "r" (lock), "r" (tmp) : "memory");
+  /* CONCURRENCY NOTES:
+
+     The atomic_exchange_rel synchronizes-with the atomic_exchange_acq in
+     pthread_spin_lock.
+
+     On hppa we must not use a plain `stw` to reset the guard lock.  This
+     has to do with the kernel compare-and-swap helper that is used to
+     implement all of the atomic operations.
+
+     The kernel CAS helper uses its own internal locks and that means that
+     to create a true happens-before relationship between any two threads,
+     the second thread must observe the internal lock having a value of 0
+     (it must attempt to take the lock with ldcw).  This creates the
+     ordering required for a second thread to observe the effects of the
+     RMW of the kernel CAS helper in any other thread.
+
+     Therefore if a variable is used in an atomic macro it must always be
+     manipulated with atomic macros in order for memory ordering rules to
+     be preserved.  */
+  atomic_exchange_rel (lock, 0);
   return 0;
 }
diff --git a/sysdeps/hppa/nptl/pthread_spin_unlock.c b/sysdeps/hppa/nptl/pthread_spin_unlock.c
index 31162a7873..6e4d71ecf1 100644
--- a/sysdeps/hppa/nptl/pthread_spin_unlock.c
+++ b/sysdeps/hppa/nptl/pthread_spin_unlock.c
@@ -20,9 +20,25 @@
 int
 pthread_spin_unlock (pthread_spinlock_t *lock)
 {
-  int tmp = 0;
-  /* This should be a memory barrier to newer compilers */
-  __asm__ __volatile__ ("stw,ma %1,0(%0)"
-                        : : "r" (lock), "r" (tmp) : "memory");
+  /* CONCURRENCY NOTES:
+
+     The atomic_exchange_rel synchronizes-with the atomic_exchange_acq in
+     pthread_spin_lock.
+
+     On hppa we must not use a plain `stw` to reset the guard lock.  This
+     has to do with the kernel compare-and-swap helper that is used to
+     implement all of the atomic operations.
+
+     The kernel CAS helper uses its own internal locks and that means that
+     to create a true happens-before relationship between any two threads,
+     the second thread must observe the internal lock having a value of 0
+     (it must attempt to take the lock with ldcw).  This creates the
+     ordering required for a second thread to observe the effects of the
+     RMW of the kernel CAS helper in any other thread.
+
+     Therefore if a variable is used in an atomic macro it must always be
+     manipulated with atomic macros in order for memory ordering rules to
+     be preserved.  */
+  atomic_exchange_rel (lock, 0);
   return 0;
 }