author     John David Anglin <dave.anglin@bell.net>	2014-05-04 14:02:30 -0400
committer  Mike Frysinger <vapier@gentoo.org>	2016-01-06 17:26:04 -0500
commit     d7f914848b7d5e9b11bbffd1fecc4659d4acdc2d (patch)
tree       b2d229d7551680a57ed3aba6086f941852a426fd
parent     db2f6f4794f722eac43b4b831f41abfc784fcfb5 (diff)
hppa: fix pthread spinlock
URL: https://bugs.debian.org/725508
 ChangeLog                               |  7 +++++++
 sysdeps/hppa/nptl/pthread_spin_init.c   | 24 ++++++++++++++++++++----
 sysdeps/hppa/nptl/pthread_spin_unlock.c | 24 ++++++++++++++++++++----
 3 files changed, 47 insertions(+), 8 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 72614feb82..acaf7a87a0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2016-01-06  John David Anglin  <dave.anglin@bell.net>
+
+	* sysdeps/hppa/nptl/pthread_spin_init.c (pthread_spin_init): Replace
+	asm stw with atomic_exchange_rel.  Add explanatory comment.
+	* sysdeps/hppa/nptl/pthread_spin_unlock.c (pthread_spin_unlock):
+	Likewise.
+
 2016-01-05  H.J. Lu  <hongjiu.lu@intel.com>
 
 	[BZ #19122]
diff --git a/sysdeps/hppa/nptl/pthread_spin_init.c b/sysdeps/hppa/nptl/pthread_spin_init.c
index 729d53f26f..2df0376d8b 100644
--- a/sysdeps/hppa/nptl/pthread_spin_init.c
+++ b/sysdeps/hppa/nptl/pthread_spin_init.c
@@ -20,9 +20,25 @@
 int
 pthread_spin_init (pthread_spinlock_t *lock, int pshared)
 {
-  int tmp = 0;
-  /* This should be a memory barrier to newer compilers */
-  __asm__ __volatile__ ("stw,ma %1,0(%0)"
-			: : "r" (lock), "r" (tmp) : "memory");
+  /* CONCURRENCY NOTES:
+
+     The atomic_exchange_rel synchronizes-with the atomic_exchange_acq in
+     pthread_spin_lock.
+
+     On hppa we must not use a plain `stw` to reset the guard lock.  This
+     has to do with the kernel compare-and-swap helper that is used to
+     implement all of the atomic operations.
+
+     The kernel CAS helper uses its own internal locks, which means that
+     to create a true happens-before relationship between any two threads,
+     the second thread must observe the internal lock having a value of 0
+     (it must attempt to take the lock with ldcw).  This creates the
+     ordering required for a second thread to observe the effects of the
+     RMW of the kernel CAS helper in any other thread.
+
+     Therefore, if a variable is used in an atomic macro it must always be
+     manipulated with atomic macros in order for memory ordering rules to
+     be preserved.  */
+  atomic_exchange_rel (lock, 0);
   return 0;
 }
diff --git a/sysdeps/hppa/nptl/pthread_spin_unlock.c b/sysdeps/hppa/nptl/pthread_spin_unlock.c
index 31162a7873..6e4d71ecf1 100644
--- a/sysdeps/hppa/nptl/pthread_spin_unlock.c
+++ b/sysdeps/hppa/nptl/pthread_spin_unlock.c
@@ -20,9 +20,25 @@
 int
 pthread_spin_unlock (pthread_spinlock_t *lock)
 {
-  int tmp = 0;
-  /* This should be a memory barrier to newer compilers */
-  __asm__ __volatile__ ("stw,ma %1,0(%0)"
-			: : "r" (lock), "r" (tmp) : "memory");
+  /* CONCURRENCY NOTES:
+
+     The atomic_exchange_rel synchronizes-with the atomic_exchange_acq in
+     pthread_spin_lock.
+
+     On hppa we must not use a plain `stw` to reset the guard lock.  This
+     has to do with the kernel compare-and-swap helper that is used to
+     implement all of the atomic operations.
+
+     The kernel CAS helper uses its own internal locks, which means that
+     to create a true happens-before relationship between any two threads,
+     the second thread must observe the internal lock having a value of 0
+     (it must attempt to take the lock with ldcw).  This creates the
+     ordering required for a second thread to observe the effects of the
+     RMW of the kernel CAS helper in any other thread.
+
+     Therefore, if a variable is used in an atomic macro it must always be
+     manipulated with atomic macros in order for memory ordering rules to
+     be preserved.  */
+  atomic_exchange_rel (lock, 0);
   return 0;
 }
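
For readers unfamiliar with the acquire/release pairing described in the
concurrency notes above, the following is a minimal, self-contained sketch of
the same idiom written with portable C11 stdatomic operations.  The
atomic_exchange_explicit calls stand in for glibc's internal
atomic_exchange_acq/atomic_exchange_rel macros; all names in the sketch are
illustrative and none of it is code from this commit.  The releasing exchange
in spin_unlock synchronizes-with the acquiring exchange in spin_lock, so the
next thread to take the lock observes every write made inside the previous
critical section.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int lock;		/* 0 = free, 1 = held.  */
static int counter;		/* Protected by LOCK.  */

static void
spin_lock (atomic_int *l)
{
  /* Acquire: spin until the exchange observes 0 (lock was free).  */
  while (atomic_exchange_explicit (l, 1, memory_order_acquire) != 0)
    continue;
}

static void
spin_unlock (atomic_int *l)
{
  /* Release: pairs with the acquiring exchange in spin_lock, analogous
     to the atomic_exchange_rel (lock, 0) in the patched functions.  */
  atomic_exchange_explicit (l, 0, memory_order_release);
}

static void *
worker (void *arg)
{
  (void) arg;
  for (int i = 0; i < 100000; i++)
    {
      spin_lock (&lock);
      counter++;		/* Ordered by the acquire/release pair.  */
      spin_unlock (&lock);
    }
  return NULL;
}

int
main (void)
{
  pthread_t t1, t2;
  pthread_create (&t1, NULL, worker, NULL);
  pthread_create (&t2, NULL, worker, NULL);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  printf ("counter = %d\n", counter);	/* Expect 200000.  */
  return 0;
}

Build with `cc -pthread sketch.c`.  The program prints counter = 200000 on
every run because each increment is ordered by the acquire/release pair.
Replacing the exchange in spin_unlock with a plain store (the moral
equivalent of the old `stw`) would forfeit that guarantee on a target whose
atomics are implemented through the kernel CAS helper, which is exactly the
failure mode this commit fixes.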