Diffstat (limited to 'sysdeps/unix')
 sysdeps/unix/sysv/linux/powerpc/elision-lock.c    | 10 +++++++---
 sysdeps/unix/sysv/linux/powerpc/elision-trylock.c |  7 ++++---
 sysdeps/unix/sysv/linux/powerpc/elision-unlock.c  | 15 +++++++++------
 3 files changed, 20 insertions(+), 12 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
index 45894916c4..f7a5cbcd3a 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -45,7 +45,9 @@
 int
 __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 {
-  if (*adapt_count > 0)
+  /* adapt_count is accessed concurrently but is just a hint.  Thus,
+     use atomic accesses but relaxed MO is sufficient.  */
+  if (atomic_load_relaxed (adapt_count) > 0)
     {
       goto use_lock;
     }
@@ -67,7 +69,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
       if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
         {
           if (aconf.skip_lock_internal_abort > 0)
-            *adapt_count = aconf.skip_lock_internal_abort;
+            atomic_store_relaxed (adapt_count,
+                                  aconf.skip_lock_internal_abort);
           goto use_lock;
         }
     }
@@ -75,7 +78,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 
   /* Fall back to locks for a bit if retries have been exhausted */
   if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
-    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
+    atomic_store_relaxed (adapt_count,
+                          aconf.skip_lock_out_of_tbegin_retries);
 
 use_lock:
   return LLL_LOCK ((*lock), pshared);
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
index 1e5cbe8610..ed244d3f12 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
@@ -34,7 +34,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
   __libc_tabort (_ABORT_NESTED_TRYLOCK);
 
   /* Only try a transaction if it's worth it.  */
-  if (*adapt_count > 0)
+  if (atomic_load_relaxed (adapt_count) > 0)
     {
       goto use_lock;
     }
@@ -49,7 +49,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
       __libc_tend (0);
 
       if (aconf.skip_lock_busy > 0)
-        *adapt_count = aconf.skip_lock_busy;
+        atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
     }
   else
     {
@@ -59,7 +59,8 @@ __lll_trylock_elision (int *futex, short *adapt_count)
              result in another failure.  Use normal locking now and
              for the next couple of calls.  */
           if (aconf.skip_trylock_internal_abort > 0)
-            *adapt_count = aconf.skip_trylock_internal_abort;
+            atomic_store_relaxed (adapt_count,
+                                  aconf.skip_trylock_internal_abort);
         }
     }
 
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
index 6f45a9c006..759c1464a7 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
@@ -28,13 +28,16 @@ __lll_unlock_elision (int *lock, short *adapt_count, int pshared)
     __libc_tend (0);
   else
     {
-      lll_unlock ((*lock), pshared);
+      /* Update adapt_count in the critical section to prevent a
+         write-after-destroy error as mentioned in BZ 20822.  The
+         following update of adapt_count has to be contained within
+         the critical region of the fall-back lock in order to not violate
+         the mutex destruction requirements.  */
+      short __tmp = atomic_load_relaxed (adapt_count);
+      if (__tmp > 0)
+        atomic_store_relaxed (adapt_count, __tmp--);
 
-      /* Update the adapt count AFTER completing the critical section.
-         Doing this here prevents unneeded stalling when entering
-         a critical section.  Saving about 8% runtime on P8.  */
-      if (*adapt_count > 0)
-        (*adapt_count)--;
+      lll_unlock ((*lock), pshared);
     }
   return 0;
 }
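
The elision-unlock.c hunk moves the adapt_count update before lll_unlock because POSIX allows a mutex to be destroyed as soon as it is unlocked and no thread is blocked on it; any store the unlocking thread performs after releasing the fall-back lock can therefore land in freed memory (BZ 20822). The removed comment records what the old ordering bought: roughly 8% runtime on POWER8 by keeping the store out of the critical section. Below is a minimal sketch of the usage pattern that makes the write-after-destroy observable; struct obj and put_obj are hypothetical names for illustration, not glibc code.

/* Minimal sketch, not glibc code: a reference-counted object whose last
   user destroys and frees the mutex.  POSIX permits the destroy as soon
   as the mutex is unlocked and no thread is blocked on it, so a store to
   adapt_count performed after the unlock can hit freed memory.  */
#include <pthread.h>
#include <stdlib.h>

struct obj
{
  pthread_mutex_t lock;
  int refcount;
};

static void
put_obj (struct obj *o)
{
  pthread_mutex_lock (&o->lock);
  int last = (--o->refcount == 0);
  /* If unlock wrote to the mutex (e.g. an elision adapt_count) after
     releasing it, that write could race with the destroy/free below,
     performed by whichever caller turns out to be the last one.  */
  pthread_mutex_unlock (&o->lock);
  if (last)
    {
      pthread_mutex_destroy (&o->lock);
      free (o);
    }
}

With the patch, the adapt_count store happens while the fall-back lock is still held, so it is ordered before any other thread can acquire, unlock, and then destroy the mutex.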