author    Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>  2017-01-03 17:16:02 -0200
committer Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>  2017-01-03 17:21:41 -0200
commit    e9a96ea1aca4ebaa7c86e8b83b766f118d689d0f
tree      c8171c3229049caaea73c8f5ee2f2f068c02669b
parent    daaff5cc793acdd4f8667fca3b901647b4c34363
powerpc: Fix write-after-destroy in lock elision [BZ #20822]
The update of *adapt_count after the release of the lock causes a race condition: thread A unlocks, thread B continues and destroys the mutex, and thread A then writes to *adapt_count inside the now-destroyed (and possibly freed) mutex.
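For illustration, here is a minimal, self-contained sketch of that interleaving (the program and its names are hypothetical; only standard pthread calls are used). POSIX guarantees it is safe to destroy a mutex as soon as it is unlocked, so a store to the mutex object issued after the releasing store can land in freed memory:

  #include <pthread.h>
  #include <stdlib.h>

  static pthread_mutex_t *m;

  /* Thread B: acquires the lock once A releases it, then legally
     destroys and frees the mutex.  */
  static void *
  thread_b (void *arg)
  {
    (void) arg;
    pthread_mutex_lock (m);
    pthread_mutex_unlock (m);
    pthread_mutex_destroy (m);
    free (m);                   /* The mutex memory is now gone.  */
    return NULL;
  }

  int
  main (void)
  {
    pthread_t b;
    m = malloc (sizeof *m);
    pthread_mutex_init (m, NULL);
    pthread_mutex_lock (m);     /* Thread A holds the lock.  */
    pthread_create (&b, NULL, thread_b, NULL);
    /* With the buggy elision code, this call released the lock and
       only afterwards stored to *adapt_count -- racing with thread
       B's destroy/free above.  */
    pthread_mutex_unlock (m);
    pthread_join (b, NULL);
    return 0;
  }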
Diffstat (limited to 'sysdeps/unix')
 sysdeps/unix/sysv/linux/powerpc/elision-lock.c    | 10 +++++++---
 sysdeps/unix/sysv/linux/powerpc/elision-trylock.c |  7 ++++---
 sysdeps/unix/sysv/linux/powerpc/elision-unlock.c  | 15 +++++++++------
 3 files changed, 20 insertions(+), 12 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
index 45894916c4..f7a5cbcd3a 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -45,7 +45,9 @@ int
 __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 {
-  if (*adapt_count > 0)
+  /* adapt_count is accessed concurrently but is just a hint.  Thus,
+     use atomic accesses but relaxed MO is sufficient.  */
+  if (atomic_load_relaxed (adapt_count) > 0)
     {
       goto use_lock;
     }
 
@@ -67,7 +69,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 	  if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
 	    {
 	      if (aconf.skip_lock_internal_abort > 0)
-		*adapt_count = aconf.skip_lock_internal_abort;
+		atomic_store_relaxed (adapt_count,
+				      aconf.skip_lock_internal_abort);
 	      goto use_lock;
 	    }
 	}
@@ -75,7 +78,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
   /* Fall back to locks for a bit if retries have been exhausted */
   if (aconf.try_tbegin > 0
       && aconf.skip_lock_out_of_tbegin_retries > 0)
-    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
+    atomic_store_relaxed (adapt_count,
+			  aconf.skip_lock_out_of_tbegin_retries);
 
 use_lock:
   return LLL_LOCK ((*lock), pshared);
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
index 1e5cbe8610..ed244d3f12 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
@@ -34,7 +34,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
     __libc_tabort (_ABORT_NESTED_TRYLOCK);
 
   /* Only try a transaction if it's worth it.  */
-  if (*adapt_count > 0)
+  if (atomic_load_relaxed (adapt_count) > 0)
     {
       goto use_lock;
     }
@@ -49,7 +49,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
 	  __libc_tend (0);
 
 	  if (aconf.skip_lock_busy > 0)
-	    *adapt_count = aconf.skip_lock_busy;
+	    atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
 	}
       else
 	{
@@ -59,7 +59,8 @@ __lll_trylock_elision (int *futex, short *adapt_count)
 	     result in another failure.  Use normal locking now and
 	     for the next couple of calls.  */
 	  if (aconf.skip_trylock_internal_abort > 0)
-	    *adapt_count = aconf.skip_trylock_internal_abort;
+	    atomic_store_relaxed (adapt_count,
+				  aconf.skip_trylock_internal_abort);
 	}
     }
 
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
index 6f45a9c006..759c1464a7 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
@@ -28,13 +28,16 @@ __lll_unlock_elision (int *lock, short *adapt_count, int pshared)
     __libc_tend (0);
   else
     {
-      lll_unlock ((*lock), pshared);
+      /* Update adapt_count in the critical section to prevent a
+	 write-after-destroy error as mentioned in BZ 20822.  The
+	 following update of adapt_count has to be contained within
+	 the critical region of the fall-back lock in order to not violate
+	 the mutex destruction requirements.  */
+      short __tmp = atomic_load_relaxed (adapt_count);
+      if (__tmp > 0)
+	atomic_store_relaxed (adapt_count, __tmp - 1);
 
-      /* Update the adapt count AFTER completing the critical section.
-	 Doing this here prevents unneeded stalling when entering
-	 a critical section.  Saving about 8% runtime on P8.  */
-      if (*adapt_count > 0)
-	(*adapt_count)--;
+      lll_unlock ((*lock), pshared);
     }
   return 0;
 }
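As the removed comment notes, the update had originally been placed after the unlock to avoid stalling the next lock acquisition, reportedly saving about 8% runtime on POWER8; the fix trades that optimization back for correctness. The ordering requirement can be restated in portable C11 atomics. The following sketch is hypothetical (not glibc's actual code) and ignores the futex wake-up that lll_unlock also performs:

  #include <stdatomic.h>

  /* Sketch: the releasing store must be the LAST access to the mutex
     object, so the adapt_count hint is updated while the lock is
     still held.  */
  static void
  unlock_fallback (atomic_int *lock, _Atomic short *adapt_count)
  {
    short hint = atomic_load_explicit (adapt_count, memory_order_relaxed);
    if (hint > 0)
      atomic_store_explicit (adapt_count, hint - 1, memory_order_relaxed);

    /* Release the lock.  A waiter may destroy the mutex immediately
       after observing this store, so nothing after this point may
       touch *lock or *adapt_count.  */
    atomic_store_explicit (lock, 0, memory_order_release);
  }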