author     Stefan Liebler <stli@linux.ibm.com>    2018-10-17 12:23:04 +0200
committer  Stefan Liebler <stli@linux.ibm.com>    2018-10-17 12:23:04 +0200
commit     403b4feb22dcbc85ace72a361d2a951380372471
tree       fe2235a8bce38cad1bbef3b4f28da82dc353e138  /nptl/pthread_mutex_timedlock.c
parent     ce5a7de6cd1479a1e78fda0db023bd4effa072a4
Fix race in pthread_mutex_lock while promoting to PTHREAD_MUTEX_ELISION_NP [BZ #23275]
The race leads either to pthread_mutex_destroy returning EBUSY or to an
assertion failure (see the description in Bugzilla).  This patch fixes the
race by ensuring that the elision path is used in all cases if elision is
enabled by the GLIBC_TUNABLES framework.

The __kind variable in struct __pthread_mutex_s is accessed concurrently.
Therefore we now use the atomic macros.

The new test case tst-mutex10 triggers the race on s390x and Intel.
Presumably it also does on POWER, but I don't have access to a POWER
machine with lock elision.  At least the code for POWER is the same as on
the other two architectures.

ChangeLog:

	[BZ #23275]
	* nptl/tst-mutex10.c: New file.
	* nptl/Makefile (tests): Add tst-mutex10.
	(tst-mutex10-ENV): New variable.
	* sysdeps/unix/sysv/linux/s390/force-elision.h (FORCE_ELISION):
	Ensure that elision path is used if elision is available.
	* sysdeps/unix/sysv/linux/powerpc/force-elision.h (FORCE_ELISION):
	Likewise.
	* sysdeps/unix/sysv/linux/x86/force-elision.h (FORCE_ELISION):
	Likewise.
	* nptl/pthreadP.h (PTHREAD_MUTEX_TYPE, PTHREAD_MUTEX_TYPE_ELISION)
	(PTHREAD_MUTEX_PSHARED): Use atomic_load_relaxed.
	* nptl/pthread_mutex_consistent.c (pthread_mutex_consistent):
	Likewise.
	* nptl/pthread_mutex_getprioceiling.c (pthread_mutex_getprioceiling):
	Likewise.
	* nptl/pthread_mutex_lock.c (__pthread_mutex_lock_full)
	(__pthread_mutex_cond_lock_adjust): Likewise.
	* nptl/pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling):
	Likewise.
	* nptl/pthread_mutex_timedlock.c (__pthread_mutex_timedlock):
	Likewise.
	* nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
	* sysdeps/nptl/bits/thread-shared-types.h (struct __pthread_mutex_s):
	Add comments.
	* nptl/pthread_mutex_destroy.c (__pthread_mutex_destroy): Use
	atomic_load_relaxed and atomic_store_relaxed.
	* nptl/pthread_mutex_init.c (__pthread_mutex_init): Use
	atomic_store_relaxed.
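For context, the failure mode can be pictured with a minimal stand-alone
sketch.  This is not the actual nptl/tst-mutex10.c and not a reliable
reproducer; the thread and iteration counts and the tunable setting
(e.g. GLIBC_TUNABLES=glibc.elision.enable=1, presumably what
tst-mutex10-ENV provides) are assumptions for illustration.  Several
threads lock and unlock a default-initialized mutex while FORCE_ELISION
may promote __kind concurrently; on an unfixed glibc the final
pthread_mutex_destroy can then fail with EBUSY.

/* Illustrative sketch only -- not nptl/tst-mutex10.c.  Build with
   -pthread and run with lock elision forced on via GLIBC_TUNABLES
   (assumed: glibc.elision.enable=1) on hardware with lock elision.  */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NTHREADS 4
#define ROUNDS 100000

static pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;

static void *
worker (void *arg)
{
  /* Repeated lock/unlock so that the elision promotion of __kind can
     race between threads.  */
  for (int i = 0; i < ROUNDS; i++)
    {
      if (pthread_mutex_lock (&mut) != 0)
        abort ();
      if (pthread_mutex_unlock (&mut) != 0)
        abort ();
    }
  return NULL;
}

int
main (void)
{
  pthread_t th[NTHREADS];

  for (int i = 0; i < NTHREADS; i++)
    if (pthread_create (&th[i], NULL, worker, NULL) != 0)
      abort ();
  for (int i = 0; i < NTHREADS; i++)
    if (pthread_join (th[i], NULL) != 0)
      abort ();

  /* With the race present, the mutex can still look locked here even
     though every lock was paired with an unlock.  */
  int ret = pthread_mutex_destroy (&mut);
  if (ret != 0)
    {
      printf ("pthread_mutex_destroy: %s\n", strerror (ret));
      return 1;
    }
  return 0;
}

Whether the race actually fires depends on transactional-memory support
and timing; the sketch only shows the shape of the problem the new test
covers.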
Diffstat (limited to 'nptl/pthread_mutex_timedlock.c')
-rw-r--r--   nptl/pthread_mutex_timedlock.c   17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 28237b0e58..888c12fe28 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -53,6 +53,8 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,
   /* We must not check ABSTIME here.  If the thread does not block
      abstime must not be checked for a valid value.  */

+  /* See concurrency notes regarding mutex type which is loaded from __kind
+     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
   switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                             PTHREAD_MUTEX_TIMED_NP))
     {
@@ -338,8 +340,14 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,
     case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
       {
-        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
-        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+        int kind, robust;
+        {
+          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+             in sysdeps/nptl/bits/thread-shared-types.h.  */
+          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
+          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
+          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+        }

        if (robust)
          {
@@ -509,7 +517,10 @@ __pthread_mutex_timedlock (pthread_mutex_t *mutex,
     case PTHREAD_MUTEX_PP_NORMAL_NP:
     case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
       {
-        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
+        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+           in sysdeps/nptl/bits/thread-shared-types.h.  */
+        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
+          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;
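The same pattern recurs in the other files listed in the ChangeLog: load
__kind once with a relaxed atomic load and derive every flag from that
single snapshot.  A stand-alone sketch of the idea using C11 atomics
(glibc itself uses its internal atomic_load_relaxed macro; the flag
values and names below are hypothetical stand-ins for the internal
PTHREAD_MUTEX_* bits, not glibc code):

#include <stdatomic.h>

/* Hypothetical stand-ins for PTHREAD_MUTEX_KIND_MASK_NP and
   PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
enum { KIND_MASK = 3, ROBUST_BIT = 16 };

/* Stand-in for __kind, which FORCE_ELISION may modify concurrently.  */
static _Atomic int kind_field;

static void
classify (int *kind, int *robust)
{
  /* One relaxed load yields a consistent snapshot; reading the field
     twice with plain loads (as before the patch) is a data race and can
     mix values from different promotion states.  Relaxed ordering
     suffices because only atomicity of the read is needed here; any
     required ordering comes from the mutex operations themselves.  */
  int snapshot = atomic_load_explicit (&kind_field, memory_order_relaxed);
  *kind = snapshot & KIND_MASK;
  *robust = snapshot & ROBUST_BIT;
}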