author    Stefan Liebler <stli@linux.ibm.com>  2018-10-17 12:23:04 +0200
committer Stefan Liebler <stli@linux.ibm.com>  2018-10-17 12:23:04 +0200
commit    403b4feb22dcbc85ace72a361d2a951380372471
tree      fe2235a8bce38cad1bbef3b4f28da82dc353e138 /nptl/pthread_mutex_unlock.c
parent    ce5a7de6cd1479a1e78fda0db023bd4effa072a4
Fix race in pthread_mutex_lock while promoting to PTHREAD_MUTEX_ELISION_NP [BZ #23275]
The race leads either to pthread_mutex_destroy returning EBUSY
or to an assertion failure (see the description in Bugzilla).

This patch fixes the race by ensuring that the elision path is
used in all cases if elision is enabled via the GLIBC_TUNABLES framework.
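
As an illustration, the fixed logic can be paraphrased as below.  This is
a condensed sketch modeled on the x86 variant of FORCE_ELISION, not the
exact source; the real macros in the force-elision.h headers carry a
detailed case analysis of the kind flags.

    /* Condensed sketch of FORCE_ELISION after the fix.  `m' is the
       mutex, `s' is the statement entering the elision code path.  */
    #define FORCE_ELISION(m, s)                                          \
      if (__pthread_force_elision)                                       \
        {                                                                \
          /* Relaxed atomic load; see the concurrency notes              \
             on __kind in thread-shared-types.h.  */                     \
          int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));  \
          if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)              \
            /* Already promoted: always take the elision path.  */       \
            s;                                                           \
          else if ((mutex_kind & PTHREAD_MUTEX_NO_ELISION_NP) == 0)      \
            {                                                            \
              /* First lock of this mutex: promote the kind with a       \
                 relaxed atomic store, then take the elision path.  */   \
              mutex_kind |= PTHREAD_MUTEX_ELISION_NP;                    \
              atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);  \
              s;                                                         \
            }                                                            \
        }

The key change is that a mutex already marked PTHREAD_MUTEX_ELISION_NP
unconditionally takes the elision path, so concurrent lockers can no
longer diverge onto different code paths for the same mutex.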

The __kind variable in struct __pthread_mutex_s is accessed concurrently.
Therefore all accesses now go through the atomic macros.
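
Concretely, the pattern applied throughout the patch (and visible in the
diff below) replaces plain accesses with relaxed atomic ones;
schematically:

    /* Before: a plain load, which the compiler may tear or re-read.  */
    int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

    /* After: a relaxed atomic load.  Relaxed MO suffices because no
       ordering with other memory is required, only an untorn value.  */
    int robust = atomic_load_relaxed (&(mutex->__data.__kind))
                 & PTHREAD_MUTEX_ROBUST_NORMAL_NP;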

The new test case tst-mutex10 triggers the race on s390x and Intel.
Presumably it also does on POWER, but I don't have access to a POWER
machine with lock elision; at least the code for POWER is the same as
on the other two architectures.
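
The actual test is nptl/tst-mutex10.c; the following is only a rough,
hypothetical sketch of the failure mode it exercises (thread count,
iteration count, and output are invented here).  Elision must be
force-enabled, e.g. via something like GLIBC_TUNABLES=glibc.elision.enable=1,
which is what the tst-mutex10-ENV Makefile variable arranges.

    #include <pthread.h>
    #include <stdio.h>

    static void *
    thread_func (void *arg)
    {
      pthread_mutex_t *m = arg;
      /* The first lock may promote __kind to the elision type; the
         unlock must observe the promoted type, or the mutex is left
         looking locked.  */
      pthread_mutex_lock (m);
      pthread_mutex_unlock (m);
      return NULL;
    }

    int
    main (void)
    {
      for (int i = 0; i < 100000; i++)
        {
          pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
          pthread_t t1, t2;
          pthread_create (&t1, NULL, thread_func, &m);
          pthread_create (&t2, NULL, thread_func, &m);
          pthread_join (t1, NULL);
          pthread_join (t2, NULL);
          /* With the race present, destroy can fail with EBUSY even
             though every lock has been released.  */
          if (pthread_mutex_destroy (&m) != 0)
            {
              puts ("pthread_mutex_destroy failed");
              return 1;
            }
        }
      return 0;
    }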

ChangeLog:

	[BZ #23275]
	* nptl/tst-mutex10.c: New file.
	* nptl/Makefile (tests): Add tst-mutex10.
	(tst-mutex10-ENV): New variable.
	* sysdeps/unix/sysv/linux/s390/force-elision.h (FORCE_ELISION):
	Ensure that elision path is used if elision is available.
	* sysdeps/unix/sysv/linux/powerpc/force-elision.h (FORCE_ELISION):
	Likewise.
	* sysdeps/unix/sysv/linux/x86/force-elision.h (FORCE_ELISION):
	Likewise.
	* nptl/pthreadP.h (PTHREAD_MUTEX_TYPE, PTHREAD_MUTEX_TYPE_ELISION)
	(PTHREAD_MUTEX_PSHARED): Use atomic_load_relaxed.
	* nptl/pthread_mutex_consistent.c (pthread_mutex_consistent): Likewise.
	* nptl/pthread_mutex_getprioceiling.c (pthread_mutex_getprioceiling):
	Likewise.
	* nptl/pthread_mutex_lock.c (__pthread_mutex_lock_full)
	(__pthread_mutex_cond_lock_adjust): Likewise.
	* nptl/pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling):
	Likewise.
	* nptl/pthread_mutex_timedlock.c (__pthread_mutex_timedlock): Likewise.
	* nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
	* sysdeps/nptl/bits/thread-shared-types.h (struct __pthread_mutex_s):
	Add comments.
	* nptl/pthread_mutex_destroy.c (__pthread_mutex_destroy):
	Use atomic_load_relaxed and atomic_store_relaxed.
	* nptl/pthread_mutex_init.c (__pthread_mutex_init):
	Use atomic_store_relaxed.
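
The destroy/init entries above amount to the same discipline on the
write side; a simplified, non-authoritative sketch (the real functions
also handle probes and attribute flags):

    /* In __pthread_mutex_init: publish the kind with a relaxed store.  */
    atomic_store_relaxed (&(mutex->__data.__kind), mutex_kind);

    /* In __pthread_mutex_destroy: re-read it with a relaxed load
       before invalidating the mutex.  */
    if ((atomic_load_relaxed (&(mutex->__data.__kind))
         & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
        && mutex->__data.__nusers != 0)
      return EBUSY;
    atomic_store_relaxed (&(mutex->__data.__kind), -1);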
Diffstat (limited to 'nptl/pthread_mutex_unlock.c')
 nptl/pthread_mutex_unlock.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index 9ea62943b7..68d04d5395 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -35,6 +35,8 @@ int
 attribute_hidden
 __pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
 {
+  /* See concurrency notes regarding mutex type which is loaded from __kind
+     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
   int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
   if (__builtin_expect (type &
 		~(PTHREAD_MUTEX_KIND_MASK_NP|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
@@ -222,13 +224,19 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
       /* If the previous owner died and the caller did not succeed in
 	 making the state consistent, mark the mutex as unrecoverable
 	 and wake all waiters.  */
-      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
+      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	 in sysdeps/nptl/bits/thread-shared-types.h.  */
+      if ((atomic_load_relaxed (&(mutex->__data.__kind))
+	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
 	  && __builtin_expect (mutex->__data.__owner
 			       == PTHREAD_MUTEX_INCONSISTENT, 0))
       pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
 
-      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
+      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	 in sysdeps/nptl/bits/thread-shared-types.h.  */
+      if ((atomic_load_relaxed (&(mutex->__data.__kind))
+	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
 	{
 	continue_pi_robust:
 	  /* Remove mutex from the list.
@@ -251,7 +259,10 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
       /* Unlock.  Load all necessary mutex data before releasing the mutex
 	 to not violate the mutex destruction requirements (see
 	 lll_unlock).  */
-      int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
+	 in sysdeps/nptl/bits/thread-shared-types.h.  */
+      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
+	& PTHREAD_MUTEX_ROBUST_NORMAL_NP;
       private = (robust
 		 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
 		 : PTHREAD_MUTEX_PSHARED (mutex));