author    Stefan Liebler <stli@linux.ibm.com>    2018-10-17 12:23:04 +0200
committer Stefan Liebler <stli@linux.ibm.com>    2018-10-17 12:23:04 +0200
commit    403b4feb22dcbc85ace72a361d2a951380372471 (patch)
tree      fe2235a8bce38cad1bbef3b4f28da82dc353e138 /sysdeps/unix/sysv/linux/x86
parent    ce5a7de6cd1479a1e78fda0db023bd4effa072a4 (diff)
Fix race in pthread_mutex_lock while promoting to PTHREAD_MUTEX_ELISION_NP [BZ #23275]
The race leads either to pthread_mutex_destroy returning EBUSY or to
an assertion failure (see the description in Bugzilla).

This patch fixes the race by ensuring that the elision path is used in
all cases once elision has been enabled via the GLIBC_TUNABLES framework.

The __kind member of struct __pthread_mutex_s is accessed concurrently.
Therefore we now access it with the atomic macros (atomic_load_relaxed
and atomic_store_relaxed).
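
As an illustration only, here is a minimal standalone C11 sketch of that
access discipline.  glibc itself uses its internal atomic_load_relaxed /
atomic_store_relaxed macros on the plain int __kind member; the _Atomic
field, struct, and function names below are stand-ins, not glibc code.

#include <stdatomic.h>

/* Stand-in for struct __pthread_mutex_s; only the kind field matters.  */
struct demo_mutex
{
  _Atomic int kind;
};

/* Read side: each consumer loads the kind once with relaxed memory order
   and then works on the local copy (this mirrors what macros such as
   PTHREAD_MUTEX_TYPE do after the patch).  */
static int
demo_mutex_type (struct demo_mutex *m)
{
  return atomic_load_explicit (&m->kind, memory_order_relaxed) & 127;
}

/* Write side: updating the kind is a relaxed store of the new value.  */
static void
demo_set_kind_flag (struct demo_mutex *m, int flag)
{
  int kind = atomic_load_explicit (&m->kind, memory_order_relaxed);
  atomic_store_explicit (&m->kind, kind | flag, memory_order_relaxed);
}

int
main (void)
{
  struct demo_mutex m = { 0 };
  demo_set_kind_flag (&m, 0x100);
  /* The flag lives outside the low type bits, so the type is still 0.  */
  return demo_mutex_type (&m) != 0;
}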

The new test case tst-mutex10 triggers the race on s390x and Intel.
Presumably it also does so on POWER, but I do not have access to a
POWER machine with lock elision; in any case the POWER code is the same
as on the other two architectures.
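
The test itself is not reproduced here.  As a rough, hypothetical sketch,
the kind of stress test that exposes the race lets several threads
perform the very first lock operation on a freshly initialized default
mutex at the same time, while the main thread destroys and re-initializes
the mutex between rounds.  All names and counts below are made up; build
with -pthread and run with elision force-enabled via the GLIBC_TUNABLES
framework (which is what the tst-mutex10-ENV variable added by this
commit arranges, e.g. GLIBC_TUNABLES=glibc.elision.enable=1).  On an
unfixed glibc, pthread_mutex_destroy then occasionally fails.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NTHREADS 4
#define ROUNDS   5000

static pthread_mutex_t mutex;
static pthread_barrier_t bar_start;
static pthread_barrier_t bar_end;

static void *
thread_func (void *arg)
{
  for (int i = 0; i < ROUNDS; i++)
    {
      /* All threads race on the first lock operation of this round's
	 freshly initialized mutex.  */
      pthread_barrier_wait (&bar_start);
      if (pthread_mutex_lock (&mutex) != 0
	  || pthread_mutex_unlock (&mutex) != 0)
	abort ();
      pthread_barrier_wait (&bar_end);
    }
  return NULL;
}

int
main (void)
{
  pthread_t th[NTHREADS];

  if (pthread_barrier_init (&bar_start, NULL, NTHREADS + 1) != 0
      || pthread_barrier_init (&bar_end, NULL, NTHREADS + 1) != 0)
    abort ();
  for (int i = 0; i < NTHREADS; i++)
    if (pthread_create (&th[i], NULL, thread_func, NULL) != 0)
      abort ();

  for (int i = 0; i < ROUNDS; i++)
    {
      if (pthread_mutex_init (&mutex, NULL) != 0)
	abort ();
      pthread_barrier_wait (&bar_start);
      pthread_barrier_wait (&bar_end);
      /* Without the fix, one thread may have taken the elision path and
	 another the normal path, leaving the mutex looking busy.  */
      int e = pthread_mutex_destroy (&mutex);
      if (e != 0)
	{
	  fprintf (stderr, "round %d: pthread_mutex_destroy failed: %d\n", i, e);
	  exit (1);
	}
    }

  for (int i = 0; i < NTHREADS; i++)
    pthread_join (th[i], NULL);
  puts ("no destroy failure observed");
  return 0;
}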

ChangeLog:

	[BZ #23275]
	* nptl/tst-mutex10.c: New file.
	* nptl/Makefile (tests): Add tst-mutex10.
	(tst-mutex10-ENV): New variable.
	* sysdeps/unix/sysv/linux/s390/force-elision.h (FORCE_ELISION):
	Ensure that elision path is used if elision is available.
	* sysdeps/unix/sysv/linux/powerpc/force-elision.h (FORCE_ELISION):
	Likewise.
	* sysdeps/unix/sysv/linux/x86/force-elision.h (FORCE_ELISION):
	Likewise.
	* nptl/pthreadP.h (PTHREAD_MUTEX_TYPE, PTHREAD_MUTEX_TYPE_ELISION)
	(PTHREAD_MUTEX_PSHARED): Use atomic_load_relaxed.
	* nptl/pthread_mutex_consistent.c (pthread_mutex_consistent): Likewise.
	* nptl/pthread_mutex_getprioceiling.c (pthread_mutex_getprioceiling):
	Likewise.
	* nptl/pthread_mutex_lock.c (__pthread_mutex_lock_full)
	(__pthread_mutex_cond_lock_adjust): Likewise.
	* nptl/pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling):
	Likewise.
	* nptl/pthread_mutex_timedlock.c (__pthread_mutex_timedlock): Likewise.
	* nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
	* sysdeps/nptl/bits/thread-shared-types.h (struct __pthread_mutex_s):
	Add comments.
	* nptl/pthread_mutex_destroy.c (__pthread_mutex_destroy):
	Use atomic_load_relaxed and atomic_store_relaxed.
	* nptl/pthread_mutex_init.c (__pthread_mutex_init):
	Use atomic_store_relaxed.
Diffstat (limited to 'sysdeps/unix/sysv/linux/x86')
-rw-r--r--  sysdeps/unix/sysv/linux/x86/force-elision.h  |  44
1 file changed, 40 insertions(+), 4 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/x86/force-elision.h b/sysdeps/unix/sysv/linux/x86/force-elision.h
index dd659c908f..61282d6678 100644
--- a/sysdeps/unix/sysv/linux/x86/force-elision.h
+++ b/sysdeps/unix/sysv/linux/x86/force-elision.h
@@ -18,9 +18,45 @@
 
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 There are the following cases for the kind of a mutex		\
+	 (The mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP where	\
+	 only one of both flags can be set):				\
+	 - both flags are not set:					\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision as it is not enabled so far.				\
+	 Note: It can happen that multiple threads are calling e.g.	\
+	 pthread_mutex_lock at the same time as the first lock		\
+	 operation for this mutex.  Then elision is enabled for this	\
+	 mutex by multiple threads.  Storing with relaxed MO is enough	\
+	 as all threads will store the same new value for the kind of	\
+	 the mutex.  But we have to ensure that we always use the	\
+	 elision path regardless if this thread has enabled elision or	\
+	 another one.							\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }
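
The key point of this shape is that both if statements test the local
mutex_kind copy: a thread that finds neither flag set ORs
PTHREAD_MUTEX_ELISION_NP into its local copy as well, so it takes the
elision path regardless of whether its own store or a racing thread's
store reaches memory first.  Below is a standalone sketch of the three
cases the comment walks through, using made-up flag values and a plain
function in place of the macro; it is an illustration only, not the
glibc code.

#include <stdatomic.h>
#include <stdio.h>

/* Made-up stand-ins for PTHREAD_MUTEX_ELISION_NP and
   PTHREAD_MUTEX_NO_ELISION_NP.  */
#define DEMO_ELISION_NP    0x100
#define DEMO_NO_ELISION_NP 0x200
#define DEMO_ELISION_FLAGS (DEMO_ELISION_NP | DEMO_NO_ELISION_NP)

/* Returns 1 if the caller should take the elision path, 0 otherwise,
   mirroring the structure of the macro above.  */
static int
demo_force_elision (_Atomic int *kind)
{
  int mutex_kind = atomic_load_explicit (kind, memory_order_relaxed);
  if ((mutex_kind & DEMO_ELISION_FLAGS) == 0)
    {
      /* Case 1: first lock operation, enable elision.  All racing
	 threads store the same value, so relaxed MO is enough.  */
      mutex_kind |= DEMO_ELISION_NP;
      atomic_store_explicit (kind, mutex_kind, memory_order_relaxed);
    }
  /* Cases 1 and 2 take the elision path; case 3 (elision disabled via
     the mutex attributes) never does.  */
  return (mutex_kind & DEMO_ELISION_NP) != 0;
}

int
main (void)
{
  _Atomic int fresh = 0;                      /* case 1: no flag set yet */
  _Atomic int enabled = DEMO_ELISION_NP;      /* case 2: already promoted */
  _Atomic int disabled = DEMO_NO_ELISION_NP;  /* case 3: explicitly off */

  printf ("fresh:    %s\n", demo_force_elision (&fresh) ? "elision" : "normal");
  printf ("enabled:  %s\n", demo_force_elision (&enabled) ? "elision" : "normal");
  printf ("disabled: %s\n", demo_force_elision (&disabled) ? "elision" : "normal");
  return 0;
}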