Diffstat (limited to 'sysdeps/unix/sysv/linux')
 -rw-r--r--  sysdeps/unix/sysv/linux/powerpc/force-elision.h | 44
 -rw-r--r--  sysdeps/unix/sysv/linux/s390/force-elision.h    | 44
 -rw-r--r--  sysdeps/unix/sysv/linux/x86/force-elision.h     | 44
 3 files changed, 120 insertions(+), 12 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/powerpc/force-elision.h b/sysdeps/unix/sysv/linux/powerpc/force-elision.h
index fe5d6ceade..d8f5a4b1c7 100644
--- a/sysdeps/unix/sysv/linux/powerpc/force-elision.h
+++ b/sysdeps/unix/sysv/linux/powerpc/force-elision.h
@@ -18,9 +18,45 @@
 
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 The kind of a mutex falls into one of the following cases	\
+	 (the mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP, of	\
+	 which at most one of them can be set):				\
+	 - neither flag is set:						\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision since it has not been enabled so far.			\
+	 Note: Multiple threads may call e.g. pthread_mutex_lock	\
+	 concurrently as the first lock operation for this mutex.	\
+	 In that case elision is enabled for this mutex by more than	\
+	 one thread.  Storing with relaxed MO is sufficient because	\
+	 all threads store the same new value for the kind of the	\
+	 mutex.  However, we must always take the elision path		\
+	 regardless of whether elision was enabled by this thread or	\
+	 by another one.						\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }
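
The new FORCE_ELISION body above boils down to: take one relaxed snapshot of
__kind, set PTHREAD_MUTEX_ELISION_NP with a relaxed store if neither elision
flag is present yet, and then branch on the snapshot rather than on a fresh
read.  The standalone sketch below (not glibc code; the flag values, names
and the four-thread driver are illustrative stand-ins) models why this is
sufficient: racing first lockers may all perform the store, but they all
store the same value, and each one decides its path from its own snapshot,
so every such thread ends up on the elision path.

/* Standalone model of the snapshot-and-relaxed-store pattern used above.
   Compile with: cc -std=c11 -pthread sketch.c  */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ELISION_FLAG    0x100   /* stand-in for PTHREAD_MUTEX_ELISION_NP */
#define NO_ELISION_FLAG 0x200   /* stand-in for PTHREAD_MUTEX_NO_ELISION_NP */
#define ELISION_FLAGS   (ELISION_FLAG | NO_ELISION_FLAG)

static _Atomic int kind;        /* stand-in for m->__data.__kind */

static void *
first_lock (void *arg)
{
  (void) arg;
  /* Read the kind once; every later decision uses this snapshot.  */
  int k = atomic_load_explicit (&kind, memory_order_relaxed);
  if ((k & ELISION_FLAGS) == 0)
    {
      /* Several first lockers may race here, but they all store the same
	 new value, so relaxed MO is sufficient.  */
      k |= ELISION_FLAG;
      atomic_store_explicit (&kind, k, memory_order_relaxed);
    }
  /* Branch on the snapshot, not on a fresh load, so this thread takes the
     elision path no matter which thread actually enabled elision.  */
  puts ((k & ELISION_FLAG) != 0 ? "elision path" : "normal path");
  return NULL;
}

int
main (void)
{
  pthread_t t[4];
  for (int i = 0; i < 4; i++)
    pthread_create (&t[i], NULL, first_lock, NULL);
  for (int i = 0; i < 4; i++)
    pthread_join (t[i], NULL);
  return 0;
}
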
diff --git a/sysdeps/unix/sysv/linux/s390/force-elision.h b/sysdeps/unix/sysv/linux/s390/force-elision.h
index d8a1b9972f..71f32367dd 100644
--- a/sysdeps/unix/sysv/linux/s390/force-elision.h
+++ b/sysdeps/unix/sysv/linux/s390/force-elision.h
@@ -18,9 +18,45 @@
 
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 The kind of a mutex falls into one of the following cases	\
+	 (the mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP, of	\
+	 which at most one of them can be set):				\
+	 - neither flag is set:						\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision since it has not been enabled so far.			\
+	 Note: Multiple threads may call e.g. pthread_mutex_lock	\
+	 concurrently as the first lock operation for this mutex.	\
+	 In that case elision is enabled for this mutex by more than	\
+	 one thread.  Storing with relaxed MO is sufficient because	\
+	 all threads store the same new value for the kind of the	\
+	 mutex.  However, we must always take the elision path		\
+	 regardless of whether elision was enabled by this thread or	\
+	 by another one.						\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }
diff --git a/sysdeps/unix/sysv/linux/x86/force-elision.h b/sysdeps/unix/sysv/linux/x86/force-elision.h
index dd659c908f..61282d6678 100644
--- a/sysdeps/unix/sysv/linux/x86/force-elision.h
+++ b/sysdeps/unix/sysv/linux/x86/force-elision.h
@@ -18,9 +18,45 @@
 
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 The kind of a mutex falls into one of the following cases	\
+	 (the mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP, of	\
+	 which at most one of them can be set):				\
+	 - neither flag is set:						\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision since it has not been enabled so far.			\
+	 Note: Multiple threads may call e.g. pthread_mutex_lock	\
+	 concurrently as the first lock operation for this mutex.	\
+	 In that case elision is enabled for this mutex by more than	\
+	 one thread.  Storing with relaxed MO is sufficient because	\
+	 all threads store the same new value for the kind of the	\
+	 mutex.  However, we must always take the elision path		\
+	 regardless of whether elision was enabled by this thread or	\
+	 by another one.						\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }
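
At the call sites in the lock entry points, the s argument is the statement
that enters the elision code, typically a jump to an elision label.  The mock
below is purely illustrative: every name and flag value is a local stand-in,
and the macro is re-declared here with the same shape as the new version
above only so the fragment is self-contained and compilable.  It shows how a
caller threads a goto through the macro and how the branch on the mutex_kind
snapshot decides which path the caller ends up on.

/* Self-contained mock of a FORCE_ELISION call site.  Illustrative only:
   none of the glibc internals are used here.  */
#include <stdatomic.h>
#include <stdio.h>

#define MOCK_ELISION_NP     0x100
#define MOCK_NO_ELISION_NP  0x200
#define MOCK_ELISION_FLAGS  (MOCK_ELISION_NP | MOCK_NO_ELISION_NP)

struct mock_mutex { _Atomic int kind; };

static int mock_force_elision = 1;  /* stand-in for __pthread_force_elision */

/* Same shape as the macro above, with mock names.  */
#define MOCK_FORCE_ELISION(m, s)					\
  if (mock_force_elision)						\
    {									\
      int mutex_kind = atomic_load_explicit (&(m)->kind,		\
					     memory_order_relaxed);	\
      if ((mutex_kind & MOCK_ELISION_FLAGS) == 0)			\
	{								\
	  mutex_kind |= MOCK_ELISION_NP;				\
	  atomic_store_explicit (&(m)->kind, mutex_kind,		\
				 memory_order_relaxed);			\
	}								\
      if ((mutex_kind & MOCK_ELISION_NP) != 0)				\
	{								\
	  s;								\
	}								\
    }

static int
lock_entry (struct mock_mutex *m)
{
  /* The second argument is the statement run on the elision path.  */
  MOCK_FORCE_ELISION (m, goto elision);
  puts ("normal lock path");
  return 0;

 elision:
  puts ("elision lock path");
  return 0;
}

int
main (void)
{
  struct mock_mutex m = { 0 };
  lock_entry (&m);  /* first lock: enables and takes the elision path */
  return 0;
}
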