Diffstat (limited to 'sysdeps'):
 sysdeps/nptl/bits/thread-shared-types.h         | 22
 sysdeps/unix/sysv/linux/powerpc/force-elision.h | 44
 sysdeps/unix/sysv/linux/s390/force-elision.h    | 44
 sysdeps/unix/sysv/linux/x86/force-elision.h     | 44
 4 files changed, 141 insertions(+), 13 deletions(-)
diff --git a/sysdeps/nptl/bits/thread-shared-types.h b/sysdeps/nptl/bits/thread-shared-types.h
index 1e2092a05d..05c94e7a71 100644
--- a/sysdeps/nptl/bits/thread-shared-types.h
+++ b/sysdeps/nptl/bits/thread-shared-types.h
@@ -124,7 +124,27 @@ struct __pthread_mutex_s
   unsigned int __nusers;
 #endif
   /* KIND must stay at this position in the structure to maintain
-     binary compatibility with static initializers.  */
+     binary compatibility with static initializers.
+
+     Concurrency notes:
+     The __kind of a mutex is initialized either by the static
+     PTHREAD_MUTEX_INITIALIZER or by a call to pthread_mutex_init.
+
+     After a mutex has been initialized, the __kind of a mutex is usually not
+     changed.  BUT it can be set to -1 in pthread_mutex_destroy or elision can
+     be enabled.  This is done concurrently in the pthread_mutex_*lock functions
+     by using the macro FORCE_ELISION.  This macro is only defined for
+     architectures which support lock elision.
+
+     For elision, there are the flags PTHREAD_MUTEX_ELISION_NP and
+     PTHREAD_MUTEX_NO_ELISION_NP which can be set in addition to the already set
+     type of a mutex.
+     Before a mutex is initialized, only PTHREAD_MUTEX_NO_ELISION_NP can be set
+     with pthread_mutexattr_settype.
+     After a mutex has been initialized, the functions pthread_mutex_*lock can
+     enable elision - if the mutex type and the machine support it - by setting
+     the flag PTHREAD_MUTEX_ELISION_NP.  This is done concurrently.  Afterwards,
+     the lock / unlock functions use specific elision code paths.  */
   int __kind;
   __PTHREAD_COMPAT_PADDING_MID
 #if __PTHREAD_MUTEX_NUSERS_AFTER_KIND
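To make the lifecycle described in the concurrency notes concrete, here is a minimal user-level sketch.  It uses only standard pthreads calls; the elision flags themselves are internal to glibc and are manipulated only by the pthread_mutex_*lock implementations, so the program merely exercises the initialization, first-lock and destroy steps the notes refer to.

  #include <pthread.h>

  /* __kind is written once by the static initializer.  */
  static pthread_mutex_t static_mutex = PTHREAD_MUTEX_INITIALIZER;

  int
  main (void)
  {
    pthread_mutex_t dynamic_mutex;
    pthread_mutexattr_t attr;

    /* pthread_mutexattr_settype records the type; pthread_mutex_init
       copies it into __kind.  Per the notes above, only
       PTHREAD_MUTEX_NO_ELISION_NP can be set this way before the mutex
       is initialized.  */
    pthread_mutexattr_init (&attr);
    pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init (&dynamic_mutex, &attr);
    pthread_mutexattr_destroy (&attr);

    /* On architectures that define FORCE_ELISION, the first lock of an
       eligible mutex is where PTHREAD_MUTEX_ELISION_NP may be ORed into
       __kind, possibly concurrently from several threads.  */
    pthread_mutex_lock (&static_mutex);
    pthread_mutex_unlock (&static_mutex);

    /* pthread_mutex_destroy may set __kind to -1; any later use of the
       mutex is undefined.  */
    pthread_mutex_destroy (&dynamic_mutex);
    return 0;
  }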
diff --git a/sysdeps/unix/sysv/linux/powerpc/force-elision.h b/sysdeps/unix/sysv/linux/powerpc/force-elision.h
index fe5d6ceade..d8f5a4b1c7 100644
--- a/sysdeps/unix/sysv/linux/powerpc/force-elision.h
+++ b/sysdeps/unix/sysv/linux/powerpc/force-elision.h
@@ -18,9 +18,45 @@
 
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 There are the following cases for the kind of a mutex		\
+	 (The mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP where	\
+	 at most one of the two flags can be set):			\
+	 - neither flag is set:						\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision, as it has not been enabled so far.			\
+	 Note: It can happen that multiple threads are calling e.g.	\
+	 pthread_mutex_lock at the same time as the first lock		\
+	 operation for this mutex.  Then elision is enabled for this	\
+	 mutex by multiple threads.  Storing with relaxed MO is enough	\
+	 as all threads will store the same new value for the kind of	\
+	 the mutex.  But we have to ensure that we always use the	\
+	 elision path regardless of whether this thread or another	\
+	 one enabled elision.						\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }
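The powerpc, s390 and x86 variants of this macro are identical, so a single self-contained toy may help illustrate the new logic.  It mirrors the shape of the new macro using C11 <stdatomic.h> in place of glibc's internal atomic_load_relaxed/atomic_store_relaxed; every TOY_*/stand-in identifier here is invented for this sketch.  The second macro argument is an arbitrary statement; glibc's lock functions pass something along the lines of `goto elision'.

  #include <stdatomic.h>
  #include <stdio.h>

  #define ELISION_NP     0x100  /* stand-in for PTHREAD_MUTEX_ELISION_NP */
  #define NO_ELISION_NP  0x200  /* stand-in for PTHREAD_MUTEX_NO_ELISION_NP */
  #define ELISION_FLAGS  (ELISION_NP | NO_ELISION_NP)

  static int force_elision = 1;      /* stand-in for __pthread_force_elision */
  static _Atomic int kind_word;      /* stand-in for mutex->__data.__kind */

  /* Same shape as the new macro: load the kind once with relaxed MO, set
     the elision flag if neither flag is set yet, and run S whenever the
     elision flag is set in the value we read or wrote.  */
  #define TOY_FORCE_ELISION(kind_ptr, s)                                  \
    if (force_elision)                                                    \
      {                                                                   \
        int kind = atomic_load_explicit (kind_ptr, memory_order_relaxed); \
        if ((kind & ELISION_FLAGS) == 0)                                  \
          {                                                               \
            kind |= ELISION_NP;                                           \
            atomic_store_explicit (kind_ptr, kind, memory_order_relaxed); \
          }                                                               \
        if ((kind & ELISION_NP) != 0)                                     \
          {                                                               \
            s;                                                            \
          }                                                               \
      }

  static void
  toy_lock (void)
  {
    TOY_FORCE_ELISION (&kind_word, goto elision);
    puts ("non-elided lock path");
    return;
   elision:
    puts ("elision lock path");
  }

  int
  main (void)
  {
    toy_lock ();  /* first lock: sets the flag and takes the elision path */
    toy_lock ();  /* flag already set: elision path again */
    return 0;
  }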
diff --git a/sysdeps/unix/sysv/linux/s390/force-elision.h b/sysdeps/unix/sysv/linux/s390/force-elision.h
index d8a1b9972f..71f32367dd 100644
--- a/sysdeps/unix/sysv/linux/s390/force-elision.h
+++ b/sysdeps/unix/sysv/linux/s390/force-elision.h
@@ -18,9 +18,45 @@
 
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 There are the following cases for the kind of a mutex		\
+	 (The mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP where	\
+	 at most one of the two flags can be set):			\
+	 - neither flag is set:						\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision, as it has not been enabled so far.			\
+	 Note: It can happen that multiple threads are calling e.g.	\
+	 pthread_mutex_lock at the same time as the first lock		\
+	 operation for this mutex.  Then elision is enabled for this	\
+	 mutex by multiple threads.  Storing with relaxed MO is enough	\
+	 as all threads will store the same new value for the kind of	\
+	 the mutex.  But we have to ensure that we always use the	\
+	 elision path regardless of whether this thread or another	\
+	 one enabled elision.						\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }
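The "multiple threads performing the first lock operation at the same time" case from the comment can be exercised with an ordinary pthreads program.  This is only an illustration of the scenario described above; whether elision is actually used still depends on __pthread_force_elision, the mutex type and the hardware.

  #include <pthread.h>
  #include <stdio.h>

  /* Both threads may perform the very first lock of this mutex
     concurrently.  With FORCE_ELISION each of them may then store
     __kind | PTHREAD_MUTEX_ELISION_NP; both store the same value, so a
     relaxed-MO store is sufficient, and both take the same path.  */
  static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  static int counter;

  static void *
  first_locker (void *arg)
  {
    pthread_mutex_lock (&mutex);
    ++counter;
    pthread_mutex_unlock (&mutex);
    return arg;
  }

  int
  main (void)
  {
    pthread_t t1, t2;
    pthread_create (&t1, NULL, first_locker, NULL);
    pthread_create (&t2, NULL, first_locker, NULL);
    pthread_join (t1, NULL);
    pthread_join (t2, NULL);
    printf ("counter = %d\n", counter);  /* always 2 */
    return 0;
  }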
diff --git a/sysdeps/unix/sysv/linux/x86/force-elision.h b/sysdeps/unix/sysv/linux/x86/force-elision.h
index dd659c908f..61282d6678 100644
--- a/sysdeps/unix/sysv/linux/x86/force-elision.h
+++ b/sysdeps/unix/sysv/linux/x86/force-elision.h
@@ -18,9 +18,45 @@
 
 /* Automatically enable elision for existing user lock kinds.  */
 #define FORCE_ELISION(m, s)						\
-  if (__pthread_force_elision						\
-      && (m->__data.__kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)	\
+  if (__pthread_force_elision)						\
     {									\
-      mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP;			\
-      s;								\
+      /* See concurrency notes regarding __kind in			\
+	 struct __pthread_mutex_s in					\
+	 sysdeps/nptl/bits/thread-shared-types.h.			\
+									\
+	 There are the following cases for the kind of a mutex		\
+	 (The mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags	\
+	 PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP where	\
+	 at most one of the two flags can be set):			\
+	 - neither flag is set:						\
+	 This is the first lock operation for this mutex.  Enable	\
+	 elision, as it has not been enabled so far.			\
+	 Note: It can happen that multiple threads are calling e.g.	\
+	 pthread_mutex_lock at the same time as the first lock		\
+	 operation for this mutex.  Then elision is enabled for this	\
+	 mutex by multiple threads.  Storing with relaxed MO is enough	\
+	 as all threads will store the same new value for the kind of	\
+	 the mutex.  But we have to ensure that we always use the	\
+	 elision path regardless of whether this thread or another	\
+	 one enabled elision.						\
+									\
+	 - PTHREAD_MUTEX_ELISION_NP flag is set:			\
+	 Elision was already enabled for this mutex by a previous lock	\
+	 operation.  See case above.  Just use the elision path.	\
+									\
+	 - PTHREAD_MUTEX_NO_ELISION_NP flag is set:			\
+	 Elision was explicitly disabled by pthread_mutexattr_settype.	\
+	 Do not use the elision path.					\
+	 Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be	\
+	 changed after mutex initialization.  */			\
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));	\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)		\
+	{								\
+	  mutex_kind |= PTHREAD_MUTEX_ELISION_NP;			\
+	  atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);	\
+	}								\
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)			\
+	{								\
+	  s;								\
+	}								\
     }
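For comparison, here is the decision logic before and after this change, reduced to a schematic fragment (kind stands for the freshly loaded __kind value, s for the statement passed to the macro; this is not literal glibc source):

  /* Old logic (the removed lines above): `s' ran only when this thread
     itself set the flag, so a thread that found PTHREAD_MUTEX_ELISION_NP
     already set by another thread skipped `s' and continued on the
     non-elision path; __kind was also accessed non-atomically.  */
  if (force_elision && (kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)
    {
      kind |= PTHREAD_MUTEX_ELISION_NP;
      s;
    }

  /* New logic: decide on the locally loaded value, so every thread that
     observes or sets PTHREAD_MUTEX_ELISION_NP takes the elision path, and
     __kind is only accessed through relaxed-MO atomics.  */
  if (force_elision)
    {
      if ((kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)
        {
          kind |= PTHREAD_MUTEX_ELISION_NP;
          /* stored back to __kind with relaxed MO */
        }
      if ((kind & PTHREAD_MUTEX_ELISION_NP) != 0)
        s;
    }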