about summary refs log tree commit diff
path: root/sysdeps/nptl
diff options
context:
space:
mode:
authorFlorian Weimer <fweimer@redhat.com>2021-02-23 14:59:34 +0100
committerFlorian Weimer <fweimer@redhat.com>2021-02-23 14:59:34 +0100
commit5a664d7ae8e42d641a7b4b436987ff67ab483b08 (patch)
treed2530831e3613ab86b5f536341304ac8cc98f452 /sysdeps/nptl
parent597d0267b5c4a925f0175837ec09df9f77e0a250 (diff)
downloadglibc-5a664d7ae8e42d641a7b4b436987ff67ab483b08.tar.gz
glibc-5a664d7ae8e42d641a7b4b436987ff67ab483b08.tar.xz
glibc-5a664d7ae8e42d641a7b4b436987ff67ab483b08.zip
nptl: Move elision implementations into libc
The elision interfaces are closely aligned between the targets that
implement them, so declare them in the generic <lowlevellock.h>
file.

Empty .c stubs are provided, so that fewer makefile updates
under sysdeps are needed.  Also simplify initialization via
__libc_early_init.

The symbols __lll_clocklock_elision, __lll_lock_elision,
__lll_trylock_elision, __lll_unlock_elision, __pthread_force_elision
move into libc.  For the time being, non-hidden references are used
from libpthread to access them, but once that part of libpthread
is moved into libc, hidden symbols will be used again.  (Hidden
references seem desirable to reduce the likelihood of transaction
aborts.)
Diffstat (limited to 'sysdeps/nptl')
-rw-r--r--sysdeps/nptl/lowlevellock.h102
1 file changed, 102 insertions, 0 deletions
diff --git a/sysdeps/nptl/lowlevellock.h b/sysdeps/nptl/lowlevellock.h
index 176ba96251..be60c9ac28 100644
--- a/sysdeps/nptl/lowlevellock.h
+++ b/sysdeps/nptl/lowlevellock.h
@@ -20,6 +20,7 @@
 #define _LOWLEVELLOCK_H	1
 
 #include <atomic.h>
+#include <elision-conf.h>
 #include <lowlevellock-futex.h>
 #include <time.h>
 
@@ -160,4 +161,105 @@ libc_hidden_proto (__lll_lock_wait)
 #define LLL_LOCK_INITIALIZER		(0)
 #define LLL_LOCK_INITIALIZER_LOCKED	(1)
 
+/* Elision support.  */
+
+#if ENABLE_ELISION_SUPPORT
+/* Force elision for all new locks.  This is used to decide whether
+   existing DEFAULT locks should be automatically upgraded to elision
+   in pthread_mutex_lock.  Disabled for suid programs.  Only used when
+   elision is available.  */
+extern int __pthread_force_elision;
+libc_hidden_proto (__pthread_force_elision)
+
+extern void __lll_elision_init (void) attribute_hidden;
+extern int __lll_clocklock_elision (int *futex, short *adapt_count,
+                                    clockid_t clockid,
+				    const struct __timespec64 *timeout,
+				    int private);
+libc_hidden_proto (__lll_clocklock_elision)
+
+extern int __lll_lock_elision (int *futex, short *adapt_count, int private);
+libc_hidden_proto (__lll_lock_elision)
+
+# if ELISION_UNLOCK_NEEDS_ADAPT_COUNT
+extern int __lll_unlock_elision (int *lock, short *adapt_count, int private);
+# else
+extern int __lll_unlock_elision (int *lock, int private);
+# endif
+libc_hidden_proto (__lll_unlock_elision)
+
+extern int __lll_trylock_elision (int *lock, short *adapt_count);
+libc_hidden_proto (__lll_trylock_elision)
+
+# define lll_clocklock_elision(futex, adapt_count, clockid, timeout, private) \
+  __lll_clocklock_elision (&(futex), &(adapt_count), clockid, timeout, private)
+# define lll_lock_elision(futex, adapt_count, private)		\
+  __lll_lock_elision (&(futex), &(adapt_count), private)
+# define lll_trylock_elision(futex, adapt_count)	\
+  __lll_trylock_elision (&(futex), &(adapt_count))
+# if ELISION_UNLOCK_NEEDS_ADAPT_COUNT
+#  define lll_unlock_elision(futex, adapt_count, private)	\
+  __lll_unlock_elision (&(futex), &(adapt_count), private)
+# else
+#  define lll_unlock_elision(futex, adapt_count, private)	\
+  __lll_unlock_elision (&(futex), private)
+# endif
+
+/* Automatically enable elision for existing user lock kinds.  */
+# define FORCE_ELISION(m, s)                                            \
+  if (__pthread_force_elision)                                          \
+    {                                                                   \
+      /* See concurrency notes regarding __kind in                      \
+         struct __pthread_mutex_s in                                    \
+         sysdeps/nptl/bits/thread-shared-types.h.                       \
+                                                                        \
+         There are the following cases for the kind of a mutex          \
+         (The mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags      \
+         PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP where \
+         only one of both flags can be set):                            \
+         - both flags are not set:                                      \
+         This is the first lock operation for this mutex.  Enable       \
+         elision as it is not enabled so far.                           \
+         Note: It can happen that multiple threads are calling e.g.     \
+         pthread_mutex_lock at the same time as the first lock          \
+         operation for this mutex.  Then elision is enabled for this    \
+         mutex by multiple threads.  Storing with relaxed MO is enough  \
+         as all threads will store the same new value for the kind of   \
+         the mutex.  But we have to ensure that we always use the       \
+         elision path regardless if this thread has enabled elision or  \
+         another one.                                                   \
+                                                                        \
+         - PTHREAD_MUTEX_ELISION_NP flag is set:                        \
+         Elision was already enabled for this mutex by a previous lock  \
+         operation.  See case above.  Just use the elision path.        \
+                                                                        \
+         - PTHREAD_MUTEX_NO_ELISION_NP flag is set:                     \
+         Elision was explicitly disabled by pthread_mutexattr_settype.  \
+         Do not use the elision path.                                   \
+         Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be       \
+         changed after mutex initialization.  */                        \
+      int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind));     \
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)           \
+        {                                                               \
+          mutex_kind |= PTHREAD_MUTEX_ELISION_NP;                       \
+          atomic_store_relaxed (&((m)->__data.__kind), mutex_kind);     \
+        }                                                               \
+      if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0)                 \
+        {                                                               \
+          s;                                                            \
+        }                                                               \
+    }
+
+#else /* !ENABLE_ELISION_SUPPORT */
+
+# define lll_clocklock_elision(futex, adapt_count, clockid, abstime, private) \
+  __futex_clocklock64 (&(futex), clockid, abstime, private)
+# define lll_lock_elision(lock, try_lock, private)	\
+  ({ lll_lock (lock, private); 0; })
+# define lll_trylock_elision(a,t) lll_trylock(a)
+# define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
+# define FORCE_ELISION(m, s)
+
+#endif /* !ENABLE_ELISION_SUPPORT */
+
 #endif	/* lowlevellock.h */