26 files changed, 357 insertions, 94 deletions
diff --git a/ChangeLog b/ChangeLog index c2a78bbf40..98ae6ac712 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,42 @@ +2017-12-05 Rogerio A. Cardoso <rcardoso@linux.vnet.ibm.com>, + Paul E. Murphy <murphyp@linux.vnet.ibm.com>, + Carlos O'Donnell <carlos@redhat.com> + + * elf/dl-tunables.list: Add elision parameters. + * manual/tunables.texi: Add entries about elision tunable. + * sysdeps/unix/sysv/linux/powerpc/elision-conf.c: + Add callback functions to dynamically enable/disable elision. + Add multiple callbacks functions to set elision parameters. + Deleted __libc_enable_secure check. + * sysdeps/unix/sysv/linux/s390/elision-conf.c: Likewise. + * sysdeps/unix/sysv/linux/x86/elision-conf.c: Likewise. + * configure: Regenerated. + * configure.ac: Option enable_lock_elision was deleted. + * config.h.in: ENABLE_LOCK_ELISION flag was deleted. + * config.make.in: Remove references to enable_lock_elision. + * manual/install.texi: Elision configure option was removed. + * INSTALL: Regenerated to remove enable_lock_elision. + * nptl/Makefile: + Disable elision so it can verify error case for destroying a mutex. + * sysdeps/powerpc/nptl/elide.h: + Cleanup ENABLE_LOCK_ELISION check. + Deleted macros for the case when ENABLE_LOCK_ELISION was not defined. + * sysdeps/s390/configure: Regenerated. + * sysdeps/s390/configure.ac: Remove references to enable_lock_elision.. + * nptl/tst-mutex8.c: + Deleted all #ifndef ENABLE_LOCK_ELISION from the test. + * sysdeps/powerpc/powerpc32/sysdep.h: + Deleted all ENABLE_LOCK_ELISION checks. + * sysdeps/powerpc/powerpc64/sysdep.h: Likewise. + * sysdeps/powerpc/sysdep.h: Likewise. + * sysdeps/s390/nptl/bits/pthreadtypes-arch.h: Likewise. + * sysdeps/unix/sysv/linux/powerpc/force-elision.h: Likewise. + * sysdeps/unix/sysv/linux/s390/elision-conf.h: Likewise. + * sysdeps/unix/sysv/linux/s390/force-elision.h: Likewise. + * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise. + * sysdeps/unix/sysv/linux/s390/Makefile: Remove references to + enable-lock-elision. + 2017-12-05 Joseph Myers <joseph@codesourcery.com> * stdlib/strtod.c: Include <bits/floatn.h>. diff --git a/INSTALL b/INSTALL index d1a34c2a90..e59c11dabd 100644 --- a/INSTALL +++ b/INSTALL @@ -116,9 +116,6 @@ will be used, and CFLAGS sets optimization options for the compiler. formats may change over time. Consult the 'timezone' subdirectory for more details. -'--enable-lock-elision=yes' - Enable lock elision for pthread mutexes by default. - '--enable-stack-protector' '--enable-stack-protector=strong' '--enable-stack-protector=all' diff --git a/config.h.in b/config.h.in index 8d76dadca2..3c91d597ff 100644 --- a/config.h.in +++ b/config.h.in @@ -131,9 +131,6 @@ /* Define if __stack_chk_guard canary should be randomized at program startup. */ #undef ENABLE_STACKGUARD_RANDOMIZE -/* Define if lock elision should be enabled by default. */ -#undef ENABLE_LOCK_ELISION - /* Package description. */ #undef PKGVERSION diff --git a/config.make.in b/config.make.in index bd84a5747d..9da77d1efa 100644 --- a/config.make.in +++ b/config.make.in @@ -99,7 +99,6 @@ build-nscd = @build_nscd@ use-nscd = @use_nscd@ build-hardcoded-path-in-tests= @hardcoded_path_in_tests@ build-pt-chown = @build_pt_chown@ -enable-lock-elision = @enable_lock_elision@ have-tunables = @have_tunables@ # Build tools. 
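With the --enable-lock-elision configure switch removed above, lock elision is now opted into at run time through the glibc.elision.* tunables added to elf/dl-tunables.list and documented in manual/tunables.texi further down. As a hedged illustration only (the program name and the non-default skip_lock_busy value are invented for this example), a run that enables elision could look like:

GLIBC_TUNABLES=glibc.elision.enable=1:glibc.elision.skip_lock_busy=5 ./myprog

glibc.elision.enable only takes effect when the hardware supports transactional execution (HTM on POWER, TE on s390, RTM on x86); otherwise it has no effect, as the elision-conf.c hunks below show.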
diff --git a/configure b/configure index caba102846..dd8b8c95e6 100755 --- a/configure +++ b/configure @@ -679,7 +679,6 @@ enable_werror all_warnings force_install bindnow -enable_lock_elision hardcoded_path_in_tests enable_timezone_tools use_default_link @@ -768,7 +767,6 @@ enable_profile enable_timezone_tools enable_hardcoded_path_in_tests enable_stackguard_randomization -enable_lock_elision enable_hidden_plt enable_bind_now enable_stack_protector @@ -1428,8 +1426,6 @@ Optional Features: --enable-stackguard-randomization initialize __stack_chk_guard canary with a random number at program start - --enable-lock-elision=yes/no - Enable lock elision for pthread mutexes by default --disable-hidden-plt do not hide internal function calls to avoid PLT --enable-bind-now disable lazy relocations in DSOs --enable-stack-protector=[yes|no|all|strong] @@ -3395,19 +3391,6 @@ if test "$enable_stackguard_randomize" = yes; then fi -# Check whether --enable-lock-elision was given. -if test "${enable_lock_elision+set}" = set; then : - enableval=$enable_lock_elision; enable_lock_elision=$enableval -else - enable_lock_elision=no -fi - - -if test "$enable_lock_elision" = yes ; then - $as_echo "#define ENABLE_LOCK_ELISION 1" >>confdefs.h - -fi - # Check whether --enable-hidden-plt was given. if test "${enable_hidden_plt+set}" = set; then : enableval=$enable_hidden_plt; hidden=$enableval diff --git a/configure.ac b/configure.ac index 9707ae4d8e..f85a50da53 100644 --- a/configure.ac +++ b/configure.ac @@ -199,16 +199,6 @@ if test "$enable_stackguard_randomize" = yes; then AC_DEFINE(ENABLE_STACKGUARD_RANDOMIZE) fi -AC_ARG_ENABLE([lock-elision], - AC_HELP_STRING([--enable-lock-elision[=yes/no]], - [Enable lock elision for pthread mutexes by default]), - [enable_lock_elision=$enableval], - [enable_lock_elision=no]) -AC_SUBST(enable_lock_elision) -if test "$enable_lock_elision" = yes ; then - AC_DEFINE(ENABLE_LOCK_ELISION) -fi - AC_ARG_ENABLE([hidden-plt], AC_HELP_STRING([--disable-hidden-plt], [do not hide internal function calls to avoid PLT]), diff --git a/elf/dl-tunables.list b/elf/dl-tunables.list index c188c6ad52..ec0fe20787 100644 --- a/elf/dl-tunables.list +++ b/elf/dl-tunables.list @@ -96,4 +96,38 @@ glibc { default: HWCAP_IMPORTANT } } + + elision { + enable { + type: INT_32 + minval: 0 + maxval: 1 + security_level: SXID_ERASE + } + skip_lock_busy { + type: INT_32 + default: 3 + security_level: SXID_ERASE + } + skip_lock_internal_abort { + type: INT_32 + default: 3 + security_level: SXID_ERASE + } + skip_lock_after_retries { + type: INT_32 + default: 3 + security_level: SXID_ERASE + } + tries { + type: INT_32 + default: 3 + security_level: SXID_ERASE + } + skip_trylock_internal_abort { + type: INT_32 + default: 3 + security_level: SXID_ERASE + } + } } diff --git a/manual/install.texi b/manual/install.texi index ea559e4e72..a3cb09dafc 100644 --- a/manual/install.texi +++ b/manual/install.texi @@ -145,9 +145,6 @@ Note that you need to make sure the external tools are kept in sync with the versions that @theglibc{} expects as the data formats may change over time. Consult the @file{timezone} subdirectory for more details. -@item --enable-lock-elision=yes -Enable lock elision for pthread mutexes by default. - @item --enable-stack-protector @itemx --enable-stack-protector=strong @itemx --enable-stack-protector=all diff --git a/manual/tunables.texi b/manual/tunables.texi index f503daef56..e851b95c59 100644 --- a/manual/tunables.texi +++ b/manual/tunables.texi @@ -31,6 +31,7 @@ their own namespace. 
@menu * Tunable names:: The structure of a tunable name * Memory Allocation Tunables:: Tunables in the memory allocation subsystem +* Elision Tunables:: Tunables in the elision subsystem * Hardware Capability Tunables:: Tunables that modify the hardware capabilities seen by @theglibc{} @end menu @@ -212,6 +213,74 @@ pre-fill the per-thread cache with. The default, or when set to zero, is no limit. @end deftp +@node Elision Tunables +@section Elision Tunables +@cindex elision tunables +@cindex tunables, elision + +@deftp {Tunable namespace} glibc.elision +Contended locks are usually slow and can lead to performance and scalability +issues in multithreaded code. Lock elision will, under certain conditions, use +memory transactions to elide locks and improve performance. +Elision behavior can be modified by setting the following tunables in +the @code{elision} namespace: +@end deftp + +@deftp Tunable glibc.elision.enable +The @code{glibc.elision.enable} tunable enables lock elision if the feature is +supported by the hardware. If elision is not supported by the hardware this +tunable has no effect. + +Elision tunables are supported for 64-bit Intel, IBM POWER, and z System +architectures. +@end deftp + +@deftp Tunable glibc.elision.skip_lock_busy +The @code{glibc.elision.skip_lock_busy} tunable sets how many times to use a +non-transactional lock after a transactional failure has occurred because the +lock is already acquired. Expressed in number of lock acquisition attempts. + +The default value of this tunable is @samp{3}. +@end deftp + +@deftp Tunable glibc.elision.skip_lock_internal_abort +The @code{glibc.elision.skip_lock_internal_abort} tunable sets how many times +the thread should avoid using elision if a transaction aborted for any reason +other than a different thread's memory accesses. Expressed in number of lock +acquisition attempts. + +The default value of this tunable is @samp{3}. +@end deftp + +@deftp Tunable glibc.elision.skip_lock_after_retries +The @code{glibc.elision.skip_lock_after_retries} tunable sets how many times +to try to elide a lock with transactions that only failed due to a different +thread's memory accesses, before falling back to a regular lock. +Expressed in number of lock elision attempts. + +This tunable is supported only on IBM POWER and z System architectures. + +The default value of this tunable is @samp{3}. +@end deftp + +@deftp Tunable glibc.elision.tries +The @code{glibc.elision.tries} tunable sets how many times to retry elision if +there is a chance for the transaction to finish execution, e.g., it was not +aborted due to the lock being already acquired. If elision is not supported +by the hardware this tunable is set to @samp{0} to avoid retries. + +The default value of this tunable is @samp{3}. +@end deftp + +@deftp Tunable glibc.elision.skip_trylock_internal_abort +The @code{glibc.elision.skip_trylock_internal_abort} tunable sets how many +times the thread should avoid trying the lock if a transaction aborted due to +reasons other than a different thread's memory accesses. Expressed in number +of try lock attempts. + +The default value of this tunable is @samp{3}.
+@end deftp + @node Hardware Capability Tunables @section Hardware Capability Tunables @cindex hardware capability tunables diff --git a/nptl/Makefile b/nptl/Makefile index b0215e12d7..11e6ecd88b 100644 --- a/nptl/Makefile +++ b/nptl/Makefile @@ -714,6 +714,10 @@ endif $(objpfx)tst-compat-forwarder: $(objpfx)tst-compat-forwarder-mod.so +# Disable elision for tst-mutex8 so it can verify error case for +# destroying a mutex. +tst-mutex8-ENV = GLIBC_TUNABLES=glibc.elision.enable=0 + # The tests here better do not run in parallel ifneq ($(filter %tests,$(MAKECMDGOALS)),) .NOTPARALLEL: diff --git a/nptl/tst-mutex8.c b/nptl/tst-mutex8.c index 1d288d243c..ef59db55de 100644 --- a/nptl/tst-mutex8.c +++ b/nptl/tst-mutex8.c @@ -127,9 +127,8 @@ check_type (const char *mas, pthread_mutexattr_t *ma) return 1; } - /* Elided mutexes don't fail destroy. If elision is not explicitly disabled - we don't know, so can also not check this. */ -#ifndef ENABLE_LOCK_ELISION + /* Elided mutexes don't fail destroy, but this test is run with + elision disabled so we can test them. */ e = pthread_mutex_destroy (m); if (e == 0) { @@ -142,7 +141,6 @@ check_type (const char *mas, pthread_mutexattr_t *ma) mas); return 1; } -#endif if (pthread_mutex_unlock (m) != 0) { @@ -157,7 +155,6 @@ check_type (const char *mas, pthread_mutexattr_t *ma) } /* Elided mutexes don't fail destroy. */ -#ifndef ENABLE_LOCK_ELISION e = pthread_mutex_destroy (m); if (e == 0) { @@ -171,7 +168,6 @@ mutex_destroy of self-trylocked mutex did not return EBUSY %s\n", mas); return 1; } -#endif if (pthread_mutex_unlock (m) != 0) { @@ -207,7 +203,6 @@ mutex_destroy of self-trylocked mutex did not return EBUSY %s\n", } /* Elided mutexes don't fail destroy. */ -#ifndef ENABLE_LOCK_ELISION e = pthread_mutex_destroy (m); if (e == 0) { @@ -220,7 +215,6 @@ mutex_destroy of self-trylocked mutex did not return EBUSY %s\n", mutex_destroy of condvar-used mutex did not return EBUSY for %s\n", mas); return 1; } -#endif done = true; if (pthread_cond_signal (&c) != 0) @@ -280,7 +274,6 @@ mutex_destroy of condvar-used mutex did not return EBUSY for %s\n", mas); } /* Elided mutexes don't fail destroy. */ -#ifndef ENABLE_LOCK_ELISION e = pthread_mutex_destroy (m); if (e == 0) { @@ -295,7 +288,6 @@ mutex_destroy of condvar-used mutex did not return EBUSY for %s\n", mas); mas); return 1; } -#endif if (pthread_cancel (th) != 0) { diff --git a/sysdeps/powerpc/nptl/elide.h b/sysdeps/powerpc/nptl/elide.h index 1c42814b71..06986ccbce 100644 --- a/sysdeps/powerpc/nptl/elide.h +++ b/sysdeps/powerpc/nptl/elide.h @@ -19,7 +19,6 @@ #ifndef ELIDE_PPC_H # define ELIDE_PPC_H -#ifdef ENABLE_LOCK_ELISION # include <htm.h> # include <elision-conf.h> @@ -114,12 +113,4 @@ __elide_unlock (int is_lock_free) # define ELIDE_UNLOCK(is_lock_free) \ __elide_unlock (is_lock_free) -# else - -# define ELIDE_LOCK(adapt_count, is_lock_free) 0 -# define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) 0 -# define ELIDE_UNLOCK(is_lock_free) 0 - -#endif /* ENABLE_LOCK_ELISION */ - #endif diff --git a/sysdeps/powerpc/powerpc32/sysdep.h b/sysdeps/powerpc/powerpc32/sysdep.h index 965ea43c94..1d2ff732ee 100644 --- a/sysdeps/powerpc/powerpc32/sysdep.h +++ b/sysdeps/powerpc/powerpc32/sysdep.h @@ -90,7 +90,7 @@ GOT_LABEL: ; \ cfi_endproc; \ ASM_SIZE_DIRECTIVE(name) -#if ! IS_IN(rtld) && defined (ENABLE_LOCK_ELISION) +#if ! 
IS_IN(rtld) # define ABORT_TRANSACTION \ cmpwi 2,0; \ beq 1f; \ diff --git a/sysdeps/powerpc/powerpc64/sysdep.h b/sysdeps/powerpc/powerpc64/sysdep.h index ab5f395cfd..bff184e374 100644 --- a/sysdeps/powerpc/powerpc64/sysdep.h +++ b/sysdeps/powerpc/powerpc64/sysdep.h @@ -263,7 +263,7 @@ LT_LABELSUFFIX(name,_name_end): ; \ TRACEBACK_MASK(name,mask); \ END_2(name) -#if !IS_IN(rtld) && defined (ENABLE_LOCK_ELISION) +#if !IS_IN(rtld) # define ABORT_TRANSACTION \ cmpdi 13,0; \ beq 1f; \ diff --git a/sysdeps/powerpc/sysdep.h b/sysdeps/powerpc/sysdep.h index f07b959eee..d1a9bd9b55 100644 --- a/sysdeps/powerpc/sysdep.h +++ b/sysdeps/powerpc/sysdep.h @@ -21,10 +21,8 @@ */ #define _SYSDEPS_SYSDEP_H 1 #include <bits/hwcap.h> -#ifdef ENABLE_LOCK_ELISION #include <tls.h> #include <htm.h> -#endif #define PPC_FEATURE_970 (PPC_FEATURE_POWER4 + PPC_FEATURE_HAS_ALTIVEC) @@ -176,7 +174,7 @@ we abort transaction just before syscalls. [1] Documentation/powerpc/transactional_memory.txt [Syscalls] */ -#if !IS_IN(rtld) && defined (ENABLE_LOCK_ELISION) +#if !IS_IN(rtld) # define ABORT_TRANSACTION \ ({ \ if (THREAD_GET_TM_CAPABLE ()) \ diff --git a/sysdeps/s390/configure b/sysdeps/s390/configure index d4a4a3dcf8..74b415f2ab 100644 --- a/sysdeps/s390/configure +++ b/sysdeps/s390/configure @@ -35,7 +35,7 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_gcc_builtin_tbegin" >&5 $as_echo "$libc_cv_gcc_builtin_tbegin" >&6; } -if test "$enable_lock_elision" = yes && test "$libc_cv_gcc_builtin_tbegin" = no ; then +if test "$libc_cv_gcc_builtin_tbegin" = no ; then critic_missing="$critic_missing The used GCC has no support for __builtin_tbegin, which is needed for lock-elision on target S390." fi diff --git a/sysdeps/s390/configure.ac b/sysdeps/s390/configure.ac index 7d0b5ce46f..1cdb021282 100644 --- a/sysdeps/s390/configure.ac +++ b/sysdeps/s390/configure.ac @@ -26,7 +26,7 @@ else fi rm -f conftest* ]) -if test "$enable_lock_elision" = yes && test "$libc_cv_gcc_builtin_tbegin" = no ; then +if test "$libc_cv_gcc_builtin_tbegin" = no ; then critic_missing="$critic_missing The used GCC has no support for __builtin_tbegin, which is needed for lock-elision on target S390." fi diff --git a/sysdeps/s390/nptl/bits/pthreadtypes-arch.h b/sysdeps/s390/nptl/bits/pthreadtypes-arch.h index 1ae277367d..fd15931974 100644 --- a/sysdeps/s390/nptl/bits/pthreadtypes-arch.h +++ b/sysdeps/s390/nptl/bits/pthreadtypes-arch.h @@ -40,11 +40,7 @@ /* Definitions for internal mutex struct. */ #define __PTHREAD_COMPAT_PADDING_MID #define __PTHREAD_COMPAT_PADDING_END -#ifdef ENABLE_LOCK_ELISION #define __PTHREAD_MUTEX_LOCK_ELISION 1 -#else -#define __PTHREAD_MUTEX_LOCK_ELISION 0 -#endif #define __PTHREAD_MUTEX_NUSERS_AFTER_KIND (__WORDSIZE != 64) #define __PTHREAD_MUTEX_USE_UNION (__WORDSIZE != 64) diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-conf.c b/sysdeps/unix/sysv/linux/powerpc/elision-conf.c index f631f0a035..06361e6b2f 100644 --- a/sysdeps/unix/sysv/linux/powerpc/elision-conf.c +++ b/sysdeps/unix/sysv/linux/powerpc/elision-conf.c @@ -22,6 +22,11 @@ #include <unistd.h> #include <dl-procinfo.h> +#if HAVE_TUNABLES +# define TUNABLE_NAMESPACE elision +#endif +#include <elf/dl-tunables.h> + /* Reasonable initial tuning values, may be revised in the future. This is a conservative initial value. */ @@ -50,7 +55,52 @@ struct elision_config __elision_aconf = DEFAULT locks should be automatically use elision in pthread_mutex_lock(). Disabled for suid programs. Only used when elision is available. 
*/ -int __pthread_force_elision attribute_hidden; +int __pthread_force_elision attribute_hidden = 0; + +#if HAVE_TUNABLES +static inline void +__always_inline +do_set_elision_enable (int32_t elision_enable) +{ + /* Enable elision if it's avaliable in hardware. It's not necessary to check + if __libc_enable_secure isn't enabled since elision_enable will be set + according to the default, which is disabled. */ + if (elision_enable == 1) + __pthread_force_elision = (GLRO (dl_hwcap2) + & PPC_FEATURE2_HAS_HTM) ? 1 : 0; +} + +/* The pthread->elision_enable tunable is 0 or 1 indicating that elision + should be disabled or enabled respectively. The feature will only be used + if it's supported by the hardware. */ + +void +TUNABLE_CALLBACK (set_elision_enable) (tunable_val_t *valp) +{ + int32_t elision_enable = (int32_t) valp->numval; + do_set_elision_enable (elision_enable); +} + +#define TUNABLE_CALLBACK_FNDECL(__name, __type) \ +static inline void \ +__always_inline \ +do_set_elision_ ## __name (__type value) \ +{ \ + __elision_aconf.__name = value; \ +} \ +void \ +TUNABLE_CALLBACK (set_elision_ ## __name) (tunable_val_t *valp) \ +{ \ + __type value = (__type) (valp)->numval; \ + do_set_elision_ ## __name (value); \ +} + +TUNABLE_CALLBACK_FNDECL (skip_lock_busy, int32_t); +TUNABLE_CALLBACK_FNDECL (skip_lock_internal_abort, int32_t); +TUNABLE_CALLBACK_FNDECL (skip_lock_out_of_tbegin_retries, int32_t); +TUNABLE_CALLBACK_FNDECL (try_tbegin, int32_t); +TUNABLE_CALLBACK_FNDECL (skip_trylock_internal_abort, int32_t); +#endif /* Initialize elision. */ @@ -59,13 +109,26 @@ elision_init (int argc __attribute__ ((unused)), char **argv __attribute__ ((unused)), char **environ) { -#ifdef ENABLE_LOCK_ELISION - int elision_available = (GLRO (dl_hwcap2) & PPC_FEATURE2_HAS_HTM) ? 1 : 0; - __pthread_force_elision = __libc_enable_secure ? 0 : elision_available; +#if HAVE_TUNABLES + /* Elision depends on tunables and must be explicitly turned on by setting + the appropriate tunable on a supported platform. */ + + TUNABLE_GET (enable, int32_t, + TUNABLE_CALLBACK (set_elision_enable)); + TUNABLE_GET (skip_lock_busy, int32_t, + TUNABLE_CALLBACK (set_elision_skip_lock_busy)); + TUNABLE_GET (skip_lock_internal_abort, int32_t, + TUNABLE_CALLBACK (set_elision_skip_lock_internal_abort)); + TUNABLE_GET (skip_lock_after_retries, int32_t, + TUNABLE_CALLBACK (set_elision_skip_lock_out_of_tbegin_retries)); + TUNABLE_GET (tries, int32_t, + TUNABLE_CALLBACK (set_elision_try_tbegin)); + TUNABLE_GET (skip_trylock_internal_abort, int32_t, + TUNABLE_CALLBACK (set_elision_skip_trylock_internal_abort)); #endif + if (!__pthread_force_elision) - /* Disable elision on rwlocks. */ - __elision_aconf.try_tbegin = 0; + __elision_aconf.try_tbegin = 0; /* Disable elision on rwlocks. */ } #ifdef SHARED diff --git a/sysdeps/unix/sysv/linux/powerpc/force-elision.h b/sysdeps/unix/sysv/linux/powerpc/force-elision.h index 318f7915c7..d1feeeb01e 100644 --- a/sysdeps/unix/sysv/linux/powerpc/force-elision.h +++ b/sysdeps/unix/sysv/linux/powerpc/force-elision.h @@ -16,7 +16,6 @@ License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ -#ifdef ENABLE_LOCK_ELISION /* Automatically enable elision for existing user lock kinds. 
*/ #define FORCE_ELISION(m, s) \ if (__pthread_force_elision \ @@ -25,4 +24,3 @@ mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP; \ s; \ } -#endif diff --git a/sysdeps/unix/sysv/linux/s390/Makefile b/sysdeps/unix/sysv/linux/s390/Makefile index c5f544d139..77f38523b5 100644 --- a/sysdeps/unix/sysv/linux/s390/Makefile +++ b/sysdeps/unix/sysv/linux/s390/Makefile @@ -16,7 +16,6 @@ sysdep_routines += dl-vdso endif ifeq ($(subdir),nptl) -ifeq ($(enable-lock-elision),yes) libpthread-sysdep_routines += elision-lock elision-unlock elision-timed \ elision-trylock @@ -26,7 +25,6 @@ CFLAGS-elision-timed.c = $(elision-CFLAGS) CFLAGS-elision-trylock.c = $(elision-CFLAGS) CFLAGS-elision-unlock.c = $(elision-CFLAGS) endif -endif ifeq ($(subdir),misc) tests += tst-ptrace-singleblock diff --git a/sysdeps/unix/sysv/linux/s390/elision-conf.c b/sysdeps/unix/sysv/linux/s390/elision-conf.c index cc0fdef2aa..ab334cb79b 100644 --- a/sysdeps/unix/sysv/linux/s390/elision-conf.c +++ b/sysdeps/unix/sysv/linux/s390/elision-conf.c @@ -22,6 +22,11 @@ #include <unistd.h> #include <dl-procinfo.h> +#if HAVE_TUNABLES +# define TUNABLE_NAMESPACE elision +#endif +#include <elf/dl-tunables.h> + /* Reasonable initial tuning values, may be revised in the future. This is a conservative initial value. */ @@ -53,6 +58,50 @@ struct elision_config __elision_aconf = int __pthread_force_elision attribute_hidden = 0; +#if HAVE_TUNABLES +static inline void +__always_inline +do_set_elision_enable (int32_t elision_enable) +{ + /* Enable elision if it's avaliable in hardware. It's not necessary to check + if __libc_enable_secure isn't enabled since elision_enable will be set + according to the default, which is disabled. */ + if (elision_enable == 1) + __pthread_force_elision = (GLRO (dl_hwcap) & HWCAP_S390_TE) ? 1 : 0; +} + +/* The pthread->elision_enable tunable is 0 or 1 indicating that elision + should be disabled or enabled respectively. The feature will only be used + if it's supported by the hardware. */ + +void +TUNABLE_CALLBACK (set_elision_enable) (tunable_val_t *valp) +{ + int32_t elision_enable = (int32_t) valp->numval; + do_set_elision_enable (elision_enable); +} + +#define TUNABLE_CALLBACK_FNDECL(__name, __type) \ +static inline void \ +__always_inline \ +do_set_elision_ ## __name (__type value) \ +{ \ + __elision_aconf.__name = value; \ +} \ +void \ +TUNABLE_CALLBACK (set_elision_ ## __name) (tunable_val_t *valp) \ +{ \ + __type value = (__type) (valp)->numval; \ + do_set_elision_ ## __name (value); \ +} + +TUNABLE_CALLBACK_FNDECL (skip_lock_busy, int32_t); +TUNABLE_CALLBACK_FNDECL (skip_lock_internal_abort, int32_t); +TUNABLE_CALLBACK_FNDECL (skip_lock_out_of_tbegin_retries, int32_t); +TUNABLE_CALLBACK_FNDECL (try_tbegin, int32_t); +TUNABLE_CALLBACK_FNDECL (skip_trylock_internal_abort, int32_t); +#endif + /* Initialize elison. */ static void @@ -60,11 +109,26 @@ elision_init (int argc __attribute__ ((unused)), char **argv __attribute__ ((unused)), char **environ) { - /* Set when the CPU and the kernel supports transactional execution. - When false elision is never attempted. */ - int elision_available = (GLRO (dl_hwcap) & HWCAP_S390_TE) ? 1 : 0; +#if HAVE_TUNABLES + /* Elision depends on tunables and must be explicitly turned on by setting + the appropriate tunable on a supported platform. 
*/ + + TUNABLE_GET (enable, int32_t, + TUNABLE_CALLBACK (set_elision_enable)); + TUNABLE_GET (skip_lock_busy, int32_t, + TUNABLE_CALLBACK (set_elision_skip_lock_busy)); + TUNABLE_GET (skip_lock_internal_abort, int32_t, + TUNABLE_CALLBACK (set_elision_skip_lock_internal_abort)); + TUNABLE_GET (skip_lock_after_retries, int32_t, + TUNABLE_CALLBACK (set_elision_skip_lock_out_of_tbegin_retries)); + TUNABLE_GET (tries, int32_t, + TUNABLE_CALLBACK (set_elision_try_tbegin)); + TUNABLE_GET (skip_trylock_internal_abort, int32_t, + TUNABLE_CALLBACK (set_elision_skip_trylock_internal_abort)); +#endif - __pthread_force_elision = __libc_enable_secure ? 0 : elision_available; + if (!__pthread_force_elision) + __elision_aconf.try_tbegin = 0; /* Disable elision on rwlocks. */ } #ifdef SHARED diff --git a/sysdeps/unix/sysv/linux/s390/elision-conf.h b/sysdeps/unix/sysv/linux/s390/elision-conf.h index 3143f3b114..32f0ed3b8c 100644 --- a/sysdeps/unix/sysv/linux/s390/elision-conf.h +++ b/sysdeps/unix/sysv/linux/s390/elision-conf.h @@ -15,7 +15,6 @@ You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ -#ifdef ENABLE_LOCK_ELISION #ifndef _ELISION_CONF_H #define _ELISION_CONF_H 1 @@ -41,4 +40,3 @@ extern int __pthread_force_elision attribute_hidden; #define HAVE_ELISION 1 #endif -#endif diff --git a/sysdeps/unix/sysv/linux/s390/force-elision.h b/sysdeps/unix/sysv/linux/s390/force-elision.h index 3ae3bcd566..8e1e33e1c5 100644 --- a/sysdeps/unix/sysv/linux/s390/force-elision.h +++ b/sysdeps/unix/sysv/linux/s390/force-elision.h @@ -16,7 +16,6 @@ License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ -#ifdef ENABLE_LOCK_ELISION /* Automatically enable elision for existing user lock kinds. */ #define FORCE_ELISION(m, s) \ if (__pthread_force_elision \ @@ -25,4 +24,3 @@ mutex->__data.__kind |= PTHREAD_MUTEX_ELISION_NP; \ s; \ } -#endif diff --git a/sysdeps/unix/sysv/linux/s390/lowlevellock.h b/sysdeps/unix/sysv/linux/s390/lowlevellock.h index 604137f7f2..48f87a85f5 100644 --- a/sysdeps/unix/sysv/linux/s390/lowlevellock.h +++ b/sysdeps/unix/sysv/linux/s390/lowlevellock.h @@ -22,7 +22,6 @@ #include <sysdeps/nptl/lowlevellock.h> /* Transactional lock elision definitions. */ -# ifdef ENABLE_LOCK_ELISION extern int __lll_timedlock_elision (int *futex, short *adapt_count, const struct timespec *timeout, int private) attribute_hidden; @@ -45,6 +44,5 @@ extern int __lll_trylock_elision(int *futex, short *adapt_count) __lll_unlock_elision (&(futex), &(adapt_count), private) # define lll_trylock_elision(futex, adapt_count) \ __lll_trylock_elision(&(futex), &(adapt_count)) -# endif /* ENABLE_LOCK_ELISION */ #endif /* lowlevellock.h */ diff --git a/sysdeps/unix/sysv/linux/x86/elision-conf.c b/sysdeps/unix/sysv/linux/x86/elision-conf.c index 673b0005a7..7e9fbf9382 100644 --- a/sysdeps/unix/sysv/linux/x86/elision-conf.c +++ b/sysdeps/unix/sysv/linux/x86/elision-conf.c @@ -22,6 +22,11 @@ #include <elision-conf.h> #include <unistd.h> +#if HAVE_TUNABLES +# define TUNABLE_NAMESPACE elision +#endif +#include <elf/dl-tunables.h> + /* Reasonable initial tuning values, may be revised in the future. This is a conservative initial value. */ @@ -48,21 +53,76 @@ struct elision_config __elision_aconf = pthread_mutex_lock(). Disabled for suid programs. Only used when elision is available. 
*/ -int __pthread_force_elision attribute_hidden; +int __pthread_force_elision attribute_hidden = 0; + +#if HAVE_TUNABLES +static inline void +__always_inline +do_set_elision_enable (int32_t elision_enable) +{ + /* Enable elision if it's avaliable in hardware. It's not necessary to check + if __libc_enable_secure isn't enabled since elision_enable will be set + according to the default, which is disabled. */ + if (elision_enable == 1) + __pthread_force_elision = HAS_CPU_FEATURE (RTM) ? 1 : 0; +} + +/* The pthread->elision_enable tunable is 0 or 1 indicating that elision + should be disabled or enabled respectively. The feature will only be used + if it's supported by the hardware. */ -/* Initialize elison. */ +void +TUNABLE_CALLBACK (set_elision_enable) (tunable_val_t *valp) +{ + int32_t elision_enable = (int32_t) valp->numval; + do_set_elision_enable (elision_enable); +} + +#define TUNABLE_CALLBACK_FNDECL(__name, __type) \ +static inline void \ +__always_inline \ +do_set_elision_ ## __name (__type value) \ +{ \ + __elision_aconf.__name = value; \ +} \ +void \ +TUNABLE_CALLBACK (set_elision_ ## __name) (tunable_val_t *valp) \ +{ \ + __type value = (__type) (valp)->numval; \ + do_set_elision_ ## __name (value); \ +} + +TUNABLE_CALLBACK_FNDECL (skip_lock_busy, int32_t); +TUNABLE_CALLBACK_FNDECL (skip_lock_internal_abort, int32_t); +TUNABLE_CALLBACK_FNDECL (retry_try_xbegin, int32_t); +TUNABLE_CALLBACK_FNDECL (skip_trylock_internal_abort, int32_t); +#endif + +/* Initialize elision. */ static void elision_init (int argc __attribute__ ((unused)), char **argv __attribute__ ((unused)), char **environ) { - int elision_available = HAS_CPU_FEATURE (RTM); -#ifdef ENABLE_LOCK_ELISION - __pthread_force_elision = __libc_enable_secure ? 0 : elision_available; +#if HAVE_TUNABLES + /* Elision depends on tunables and must be explicitly turned on by setting + the appropriate tunable on a supported platform. */ + + TUNABLE_GET (enable, int32_t, + TUNABLE_CALLBACK (set_elision_enable)); + TUNABLE_GET (skip_lock_busy, int32_t, + TUNABLE_CALLBACK (set_elision_skip_lock_busy)); + TUNABLE_GET (skip_lock_internal_abort, int32_t, + TUNABLE_CALLBACK (set_elision_skip_lock_internal_abort)); + TUNABLE_GET (tries, int32_t, + TUNABLE_CALLBACK (set_elision_retry_try_xbegin)); + TUNABLE_GET (skip_trylock_internal_abort, int32_t, + TUNABLE_CALLBACK (set_elision_skip_trylock_internal_abort)); #endif - if (!elision_available) - __elision_aconf.retry_try_xbegin = 0; /* Disable elision on rwlocks */ + + if (!__pthread_force_elision) + __elision_aconf.retry_try_xbegin = 0; /* Disable elision on rwlocks. */ } #ifdef SHARED |
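The powerpc, s390, and x86 elision-conf.c hunks above all add the same TUNABLE_CALLBACK_FNDECL macro. The following is a rough, self-contained sketch of that pattern under simplified assumptions: tunable_val_t, elision_aconf, the set_elision_* names, and the main driver are stand-ins for illustration, not the real glibc tunables framework.

/* Rough sketch: what one TUNABLE_CALLBACK_FNDECL invocation expands to.
   tunable_val_t, elision_aconf, and main are simplified stand-ins, not
   glibc internals.  */
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t numval; } tunable_val_t;  /* stand-in type */

struct elision_config
{
  int32_t skip_lock_busy;      /* see glibc.elision.skip_lock_busy */
  int32_t retry_try_xbegin;    /* see glibc.elision.tries (x86 field name) */
};

static struct elision_config elision_aconf = { 3, 3 };

/* Same shape as the macro in the patch: a do_set_elision_* helper plus a
   callback that copies the parsed tunable value into the struct field.  */
#define TUNABLE_CALLBACK_FNDECL(name, type)       \
static void                                       \
do_set_elision_ ## name (type value)              \
{                                                 \
  elision_aconf.name = value;                     \
}                                                 \
static void                                       \
set_elision_ ## name (tunable_val_t *valp)        \
{                                                 \
  type value = (type) valp->numval;               \
  do_set_elision_ ## name (value);                \
}

TUNABLE_CALLBACK_FNDECL (skip_lock_busy, int32_t)
TUNABLE_CALLBACK_FNDECL (retry_try_xbegin, int32_t)

int
main (void)
{
  /* Simulate the framework invoking the callbacks, e.g. after parsing
     GLIBC_TUNABLES=glibc.elision.skip_lock_busy=5:glibc.elision.tries=2.  */
  tunable_val_t busy = { .numval = 5 };
  tunable_val_t tries = { .numval = 2 };
  set_elision_skip_lock_busy (&busy);
  set_elision_retry_try_xbegin (&tries);
  printf ("skip_lock_busy=%d retry_try_xbegin=%d\n",
          (int) elision_aconf.skip_lock_busy,
          (int) elision_aconf.retry_try_xbegin);
  return 0;
}

In the patch itself, elision_init registers these callbacks with TUNABLE_GET, so each glibc.elision.* value parsed from GLIBC_TUNABLES is copied into the matching __elision_aconf field, while the enable callback additionally checks the hardware capability (PPC_FEATURE2_HAS_HTM, HWCAP_S390_TE, or RTM) before setting __pthread_force_elision.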