about summary refs log tree commit diff
path: root/sysdeps
diff options
context:
space:
mode:
authorStefan Liebler <stli@linux.vnet.ibm.com>2016-12-20 15:12:48 +0100
committerStefan Liebler <stli@linux.vnet.ibm.com>2016-12-20 15:12:48 +0100
commit8bfc4a2ab4bebdf86c151665aae8a266e2f18fb4 (patch)
treeb2b0f195981735dd34afe82f1cd524ba73d11285 /sysdeps
parentc813dae5d8e469262f96b1cda0191ea076f10809 (diff)
downloadglibc-8bfc4a2ab4bebdf86c151665aae8a266e2f18fb4.tar.gz
glibc-8bfc4a2ab4bebdf86c151665aae8a266e2f18fb4.tar.xz
glibc-8bfc4a2ab4bebdf86c151665aae8a266e2f18fb4.zip
S390: Use own tbegin macro instead of __builtin_tbegin.
This patch defines __libc_tbegin, __libc_tend, __libc_tabort and
__libc_tx_nesting_depth in htm.h which replaces the direct usage of
equivalent gcc builtins.

We have to use our own inline assembly instead of __builtin_tbegin,
as tbegin has to filter program interruptions which can't be done with
the builtin.  Before this change, e.g. a segmentation fault within a
transaction led to a coredump where the instruction pointer pointed
behind the tbegin instruction instead of the real failing one.
Now the transaction aborts and the code should be reexecuted by the
fallback path without transactions.  The segmentation fault will
produce a coredump with the real failing instruction.

The fpc is not saved before starting the transaction.  If e.g. the
rounding mode is changed and the transaction aborts afterwards,
the builtin will not restore the fpc.  This is now done with the
__libc_tbegin macro.

Now the call saved fprs have to be saved / restored in the
__libc_tbegin macro.  Using the gcc builtin had forced the saving /
restoring of fprs at begin / end of e.g. __lll_lock_elision function.
The new macro saves these fprs before tbegin instruction and only
restores them on a transaction abort.  Restoring is not needed on
a successfully started transaction.

The used inline assembly does not clobber the fprs / vrs!
Clobbering the latter ones would force the compiler to save / restore
the call saved fprs as those overlap with the vrs, but they only
need to be restored if the transaction fails.  Thus the user of the
tbegin macros has to compile the file / function with -msoft-float.
It prevents gcc from using fprs / vrs.

ChangeLog:

	* sysdeps/unix/sysv/linux/s390/Makefile (elision-CFLAGS):
	Add -msoft-float.
	* sysdeps/unix/sysv/linux/s390/htm.h: New File.
	* sysdeps/unix/sysv/linux/s390/elision-lock.c:
	Use __libc_t* transaction macros instead of __builtin_t*.
	* sysdeps/unix/sysv/linux/s390/elision-trylock.c: Likewise.
	* sysdeps/unix/sysv/linux/s390/elision-unlock.c: Likewise.
Diffstat (limited to 'sysdeps')
-rw-r--r--sysdeps/unix/sysv/linux/s390/Makefile2
-rw-r--r--sysdeps/unix/sysv/linux/s390/elision-lock.c16
-rw-r--r--sysdeps/unix/sysv/linux/s390/elision-trylock.c16
-rw-r--r--sysdeps/unix/sysv/linux/s390/elision-unlock.c6
-rw-r--r--sysdeps/unix/sysv/linux/s390/htm.h149
5 files changed, 164 insertions, 25 deletions
diff --git a/sysdeps/unix/sysv/linux/s390/Makefile b/sysdeps/unix/sysv/linux/s390/Makefile
index f8ed013e9e..3867c33d91 100644
--- a/sysdeps/unix/sysv/linux/s390/Makefile
+++ b/sysdeps/unix/sysv/linux/s390/Makefile
@@ -22,7 +22,7 @@ ifeq ($(enable-lock-elision),yes)
 libpthread-sysdep_routines += elision-lock elision-unlock elision-timed \
 			      elision-trylock
 
-elision-CFLAGS = -mhtm
+elision-CFLAGS = -mhtm -msoft-float
 CFLAGS-elision-lock.c = $(elision-CFLAGS)
 CFLAGS-elision-timed.c = $(elision-CFLAGS)
 CFLAGS-elision-trylock.c = $(elision-CFLAGS)
diff --git a/sysdeps/unix/sysv/linux/s390/elision-lock.c b/sysdeps/unix/sysv/linux/s390/elision-lock.c
index 1876d2128d..48cc3db2aa 100644
--- a/sysdeps/unix/sysv/linux/s390/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/s390/elision-lock.c
@@ -19,7 +19,7 @@
 #include <pthread.h>
 #include <pthreadP.h>
 #include <lowlevellock.h>
-#include <htmintrin.h>
+#include <htm.h>
 #include <elision-conf.h>
 #include <stdint.h>
 
@@ -60,27 +60,23 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
       goto use_lock;
     }
 
-  __asm__ volatile (".machinemode \"zarch_nohighgprs\"\n\t"
-		    ".machine \"all\""
-		    : : : "memory");
-
   int try_tbegin;
   for (try_tbegin = aconf.try_tbegin;
        try_tbegin > 0;
        try_tbegin--)
     {
-      unsigned status;
+      int status;
       if (__builtin_expect
-	  ((status = __builtin_tbegin((void *)0)) == _HTM_TBEGIN_STARTED, 1))
+	  ((status = __libc_tbegin ((void *) 0)) == _HTM_TBEGIN_STARTED, 1))
 	{
 	  if (*futex == 0)
 	    return 0;
 	  /* Lock was busy.  Fall back to normal locking.  */
-	  if (__builtin_expect (__builtin_tx_nesting_depth (), 1))
+	  if (__builtin_expect (__libc_tx_nesting_depth (), 1))
 	    {
 	      /* In a non-nested transaction there is no need to abort,
 		 which is expensive.  */
-	      __builtin_tend ();
+	      __libc_tend ();
 	      /* Don't try to use transactions for the next couple of times.
 		 See above for why relaxed MO is sufficient.  */
 	      if (aconf.skip_lock_busy > 0)
@@ -100,7 +96,7 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
 		 because using the default lock with the inner mutex
 		 would abort the outer transaction.
 	      */
-	      __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
+	      __libc_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
 	    }
 	}
       else
diff --git a/sysdeps/unix/sysv/linux/s390/elision-trylock.c b/sysdeps/unix/sysv/linux/s390/elision-trylock.c
index a3252b83ce..e21fc26253 100644
--- a/sysdeps/unix/sysv/linux/s390/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/s390/elision-trylock.c
@@ -19,7 +19,7 @@
 #include <pthread.h>
 #include <pthreadP.h>
 #include <lowlevellock.h>
-#include <htmintrin.h>
+#include <htm.h>
 #include <elision-conf.h>
 
 #define aconf __elision_aconf
@@ -30,15 +30,11 @@
 int
 __lll_trylock_elision (int *futex, short *adapt_count)
 {
-  __asm__ __volatile__ (".machinemode \"zarch_nohighgprs\"\n\t"
-			".machine \"all\""
-			: : : "memory");
-
   /* Implement POSIX semantics by forbiding nesting elided trylocks.
      Sorry.  After the abort the code is re-executed
      non transactional and if the lock was already locked
      return an error.  */
-  if (__builtin_tx_nesting_depth () > 0)
+  if (__libc_tx_nesting_depth () > 0)
     {
       /* Note that this abort may terminate an outermost transaction that
 	 was created outside glibc.
@@ -46,7 +42,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
 	 them to use the default lock instead of retrying transactions
 	 until their try_tbegin is zero.
       */
-      __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
+      __libc_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
     }
 
   /* Only try a transaction if it's worth it.  See __lll_lock_elision for
@@ -54,17 +50,17 @@ __lll_trylock_elision (int *futex, short *adapt_count)
      just a hint.  */
   if (atomic_load_relaxed (adapt_count) <= 0)
     {
-      unsigned status;
+      int status;
 
       if (__builtin_expect
-	  ((status = __builtin_tbegin ((void *)0)) == _HTM_TBEGIN_STARTED, 1))
+	  ((status = __libc_tbegin ((void *) 0)) == _HTM_TBEGIN_STARTED, 1))
 	{
 	  if (*futex == 0)
 	    return 0;
 	  /* Lock was busy.  Fall back to normal locking.  */
 	  /* Since we are in a non-nested transaction there is no need to abort,
 	     which is expensive.  */
-	  __builtin_tend ();
+	  __libc_tend ();
 	  /* Note: Changing the adapt_count here might abort a transaction on a
 	     different cpu, but that could happen anyway when the futex is
 	     acquired, so there's no need to check the nesting depth here.
diff --git a/sysdeps/unix/sysv/linux/s390/elision-unlock.c b/sysdeps/unix/sysv/linux/s390/elision-unlock.c
index 483abe15ff..0b1ade9e5f 100644
--- a/sysdeps/unix/sysv/linux/s390/elision-unlock.c
+++ b/sysdeps/unix/sysv/linux/s390/elision-unlock.c
@@ -18,6 +18,7 @@
 
 #include <pthreadP.h>
 #include <lowlevellock.h>
+#include <htm.h>
 
 int
 __lll_unlock_elision(int *futex, int private)
@@ -27,10 +28,7 @@ __lll_unlock_elision(int *futex, int private)
      have closed the transaction, but that is impossible to detect reliably.  */
   if (*futex == 0)
     {
-      __asm__ volatile (".machinemode \"zarch_nohighgprs\"\n\t"
-			".machine \"all\""
-			: : : "memory");
-      __builtin_tend();
+      __libc_tend ();
     }
   else
     lll_unlock ((*futex), private);
diff --git a/sysdeps/unix/sysv/linux/s390/htm.h b/sysdeps/unix/sysv/linux/s390/htm.h
new file mode 100644
index 0000000000..6b4e8f4634
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/s390/htm.h
@@ -0,0 +1,149 @@
+/* Shared HTM header.  Work around false transactional execution facility
+   intrinsics.
+
+   Copyright (C) 2016 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _HTM_H
+#define _HTM_H 1
+
+#include <htmintrin.h>
+
+#ifdef __s390x__
+# define TX_FPRS_BYTES 64
+# define TX_SAVE_FPRS						\
+  "   std %%f8, 0(%[R_FPRS])\n\t"				\
+  "   std %%f9, 8(%[R_FPRS])\n\t"				\
+  "   std %%f10, 16(%[R_FPRS])\n\t"				\
+  "   std %%f11, 24(%[R_FPRS])\n\t"				\
+  "   std %%f12, 32(%[R_FPRS])\n\t"				\
+  "   std %%f13, 40(%[R_FPRS])\n\t"				\
+  "   std %%f14, 48(%[R_FPRS])\n\t"				\
+  "   std %%f15, 56(%[R_FPRS])\n\t"
+
+# define TX_RESTORE_FPRS					\
+  "   ld %%f8, 0(%[R_FPRS])\n\t"				\
+  "   ld %%f9, 8(%[R_FPRS])\n\t"				\
+  "   ld %%f10, 16(%[R_FPRS])\n\t"				\
+  "   ld %%f11, 24(%[R_FPRS])\n\t"				\
+  "   ld %%f12, 32(%[R_FPRS])\n\t"				\
+  "   ld %%f13, 40(%[R_FPRS])\n\t"				\
+  "   ld %%f14, 48(%[R_FPRS])\n\t"				\
+  "   ld %%f15, 56(%[R_FPRS])\n\t"
+
+#else
+
+# define TX_FPRS_BYTES 16
+# define TX_SAVE_FPRS						\
+  "   std %%f4, 0(%[R_FPRS])\n\t"				\
+  "   std %%f6, 8(%[R_FPRS])\n\t"
+
+# define TX_RESTORE_FPRS					\
+  "   ld %%f4, 0(%[R_FPRS])\n\t"				\
+  "   ld %%f6, 8(%[R_FPRS])\n\t"
+
+#endif /* ! __s390x__  */
+
+/* Use own inline assembly instead of __builtin_tbegin, as tbegin
+   has to filter program interruptions which can't be done with the builtin.
+   Now the fprs have to be saved / restored here, too.
+   The fpc is also not saved / restored with the builtin.
+   The used inline assembly does not clobber the volatile fprs / vrs!
+   Clobbering the latter ones would force the compiler to save / restore
+   the call saved fprs as those overlap with the vrs, but they only need to be
+   restored if the transaction fails but not if the transaction is successfully
+   started.  Thus the user of the tbegin macros in this header file has to
+   compile the file / function with -msoft-float.  It prevents gcc from using
+   fprs / vrs.  */
+#define __libc_tbegin(tdb)						\
+  ({ int __ret;								\
+     int __fpc;								\
+     char __fprs[TX_FPRS_BYTES];					\
+     __asm__ __volatile__ (".machine push\n\t"				\
+			   ".machinemode \"zarch_nohighgprs\"\n\t"	\
+			   ".machine \"all\"\n\t"			\
+			   /* Save state at the outermost transaction.	\
+			      As extracting nesting depth is expensive	\
+			      on at least zEC12, save fprs at inner	\
+			      transactions, too.			\
+			      The fpc and fprs are saved here as they	\
+			      are not saved by tbegin.  There exist no	\
+			      call-saved vrs, thus they are not saved	\
+			      here.  */					\
+			   "   efpc %[R_FPC]\n\t"			\
+			   TX_SAVE_FPRS					\
+			   /* Begin transaction: save all gprs, allow	\
+			      ar modification and fp operations.  Some	\
+			      program-interruptions (e.g. a null	\
+			      pointer access) are filtered and the	\
+			      transaction will abort.  In this case	\
+			      the normal lock path will execute it	\
+			      again and result in a core dump which	\
+			      does not show at tbegin but at the real	\
+			      executed instruction.  */			\
+			   "   tbegin 0, 0xFF0E\n\t"			\
+			   /* Branch away in abort case (this is the	\
+			      preferred sequence.  See PoP in chapter 5	\
+			      Transactional-Execution Facility		\
+			      Operation).  */				\
+			   "   jnz 0f\n\t"				\
+			   /* Transaction has successfully started.  */	\
+			   "   lhi %[R_RET], 0\n\t"			\
+			   "   j 1f\n\t"				\
+			   /* Transaction has aborted.  Now we are at	\
+			      the outermost transaction.  Restore fprs	\
+			      and fpc. */				\
+			   "0: ipm %[R_RET]\n\t"			\
+			   "   srl %[R_RET], 28\n\t"			\
+			   "   sfpc %[R_FPC]\n\t"			\
+			   TX_RESTORE_FPRS				\
+			   "1:\n\t"					\
+			   ".machine pop\n"				\
+			   : [R_RET] "=&d" (__ret),			\
+			     [R_FPC] "=&d" (__fpc)			\
+			   : [R_FPRS] "a" (__fprs)			\
+			   : "cc", "memory");				\
+     __ret;								\
+     })
+
+/* These builtins are correct.  Use them.  */
+#define __libc_tend()							\
+  ({ __asm__ __volatile__ (".machine push\n\t"				\
+			   ".machinemode \"zarch_nohighgprs\"\n\t"	\
+			   ".machine \"all\"\n\t");			\
+    int __ret = __builtin_tend ();					\
+    __asm__ __volatile__ (".machine pop");				\
+    __ret;								\
+  })
+
+#define __libc_tabort(abortcode)					\
+  __asm__ __volatile__ (".machine push\n\t"				\
+			".machinemode \"zarch_nohighgprs\"\n\t"		\
+			".machine \"all\"\n\t");			\
+  __builtin_tabort (abortcode);						\
+  __asm__ __volatile__ (".machine pop")
+
+#define __libc_tx_nesting_depth() \
+  ({ __asm__ __volatile__ (".machine push\n\t"				\
+			   ".machinemode \"zarch_nohighgprs\"\n\t"	\
+			   ".machine \"all\"\n\t");			\
+    int __ret = __builtin_tx_nesting_depth ();				\
+    __asm__ __volatile__ (".machine pop");				\
+    __ret;								\
+  })
+
+#endif