author     Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>  2015-12-28 12:24:43 -0200
committer  Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>  2016-01-08 17:47:33 -0200
commit     42bf1c897170ff951c7fd0ee9da25f97ff787396 (patch)
tree       3e69cd2d5201944482407b86b4854fe75ab34f12
parent     bc49a7afd38c1bd00f0ad9fd6592a5959d5ba72e (diff)
powerpc: Enforce compiler barriers on hardware transactions
Work around a GCC behavior with hardware transactional memory built-ins.
GCC does not treat the PowerPC transactional built-ins as compiler
barriers, so it may move instructions past the transaction boundaries
and alter their atomicity.
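As an illustration of the idiom this patch introduces (not part of the commit itself), the sketch below shows how an empty asm statement with a "memory" clobber acts as a compiler barrier on each side of the tbegin built-in, keeping memory accesses on the side of the transaction boundary where they were written.  The names shared_counter, tbegin_with_barrier and increment_elided are hypothetical, and the code assumes a PowerPC compiler with HTM support (-mhtm).

/* Hypothetical sketch, not part of this patch.  */
extern int shared_counter;

static inline unsigned int
tbegin_with_barrier (void)
{
  __asm__ volatile ("" ::: "memory");       /* compiler barrier */
  unsigned int ok = __builtin_tbegin (0);   /* start a transaction */
  __asm__ volatile ("" ::: "memory");       /* compiler barrier */
  return ok;
}

void
increment_elided (void)
{
  if (tbegin_with_barrier ())
    {
      /* Without the barriers GCC could move this store across the
	 tbegin, taking it out of the transaction; with them it stays
	 inside.  */
      shared_counter++;
      __builtin_tend (0);                   /* commit the transaction */
    }
}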
-rw-r--r--  ChangeLog                                           12
-rw-r--r--  sysdeps/powerpc/nptl/elide.h                         8
-rw-r--r--  sysdeps/powerpc/sysdep.h                             2
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-lock.c       4
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-trylock.c    6
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/elision-unlock.c     2
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/htm.h               39
7 files changed, 58 insertions(+), 15 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 704c80fca7..515d173402 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2016-01-08  Tulio Magno Quites Machado Filho  <tuliom@linux.vnet.ibm.com>
+
+	* sysdeps/unix/sysv/linux/powerpc/htm.h (__libc_tbegin,
+	__libc_tabort, __libc_tend): New wrappers that enforce compiler
+	barriers around their respective compiler built-ins.
+	* sysdeps/powerpc/nptl/elide.h (__get_new_count, ELIDE_LOCK,
+	ELIDE_TRYLOCK, __elide_unlock): Use the new wrappers.
+	* sysdeps/powerpc/sysdep.h: Likewise.
+	* sysdeps/unix/sysv/linux/powerpc/elision-lock.c: Likewise.
+	* sysdeps/unix/sysv/linux/powerpc/elision-trylock.c: Likewise.
+	* sysdeps/unix/sysv/linux/powerpc/elision-unlock.c: Likewise.
+
 2016-01-08  Marko Myllynen  <myllynen@redhat.com>
 
 	* scripts/config.guess: Revert previous shebang change.
diff --git a/sysdeps/powerpc/nptl/elide.h b/sysdeps/powerpc/nptl/elide.h
index c737464901..77bd82ecba 100644
--- a/sysdeps/powerpc/nptl/elide.h
+++ b/sysdeps/powerpc/nptl/elide.h
@@ -68,14 +68,14 @@ __get_new_count (uint8_t *adapt_count, int attempt)
     else								\
       for (int i = __elision_aconf.try_tbegin; i > 0; i--)		\
 	{								\
-	  if (__builtin_tbegin (0))					\
+	  if (__libc_tbegin (0))					\
 	    {								\
 	      if (is_lock_free)						\
 		{							\
 		  ret = 1;						\
 		  break;						\
 		}							\
-	      __builtin_tabort (_ABORT_LOCK_BUSY);			\
+	      __libc_tabort (_ABORT_LOCK_BUSY);				\
 	    }								\
 	  else								\
 	    if (!__get_new_count (&adapt_count,i))			\
@@ -90,7 +90,7 @@ __get_new_count (uint8_t *adapt_count, int attempt)
     if (__elision_aconf.try_tbegin > 0)				\
       {								\
 	if (write)						\
-	  __builtin_tabort (_ABORT_NESTED_TRYLOCK);		\
+	  __libc_tabort (_ABORT_NESTED_TRYLOCK);		\
 	ret = ELIDE_LOCK (adapt_count, is_lock_free);		\
       }								\
     ret;							\
@@ -102,7 +102,7 @@ __elide_unlock (int is_lock_free)
 {
   if (is_lock_free)
     {
-      __builtin_tend (0);
+      __libc_tend (0);
       return true;
     }
   return false;
diff --git a/sysdeps/powerpc/sysdep.h b/sysdeps/powerpc/sysdep.h
index def54f08a2..800623af37 100644
--- a/sysdeps/powerpc/sysdep.h
+++ b/sysdeps/powerpc/sysdep.h
@@ -180,7 +180,7 @@
 # define ABORT_TRANSACTION \
   ({ 						\
     if (THREAD_GET_TM_CAPABLE ())		\
-      __builtin_tabort (_ABORT_SYSCALL);	\
+      __libc_tabort (_ABORT_SYSCALL);	\
   })
 #else
 # define ABORT_TRANSACTION
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
index 830d2ccfe1..dd1e4c3b17 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -52,12 +52,12 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 
   for (int i = aconf.try_tbegin; i > 0; i--)
     {
-      if (__builtin_tbegin (0))
+      if (__libc_tbegin (0))
 	{
 	  if (*lock == 0)
 	    return 0;
 	  /* Lock was busy.  Fall back to normal locking.  */
-	  __builtin_tabort (_ABORT_LOCK_BUSY);
+	  __libc_tabort (_ABORT_LOCK_BUSY);
 	}
       else
 	{
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
index 9263f1d1b8..0807a6a432 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
@@ -31,7 +31,7 @@ int
 __lll_trylock_elision (int *futex, short *adapt_count)
 {
  /* Implement POSIX semantics by forbidding nesting of elided trylocks.  */
-  __builtin_tabort (_ABORT_NESTED_TRYLOCK);
+  __libc_tabort (_ABORT_NESTED_TRYLOCK);
 
   /* Only try a transaction if it's worth it.  */
   if (*adapt_count > 0)
@@ -39,14 +39,14 @@ __lll_trylock_elision (int *futex, short *adapt_count)
       goto use_lock;
     }
 
-  if (__builtin_tbegin (0))
+  if (__libc_tbegin (0))
     {
       if (*futex == 0)
 	return 0;
 
       /* Lock was busy.  This is never a nested transaction.
          End it, and set the adapt count.  */
-      __builtin_tend (0);
+      __libc_tend (0);
 
       if (aconf.skip_lock_busy > 0)
 	*adapt_count = aconf.skip_lock_busy;
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
index 2561b1dca2..43c5a67df2 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
@@ -25,7 +25,7 @@ __lll_unlock_elision (int *lock, short *adapt_count, int pshared)
 {
   /* When the lock was free we're in a transaction.  */
   if (*lock == 0)
-    __builtin_tend (0);
+    __libc_tend (0);
   else
     {
       lll_unlock ((*lock), pshared);
diff --git a/sysdeps/unix/sysv/linux/powerpc/htm.h b/sysdeps/unix/sysv/linux/powerpc/htm.h
index b18b47ee67..16b2237fcc 100644
--- a/sysdeps/unix/sysv/linux/powerpc/htm.h
+++ b/sysdeps/unix/sysv/linux/powerpc/htm.h
@@ -118,13 +118,44 @@
      __ret;				\
   })
 
-#define __builtin_tbegin(tdb)       _tbegin ()
-#define __builtin_tend(nested)      _tend ()
-#define __builtin_tabort(abortcode) _tabort (abortcode)
-#define __builtin_get_texasru()     _texasru ()
+#define __libc_tbegin(tdb)       _tbegin ()
+#define __libc_tend(nested)      _tend ()
+#define __libc_tabort(abortcode) _tabort (abortcode)
+#define __builtin_get_texasru()  _texasru ()
 
 #else
 # include <htmintrin.h>
+
+# ifdef __TM_FENCE__
+   /* New GCC behavior.  */
+#  define __libc_tbegin(R)  __builtin_tbegin (R)
+#  define __libc_tend(R)    __builtin_tend (R)
+#  define __libc_tabort(R)  __builtin_tabort (R)
+# else
+   /* Work around an old GCC behavior.  Earlier releases of GCC 4.9
+      and 5.0 did not treat __builtin_tbegin, __builtin_tend and
+      __builtin_tabort as compiler barriers, allowing instructions to
+      move into and out of the transaction.
+      Remove this when glibc drops support for GCC 5.0.  */
+#  define __libc_tbegin(R)			\
+   ({ __asm__ volatile("" ::: "memory");	\
+     unsigned int __ret = __builtin_tbegin (R);	\
+     __asm__ volatile("" ::: "memory");		\
+     __ret;					\
+   })
+#  define __libc_tabort(R)			\
+  ({ __asm__ volatile("" ::: "memory");		\
+    unsigned int __ret = __builtin_tabort (R);	\
+    __asm__ volatile("" ::: "memory");		\
+    __ret;					\
+  })
+#  define __libc_tend(R)			\
+   ({ __asm__ volatile("" ::: "memory");	\
+     unsigned int __ret = __builtin_tend (R);	\
+     __asm__ volatile("" ::: "memory");		\
+     __ret;					\
+   })
+# endif /* __TM_FENCE__  */
 #endif /* __HTM__  */
 
 #endif /* __ASSEMBLER__ */
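
For context, here is a condensed usage sketch (not part of the commit), modeled on the elision-lock.c hunk above.  The wrappers are drop-in replacements for the built-ins, so callers keep their structure while gaining the compiler barriers; lock_word, elided_lock and fallback_lock are illustrative names.

/* Hypothetical sketch, not part of this patch.  */
#include <htm.h>

extern int fallback_lock (int *lock_word);   /* assumed non-elided slow path */

static int
elided_lock (int *lock_word)
{
  if (__libc_tbegin (0))                     /* transaction started */
    {
      if (*lock_word == 0)                   /* lock observed free: elide it */
	return 0;
      /* Lock is busy: abort so the slow path runs outside a transaction.  */
      __libc_tabort (_ABORT_LOCK_BUSY);
    }
  return fallback_lock (lock_word);
}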