Diffstat (limited to 'sysdeps/unix/sysv')

 sysdeps/unix/sysv/linux/powerpc/elision-lock.c    |  4 ++--
 sysdeps/unix/sysv/linux/powerpc/elision-trylock.c |  6 +++---
 sysdeps/unix/sysv/linux/powerpc/elision-unlock.c  |  2 +-
 sysdeps/unix/sysv/linux/powerpc/htm.h             | 39 +++++++++++++++++++----
 4 files changed, 41 insertions(+), 10 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
index 830d2ccfe1..dd1e4c3b17 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -52,12 +52,12 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 
   for (int i = aconf.try_tbegin; i > 0; i--)
     {
-      if (__builtin_tbegin (0))
+      if (__libc_tbegin (0))
 	{
 	  if (*lock == 0)
 	    return 0;
 	  /* Lock was busy.  Fall back to normal locking.  */
-	  __builtin_tabort (_ABORT_LOCK_BUSY);
+	  __libc_tabort (_ABORT_LOCK_BUSY);
 	}
       else
 	{
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
index 9263f1d1b8..0807a6a432 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
@@ -31,7 +31,7 @@ int
 __lll_trylock_elision (int *futex, short *adapt_count)
 {
   /* Implement POSIX semantics by forbidding nesting of elided trylocks.  */
-  __builtin_tabort (_ABORT_NESTED_TRYLOCK);
+  __libc_tabort (_ABORT_NESTED_TRYLOCK);
 
   /* Only try a transaction if it's worth it.  */
   if (*adapt_count > 0)
@@ -39,14 +39,14 @@ __lll_trylock_elision (int *futex, short *adapt_count)
       goto use_lock;
     }
 
-  if (__builtin_tbegin (0))
+  if (__libc_tbegin (0))
     {
       if (*futex == 0)
 	return 0;
 
       /* Lock was busy.  This is never a nested transaction.
          End it, and set the adapt count.  */
-      __builtin_tend (0);
+      __libc_tend (0);
 
       if (aconf.skip_lock_busy > 0)
 	*adapt_count = aconf.skip_lock_busy;
diff --git a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
index 2561b1dca2..43c5a67df2 100644
--- a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
@@ -25,7 +25,7 @@ __lll_unlock_elision (int *lock, short *adapt_count, int pshared)
 {
   /* When the lock was free we're in a transaction.  */
   if (*lock == 0)
-    __builtin_tend (0);
+    __libc_tend (0);
   else
     {
       lll_unlock ((*lock), pshared);
diff --git a/sysdeps/unix/sysv/linux/powerpc/htm.h b/sysdeps/unix/sysv/linux/powerpc/htm.h
index b18b47ee67..16b2237fcc 100644
--- a/sysdeps/unix/sysv/linux/powerpc/htm.h
+++ b/sysdeps/unix/sysv/linux/powerpc/htm.h
@@ -118,13 +118,44 @@
      __ret;				\
   })
 
-#define __builtin_tbegin(tdb)       _tbegin ()
-#define __builtin_tend(nested)      _tend ()
-#define __builtin_tabort(abortcode) _tabort (abortcode)
-#define __builtin_get_texasru()     _texasru ()
+#define __libc_tbegin(tdb)       _tbegin ()
+#define __libc_tend(nested)      _tend ()
+#define __libc_tabort(abortcode) _tabort (abortcode)
+#define __builtin_get_texasru()  _texasru ()
 
 #else
 # include <htmintrin.h>
+
+# ifdef __TM_FENCE__
+   /* New GCC behavior: the HTM builtins are full compiler barriers.  */
+#  define __libc_tbegin(R)  __builtin_tbegin (R)
+#  define __libc_tend(R)    __builtin_tend (R)
+#  define __libc_tabort(R)  __builtin_tabort (R)
+# else
+   /* Work around an old GCC behavior.  Earlier releases of GCC 4.9
+      and 5.0 did not treat __builtin_tbegin, __builtin_tend and
+      __builtin_tabort as compiler barriers, allowing instructions to
+      be moved into and out of the transaction.
+      Remove this when glibc drops support for GCC 5.0.  */
+#  define __libc_tbegin(R)			\
+   ({ __asm__ volatile("" ::: "memory");	\
+     unsigned int __ret = __builtin_tbegin (R);	\
+     __asm__ volatile("" ::: "memory");		\
+     __ret;					\
+   })
+#  define __libc_tabort(R)			\
+  ({ __asm__ volatile("" ::: "memory");		\
+    unsigned int __ret = __builtin_tabort (R);	\
+    __asm__ volatile("" ::: "memory");		\
+    __ret;					\
+  })
+#  define __libc_tend(R)			\
+   ({ __asm__ volatile("" ::: "memory");	\
+     unsigned int __ret = __builtin_tend (R);	\
+     __asm__ volatile("" ::: "memory");		\
+     __ret;					\
+   })
+# endif /* __TM_FENCE__  */
 #endif /* __HTM__  */
 
 #endif /* __ASSEMBLER__ */
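
For reference, the "compiler barrier" the workaround builds on is GCC's
empty asm statement with a "memory" clobber.  The sketch below is a
minimal, self-contained illustration, not part of this commit; the names
compiler_barrier and lock_word are hypothetical.  The clobber tells GCC
that the asm may read or write any memory, so loads and stores may not
be scheduled across it.

    /* Minimal sketch of the barrier idiom used in the __TM_FENCE__
       workaround above.  The asm emits no instructions.  */
    #include <stdio.h>

    static int lock_word;	/* stands in for data used in a transaction */

    static inline void
    compiler_barrier (void)
    {
      /* Empty asm with a "memory" clobber: generates no code, but
         forbids the compiler from moving memory accesses across
         this point.  */
      __asm__ volatile ("" ::: "memory");
    }

    int
    main (void)
    {
      compiler_barrier ();	/* nothing may be hoisted above here */
      lock_word = 1;		/* stays between the two barriers */
      compiler_barrier ();	/* nothing may sink below here */
      printf ("lock_word = %d\n", lock_word);
      return 0;
    }

Without such barriers around the HTM builtins, the older GCC releases
described above could move an access like the lock_word store outside
the tbegin/tend pair, silently breaking the elision logic.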