File powerpc-htm-compiler-barriers.patch of Package glibc.6721
2016-01-22  Paul E. Murphy  <murphyp@linux.vnet.ibm.com>

	* sysdeps/unix/sysv/linux/powerpc/htm.h (__libc_tbegin): Remove
	semicolon.
	(__libc_tend): Likewise.
	(__libc_tabort): Likewise.

2016-01-08  Tulio Magno Quites Machado Filho  <tuliom@linux.vnet.ibm.com>

	* sysdeps/unix/sysv/linux/powerpc/htm.h (__libc_tbegin,
	__libc_tabort, __libc_tend): New wrappers that enforce compiler
	barriers around their respective compiler built-ins.
	* sysdeps/powerpc/nptl/elide.h (__get_new_count, ELIDE_LOCK,
	ELIDE_TRYLOCK, __elide_unlock): Use the new wrappers.
	* sysdeps/powerpc/sysdep.h: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/elision-lock.c: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/elision-trylock.c: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/elision-unlock.c: Likewise.
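The wrappers introduced below rely on the standard GCC compiler-barrier
idiom: an empty asm statement with a "memory" clobber emits no
instructions, but forbids the compiler from reordering or caching memory
accesses across it.  A minimal sketch of the idiom, for context only
(counter and guarded_increment are illustrative names, not glibc
identifiers):

    /* The empty asm compiles to nothing; its "memory" clobber pins the
       accesses to counter between the two barriers.  */
    static int counter;

    static inline int
    guarded_increment (void)
    {
      __asm__ volatile ("" ::: "memory");  /* compiler barrier */
      counter++;                           /* cannot move past a barrier */
      __asm__ volatile ("" ::: "memory");  /* compiler barrier */
      return counter;
    }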
Index: glibc-2.22/sysdeps/powerpc/nptl/elide.h
===================================================================
--- glibc-2.22.orig/sysdeps/powerpc/nptl/elide.h
+++ glibc-2.22/sysdeps/powerpc/nptl/elide.h
@@ -68,14 +68,14 @@ __get_new_count (uint8_t *adapt_count, i
else \
for (int i = __elision_aconf.try_tbegin; i > 0; i--) \
{ \
- if (__builtin_tbegin (0)) \
+ if (__libc_tbegin (0)) \
{ \
if (is_lock_free) \
{ \
ret = 1; \
break; \
} \
- __builtin_tabort (_ABORT_LOCK_BUSY); \
+ __libc_tabort (_ABORT_LOCK_BUSY); \
} \
else \
if (!__get_new_count (&adapt_count,i)) \
@@ -90,7 +90,7 @@ __get_new_count (uint8_t *adapt_count, i
if (__elision_aconf.try_tbegin > 0) \
{ \
if (write) \
- __builtin_tabort (_ABORT_NESTED_TRYLOCK); \
+ __libc_tabort (_ABORT_NESTED_TRYLOCK); \
ret = ELIDE_LOCK (adapt_count, is_lock_free); \
} \
ret; \
@@ -102,7 +102,7 @@ __elide_unlock (int is_lock_free)
{
if (is_lock_free)
{
- __builtin_tend (0);
+ __libc_tend (0);
return true;
}
return false;
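The elide.h hunks above only swap the built-ins for the barrier-enforcing
wrappers; the begin/check/abort shape of lock elision is unchanged.  A
condensed sketch of that shape, assuming htm.h is included and HTM-capable
powerpc hardware (try_elide is an illustrative name, not a glibc function):

    /* Start a transaction; if the lock word is observed free, run the
       critical section transactionally, otherwise abort so the hardware
       rolls back and the caller falls back to real locking.  */
    static inline int
    try_elide (int *lock)
    {
      if (__libc_tbegin (0))
        {
          if (*lock == 0)
            return 1;                        /* elided */
          __libc_tabort (_ABORT_LOCK_BUSY);  /* roll back, take real lock */
        }
      return 0;                              /* tbegin failed or aborted */
    }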
Index: glibc-2.22/sysdeps/powerpc/sysdep.h
===================================================================
--- glibc-2.22.orig/sysdeps/powerpc/sysdep.h
+++ glibc-2.22/sysdeps/powerpc/sysdep.h
@@ -180,7 +180,7 @@
# define ABORT_TRANSACTION \
({ \
if (THREAD_GET_TM_CAPABLE ()) \
- __builtin_tabort (_ABORT_SYSCALL); \
+ __libc_tabort (_ABORT_SYSCALL); \
})
#else
# define ABORT_TRANSACTION
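System calls are not transaction-safe, so any active transaction must be
aborted before entering the kernel; using the barrier-enforcing
__libc_tabort here keeps the compiler from moving pre-syscall memory
accesses past the abort.  A hypothetical illustration of how the macro is
meant to be used (checked_syscall is not a glibc function; assumes
sysdep.h is in scope):

    #include <unistd.h>

    /* Abort an active HTM transaction, if any, then enter the kernel.  */
    static inline long
    checked_syscall (long nr)
    {
      ABORT_TRANSACTION;     /* no-op on non-TM-capable threads */
      return syscall (nr);
    }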
Index: glibc-2.22/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
===================================================================
--- glibc-2.22.orig/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ glibc-2.22/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
@@ -53,12 +53,12 @@ __lll_lock_elision (int *lock, short *ad
for (int i = aconf.try_tbegin; i > 0; i--)
{
- if (__builtin_tbegin (0))
+ if (__libc_tbegin (0))
{
if (*lock == 0)
return 0;
/* Lock was busy. Fall back to normal locking. */
- __builtin_tabort (_ABORT_LOCK_BUSY);
+ __libc_tabort (_ABORT_LOCK_BUSY);
}
else
{
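Outside this hunk (not shown in the diff), elision-lock.c consults
adapt_count to skip elision for a while after aborts.  A simplified sketch
of that adaptive gate, with illustrative names (the real logic also
distinguishes persistent from transient transaction failures):

    /* Skip elision while adapt_count is draining; once it reaches zero,
       transactions are attempted again.  */
    static inline int
    should_elide (short *adapt_count)
    {
      if (*adapt_count > 0)
        {
          (*adapt_count)--;   /* back off: take the real lock */
          return 0;
        }
      return 1;               /* counter drained: try a transaction */
    }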
Index: glibc-2.22/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
===================================================================
--- glibc-2.22.orig/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+++ glibc-2.22/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
@@ -31,7 +31,7 @@ int
__lll_trylock_elision (int *futex, short *adapt_count)
{
/* Implement POSIX semantics by forbidding nested elided trylocks. */
- __builtin_tabort (_ABORT_NESTED_TRYLOCK);
+ __libc_tabort (_ABORT_NESTED_TRYLOCK);
/* Only try a transaction if it's worth it. */
if (*adapt_count > 0)
@@ -40,14 +40,14 @@ __lll_trylock_elision (int *futex, short
goto use_lock;
}
- if (__builtin_tbegin (0))
+ if (__libc_tbegin (0))
{
if (*futex == 0)
return 0;
/* Lock was busy. This is never a nested transaction.
End it, and set the adapt count. */
- __builtin_tend (0);
+ __libc_tend (0);
if (aconf.skip_lock_busy > 0)
*adapt_count = aconf.skip_lock_busy;
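The unconditional __libc_tabort above exists because a trylock running
inside an enclosing transaction would otherwise observe the (never
actually taken) outer lock as free and "succeed", where POSIX requires
EBUSY.  A standalone sketch of the semantics being preserved; ordinary,
non-elided mutexes behave this way by definition:

    #include <errno.h>
    #include <pthread.h>

    /* With a default (non-recursive) mutex held, a trylock in the same
       thread must fail with EBUSY.  */
    static int
    nested_trylock_fails (pthread_mutex_t *m)
    {
      pthread_mutex_lock (m);
      int rc = pthread_mutex_trylock (m);   /* EBUSY expected */
      pthread_mutex_unlock (m);
      return rc == EBUSY;
    }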
Index: glibc-2.22/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
===================================================================
--- glibc-2.22.orig/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+++ glibc-2.22/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
@@ -25,7 +25,7 @@ __lll_unlock_elision(int *lock, int psha
{
/* When the lock was free we're in a transaction. */
if (*lock == 0)
- __builtin_tend (0);
+ __libc_tend (0);
else
lll_unlock ((*lock), pshared);
return 0;
Index: glibc-2.22/sysdeps/unix/sysv/linux/powerpc/htm.h
===================================================================
--- glibc-2.22.orig/sysdeps/unix/sysv/linux/powerpc/htm.h
+++ glibc-2.22/sysdeps/unix/sysv/linux/powerpc/htm.h
@@ -118,13 +118,44 @@
__ret; \
})
-#define __builtin_tbegin(tdb) _tbegin ()
-#define __builtin_tend(nested) _tend ()
-#define __builtin_tabort(abortcode) _tabort (abortcode)
-#define __builtin_get_texasru() _texasru ()
+#define __libc_tbegin(tdb) _tbegin ()
+#define __libc_tend(nested) _tend ()
+#define __libc_tabort(abortcode) _tabort (abortcode)
+#define __builtin_get_texasru() _texasru ()
#else
# include <htmintrin.h>
+
+# ifdef __TM_FENCE__
+ /* New GCC behavior. */
+# define __libc_tbegin(R) __builtin_tbegin (R)
+# define __libc_tend(R) __builtin_tend (R)
+# define __libc_tabort(R) __builtin_tabort (R)
+# else
+ /* Work around an old GCC behavior.  Early releases of GCC 4.9 and 5.0
+    did not treat __builtin_tbegin, __builtin_tend and __builtin_tabort
+    as compiler barriers, allowing memory accesses to be moved into and
+    out of the transaction.
+    Remove this when glibc drops support for GCC 5.0.  */
+# define __libc_tbegin(R) \
+ ({ __asm__ volatile("" ::: "memory"); \
+ unsigned int __ret = __builtin_tbegin (R); \
+ __asm__ volatile("" ::: "memory"); \
+ __ret; \
+ })
+# define __libc_tabort(R) \
+ ({ __asm__ volatile("" ::: "memory"); \
+ unsigned int __ret = __builtin_tabort (R); \
+ __asm__ volatile("" ::: "memory"); \
+ __ret; \
+ })
+# define __libc_tend(R) \
+ ({ __asm__ volatile("" ::: "memory"); \
+ unsigned int __ret = __builtin_tend (R); \
+ __asm__ volatile("" ::: "memory"); \
+ __ret; \
+ })
+# endif /* __TM_FENCE__ */
#endif /* __HTM__ */
#endif /* __ASSEMBLER__ */
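GCC defines __TM_FENCE__ once its HTM built-ins act as full compiler
barriers, which is what the #ifdef above keys on.  A standalone probe
(not part of glibc) to see which branch a given compiler would take;
requires a powerpc compiler invoked with -mhtm:

    #include <stdio.h>

    int
    main (void)
    {
    #ifdef __TM_FENCE__
      puts ("__TM_FENCE__ defined: HTM built-ins are compiler barriers");
    #else
      puts ("__TM_FENCE__ undefined: explicit asm barriers are required");
    #endif
      return 0;
    }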