-rw-r--r--  ports/ChangeLog.arm              10
-rw-r--r--  ports/sysdeps/arm/bits/atomic.h  94
2 files changed, 96 insertions, 8 deletions
diff --git a/ports/ChangeLog.arm b/ports/ChangeLog.arm
index 35f6f7765c..bff557c732 100644
--- a/ports/ChangeLog.arm
+++ b/ports/ChangeLog.arm
@@ -1,3 +1,13 @@
+2013-09-18  Dinar Temirbulatov  <dtemirbulatov@gmail.com>
+
+	[BZ #15640]
+	* sysdeps/arm/bits/atomic.h (atomic_exchange_acq, atomic_exchange_rel)
+	(atomic_compare_and_exchange_bool_acq)
+	(atomic_compare_and_exchange_val_acq)
+	(atomic_compare_and_exchange_bool_rel)
+	(atomic_compare_and_exchange_val_rel): Use __atomic_exchange_n and
+	__atomic_compare_exchange_n builtins when GCC supports them.
+
 2013-09-16  Will Newton  <will.newton@linaro.org>
 
 	* sysdeps/arm/armv7/multiarch/memcpy_impl.S: Tighten check
diff --git a/ports/sysdeps/arm/bits/atomic.h b/ports/sysdeps/arm/bits/atomic.h
index 6e20df741d..5e0801dd8e 100644
--- a/ports/sysdeps/arm/bits/atomic.h
+++ b/ports/sysdeps/arm/bits/atomic.h
@@ -35,9 +35,6 @@ typedef uintmax_t uatomic_max_t;
 
 void __arm_link_error (void);
 
-/* Use the atomic builtins provided by GCC in case the backend provides
-   a pattern to do this efficiently.  */
-
 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
 # define atomic_full_barrier() __sync_synchronize ()
 #else
@@ -51,9 +48,88 @@ void __arm_link_error (void);
 # define __arm_assisted_full_barrier() __arm_link_error()
 #endif
 
-/* Atomic compare and exchange.  */
+/* Use the atomic builtins provided by GCC in case the backend provides
+   a pattern to do this efficiently.  */
+#if __GNUC_PREREQ (4, 7) && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
 
-#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+# define atomic_exchange_acq(mem, value)                                \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
+
+# define atomic_exchange_rel(mem, value)                                \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
+
+/* Atomic exchange (without compare).  */
+
+# define __arch_exchange_8_int(mem, newval, model)      \
+  (__arm_link_error (), (typeof (*mem)) 0)
+
+# define __arch_exchange_16_int(mem, newval, model)     \
+  (__arm_link_error (), (typeof (*mem)) 0)
+
+# define __arch_exchange_32_int(mem, newval, model)     \
+  __atomic_exchange_n (mem, newval, model)
+
+# define __arch_exchange_64_int(mem, newval, model)     \
+  (__arm_link_error (), (typeof (*mem)) 0)
+
+/* Compare and exchange with "acquire" semantics, ie barrier after.  */
+
+# define atomic_compare_and_exchange_bool_acq(mem, new, old)    \
+  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,  \
+                        mem, new, old, __ATOMIC_ACQUIRE)
+
+# define atomic_compare_and_exchange_val_acq(mem, new, old)     \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int,    \
+                       mem, new, old, __ATOMIC_ACQUIRE)
+
+/* Compare and exchange with "release" semantics, ie barrier before.  */
+
+# define atomic_compare_and_exchange_bool_rel(mem, new, old)    \
+  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,  \
+                        mem, new, old, __ATOMIC_RELEASE)
+
+# define atomic_compare_and_exchange_val_rel(mem, new, old)     \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int,    \
+                       mem, new, old, __ATOMIC_RELEASE)
+
+/* Compare and exchange.
+   For all "bool" routines, we return FALSE if exchange succesful.  */
+
+# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
+  ({__arm_link_error (); oldval; })
+
+# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
+  ({__arm_link_error (); oldval; })
+
+# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
+  ({                                                                    \
+    typeof (*mem) __oldval = (oldval);                                  \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,   \
+                                  model, __ATOMIC_RELAXED);             \
+  })
+
+# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
+  ({__arm_link_error (); oldval; })
+
+# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
+  ({__arm_link_error (); oldval; })
+
+# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
+  ({__arm_link_error (); oldval; })
+
+# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
+  ({                                                                    \
+    typeof (*mem) __oldval = (oldval);                                  \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,    \
+                                 model, __ATOMIC_RELAXED);              \
+    __oldval;                                                           \
+  })
+
+# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
+  ({__arm_link_error (); oldval; })
+
+#elif defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+/* Atomic compare and exchange.  */
 # define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
   __sync_val_compare_and_swap ((mem), (oldval), (newval))
 #else
@@ -61,16 +137,18 @@ void __arm_link_error (void);
   __arm_assisted_compare_and_exchange_val_32_acq ((mem), (newval), (oldval))
 #endif
 
+#if !__GNUC_PREREQ (4, 7) || !defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
 /* We don't support atomic operations on any non-word types.
    So make them link errors.  */
-#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
   ({ __arm_link_error (); oldval; })
 
-#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
   ({ __arm_link_error (); oldval; })
 
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
   ({ __arm_link_error (); oldval; })
+#endif
 
 /* An OS-specific bits/atomic.h file will define this macro if
    the OS can provide something.  If not, we'll fail to build
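For reference, the standalone sketch below is not part of the patch; the demo_* names and the test in main are invented for illustration. Assuming GCC >= 4.7 (the same __GNUC_PREREQ (4, 7) guard the patch adds), it shows what the new 32-bit paths reduce to once the __atomic_val_bysize/__atomic_bool_bysize dispatch picks the *_32_int macros: __atomic_exchange_n for atomic_exchange_acq, and __atomic_compare_exchange_n for compare-and-exchange, with the boolean result negated so that, as the patch's comment says, the "bool" routines return FALSE when the exchange succeeds while the "val" routines return the value previously stored in memory.

/* Standalone illustration of the __atomic builtins the patch switches to.
   Build with: gcc -std=gnu11 -O2 demo.c   (requires GCC >= 4.7).  */
#include <stdio.h>

static int shared = 1;

/* Mirrors __arch_exchange_32_int: store NEWVAL, return the previous value.  */
static int
demo_exchange_acq (int *mem, int newval)
{
  return __atomic_exchange_n (mem, newval, __ATOMIC_ACQUIRE);
}

/* Mirrors __arch_compare_and_exchange_bool_32_int: returns 0 (FALSE)
   if *MEM equalled OLDVAL and was replaced by NEWVAL.  */
static int
demo_cas_bool_acq (int *mem, int newval, int oldval)
{
  int expected = oldval;
  return !__atomic_compare_exchange_n (mem, &expected, newval, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

/* Mirrors __arch_compare_and_exchange_val_32_int: returns the value
   that was in *MEM before the operation.  */
static int
demo_cas_val_acq (int *mem, int newval, int oldval)
{
  int expected = oldval;
  __atomic_compare_exchange_n (mem, &expected, newval, 0,
                               __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  return expected;
}

int
main (void)
{
  printf ("exchange: old=%d\n", demo_exchange_acq (&shared, 2));      /* old=1 */
  printf ("cas bool: failed=%d\n", demo_cas_bool_acq (&shared, 3, 2)); /* 0 (success) */
  printf ("cas val:  old=%d\n", demo_cas_val_acq (&shared, 4, 3));     /* 3 */
  printf ("final:    %d\n", shared);                                   /* 4 */
  return 0;
}

With an older GCC, or a configuration that does not define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4, the build falls back to the pre-existing __sync_val_compare_and_swap and kernel-assisted paths, which is why the patch keeps them under the #elif branch and the new #if !__GNUC_PREREQ (4, 7) block.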