author     Ulrich Drepper <drepper@redhat.com>    2003-05-13 21:14:28 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2003-05-13 21:14:28 +0000
commit     7158eae4a8c8f15261e50ecab8929050df0a0435 (patch)
tree       c6edb8b3be4c23ce0cb1984e32c5b7fecf51db74 /sysdeps/powerpc
parent     edf205d5ef1528247fde0637572a4631e0f1a5c2 (diff)
Update.

2003-05-12  Steven Munroe  <sjmunroe@us.ibm.com>

    * sysdeps/powerpc/bits/atomic.h
    (__arch_compare_and_exchange_bool_8_rel): Define.
    (__arch_compare_and_exchange_bool_16_rel): Define.
    (__ARCH_REL_INSTR): Define if not already defined.
    (__arch_atomic_exchange_and_add_32): Add "memory" to clobber list.
    (__arch_atomic_decrement_if_positive_32): Add "memory" to clobber list.
    (__arch_compare_and_exchange_val_32_acq): Remove release sync.
    (__arch_compare_and_exchange_val_32_rel): Define.
    (__arch_atomic_exchange_32): Remove.
    (__arch_atomic_exchange_32_acq): Define.
    (__arch_atomic_exchange_32_rel): Define.
    (atomic_compare_and_exchange_val_rel): Define.
    (atomic_exchange_acq): Use __arch_atomic_exchange_*_acq forms.
    (atomic_exchange_rel): Define.
    * sysdeps/powerpc/powerpc32/bits/atomic.h
    (__arch_compare_and_exchange_bool_32_acq): Remove release sync.
    (__arch_compare_and_exchange_bool_32_rel): Define.
    (__arch_compare_and_exchange_bool_64_rel): Define.
    (__arch_compare_and_exchange_val_64_rel): Define.
    (__arch_atomic_exchange_64): Remove.
    (__arch_atomic_exchange_64_acq): Define.
    (__arch_atomic_exchange_64_rel): Define.
    * sysdeps/powerpc/powerpc64/bits/atomic.h
    (__arch_compare_and_exchange_bool_32_rel): Define.
    (__arch_compare_and_exchange_bool_64_acq): Remove release sync.
    (__arch_compare_and_exchange_bool_64_rel): Define.
    (__arch_compare_and_exchange_val_64_acq): Remove release sync.
    (__arch_compare_and_exchange_val_64_rel): Define.
    (__arch_atomic_exchange_64): Remove.
    (__arch_atomic_exchange_64_acq): Define.
    (__arch_atomic_exchange_64_rel): Define.
    (__arch_atomic_exchange_and_add_64): Add "memory" to clobber list.
    (__arch_atomic_decrement_if_positive_64): Add "memory" to clobber list.
    [!UP] (__ARCH_REL_INSTR): Define as lwsync.
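The point of the new _rel forms can be seen from how a lock would use them: the acquire variant orders the critical section after the lock is taken, and the release variant orders it before the lock is dropped. The following is only an illustrative sketch, assuming it were compiled inside glibc where the internal <atomic.h> wrapper (which pulls in these bits/atomic.h macros) is available; the lock type and function names are hypothetical and not part of this patch.

/* Illustrative only: a minimal spinlock built on the glibc-internal
   atomic macros this patch extends.  Not part of the patch.  */
#include <atomic.h>	/* glibc-internal wrapper around bits/atomic.h */

static inline void
hypothetical_lock (volatile int *lock)
{
  /* Acquire form: the isync emitted after the lwarx/stwcx. loop keeps
     accesses inside the critical section from being performed before
     the lock is observed to be held.  */
  while (atomic_compare_and_exchange_val_acq (lock, 1, 0) != 0)
    /* spin */;
}

static inline void
hypothetical_unlock (volatile int *lock)
{
  /* Release form: the sync (or lwsync on powerpc64) emitted before the
     store keeps accesses inside the critical section from being delayed
     past the unlocking store.  */
  atomic_exchange_rel (lock, 0);
}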
Diffstat (limited to 'sysdeps/powerpc')
 -rw-r--r--  sysdeps/powerpc/bits/atomic.h            85
 -rw-r--r--  sysdeps/powerpc/powerpc32/bits/atomic.h  31
 -rw-r--r--  sysdeps/powerpc/powerpc64/bits/atomic.h  84
3 files changed, 176 insertions, 24 deletions
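Several hunks below only add "memory" to the asm clobber lists. Written out as a standalone function, the same lwarx/stwcx. pattern shows what that buys: the clobber makes the asm a compiler-level barrier, so GCC must flush and reload memory values around the atomic operation instead of keeping them cached in registers. This is an illustrative sketch for a 32-bit PowerPC target; the function name is hypothetical and the code is not taken verbatim from the headers.

/* Illustrative only: the lwarx/stwcx. read-modify-write pattern used by
   __arch_atomic_exchange_and_add_32, written as a standalone function.
   The "memory" clobber added by this patch is what forbids the compiler
   from moving or caching unrelated memory accesses across the asm.  */
static inline int
hypothetical_fetch_and_add_32 (volatile int *mem, int value)
{
  int oldval, tmp;
  __asm__ __volatile__ ("1:	lwarx	%0,0,%3\n"	/* load and reserve */
			"	add	%1,%0,%4\n"	/* tmp = old + value */
			"	stwcx.	%1,0,%3\n"	/* store if still reserved */
			"	bne-	1b"		/* retry on lost reservation */
			: "=&b" (oldval), "=&r" (tmp), "=m" (*mem)
			: "b" (mem), "r" (value), "m" (*mem)
			: "cr0", "memory");
  return oldval;
}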
diff --git a/sysdeps/powerpc/bits/atomic.h b/sysdeps/powerpc/bits/atomic.h
index ebac76ab3b..5deb797256 100644
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -53,24 +53,44 @@ typedef uintmax_t uatomic_max_t;
 #define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
   (abort (), 0)
+
+#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
+  (abort (), 0)
+
+#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
+  (abort (), 0)
 
 #ifdef UP
 # define __ARCH_ACQ_INSTR	""
 # define __ARCH_REL_INSTR	""
 #else
 # define __ARCH_ACQ_INSTR	"isync"
-# define __ARCH_REL_INSTR	"sync"
+# ifndef __ARCH_REL_INSTR
+# define __ARCH_REL_INSTR	"sync"
+# endif
 #endif
 
 #define atomic_full_barrier()	__asm ("sync" ::: "memory")
 #define atomic_write_barrier()	__asm ("eieio" ::: "memory")
 
-/*
- * XXX At present these have both acquire and release semantics.
- * Ultimately we should do separate _acq and _rel versions.
- */
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  ({ \
+    __typeof (*(mem)) __tmp; \
+    __typeof (mem) __memp = (mem); \
+    __asm __volatile ( \
+      "1: lwarx %0,0,%1\n" \
+      "   cmpw %0,%2\n" \
+      "   bne 2f\n" \
+      "   stwcx. %3,0,%1\n" \
+      "   bne- 1b\n" \
+      "2: " __ARCH_ACQ_INSTR \
+      : "=&r" (__tmp) \
+      : "b" (__memp), "r" (oldval), "r" (newval) \
+      : "cr0", "memory"); \
+    __tmp; \
+  })
 
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \
   ({ \
     __typeof (*(mem)) __tmp; \
     __typeof (mem) __memp = (mem); \
@@ -80,14 +100,28 @@ typedef uintmax_t uatomic_max_t;
       "   bne 2f\n" \
       "   stwcx. %3,0,%1\n" \
       "   bne- 1b\n" \
-      "2: " __ARCH_ACQ_INSTR \
+      "2: " \
      : "=&r" (__tmp) \
      : "b" (__memp), "r" (oldval), "r" (newval) \
      : "cr0", "memory"); \
    __tmp; \
  })
 
-#define __arch_atomic_exchange_32(mem, value) \
+#define __arch_atomic_exchange_32_acq(mem, value) \
+  ({ \
+    __typeof (*mem) __val; \
+    __asm __volatile ( \
+      "1: lwarx %0,0,%2\n" \
+      "   stwcx. %3,0,%2\n" \
+      "   bne- 1b\n" \
+      "   " __ARCH_ACQ_INSTR \
+      : "=&r" (__val), "=m" (*mem) \
+      : "b" (mem), "r" (value), "1" (*mem) \
+      : "cr0", "memory"); \
+    __val; \
+  })
+
+#define __arch_atomic_exchange_32_rel(mem, value) \
   ({ \
     __typeof (*mem) __val; \
     __asm __volatile (__ARCH_REL_INSTR "\n" \
@@ -96,7 +130,7 @@ typedef uintmax_t uatomic_max_t;
      "   bne- 1b" \
      : "=&r" (__val), "=m" (*mem) \
      : "b" (mem), "r" (value), "1" (*mem) \
-     : "cr0"); \
+     : "cr0", "memory"); \
    __val; \
  })
 
@@ -109,7 +143,7 @@ typedef uintmax_t uatomic_max_t;
      "   bne- 1b" \
      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
      : "b" (mem), "r" (value), "2" (*mem) \
-     : "cr0"); \
+     : "cr0", "memory"); \
    __val; \
  })
 
@@ -124,11 +158,10 @@ typedef uintmax_t uatomic_max_t;
      "2: " __ARCH_ACQ_INSTR \
      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
      : "b" (mem), "2" (*mem) \
-     : "cr0"); \
+     : "cr0", "memory"); \
    __val; \
  })
 
-
 #define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
   ({ \
     __typeof (*(mem)) __result; \
@@ -140,14 +173,38 @@ typedef uintmax_t uatomic_max_t;
       abort (); \
     __result; \
   })
+
+#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
+  ({ \
+    __typeof (*(mem)) __result; \
+    if (sizeof (*mem) == 4) \
+      __result = __arch_compare_and_exchange_val_32_rel(mem, newval, oldval); \
+    else if (sizeof (*mem) == 8) \
+      __result = __arch_compare_and_exchange_val_64_rel(mem, newval, oldval); \
+    else \
+      abort (); \
+    __result; \
+  })
 
 #define atomic_exchange_acq(mem, value) \
   ({ \
     __typeof (*(mem)) __result; \
     if (sizeof (*mem) == 4) \
-      __result = __arch_atomic_exchange_32 (mem, value); \
+      __result = __arch_atomic_exchange_32_acq (mem, value); \
+    else if (sizeof (*mem) == 8) \
+      __result = __arch_atomic_exchange_64_acq (mem, value); \
+    else \
+      abort (); \
+    __result; \
+  })
+
+#define atomic_exchange_rel(mem, value) \
+  ({ \
+    __typeof (*(mem)) __result; \
+    if (sizeof (*mem) == 4) \
+      __result = __arch_atomic_exchange_32_rel (mem, value); \
     else if (sizeof (*mem) == 8) \
-      __result = __arch_atomic_exchange_64 (mem, value); \
+      __result = __arch_atomic_exchange_64_rel (mem, value); \
     else \
       abort (); \
     __result; \
   })
diff --git a/sysdeps/powerpc/powerpc32/bits/atomic.h b/sysdeps/powerpc/powerpc32/bits/atomic.h
index 54caa45de1..4e2e24335d 100644
--- a/sysdeps/powerpc/powerpc32/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc32/bits/atomic.h
@@ -24,10 +24,10 @@
  * (a load word and zero (high 32) form).  So powerpc64 has a slightly
  * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
  */
-# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
 ({ \
   unsigned int __tmp; \
-  __asm __volatile (__ARCH_REL_INSTR "\n" \
+  __asm __volatile ( \
    "1: lwarx %0,0,%1\n" \
    "   subf. %0,%2,%0\n" \
    "   bne 2f\n" \
@@ -40,6 +40,22 @@
   __tmp != 0; \
 })
 
+# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
+({ \
+  unsigned int __tmp; \
+  __asm __volatile (__ARCH_REL_INSTR "\n" \
+   "1: lwarx %0,0,%1\n" \
+   "   subf. %0,%2,%0\n" \
+   "   bne 2f\n" \
+   "   stwcx. %3,0,%1\n" \
+   "   bne- 1b\n" \
+   "2: " \
+   : "=&r" (__tmp) \
+   : "b" (mem), "r" (oldval), "r" (newval) \
+   : "cr0", "memory"); \
+  __tmp != 0; \
+})
+
 /*
  * Powerpc32 processors don't implement the 64-bit (doubleword) forms of
  * load and reserve (ldarx) and store conditional (stdcx.) instructions.
@@ -50,8 +66,17 @@
 # define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
   (abort (), (__typeof (*mem)) 0)
+
+# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
+  (abort (), 0)
+
+# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
+  (abort (), (__typeof (*mem)) 0)
+
+# define __arch_atomic_exchange_64_acq(mem, value) \
+    ({ abort (); (*mem) = (value); })
 
-# define __arch_atomic_exchange_64(mem, value) \
+# define __arch_atomic_exchange_64_rel(mem, value) \
     ({ abort (); (*mem) = (value); })
 
 # define __arch_atomic_exchange_and_add_64(mem, value) \
diff --git a/sysdeps/powerpc/powerpc64/bits/atomic.h b/sysdeps/powerpc/powerpc64/bits/atomic.h
index 938e5ee3f0..76be01edf2 100644
--- a/sysdeps/powerpc/powerpc64/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc64/bits/atomic.h
@@ -30,7 +30,7 @@
 # define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
 ({ \
   unsigned int __tmp; \
-  __asm __volatile (__ARCH_REL_INSTR "\n" \
+  __asm __volatile ( \
    "1: lwarx %0,0,%1\n" \
    "   extsw %0,%0\n" \
    "   subf. %0,%2,%0\n" \
@@ -44,6 +44,23 @@
   __tmp != 0; \
 })
 
+# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
+({ \
+  unsigned int __tmp; \
+  __asm __volatile (__ARCH_REL_INSTR "\n" \
+   "1: lwarx %0,0,%1\n" \
+   "   extsw %0,%0\n" \
+   "   subf. %0,%2,%0\n" \
+   "   bne 2f\n" \
+   "   stwcx. %3,0,%1\n" \
+   "   bne- 1b\n" \
+   "2: " \
+   : "=&r" (__tmp) \
+   : "b" (mem), "r" (oldval), "r" (newval) \
+   : "cr0", "memory"); \
+  __tmp != 0; \
+})
+
 /*
  * Only powerpc64 processors support Load doubleword and reserve index (ldarx)
  * and Store doubleword conditional indexed (stdcx) instructions.  So here
@@ -52,7 +69,7 @@
 # define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
 ({ \
   unsigned long __tmp; \
-  __asm __volatile (__ARCH_REL_INSTR "\n" \
+  __asm __volatile ( \
    "1: ldarx %0,0,%1\n" \
    "   subf. %0,%2,%0\n" \
    "   bne 2f\n" \
@@ -65,11 +82,27 @@
   __tmp != 0; \
 })
 
+# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
+({ \
+  unsigned long __tmp; \
+  __asm __volatile (__ARCH_REL_INSTR "\n" \
+   "1: ldarx %0,0,%1\n" \
+   "   subf. %0,%2,%0\n" \
+   "   bne 2f\n" \
+   "   stdcx. %3,0,%1\n" \
+   "   bne- 1b\n" \
+   "2: " \
+   : "=&r" (__tmp) \
+   : "b" (mem), "r" (oldval), "r" (newval) \
+   : "cr0", "memory"); \
+  __tmp != 0; \
+})
+
 #define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
   ({ \
     __typeof (*(mem)) __tmp; \
     __typeof (mem) __memp = (mem); \
-    __asm __volatile (__ARCH_REL_INSTR "\n" \
+    __asm __volatile ( \
      "1: ldarx %0,0,%1\n" \
      "   cmpd %0,%2\n" \
      "   bne 2f\n" \
@@ -82,7 +115,38 @@
     __tmp; \
   })
 
-# define __arch_atomic_exchange_64(mem, value) \
+#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
+  ({ \
+    __typeof (*(mem)) __tmp; \
+    __typeof (mem) __memp = (mem); \
+    __asm __volatile (__ARCH_REL_INSTR "\n" \
+     "1: ldarx %0,0,%1\n" \
+     "   cmpd %0,%2\n" \
+     "   bne 2f\n" \
+     "   stdcx. %3,0,%1\n" \
+     "   bne- 1b\n" \
+     "2: " \
+     : "=&r" (__tmp) \
+     : "b" (__memp), "r" (oldval), "r" (newval) \
+     : "cr0", "memory"); \
+    __tmp; \
+  })
+
+# define __arch_atomic_exchange_64_acq(mem, value) \
+  ({ \
+    __typeof (*mem) __val; \
+    __asm __volatile (__ARCH_REL_INSTR "\n" \
+     "1: ldarx %0,0,%2\n" \
+     "   stdcx. %3,0,%2\n" \
+     "   bne- 1b\n" \
+     "   " __ARCH_ACQ_INSTR \
+     : "=&r" (__val), "=m" (*mem) \
+     : "b" (mem), "r" (value), "1" (*mem) \
+     : "cr0", "memory"); \
+    __val; \
+  })
+
+# define __arch_atomic_exchange_64_rel(mem, value) \
   ({ \
     __typeof (*mem) __val; \
     __asm __volatile (__ARCH_REL_INSTR "\n" \
@@ -91,7 +155,7 @@
      "   bne- 1b" \
      : "=&r" (__val), "=m" (*mem) \
      : "b" (mem), "r" (value), "1" (*mem) \
-     : "cr0"); \
+     : "cr0", "memory"); \
    __val; \
  })
 
@@ -104,7 +168,7 @@
      "   bne- 1b" \
      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
      : "b" (mem), "r" (value), "2" (*mem) \
-     : "cr0"); \
+     : "cr0", "memory"); \
    __val; \
  })
 
@@ -119,7 +183,7 @@
     "2: " __ARCH_ACQ_INSTR \
     : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
     : "b" (mem), "2" (*mem) \
-    : "cr0"); \
+    : "cr0", "memory"); \
    __val; \
  })
 
@@ -127,6 +191,12 @@
  * All powerpc64 processors support the new "light weight" sync (lwsync).
  */
 # define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
+/*
+ * "light weight" sync can also be used for the release barrier.
+ */
+# ifndef UP
+# define __ARCH_REL_INSTR	"lwsync"
+# endif
 
 /*
  * Include the rest of the atomic ops macros which are common to both
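The final hunk is what makes the new "# ifndef __ARCH_REL_INSTR" guard in the common header useful: the powerpc64 header defines the release barrier as lwsync before it includes the shared powerpc bits/atomic.h (as its closing comment above indicates), so powerpc64 gets the cheaper barrier while powerpc32 falls through to the full sync. A condensed sketch of that interaction follows; it paraphrases the two headers rather than copying them.

/* Sketch only -- paraphrases the two headers as they stand after this patch.  */

/* sysdeps/powerpc/powerpc64/bits/atomic.h (seen first on powerpc64): */
#ifndef UP
# define __ARCH_REL_INSTR	"lwsync"	/* lightweight release barrier */
#endif
/* ...the powerpc64 header then includes the common header below...  */

/* sysdeps/powerpc/bits/atomic.h (shared by powerpc32 and powerpc64): */
#ifdef UP
# define __ARCH_ACQ_INSTR	""
# define __ARCH_REL_INSTR	""
#else
# define __ARCH_ACQ_INSTR	"isync"
# ifndef __ARCH_REL_INSTR		/* still undefined on powerpc32 */
#  define __ARCH_REL_INSTR	"sync"	/* full sync as the fallback */
# endif
#endif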