author     Torvald Riegel <triegel@redhat.com>    2015-06-23 15:22:25 +0200
committer  Torvald Riegel <triegel@redhat.com>    2015-06-30 15:57:15 +0200
commit     4eb984d3ab5641ce7992204756ac15a61f5f7181 (patch)
tree       12603bae9d2582033a3186174d1379121e1ea642 /sysdeps
parent     e02920bc029019443326eecaa7b267b78ff2892e (diff)
Clean up BUSY_WAIT_NOP and atomic_delay.
This patch combines BUSY_WAIT_NOP and atomic_delay into a new atomic_spin_nop function and adjusts all clients. The new function is put into atomic.h because what is best done in a spin loop is architecture-specific, and atomics must be used for spinning. The function name is meant to tell users that this has no effect on synchronization semantics but is a performance aid for spinning.
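To make the intended use concrete, here is a minimal, self-contained sketch of a bounded spin before giving up, in the style this function enables. The atomic_spin_nop stand-in below is modeled on the x86 "rep; nop" variant in the diff; try_lock_spinning and max_spins are illustrative names, not glibc internals.

    /* Sketch only: a bounded test-and-test-and-set spin using a stand-in
       atomic_spin_nop.  glibc's real definition is per-architecture and
       lives in its internal atomic.h; try_lock_spinning and max_spins
       are made up for illustration.  */
    #include <stdatomic.h>
    #include <stdbool.h>

    #if defined __i386__ || defined __x86_64__
    # define atomic_spin_nop() __asm__ ("rep; nop")  /* x86 PAUSE hint */
    #else
    # define atomic_spin_nop() do { } while (0)      /* plain no-op fallback */
    #endif

    static bool
    try_lock_spinning (atomic_int *lock, int max_spins)
    {
      for (int i = 0; i < max_spins; i++)
        {
          /* Load first so we spin on a cached value instead of hammering
             the cache line with atomic read-modify-writes.  */
          if (atomic_load_explicit (lock, memory_order_relaxed) == 0
              && atomic_exchange_explicit (lock, 1, memory_order_acquire) == 0)
            return true;
          atomic_spin_nop ();  /* no synchronization effect; spinning hint */
        }
      return false;  /* caller would now block, e.g. on a futex */
    }

As the commit message says, the call has no effect on synchronization semantics; it only tells the CPU that the enclosing loop is a spin-wait.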
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/i386/i486/bits/atomic.h               | 2 +-
-rw-r--r--  sysdeps/nacl/lll_timedwait_tid.c              | 2 +-
-rw-r--r--  sysdeps/nacl/lowlevellock.h                   | 6 +-----
-rw-r--r--  sysdeps/sparc/sparc32/sparcv9/bits/atomic.h   | 3 +++
-rw-r--r--  sysdeps/sparc/sparc64/bits/atomic.h           | 3 +++
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevellock.h   | 4 ----
-rw-r--r--  sysdeps/unix/sysv/linux/sparc/lowlevellock.h  | 6 ------
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevellock.h | 3 ---
-rw-r--r--  sysdeps/x86_64/bits/atomic.h                  | 2 +-

9 files changed, 10 insertions(+), 21 deletions(-)
diff --git a/sysdeps/i386/i486/bits/atomic.h b/sysdeps/i386/i486/bits/atomic.h
index 59165bec94..59f3d34871 100644
--- a/sysdeps/i386/i486/bits/atomic.h
+++ b/sysdeps/i386/i486/bits/atomic.h
@@ -479,7 +479,7 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
 
-#define atomic_delay() asm ("rep; nop")
+#define atomic_spin_nop() asm ("rep; nop")
 
 
 #define __arch_and_body(lock, mem, mask) \
diff --git a/sysdeps/nacl/lll_timedwait_tid.c b/sysdeps/nacl/lll_timedwait_tid.c
index ecaf0b113a..ef544cf84f 100644
--- a/sysdeps/nacl/lll_timedwait_tid.c
+++ b/sysdeps/nacl/lll_timedwait_tid.c
@@ -40,7 +40,7 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
	   finish quick enough that the timeout doesn't matter.  If any
	   thread ever stays in this state for long, there is something
	   catastrophically wrong.  */
-	BUSY_WAIT_NOP;
+	atomic_spin_nop ();
       else
 	{
 	  assert (tid > 0);
diff --git a/sysdeps/nacl/lowlevellock.h b/sysdeps/nacl/lowlevellock.h
index 0b85d8d317..3634f1959a 100644
--- a/sysdeps/nacl/lowlevellock.h
+++ b/sysdeps/nacl/lowlevellock.h
@@ -21,10 +21,6 @@
 /* Everything except the exit handling is the same as the generic code.  */
 # include <sysdeps/nptl/lowlevellock.h>
 
-# ifndef BUSY_WAIT_NOP
-#  define BUSY_WAIT_NOP __sync_synchronize ()
-# endif
-
 /* See exit-thread.h for details.  */
 # define NACL_EXITING_TID 1
 
@@ -36,7 +32,7 @@
   while ((__tid = atomic_load_relaxed (__tidp)) != 0)	\
     {							\
       if (__tid == NACL_EXITING_TID)			\
-	BUSY_WAIT_NOP;					\
+	atomic_spin_nop ();				\
       else						\
 	lll_futex_wait (__tidp, __tid, LLL_PRIVATE);	\
     }							\
diff --git a/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h b/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h
index 317be62ccb..2122afbb09 100644
--- a/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h
+++ b/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h
@@ -100,3 +100,6 @@ typedef uintmax_t uatomic_max_t;
   __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory")
 #define atomic_write_barrier() \
   __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory")
+
+extern void __cpu_relax (void);
+#define atomic_spin_nop() __cpu_relax ()
diff --git a/sysdeps/sparc/sparc64/bits/atomic.h b/sysdeps/sparc/sparc64/bits/atomic.h
index 35804a8e14..48b7fd6216 100644
--- a/sysdeps/sparc/sparc64/bits/atomic.h
+++ b/sysdeps/sparc/sparc64/bits/atomic.h
@@ -121,3 +121,6 @@ typedef uintmax_t uatomic_max_t;
   __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory")
 #define atomic_write_barrier() \
   __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory")
+
+extern void __cpu_relax (void);
+#define atomic_spin_nop() __cpu_relax ()
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index f57afc6e2a..58f5638e37 100644
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -58,10 +58,6 @@
 #define LLL_LOCK_INITIALIZER_WAITERS	(2)
 
 
-/* Delay in spinlock loop.  */
-#define BUSY_WAIT_NOP	asm ("rep; nop")
-
-
 /* NB: in the lll_trylock macro we simply return the value in %eax
    after the cmpxchg instruction.  In case the operation succeded this
    value is zero.  In case the operation failed, the cmpxchg instruction
diff --git a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
index 9aefd9eb59..7608c01d17 100644
--- a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
@@ -25,12 +25,6 @@
 #include <atomic.h>
 #include <kernel-features.h>
 
-#ifndef __sparc32_atomic_do_lock
-/* Delay in spinlock loop.  */
-extern void __cpu_relax (void);
-#define BUSY_WAIT_NOP	__cpu_relax ()
-#endif
-
 #include <lowlevellock-futex.h>
 
 static inline int
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index 573b48c4fe..de525cd4c7 100644
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -57,9 +57,6 @@
 #define LLL_LOCK_INITIALIZER_LOCKED	(1)
 #define LLL_LOCK_INITIALIZER_WAITERS	(2)
 
 
-/* Delay in spinlock loop.  */
-#define BUSY_WAIT_NOP	asm ("rep; nop")
-
 /* NB: in the lll_trylock macro we simply return the value in %eax
    after the cmpxchg instruction.  In case the operation succeded this
diff --git a/sysdeps/x86_64/bits/atomic.h b/sysdeps/x86_64/bits/atomic.h
index 203d92c20d..337b334db1 100644
--- a/sysdeps/x86_64/bits/atomic.h
+++ b/sysdeps/x86_64/bits/atomic.h
@@ -410,7 +410,7 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
 
-#define atomic_delay() asm ("rep; nop")
+#define atomic_spin_nop() asm ("rep; nop")
 
 
 #define __arch_and_body(lock, mem, mask) \
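The nacl hunks above show the call pattern the commit message describes: spin only while the other thread is in its brief exit window, and block on a futex otherwise. Below is a rough standalone sketch of that shape; futex_wait, wait_tid, and EXITING_TID are illustrative placeholders, not glibc's internal API.

    /* Illustrative sketch of the lll_wait_tid shape from the nacl hunk.
       futex_wait, wait_tid, and EXITING_TID are placeholders invented
       for this example.  */
    #include <stdatomic.h>

    #define EXITING_TID 1  /* stand-in for NACL_EXITING_TID */
    #define atomic_spin_nop() do { } while (0)  /* generic no-op stand-in */

    extern void futex_wait (atomic_int *addr, int expected);  /* placeholder */

    static void
    wait_tid (atomic_int *tidp)
    {
      int tid;
      while ((tid = atomic_load_explicit (tidp, memory_order_relaxed)) != 0)
        {
          if (tid == EXITING_TID)
            /* The owner is about to exit; spinning beats a futex round
               trip for such a short wait, and the nop has no effect on
               synchronization semantics.  */
            atomic_spin_nop ();
          else
            futex_wait (tidp, tid);  /* block until the value may change */
        }
    }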