author	Rich Felker <dalias@aerifal.cx>	2014-01-11 19:37:09 -0500
committer	Rich Felker <dalias@aerifal.cx>	2014-01-11 19:37:09 -0500
commit	311736516e3a0782a6ec0e338190d08410d942de (patch)
tree	4fbaaac2af53bd90a7309613cbaad79cded6477b /arch/x86_64/atomic.h
parent	f29e834d985fbca0d8f7e70c056c7dbcfc5c9c9e (diff)
remove gratuitous temp vars, casts, and suffixes in x86_64 atomic.h
aside from general cleanup, this should allow the identical atomic.h
file to be used for the upcoming x32 port.
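For illustration only (not part of the commit): with register operands, the GNU assembler infers the operand width from the register names, so explicit "q"/"l" size suffixes on instructions like bsf, and, or, and cmpxchg are redundant, and writing the bsf result straight back into its input avoids the separate temporary. A minimal standalone sketch, assuming GCC/clang inline asm on x86_64:

#include <stdint.h>
#include <stdio.h>

/* Same pattern as the patched a_ctz_64: no temp var, no size suffix.
 * The 64-bit register allocated for x tells the assembler to emit a
 * 64-bit bsf, so no "bsfq" spelling is needed. */
static inline int ctz64(uint64_t x)
{
	__asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
	return x;
}

int main(void)
{
	printf("%d\n", ctz64(0x1000));	/* lowest set bit of 0x1000 is bit 12 */
	return 0;
}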
Diffstat (limited to 'arch/x86_64/atomic.h')
-rw-r--r--	arch/x86_64/atomic.h	| 24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/arch/x86_64/atomic.h b/arch/x86_64/atomic.h
index 0d3da6f8..3f2c5e1d 100644
--- a/arch/x86_64/atomic.h
+++ b/arch/x86_64/atomic.h
@@ -5,28 +5,26 @@
 
 static inline int a_ctz_64(uint64_t x)
 {
-	long r;
-	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
-	return r;
+	__asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
+	return x;
 }
 
 static inline int a_ctz_l(unsigned long x)
 {
-	long r;
-	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
-	return r;
+	__asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
+	return x;
 }
 
 static inline void a_and_64(volatile uint64_t *p, uint64_t v)
 {
-	__asm__( "lock ; andq %1, %0"
-			 : "=m"(*(long *)p) : "r"(v) : "memory" );
+	__asm__( "lock ; and %1, %0"
+			 : "=m"(*p) : "r"(v) : "memory" );
 }
 
 static inline void a_or_64(volatile uint64_t *p, uint64_t v)
 {
-	__asm__( "lock ; orq %1, %0"
-			 : "=m"(*(long *)p) : "r"(v) : "memory" );
+	__asm__( "lock ; or %1, %0"
+			 : "=m"(*p) : "r"(v) : "memory" );
 }
 
 static inline void a_store_l(volatile void *p, long x)
@@ -56,7 +54,7 @@ static inline long a_cas_l(volatile void *p, long t, long s)
 
 static inline int a_cas(volatile int *p, int t, int s)
 {
-	__asm__( "lock ; cmpxchgl %3, %1"
+	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
@@ -74,13 +72,13 @@ static inline long a_swap_l(volatile void *x, long v)
 
 static inline void a_or(volatile void *p, int v)
 {
-	__asm__( "lock ; orl %1, %0"
+	__asm__( "lock ; or %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }
 
 static inline void a_and(volatile void *p, int v)
 {
-	__asm__( "lock ; andl %1, %0"
+	__asm__( "lock ; and %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }