author	Rich Felker <dalias@aerifal.cx>	2014-07-19 13:51:35 -0400
committer	Rich Felker <dalias@aerifal.cx>	2014-07-28 00:28:00 -0400
commit	806655b22539416290aeff36c4be588ce3c0e9a1 (patch)
tree	d93a537fb1c5e0d63570e2592faaaa257c8980dc
parent	9e8f22fb3e797c1029b58d25eb1d3c529bc4b5b5 (diff)
use memory constraints for mips atomic asm
despite lacking the semantic content that the asm accesses the
pointed-to object rather than just using its address as a value, the
mips asm was not actually broken: the asm blocks were declared
volatile, so the compiler must treat them as having unknown side
effects.
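
as a minimal illustration of that point (a hypothetical sketch, not
code from the tree): an asm statement whose outputs go unused may be
deleted or moved by gcc unless it is qualified volatile, since the
pointer operand alone does not tell the compiler that the memory at
*p is read or written.

static inline void illustrate(volatile int *p)
{
	int out;
	/* without volatile, gcc may drop this asm entirely: "out" is
	 * never used and no operand names the pointed-to memory */
	__asm__ ( "lw %0, 0(%1)" : "=r"(out) : "r"(p) );
	/* qualified volatile, it must be emitted where it appears;
	 * this is what kept the old mips asm correct */
	__asm__ __volatile__ ( "lw %0, 0(%1)" : "=r"(out) : "r"(p) );
}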

however, changing the asm to use memory constraints is desirable not
just for semantic correctness and consistency, but also because it
produces better code. the compiler is able to use base/offset
addressing expressions for the atomic object's address rather than
having to load the full address into a single register. this improves
access to global locks in static libc, access to non-zero-offset
atomic fields in synchronization primitives, etc.
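
to make the codegen point concrete, here is a sketch (hypothetical
struct and function names, not from musl) of what the "+m" operand
buys: gcc can substitute a base/offset memory reference such as
4($a0) directly into the ll/sc template, whereas the old "r"(p) form
forced the full address of the field to be computed into a register
first.

struct lockish { int lock; int waiters; };	/* hypothetical layout */

static inline void inc_waiters(struct lockish *m)
{
	int tmp;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		"1:	ll %0, %1\n"	/* %1 may expand to 4($a0) */
		"	addu %0, %0, 1\n"
		"	sc %0, %1\n"
		"	beq %0, $0, 1b\n"
		"	nop\n"
		".set pop\n"
		: "=&r"(tmp), "+m"(m->waiters) : : "memory" );
}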

(cherry picked from commit a294f539c78c6ba0a2786ef3c5b2a1210a33864e)
-rw-r--r--	arch/mips/atomic.h	48
1 file changed, 24 insertions, 24 deletions
diff --git a/arch/mips/atomic.h b/arch/mips/atomic.h
index 69dcdf48..6731d17b 100644
--- a/arch/mips/atomic.h
+++ b/arch/mips/atomic.h
@@ -29,15 +29,15 @@ static inline int a_cas(volatile int *p, int t, int s)
 		".set push\n"
 		".set mips2\n"
 		".set noreorder\n"
-		"1:	ll %0, 0(%2)\n"
+		"1:	ll %0, %2\n"
 		"	bne %0, %3, 1f\n"
 		"	addu %1, %4, $0\n"
-		"	sc %1, 0(%2)\n"
+		"	sc %1, %2\n"
 		"	beq %1, $0, 1b\n"
 		"	nop\n"
 		"1:	\n"
 		".set pop\n"
-		: "=&r"(t), "=&r"(dummy) : "r"(p), "r"(t), "r"(s) : "memory" );
+		: "=&r"(t), "=&r"(dummy), "+m"(*p) : "r"(t), "r"(s) : "memory" );
         return t;
 }
 
@@ -59,14 +59,14 @@ static inline int a_swap(volatile int *x, int v)
 		".set push\n"
 		".set mips2\n"
 		".set noreorder\n"
-		"1:	ll %0, 0(%2)\n"
+		"1:	ll %0, %2\n"
 		"	addu %1, %3, $0\n"
-		"	sc %1, 0(%2)\n"
+		"	sc %1, %2\n"
 		"	beq %1, $0, 1b\n"
 		"	nop\n"
 		"1:	\n"
 		".set pop\n"
-		: "=&r"(old), "=&r"(dummy) : "r"(x), "r"(v) : "memory" );
+		: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
         return old;
 }
 
@@ -77,14 +77,14 @@ static inline int a_fetch_add(volatile int *x, int v)
 		".set push\n"
 		".set mips2\n"
 		".set noreorder\n"
-		"1:	ll %0, 0(%2)\n"
+		"1:	ll %0, %2\n"
 		"	addu %1, %0, %3\n"
-		"	sc %1, 0(%2)\n"
+		"	sc %1, %2\n"
 		"	beq %1, $0, 1b\n"
 		"	nop\n"
 		"1:	\n"
 		".set pop\n"
-		: "=&r"(old), "=&r"(dummy) : "r"(x), "r"(v) : "memory" );
+		: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
         return old;
 }
 
@@ -95,14 +95,14 @@ static inline void a_inc(volatile int *x)
 		".set push\n"
 		".set mips2\n"
 		".set noreorder\n"
-		"1:	ll %0, 0(%1)\n"
+		"1:	ll %0, %1\n"
 		"	addu %0, %0, 1\n"
-		"	sc %0, 0(%1)\n"
+		"	sc %0, %1\n"
 		"	beq %0, $0, 1b\n"
 		"	nop\n"
 		"1:	\n"
 		".set pop\n"
-		: "=&r"(dummy) : "r"(x) : "memory" );
+		: "=&r"(dummy), "+m"(*x) : : "memory" );
 }
 
 static inline void a_dec(volatile int *x)
@@ -112,14 +112,14 @@ static inline void a_dec(volatile int *x)
 		".set push\n"
 		".set mips2\n"
 		".set noreorder\n"
-		"1:	ll %0, 0(%1)\n"
+		"1:	ll %0, %1\n"
 		"	subu %0, %0, 1\n"
-		"	sc %0, 0(%1)\n"
+		"	sc %0, %1\n"
 		"	beq %0, $0, 1b\n"
 		"	nop\n"
 		"1:	\n"
 		".set pop\n"
-		: "=&r"(dummy) : "r"(x) : "memory" );
+		: "=&r"(dummy), "+m"(*x) : : "memory" );
 }
 
 static inline void a_store(volatile int *p, int x)
@@ -129,14 +129,14 @@ static inline void a_store(volatile int *p, int x)
 		".set push\n"
 		".set mips2\n"
 		".set noreorder\n"
-		"1:	ll %0, 0(%1)\n"
+		"1:	ll %0, %1\n"
 		"	addu %0, %2, $0\n"
-		"	sc %0, 0(%1)\n"
+		"	sc %0, %1\n"
 		"	beq %0, $0, 1b\n"
 		"	nop\n"
 		"1:	\n"
 		".set pop\n"
-		: "=&r"(dummy) : "r"(p), "r"(x) : "memory" );
+		: "=&r"(dummy), "+m"(*p) : "r"(x) : "memory" );
 }
 
 static inline void a_spin()
@@ -155,14 +155,14 @@ static inline void a_and(volatile int *p, int v)
 		".set push\n"
 		".set mips2\n"
 		".set noreorder\n"
-		"1:	ll %0, 0(%1)\n"
+		"1:	ll %0, %1\n"
 		"	and %0, %0, %2\n"
-		"	sc %0, 0(%1)\n"
+		"	sc %0, %1\n"
 		"	beq %0, $0, 1b\n"
 		"	nop\n"
 		"1:	\n"
 		".set pop\n"
-		: "=&r"(dummy) : "r"(p), "r"(v) : "memory" );
+		: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
 }
 
 static inline void a_or(volatile int *p, int v)
@@ -172,14 +172,14 @@ static inline void a_or(volatile int *p, int v)
 		".set push\n"
 		".set mips2\n"
 		".set noreorder\n"
-		"1:	ll %0, 0(%1)\n"
+		"1:	ll %0, %1\n"
 		"	or %0, %0, %2\n"
-		"	sc %0, 0(%1)\n"
+		"	sc %0, %1\n"
 		"	beq %0, $0, 1b\n"
 		"	nop\n"
 		"1:	\n"
 		".set pop\n"
-		: "=&r"(dummy) : "r"(p), "r"(v) : "memory" );
+		: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
 }
 
 static inline void a_or_l(volatile void *p, long v)