author		Rich Felker <dalias@aerifal.cx>	2016-01-21 19:08:54 +0000
committer	Rich Felker <dalias@aerifal.cx>	2016-01-21 19:08:54 +0000
commit		1315596b510189b5159e742110b504177bdd4932 (patch)
tree		27159b7b95b944671454b11f36ee13308241f4b5 /arch/aarch64/atomic_arch.h
parent		ce3e24eaae91e7a90f87eb7f1edea8df5942de11 (diff)
refactor internal atomic.h
rather than having each arch provide its own atomic.h, there is a new
shared atomic.h in src/internal which pulls arch-specific definitions
from arch/$(ARCH)/atomic_arch.h. the latter can be extremely minimal,
defining only a_cas or new ll/sc type primitives which the shared
atomic.h will use to construct everything else.

this commit avoids making heavy changes to the individual archs'
atomic implementations. definitions which are identical or
near-identical to what the new shared atomic.h would produce have been
removed, but otherwise the changes made are just hooking up the
arch-specific files to the new infrastructure. major changes to take
advantage of the new system will come in subsequent commits.
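
as an illustration of the pattern (a sketch of the idea only, not the
verbatim src/internal/atomic.h contents): given just a_cas from the
arch, the shared header can synthesize the remaining operations as
compare-and-swap retry loops:

#ifndef a_swap
#define a_swap a_swap
static inline int a_swap(volatile int *p, int v)
{
	int old;
	do old = *p;
	while (a_cas(p, old, v) != old);
	return old;
}
#endif

a_fetch_add, a_and, a_or and the rest follow the same shape, with the
computed new value substituted for v.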
Diffstat (limited to 'arch/aarch64/atomic_arch.h')
-rw-r--r--	arch/aarch64/atomic_arch.h	| 202
1 file changed, 202 insertions(+), 0 deletions(-)
diff --git a/arch/aarch64/atomic_arch.h b/arch/aarch64/atomic_arch.h
new file mode 100644
index 00000000..0755534f
--- /dev/null
+++ b/arch/aarch64/atomic_arch.h
@@ -0,0 +1,202 @@
+#define a_ctz_64 a_ctz_64
+static inline int a_ctz_64(uint64_t x)
+{
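+	/* rbit reverses the bit order of x, so counting the leading
+	 * zeros of the result counts the trailing zeros of x */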
+	__asm__(
+		"	rbit %0, %1\n"
+		"	clz %0, %0\n"
+		: "=r"(x) : "r"(x));
+	return x;
+}
+
+#define a_barrier a_barrier
+static inline void a_barrier()
+{
+	__asm__ __volatile__("dmb ish");
+}
+
+#define a_cas_p a_cas_p
+static inline void *a_cas_p(volatile void *p, void *t, void *s)
+{
+	void *old;
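+	/* ll/sc cas loop: b.ne 1f skips ahead to the trailing barrier
+	 * when the comparison fails, and cbnz retries from 1b when the
+	 * store-exclusive fails. the local label 1 is deliberately
+	 * reused; 1b and 1f bind to the nearest definition in each
+	 * direction. */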
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %0,%3\n"
+		"	cmp %0,%1\n"
+		"	b.ne 1f\n"
+		"	stxr %w0,%2,%3\n"
+		"	cbnz %w0,1b\n"
+		"	mov %0,%1\n"
+		"1:	dmb ish\n"
+		: "=&r"(old)
+		: "r"(t), "r"(s), "Q"(*(long*)p)
+		: "memory", "cc");
+	return old;
+}
+
+#define a_cas a_cas
+static inline int a_cas(volatile int *p, int t, int s)
+{
+	int old;
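+	/* 32-bit variant of the cas loop in a_cas_p, using w registers */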
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %w0,%3\n"
+		"	cmp %w0,%w1\n"
+		"	b.ne 1f\n"
+		"	stxr %w0,%w2,%3\n"
+		"	cbnz %w0,1b\n"
+		"	mov %w0,%w1\n"
+		"1:	dmb ish\n"
+		: "=&r"(old)
+		: "r"(t), "r"(s), "Q"(*p)
+		: "memory", "cc");
+	return old;
+}
+
+#define a_swap a_swap
+static inline int a_swap(volatile int *x, int v)
+{
+	int old, tmp;
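+	/* store v unconditionally; %w1 receives the stxr success flag */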
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %w0,%3\n"
+		"	stxr %w1,%w2,%3\n"
+		"	cbnz %w1,1b\n"
+		"	dmb ish\n"
+		: "=&r"(old), "=&r"(tmp)
+		: "r"(v), "Q"(*x)
+		: "memory", "cc" );
+	return old;
+}
+
+#define a_fetch_add a_fetch_add
+static inline int a_fetch_add(volatile int *x, int v)
+{
+	int old, tmp;
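+	/* %w0 holds the sum after the add, so return old-v to recover
+	 * the value seen before the addition */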
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %w0,%3\n"
+		"	add %w0,%w0,%w2\n"
+		"	stxr %w1,%w0,%3\n"
+		"	cbnz %w1,1b\n"
+		"	dmb ish\n"
+		: "=&r"(old), "=&r"(tmp)
+		: "r"(v), "Q"(*x)
+		: "memory", "cc" );
+	return old-v;
+}
+
+#define a_inc a_inc
+static inline void a_inc(volatile int *x)
+{
+	int tmp, tmp2;
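+	/* same ll/sc loop with an immediate operand; a_dec below uses sub */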
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %w0,%2\n"
+		"	add %w0,%w0,#1\n"
+		"	stxr %w1,%w0,%2\n"
+		"	cbnz %w1,1b\n"
+		"	dmb ish\n"
+		: "=&r"(tmp), "=&r"(tmp2)
+		: "Q"(*x)
+		: "memory", "cc" );
+}
+
+#define a_dec a_dec
+static inline void a_dec(volatile int *x)
+{
+	int tmp, tmp2;
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %w0,%2\n"
+		"	sub %w0,%w0,#1\n"
+		"	stxr %w1,%w0,%2\n"
+		"	cbnz %w1,1b\n"
+		"	dmb ish\n"
+		: "=&r"(tmp), "=&r"(tmp2)
+		: "Q"(*x)
+		: "memory", "cc" );
+}
+
+#define a_and_64 a_and_64
+static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+{
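+	/* tmp holds the full 64-bit value; the unmodified %0 operand
+	 * is used as an x register by ldxr/and/stxr */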
+	uint64_t tmp; int tmp2;
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %0,%3\n"
+		"	and %0,%0,%2\n"
+		"	stxr %w1,%0,%3\n"
+		"	cbnz %w1,1b\n"
+		"	dmb ish\n"
+		: "=&r"(tmp), "=&r"(tmp2)
+		: "r"(v), "Q"(*p)
+		: "memory", "cc" );
+}
+
+#define a_and a_and
+static inline void a_and(volatile int *p, int v)
+{
+	int tmp, tmp2;
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %w0,%3\n"
+		"	and %w0,%w0,%w2\n"
+		"	stxr %w1,%w0,%3\n"
+		"	cbnz %w1,1b\n"
+		"	dmb ish\n"
+		: "=&r"(tmp), "=&r"(tmp2)
+		: "r"(v), "Q"(*p)
+		: "memory", "cc" );
+}
+
+#define a_or_64 a_or_64
+static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+{
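+	/* 64-bit variant; see a_and_64 regarding the width of tmp */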
+	uint64_t tmp; int tmp2;
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %0,%3\n"
+		"	orr %0,%0,%2\n"
+		"	stxr %w1,%0,%3\n"
+		"	cbnz %w1,1b\n"
+		"	dmb ish\n"
+		: "=&r"(tmp), "=&r"(tmp2)
+		: "r"(v), "Q"(*p)
+		: "memory", "cc" );
+}
+
+#define a_or_l a_or_l
+static inline void a_or_l(volatile void *p, long v)
+{
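+	/* long is 64-bit on aarch64 (LP64), so the 64-bit op suffices */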
+	a_or_64(p, v);
+}
+
+#define a_or a_or
+static inline void a_or(volatile int *p, int v)
+{
+	int tmp, tmp2;
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"1:	ldxr %w0,%3\n"
+		"	orr %w0,%w0,%w2\n"
+		"	stxr %w1,%w0,%3\n"
+		"	cbnz %w1,1b\n"
+		"	dmb ish\n"
+		: "=&r"(tmp), "=&r"(tmp2)
+		: "r"(v), "Q"(*p)
+		: "memory", "cc" );
+}
+
+#define a_store a_store
+static inline void a_store(volatile int *p, int x)
+{
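+	/* barriers on both sides order the store with respect to all
+	 * other atomic operations */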
+	__asm__ __volatile__(
+		"	dmb ish\n"
+		"	str %w1,%0\n"
+		"	dmb ish\n"
+		: "=m"(*p)
+		: "r"(x)
+		: "memory", "cc" );
+}
+
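+/* use a full barrier as the pause step in spin-wait loops */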
+#define a_spin a_barrier