author    Rich Felker <dalias@aerifal.cx>  2018-05-01 14:34:22 -0400
committer Rich Felker <dalias@aerifal.cx>  2018-05-01 14:34:22 -0400
commit    e3c682ab5257aaa6739ef242a9676d897370e78e
tree      b5a07bf71a14d4886bf7d9d6547fae8d88ea76cf
parent    9be4ed5d89ecca80123311a4ec73781e5cc97a9c
work around arm gcc's rejection of r7 asm constraints in thumb mode

in thumb mode, r7 is the ABI frame pointer register, and unless frame
pointer is disabled, gcc insists on treating it as a fixed register,
refusing to spill it to satisfy constraints. unfortunately, r7 is also
used in the syscall ABI for passing the syscall number.

up until now we just treated this as a requirement to disable the
frame pointer when generating code as thumb, but it turns out gcc
forcibly enables the frame pointer, and the fixed register constraint
that goes with it, for functions which contain VLAs. this produces an
unacceptable arch-specific constraint: (non-arm-specific) source files
making syscalls cannot use VLAs.
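
for illustration only (the function name and syscall number choice
here are hypothetical, not taken from this patch), the failing
pattern looks roughly like this; built with gcc -mthumb, the VLA
forces the frame pointer on and gcc rejects the asm with an error
along the lines of "r7 cannot be used in 'asm' here":

	long probe(int len)
	{
		char buf[len];  /* VLA: gcc force-enables the frame pointer */
		buf[0] = 0;
		register long r7 __asm__("r7") = 20; /* 20 == SYS_getpid on arm EABI */
		register long r0 __asm__("r0");
		__asm__ __volatile__ ("svc 0" : "=r"(r0) : "r"(r7) : "memory");
		return r0;
	}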

as a workaround, avoid r7 register constraints when producing thumb
code and instead save/restore r7 via a temp register as part of the
asm block. at some point we may want/need to support armv6-m/thumb1,
so the asm has been tweaked to be thumb1-compatible while remaining
near-optimal for thumb2: it allows the temp and/or syscall number to
be in high registers (necessary since r0-r5 may all be used for
syscall args) and, in thumb2 mode, allows the syscall number to be an
8-bit immediate.
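
concretely, if gcc happened to pick r6 as the temp and r8 for the
syscall number (the register choices here are hypothetical), the
thumb asm block would expand to roughly:

	mov r6,r7   @ stash the frame pointer in the temp
	mov r7,r8   @ load the syscall number
	svc 0
	mov r7,r6   @ restore the frame pointer

using only register-to-register mov, which (unlike movs) can reach
high registers in thumb1.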
Diffstat (limited to 'arch/arm')
 arch/arm/syscall_arch.h | 53 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 39 insertions(+), 14 deletions(-)
diff --git a/arch/arm/syscall_arch.h b/arch/arm/syscall_arch.h
index 6023303b..4db7d152 100644
--- a/arch/arm/syscall_arch.h
+++ b/arch/arm/syscall_arch.h
@@ -3,74 +3,99 @@
 ((union { long long ll; long l[2]; }){ .ll = x }).l[1]
 #define __SYSCALL_LL_O(x) 0, __SYSCALL_LL_E((x))
 
+#ifdef __thumb__
+
+/* Avoid use of r7 in asm constraints when producing thumb code,
+ * since it's reserved as frame pointer and might not be supported. */
+#define __ASM____R7__
+#define __asm_syscall(...) do { \
+	__asm__ __volatile__ ( "mov %1,r7 ; mov r7,%2 ; svc 0 ; mov r7,%1" \
+	: "=r"(r0), "=&r"((int){0}) : __VA_ARGS__ : "memory"); \
+	return r0; \
+	} while (0)
+
+#else
+
+#define __ASM____R7__ __asm__("r7")
 #define __asm_syscall(...) do { \
 	__asm__ __volatile__ ( "svc 0" \
 	: "=r"(r0) : __VA_ARGS__ : "memory"); \
 	return r0; \
 	} while (0)
+#endif
+
+/* For thumb2, we can allow 8-bit immediate syscall numbers, saving a
+ * register in the above dance around r7. Does not work for thumb1 where
+ * only movs, not mov, supports immediates, and we can't use movs because
+ * it doesn't support high regs. */
+#ifdef __thumb2__
+#define R7_OPERAND "rI"(r7)
+#else
+#define R7_OPERAND "r"(r7)
+#endif
 
 static inline long __syscall0(long n)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0");
-	__asm_syscall("r"(r7));
+	__asm_syscall(R7_OPERAND);
 }
 
 static inline long __syscall1(long n, long a)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
-	__asm_syscall("r"(r7), "0"(r0));
+	__asm_syscall(R7_OPERAND, "0"(r0));
 }
 
 static inline long __syscall2(long n, long a, long b)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
 	register long r1 __asm__("r1") = b;
-	__asm_syscall("r"(r7), "0"(r0), "r"(r1));
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1));
 }
 
 static inline long __syscall3(long n, long a, long b, long c)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
 	register long r1 __asm__("r1") = b;
 	register long r2 __asm__("r2") = c;
-	__asm_syscall("r"(r7), "0"(r0), "r"(r1), "r"(r2));
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2));
 }
 
 static inline long __syscall4(long n, long a, long b, long c, long d)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
 	register long r1 __asm__("r1") = b;
 	register long r2 __asm__("r2") = c;
 	register long r3 __asm__("r3") = d;
-	__asm_syscall("r"(r7), "0"(r0), "r"(r1), "r"(r2), "r"(r3));
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3));
 }
 
 static inline long __syscall5(long n, long a, long b, long c, long d, long e)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
 	register long r1 __asm__("r1") = b;
 	register long r2 __asm__("r2") = c;
 	register long r3 __asm__("r3") = d;
 	register long r4 __asm__("r4") = e;
-	__asm_syscall("r"(r7), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4));
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4));
 }
 
 static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
 {
-	register long r7 __asm__("r7") = n;
+	register long r7 __ASM____R7__ = n;
 	register long r0 __asm__("r0") = a;
 	register long r1 __asm__("r1") = b;
 	register long r2 __asm__("r2") = c;
 	register long r3 __asm__("r3") = d;
 	register long r4 __asm__("r4") = e;
 	register long r5 __asm__("r5") = f;
-	__asm_syscall("r"(r7), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
+	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
 }
 
 #define VDSO_USEFUL
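
as a usage sketch (a hypothetical caller, not part of this patch):
with the thumb2 "rI" constraint, a small syscall number can be
satisfied as an 8-bit immediate, so a call like the one below lets
gcc emit "mov r7,#4" inside the save/restore sequence instead of
tying up another register.

	/* hypothetical caller, assuming this header is in scope */
	static long write_hi(void)
	{
		static const char msg[] = "hi\n";
		return __syscall3(4, 1, (long)msg, 3); /* 4 == SYS_write on arm EABI */
	}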