about summary refs log tree commit diff
path: root/arch
diff options
context:
space:
mode:
author	Rich Felker <dalias@aerifal.cx>	2019-04-10 17:10:36 -0400
committer	Rich Felker <dalias@aerifal.cx>	2019-04-10 17:10:36 -0400
commit	22e5bbd0deadcbd767864bd714e890b70e1fe1df (patch)
tree	4482da4b24c0e08b29769dad08bf800207db1227 /arch
parent	e97681d6f2c44bf5fa9ecdd30607cb63c780062e (diff)
download	musl-22e5bbd0deadcbd767864bd714e890b70e1fe1df.tar.gz
	musl-22e5bbd0deadcbd767864bd714e890b70e1fe1df.tar.xz
	musl-22e5bbd0deadcbd767864bd714e890b70e1fe1df.zip
overhaul i386 syscall mechanism not to depend on external asm source
this is the first part of a series of patches intended to make
__syscall fully self-contained in the object file produced using
syscall.h, which will make it possible for crt1 code to perform
syscalls.

the (confusingly named) i386 __vsyscall mechanism, which this commit
removes, was introduced before the presence of a valid thread pointer
was mandatory; back then the thread pointer was set up lazily only if
threads were used. the intent was to be able to perform syscalls using
the kernel's fast entry point in the VDSO, which can use the sysenter
(Intel) or syscall (AMD) instruction instead of int $128, but without
inlining an access to the __syscall global at the point of each
syscall, which would incur a significant size cost from PIC setup
everywhere. the mechanism also shuffled registers/calling convention
around to avoid spills of call-saved registers, and to avoid
allocating ebx or ebp via asm constraints, since there are plenty of
broken-but-supported compiler versions which are incapable of
allocating ebx with -fPIC or ebp with -fno-omit-frame-pointer.

the new mechanism preserves the properties of avoiding spills and
avoiding allocation of ebx/ebp in constraints, but does it inline,
using some fairly simple register shuffling, and uses a field of the
thread structure rather than global data for the vdso-provided syscall
code address.

for now, the external __syscall function is refactored not to use the
old __vsyscall so it can be kept, but the intent is to remove it too.
Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/syscall_arch.h	| 29
1 file changed, 20 insertions, 9 deletions
diff --git a/arch/i386/syscall_arch.h b/arch/i386/syscall_arch.h
index 4c9d874a..8fe35424 100644
--- a/arch/i386/syscall_arch.h
+++ b/arch/i386/syscall_arch.h
@@ -3,52 +3,63 @@
 ((union { long long ll; long l[2]; }){ .ll = x }).l[1]
 #define __SYSCALL_LL_O(x) __SYSCALL_LL_E((x))
 
+#if SYSCALL_NO_TLS
+#define SYSCALL_INSNS "int $128"
+#else
+#define SYSCALL_INSNS "call *%%gs:16"
+#endif
+
+#define SYSCALL_INSNS_12 "xchg %%ebx,%%edx ; " SYSCALL_INSNS " ; xchg %%ebx,%%edx"
+#define SYSCALL_INSNS_34 "xchg %%ebx,%%edi ; " SYSCALL_INSNS " ; xchg %%ebx,%%edi"
+
 static inline long __syscall0(long n)
 {
 	unsigned long __ret;
-	__asm__ __volatile__ (".hidden __vsyscall ; call __vsyscall" : "=a"(__ret) : "a"(n) : "memory");
+	__asm__ __volatile__ (SYSCALL_INSNS : "=a"(__ret) : "a"(n) : "memory");
 	return __ret;
 }
 
 static inline long __syscall1(long n, long a1)
 {
 	unsigned long __ret;
-	__asm__ __volatile__ (".hidden __vsyscall ; call __vsyscall" : "=a"(__ret) : "a"(n), "d"(a1) : "memory");
+	__asm__ __volatile__ (SYSCALL_INSNS_12 : "=a"(__ret) : "a"(n), "d"(a1) : "memory");
 	return __ret;
 }
 
 static inline long __syscall2(long n, long a1, long a2)
 {
 	unsigned long __ret;
-	__asm__ __volatile__ (".hidden __vsyscall ; call __vsyscall" : "=a"(__ret) : "a"(n), "d"(a1), "c"(a2) : "memory");
+	__asm__ __volatile__ (SYSCALL_INSNS_12 : "=a"(__ret) : "a"(n), "d"(a1), "c"(a2) : "memory");
 	return __ret;
 }
 
 static inline long __syscall3(long n, long a1, long a2, long a3)
 {
 	unsigned long __ret;
-	__asm__ __volatile__ (".hidden __vsyscall ; call __vsyscall" : "=a"(__ret) : "a"(n), "d"(a1), "c"(a2), "D"(a3) : "memory");
+	__asm__ __volatile__ (SYSCALL_INSNS_34 : "=a"(__ret) : "a"(n), "D"(a1), "c"(a2), "d"(a3) : "memory");
 	return __ret;
 }
 
 static inline long __syscall4(long n, long a1, long a2, long a3, long a4)
 {
 	unsigned long __ret;
-	__asm__ __volatile__ (".hidden __vsyscall ; call __vsyscall" : "=a"(__ret) : "a"(n), "d"(a1), "c"(a2), "D"(a3), "S"(a4) : "memory");
+	__asm__ __volatile__ (SYSCALL_INSNS_34 : "=a"(__ret) : "a"(n), "D"(a1), "c"(a2), "d"(a3), "S"(a4) : "memory");
 	return __ret;
 }
 
 static inline long __syscall5(long n, long a1, long a2, long a3, long a4, long a5)
 {
-	unsigned long __ret;
-	__asm__ __volatile__ ("push %6 ; .hidden __vsyscall ; call __vsyscall ; add $4,%%esp" : "=a"(__ret) : "a"(n), "d"(a1), "c"(a2), "D"(a3), "S"(a4), "g"(a5) : "memory");
+	unsigned long __ret, __tmp;
+	__asm__ __volatile__ ("mov %%ebx,%1 ; mov %3,%%ebx ; " SYSCALL_INSNS " ; mov %1,%%ebx"
+		: "=a"(__ret), "=m"(__tmp) : "a"(n), "g"(a1), "c"(a2), "d"(a3), "S"(a4), "D"(a5) : "memory");
 	return __ret;
 }
 
 static inline long __syscall6(long n, long a1, long a2, long a3, long a4, long a5, long a6)
 {
-	unsigned long __ret;
-	__asm__ __volatile__ ("push %6 ; .hidden __vsyscall6 ; call __vsyscall6 ; add $4,%%esp" : "=a"(__ret) : "a"(n), "d"(a1), "c"(a2), "D"(a3), "S"(a4), "g"(0+(long[]){a5, a6}) : "memory");
+	unsigned long __ret, __tmp1, __tmp2;
+	__asm__ __volatile__ ("mov %%ebx,%1 ; mov %%ebp,%2 ; mov %4,%%ebx ; mov %9,%%ebp ; " SYSCALL_INSNS " ; mov %2,%%ebp ; mov %1,%%ebx"
+		: "=a"(__ret), "=m"(__tmp1), "=m"(__tmp2) : "a"(n), "g"(a1), "c"(a2), "d"(a3), "S"(a4), "D"(a5), "g"(a6) : "memory");
 	return __ret;
 }