author	Rich Felker <dalias@aerifal.cx>	2012-04-04 00:37:33 -0400
committer	Rich Felker <dalias@aerifal.cx>	2012-04-04 00:37:33 -0400
commit	5bd0ab8af66829af74ed80cac767ce1c041fd767 (patch)
tree	0e4c9ae49ee2aea74348ae866a2cdb0197ce2315 /arch
parent	450f2c4a8515f2c5c9f469082bf69fdef009d26a (diff)
work around nasty gcc bug in the i386 syscall asm
when the "r" (register) constraint is used to let gcc choose a
register, gcc will sometimes assign the same register that was used
for one of the other fixed-register operands, if it knows the values
are the same. one common case is multiple zero arguments to a syscall.
this horribly breaks the intended usage, which is swapping the GOT
pointer out of ebx into the temp register (and the syscall argument
from the temp register into ebx) for the duration of the syscall, then
swapping it back.
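
for illustration, a minimal standalone sketch of the failure (the
function names here are hypothetical; the constraint patterns are the
pre- and post-patch __syscall2 from the diff below):

	/* broken pattern: gcc may satisfy the "r" operand with %ecx, which is
	 * already pinned to a2 by the "c" constraint, whenever it knows
	 * a1 == a2 (e.g. both zero). the xchg then swaps the GOT pointer into
	 * %ecx, so the kernel sees the GOT pointer instead of a2. */
	static inline long broken_syscall2(long n, long a1, long a2)
	{
		unsigned long ret;
		__asm__ __volatile__ ("xchg %2,%%ebx ; int $128 ; xchg %2,%%ebx"
			: "=a"(ret) : "a"(n), "r"(a1), "c"(a2) : "memory");
		return ret;
	}

	/* hard-coding the temp register to %edx ("d"), as this patch does,
	 * removes gcc's freedom to pick a colliding register. */
	static inline long fixed_syscall2(long n, long a1, long a2)
	{
		unsigned long ret;
		__asm__ __volatile__ ("xchg %2,%%ebx ; int $128 ; xchg %2,%%ebx"
			: "=a"(ret) : "a"(n), "d"(a1), "c"(a2) : "memory");
		return ret;
	}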

presumably there is a way to fix this with advanced usage of register
constraints on the inline asm, but having bad memories of hellish
compatibility issues with different gcc versions, for the time being
i'm just going to hard-code specific registers to be used. this may
hurt the compiler's ability to optimize, but it will fix serious
miscompilation issues.
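
(for the record, one hypothetical alternative, not what this commit
does and not the constraint trick alluded to above, would be to save
and restore ebx entirely inside the asm so no separately chosen temp
register is needed:

	/* sketch only; the push/pop temporarily moves %esp, which is safe
	 * here because none of the operands are memory operands. */
	static inline long alt_syscall1(long n, long a1)
	{
		unsigned long ret;
		__asm__ __volatile__ ("push %%ebx ; mov %2,%%ebx ; int $128 ; pop %%ebx"
			: "=a"(ret) : "a"(n), "r"(a1) : "memory");
		return ret;
	}

whether that behaves identically across the gcc versions that matter is
exactly the kind of compatibility question the hard-coded registers
sidestep.)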

so far the only function i know of that compiled incorrectly is
getrlimit.c, and naturally the bug only applies to shared (PIC)
builds, but it may be more extensive and may have gone undetected.
Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/bits/syscall.h	| 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/i386/bits/syscall.h b/arch/i386/bits/syscall.h
index 1147e9f3..daea5525 100644
--- a/arch/i386/bits/syscall.h
+++ b/arch/i386/bits/syscall.h
@@ -61,7 +61,7 @@ static inline long __syscall1(long __n, long __a1)
 {
 	unsigned long __ret;
 	__asm__ __volatile__ ("xchg %2,%%ebx ; int $128 ; xchg %2,%%ebx"
-		: "=a"(__ret) : "a"(__n), "r"(__a1) : "memory");
+		: "=a"(__ret) : "a"(__n), "d"(__a1) : "memory");
 	return __ret;
 }
 
@@ -69,7 +69,7 @@ static inline long __syscall2(long __n, long __a1, long __a2)
 {
 	unsigned long __ret;
 	__asm__ __volatile__ ("xchg %2,%%ebx ; int $128 ; xchg %2,%%ebx"
-		: "=a"(__ret) : "a"(__n), "r"(__a1), "c"(__a2) : "memory");
+		: "=a"(__ret) : "a"(__n), "d"(__a1), "c"(__a2) : "memory");
 	return __ret;
 }
 
@@ -77,7 +77,7 @@ static inline long __syscall3(long __n, long __a1, long __a2, long __a3)
 {
 	unsigned long __ret;
 	__asm__ __volatile__ ("xchg %2,%%ebx ; int $128 ; xchg %2,%%ebx"
-		: "=a"(__ret) : "a"(__n), "r"(__a1), "c"(__a2), "d"(__a3) : "memory");
+		: "=a"(__ret) : "a"(__n), "S"(__a1), "c"(__a2), "d"(__a3) : "memory");
 	return __ret;
 }
 
@@ -85,7 +85,7 @@ static inline long __syscall4(long __n, long __a1, long __a2, long __a3, long __
 {
 	unsigned long __ret;
 	__asm__ __volatile__ ("xchg %2,%%ebx ; int $128 ; xchg %2,%%ebx"
-		: "=a"(__ret) : "a"(__n), "r"(__a1), "c"(__a2), "d"(__a3), "S"(__a4) : "memory");
+		: "=a"(__ret) : "a"(__n), "D"(__a1), "c"(__a2), "d"(__a3), "S"(__a4) : "memory");
 	return __ret;
 }