Diffstat (limited to 'sysdeps/unix/sysv/linux')
-rw-r--r--  sysdeps/unix/sysv/linux/ia64/brk.S         |  18
-rw-r--r--  sysdeps/unix/sysv/linux/ia64/clone2.S      |  57
-rw-r--r--  sysdeps/unix/sysv/linux/ia64/getcontext.S  |  19
-rw-r--r--  sysdeps/unix/sysv/linux/ia64/setcontext.S  |  13
-rw-r--r--  sysdeps/unix/sysv/linux/ia64/sysdep.h      | 141
-rw-r--r--  sysdeps/unix/sysv/linux/ia64/vfork.S       |   3
6 files changed, 169 insertions(+), 82 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/ia64/brk.S b/sysdeps/unix/sysv/linux/ia64/brk.S
index 0638d42ac8..0e4114a331 100644
--- a/sysdeps/unix/sysv/linux/ia64/brk.S
+++ b/sysdeps/unix/sysv/linux/ia64/brk.S
@@ -35,19 +35,17 @@ __curbrk:
 weak_alias (__curbrk, ___brk_addr)
 
 LEAF(__brk)
-	mov	r15=__NR_brk
-	break.i	__BREAK_SYSCALL
+	.regstk 1, 0, 0, 0
+	DO_CALL(__NR_brk)
+	cmp.ltu	p6, p0 = ret0, in0
+	addl r9 = @ltoff(__curbrk), gp
 	;;
-	cmp.ltu	p6,p0=ret0,r32	/* r32 is the input register, even though we
-				   haven't allocated a frame */
-	addl	r9=@ltoff(__curbrk),gp
-	;;
-	ld8	r9=[r9]
-(p6) 	mov	ret0=ENOMEM
+	ld8 r9 = [r9]
+(p6) 	mov ret0 = ENOMEM
 (p6)	br.cond.spnt.few __syscall_error
 	;;
-	st8	[r9]=ret0
-	mov 	ret0=0
+	st8 [r9] = ret0
+	mov ret0 = 0
 	ret
 END(__brk)
 
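The rewritten __brk switches from an open-coded break to the DO_CALL macro; the .regstk directive merely names the single incoming argument (in0) without allocating a frame. Failure is detected by comparing the break value the kernel returns against the requested address, since the ia64 brk system call reports no error code of its own. Below is a minimal C sketch of that check, offered only as illustration: syscall(2) stands in for DO_CALL(__NR_brk) and the local curbrk variable for __curbrk.

#include <errno.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *curbrk;				/* models __curbrk */

static int
brk_sketch (void *addr)
{
  /* The kernel returns the new (or unchanged) program break.  */
  void *newbrk = (void *) syscall (SYS_brk, addr);   /* DO_CALL(__NR_brk) */
  if ((uintptr_t) newbrk < (uintptr_t) addr)	     /* cmp.ltu p6, p0 = ret0, in0 */
    {
      errno = ENOMEM;				     /* (p6) mov ret0 = ENOMEM */
      return -1;				     /* (p6) br __syscall_error */
    }
  curbrk = newbrk;				     /* st8 [r9] = ret0 */
  return 0;					     /* mov ret0 = 0 */
}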
diff --git a/sysdeps/unix/sysv/linux/ia64/clone2.S b/sysdeps/unix/sysv/linux/ia64/clone2.S
index 17620ff646..968d1e811c 100644
--- a/sysdeps/unix/sysv/linux/ia64/clone2.S
+++ b/sysdeps/unix/sysv/linux/ia64/clone2.S
@@ -25,49 +25,56 @@
 /* 	         size_t child_stack_size, int flags, void *arg,		*/
 /*	         pid_t *parent_tid, void *tls, pid_t *child_tid)	*/
 
+#define CHILD	p8
+#define PARENT	p9
+
 ENTRY(__clone2)
-	alloc r2=ar.pfs,8,2,6,0
+	.prologue
+	alloc r2=ar.pfs,8,0,6,0
 	cmp.eq p6,p0=0,in0
 	mov r8=EINVAL
-(p6)	br.cond.spnt.few __syscall_error
-	;;
-	flushrs			/* This is necessary, since the child	*/
-				/* will be running with the same 	*/
-				/* register backing store for a few 	*/
-				/* instructions.  We need to ensure	*/
-				/* that it will not read or write the	*/
-				/* backing store.			*/
-	mov loc0=in0		/* save fn	*/
-	mov loc1=in4		/* save arg	*/
 	mov out0=in3		/* Flags are first syscall argument.	*/
 	mov out1=in1		/* Stack address.			*/
+(p6)	br.cond.spnt.many __syscall_error
+	;;
 	mov out2=in2		/* Stack size.				*/
 	mov out3=in5		/* Parent TID Pointer			*/
 	mov out4=in7		/* Child TID Pointer			*/
  	mov out5=in6		/* TLS pointer				*/
-        DO_CALL (SYS_ify (clone2))
+	/*
+	 * clone2() is special: the child cannot execute br.ret right
+	 * after the system call returns, because it starts out
+	 * executing on an empty stack.  Because of this, we can't use
+	 * the new (lightweight) syscall convention here.  Instead, we
+	 * just fall back on always using "break".
+	 *
+	 * Furthermore, since the child starts with an empty stack, we
+	 * need to avoid unwinding past invalid memory.  To that end,
+	 * we'll pretend now that __clone2() is the end of the
+	 * call-chain.  This is wrong for the parent, but only until
+	 * it returns from clone2() but it's better than the
+	 * alternative.
+	 */
+	mov r15=SYS_ify (clone2)
+	.save rp, r0
+	break __BREAK_SYSCALL
+	.body
         cmp.eq p6,p0=-1,r10
+	cmp.eq CHILD,PARENT=0,r8 /* Are we the child?   */
+(p6)	br.cond.spnt.many __syscall_error
 	;;
-(p6)	br.cond.spnt.few __syscall_error
-
-#	define CHILD p6
-#	define PARENT p7
-	cmp.eq CHILD,PARENT=0,r8 /* Are we the child?	*/
-	;;
-(CHILD)	ld8 out1=[loc0],8	/* Retrieve code pointer.	*/
-(CHILD)	mov out0=loc1		/* Pass proper argument	to fn */
+(CHILD)	ld8 out1=[in0],8	/* Retrieve code pointer.	*/
+(CHILD)	mov out0=in4		/* Pass proper argument	to fn */
 (PARENT) ret
 	;;
-	ld8 gp=[loc0]		/* Load function gp.		*/
+	ld8 gp=[in0]		/* Load function gp.		*/
 	mov b6=out1
-	;;
-	br.call.dptk.few rp=b6	/* Call fn(arg) in the child 	*/
+	br.call.dptk.many rp=b6	/* Call fn(arg) in the child 	*/
 	;;
 	mov out0=r8		/* Argument to _exit		*/
 	.globl _exit
-	br.call.dpnt.few rp=_exit /* call _exit with result from fn.	*/
+	br.call.dpnt.many rp=_exit /* call _exit with result from fn.	*/
 	ret			/* Not reached.		*/
-
 PSEUDO_END(__clone2)
 
 /* For now we leave __clone undefined.  This is unlikely to be a	*/
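As the new comment explains, clone2 cannot use the lightweight stub: the child resumes on an empty register stack and must not execute the br.ret that the stub path requires, so the wrapper keeps the break-based entry and pretends (.save rp, r0) that __clone2 is the bottom of the call chain for unwinding purposes. A rough C model of the control flow follows, for orientation only: raw_clone2 is a hypothetical stand-in for the break-based call, and the real code signals errors through r10/r8 rather than through a -1 return value.

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical raw syscall; argument order mirrors out0..out5 above.  */
extern long raw_clone2 (int flags, void *stack_base, size_t stack_size,
                        pid_t *parent_tid, pid_t *child_tid, void *tls);

static long
clone2_sketch (int (*fn) (void *), void *stack_base, size_t stack_size,
               int flags, void *arg, pid_t *parent_tid, void *tls,
               pid_t *child_tid)
{
  if (fn == NULL)                  /* cmp.eq p6, p0 = 0, in0 */
    {
      errno = EINVAL;
      return -1;
    }
  long ret = raw_clone2 (flags, stack_base, stack_size,
                         parent_tid, child_tid, tls);
  if (ret == -1)                   /* error path -> __syscall_error */
    return -1;
  if (ret == 0)                    /* child: cmp.eq CHILD, PARENT = 0, r8 */
    _exit (fn (arg));              /* call fn(arg), then _exit */
  return ret;                      /* parent: child's PID */
}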
diff --git a/sysdeps/unix/sysv/linux/ia64/getcontext.S b/sysdeps/unix/sysv/linux/ia64/getcontext.S
index 7b613611da..0f9cc5db19 100644
--- a/sysdeps/unix/sysv/linux/ia64/getcontext.S
+++ b/sysdeps/unix/sysv/linux/ia64/getcontext.S
@@ -35,26 +35,27 @@
 
 ENTRY(__getcontext)
 	.prologue
-	alloc r16 = ar.pfs, 1, 0, 4, 0
+	.body
+	alloc r11 = ar.pfs, 1, 0, 4, 0
 
 	// sigprocmask (SIG_BLOCK, NULL, &sc->sc_mask):
 
-	mov r2 = SC_MASK
-	mov r15 = __NR_rt_sigprocmask
-	;;
+	mov r3 = SC_MASK
 	mov out0 = SIG_BLOCK
-	mov out1 = 0
-	add out2 = r2, in0
-	mov out3 = 8	// sizeof kernel sigset_t
 
-	break __BREAK_SYSCALL
 	flushrs					// save dirty partition on rbs
+	mov out1 = 0
+	add out2 = r3, in0
+
+	mov out3 = 8	// sizeof kernel sigset_t
+	DO_CALL(__NR_rt_sigprocmask)
 
 	mov.m rFPSR = ar.fpsr
 	mov.m rRSC = ar.rsc
 	add r2 = SC_GR+1*8, r32
 	;;
 	mov.m rBSP = ar.bsp
+	.prologue
 	.save ar.unat, rUNAT
 	mov.m rUNAT = ar.unat
 	.body
@@ -63,7 +64,7 @@ ENTRY(__getcontext)
 
 .mem.offset 0,0; st8.spill [r2] = r1, (5*8 - 1*8)
 .mem.offset 8,0; st8.spill [r3] = r4, 16
-	mov.i rPFS = ar.pfs
+	mov rPFS = r11
 	;;
 .mem.offset 0,0; st8.spill [r2] = r5, 16
 .mem.offset 8,0; st8.spill [r3] = r6, 48
diff --git a/sysdeps/unix/sysv/linux/ia64/setcontext.S b/sysdeps/unix/sysv/linux/ia64/setcontext.S
index e0d1825566..e18c40fa4c 100644
--- a/sysdeps/unix/sysv/linux/ia64/setcontext.S
+++ b/sysdeps/unix/sysv/linux/ia64/setcontext.S
@@ -32,20 +32,21 @@
   other than the PRESERVED state.  */
 
 ENTRY(__setcontext)
-	alloc r16 = ar.pfs, 1, 0, 4, 0
+	.prologue
+	.body
+	alloc r11 = ar.pfs, 1, 0, 4, 0
 
 	// sigprocmask (SIG_SETMASK, &sc->sc_mask, NULL):
 
-	mov r2 = SC_MASK
-	mov r15 = __NR_rt_sigprocmask
-	;;
+	mov r3 = SC_MASK
 	mov out0 = SIG_SETMASK
-	add out1 = r2, in0
+	;;
+	add out1 = r3, in0
 	mov out2 = 0
 	mov out3 = 8	// sizeof kernel sigset_t
 
 	invala
-	break __BREAK_SYSCALL
+	DO_CALL(__NR_rt_sigprocmask)
 	add r2 = SC_NAT, r32
 
 	add r3 = SC_RNAT, r32			// r3 <- &sc_ar_rnat
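Both context routines now reach rt_sigprocmask through DO_CALL, and because the new stub enters the kernel with br.call, the caller's ar.pfs is captured in r11 at the alloc; getcontext then stores that value as rPFS instead of doing a post-call mov.i from ar.pfs. The user-visible behaviour is unchanged; a short, portable usage example of what the two routines implement:

#include <stdio.h>
#include <ucontext.h>

int
main (void)
{
  static volatile int passes;	/* static + volatile: survives the context switch */
  ucontext_t ctx;

  getcontext (&ctx);		/* saves the signal mask and preserved registers */
  printf ("pass %d\n", passes);
  if (++passes < 3)
    setcontext (&ctx);		/* resumes as if getcontext had just returned */
  return 0;			/* reached on the third pass */
}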
diff --git a/sysdeps/unix/sysv/linux/ia64/sysdep.h b/sysdeps/unix/sysv/linux/ia64/sysdep.h
index c298461cc2..6c418b6ab1 100644
--- a/sysdeps/unix/sysv/linux/ia64/sysdep.h
+++ b/sysdeps/unix/sysv/linux/ia64/sysdep.h
@@ -23,6 +23,8 @@
 
 #include <sysdeps/unix/sysdep.h>
 #include <sysdeps/ia64/sysdep.h>
+#include <dl-sysdep.h>
+#include <tls.h>
 
 /* As of GAS v2.4.90.0.7, including a ".align" directive inside a
    function will cause bad unwind info to be emitted (GAS doesn't know
@@ -58,6 +60,14 @@
 # define __NR_semtimedop 1247
 #endif
 
+#if defined USE_DL_SYSINFO \
+	&& (!defined NOT_IN_libc \
+	    || defined IS_IN_libpthread || defined IS_IN_librt)
+# define IA64_USE_NEW_STUB
+#else
+# undef IA64_USE_NEW_STUB
+#endif
+
 #ifdef __ASSEMBLER__
 
 #undef CALL_MCOUNT
@@ -102,9 +112,45 @@
 	cmp.eq p6,p0=-1,r10;			\
 (p6)	br.cond.spnt.few __syscall_error;
 
-#define DO_CALL(num)				\
+#define DO_CALL_VIA_BREAK(num)			\
 	mov r15=num;				\
-	break __BREAK_SYSCALL;
+	break __BREAK_SYSCALL
+
+#ifdef IA64_USE_NEW_STUB
+# ifdef SHARED
+#  define DO_CALL(num)				\
+	.prologue;				\
+	adds r2 = SYSINFO_OFFSET, r13;;		\
+	ld8 r2 = [r2];				\
+	.save ar.pfs, r11;			\
+	mov r11 = ar.pfs;;			\
+	.body;					\
+	mov r15 = num;				\
+	mov b7 = r2;				\
+	br.call.sptk.many b6 = b7;;		\
+	.restore sp;				\
+	mov ar.pfs = r11;			\
+	.prologue;				\
+	.body
+# else /* !SHARED */
+#  define DO_CALL(num)				\
+	.prologue;				\
+	mov r15 = num;				\
+	movl r2 = _dl_sysinfo;;			\
+	ld8 r2 = [r2];				\
+	.save ar.pfs, r11;			\
+	mov r11 = ar.pfs;;			\
+	.body;					\
+	mov b7 = r2;				\
+	br.call.sptk.many b6 = b7;;		\
+	.restore sp;				\
+	mov ar.pfs = r11;			\
+	.prologue;				\
+	.body
+# endif
+#else
+# define DO_CALL(num)				DO_CALL_VIA_BREAK(num)
+#endif
 
 #undef PSEUDO_END
 #define PSEUDO_END(name)	.endp C_SYMBOL_NAME(name);
@@ -150,45 +196,64 @@
    from a syscall.  r10 is set to -1 on error, whilst r8 contains the
    (non-negative) errno on error or the return value on success.
  */
-#undef INLINE_SYSCALL
-#define INLINE_SYSCALL(name, nr, args...)			\
-  ({								\
+
+#ifdef IA64_USE_NEW_STUB
+
+#define DO_INLINE_SYSCALL(name, nr, args...)					\
+    register long _r8 __asm ("r8");						\
+    register long _r10 __asm ("r10");						\
+    register long _r15 __asm ("r15") = __NR_##name;				\
+    register void *_b7 __asm ("b7") = ((tcbhead_t *) __thread_self)->private;	\
+    long _retval;								\
+    LOAD_ARGS_##nr (args);							\
+    /*										\
+     * Don't specify any unwind info here.  We mark ar.pfs as			\
+     * clobbered.  This will force the compiler to save ar.pfs			\
+     * somewhere and emit appropriate unwind info for that save.		\
+     */										\
+    __asm __volatile ("br.call.sptk.many b6=%0;;\n"				\
+		      : "=b"(_b7), "=r" (_r8), "=r" (_r10), "=r" (_r15)		\
+			ASM_OUTARGS_##nr					\
+		      : "0" (_b7), "3" (_r15) ASM_ARGS_##nr			\
+		      : "memory", "ar.pfs" ASM_CLOBBERS_##nr);			\
+    _retval = _r8;
+
+#else /* !IA64_USE_NEW_STUB */
+
+#define DO_INLINE_SYSCALL(name, nr, args...)			\
     register long _r8 asm ("r8");				\
     register long _r10 asm ("r10");				\
     register long _r15 asm ("r15") = __NR_##name;		\
     long _retval;						\
     LOAD_ARGS_##nr (args);					\
     __asm __volatile (BREAK_INSN (__BREAK_SYSCALL)		\
-                      : "=r" (_r8), "=r" (_r10), "=r" (_r15)	\
+		      : "=r" (_r8), "=r" (_r10), "=r" (_r15)	\
 			ASM_OUTARGS_##nr			\
-                      : "2" (_r15) ASM_ARGS_##nr		\
-                      : "memory" ASM_CLOBBERS_##nr);		\
-    _retval = _r8;						\
-    if (_r10 == -1)						\
-      {								\
-        __set_errno (_retval);					\
-        _retval = -1;						\
-      }								\
+		      : "2" (_r15) ASM_ARGS_##nr		\
+		      : "memory" ASM_CLOBBERS_##nr);		\
+    _retval = _r8;
+
+#endif /* !IA64_USE_NEW_STUB */
+
+#undef INLINE_SYSCALL
+#define INLINE_SYSCALL(name, nr, args...)	\
+  ({						\
+    DO_INLINE_SYSCALL(name, nr, args)		\
+    if (_r10 == -1)				\
+      {						\
+	__set_errno (_retval);			\
+	_retval = -1;				\
+      }						\
     _retval; })
 
 #undef INTERNAL_SYSCALL_DECL
 #define INTERNAL_SYSCALL_DECL(err) long int err
 
 #undef INTERNAL_SYSCALL
-#define INTERNAL_SYSCALL(name, err, nr, args...)		\
-  ({								\
-    register long _r8 asm ("r8");				\
-    register long _r10 asm ("r10");				\
-    register long _r15 asm ("r15") = __NR_##name;		\
-    long _retval;						\
-    LOAD_ARGS_##nr (args);					\
-    __asm __volatile (BREAK_INSN (__BREAK_SYSCALL)		\
-                      : "=r" (_r8), "=r" (_r10), "=r" (_r15)	\
-			ASM_OUTARGS_##nr			\
-                      : "2" (_r15) ASM_ARGS_##nr		\
-                      : "memory" ASM_CLOBBERS_##nr);		\
-    _retval = _r8;						\
-    err = _r10;							\
+#define INTERNAL_SYSCALL(name, err, nr, args...)	\
+  ({							\
+    DO_INLINE_SYSCALL(name, nr, args)			\
+    err = _r10;						\
     _retval; })
 
 #undef INTERNAL_SYSCALL_ERROR_P
@@ -225,6 +290,15 @@
 #define ASM_OUTARGS_5	ASM_OUTARGS_4, "=r" (_out4)
 #define ASM_OUTARGS_6	ASM_OUTARGS_5, "=r" (_out5)
 
+#ifdef IA64_USE_NEW_STUB
+#define ASM_ARGS_0
+#define ASM_ARGS_1	ASM_ARGS_0, "4" (_out0)
+#define ASM_ARGS_2	ASM_ARGS_1, "5" (_out1)
+#define ASM_ARGS_3	ASM_ARGS_2, "6" (_out2)
+#define ASM_ARGS_4	ASM_ARGS_3, "7" (_out3)
+#define ASM_ARGS_5	ASM_ARGS_4, "8" (_out4)
+#define ASM_ARGS_6	ASM_ARGS_5, "9" (_out5)
+#else
 #define ASM_ARGS_0
 #define ASM_ARGS_1	ASM_ARGS_0, "3" (_out0)
 #define ASM_ARGS_2	ASM_ARGS_1, "4" (_out1)
@@ -232,6 +306,7 @@
 #define ASM_ARGS_4	ASM_ARGS_3, "6" (_out3)
 #define ASM_ARGS_5	ASM_ARGS_4, "7" (_out4)
 #define ASM_ARGS_6	ASM_ARGS_5, "8" (_out5)
+#endif
 
 #define ASM_CLOBBERS_0	ASM_CLOBBERS_1, "out0"
 #define ASM_CLOBBERS_1	ASM_CLOBBERS_2, "out1"
@@ -239,7 +314,7 @@
 #define ASM_CLOBBERS_3	ASM_CLOBBERS_4, "out3"
 #define ASM_CLOBBERS_4	ASM_CLOBBERS_5, "out4"
 #define ASM_CLOBBERS_5	ASM_CLOBBERS_6, "out5"
-#define ASM_CLOBBERS_6	, "out6", "out7",				\
+#define ASM_CLOBBERS_6_COMMON	, "out6", "out7",			\
   /* Non-stacked integer registers, minus r8, r10, r15.  */		\
   "r2", "r3", "r9", "r11", "r12", "r13", "r14", "r16", "r17", "r18",	\
   "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",	\
@@ -249,7 +324,13 @@
   /* Non-rotating fp registers.  */					\
   "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",	\
   /* Branch registers.  */						\
-  "b6", "b7"
+  "b6"
+
+#ifdef IA64_USE_NEW_STUB
+# define ASM_CLOBBERS_6	ASM_CLOBBERS_6_COMMON
+#else
+# define ASM_CLOBBERS_6	ASM_CLOBBERS_6_COMMON , "b7"
+#endif
 
 #endif /* not __ASSEMBLER__ */
 
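The header change is the heart of the patch. When IA64_USE_NEW_STUB is in effect (USE_DL_SYSINFO, inside libc, libpthread or librt), DO_CALL no longer executes break: it loads the kernel-provided entry point, taken from the TCB slot at SYSINFO_OFFSET(r13) in shared builds or from _dl_sysinfo otherwise, into b7 and enters the kernel with br.call, saving ar.pfs in r11 around the call. Consequently b7 drops out of the clobber list and the stacked-argument matching constraints shift by one, because b7 becomes an explicit operand. The C-level macros are refactored so the break and stub variants share one DO_INLINE_SYSCALL body, with INLINE_SYSCALL and INTERNAL_SYSCALL layered on top of the unchanged ia64 convention: r10 is -1 on failure, in which case r8 carries the positive errno, otherwise r8 is the result. A portable sketch of that convention, with an illustrative raw_syscall standing in for DO_INLINE_SYSCALL:

#include <errno.h>

struct ia64_sysret
{
  long r8;	/* result, or errno value on failure */
  long r10;	/* 0 on success, -1 on failure */
};

/* Hypothetical raw-syscall hook; the real macros expand to inline asm.  */
extern struct ia64_sysret raw_syscall (long nr, long a0, long a1, long a2);

static long
inline_syscall_sketch (long nr, long a0, long a1, long a2)
{
  struct ia64_sysret ret = raw_syscall (nr, a0, a1, a2);
  if (ret.r10 == -1)		/* INLINE_SYSCALL's error branch */
    {
      errno = ret.r8;		/* __set_errno (_retval) */
      return -1;
    }
  return ret.r8;		/* success */
}

INTERNAL_SYSCALL differs only in that it hands r10 back to the caller (err = _r10) instead of touching errno.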
diff --git a/sysdeps/unix/sysv/linux/ia64/vfork.S b/sysdeps/unix/sysv/linux/ia64/vfork.S
index cbfaa3177c..086fce9387 100644
--- a/sysdeps/unix/sysv/linux/ia64/vfork.S
+++ b/sysdeps/unix/sysv/linux/ia64/vfork.S
@@ -34,9 +34,8 @@ ENTRY(__vfork)
 	mov out0=CLONE_VM+CLONE_VFORK+SIGCHLD
 	mov out1=0		/* Standard sp value.			*/
 	;;
-	DO_CALL (SYS_ify (clone))
+	DO_CALL_VIA_BREAK (SYS_ify (clone))
 	cmp.eq p6,p0=-1,r10
-	;;
 (p6)	br.cond.spnt.few __syscall_error
 	ret
 PSEUDO_END(__vfork)
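vfork is implemented here as clone with CLONE_VM+CLONE_VFORK+SIGCHLD and, like clone2, is pinned to the break path by calling DO_CALL_VIA_BREAK explicitly rather than inheriting the new stub through DO_CALL. A self-contained usage example of the semantics this wrapper provides; it only illustrates vfork(2), nothing ia64-specific:

#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t pid = vfork ();
  if (pid == 0)
    {
      /* Child: shares the parent's address space (CLONE_VM) until it
         execs or exits; the parent stays suspended until then
         (CLONE_VFORK).  */
      execlp ("true", "true", (char *) NULL);
      _exit (127);			/* only reached if exec failed */
    }
  if (pid < 0)
    return EXIT_FAILURE;
  waitpid (pid, NULL, 0);		/* reap the child */
  return 0;
}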