Diffstat (limited to 'sysdeps')
-rw-r--r--   sysdeps/generic/dl-fptr.c                   |   2
-rw-r--r--   sysdeps/generic/s_nexttowardf.c             |   9
-rw-r--r--   sysdeps/ia64/dl-machine.h                   |  10
-rw-r--r--   sysdeps/ia64/elf/initfini.c                 |  28
-rw-r--r--   sysdeps/unix/sysv/linux/ia64/brk.S          |  18
-rw-r--r--   sysdeps/unix/sysv/linux/ia64/clone2.S       |  57
-rw-r--r--   sysdeps/unix/sysv/linux/ia64/getcontext.S   |  19
-rw-r--r--   sysdeps/unix/sysv/linux/ia64/setcontext.S   |  13
-rw-r--r--   sysdeps/unix/sysv/linux/ia64/sysdep.h       | 141
-rw-r--r--   sysdeps/unix/sysv/linux/ia64/vfork.S        |   3
10 files changed, 206 insertions, 94 deletions
diff --git a/sysdeps/generic/dl-fptr.c b/sysdeps/generic/dl-fptr.c
index 9768850846..32cf28c0ef 100644
--- a/sysdeps/generic/dl-fptr.c
+++ b/sysdeps/generic/dl-fptr.c
@@ -163,7 +163,7 @@ make_fdesc (ElfW(Addr) ip, ElfW(Addr) gp)
 }
 
-static inline ElfW(Addr) *
+static inline ElfW(Addr) * __attribute__ ((always_inline))
 make_fptr_table (struct link_map *map)
 {
   const ElfW(Sym) *symtab
diff --git a/sysdeps/generic/s_nexttowardf.c b/sysdeps/generic/s_nexttowardf.c
index 1b3a2536ba..4f502c210e 100644
--- a/sysdeps/generic/s_nexttowardf.c
+++ b/sysdeps/generic/s_nexttowardf.c
@@ -22,6 +22,7 @@
 
 #include "math.h"
 #include "math_private.h"
+#include <float.h>
 
 #ifdef __STDC__
         float __nexttowardf(float x, long double y)
@@ -65,7 +66,13 @@
             hx += 1;
         }
         hy = hx&0x7f800000;
-        if(hy>=0x7f800000) return x+x;  /* overflow  */
+        if(hy>=0x7f800000) {
+          x = x+x;      /* overflow  */
+          if (FLT_EVAL_METHOD != 0)
+            /* Force conversion to float.  */
+            asm ("" : "=m"(x) : "m"(x));
+          return x;
+        }
         if(hy<0x00800000) {             /* underflow */
             float x2 = x*x;
             if(x2!=x) {         /* raise underflow flag */
diff --git a/sysdeps/ia64/dl-machine.h b/sysdeps/ia64/dl-machine.h
index a8ba121868..5c0ff9f20c 100644
--- a/sysdeps/ia64/dl-machine.h
+++ b/sysdeps/ia64/dl-machine.h
@@ -33,7 +33,7 @@
    in l_info array.  */
 #define DT_IA_64(x) (DT_IA_64_##x - DT_LOPROC + DT_NUM)
 
-static inline void
+static inline void __attribute__ ((always_inline))
 __ia64_init_bootstrap_fdesc_table (struct link_map *map)
 {
   Elf64_Addr *boot_table;
@@ -49,7 +49,7 @@ __ia64_init_bootstrap_fdesc_table (struct link_map *map)
         __ia64_init_bootstrap_fdesc_table (&bootstrap_map);
 
 /* Return nonzero iff ELF header is compatible with the running host.  */
-static inline int
+static inline int __attribute__ ((unused))
 elf_machine_matches_host (const Elf64_Ehdr *ehdr)
 {
   return ehdr->e_machine == EM_IA_64;
@@ -57,7 +57,7 @@ elf_machine_matches_host (const Elf64_Ehdr *ehdr)
 
 
 /* Return the link-time address of _DYNAMIC.  */
-static inline Elf64_Addr
+static inline Elf64_Addr __attribute__ ((unused, const))
 elf_machine_dynamic (void)
 {
   Elf64_Addr *p;
@@ -77,7 +77,7 @@ elf_machine_dynamic (void)
 
 
 /* Return the run-time load address of the shared object.  */
-static inline Elf64_Addr
+static inline Elf64_Addr __attribute__ ((unused))
 elf_machine_load_address (void)
 {
   Elf64_Addr ip;
@@ -98,7 +98,7 @@ elf_machine_load_address (void)
 
 /* Set up the loaded object described by L so its unrelocated PLT
    entries will jump to the on-demand fixup code in dl-runtime.c.  */
-static inline int __attribute__ ((always_inline))
+static inline int __attribute__ ((unused, always_inline))
 elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
 {
   extern void _dl_runtime_resolve (void);
diff --git a/sysdeps/ia64/elf/initfini.c b/sysdeps/ia64/elf/initfini.c
index f86991922c..b901d3967b 100644
--- a/sysdeps/ia64/elf/initfini.c
+++ b/sysdeps/ia64/elf/initfini.c
@@ -61,16 +61,20 @@ __asm__ (".section .init_array, \"aw\"\n"
 #endif
 
 __asm__ (".section .init\n"
-"   .align 16\n"
 "   .global _init#\n"
 "   .proc _init#\n"
 "_init:\n"
+"   .prologue\n"
+"   .save ar.pfs, r34\n"
 "   alloc r34 = ar.pfs, 0, 3, 0, 0\n"
+"   .vframe r32\n"
 "   mov r32 = r12\n"
+"   .save rp, r33\n"
 "   mov r33 = b0\n"
+"   .body\n"
 "   adds r12 = -16, r12\n"
 #ifdef HAVE_INITFINI_ARRAY
-"   ;;\n" /* see gmon_initializer() below */
+"   ;;\n" /* see gmon_initializer() above */
 #else
 "   .weak __gmon_start__#\n"
 "   addl r14 = @ltoff(@fptr(__gmon_start__#)), gp\n"
@@ -90,12 +94,17 @@ __asm__ (".section .init\n"
 "   ;;\n"
 ".L5:\n"
 #endif
-"   .align 16\n"
 "   .endp _init#\n"
 "\n"
 "/*@_init_PROLOG_ENDS*/\n"
 "\n"
 "/*@_init_EPILOG_BEGINS*/\n"
+"   .proc _init#\n"
+"   .prologue\n"
+"   .save ar.pfs, r34\n"
+"   .vframe r32\n"
+"   .save rp, r33\n"
+"   .body\n"
 "   .section .init\n"
 "   .regstk 0,2,0,0\n"
 "   mov r12 = r32\n"
@@ -107,16 +116,19 @@ __asm__ (".section .init\n"
 "\n"
 "/*@_fini_PROLOG_BEGINS*/\n"
 "   .section .fini\n"
-"   .align 16\n"
 "   .global _fini#\n"
 "   .proc _fini#\n"
 "_fini:\n"
+"   .prologue\n"
+"   .save ar.pfs, r34\n"
 "   alloc r34 = ar.pfs, 0, 3, 0, 0\n"
+"   .vframe r32\n"
 "   mov r32 = r12\n"
+"   .save rp, r33\n"
 "   mov r33 = b0\n"
+"   .body\n"
 "   adds r12 = -16, r12\n"
 "   ;;\n"
-"   .align 16\n"
 "   .endp _fini#\n"
 "\n"
 "/*@_fini_PROLOG_ENDS*/\n"
@@ -125,6 +137,12 @@ __asm__ (".section .init\n"
 "\n"
 "/*@_fini_EPILOG_BEGINS*/\n"
 "   .section .fini\n"
+"   .proc _fini#\n"
+"   .prologue\n"
+"   .save ar.pfs, r34\n"
+"   .vframe r32\n"
+"   .save rp, r33\n"
+"   .body\n"
 "   mov r12 = r32\n"
 "   mov ar.pfs = r34\n"
 "   mov b0 = r33\n"
diff --git a/sysdeps/unix/sysv/linux/ia64/brk.S b/sysdeps/unix/sysv/linux/ia64/brk.S
index 0638d42ac8..0e4114a331 100644
--- a/sysdeps/unix/sysv/linux/ia64/brk.S
+++ b/sysdeps/unix/sysv/linux/ia64/brk.S
@@ -35,19 +35,17 @@ __curbrk:
 weak_alias (__curbrk, ___brk_addr)
 
 LEAF(__brk)
-        mov r15=__NR_brk
-        break.i __BREAK_SYSCALL
+        .regstk 1, 0, 0, 0
+        DO_CALL(__NR_brk)
+        cmp.ltu p6, p0 = ret0, in0
+        addl r9 = @ltoff(__curbrk), gp
         ;;
-        cmp.ltu p6,p0=ret0,r32  /* r32 is the input register, even though we
-                                   haven't allocated a frame */
-        addl r9=@ltoff(__curbrk),gp
-        ;;
-        ld8 r9=[r9]
-(p6)    mov ret0=ENOMEM
+        ld8 r9 = [r9]
+(p6)    mov ret0 = ENOMEM
 (p6)    br.cond.spnt.few __syscall_error
         ;;
-        st8 [r9]=ret0
-        mov ret0=0
+        st8 [r9] = ret0
+        mov ret0 = 0
         ret
 END(__brk)
diff --git a/sysdeps/unix/sysv/linux/ia64/clone2.S b/sysdeps/unix/sysv/linux/ia64/clone2.S
index 17620ff646..968d1e811c 100644
--- a/sysdeps/unix/sysv/linux/ia64/clone2.S
+++ b/sysdeps/unix/sysv/linux/ia64/clone2.S
@@ -25,49 +25,56 @@
 /*            size_t child_stack_size, int flags, void *arg,           */
 /*            pid_t *parent_tid, void *tls, pid_t *child_tid)          */
 
+#define CHILD   p8
+#define PARENT  p9
+
 ENTRY(__clone2)
-        alloc r2=ar.pfs,8,2,6,0
+        .prologue
+        alloc r2=ar.pfs,8,0,6,0
         cmp.eq p6,p0=0,in0
         mov r8=EINVAL
-(p6)    br.cond.spnt.few __syscall_error
-        ;;
-        flushrs                 /* This is necessary, since the child */
-                                /* will be running with the same */
-                                /* register backing store for a few */
-                                /* instructions.  We need to ensure */
-                                /* that it will not read or write the */
-                                /* backing store.  */
-        mov loc0=in0            /* save fn */
-        mov loc1=in4            /* save arg */
         mov out0=in3            /* Flags are first syscall argument.  */
         mov out1=in1            /* Stack address.  */
+(p6)    br.cond.spnt.many __syscall_error
+        ;;
         mov out2=in2            /* Stack size.  */
         mov out3=in5            /* Parent TID Pointer  */
         mov out4=in7            /* Child TID Pointer  */
         mov out5=in6            /* TLS pointer  */
-        DO_CALL (SYS_ify (clone2))
+        /*
+         * clone2() is special: the child cannot execute br.ret right
+         * after the system call returns, because it starts out
+         * executing on an empty stack.  Because of this, we can't use
+         * the new (lightweight) syscall convention here.  Instead, we
+         * just fall back on always using "break".
+         *
+         * Furthermore, since the child starts with an empty stack, we
+         * need to avoid unwinding past invalid memory.  To that end,
+         * we'll pretend now that __clone2() is the end of the
+         * call-chain.  This is wrong for the parent, but only until
+         * it returns from clone2() but it's better than the
+         * alternative.
+         */
+        mov r15=SYS_ify (clone2)
+        .save rp, r0
+        break __BREAK_SYSCALL
+        .body
         cmp.eq p6,p0=-1,r10
+        cmp.eq CHILD,PARENT=0,r8 /* Are we the child?   */
+(p6)    br.cond.spnt.many __syscall_error
         ;;
-(p6)    br.cond.spnt.few __syscall_error
-
-# define CHILD  p6
-# define PARENT p7
-        cmp.eq CHILD,PARENT=0,r8 /* Are we the child?   */
-        ;;
-(CHILD) ld8 out1=[loc0],8       /* Retrieve code pointer.       */
-(CHILD) mov out0=loc1           /* Pass proper argument to fn */
+(CHILD) ld8 out1=[in0],8        /* Retrieve code pointer.       */
+(CHILD) mov out0=in4            /* Pass proper argument to fn */
 (PARENT) ret
         ;;
-        ld8 gp=[loc0]           /* Load function gp.            */
+        ld8 gp=[in0]            /* Load function gp.            */
         mov b6=out1
-        ;;
-        br.call.dptk.few rp=b6  /* Call fn(arg) in the child */
+        br.call.dptk.many rp=b6 /* Call fn(arg) in the child */
         ;;
         mov out0=r8             /* Argument to _exit  */
         .globl _exit
-        br.call.dpnt.few rp=_exit       /* call _exit with result from fn.  */
+        br.call.dpnt.many rp=_exit      /* call _exit with result from fn.  */
         ret                     /* Not reached. */
-
 PSEUDO_END(__clone2)
 
 /* For now we leave __clone undefined.  This is unlikely to be a */
diff --git a/sysdeps/unix/sysv/linux/ia64/getcontext.S b/sysdeps/unix/sysv/linux/ia64/getcontext.S
index 7b613611da..0f9cc5db19 100644
--- a/sysdeps/unix/sysv/linux/ia64/getcontext.S
+++ b/sysdeps/unix/sysv/linux/ia64/getcontext.S
@@ -35,26 +35,27 @@
 
 ENTRY(__getcontext)
         .prologue
-        alloc r16 = ar.pfs, 1, 0, 4, 0
+        .body
+        alloc r11 = ar.pfs, 1, 0, 4, 0
 
         // sigprocmask (SIG_BLOCK, NULL, &sc->sc_mask):
 
-        mov r2 = SC_MASK
-        mov r15 = __NR_rt_sigprocmask
-        ;;
+        mov r3 = SC_MASK
         mov out0 = SIG_BLOCK
-        mov out1 = 0
-        add out2 = r2, in0
-        mov out3 = 8            // sizeof kernel sigset_t
-        break __BREAK_SYSCALL
 
         flushrs                 // save dirty partition on rbs
+        mov out1 = 0
+        add out2 = r3, in0
+
+        mov out3 = 8            // sizeof kernel sigset_t
+        DO_CALL(__NR_rt_sigprocmask)
 
         mov.m rFPSR = ar.fpsr
         mov.m rRSC = ar.rsc
         add r2 = SC_GR+1*8, r32
         ;;
         mov.m rBSP = ar.bsp
+        .prologue
         .save ar.unat, rUNAT
         mov.m rUNAT = ar.unat
         .body
@@ -63,7 +64,7 @@ ENTRY(__getcontext)
         .mem.offset 0,0; st8.spill [r2] = r1, (5*8 - 1*8)
         .mem.offset 8,0; st8.spill [r3] = r4, 16
-        mov.i rPFS = ar.pfs
+        mov rPFS = r11
         ;;
         .mem.offset 0,0; st8.spill [r2] = r5, 16
         .mem.offset 8,0; st8.spill [r3] = r6, 48
diff --git a/sysdeps/unix/sysv/linux/ia64/setcontext.S b/sysdeps/unix/sysv/linux/ia64/setcontext.S
index e0d1825566..e18c40fa4c 100644
--- a/sysdeps/unix/sysv/linux/ia64/setcontext.S
+++ b/sysdeps/unix/sysv/linux/ia64/setcontext.S
@@ -32,20 +32,21 @@
    other than the PRESERVED state.  */
 
 ENTRY(__setcontext)
-        alloc r16 = ar.pfs, 1, 0, 4, 0
+        .prologue
+        .body
+        alloc r11 = ar.pfs, 1, 0, 4, 0
 
         // sigprocmask (SIG_SETMASK, &sc->sc_mask, NULL):
 
-        mov r2 = SC_MASK
-        mov r15 = __NR_rt_sigprocmask
-        ;;
+        mov r3 = SC_MASK
         mov out0 = SIG_SETMASK
-        add out1 = r2, in0
+        ;;
+        add out1 = r3, in0
         mov out2 = 0
         mov out3 = 8            // sizeof kernel sigset_t
         invala
-        break __BREAK_SYSCALL
+        DO_CALL(__NR_rt_sigprocmask)
 
         add r2 = SC_NAT, r32
         add r3 = SC_RNAT, r32   // r3 <- &sc_ar_rnat
diff --git a/sysdeps/unix/sysv/linux/ia64/sysdep.h b/sysdeps/unix/sysv/linux/ia64/sysdep.h
index c298461cc2..6c418b6ab1 100644
--- a/sysdeps/unix/sysv/linux/ia64/sysdep.h
+++ b/sysdeps/unix/sysv/linux/ia64/sysdep.h
@@ -23,6 +23,8 @@
 
 #include <sysdeps/unix/sysdep.h>
 #include <sysdeps/ia64/sysdep.h>
+#include <dl-sysdep.h>
+#include <tls.h>
 
 /* As of GAS v2.4.90.0.7, including a ".align" directive inside a
    function will cause bad unwind info to be emitted (GAS doesn't know
@@ -58,6 +60,14 @@
 # define __NR_semtimedop 1247
 #endif
 
+#if defined USE_DL_SYSINFO \
+        && (!defined NOT_IN_libc \
+            || defined IS_IN_libpthread || defined IS_IN_librt)
+# define IA64_USE_NEW_STUB
+#else
+# undef IA64_USE_NEW_STUB
+#endif
+
 #ifdef __ASSEMBLER__
 
 #undef CALL_MCOUNT
@@ -102,9 +112,45 @@
         cmp.eq p6,p0=-1,r10;                            \
 (p6)    br.cond.spnt.few __syscall_error;
 
-#define DO_CALL(num)                                    \
+#define DO_CALL_VIA_BREAK(num)                          \
         mov r15=num;                                    \
-        break __BREAK_SYSCALL;
+        break __BREAK_SYSCALL
+
+#ifdef IA64_USE_NEW_STUB
+# ifdef SHARED
+# define DO_CALL(num)                                   \
+        .prologue;                                      \
+        adds r2 = SYSINFO_OFFSET, r13;;                 \
+        ld8 r2 = [r2];                                  \
+        .save ar.pfs, r11;                              \
+        mov r11 = ar.pfs;;                              \
+        .body;                                          \
+        mov r15 = num;                                  \
+        mov b7 = r2;                                    \
+        br.call.sptk.many b6 = b7;;                     \
+        .restore sp;                                    \
+        mov ar.pfs = r11;                               \
+        .prologue;                                      \
+        .body
+# else /* !SHARED */
+# define DO_CALL(num)                                   \
+        .prologue;                                      \
+        mov r15 = num;                                  \
+        movl r2 = _dl_sysinfo;;                         \
+        ld8 r2 = [r2];                                  \
+        .save ar.pfs, r11;                              \
+        mov r11 = ar.pfs;;                              \
+        .body;                                          \
+        mov b7 = r2;                                    \
+        br.call.sptk.many b6 = b7;;                     \
+        .restore sp;                                    \
+        mov ar.pfs = r11;                               \
+        .prologue;                                      \
+        .body
+# endif
+#else
+# define DO_CALL(num)           DO_CALL_VIA_BREAK(num)
+#endif
 
 #undef PSEUDO_END
 #define PSEUDO_END(name)        .endp C_SYMBOL_NAME(name);
@@ -150,45 +196,64 @@
    from a syscall.  r10 is set to -1 on error, whilst r8 contains the
    (non-negative) errno on error or the return value on success.  */
 
-#undef INLINE_SYSCALL
-#define INLINE_SYSCALL(name, nr, args...)               \
-  ({                                                    \
+
+#ifdef IA64_USE_NEW_STUB
+
+#define DO_INLINE_SYSCALL(name, nr, args...)            \
+    register long _r8 __asm ("r8");                     \
+    register long _r10 __asm ("r10");                   \
+    register long _r15 __asm ("r15") = __NR_##name;     \
+    register void *_b7 __asm ("b7") = ((tcbhead_t *) __thread_self)->private; \
+    long _retval;                                       \
+    LOAD_ARGS_##nr (args);                              \
+    /*                                                  \
+     * Don't specify any unwind info here.  We mark ar.pfs as          \
+     * clobbered.  This will force the compiler to save ar.pfs         \
+     * somewhere and emit appropriate unwind info for that save.       \
+     */                                                 \
+    __asm __volatile ("br.call.sptk.many b6=%0;;\n"     \
+                      : "=b"(_b7), "=r" (_r8), "=r" (_r10), "=r" (_r15) \
+                        ASM_OUTARGS_##nr                \
+                      : "0" (_b7), "3" (_r15) ASM_ARGS_##nr \
+                      : "memory", "ar.pfs" ASM_CLOBBERS_##nr); \
+    _retval = _r8;
+
+#else /* !IA64_USE_NEW_STUB */
+
+#define DO_INLINE_SYSCALL(name, nr, args...)            \
     register long _r8 asm ("r8");                       \
     register long _r10 asm ("r10");                     \
    register long _r15 asm ("r15") = __NR_##name;        \
     long _retval;                                       \
     LOAD_ARGS_##nr (args);                              \
    __asm __volatile (BREAK_INSN (__BREAK_SYSCALL)       \
-                      : "=r" (_r8), "=r" (_r10), "=r" (_r15) \
+                      : "=r" (_r8), "=r" (_r10), "=r" (_r15)    \
                         ASM_OUTARGS_##nr                \
-                      : "2" (_r15) ASM_ARGS_##nr        \
-                      : "memory" ASM_CLOBBERS_##nr);    \
-    _retval = _r8;                                      \
-    if (_r10 == -1)                                     \
-      {                                                 \
-        __set_errno (_retval);                          \
-        _retval = -1;                                   \
-      }                                                 \
+                      : "2" (_r15) ASM_ARGS_##nr                \
+                      : "memory" ASM_CLOBBERS_##nr);            \
+    _retval = _r8;
+
+#endif /* !IA64_USE_NEW_STUB */
+
+#undef INLINE_SYSCALL
+#define INLINE_SYSCALL(name, nr, args...)               \
+  ({                                                    \
+    DO_INLINE_SYSCALL(name, nr, args)                   \
+    if (_r10 == -1)                                     \
+      {                                                 \
+        __set_errno (_retval);                          \
+        _retval = -1;                                   \
+      }                                                 \
     _retval; })
 
 #undef INTERNAL_SYSCALL_DECL
 #define INTERNAL_SYSCALL_DECL(err) long int err
 
 #undef INTERNAL_SYSCALL
-#define INTERNAL_SYSCALL(name, err, nr, args...)        \
-  ({                                                    \
-    register long _r8 asm ("r8");                       \
-    register long _r10 asm ("r10");                     \
-    register long _r15 asm ("r15") = __NR_##name;       \
-    long _retval;                                       \
-    LOAD_ARGS_##nr (args);                              \
-    __asm __volatile (BREAK_INSN (__BREAK_SYSCALL)      \
-                      : "=r" (_r8), "=r" (_r10), "=r" (_r15) \
-                        ASM_OUTARGS_##nr                \
-                      : "2" (_r15) ASM_ARGS_##nr        \
-                      : "memory" ASM_CLOBBERS_##nr);    \
-    _retval = _r8;                                      \
-    err = _r10;                                         \
+#define INTERNAL_SYSCALL(name, err, nr, args...)        \
+  ({                                                    \
+    DO_INLINE_SYSCALL(name, nr, args)                   \
+    err = _r10;                                         \
     _retval; })
 
 #undef INTERNAL_SYSCALL_ERROR_P
@@ -225,6 +290,15 @@
 #define ASM_OUTARGS_5   ASM_OUTARGS_4, "=r" (_out4)
 #define ASM_OUTARGS_6   ASM_OUTARGS_5, "=r" (_out5)
 
+#ifdef IA64_USE_NEW_STUB
+#define ASM_ARGS_0
+#define ASM_ARGS_1      ASM_ARGS_0, "4" (_out0)
+#define ASM_ARGS_2      ASM_ARGS_1, "5" (_out1)
+#define ASM_ARGS_3      ASM_ARGS_2, "6" (_out2)
+#define ASM_ARGS_4      ASM_ARGS_3, "7" (_out3)
+#define ASM_ARGS_5      ASM_ARGS_4, "8" (_out4)
+#define ASM_ARGS_6      ASM_ARGS_5, "9" (_out5)
+#else
 #define ASM_ARGS_0
 #define ASM_ARGS_1      ASM_ARGS_0, "3" (_out0)
 #define ASM_ARGS_2      ASM_ARGS_1, "4" (_out1)
@@ -232,6 +306,7 @@
 #define ASM_ARGS_4      ASM_ARGS_3, "6" (_out3)
 #define ASM_ARGS_5      ASM_ARGS_4, "7" (_out4)
 #define ASM_ARGS_6      ASM_ARGS_5, "8" (_out5)
+#endif
 
 #define ASM_CLOBBERS_0  ASM_CLOBBERS_1, "out0"
 #define ASM_CLOBBERS_1  ASM_CLOBBERS_2, "out1"
@@ -239,7 +314,7 @@
 #define ASM_CLOBBERS_3  ASM_CLOBBERS_4, "out3"
 #define ASM_CLOBBERS_4  ASM_CLOBBERS_5, "out4"
 #define ASM_CLOBBERS_5  ASM_CLOBBERS_6, "out5"
-#define ASM_CLOBBERS_6  , "out6", "out7",                               \
+#define ASM_CLOBBERS_6_COMMON   , "out6", "out7",                       \
   /* Non-stacked integer registers, minus r8, r10, r15.  */            \
   "r2", "r3", "r9", "r11", "r12", "r13", "r14", "r16", "r17", "r18",   \
   "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27",       \
@@ -249,7 +324,13 @@
   /* Non-rotating fp registers.  */                                    \
   "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",    \
   /* Branch registers.  */                                             \
-  "b6", "b7"
+  "b6"
+
+#ifdef IA64_USE_NEW_STUB
+# define ASM_CLOBBERS_6 ASM_CLOBBERS_6_COMMON
+#else
+# define ASM_CLOBBERS_6 ASM_CLOBBERS_6_COMMON , "b7"
+#endif
 
 #endif /* not __ASSEMBLER__ */
diff --git a/sysdeps/unix/sysv/linux/ia64/vfork.S b/sysdeps/unix/sysv/linux/ia64/vfork.S
index cbfaa3177c..086fce9387 100644
--- a/sysdeps/unix/sysv/linux/ia64/vfork.S
+++ b/sysdeps/unix/sysv/linux/ia64/vfork.S
@@ -34,9 +34,8 @@ ENTRY(__vfork)
         mov out0=CLONE_VM+CLONE_VFORK+SIGCHLD
         mov out1=0              /* Standard sp value.                  */
         ;;
-        DO_CALL (SYS_ify (clone))
+        DO_CALL_VIA_BREAK (SYS_ify (clone))
         cmp.eq p6,p0=-1,r10
-        ;;
 (p6)    br.cond.spnt.few __syscall_error
         ret
 PSEUDO_END(__vfork)
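The comment kept in the sysdep.h hunk above describes the error convention that both the break-based path and the new lightweight stub rely on: the kernel sets r10 to -1 on failure, and r8 then carries the (non-negative) errno value, otherwise r8 is the return value. The C sketch below is illustrative only and is not part of the patch; fake_ia64_syscall and its return struct are made-up stand-ins for the inline-assembly stub, showing the check that INLINE_SYSCALL performs after the call returns.

/* Illustration only -- not from the patch.  Mirrors the r8/r10 error
   convention that INLINE_SYSCALL implements on ia64.  */
#include <errno.h>
#include <stdio.h>

struct fake_sysret
{
  long r8;                      /* result, or errno value on failure */
  long r10;                     /* 0 on success, -1 on failure */
};

/* Made-up stand-in for the real stub (break, or br.call through b7).  */
static struct fake_sysret
fake_ia64_syscall (long nr)
{
  struct fake_sysret ret;
  /* Pretend the kernel rejected syscall NR with EINVAL.  */
  ret.r8 = EINVAL;
  ret.r10 = -1;
  (void) nr;
  return ret;
}

/* What the INLINE_SYSCALL wrapper boils down to after the stub returns.  */
static long
inline_syscall_like (long nr)
{
  struct fake_sysret ret = fake_ia64_syscall (nr);

  if (ret.r10 == -1)            /* error path: r8 carries the errno value */
    {
      errno = (int) ret.r8;
      return -1;
    }
  return ret.r8;                /* success: r8 is the return value */
}

int
main (void)
{
  if (inline_syscall_like (1234) == -1)
    perror ("fake syscall");
  return 0;
}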