Diffstat (limited to 'sysdeps/sparc')
-rw-r--r--  sysdeps/sparc/sparc64/dl-machine.h |  16
-rw-r--r--  sysdeps/sparc/sparc64/lshift.S     | 127
-rw-r--r--  sysdeps/sparc/sparc64/mul_1.S      |   7
-rw-r--r--  sysdeps/sparc/sparc64/rshift.S     | 121
-rw-r--r--  sysdeps/sparc/sparc64/submul_1.S   |  38
5 files changed, 153 insertions(+), 156 deletions(-)
diff --git a/sysdeps/sparc/sparc64/dl-machine.h b/sysdeps/sparc/sparc64/dl-machine.h index b042989881..5a86ec57d8 100644 --- a/sysdeps/sparc/sparc64/dl-machine.h +++ b/sysdeps/sparc/sparc64/dl-machine.h @@ -25,6 +25,9 @@ #include <elf/ldsodefs.h> #include <sysdep.h> +#define ELF64_R_TYPE_ID(info) ((info) & 0xff) +#define ELF64_R_TYPE_DATA(info) ((info) >> 8) + /* Return nonzero iff E_MACHINE is compatible with the running host. */ static inline int elf_machine_matches_host (Elf64_Half e_machine) @@ -178,14 +181,14 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc, weak_extern (_dl_rtld_map); #endif - if (ELF64_R_TYPE (reloc->r_info) == R_SPARC_RELATIVE) + if (ELF64_R_TYPE_ID (reloc->r_info) == R_SPARC_RELATIVE) { #ifndef RTLD_BOOTSTRAP if (map != &_dl_rtld_map) /* Already done in rtld itself. */ #endif *reloc_addr = map->l_addr + reloc->r_addend; } - else if (ELF64_R_TYPE (reloc->r_info) != R_SPARC_NONE) /* Who is Wilbur? */ + else if (ELF64_R_TYPE_ID (reloc->r_info) != R_SPARC_NONE) /* Who is Wilbur? */ { const Elf64_Sym *const refsym = sym; Elf64_Addr value; @@ -194,13 +197,13 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc, value = map->l_addr; else { - value = RESOLVE (&sym, version, ELF64_R_TYPE (reloc->r_info)); + value = RESOLVE (&sym, version, ELF64_R_TYPE_ID (reloc->r_info)); if (sym) value += sym->st_value; } value += reloc->r_addend; /* Assume copy relocs have zero addend. */ - switch (ELF64_R_TYPE (reloc->r_info)) + switch (ELF64_R_TYPE_ID (reloc->r_info)) { case R_SPARC_COPY: if (sym == NULL) @@ -262,6 +265,11 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc, ((*(unsigned int *)reloc_addr & 0xffc00000) | (value >> 10)); break; + case R_SPARC_OLO10: + *(unsigned int *) reloc_addr = + ((*(unsigned int *)reloc_addr & ~0x1fff) | + (((value & 0x3ff) + ELF64_R_TYPE_DATA (reloc->r_info)) & 0x1fff)); + break; /* MEDMID code model relocs */ case R_SPARC_H44: diff --git a/sysdeps/sparc/sparc64/lshift.S b/sysdeps/sparc/sparc64/lshift.S index f211924ddb..4f265ad96b 100644 --- a/sysdeps/sparc/sparc64/lshift.S +++ b/sysdeps/sparc/sparc64/lshift.S @@ -1,6 +1,6 @@ /* SPARC v9 __mpn_lshift -- - Copyright (C) 1996 Free Software Foundation, Inc. + Copyright (C) 1996, 1999 Free Software Foundation, Inc. This file is part of the GNU MP Library. @@ -22,75 +22,72 @@ #include <sysdep.h> /* INPUT PARAMETERS - res_ptr %i0 - src_ptr %i1 - size %i2 - cnt %i3 */ + res_ptr %o0 + src_ptr %o1 + size %o2 + cnt %o3 */ ENTRY(__mpn_lshift) - save %sp, -192, %sp - - sllx %i2,3,%g1 - add %i1,%g1,%i1 ! make %i1 point at end of src - ldx [%i1-8],%g2 ! load first limb - sub %g0,%i3,%i5 ! negate shift count - add %i0,%g1,%i0 ! make %i0 point at end of res - add %i2,-1,%i2 - and %i2,4-1,%l4 ! number of limbs in first loop - srlx %g2,%i5,%g1 ! compute function result - brz,pn %l4,.L0 ! if multiple of 4 limbs, skip first loop - mov %g1,%l1 - - sub %i2,%l4,%i2 ! adjust count for main loop - -.Loop0: ldx [%i1-16],%g3 - add %i0,-8,%i0 - add %i1,-8,%i1 - add %l4,-1,%l4 - sllx %g2,%i3,%i4 - srlx %g3,%i5,%g1 + sllx %o2,3,%g1 + add %o1,%g1,%o1 ! make %o1 point at end of src + ldx [%o1-8],%g2 ! load first limb + sub %g0,%o3,%o5 ! negate shift count + add %o0,%g1,%o0 ! make %o0 point at end of res + add %o2,-1,%o2 + andcc %o2,4-1,%g4 ! number of limbs in first loop + srlx %g2,%o5,%g1 ! compute function result + be,pn %xcc,.L0 ! if multiple of 4 limbs, skip first loop + mov %g1,%g5 + + sub %o2,%g4,%o2 ! 
adjust count for main loop + +.Loop0: ldx [%o1-16],%g3 + add %o0,-8,%o0 + add %o1,-8,%o1 + sllx %g2,%o3,%o4 + addcc %g4,-1,%g4 + srlx %g3,%o5,%g1 mov %g3,%g2 - or %i4,%g1,%i4 - brnz,pt %l4,.Loop0 - stx %i4,[%i0+0] + or %o4,%g1,%o4 + bne,pt %xcc,.Loop0 + stx %o4,[%o0+0] -.L0: brz,pn %i2,.Lend +.L0: brz,pn %o2,.Lend nop -.Loop: ldx [%i1-16],%g3 - add %i0,-32,%i0 - add %i2,-4,%i2 - sllx %g2,%i3,%i4 - srlx %g3,%i5,%g1 - - ldx [%i1-24],%g2 - sllx %g3,%i3,%l4 - or %i4,%g1,%i4 - stx %i4,[%i0+24] - srlx %g2,%i5,%g1 - - ldx [%i1-32],%g3 - sllx %g2,%i3,%i4 - or %l4,%g1,%l4 - stx %l4,[%i0+16] - srlx %g3,%i5,%g1 - - ldx [%i1-40],%g2 - sllx %g3,%i3,%l4 - or %i4,%g1,%i4 - stx %i4,[%i0+8] - srlx %g2,%i5,%g1 - - add %i1,-32,%i1 - or %l4,%g1,%l4 - brnz,pt %i2,.Loop - stx %l4,[%i0+0] - -.Lend: sllx %g2,%i3,%g2 - stx %g2,[%i0-8] - - mov %l1,%i0 - jmpl %i7+8, %g0 - restore +.Loop: ldx [%o1-16],%g3 + add %o0,-32,%o0 + sllx %g2,%o3,%o4 + addcc %o2,-4,%o2 + srlx %g3,%o5,%g1 + + ldx [%o1-24],%g2 + sllx %g3,%o3,%g4 + or %o4,%g1,%o4 + stx %o4,[%o0+24] + srlx %g2,%o5,%g1 + + ldx [%o1-32],%g3 + sllx %g2,%o3,%o4 + or %g4,%g1,%g4 + stx %g4,[%o0+16] + srlx %g3,%o5,%g1 + + ldx [%o1-40],%g2 + sllx %g3,%o3,%g4 + or %o4,%g1,%o4 + stx %o4,[%o0+8] + srlx %g2,%o5,%g1 + + add %o1,-32,%o1 + or %g4,%g1,%g4 + bne,pt %xcc,.Loop + stx %g4,[%o0+0] + +.Lend: sllx %g2,%o3,%g2 + stx %g2,[%o0-8] + + jmpl %o7+8, %g0 + mov %g5,%o0 END(__mpn_lshift) diff --git a/sysdeps/sparc/sparc64/mul_1.S b/sysdeps/sparc/sparc64/mul_1.S index 757856b4cd..67b9696682 100644 --- a/sysdeps/sparc/sparc64/mul_1.S +++ b/sysdeps/sparc/sparc64/mul_1.S @@ -72,13 +72,12 @@ ENTRY(__mpn_mul_1) addcc %i0,%o0,%i0 ! add cy_limb to low 64 bits of result mov 0,%g5 movcs %xcc,1,%g5 - add %o7,1,%o7 + addcc %o7,1,%o7 stx %i0,[%o4+%g1] - brnz %o7,.Loop + bne,pt %xcc,.Loop add %i1,%g5,%o0 ! compute new cy_limb - mov %o0,%i0 jmpl %i7+8,%g0 - restore + restore %o0,%g0,%o0 END(__mpn_mul_1) diff --git a/sysdeps/sparc/sparc64/rshift.S b/sysdeps/sparc/sparc64/rshift.S index 51eb4af3ab..f43d25efe8 100644 --- a/sysdeps/sparc/sparc64/rshift.S +++ b/sysdeps/sparc/sparc64/rshift.S @@ -1,6 +1,6 @@ /* SPARC v9 __mpn_rshift -- - Copyright (C) 1996 Free Software Foundation, Inc. + Copyright (C) 1996, 1999 Free Software Foundation, Inc. This file is part of the GNU MP Library. @@ -22,72 +22,69 @@ #include <sysdep.h> /* INPUT PARAMETERS - res_ptr %i0 - src_ptr %i1 - size %i2 - cnt %i3 */ + res_ptr %o0 + src_ptr %o1 + size %o2 + cnt %o3 */ ENTRY(__mpn_rshift) - save %sp, -192, %sp - - ldx [%i1],%g2 ! load first limb - sub %g0,%i3,%i5 ! negate shift count - add %i2,-1,%i2 - and %i2,4-1,%l4 ! number of limbs in first loop - sllx %g2,%i5,%g1 ! compute function result - brz,pn %l4,.L0 ! if multiple of 4 limbs, skip first loop - mov %g1,%l1 - - sub %i2,%l4,%i2 ! adjust count for main loop - -.Loop0: ldx [%i1+8],%g3 - add %i0,8,%i0 - add %i1,8,%i1 - add %l4,-1,%l4 - srlx %g2,%i3,%i4 - sllx %g3,%i5,%g1 + ldx [%o1],%g2 ! load first limb + sub %g0,%o3,%o5 ! negate shift count + add %o2,-1,%o2 + andcc %o2,4-1,%g4 ! number of limbs in first loop + sllx %g2,%o5,%g1 ! compute function result + be,pn %xcc,.L0 ! if multiple of 4 limbs, skip first loop + mov %g1,%g5 + + sub %o2,%g4,%o2 ! 
adjust count for main loop + +.Loop0: ldx [%o1+8],%g3 + add %o0,8,%o0 + add %o1,8,%o1 + srlx %g2,%o3,%o4 + addcc %g4,-1,%g4 + sllx %g3,%o5,%g1 mov %g3,%g2 - or %i4,%g1,%i4 - brnz,pt %l4,.Loop0 - stx %i4,[%i0-8] + or %o4,%g1,%o4 + bne,pt %xcc,.Loop0 + stx %o4,[%o0-8] -.L0: brz,pn %i2,.Lend +.L0: brz,pn %o2,.Lend nop -.Loop: ldx [%i1+8],%g3 - add %i0,32,%i0 - add %i2,-4,%i2 - srlx %g2,%i3,%i4 - sllx %g3,%i5,%g1 - - ldx [%i1+16],%g2 - srlx %g3,%i3,%l4 - or %i4,%g1,%i4 - stx %i4,[%i0-32] - sllx %g2,%i5,%g1 - - ldx [%i1+24],%g3 - srlx %g2,%i3,%i4 - or %l4,%g1,%l4 - stx %l4,[%i0-24] - sllx %g3,%i5,%g1 - - ldx [%i1+32],%g2 - srlx %g3,%i3,%l4 - or %i4,%g1,%i4 - stx %i4,[%i0-16] - sllx %g2,%i5,%g1 - - add %i1,32,%i1 - or %l4,%g1,%l4 - brnz %i2,.Loop - stx %l4,[%i0-8] - -.Lend: srlx %g2,%i3,%g2 - stx %g2,[%i0-0] - - mov %l1,%i0 - jmpl %i7+8,%g0 - restore +.Loop: ldx [%o1+8],%g3 + add %o0,32,%o0 + srlx %g2,%o3,%o4 + addcc %o2,-4,%o2 + sllx %g3,%o5,%g1 + + ldx [%o1+16],%g2 + srlx %g3,%o3,%g4 + or %o4,%g1,%o4 + stx %o4,[%o0-32] + sllx %g2,%o5,%g1 + + ldx [%o1+24],%g3 + srlx %g2,%o3,%o4 + or %g4,%g1,%g4 + stx %g4,[%o0-24] + sllx %g3,%o5,%g1 + + ldx [%o1+32],%g2 + srlx %g3,%o3,%g4 + or %o4,%g1,%o4 + stx %o4,[%o0-16] + sllx %g2,%o5,%g1 + + add %o1,32,%o1 + or %g4,%g1,%g4 + bne,pt %xcc,.Loop + stx %g4,[%o0-8] + +.Lend: srlx %g2,%o3,%g2 + stx %g2,[%o0-0] + + jmpl %o7+8,%g0 + mov %g5,%o0 END(__mpn_rshift) diff --git a/sysdeps/sparc/sparc64/submul_1.S b/sysdeps/sparc/sparc64/submul_1.S index ce9a80464f..8f86916133 100644 --- a/sysdeps/sparc/sparc64/submul_1.S +++ b/sysdeps/sparc/sparc64/submul_1.S @@ -30,29 +30,26 @@ s2_limb o3 */ ENTRY(__mpn_submul_1) - !#PROLOGUE# 0 save %sp,-192,%sp - !#PROLOGUE# 1 sub %g0,%i2,%o7 - sllx %o7,3,%g5 - sub %i1,%g5,%o3 - sub %i0,%g5,%o4 mov 0,%o0 ! zero cy_limb - + sllx %o7,3,%o7 + sethi %hi(0x80000000),%o2 srl %i3,0,%o1 ! extract low 32 bits of s2_limb + sub %i1,%o7,%o3 srlx %i3,32,%i3 ! extract high 32 bits of s2_limb - mov 1,%o2 - sllx %o2,32,%o2 ! o2 = 0x100000000 + sub %i0,%o7,%o4 + add %o2,%o2,%o2 ! o2 = 0x100000000 ! hi ! ! mid-1 ! ! mid-2 ! ! lo ! -.Loop: - sllx %o7,3,%g1 - ldx [%o3+%g1],%g5 +1: + ldx [%o3+%o7],%g5 srl %g5,0,%i0 ! zero hi bits + ldx [%o4+%o7],%l1 srlx %g5,32,%g5 mulx %o1,%i0,%i4 ! lo product mulx %i3,%i0,%i1 ! mid-1 product @@ -63,25 +60,24 @@ ENTRY(__mpn_submul_1) addcc %i1,%l2,%i1 ! add mid products mov 0,%l0 ! we need the carry from that add... movcs %xcc,%o2,%l0 ! ...compute it and... + sllx %i1,32,%i0 ! align low bits of mid product add %i5,%l0,%i5 ! ...add to bit 32 of the hi product - sllx %i1,32,%i0 ! align low bits of mid product srl %i4,0,%g5 ! zero high 32 bits of lo product add %i0,%g5,%i0 ! combine into low 64 bits of result srlx %i1,32,%i1 ! extract high bits of mid product... + addcc %i0,%o0,%i0 ! add cy_limb to low 64 bits of result add %i5,%i1,%i1 ! ...and add them to the high result - addcc %i0,%o0,%i0 ! add cy_limb to low 64 bits of result mov 0,%g5 movcs %xcc,1,%g5 - add %o7,1,%o7 - ldx [%o4+%g1],%l1 subcc %l1,%i0,%i0 - movcs %xcc,1,%g5 - stx %i0,[%o4+%g1] - brnz %o7,.Loop + stx %i0,[%o4+%o7] + add %g5,1,%l1 + movcs %xcc,%l1,%g5 + addcc %o7,8,%o7 + bne,pt %xcc,1b add %i1,%g5,%o0 ! compute new cy_limb - mov %o0,%i0 - jmpl %i7+8,%g0 - restore + jmpl %i7+8, %g0 + restore %o0,%g0,%o0 END(__mpn_submul_1) |
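
The dl-machine.h hunk adds ELF64_R_TYPE_ID and ELF64_R_TYPE_DATA so elf_machine_rela can split the SPARC64 r_info type word into the real relocation type (low 8 bits) and the extra addend that R_SPARC_OLO10 carries above it. Below is a minimal standalone C sketch of that decoding and of the OLO10 patch-up, modeled on the new case statement; the instruction word and r_info value are invented for illustration, and the relocation number is the one <elf.h> assigns to R_SPARC_OLO10.

/* Sketch of the r_info decoding and R_SPARC_OLO10 fixup added by this
   patch.  Sample values are illustrative only.  */
#include <stdint.h>
#include <stdio.h>

/* Same shape as the macros the patch adds to dl-machine.h.  */
#define ELF64_R_TYPE_ID(info)   ((info) & 0xff)
#define ELF64_R_TYPE_DATA(info) ((info) >> 8)

#define R_SPARC_OLO10 33        /* as defined in <elf.h> */

/* Apply R_SPARC_OLO10 to one instruction word: the low 13 bits get
   (value & 0x3ff) plus the secondary addend packed into r_info above
   the type byte -- the same expression the new case stores through
   reloc_addr.  */
static uint32_t
apply_olo10 (uint32_t insn, uint64_t value, uint64_t r_info)
{
  return (insn & ~0x1fffu)
	 | (((value & 0x3ff) + (uint32_t) ELF64_R_TYPE_DATA (r_info)) & 0x1fff);
}

int
main (void)
{
  /* Hypothetical r_info: type R_SPARC_OLO10, secondary addend 8.  */
  uint64_t r_info = ((uint64_t) 8 << 8) | R_SPARC_OLO10;
  uint64_t value = 0x12345678;	/* resolved symbol value + r_addend */
  uint32_t insn = 0x82106000;	/* "or %g1, 0, %g1", immediate form */

  if (ELF64_R_TYPE_ID (r_info) == R_SPARC_OLO10)
    insn = apply_olo10 (insn, value, r_info);

  /* Low 13 bits become (value & 0x3ff) + 8 = 0x280.  */
  printf ("patched insn: 0x%08x\n", insn);
  return 0;
}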
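
The lshift.S and rshift.S rewrites keep the GMP semantics but drop the register window: there is no save/restore any more, the arguments stay in %o0..%o3, and the routines return the out-shifted bits in %o0 as leaf functions. As a plain-C reference for what these routines compute, here is a sketch of the usual mpn_lshift/mpn_rshift contract, assuming 64-bit limbs and 0 < cnt < 64; it is not the glibc code itself.

/* C reference model for __mpn_lshift and __mpn_rshift (sketch).  */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uint64_t mp_limb_t;

/* Shift {src, size} left by cnt bits into {res, size}; return the bits
   shifted out of the top limb.  Works from the high end down, so res
   may overlap src when res >= src (the assembly likewise makes the
   pointers point at the end of the operands).  */
static mp_limb_t
ref_lshift (mp_limb_t *res, const mp_limb_t *src, size_t size, unsigned cnt)
{
  mp_limb_t ret = src[size - 1] >> (64 - cnt);
  for (size_t i = size - 1; i > 0; i--)
    res[i] = (src[i] << cnt) | (src[i - 1] >> (64 - cnt));
  res[0] = src[0] << cnt;
  return ret;
}

/* Shift {src, size} right by cnt bits; return the bits shifted out of
   the low limb, left-justified.  Works from the low end up, so res may
   overlap src when res <= src.  */
static mp_limb_t
ref_rshift (mp_limb_t *res, const mp_limb_t *src, size_t size, unsigned cnt)
{
  mp_limb_t ret = src[0] << (64 - cnt);
  for (size_t i = 0; i < size - 1; i++)
    res[i] = (src[i] >> cnt) | (src[i + 1] << (64 - cnt));
  res[size - 1] = src[size - 1] >> cnt;
  return ret;
}

int
main (void)
{
  mp_limb_t a[2] = { 0x8000000000000001ull, 0x4000000000000000ull };
  mp_limb_t r[2];

  mp_limb_t out = ref_lshift (r, a, 2, 4);
  printf ("lshift: out=%016llx hi=%016llx lo=%016llx\n",
	  (unsigned long long) out,
	  (unsigned long long) r[1], (unsigned long long) r[0]);

  out = ref_rshift (r, a, 2, 4);
  printf ("rshift: out=%016llx hi=%016llx lo=%016llx\n",
	  (unsigned long long) out,
	  (unsigned long long) r[1], (unsigned long long) r[0]);
  return 0;
}

The direction of traversal is why each assembly routine computes the function result from the first limb it loads (the top limb for lshift, the bottom limb for rshift) and why the unrolled loops can run in place for the usual overlap direction.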