/* SPARC v9 __mpn_rshift --

   Copyright (C) 1996 Free Software Foundation, Inc.

   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Library General Public License as published by
   the Free Software Foundation; either version 2 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
   License for more details.

   You should have received a copy of the GNU Library General Public License
   along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
   the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
   MA 02111-1307, USA.  */

#include <sysdep.h>

/* INPUT PARAMETERS
   res_ptr  %i0
   src_ptr  %i1
   size     %i2
   cnt      %i3  */
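
/* For reference, a rough C equivalent of what this routine computes (an
   illustrative sketch, not part of the original source; it assumes 64-bit
   limbs and 1 <= cnt < 64, per the usual mpn_rshift convention, with the
   GMP types mp_limb_t and mp_size_t):

     mp_limb_t
     __mpn_rshift (mp_limb_t *res_ptr, const mp_limb_t *src_ptr,
                   mp_size_t size, unsigned int cnt)
     {
       mp_limb_t retval = src_ptr[0] << (64 - cnt);
       mp_size_t i;

       for (i = 0; i < size - 1; i++)
         res_ptr[i] = (src_ptr[i] >> cnt) | (src_ptr[i + 1] << (64 - cnt));
       res_ptr[size - 1] = src_ptr[size - 1] >> cnt;
       return retval;
     }

   The return value holds the cnt bits shifted out at the low end, placed in
   the most significant bits of the returned limb.  */
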
ENTRY(__mpn_rshift)
        save    %sp, -192, %sp
        ldx     [%i1],%g2       ! load first limb
        sub     %g0,%i3,%i5     ! negate shift count
        add     %i2,-1,%i2
        and     %i2,4-1,%l4     ! number of limbs in first loop
        sllx    %g2,%i5,%g1     ! compute function result
        brz,pn  %l4,.L0         ! if multiple of 4 limbs, skip first loop
         mov    %g1,%l1

        sub     %i2,%l4,%i2     ! adjust count for main loop
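
/* Feed-in loop: handle (size-1) mod 4 limbs one at a time so that the
   count left for the unrolled main loop is a multiple of 4.  */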
.Loop0: ldx     [%i1+8],%g3
        add     %i0,8,%i0
        add     %i1,8,%i1
        add     %l4,-1,%l4
        srlx    %g2,%i3,%i4
        sllx    %g3,%i5,%g1
        mov     %g3,%g2
        or      %i4,%g1,%i4
        brnz,pt %l4,.Loop0
         stx    %i4,[%i0-8]

.L0:    brz,pn  %i2,.Lend
         nop
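
/* Main loop, 4-way unrolled: each iteration reads four source limbs and
   stores four result limbs, each formed from a source limb shifted right
   by cnt OR'ed with the next limb's low bits shifted into the top.  */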
.Loop:  ldx     [%i1+8],%g3
        add     %i0,32,%i0
        add     %i2,-4,%i2
        srlx    %g2,%i3,%i4
        sllx    %g3,%i5,%g1
        ldx     [%i1+16],%g2
        srlx    %g3,%i3,%l4
        or      %i4,%g1,%i4
        stx     %i4,[%i0-32]    ! store 1st result limb of this iteration
        sllx    %g2,%i5,%g1
        ldx     [%i1+24],%g3
        srlx    %g2,%i3,%i4
        or      %l4,%g1,%l4
        stx     %l4,[%i0-24]    ! store 2nd result limb
        sllx    %g3,%i5,%g1
        ldx     [%i1+32],%g2
        srlx    %g3,%i3,%l4
        or      %i4,%g1,%i4
        stx     %i4,[%i0-16]    ! store 3rd result limb
        sllx    %g2,%i5,%g1
        add     %i1,32,%i1
        or      %l4,%g1,%l4
        brnz    %i2,.Loop
         stx    %l4,[%i0-8]     ! store 4th result limb (delay slot)
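
/* Most significant result limb has nothing shifted in from above; store it
   and return the bits shifted out at the low end (saved in %l1 at entry).  */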
.Lend:  srlx    %g2,%i3,%g2
        stx     %g2,[%i0-0]
        mov     %l1,%i0
        jmpl    %i7+8,%g0
         restore
END(__mpn_rshift)