/* AMD64 __mpn_mul_1 -- Multiply a limb vector with a limb and store
   the result in a second limb vector.
   Copyright (C) 2003-2022 Free Software Foundation, Inc.
   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU MP Library; see the file COPYING.LIB.  If not,
   see <https://www.gnu.org/licenses/>.  */
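
/* Semantics: {rp, n} = {up, n} * vl, returning the most significant
   (carry-out) limb.  A minimal C sketch of the same computation, assuming
   a 64-bit mp_limb_t and a compiler with __int128 (illustrative only;
   mul_1 below is a hypothetical helper name, not a glibc entry point):

     mp_limb_t
     mul_1 (mp_limb_t *rp, const mp_limb_t *up, mp_size_t n, mp_limb_t vl)
     {
       mp_limb_t carry = 0;
       for (mp_size_t i = 0; i < n; i++)
         {
           unsigned __int128 p = (unsigned __int128) up[i] * vl + carry;
           rp[i] = (mp_limb_t) p;     (low limb of the product)
           carry = (mp_limb_t) (p >> 64);     (high limb carries on)
         }
       return carry;
     }  */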
#include <sysdep.h>
#include "asm-syntax.h"

/* Arguments arrive in the SysV AMD64 argument registers.  */
#define rp	%rdi		/* result limb vector */
#define up	%rsi		/* source limb vector */
#define n_param	%rdx		/* limb count, as passed */
#define vl	%rcx		/* multiplier limb */

#define n	%r11		/* limb count, kept out of %rdx: mul clobbers it */

	.text
ENTRY (__mpn_mul_1)
	push	%rbx
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbx, 0)
	xor	%r10, %r10
	mov	(up), %rax		/* read first u limb early */
	mov	n_param, %rbx		/* move away n from rdx, mul uses it */
	mul	vl			/* rdx:rax = up[0] * vl */
	mov	%rbx, %r11

	add	%r10, %rax
	adc	$0, %rdx
	and	$3, %ebx		/* n mod 4 picks the loop entry point */
	jz	L(b0)
	cmp	$2, %ebx
	jz	L(b2)
	jg	L(b3)
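
/* The core loop below is unrolled four limbs per iteration.  Each of the
   four entry points biases up and rp so that n can run as a negative index
   toward zero, then jumps into the matching stage of the loop.  */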
L(b1):	dec	n
	jne	L(gt1)
	mov	%rax, (rp)		/* single-limb case: store and return */
	jmp	L(ret)
L(gt1):	lea	8(up,n,8), up
	lea	-8(rp,n,8), rp
	neg	n
	xor	%r10, %r10
	xor	%ebx, %ebx
	mov	%rax, %r9
	mov	(up,n,8), %rax
	mov	%rdx, %r8
	jmp	L(L1)

L(b0):	lea	(up,n,8), up
	lea	-16(rp,n,8), rp
	neg	n
	xor	%r10, %r10
	mov	%rax, %r8
	mov	%rdx, %rbx
	jmp	L(L0)

L(b3):	lea	-8(up,n,8), up
	lea	-24(rp,n,8), rp
	neg	n
	mov	%rax, %rbx
	mov	%rdx, %r10
	jmp	L(L3)

L(b2):	lea	-16(up,n,8), up
	lea	-32(rp,n,8), rp
	neg	n
	xor	%r8, %r8
	xor	%ebx, %ebx
	mov	%rax, %r10
	mov	24(up,n,8), %rax
	mov	%rdx, %r9
	jmp	L(L2)
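
/* Software-pipelined core: each stage stores the limb finished in the
   previous stage, folds the current product's low half into the running
   carry, and starts the multiply for the next limb; %r9, %r8, %rbx and
   %r10 rotate as accumulators.  */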
	.p2align 4
L(top):	mov	%r10, (rp,n,8)
	add	%rax, %r9
	mov	(up,n,8), %rax
	adc	%rdx, %r8
	mov	$0, %r10d
L(L1):	mul	vl
	mov	%r9, 8(rp,n,8)
	add	%rax, %r8
	adc	%rdx, %rbx
L(L0):	mov	8(up,n,8), %rax
	mul	vl
	mov	%r8, 16(rp,n,8)
	add	%rax, %rbx
	adc	%rdx, %r10
L(L3):	mov	16(up,n,8), %rax
	mul	vl
	mov	%rbx, 24(rp,n,8)
	mov	$0, %r8d		/* zero */
	mov	%r8, %rbx		/* zero */
	add	%rax, %r10
	mov	24(up,n,8), %rax
	mov	%r8, %r9		/* zero */
	adc	%rdx, %r9
L(L2):	mul	vl
	add	$4, n
	js	L(top)
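
/* Loop exit: store the final two limbs and return the carry-out limb.
   %r8 is zero on this path.  */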
	mov	%r10, (rp,n,8)
	add	%rax, %r9
	adc	%r8, %rdx
	mov	%r9, 8(rp,n,8)
	add	%r8, %rdx
L(ret):	mov	%rdx, %rax

	pop	%rbx
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbx)
	ret
END (__mpn_mul_1)