/* x86-64 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
   the result to a second limb vector.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU MP Library; see the file COPYING.LIB.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include "asm-syntax.h"
#define rp %rdi
#define up %rsi
#define n %rdx
#define v0 %rcx
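
/* func and ADDSUB may be predefined by an including file so that this body
   can also build the subtracting variant (__mpn_submul_1, with ADDSUB set
   to sub); by default it assembles as __mpn_addmul_1.  */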
#ifndef func
# define func __mpn_addmul_1
# define ADDSUB add
#endif

        .text
ENTRY (func)
        push    %rbx
        push    %rbp
        lea     (%rdx), %rbx
        neg     %rbx
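        /* %rbx now holds -n and serves as a negative limb index that is
           stepped up toward zero.  */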
        mov     (up), %rax
        mov     (rp), %r10

        lea     -16(rp,%rdx,8), rp
        lea     (up,%rdx,8), up
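        /* up now points one limb past the end of the source vector and rp
           at the second-to-last destination limb, so base+%rbx*8 addressing
           reaches the limbs still to be processed.  */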
        mul     %rcx

        bt      $0, %ebx
        jc      L(odd)
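        /* The loop below is unrolled two limbs per iteration, so odd n
           needs a separate prologue; bit 0 of -n has the same parity
           as n.  */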
        lea     (%rax), %r11
        mov     8(up,%rbx,8), %rax
        lea     (%rdx), %rbp
        mul     %rcx
        add     $2, %rbx
        jns     L(n2)

        lea     (%rax), %r8
        mov     (up,%rbx,8), %rax
        lea     (%rdx), %r9
        jmp     L(mid)
L(odd): add     $1, %rbx
        jns     L(n1)
        lea     (%rax), %r8
        mov     (up,%rbx,8), %rax
        lea     (%rdx), %r9
        mul     %rcx
        lea     (%rax), %r11
        mov     8(up,%rbx,8), %rax
        lea     (%rdx), %rbp
        jmp     L(e)
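
/* Main loop, two limbs per iteration.  The multiplications are software
   pipelined: each mul computes the next product while the previous one is
   added into the destination, with carries propagated by the adc chain.  */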
        .p2align 4
L(top): mul     %rcx
        ADDSUB  %r8, %r10
        lea     (%rax), %r8
        mov     (up,%rbx,8), %rax
        adc     %r9, %r11
        mov     %r10, -8(rp,%rbx,8)
        mov     (rp,%rbx,8), %r10
        lea     (%rdx), %r9
        adc     $0, %rbp
L(mid): mul     %rcx
        ADDSUB  %r11, %r10
        lea     (%rax), %r11
        mov     8(up,%rbx,8), %rax
        adc     %rbp, %r8
        mov     %r10, (rp,%rbx,8)
        mov     8(rp,%rbx,8), %r10
        lea     (%rdx), %rbp
        adc     $0, %r9
L(e):   add     $2, %rbx
        js      L(top)
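
/* Wind down: fold in the last pending products.  Vectors of length
   n == 2 enter directly at L(n2) and n == 1 at L(n1).  */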
        mul     %rcx
        ADDSUB  %r8, %r10
        adc     %r9, %r11
        mov     %r10, -8(rp)
        adc     $0, %rbp
L(n2):  mov     (rp), %r10
        ADDSUB  %r11, %r10
        adc     %rbp, %rax
        mov     %r10, (rp)
        adc     $0, %rdx
L(n1):  mov     8(rp), %r10
        ADDSUB  %rax, %r10
        mov     %r10, 8(rp)
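        /* Return the carry-out limb: %rbx is zero at this point, so the mov
           clears %rax and the adc adds in the final carry together with the
           high limb of the last product.  */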
        mov     %ebx, %eax      /* zero rax */
        adc     %rdx, %rax

        pop     %rbp
        pop     %rbx
        ret
END (func)