/* Copyright (C) 2004-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

#include "div_libc.h"


/* 64-bit unsigned long remainder.  These are not normal C functions.  Argument
   registers are t10 and t11, the result goes in t12.  Only t12 and AT may be
   clobbered.

   Theory of operation here is that we can use the FPU divider for virtually
   all operands that we see: all dividend values between -2**53 and 2**53-1
   can be computed directly.  Note that divisor values need not be checked
   against that range because the rounded fp value will be close enough such
   that the quotient is < 1, which will properly be truncated to zero when we
   convert back to integer.

   When the dividend is outside the range for which we can compute exact
   results, we use the fp quotient as an estimate from which we begin refining
   an exact integral value.  This reduces the number of iterations in the
   shift-and-subtract loop significantly.

   The FPCR save/restore is due to the fact that the EV6 _will_ set FPCR_INE
   for cvttq/c even without /sui being set.  It will not, however, properly
   raise the exception, so we don't have to worry about FPCR_INED being clear
   and so dying by SIGFPE.  */
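
/* As a reading aid, here is a rough C sketch of the same strategy.  It is
   illustrative only: it is not part of the build, the function name is
   invented, the checks are reordered slightly (the assembly issues the fp
   divide first purely for latency reasons), and it leans on C's
   unsigned-to-double conversion where the assembly below must undo cvtqt's
   signed interpretation of X and Y by hand.  The signed-difference tests
   assume two's-complement wrap, as on Alpha.

       #include <signal.h>

       unsigned long
       __remqu_sketch (unsigned long x, unsigned long y)
       {
         unsigned long q, qy, r, sy, s;

         if ((y & (y - 1)) == 0)            // zero or a power of two
           {
             if (y == 0)
               raise (SIGFPE);              // the real code branches to DIVBYZERO
             return x & (y - 1);
           }
         if ((long) y < 0)                  // bit 63 set: quotient is 0 or 1
           return x >= y ? x - y : x;

         q = (unsigned long) ((double) x / (double) y);  // fp estimate
         if ((x >> 53) == 0)
           return x - q * y;                // dividend was exact in the double

         // Large dividend: the estimate may be off a little either way.
         qy = q * y;
         if ((long) (qy - x) > 0)           // overshot: back the quotient off
           {
             for (sy = y, s = 1; sy < qy - x; sy += sy, s += s)
               ;
             q -= s;
             qy -= sy;
           }
         r = x - qy;
         if ((long) r > 0)                  // undershot: shift-and-subtract
           {
             for (sy = y, s = 1; sy < r; sy += sy, s += s)
               ;
             while (s != 0)
               {
                 if (sy <= r)
                   {
                     q += s;
                     r -= sy;
                   }
                 s >>= 1;
                 sy >>= 1;
               }
           }
         return r;                          // q now also holds the quotient
       }  */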

	.text
	.align	4
	.globl	__remqu
	.type	__remqu, @funcnoplt
	.usepv	__remqu, no

	cfi_startproc
	cfi_return_column (RA)
__remqu:
	lda	sp, -FRAME(sp)
	cfi_def_cfa_offset (FRAME)
	CALL_MCOUNT

	/* Get the fp divide insn issued as quickly as possible.  After
	   that's done, we have at least 22 cycles until its results are
	   ready -- all the time in the world to figure out how we're
	   going to use the results.  */
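	/* But first: a divisor that is zero or a power of two, i.e. one with
	   Y & (Y-1) == 0, never needs the fp path at all.  $powerof2 masks
	   out the remainder directly and sends Y == 0 to DIVBYZERO.  */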
	subq	Y, 1, AT
	and	Y, AT, AT
	beq	AT, $powerof2

	stt	$f0, 0(sp)
	excb
	stt	$f1, 8(sp)
	stt	$f3, 48(sp)
	cfi_rel_offset ($f0, 0)
	cfi_rel_offset ($f1, 8)
	cfi_rel_offset ($f3, 48)
	mf_fpcr	$f3

	_ITOFT2	X, $f0, 16, Y, $f1, 24
	cvtqt	$f0, $f0
	cvtqt	$f1, $f1

	blt	X, $x_is_neg
	divt/c	$f0, $f1, $f0

	/* Check to see if Y was mis-converted as a signed value.  */
	ldt	$f1, 8(sp)
	blt	Y, $y_is_neg

	/* Check to see if X fit in the double as an exact value.  */
	srl	X, 53, AT
	bne	AT, $x_big

	/* If we get here, we're expecting exact results from the division.
	   Do nothing else besides convert, compute remainder, clean up.  */
	cvttq/c	$f0, $f0
	excb
	mt_fpcr	$f3
	_FTOIT	$f0, AT, 16

	mulq	AT, Y, AT
	ldt	$f0, 0(sp)
	ldt	$f3, 48(sp)
	lda	sp, FRAME(sp)
	cfi_remember_state
	cfi_restore ($f0)
	cfi_restore ($f1)
	cfi_restore ($f3)
	cfi_def_cfa_offset (0)

	.align	4
	subq	X, AT, RV
	ret	$31, (RA), 1

	.align	4
	cfi_restore_state
$x_is_neg:
	/* If we get here, X is so big that bit 63 is set, which made the
	   conversion come out negative.  Fix it up lest we not even get
	   a good estimate.  */
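	/* cvtqt interpreted X as signed, so $f0 currently holds (roughly)
	   X - 2**64; adding 2**64 back in gives a usable estimate of the
	   unsigned value.  */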
	ldah	AT, 0x5f80		/* 2**64 as float.  */
	stt	$f2, 24(sp)
	cfi_rel_offset ($f2, 24)
	_ITOFS	AT, $f2, 16

	.align	4
	addt	$f0, $f2, $f0
	unop
	divt/c	$f0, $f1, $f0
	unop

	/* Ok, we've now got the divide issued.  Continue with other checks.  */
	ldt	$f1, 8(sp)
	unop
	ldt	$f2, 24(sp)
	blt	Y, $y_is_neg
	cfi_restore ($f1)
	cfi_restore ($f2)
	cfi_remember_state	/* for y_is_neg */

	.align	4
$x_big:
	/* If we get here, X is large enough that we don't expect exact
	   results, and neither X nor Y got mis-translated for the fp
	   division.  Our task is to take the fp result, figure out how
	   far it's off from the correct result and compute a fixup.  */
	stq	t0, 16(sp)
	stq	t1, 24(sp)
	stq	t2, 32(sp)
	stq	t3, 40(sp)
	cfi_rel_offset (t0, 16)
	cfi_rel_offset (t1, 24)
	cfi_rel_offset (t2, 32)
	cfi_rel_offset (t3, 40)

#define Q	t0		/* quotient */
#define R	RV		/* remainder */
#define SY	t1		/* scaled Y */
#define S	t2		/* scalar */
#define QY	t3		/* Q*Y */
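
	/* Compute QY = Q*Y and compare it against X.  If Q overshot, $q_high
	   reduces Q (and QY) by a power-of-two step; $q_low then closes any
	   remaining gap with a shift-and-subtract loop, leaving the final
	   remainder in R.  */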

	cvttq/c	$f0, $f0
	_FTOIT	$f0, Q, 8
	mulq	Q, Y, QY

	.align	4
	stq	t4, 8(sp)
	excb
	ldt	$f0, 0(sp)
	mt_fpcr	$f3
	cfi_rel_offset (t4, 8)
	cfi_restore ($f0)

	subq	QY, X, R
	mov	Y, SY
	mov	1, S
	bgt	R, $q_high

$q_high_ret:
	subq	X, QY, R
	mov	Y, SY
	mov	1, S
	bgt	R, $q_low

$q_low_ret:
	ldq	t4, 8(sp)
	ldq	t0, 16(sp)
	ldq	t1, 24(sp)
	ldq	t2, 32(sp)

	ldq	t3, 40(sp)
	ldt	$f3, 48(sp)
	lda	sp, FRAME(sp)
	cfi_remember_state
	cfi_restore (t0)
	cfi_restore (t1)
	cfi_restore (t2)
	cfi_restore (t3)
	cfi_restore (t4)
	cfi_restore ($f3)
	cfi_def_cfa_offset (0)
	ret	$31, (RA), 1

	.align	4
	cfi_restore_state
	/* The quotient that we computed was too large.  We need to reduce
	   it by S such that Y*S >= R.  Obviously the closer we get to the
	   correct value the better, but overshooting high is ok, as we'll
	   fix that up later.  */
0:
	addq	SY, SY, SY
	addq	S, S, S
$q_high:
	cmpult	SY, R, AT
	bne	AT, 0b

	subq	Q, S, Q
	unop
	subq	QY, SY, QY
	br	$q_high_ret

	.align	4
	/* The quotient that we computed was too small.  Divide the current
	   remainder (R) by Y and add that quotient to the existing quotient
	   (Q).  The expectation, of course, is that R is much smaller than X.  */
	/* Begin with a shift-up loop.  Compute S such that Y*S >= R.  We
	   already have a copy of Y in SY and the value 1 in S.  */
0:
	addq	SY, SY, SY
	addq	S, S, S
$q_low:
	cmpult	SY, R, AT
	bne	AT, 0b

	/* Shift-down and subtract loop.  Each iteration compares the scaled
	   Y (SY) with the remainder (R); if SY <= R, then another S copies of
	   Y fit into the remainder, so add S to the quotient (Q) and subtract
	   SY from R.  */
2:	addq	Q, S, t3
	srl	S, 1, S
	cmpule	SY, R, AT
	subq	R, SY, t4

	cmovne	AT, t3, Q
	cmovne	AT, t4, R
	srl	SY, 1, SY
	bne	S, 2b

	br	$q_low_ret

	.align	4
	cfi_restore_state
$y_is_neg:
	/* If we get here, Y is so big that bit 63 is set, i.e. Y >= 2**63,
	   while X <= 2**64 - 1.  The results from the fp divide will be
	   completely wrong.  Fortunately, the true quotient must be either
	   0 or 1, so the remainder must be X or X-Y; just compute it
	   directly.  */
	cmpule	Y, X, AT
	excb
	mt_fpcr	$f3
	subq	X, Y, RV
	ldt	$f0, 0(sp)
	ldt	$f3, 48(sp)
	cmoveq	AT, X, RV

	lda	sp, FRAME(sp)
	cfi_restore ($f0)
	cfi_restore ($f3)
	cfi_def_cfa_offset (0)
	ret	$31, (RA), 1

	.align	4
	cfi_def_cfa_offset (FRAME)
$powerof2:
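	/* Y is zero or a power of two.  For a power of two the remainder is
	   just X & (Y - 1); a zero divisor is diverted to DIVBYZERO.  */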
	subq	Y, 1, AT
	beq	Y, DIVBYZERO
	and	X, AT, RV
	lda	sp, FRAME(sp)
	cfi_def_cfa_offset (0)
	ret	$31, (RA), 1

	cfi_endproc
	.size	__remqu, .-__remqu

	DO_DIVBYZERO