about summary refs log tree commit diff
path: root/sysdeps/hppa/udiv_qrnnd.s
diff options
context:
space:
mode:
author    Jakub Jelinek <jakub@redhat.com>    2007-07-12 18:26:36 +0000
committer Jakub Jelinek <jakub@redhat.com>    2007-07-12 18:26:36 +0000
commit 0ecb606cb6cf65de1d9fc8a919bceb4be476c602 (patch)
tree 2ea1f8305970753e4a657acb2ccc15ca3eec8e2c /sysdeps/hppa/udiv_qrnnd.s
parent 7d58530341304d403a6626d7f7a1913165fe2f32 (diff)
downloadglibc-0ecb606cb6cf65de1d9fc8a919bceb4be476c602.tar.gz
glibc-0ecb606cb6cf65de1d9fc8a919bceb4be476c602.tar.xz
glibc-0ecb606cb6cf65de1d9fc8a919bceb4be476c602.zip
2.5-18.1
Diffstat (limited to 'sysdeps/hppa/udiv_qrnnd.s')
-rw-r--r--  sysdeps/hppa/udiv_qrnnd.s | 286
1 files changed, 0 insertions, 286 deletions
diff --git a/sysdeps/hppa/udiv_qrnnd.s b/sysdeps/hppa/udiv_qrnnd.s
deleted file mode 100644
index cd2b58ddec..0000000000
--- a/sysdeps/hppa/udiv_qrnnd.s
+++ /dev/null
@@ -1,286 +0,0 @@
-;! HP-PA  __udiv_qrnnd division support, used from longlong.h.
-;! This version runs fast on pre-PA7000 CPUs.
-
-;! Copyright (C) 1993, 1994 Free Software Foundation, Inc.
-
-;! This file is part of the GNU MP Library.
-
-;! The GNU MP Library is free software; you can redistribute it and/or modify
-;! it under the terms of the GNU Lesser General Public License as published by
-;! the Free Software Foundation; either version 2.1 of the License, or (at your
-;! option) any later version.
-
-;! The GNU MP Library is distributed in the hope that it will be useful, but
-;! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-;! or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
-;! License for more details.
-
-;! You should have received a copy of the GNU Lesser General Public License
-;! along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
-;! the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
-;! MA 02111-1307, USA.
-
-
-;! INPUT PARAMETERS
-;! rem_ptr	gr26
-;! n1		gr25
-;! n0		gr24
-;! d		gr23
-
-;! The code size is a bit excessive.  We could merge the last two ds;addc
-;! sequences by simply moving the "bb,< Odd" instruction down.  The only
-;! trouble is the FFFFFFFF code that would need some hacking.
-
-	.text
-	.export		__udiv_qrnnd
-__udiv_qrnnd:
-	.proc
-	.callinfo	frame=0,no_calls
-	.entry
-
-	comb,<		%r23,0,L$largedivisor
-	 sub		%r0,%r23,%r1		;! clear cy as side-effect
-	ds		%r0,%r1,%r0
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r23,%r25
-	addc		%r24,%r24,%r28
-	ds		%r25,%r23,%r25
-	comclr,>=	%r25,%r0,%r0
-	addl		%r25,%r23,%r25
-	stws		%r25,0(0,%r26)
-	bv		0(%r2)
-	 addc		%r28,%r28,%r28
-
-L$largedivisor:
-	extru		%r24,31,1,%r20		;! r20 = n0 & 1
-	bb,<		%r23,31,L$odd
-	 extru		%r23,30,31,%r22		;! r22 = d >> 1
-	shd		%r25,%r24,1,%r24	;! r24 = new n0
-	extru		%r25,30,31,%r25		;! r25 = new n1
-	sub		%r0,%r22,%r21
-	ds		%r0,%r21,%r0
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	comclr,>=	%r25,%r0,%r0
-	addl		%r25,%r22,%r25
-	sh1addl		%r25,%r20,%r25
-	stws		%r25,0(0,%r26)
-	bv		0(%r2)
-	 addc		%r24,%r24,%r28
-
-L$odd:	addib,sv,n	1,%r22,L$FF..		;! r22 = (d / 2 + 1)
-	shd		%r25,%r24,1,%r24	;! r24 = new n0
-	extru		%r25,30,31,%r25		;! r25 = new n1
-	sub		%r0,%r22,%r21
-	ds		%r0,%r21,%r0
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r24
-	ds		%r25,%r22,%r25
-	addc		%r24,%r24,%r28
-	comclr,>=	%r25,%r0,%r0
-	addl		%r25,%r22,%r25
-	sh1addl		%r25,%r20,%r25
-;! We have computed (n1,,n0) / (d + 1), q' = r28, r' = r25
-	add,nuv		%r28,%r25,%r25
-	addl		%r25,%r1,%r25
-	addc		%r0,%r28,%r28
-	sub,<<		%r25,%r23,%r0
-	addl		%r25,%r1,%r25
-	stws		%r25,0(0,%r26)
-	bv		0(%r2)
-	 addc		%r0,%r28,%r28
-
-;! This is just a special case of the code above.
-;! We come here when d == 0xFFFFFFFF
-L$FF..:	add,uv		%r25,%r24,%r24
-	sub,<<		%r24,%r23,%r0
-	ldo		1(%r24),%r24
-	stws		%r24,0(0,%r26)
-	bv		0(%r2)
-	 addc		%r0,%r25,%r28
-
-	.exit
-	.procend