author     Ulrich Drepper <drepper@redhat.com>   2009-04-14 22:24:59 +0000
committer  Ulrich Drepper <drepper@redhat.com>   2009-04-14 22:24:59 +0000
commit     7fd23f1f3b20b02be6fc1ab225282e828b077867 (patch)
tree       252df414110c8ca71aa4adb8565d7da34a70947a
parent     e42e88abb6d2ca37be0aabfc52256a707aefcfa1 (diff)
download   glibc-7fd23f1f3b20b02be6fc1ab225282e828b077867.tar.gz
           glibc-7fd23f1f3b20b02be6fc1ab225282e828b077867.tar.xz
           glibc-7fd23f1f3b20b02be6fc1ab225282e828b077867.zip
mpn_add_n for x86-64.
-rw-r--r--   sysdeps/x86_64/add_n.S   42
1 file changed, 42 insertions, 0 deletions
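The new file is an x86-64 assembly implementation of the GMP mpn_add_n primitive: it adds two limb vectors of the same length and returns the carry out of the most significant limb. For orientation, here is a sketch of the C interface such a routine is expected to match; the typedefs below are illustrative assumptions for a 64-bit target, not text from the commit.

/* Illustrative prototype only; the limb and size typedefs are assumed.  */
#include <stdint.h>

typedef uint64_t mp_limb_t;   /* one 64-bit limb */
typedef int64_t  mp_size_t;   /* signed limb count */

/* Add {s1_ptr, size} and {s2_ptr, size}, store the low `size` limbs of
   the sum in {res_ptr, size}, and return the carry out (0 or 1).  */
mp_limb_t __mpn_add_n (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
                       const mp_limb_t *s2_ptr, mp_size_t size);

In the x86-64 calling convention this places res_ptr in %rdi, s1_ptr in %rsi, s2_ptr in %rdx and size in %rcx, which is how the registers are used in the code below.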
diff --git a/sysdeps/x86_64/add_n.S b/sysdeps/x86_64/add_n.S
new file mode 100644
index 0000000000..7883f6c840
--- /dev/null
+++ b/sysdeps/x86_64/add_n.S
@@ -0,0 +1,42 @@
+/* Add two limb vectors of the same length > 0 and store sum in a third
+   limb vector.
+   Copyright (C) 2004 Free Software Foundation, Inc.
+   This file is part of the GNU MP Library.
+
+   The GNU MP Library is free software; you can redistribute it and/or modify
+   it under the terms of the GNU Lesser General Public License as published by
+   the Free Software Foundation; either version 2.1 of the License, or (at your
+   option) any later version.
+
+   The GNU MP Library is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+   License for more details.
+
+   You should have received a copy of the GNU Lesser General Public License
+   along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
+   the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+   MA 02111-1307, USA. */
+
+#include "sysdep.h"
+#include "asm-syntax.h"
+
+	.text
+ENTRY (__mpn_add_n)
+	leaq	(%rsi,%rcx,8), %rsi		# s1_ptr += size limbs
+	leaq	(%rdi,%rcx,8), %rdi		# res_ptr += size limbs
+	leaq	(%rdx,%rcx,8), %rdx		# s2_ptr += size limbs
+	negq	%rcx				# index runs from -size up to 0
+	xorl	%eax, %eax			# clear cy
+	.p2align 2
+L(loop):
+	movq	(%rsi,%rcx,8), %rax		# load s1 limb
+	movq	(%rdx,%rcx,8), %r10		# load s2 limb
+	adcq	%r10, %rax			# add limbs plus carry from previous limb
+	movq	%rax, (%rdi,%rcx,8)		# store result limb
+	incq	%rcx				# incq leaves CF intact
+	jne	L(loop)
+	movq	%rcx, %rax			# %rcx is 0 here: zero %rax, flags untouched
+	adcq	%rax, %rax			# return value = final carry (0 or 1)
+	ret
+END (__mpn_add_n)
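The loop keeps the running carry in the CPU carry flag: adcq both consumes and regenerates CF for the next limb, and incq advances the negative index precisely because, unlike addq, it does not clobber CF. A portable C sketch of the same computation follows; it is a reference illustration under the assumed typedefs above, not code from glibc.

#include <stdint.h>

typedef uint64_t mp_limb_t;

/* Limb-by-limb add with carry propagation; returns the carry out of the
   most significant limb (always 0 or 1).  */
static mp_limb_t
ref_mpn_add_n (mp_limb_t *res, const mp_limb_t *s1,
               const mp_limb_t *s2, int64_t size)
{
  mp_limb_t cy = 0;
  for (int64_t i = 0; i < size; i++)
    {
      mp_limb_t a = s1[i];
      mp_limb_t sum = a + s2[i];
      mp_limb_t cy_out = sum < a;	/* carry from a + s2[i] */
      sum += cy;			/* fold in the incoming carry */
      cy_out += sum < cy;		/* carry from adding cy (0 or 1) */
      res[i] = sum;
      cy = cy_out;
    }
  return cy;
}

For example, adding two 256-bit integers stored as four little-endian limbs is a single call with size = 4; a non-zero return value means the sum overflows into a fifth limb.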