Diffstat (limited to 'sysdeps/s390/multiarch/wcscpy-vx.S')
-rw-r--r--	sysdeps/s390/multiarch/wcscpy-vx.S	111
1 file changed, 111 insertions, 0 deletions
diff --git a/sysdeps/s390/multiarch/wcscpy-vx.S b/sysdeps/s390/multiarch/wcscpy-vx.S
new file mode 100644
index 0000000000..d4a809948a
--- /dev/null
+++ b/sysdeps/s390/multiarch/wcscpy-vx.S
@@ -0,0 +1,111 @@
+/* Vector optimized 32/64 bit S/390 version of wcscpy.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if defined HAVE_S390_VX_ASM_SUPPORT && IS_IN (libc)
+
+# include "sysdep.h"
+# include "asm-syntax.h"
+
+	.text
+
+/* wchar_t * wcscpy (wchar_t *dest, const wchar_t *src)
+   Copy string src to dest.
+
+   Register usage:
+   -r1=tmp
+   -r2=dest and return value
+   -r3=src
+   -r4=tmp
+   -r5=current_len
+   -v16=part of src
+   -v17=index of zero
+   -v18=part of src
+*/
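+
+/* Strategy: handle the bytes up to the first 16-byte boundary with a
+   block-boundary safe vector load, then search and copy in a four
+   times unrolled, 16-byte aligned loop until the zero is found.  */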
+ENTRY(__wcscpy_vx)
+	.machine "z13"
+	.machinemode "zarch_nohighgprs"
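+	/* The vector facility is available from z13 on; nohighgprs keeps
+	   the code usable from 31-bit processes, where the upper GPR
+	   halves are not preserved.  */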
+
+	vlbb	%v16,0(%r3),6	/* Load s until next 4k-byte boundary.  */
+	lcbb	%r1,0(%r3),6	/* Get bytes to 4k-byte boundary or 16.  */
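+	/* vlbb stops at the 4k block boundary, so loading ahead of a
+	   short string cannot cross into an unmapped page; lcbb tells how
+	   many of the 16 bytes were really loaded.  */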
+
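+	/* The vector zero search works on 4-byte elements, which line up
+	   with the wchar_t characters only if src is 4-byte aligned.  */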
+	tmll	%r3,3		/* Is src 4-byte aligned?  */
+	jne	.Lfallback	/* If not, use the common-code variant.  */
+
+	vfenezf	%v17,%v16,%v16	/* Find element not equal with zero search.  */
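+	/* vfenezf sets byte 7 of %v17 to the byte index of the first zero
+	   element, or to 16 if no zero is among the loaded bytes.  */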
+	vlgvb	%r5,%v17,7	/* Load zero index or 16 if not found.  */
+	clrjl	%r5,%r1,.Lfound_align /* If the zero lies within the loaded
+					 bytes, copy up to and including
+					 it and return.  */
+
+	/* Align src to a 16-byte boundary.  */
+	risbgn	%r4,%r3,60,128+63,0 /* %r4 = bits 60-63 of %r3 (src & 15).  */
+	lghi	%r5,15		/* current_len = 15.  */
+	slr	%r5,%r4		/* Compute highest index to 16byte boundary.  */
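+	/* %r5 = 15 - (src & 15): the highest byte index before the next
+	   16-byte boundary; vstl below stores bytes 0..%r5 of %v16.  */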
+
+	vstl	%v16,%r5,0(%r2)	/* Copy the loaded characters (no zero found).  */
+	ahi	%r5,1		/* Start loop at next character.  */
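+	/* %r3 + %r5 is now 16-byte aligned, so the full 16-byte loads in
+	   the loop can never cross a page boundary past the terminating
+	   zero.  */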
+
+	/* Find the zero in a 16-byte aligned loop, unrolled four times
+	   (64 bytes per iteration).  Each 16-byte part of src is checked
+	   for a zero element before it is stored to dest.  */
+.Lloop:
+	vl	%v16,0(%r5,%r3)	/* Load s.  */
+	vfenezfs %v17,%v16,%v16	/* Find element not equal with zero search.  */
+	je	.Lfound_v16_0	/* Jump away if zero was found.  */
+	vl	%v18,16(%r5,%r3) /* Load next part of s.  */
+	vst	%v16,0(%r5,%r2)	/* Store previous part without zero to dst.  */
+	vfenezfs %v17,%v18,%v18
+	je	.Lfound_v18_16
+	vl	%v16,32(%r5,%r3)
+	vst	%v18,16(%r5,%r2)
+	vfenezfs %v17,%v16,%v16
+	je	.Lfound_v16_32
+	vl	%v18,48(%r5,%r3)
+	vst	%v16,32(%r5,%r2)
+	vfenezfs %v17,%v18,%v18
+	je	.Lfound_v18_48
+	vst	%v18,48(%r5,%r2)
+
+	aghi	%r5,64
+	j	.Lloop		/* No zero found -> loop.  */
+
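+/* Zero found in %v16.  %r5 is still the offset at which %v16 was
+   loaded; for .Lfound_v16_32 that load happened 32 bytes further in,
+   so it adjusts %r5 and falls through.  */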
+.Lfound_v16_32:
+	aghi	%r5,32
+.Lfound_v16_0:
+	la	%r3,0(%r5,%r2)	/* Reuse %r3 (src is dead) as store address.  */
+	vlgvb	%r1,%v17,7	/* Load byte index of zero.  */
+	aghi	%r1,3		/* Include all four bytes of the zero.  */
+	vstl	%v16,%r1,0(%r3)	/* Copy characters including zero.  */
+	br	%r14
+
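+/* As above, but the zero was found in %v18, whose part of src begins
+   16 bytes after %r5; the store address below accounts for that.  */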
+.Lfound_v18_48:
+	aghi	%r5,32
+.Lfound_v18_16:
+	la	%r3,16(%r5,%r2)
+	vlgvb	%r1,%v17,7	/* Load byte index of zero.  */
+	aghi	%r1,3		/* Include all four bytes of the zero.  */
+	vstl	%v18,%r1,0(%r3)	/* Copy characters including zero.  */
+	br	%r14
+
+.Lfound_align:
+	aghi	%r5,3		/* Include all four bytes of the zero.  */
+	vstl	%v16,%r5,0(%r2)	/* Copy characters including zero.  */
+	br	%r14
+
+.Lfallback:
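+	/* src is not 4-byte aligned: tail call the common-code variant,
+	   with the original arguments still in %r2 and %r3.  */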
+	jg	__wcscpy_c
+END(__wcscpy_vx)
+#endif /* HAVE_S390_VX_ASM_SUPPORT && IS_IN (libc) */