summary refs log tree commit diff
path: root/sysdeps/s390/multiarch/memchr-vx.S
diff options
context:
space:
mode:
authorStefan Liebler <stli@linux.vnet.ibm.com>2015-08-26 10:26:24 +0200
committerAndreas Krebbel <krebbel@linux.vnet.ibm.com>2015-08-26 10:26:24 +0200
commit88eefd344b3cf4a41284a1dfdaca61667e3a1b4b (patch)
treee6df6d058a7fe251cadaed11918374df46ca7bb9 /sysdeps/s390/multiarch/memchr-vx.S
parentb4c21601b147efc3c2b0e679e4ffc554b3987f0b (diff)
downloadglibc-88eefd344b3cf4a41284a1dfdaca61667e3a1b4b.tar.gz
glibc-88eefd344b3cf4a41284a1dfdaca61667e3a1b4b.tar.xz
glibc-88eefd344b3cf4a41284a1dfdaca61667e3a1b4b.zip
S390: Optimize memchr, rawmemchr and wmemchr.
This patch provides optimized versions of memchr, rawmemchr and wmemchr with the
z13 vector instructions.

ChangeLog:

	* sysdeps/s390/multiarch/memchr-vx.S: New File.
	* sysdeps/s390/multiarch/memchr.c: Likewise.
	* sysdeps/s390/multiarch/rawmemchr-c.c: Likewise.
	* sysdeps/s390/multiarch/rawmemchr-vx.S: Likewise.
	* sysdeps/s390/multiarch/rawmemchr.c: Likewise.
	* sysdeps/s390/multiarch/wmemchr-c.c: Likewise.
	* sysdeps/s390/multiarch/wmemchr-vx.S: Likewise.
	* sysdeps/s390/multiarch/wmemchr.c: Likewise.
	* sysdeps/s390/s390-32/multiarch/memchr.c: Likewise.
	* sysdeps/s390/s390-64/multiarch/memchr.c: Likewise.
	* sysdeps/s390/multiarch/Makefile (sysdep_routines): Add memchr, wmemchr
	and rawmemchr functions.
	* sysdeps/s390/multiarch/ifunc-impl-list-common.c
	(__libc_ifunc_impl_list_common): Add ifunc test for memchr, rawmemchr
	and wmemchr.
	* wcsmbs/wmemchr.c: Use WMEMCHR if defined.
	* string/test-memchr.c: Add wmemchr support.
	* wcsmbs/test-wmemchr.c: New File.
	* wcsmbs/Makefile (strop-tests): Add wmemchr.
	* benchtests/bench-memchr.c: Add wmemchr support.
	* benchtests/bench-wmemchr.c: New File.
	* benchtests/Makefile (wcsmbs-bench): Add wmemchr.
Diffstat (limited to 'sysdeps/s390/multiarch/memchr-vx.S')
-rw-r--r--sysdeps/s390/multiarch/memchr-vx.S159
1 files changed, 159 insertions, 0 deletions
diff --git a/sysdeps/s390/multiarch/memchr-vx.S b/sysdeps/s390/multiarch/memchr-vx.S
new file mode 100644
index 0000000000..e0aa9e691b
--- /dev/null
+++ b/sysdeps/s390/multiarch/memchr-vx.S
@@ -0,0 +1,159 @@
+/* Vector optimized 32/64 bit S/390 version of memchr.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if defined HAVE_S390_VX_ASM_SUPPORT && IS_IN (libc)
+
+# include "sysdep.h"
+# include "asm-syntax.h"
+
+	.text
+
+/* void *memchr (const void *s, int c, size_t n)
+   Scans the first n bytes of memory pointed to by s for character c
+   and returns a pointer to the first occurrence, or NULL if c is not
+   found within the first n bytes.
+
+   Register usage:
+   -r0=tmp
+   -r1=tmp
+   -r2=s
+   -r3=c
+   -r4=n
+   -r5=current_len
+   -v16=part of s
+   -v17=index of found c
+   -v18=c replicated
+*/
+ENTRY(__memchr_vx)
+
+	.machine "z13"
+	.machinemode "zarch_nohighgprs"
+
+# if !defined __s390x__
+	llgfr	%r4,%r4
+# endif /* !defined __s390x__ */
+
+	clgije	%r4,0,.Lnf_end	/* If len == 0 then exit.  */
+
+	vlbb	%v16,0(%r2),6	/* Load s until next 4k-byte boundary.  */
+	lcbb	%r0,0(%r2),6	/* Get bytes to 4k-byte boundary or 16.  */
+	llgfr	%r0,%r0		/* Convert 32bit to 64bit.  */
+
+	vlvgb	%v18,%r3,0	/* Generate vector which elements are all c.
+				   if c > 255, c will be truncated.  */
+	vrepb	%v18,%v18,0
+	lghi	%r5,16		/* current_len = 16.  */
+
+	clgrjhe	%r0,%r4,.Llastcmp /* If (bytes to boundary) >= n,
+				     jump to lastcmp.  */
+
+	vfeebs	%v17,%v16,%v18	/* Find c.  */
+	vlgvb	%r1,%v17,7	/* Load byte index of c.  */
+	clgrjl	%r1,%r0,.Lfound2 /* Found c is within loaded bytes.  */
+
+	/* Align s to 16 byte.  */
+	risbgn	%r1,%r2,60,128+63,0 /* %r1 = bits 60-63 of %r2 'and' 15.  */
+	slr	%r5,%r1		/* Compute bytes to 16-byte boundary.  */
+
+	lgr	%r0,%r5		/* If %r5 + 64 < n? -> loop64.  */
+	aghi	%r0,64
+	clgrjl	%r0,%r4,.Lloop64
+.Llt64:
+	vl	%v16,0(%r5,%r2)
+	aghi	%r5,16
+	clgrjhe	%r5,%r4,.Llastcmp /* Do last compare if curr-len >= n.  */
+	vfeebs	%v17,%v16,%v18	/* Find c.  */
+	jl	.Lfound	/* Jump away if c was found.  */
+
+	vl	%v16,0(%r5,%r2)
+	aghi	%r5,16
+	clgrjhe	%r5,%r4,.Llastcmp
+	vfeebs	%v17,%v16,%v18
+	jl	.Lfound
+
+	vl	%v16,0(%r5,%r2)
+	aghi	%r5,16
+	clgrjhe	%r5,%r4,.Llastcmp
+	vfeebs	%v17,%v16,%v18
+	jl	.Lfound
+
+	vl	%v16,0(%r5,%r2)
+	aghi	%r5,16
+
+.Llastcmp:
+	/* Use comparison result only if located within first n characters.
+	   %r5: current_len;
+	   %r4: n;
+	   (current_len - n): [0...16[
+	   first ignored match index: vr-width - (current_len - n) ]0...16]
+	*/
+	vfeebs	%v17,%v16,%v18	/* Find c.  */
+	slgrk	%r4,%r5,%r4	/* %r4 = current_len - n.  */
+	lghi	%r0,16		/* Register width = 16.  */
+	vlgvb	%r1,%v17,7	/* Extract found index or 16 if all equal.  */
+	slr	%r0,%r4		/* %r0 = first ignored match index.  */
+	clrjl	%r1,%r0,.Lfound2 /* Go away if miscompare is below n bytes.  */
+	/* c not found within n-bytes.  */
+.Lnf_end:
+	lghi	%r2,0		/* Return null.  */
+	br	%r14
+
+.Lfound48:
+	aghi	%r5,16
+.Lfound32:
+	aghi	%r5,16
+.Lfound16:
+	aghi	%r5,16
+.Lfound0:
+	aghi	%r5,16
+.Lfound:
+	vlgvb	%r1,%v17,7	/* Load byte index of c.  */
+.Lfound2:
+	slgfi	%r5,16		/* current_len -= 16.  */
+	algr	%r5,%r1		/* Zero byte index is added to current len.  */
+	la	%r2,0(%r5,%r2)	/* Return pointer to c.  */
+	br	%r14
+
+
+.Lloop64:
+	vl	%v16,0(%r5,%r2)
+	vfeebs	%v17,%v16,%v18	/* Find c.  */
+	jl	.Lfound0	/* Jump away if c was found.  */
+	vl	%v16,16(%r5,%r2)
+	vfeebs	%v17,%v16,%v18
+	jl	.Lfound16
+	vl	%v16,32(%r5,%r2)
+	vfeebs	%v17,%v16,%v18
+	jl	.Lfound32
+	vl	%v16,48(%r5,%r2)
+	vfeebs	%v17,%v16,%v18
+	jl	.Lfound48
+
+	aghi	%r5,64
+	lgr	%r0,%r5		/* If %r5 + 64 < n? -> loop64.  */
+	aghi	%r0,64
+	clgrjl	%r0,%r4,.Lloop64
+
+	j	.Llt64
+END(__memchr_vx)
+
+# define memchr __memchr_c
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name) strong_alias(__memchr_c, __GI_memchr)
+#endif /* HAVE_S390_VX_ASM_SUPPORT && IS_IN (libc) */
+
+#include <memchr.S>