about summary refs log tree commit diff
path: root/sysdeps/x86_64/multiarch/strcat-strlen-avx2.h.S
diff options
context:
space:
mode:
authorNoah Goldstein <goldstein.w.n@gmail.com>2022-11-08 17:38:39 -0800
committerNoah Goldstein <goldstein.w.n@gmail.com>2022-11-08 19:22:33 -0800
commit642933158e7cf072d873231b1a9bb03291f2b989 (patch)
tree352c3956cef706e683d0ac26ef85d165d1adcceb /sysdeps/x86_64/multiarch/strcat-strlen-avx2.h.S
parentf049f52dfeed8129c11ab1641a815705d09ff7e8 (diff)
downloadglibc-642933158e7cf072d873231b1a9bb03291f2b989.tar.gz
glibc-642933158e7cf072d873231b1a9bb03291f2b989.tar.xz
glibc-642933158e7cf072d873231b1a9bb03291f2b989.zip
x86: Optimize and shrink st{r|p}{n}{cat|cpy}-avx2 functions
Optimizations are:
    1. Use more overlapping stores to avoid branches.
    2. Reduce how unrolled the aligning copies are (this is more of a
       code-size save, it's a negative for some sizes in terms of
       perf).
    3. For st{r|p}n{cat|cpy} re-order the branches to minimize the
       number that are taken.

Performance Changes:

    Times are from N = 10 runs of the benchmark suite and are
    reported as geometric mean of all ratios of
    New Implementation / Old Implementation.

    strcat-avx2      -> 0.998
    strcpy-avx2      -> 0.937
    stpcpy-avx2      -> 0.971

    strncpy-avx2     -> 0.793
    stpncpy-avx2     -> 0.775

    strncat-avx2     -> 0.962

Code Size Changes:
    function         -> Bytes New / Bytes Old -> Ratio

    strcat-avx2      ->  685 / 1639 -> 0.418
    strcpy-avx2      ->  560 /  903 -> 0.620
    stpcpy-avx2      ->  592 /  939 -> 0.630

    strncpy-avx2     -> 1176 / 2390 -> 0.492
    stpncpy-avx2     -> 1268 / 2438 -> 0.520

    strncat-avx2     -> 1042 / 2563 -> 0.407

Notes:
    1. Because of the significant difference between the
       implementations they are split into three files.

           strcpy-avx2.S    -> strcpy, stpcpy, strcat
           strncpy-avx2.S   -> strncpy
           strncat-avx2.S   -> strncat

       I couldn't find a way to merge them without making the
       ifdefs incredibly difficult to follow.

Full check passes on x86-64 and build succeeds for all ISA levels w/
and w/o multiarch.
Diffstat (limited to 'sysdeps/x86_64/multiarch/strcat-strlen-avx2.h.S')
-rw-r--r--sysdeps/x86_64/multiarch/strcat-strlen-avx2.h.S101
1 files changed, 101 insertions, 0 deletions
diff --git a/sysdeps/x86_64/multiarch/strcat-strlen-avx2.h.S b/sysdeps/x86_64/multiarch/strcat-strlen-avx2.h.S
new file mode 100644
index 0000000000..f50514e07c
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strcat-strlen-avx2.h.S
@@ -0,0 +1,101 @@
+/* strlen used for beginning of str{n}cat using AVX2.
+   Copyright (C) 2011-2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+
+/* NOTE: This file is meant to be included by strcat-avx2 or
   strncat-avx2 and does not stand alone.  Before including %rdi
+   must be saved in %rax.  */
+
+
+/* Simple strlen implementation that ends at
+   L(strcat_strlen_done).  */
+	movq	%rdi, %r8
+	andq	$(VEC_SIZE * -1), %r8	/* r8 = rdi rounded down to a VEC_SIZE boundary.  */
+	VPCMPEQ	(%r8), %VZERO, %VMM(0)	/* Byte-compare first aligned vec against zero.  */
+	vpmovmskb %VMM(0), %ecx	/* ecx = bitmask of null-byte positions.  */
+	shrxl	%edi, %ecx, %ecx	/* Shift out match bits below rdi; shift count is edi mod 32, i.e. the misalignment (VEC_SIZE is 32 for AVX2).  */
+	testl	%ecx, %ecx
+	jnz	L(bsf_and_done_v0)	/* Null found inside the first (partial) vec.  */
+
+	VPCMPEQ	VEC_SIZE(%r8), %VZERO, %VMM(0)	/* Check second aligned vec.  */
+	vpmovmskb %VMM(0), %ecx
+	leaq	(VEC_SIZE)(%r8), %rdi	/* rdi = aligned base + VEC_SIZE; bsf paths below are relative to this.  */
+	testl	%ecx, %ecx
+	jnz	L(bsf_and_done_v0)
+
+	VPCMPEQ	(VEC_SIZE * 1)(%rdi), %VZERO, %VMM(0)	/* Third aligned vec.  */
+	vpmovmskb %VMM(0), %ecx
+	testl	%ecx, %ecx
+	jnz	L(bsf_and_done_v1)
+
+	VPCMPEQ	(VEC_SIZE * 2)(%rdi), %VZERO, %VMM(0)	/* Fourth aligned vec.  */
+	vpmovmskb %VMM(0), %ecx
+	testl	%ecx, %ecx
+	jnz	L(bsf_and_done_v2)
+
+	VPCMPEQ	(VEC_SIZE * 3)(%rdi), %VZERO, %VMM(0)	/* Fifth aligned vec.  */
+	vpmovmskb %VMM(0), %ecx
+	testl	%ecx, %ecx
+	jnz	L(bsf_and_done_v3)
+
+	orq	$(VEC_SIZE * 4 - 1), %rdi	/* rdi = last byte of enclosing (VEC_SIZE * 4) block, so the +1 offsets in the loop are vec-aligned.  */
+	.p2align 4,, 8
+L(loop_2x_vec):
+	VMOVA	(VEC_SIZE * 0 + 1)(%rdi), %VMM(0)	/* Aligned loads of 4 vecs per iteration.  */
+	VPMIN	(VEC_SIZE * 1 + 1)(%rdi), %VMM(0), %VMM(1)	/* Byte min: any zero in either source survives in result.  */
+	VMOVA	(VEC_SIZE * 2 + 1)(%rdi), %VMM(2)
+	VPMIN	(VEC_SIZE * 3 + 1)(%rdi), %VMM(2), %VMM(3)
+	VPMIN	%VMM(1), %VMM(3), %VMM(3)	/* Fold all 4 vecs; a null anywhere yields a zero byte here.  */
+	VPCMPEQ	%VMM(3), %VZERO, %VMM(3)
+	vpmovmskb %VMM(3), %r8d
+	subq	$(VEC_SIZE * -4), %rdi	/* Advance 4 vecs (sub of negative keeps imm8 encoding).  */
+	testl	%r8d, %r8d
+	jz	L(loop_2x_vec)
+
+	addq	$(VEC_SIZE * -4 + 1), %rdi	/* Rewind to the (aligned) first byte of the 4-vec block that contained the null.  */
+
+	VPCMPEQ	%VMM(0), %VZERO, %VMM(0)	/* Re-test each saved vec to find which one hit.  */
+	vpmovmskb %VMM(0), %ecx
+	testl	%ecx, %ecx
+	jnz	L(bsf_and_done_v0)
+
+	VPCMPEQ	%VMM(1), %VZERO, %VMM(1)	/* VMM(1) = min(vec0, vec1); vec0 is null-free here, so matches are vec1's.  */
+	vpmovmskb %VMM(1), %ecx
+	testl	%ecx, %ecx
+	jnz	L(bsf_and_done_v1)
+
+	VPCMPEQ	%VMM(2), %VZERO, %VMM(2)
+	vpmovmskb %VMM(2), %ecx
+	testl	%ecx, %ecx
+	jnz	L(bsf_and_done_v2)
+
+	movl	%r8d, %ecx	/* vec0..vec2 null-free, so loop mask r8d holds vec3's matches.  */
+L(bsf_and_done_v3):
+	addq	$VEC_SIZE, %rdi	/* v3 is one vec past v2; fall through.  */
+L(bsf_and_done_v2):
+	bsfl	%ecx, %ecx	/* ecx = index of first null within the vec.  */
+	leaq	(VEC_SIZE * 2)(%rdi, %rcx), %rdi	/* rdi = address of the null terminator (vec base + 2 vecs + index).  */
+	jmp	L(strcat_strlen_done)
+
+	.p2align 4,, 4
+L(bsf_and_done_v1):
+	addq	$VEC_SIZE, %rdi	/* v1 is one vec past v0; fall through.  */
+L(bsf_and_done_v0):
+	bsfl	%ecx, %ecx
+	addq	%rcx, %rdi	/* rdi = address of the null terminator.  */
+L(strcat_strlen_done):	/* On exit: rdi -> dst's null byte; rax still holds the original dst (saved by the includer per the header note).  */