diff options
author | Stefan Liebler <stli@linux.ibm.com> | 2018-12-18 13:57:18 +0100 |
---|---|---|
committer | Stefan Liebler <stli@linux.ibm.com> | 2018-12-18 13:57:18 +0100 |
commit | 804f2e5c73b1363836ce5db29a0abb3d36e1286a (patch) | |
tree | 7856f98333f25755a59d689883b8a0df8a0c4e7e /sysdeps/s390/multiarch/wcscpy-vx.S | |
parent | c7e7cd266ed123b6dfb722f599934ca5dcfd3e93 (diff) | |
download | glibc-804f2e5c73b1363836ce5db29a0abb3d36e1286a.tar.gz glibc-804f2e5c73b1363836ce5db29a0abb3d36e1286a.tar.xz glibc-804f2e5c73b1363836ce5db29a0abb3d36e1286a.zip |
S390: Refactor wcscpy ifunc handling.
The ifunc handling for wcscpy is adjusted in order to omit the ifunc if the minimum architecture level already supports newer CPUs by default. Unfortunately, the C ifunc variant can't be omitted entirely, as it is used by the z13 ifunc variant as a fallback if the pointers are not 4-byte aligned. ChangeLog: * sysdeps/s390/multiarch/Makefile (sysdep_routines): Remove wcscpy variants. * sysdeps/s390/Makefile (sysdep_routines): Add wcscpy variants. * sysdeps/s390/multiarch/ifunc-impl-list.c (__libc_ifunc_impl_list): Refactor ifunc handling for wcscpy. * sysdeps/s390/multiarch/wcscpy-c.c: Move to ... * sysdeps/s390/wcscpy-c.c: ... here and adjust ifunc handling. * sysdeps/s390/multiarch/wcscpy-vx.S: Move to ... * sysdeps/s390/wcscpy-vx.S: ... here and adjust ifunc handling. * sysdeps/s390/multiarch/wcscpy.c: Move to ... * sysdeps/s390/wcscpy.c: ... here and adjust ifunc handling. * sysdeps/s390/ifunc-wcscpy.h: New file.
Diffstat (limited to 'sysdeps/s390/multiarch/wcscpy-vx.S')
-rw-r--r-- | sysdeps/s390/multiarch/wcscpy-vx.S | 111 |
1 file changed, 0 insertions(+), 111 deletions(-)
diff --git a/sysdeps/s390/multiarch/wcscpy-vx.S b/sysdeps/s390/multiarch/wcscpy-vx.S deleted file mode 100644 index c2e81055be..0000000000 --- a/sysdeps/s390/multiarch/wcscpy-vx.S +++ /dev/null @@ -1,111 +0,0 @@ -/* Vector optimized 32/64 bit S/390 version of wcscpy. - Copyright (C) 2015-2018 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - <http://www.gnu.org/licenses/>. */ - -#if defined HAVE_S390_VX_ASM_SUPPORT && IS_IN (libc) - -# include "sysdep.h" -# include "asm-syntax.h" - - .text - -/* char * wcscpy (const wchar_t *dest, const wchar_t *src) - Copy string src to dest. - - Register usage: - -r0=border-len for switching to vector-instructions - -r1=tmp - -r2=dest and return value - -r3=src - -r4=tmp - -r5=current_len - -v16=part of src - -v17=index of zero - -v18=part of src -*/ -ENTRY(__wcscpy_vx) - .machine "z13" - .machinemode "zarch_nohighgprs" - - vlbb %v16,0(%r3),6 /* Load s until next 4k-byte boundary. */ - lcbb %r1,0(%r3),6 /* Get bytes to 4k-byte boundary or 16. */ - - tmll %r3,3 /* Test if s is 4-byte aligned? */ - jne .Lfallback /* And use common-code variant if not. */ - - vfenezf %v17,%v16,%v16 /* Find element not equal with zero search. */ - vlgvb %r5,%v17,7 /* Load zero index or 16 if not found. 
*/ - clrjl %r5,%r1,.Lfound_align /* If found zero within loaded bytes, - copy bytes before and return. */ - - /* Align s to 16 byte. */ - risbgn %r4,%r3,60,128+63,0 /* %r3 = bits 60-63 of %r2 'and' 15. */ - lghi %r5,15 /* current_len = 15. */ - slr %r5,%r4 /* Compute highest index to 16byte boundary. */ - - vstl %v16,%r5,0(%r2) /* Copy loaded characters - no zero. */ - ahi %r5,1 /* Start loop at next character. */ - - /* Find zero in 16byte aligned loop. */ -.Lloop: - vl %v16,0(%r5,%r3) /* Load s. */ - vfenezfs %v17,%v16,%v16 /* Find element not equal with zero search. */ - je .Lfound_v16_0 /* Jump away if zero was found. */ - vl %v18,16(%r5,%r3) /* Load next part of s. */ - vst %v16,0(%r5,%r2) /* Store previous part without zero to dst. */ - vfenezfs %v17,%v18,%v18 - je .Lfound_v18_16 - vl %v16,32(%r5,%r3) - vst %v18,16(%r5,%r2) - vfenezfs %v17,%v16,%v16 - je .Lfound_v16_32 - vl %v18,48(%r5,%r3) - vst %v16,32(%r5,%r2) - vfenezfs %v17,%v18,%v18 - je .Lfound_v18_48 - vst %v18,48(%r5,%r2) - - aghi %r5,64 - j .Lloop /* No zero found -> loop. */ - -.Lfound_v16_32: - aghi %r5,32 -.Lfound_v16_0: - la %r3,0(%r5,%r2) - vlgvb %r1,%v17,7 /* Load byte index of zero. */ - aghi %r1,3 /* Also copy remaining bytes of zero. */ - vstl %v16,%r1,0(%r3) /* Copy characters including zero. */ - br %r14 - -.Lfound_v18_48: - aghi %r5,32 -.Lfound_v18_16: - la %r3,16(%r5,%r2) - vlgvb %r1,%v17,7 /* Load byte index of zero. */ - aghi %r1,3 /* Also copy remaining bytes of zero. */ - vstl %v18,%r1,0(%r3) /* Copy characters including zero. */ - br %r14 - -.Lfound_align: - aghi %r5,3 /* Also copy remaining bytes of zero. */ - vstl %v16,%r5,0(%r2) /* Copy characters including zero. */ - br %r14 - -.Lfallback: - jg __wcscpy_c -END(__wcscpy_vx) -#endif /* HAVE_S390_VX_ASM_SUPPORT && IS_IN (libc) */ |