| author | Rajalakshmi Srinivasaraghavan <raji@linux.vnet.ibm.com> | 2017-10-06 10:04:52 +0530 |
|---|---|---|
| committer | Rajalakshmi Srinivasaraghavan <raji@linux.vnet.ibm.com> | 2017-10-06 10:04:52 +0530 |
| commit | 5a907168918805bbe3088dc4ab051e3e78ad7459 | |
| tree | 3c93e0d5356b84b012b64eaeadd7d2e556d7afc2 /sysdeps/powerpc/powerpc64/power8/memrchr.S | |
| parent | 0db0b931cf2bf506767be3f93519032f56723883 | |
powerpc: Fix IFUNC for memrchr
Recent commit 59ba2d2b5421 missed adding __memrchr_power8 to the ifunc
list. This change also handles discarding unwanted bytes for unaligned
inputs in the power8 optimization.

2017-10-05  Rajalakshmi Srinivasaraghavan  <raji@linux.vnet.ibm.com>

	* sysdeps/powerpc/powerpc64/multiarch/memrchr-ppc64.c: Revert
	back to powerpc32 file.
	* sysdeps/powerpc/powerpc64/multiarch/memrchr.c (memrchr): Add
	__memrchr_power8 to ifunc list.
	* sysdeps/powerpc/powerpc64/power8/memrchr.S: Mask extra bytes
	for unaligned inputs.
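For reference, memrchr returns a pointer to the last occurrence of a byte within the first n bytes of a buffer, or NULL if it is absent. Below is a minimal sketch of the unaligned edge case the second part of this patch hardens against; the buffer layout and values are illustrative, not taken from the glibc test suite. A matching byte that sits just before an unaligned start address is visible to an aligned 16-byte vector load, but must never be reported as a match.

```c
#define _GNU_SOURCE        /* memrchr is a GNU extension.  */
#include <string.h>
#include <assert.h>

int
main (void)
{
  char buf[32] = { 0 };
  buf[3] = 'x';   /* Match just *before* the searched range.  */
  buf[10] = 'x';  /* Match inside the searched range.  */

  /* Search buf[4..19]: the match at buf[10] must be found, and the
     stray byte at buf[3] must be ignored even though an aligned
     16-byte vector load of the first block could see it.  */
  assert (memrchr (buf + 4, 'x', 16) == buf + 10);

  /* Search buf[11..18]: no match in range, so NULL is required even
     though buf[10] matches one byte before the start address.  */
  assert (memrchr (buf + 11, 'x', 8) == NULL);
  return 0;
}
```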
Diffstat (limited to 'sysdeps/powerpc/powerpc64/power8/memrchr.S')
-rw-r--r-- | sysdeps/powerpc/powerpc64/power8/memrchr.S | 24 |
1 file changed, 24 insertions, 0 deletions
```diff
diff --git a/sysdeps/powerpc/powerpc64/power8/memrchr.S b/sysdeps/powerpc/powerpc64/power8/memrchr.S
index 521b3c84a2..22b01ec69c 100644
--- a/sysdeps/powerpc/powerpc64/power8/memrchr.S
+++ b/sysdeps/powerpc/powerpc64/power8/memrchr.S
@@ -233,11 +233,35 @@ L(found):
 #endif
 	addi	r8, r8, 63
 	sub	r3, r8, r6	/* Compute final address.  */
+	cmpld	cr7, r3, r10
+	bgelr	cr7
+	li	r3, 0
 	blr
 
 	/* Found a match in last 16 bytes.  */
 	.align	4
 L(found_16B):
+	cmpld	r8, r10	/* Are we on the last QW?  */
+	bge	L(last)
+	/* Now discard bytes before starting address.  */
+	sub	r9, r10, r8
+	MTVRD(v9, r9)
+	vspltisb	v8, 3
+	/* Mask unwanted bytes.  */
+#ifdef __LITTLE_ENDIAN__
+	lvsr	v7, 0, r10
+	vperm	v6, v0, v6, v7
+	vsldoi	v9, v0, v9, 8
+	vsl	v9, v9, v8
+	vslo	v6, v6, v9
+#else
+	lvsl	v7, 0, r10
+	vperm	v6, v6, v0, v7
+	vsldoi	v9, v0, v9, 8
+	vsl	v9, v9, v8
+	vsro	v6, v6, v9
+#endif
+L(last):
 	/* Permute the first bit of each byte into bits 48-63.  */
 	VBPERMQ(v6, v6, v10)
 	/* Shift each component into its correct position for merging.  */
```
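The hunk makes two defensive changes. In L(found), the computed match address in r3 is compared against the start address in r10; if the match lies before the start of the buffer, the routine returns NULL (li r3, 0) instead of a pointer outside the searched range. In L(found_16B), when the final 16-byte block extends below the start address, the byte distance r10 - r8 is converted to a bit count (shift left by 3 via vspltisb/vsl) and the match vector v6 is shifted with vslo (little-endian) or vsro (big-endian) so that lanes before the start are discarded before VBPERMQ summarizes them. A rough scalar model of that masking step follows; scan_first_block and its parameters are illustrative names, not part of the patch, and the real code uses vector shifts rather than a loop bound.

```c
#include <stddef.h>

/* Illustrative scalar model of the patched L(found_16B) path: when the
   backward scan reaches the 16-byte block containing the start
   address, bytes at addresses below `start` must not produce a match.  */
static void *
scan_first_block (const unsigned char *block,  /* 16-byte aligned block  */
                  const unsigned char *start,  /* search start address   */
                  unsigned char c)
{
  /* Mirrors the new cmpld/bge pair: masking is needed only when the
     block begins before the start address.  */
  size_t skip = block < start ? (size_t) (start - block) : 0;

  /* Scan backward for the last match, ignoring the first `skip`
     bytes, just as the patch shifts those lanes out of the match
     vector before VBPERMQ summarizes it.  */
  for (int i = 15; i >= (int) skip; i--)
    if (block[i] == c)
      return (void *) (block + i);
  return NULL;
}
```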