author	Alan Modra <amodra@gmail.com>	2013-08-17 18:41:17 +0930
committer	Alan Modra <amodra@gmail.com>	2013-10-04 10:39:52 +0930
commit	8a7413f9b036da83ffde491a37d9d2340bc321a7 (patch)
tree	8ba04b78fde9893b9da3a10e7934a2c127594f6e /sysdeps/powerpc/powerpc32/power7/strncmp.S
parent	33ee81de05e83ce12f32a491270bb4c1611399c7 (diff)
PowerPC LE strcmp and strncmp
http://sourceware.org/ml/libc-alpha/2013-08/msg00099.html

More little-endian support.  I leave the main strcmp loops unchanged
(well, except for renumbering rTMP to something other than r0, since
it's needed in an addi insn) and modify the tail for little-endian.

I noticed some of the big-endian tail code was a little untidy so have
cleaned that up too.
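
The word-at-a-time compare only orders strings correctly when the first
string byte sits in the most significant bits of the loaded word, which is
what a big-endian load gives you.  The new little-endian tail therefore
masks off the bytes beyond the terminator and byte-reverses both words
before the final compare.  A rough C sketch of that idea (the helper name
and the zero_bit convention below are illustrative assumptions, not taken
from the assembly):

	#include <stdint.h>

	/* Sketch only: compare the final words of two strings on a
	   little-endian host.  zero_bit is assumed to hold 0x80 in each
	   byte position where the first string has a zero byte, as the
	   main loop computes it.  */
	static int
	final_word_cmp_le (uint32_t w1, uint32_t w2, uint32_t zero_bit)
	{
	  uint32_t t = zero_bit << 1;
	  /* Bytes up to and including the first terminator; mirrors the
	     slwi/addi/andc sequence in the patch.  */
	  uint32_t mask = (t - 1) & ~t;
	  w1 &= mask;		/* mask off gunk past the end of the strings */
	  w2 &= mask;
	  w1 = __builtin_bswap32 (w1);	/* stands in for the rlwinm/rlwimi
					   byte-reverse sequence */
	  w2 = __builtin_bswap32 (w2);
	  if (w1 == w2)
	    return 0;
	  /* Unsigned compare of the reversed words gives strcmp order;
	     the assembly returns any value with the right sign.  */
	  return w1 < w2 ? -1 : 1;
	}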

	* sysdeps/powerpc/powerpc64/strcmp.S (rTMP2): Define as r0.
	(rTMP): Define as r11.
	(strcmp): Add little-endian support.  Optimise tail.
	* sysdeps/powerpc/powerpc32/strcmp.S: Similarly.
	* sysdeps/powerpc/powerpc64/strncmp.S: Likewise.
	* sysdeps/powerpc/powerpc32/strncmp.S: Likewise.
	* sysdeps/powerpc/powerpc64/power4/strncmp.S: Likewise.
	* sysdeps/powerpc/powerpc32/power4/strncmp.S: Likewise.
	* sysdeps/powerpc/powerpc64/power7/strncmp.S: Likewise.
	* sysdeps/powerpc/powerpc32/power7/strncmp.S: Likewise.
Diffstat (limited to 'sysdeps/powerpc/powerpc32/power7/strncmp.S')
-rw-r--r--	sysdeps/powerpc/powerpc32/power7/strncmp.S	55
1 file changed, 40 insertions, 15 deletions
diff --git a/sysdeps/powerpc/powerpc32/power7/strncmp.S b/sysdeps/powerpc/powerpc32/power7/strncmp.S
index fdae44d265..10c9d251b0 100644
--- a/sysdeps/powerpc/powerpc32/power7/strncmp.S
+++ b/sysdeps/powerpc/powerpc32/power7/strncmp.S
@@ -26,7 +26,7 @@
 
 EALIGN (strncmp,5,0)
 
-#define rTMP	r0
+#define rTMP2	r0
 #define rRTN	r3
 #define rSTR1	r3	/* first string arg */
 #define rSTR2	r4	/* second string arg */
@@ -39,6 +39,7 @@ EALIGN (strncmp,5,0)
 #define r7F7F	r9	/* constant 0x7f7f7f7f */
 #define rNEG	r10	/* ~(word in s1 | 0x7f7f7f7f) */
 #define rBITDIF	r11	/* bits that differ in s1 & s2 words */
+#define rTMP	r12
 
 	dcbt	0,rSTR1
 	nop
@@ -78,13 +79,45 @@ L(g1):	add	rTMP,rFEFE,rWORD1
 /* OK. We've hit the end of the string. We need to be careful that
    we don't compare two strings as different because of gunk beyond
    the end of the strings...  */
+#ifdef __LITTLE_ENDIAN__
+L(endstring):
+	slwi	rTMP, rTMP, 1
+	addi    rTMP2, rTMP, -1
+	andc    rTMP2, rTMP2, rTMP
+	and	rWORD2, rWORD2, rTMP2		/* Mask off gunk.  */
+	and	rWORD1, rWORD1, rTMP2
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rldimi	rTMP2, rWORD2, 24, 32
+	rldimi	rTMP, rWORD1, 24, 32
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr
+	ori	rRTN, rTMP2, 1
+	blr
+
+L(different):
+	lwz	rWORD1, -4(rSTR1)
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rldimi	rTMP2, rWORD2, 24, 32
+	rldimi	rTMP, rWORD1, 24, 32
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr
+	ori	rRTN, rTMP2, 1
+	blr
 
+#else
 L(endstring):
 	and	rTMP,r7F7F,rWORD1
 	beq	cr1,L(equal)
 	add	rTMP,rTMP,r7F7F
 	xor.	rBITDIF,rWORD1,rWORD2
-
 	andc	rNEG,rNEG,rTMP
 	blt	L(highbit)
 	cntlzw	rBITDIF,rBITDIF
@@ -92,28 +125,20 @@ L(endstring):
 	addi	rNEG,rNEG,7
 	cmpw	cr1,rNEG,rBITDIF
 	sub	rRTN,rWORD1,rWORD2
-	blt	cr1,L(equal)
-	srawi	rRTN,rRTN,31
-	ori	rRTN,rRTN,1
-	blr
+	bgelr	cr1
 L(equal):
 	li	rRTN,0
 	blr
 
 L(different):
-	lwzu	rWORD1,-4(rSTR1)
+	lwz	rWORD1,-4(rSTR1)
 	xor.	rBITDIF,rWORD1,rWORD2
 	sub	rRTN,rWORD1,rWORD2
-	blt	L(highbit)
-	srawi	rRTN,rRTN,31
-	ori	rRTN,rRTN,1
-	blr
+	bgelr
 L(highbit):
-	srwi	rWORD2,rWORD2,24
-	srwi	rWORD1,rWORD1,24
-	sub	rRTN,rWORD1,rWORD2
+	ori	rRTN, rWORD2, 1
 	blr
-
+#endif
 
 /* Oh well. In this case, we just do a byte-by-byte comparison.  */
 	.align	4