author     Will Schmidt <will_schmidt@vnet.ibm.com>    2012-08-21 14:20:55 -0500
committer  Ryan S. Arnold <rsa@linux.vnet.ibm.com>     2012-08-21 14:20:55 -0500
commit     14a50c9d238a8d79d48193482ed9a2be52d1dc3d (patch)
tree       046c03b3bf03143c65e3f4eab969391df13e4dd1
parent     696da85994af013ae1e3e8ad3bb016793f9b2c5d (diff)
[Powerpc] Tune/optimize powerpc{32,64}/power7/memchr.S.
Assorted tweaking, twisting and tuning to squeeze a few additional cycles
out of the memchr code.  Changes include bypassing the shift pairs
(sld, srd) when they are not required, and unrolling the small_loop that
handles short and trailing strings.

Per scrollpipe data measuring aligned strings for 64-bit, these changes
save between five and eight cycles (9-13% overall) for short strings (<32).
Longer aligned strings see a slight improvement of 1-3% from bypassing the
shifts and from the instruction rearrangement.
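
To make the change easier to follow, here is a minimal C sketch of the
word-at-a-time search these files implement.  It is illustration only, not
glibc code: cmpb_mem() stands in for the POWER7 cmpb instruction,
memchr_sketch() is an invented name, and the lane extraction uses GCC's
__builtin_clzll.  Like the assembly, the sketch reads whole aligned
doublewords (so it may touch bytes just outside [s, s+n)), and it performs
the masking shifts only when the start address is actually unaligned, which
is the case the sld/srd bypass in this patch targets.

#include <stddef.h>
#include <stdint.h>

/* Emulate cmpb: set 0xff in every byte lane where the inputs match.  Lane 0
   (the byte at the lowest address) is kept in the most significant byte,
   mirroring the big-endian register layout the assembly relies on.  */
static uint64_t
cmpb_mem (const unsigned char *w, unsigned char byte)
{
  uint64_t hits = 0;
  for (int i = 0; i < 8; i++)
    if (w[i] == byte)
      hits |= 0xffULL << (8 * (7 - i));
  return hits;
}

void *
memchr_sketch (const void *s, int c, size_t n)
{
  const unsigned char *p = s;
  const unsigned char *end = p + n;
  unsigned char byte = (unsigned char) c;

  /* Round the start down to an aligned doubleword, as clrrdi does.  */
  const unsigned char *w
    = (const unsigned char *) ((uintptr_t) p & ~(uintptr_t) 7);
  size_t pad = (size_t) (p - w);      /* Bytes before the real start.  */

  uint64_t hits = cmpb_mem (w, byte);
  if (pad != 0)
    {
      /* The sld/srd pair: clear the lanes before the start.  With this
         patch the shifts are branched around when pad == 0.  */
      hits <<= 8 * pad;
      hits >>= 8 * pad;
    }

  for (;;)
    {
      if (hits != 0)
        {
          /* cntlzd then srdi by 3: the leading-zero count picks the first
             (lowest-address) matching lane.  */
          const unsigned char *match = w + __builtin_clzll (hits) / 8;
          return match < end ? (void *) match : NULL;
        }
      w += 8;
      if (w >= end)
        return NULL;
      hits = cmpb_mem (w, byte);
    }
}

The unrolling half of the change does not alter this logic; it writes out
the body of the short/trailing-string loop a few times so that small ranges
fall straight through without a backward branch on every word, and the
final copy returns directly instead of branching to the common exit.
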
-rw-r--r--  ChangeLog                                  |  6
-rw-r--r--  sysdeps/powerpc/powerpc32/power7/memchr.S  | 78
-rw-r--r--  sysdeps/powerpc/powerpc64/power7/memchr.S  | 85
3 files changed, 125 insertions, 44 deletions
diff --git a/ChangeLog b/ChangeLog
index 0c34ac30cf..12e5b4c2df 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,9 @@
+2012-08-21  Will Schmidt  <will_schmidt@vnet.ibm.com>
+
+	* sysdeps/powerpc/powerpc64/power7/memchr.S: Unrolled the short loop
+	and made slight instruction rearrangements per scrollpipe analysis.
+	* sysdeps/powerpc/powerpc32/power7/memchr.S: Likewise.
+
 2012-08-20  Roland McGrath  <roland@hack.frob.com>
 
 	* manual/syslog.texi (syslog; vsyslog, closelog):
diff --git a/sysdeps/powerpc/powerpc32/power7/memchr.S b/sysdeps/powerpc/powerpc32/power7/memchr.S
index 85cc2999ae..ca2bc49579 100644
--- a/sysdeps/powerpc/powerpc32/power7/memchr.S
+++ b/sysdeps/powerpc/powerpc32/power7/memchr.S
@@ -1,5 +1,5 @@
 /* Optimized memchr implementation for PowerPC32/POWER7 using cmpb insn.
-   Copyright (C) 2010 Free Software Foundation, Inc.
+   Copyright (C) 2010-2012 Free Software Foundation, Inc.
    Contributed by Luis Machado <luisgpm@br.ibm.com>.
    This file is part of the GNU C Library.
 
@@ -33,19 +33,23 @@ ENTRY (BP_SYM (__memchr))
 	cmplwi	r5,16
 	ble	L(small_range)
 
-	cmplw	cr7,r3,r7     /* Is the address equal or less than r3?  If
-				 it's equal or less, it means size is either 0
-				 or a negative number.  */
+	cmplw	cr7,r3,r7     /* Compare the starting address (r3) with the
+				 ending address (r7).  If (r3 >= r7), the size
+				 passed in is zero or negative.  */
 	ble	cr7,L(proceed)
 
-	li	r7,-1	      /* Make r11 the biggest if r4 <= 0.  */
+	li	r7,-1	      /* Artificially set our ending address (r7)
+				 such that we will exit early. */
 L(proceed):
 	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
+	cmpli	cr6,r6,0      /* cr6 == Do we have padding?  */
 	lwz	r12,0(r8)     /* Load word from memory.  */
 	cmpb	r10,r12,r4    /* Check for BYTE's in WORD1.  */
+	beq	cr6,L(proceed_no_padding)
 	slw	r10,r10,r6
 	srw	r10,r10,r6
-	cmplwi	cr7,r10,0     /* If r10 == 0, no BYTE's have been found.  */
+L(proceed_no_padding):
+	cmplwi	cr7,r10,0     /* If r10 == 0, no BYTEs have been found.  */
 	bne	cr7,L(done)
 
 	/* Are we done already?  */
@@ -73,7 +77,7 @@ L(proceed):
 L(loop_setup):
 	sub	r5,r7,r9
 	srwi	r6,r5,3	      /* Number of loop iterations.  */
-	mtctr	r6	      /* Setup the counter.  */
+	mtctr	r6            /* Setup the counter.  */
 	b	L(loop)
 	/* Main loop to look for BYTE backwards in the string.  Since
 	   it's a small loop (< 8 instructions), align it to 32-bytes.  */
@@ -82,7 +86,6 @@ L(loop):
 	/* Load two words, compare and merge in a
 	   single register for speed.  This is an attempt
 	   to speed up the byte-checking process for bigger strings.  */
-
 	lwz	r12,4(r8)
 	lwzu	r11,8(r8)
 	cmpb	r10,r12,r4
@@ -91,11 +94,11 @@ L(loop):
 	cmplwi	cr7,r5,0
 	bne	cr7,L(found)
 	bdnz	L(loop)
+
 	/* We're here because the counter reached 0, and that means we
-	   didn't have any matches for BYTE in the whole range.  Just
-	   return the original range.  */
-	addi	r9,r8,4
-	cmplw	cr6,r9,r7
+	   didn't have any matches for BYTE in the whole range.  */
+	subi	r11,r7,4
+	cmplw	cr6,r8,r11
 	blt	cr6,L(loop_small)
 	b	L(null)
 
@@ -118,8 +121,7 @@ L(found):
 	/* r10 has the output of the cmpb instruction, that is, it contains
 	   0xff in the same position as BYTE in the original
 	   word from the string.  Use that to calculate the pointer.
-	   We need to make sure BYTE is *before* the end of the
-	   range.  */
+	   We need to make sure BYTE is *before* the end of the range.  */
 L(done):
 	cntlzw	r0,r10	      /* Count leading zeroes before the match.  */
 	srwi	r0,r0,3	      /* Convert leading zeroes to bytes.  */
@@ -137,13 +139,15 @@ L(null):
 	.align	4
 L(small_range):
 	cmplwi	r5,0
-	beq	L(null)
-
 	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
+	beq	L(null)       /* This branch is for the cmplwi r5,0 above.  */
 	lwz	r12,0(r8)     /* Load word from memory.  */
+	cmplwi	cr6,r6,0      /* cr6 == Do we have padding?  */
 	cmpb	r10,r12,r4    /* Check for BYTE in DWORD1.  */
+	beq	cr6,L(small_no_padding)
 	slw	r10,r10,r6
 	srw	r10,r10,r6
+L(small_no_padding):
 	cmplwi	cr7,r10,0
 	bne	cr7,L(done)
 
@@ -151,18 +155,52 @@ L(small_range):
 	addi    r9,r8,4
 	cmplw	r9,r7
 	bge	L(null)
-	b	L(loop_small)
 
-	.p2align  5
-L(loop_small):
+L(loop_small):                /* loop_small has been unrolled.  */
 	lwzu	r12,4(r8)
 	cmpb	r10,r12,r4
 	addi	r9,r8,4
 	cmplwi	cr6,r10,0
+	cmplw	r9,r7
 	bne	cr6,L(done)
+	bge	L(null)
+
+	lwzu	r12,4(r8)
+	cmpb	r10,r12,r4
+	addi	r9,r8,4
+	cmplwi	cr6,r10,0
 	cmplw	r9,r7
+	bne	cr6,L(done)
 	bge	L(null)
-	b	L(loop_small)
+
+	lwzu	r12,4(r8)
+	cmpb	r10,r12,r4
+	addi	r9,r8,4
+	cmplwi	cr6,r10,0
+	cmplw	r9,r7
+	bne	cr6,L(done)
+	bge	L(null)
+
+	lwzu	r12,4(r8)
+	cmpb	r10,r12,r4
+	addi	r9,r8,4
+	cmplwi	cr6,r10,0
+	cmplw	r9,r7
+	bne	cr6,L(done)
+	bge	L(null)
+
+	/* For most cases we will never get here.  Under some combinations of
+	   padding + length there is a leftover word that still needs to be
+	   checked.  */
+	lwzu	r12,4(r8)
+	cmpb	r10,r12,r4
+	addi	r9,r8,4
+	cmplwi	cr6,r10,0
+	bne	cr6,L(done)
+
+	/* Save a branch and exit directly.  */
+	li	r3,0
+	blr
 
 END (BP_SYM (__memchr))
 weak_alias (BP_SYM (__memchr), BP_SYM(memchr))
diff --git a/sysdeps/powerpc/powerpc64/power7/memchr.S b/sysdeps/powerpc/powerpc64/power7/memchr.S
index f0dbf60d0b..c125a5b354 100644
--- a/sysdeps/powerpc/powerpc64/power7/memchr.S
+++ b/sysdeps/powerpc/powerpc64/power7/memchr.S
@@ -1,5 +1,5 @@
 /* Optimized memchr implementation for PowerPC64/POWER7 using cmpb insn.
-   Copyright (C) 2010 Free Software Foundation, Inc.
+   Copyright (C) 2010-2012 Free Software Foundation, Inc.
    Contributed by Luis Machado <luisgpm@br.ibm.com>.
    This file is part of the GNU C Library.
 
@@ -29,27 +29,32 @@ ENTRY (BP_SYM (__memchr))
 	clrrdi  r8,r3,3
 	rlwimi	r4,r4,8,16,23
 	rlwimi	r4,r4,16,0,15
-	insrdi  r4,r4,32,0
 	add	r7,r3,r5      /* Calculate the last acceptable address.  */
 	cmpldi	r5,32
+	insrdi  r4,r4,32,0
 	ble	L(small_range)
 
-	cmpld	cr7,r3,r7     /* Is the address equal or less than r3?  If
-				 it's equal or less, it means size is either 0
-				 or a negative number.  */
+	cmpld	cr7,r3,r7     /* Compare the starting address (r3) with the
+				 ending address (r7).  If (r3 >= r7),
+				 the size passed in was zero or negative.  */
 	ble	cr7,L(proceed)
 
-	li	r7,-1	      /* Make r11 the biggest if r4 <= 0.  */
+	li	r7,-1         /* Artificially set our ending address (r7)
+				 such that we will exit early.  */
+
 L(proceed):
 	rlwinm	r6,r3,3,26,28 /* Calculate padding.  */
+	cmpldi	cr6,r6,0      /* cr6 == Do we have padding?  */
 	ld	r12,0(r8)     /* Load doubleword from memory.  */
-	cmpb	r10,r12,r4    /* Check for BYTE's in DWORD1.  */
+	cmpb	r10,r12,r4    /* Check for BYTEs in DWORD1.  */
+	beq	cr6,L(proceed_no_padding)
 	sld	r10,r10,r6
 	srd	r10,r10,r6
-	cmpldi	cr7,r10,0     /* If r10 == 0, no BYTE's's have been found.  */
+L(proceed_no_padding):
+	cmpldi	cr7,r10,0     /* Does r10 indicate we got a hit?  */
 	bne	cr7,L(done)
 
-	/* Are we done already?  */
+	/* See if we are at the last acceptable address yet.  */
 	addi	r9,r8,8
 	cmpld	cr6,r9,r7
 	bge	cr6,L(null)
@@ -74,7 +79,7 @@ L(proceed):
 L(loop_setup):
 	sub	r5,r7,r9
 	srdi	r6,r5,4	      /* Number of loop iterations.  */
-	mtctr	r6	      /* Setup the counter.  */
+	mtctr	r6            /* Setup the counter.  */
 	b	L(loop)
 	/* Main loop to look for BYTE backwards in the string.  Since
 	   it's a small loop (< 8 instructions), align it to 32-bytes.  */
@@ -83,7 +88,6 @@ L(loop):
 	/* Load two doublewords, compare and merge in a
 	   single register for speed.  This is an attempt
 	   to speed up the byte-checking process for bigger strings.  */
-
 	ld	r12,8(r8)
 	ldu	r11,16(r8)
 	cmpb	r10,r12,r4
@@ -92,11 +96,11 @@ L(loop):
 	cmpldi	cr7,r5,0
 	bne	cr7,L(found)
 	bdnz	L(loop)
+
 	/* We're here because the counter reached 0, and that means we
-	   didn't have any matches for BYTE in the whole range.  Just return
-	   the original range.  */
-	addi	r9,r8,8
-	cmpld	cr6,r9,r7
+	   didn't have any matches for BYTE in the whole range.  */
+	subi	r11,r7,8
+	cmpld	cr6,r8,r11
 	blt	cr6,L(loop_small)
 	b	L(null)
 
@@ -119,8 +123,7 @@ L(found):
 	/* r10 has the output of the cmpb instruction, that is, it contains
 	   0xff in the same position as BYTE in the original
 	   doubleword from the string.  Use that to calculate the pointer.
-	   We need to make sure BYTE is *before* the end of the
-	   range.  */
+	   We need to make sure BYTE is *before* the end of the range.  */
 L(done):
 	cntlzd	r0,r10	      /* Count leading zeroes before the match.  */
 	srdi	r0,r0,3	      /* Convert leading zeroes to bytes.  */
@@ -138,13 +141,16 @@ L(null):
 	.align	4
 L(small_range):
 	cmpldi	r5,0
-	beq	L(null)
-
 	rlwinm	r6,r3,3,26,28 /* Calculate padding.  */
+	beq	L(null)       /* This branch is for the cmpldi r5,0 above.  */
 	ld	r12,0(r8)     /* Load word from memory.  */
+	cmpldi	cr6,r6,0      /* cr6 == Do we have padding?  */
 	cmpb	r10,r12,r4    /* Check for BYTE in DWORD1.  */
+			      /* If no padding, skip the shifts.  */
+	beq	cr6,L(small_no_padding)
 	sld	r10,r10,r6
 	srd	r10,r10,r6
+L(small_no_padding):
 	cmpldi	cr7,r10,0
 	bne	cr7,L(done)
 
@@ -152,18 +158,49 @@ L(small_range):
 	addi    r9,r8,8
 	cmpld	r9,r7
 	bge	L(null)
-	b	L(loop_small)
+	/* If we're not done, drop through into loop_small.  */
+
+L(loop_small):                /* loop_small has been unrolled.  */
+	ldu	r12,8(r8)
+	cmpb	r10,r12,r4
+	addi	r9,r8,8
+	cmpldi	cr6,r10,0
+	cmpld	r9,r7
+	bne	cr6,L(done)   /* Found something.  */
+	bge	L(null)       /* Hit end of string (length).  */
 
-	.p2align  5
-L(loop_small):
 	ldu	r12,8(r8)
 	cmpb	r10,r12,r4
 	addi	r9,r8,8
 	cmpldi	cr6,r10,0
-	bne	cr6,L(done)
 	cmpld	r9,r7
+	bne	cr6,L(done)   /* Found something.  */
 	bge	L(null)
-	b	L(loop_small)
+
+	ldu	r12,8(r8)
+	subi	r11,r7,8
+	cmpb	r10,r12,r4
+	cmpldi	cr6,r10,0
+	ori	r2,r2,0       /* Force a dispatch group.  */
+	bne	cr6,L(done)
+
+	cmpld	r8,r11        /* At end of range?  */
+	bge	L(null)
+
+	/* For most cases we will never get here.  Under some combinations of
+	   padding + length there is a leftover doubleword that still needs to be
+	   checked.  */
+	ldu	r12,8(r8)
+	cmpb	r10,r12,r4
+	addi	r9,r8,8
+	cmpldi	cr6,r10,0
+	cmpld	r9,r7
+	bne	cr6,L(done)   /* Found something.  */
+
+	/* Save a branch and exit directly.  */
+	li	r3,0
+	blr
+
 
 END (BP_SYM (__memchr))
 weak_alias (BP_SYM (__memchr), BP_SYM(memchr))