author     Adhemerval Zanella <adhemerval.zanella@linaro.org>  2018-04-16 08:18:23 -0300
committer  Adhemerval Zanella <adhemerval.zanella@linaro.org>  2018-04-16 17:24:31 -0300
commit     743b9c2a98426fb40f8ffee3529c8870bc5300f5 (patch)
tree       f050338793677c5b1b2cd681d40c80a1be44ab08
parent     326e74e7c18ab12ac8e4b67cce295d32fcc1cf68 (diff)
arm: Remove unused ARM code in optimized implementations
This patch removes the unused ARM code path for armv6t2 memchr and
strlen and for armv7 memchr and strcmp.  In all of these
implementations the ARM path is not used in any supported build
(unless glibc is explicitly built with the undocumented NO_THUMB
compiler flag), and for armv7 it either produces wrong results
(memchr) or fails to build (strcmp).

Checked on arm-linux-gnueabihf with builds targeting both armv6 and
armv7.
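
For reference, each affected file guarded its entry point with roughly
the following pattern (condensed here from the memchr.S hunk below);
only the Thumb branch is ever assembled in a supported configuration,
since nothing in the documented build process defines NO_THUMB:

	.text
#ifdef NO_THUMB
	.arm		@ ARM-state entry, dead in practice
#else
	.thumb		@ Thumb-2 entry, the path actually built
	.thumb_func
#endif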

	* sysdeps/arm/armv6t2/memchr.S (memchr): Remove ARM code path.
	* sysdeps/arm/armv6t2/strlen.S (strlen): Likewise.
	* sysdeps/arm/armv7/multiarch/memchr_neon.S (memchr): Likewise.
	* sysdeps/arm/armv7/strcmp.S (strcmp): Likewise.
-rw-r--r--  ChangeLog                                   7
-rw-r--r--  sysdeps/arm/armv6t2/memchr.S               23
-rw-r--r--  sysdeps/arm/armv6t2/strlen.S               25
-rw-r--r--  sysdeps/arm/armv7/multiarch/memchr_neon.S  16
-rw-r--r--  sysdeps/arm/armv7/strcmp.S                 23
5 files changed, 8 insertions, 86 deletions
diff --git a/ChangeLog b/ChangeLog
index 718e17636a..381a2ea2b7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2018-04-16  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+	* sysdeps/arm/armv6t2/memchr.S (memchr): Remove ARM code path.
+	* sysdeps/arm/armv6t2/strlen.S (strlen): Likewise.
+	* sysdeps/arm/armv7/multiarch/memchr_neon.S (memchr): Likewise.
+	* sysdeps/arm/armv7/strcmp.S (strcmp): Likewise.
+
 2018-04-16  Andreas Schwab  <schwab@suse.de>
 
 	[BZ #19527]
diff --git a/sysdeps/arm/armv6t2/memchr.S b/sysdeps/arm/armv6t2/memchr.S
index bdd385b572..1d6eee0a11 100644
--- a/sysdeps/arm/armv6t2/memchr.S
+++ b/sysdeps/arm/armv6t2/memchr.S
@@ -42,12 +42,8 @@
 	.syntax unified
 
 	.text
-#ifdef NO_THUMB
-	.arm
-#else
 	.thumb
 	.thumb_func
-#endif
 	.global memchr
 	.type memchr,%function
 ENTRY(memchr)
@@ -91,22 +87,14 @@ ENTRY(memchr)
 
 15:
 	ldrd 	r4,r5, [r0],#8
-#ifndef NO_THUMB
 	subs	r6, r6, #8
-#endif
 	eor	r4,r4, r1	@ Get it so that r4,r5 have 00's where the bytes match the target
 	eor	r5,r5, r1
 	uadd8	r4, r4, r7	@ Parallel add 0xff - sets the GE bits for anything that wasn't 0
 	sel	r4, r3, r7	@ bytes are 00 for none-00 bytes, or ff for 00 bytes - NOTE INVERSION
 	uadd8	r5, r5, r7	@ Parallel add 0xff - sets the GE bits for anything that wasn't 0
 	sel	r5, r4, r7	@ chained....bytes are 00 for none-00 bytes, or ff for 00 bytes - NOTE INVERSION
-#ifndef NO_THUMB
 	cbnz	r5, 60f
-#else
-	cmp	r5, #0
-	bne	60f
-	subs	r6, r6, #8
-#endif
 	bne	15b		@ (Flags from the subs above) If not run out of bytes then go around again
 
 	pop	{r4,r5,r6,r7}
@@ -120,24 +108,13 @@ ENTRY(memchr)
 	and	r2,r2,#7	@ Leave the count remaining as the number after the double words have been done
 
 20:
-#ifndef NO_THUMB
 	cbz	r2, 40f		@ 0 length or hit the end already then not found
-#else
-	cmp	r2, #0
-	beq	40f
-#endif
 
 21:  @ Post aligned section, or just a short call
 	ldrb	r3,[r0],#1
-#ifndef NO_THUMB
 	subs	r2,r2,#1
 	eor	r3,r3,r1	@ r3 = 0 if match - doesn't break flags from sub
 	cbz	r3, 50f
-#else
-	eors	r3, r3, r1
-	beq	50f
-	subs	r2, r2, #1
-#endif
 	bne	21b		@ on r2 flags
 
 40:
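
The reason these NO_THUMB branches existed at all is that cbz/cbnz
have Thumb-only encodings, so an ARM-state build has to open-code
them.  A minimal sketch of the two forms, using the register and
label from the loop above:

	cbnz	r5, 60f		@ Thumb-2: branch if r5 != 0, flags untouched

	cmp	r5, #0		@ ARM-state equivalent from the removed path;
	bne	60f		@ it clobbers the flags, which is why the
				@ removed code also moved the subs after
				@ this check.
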
diff --git a/sysdeps/arm/armv6t2/strlen.S b/sysdeps/arm/armv6t2/strlen.S
index 6988183ea6..a34ef20e9d 100644
--- a/sysdeps/arm/armv6t2/strlen.S
+++ b/sysdeps/arm/armv6t2/strlen.S
@@ -21,7 +21,7 @@
 
  */
 
-#include <arm-features.h>               /* This might #define NO_THUMB.  */
+#include <arm-features.h>
 #include <sysdep.h>
 
 #ifdef __ARMEB__
@@ -32,24 +32,8 @@
 #define S2HI		lsl
 #endif
 
-#ifndef NO_THUMB
 /* This code is best on Thumb.  */
 	.thumb
-#else
-/* Using bne.w explicitly is desirable in Thumb mode because it helps
-   align the following label without a nop.  In ARM mode there is no
-   such difference.  */
-.macro bne.w label
-	bne \label
-.endm
-
-/* This clobbers the condition codes, which the real Thumb cbnz instruction
-   does not do.  But it doesn't matter for any of the uses here.  */
-.macro cbnz reg, label
-	cmp \reg, #0
-	bne \label
-.endm
-#endif
 
 /* Parameters and result.  */
 #define srcin		r0
@@ -146,16 +130,9 @@ ENTRY(strlen)
 	tst	tmp1, #4
 	pld	[src, #64]
 	S2HI	tmp2, const_m1, tmp2
-#ifdef NO_THUMB
-	mvn	tmp1, tmp2
-	orr	data1a, data1a, tmp1
-	itt	ne
-	orrne	data1b, data1b, tmp1
-#else
 	orn	data1a, data1a, tmp2
 	itt	ne
 	ornne	data1b, data1b, tmp2
-#endif
 	movne	data1a, const_m1
 	mov	const_0, #0
 	b	.Lstart_realigned
diff --git a/sysdeps/arm/armv7/multiarch/memchr_neon.S b/sysdeps/arm/armv7/multiarch/memchr_neon.S
index 1b2ae75aa0..6fbf9b8898 100644
--- a/sysdeps/arm/armv7/multiarch/memchr_neon.S
+++ b/sysdeps/arm/armv7/multiarch/memchr_neon.S
@@ -68,11 +68,7 @@
  * allows to identify exactly which byte has matched.
  */
 
-#ifndef NO_THUMB
 	.thumb_func
-#else
-	.arm
-#endif
 	.p2align 4,,15
 
 ENTRY(memchr)
@@ -132,12 +128,7 @@ ENTRY(memchr)
 	/* The first block can also be the last */
 	bls		.Lmasklast
 	/* Have we found something already? */
-#ifndef NO_THUMB
 	cbnz		synd, .Ltail
-#else
-	cmp		synd, #0
-	bne		.Ltail
-#endif
 
 
 .Lloopintro:
@@ -176,16 +167,9 @@ ENTRY(memchr)
 	vpadd.i8	vdata0_0, vdata0_0, vdata1_0
 	vpadd.i8	vdata0_0, vdata0_0, vdata0_0
 	vmov		synd, vdata0_0[0]
-#ifndef NO_THUMB
 	cbz		synd, .Lnotfound
 	bhi		.Ltail	/* Uses the condition code from
 				   subs cntin, cntin, #32 above.  */
-#else
-	cmp		synd, #0
-	beq		.Lnotfound
-	cmp		cntin, #0
-	bhi		.Ltail
-#endif
 
 
 .Lmasklast:
diff --git a/sysdeps/arm/armv7/strcmp.S b/sysdeps/arm/armv7/strcmp.S
index 060b865658..2626fdf72e 100644
--- a/sysdeps/arm/armv7/strcmp.S
+++ b/sysdeps/arm/armv7/strcmp.S
@@ -83,8 +83,6 @@
 #define syndrome	tmp2
 
 
-#ifndef NO_THUMB
-/* This code is best on Thumb.  */
 	.thumb
 
 /* In Thumb code we can't use MVN with a register shift, but we do have ORN.  */
@@ -94,27 +92,6 @@
 .macro apply_mask data_reg, mask_reg
 	orn \data_reg, \data_reg, \mask_reg
 .endm
-#else
-/* In ARM code we don't have ORN, but we can use MVN with a register shift.  */
-.macro prepare_mask mask_reg, nbits_reg
-	mvn \mask_reg, const_m1, S2HI \nbits_reg
-.endm
-.macro apply_mask data_reg, mask_reg
-	orr \data_reg, \data_reg, \mask_reg
-.endm
-
-/* These clobber the condition codes, which the real Thumb cbz/cbnz
-   instructions do not.  But it doesn't matter for any of the uses here.  */
-.macro cbz reg, label
-	cmp \reg, #0
-	beq \label
-.endm
-.macro cbnz reg, label
-	cmp \reg, #0
-	bne \label
-.endm
-#endif
-
 
 	/* Macro to compute and return the result value for word-aligned
 	   cases.  */