author     Siddhesh Poyarekar <siddhesh@sourceware.org>  2018-02-02 10:15:20 +0530
committer  Siddhesh Poyarekar <siddhesh@sourceware.org>  2018-02-02 10:15:21 +0530
commit     84c94d2fd90d84ae7e67657ee8e22c2d1b796f63 (patch)
tree       22a78bfeda9685e888f3e0845b3d228f5bc57c4d /sysdeps/aarch64/memcmp.S
parent     96e6a7167e127d5e65000f2724e074f1c026e1f1 (diff)
aarch64: Use the L() macro for labels in memcmp
The L() macro makes the assembly a bit more readable.

	* sysdeps/aarch64/memcmp.S: Use L() macro for labels.
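For reference, L() is provided by sysdeps/aarch64/sysdep.h. A minimal sketch of the definition and its effect (assuming the usual glibc form; check the header for the exact definition):

	/* Paste the ".L" prefix onto a label name; ".L"-prefixed labels
	   are file-local and are kept out of the object's symbol table.  */
	#define L(name) .L##name

	/* So the rewritten source...  */
	b.lo	L(less8)
	/* ...preprocesses to exactly what was there before:  */
	b.lo	.Lless8

The generated object code is therefore unchanged; only the source text gets tidier.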
Diffstat (limited to 'sysdeps/aarch64/memcmp.S')
-rw-r--r--  sysdeps/aarch64/memcmp.S  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/sysdeps/aarch64/memcmp.S b/sysdeps/aarch64/memcmp.S
index ccc795adeb..ecd12061b2 100644
--- a/sysdeps/aarch64/memcmp.S
+++ b/sysdeps/aarch64/memcmp.S
@@ -44,7 +44,7 @@ ENTRY_ALIGN (memcmp, 6)
 	DELOUSE (2)
 
 	subs	limit, limit, 8
-	b.lo	.Lless8
+	b.lo	L(less8)
 
 	/* Limit >= 8, so check first 8 bytes using unaligned loads.  */
 	ldr	data1, [src1], 8
@@ -52,65 +52,65 @@ ENTRY_ALIGN (memcmp, 6)
 	and	tmp1, src1, 7
 	add	limit, limit, tmp1
 	cmp	data1, data2
-	bne	.Lreturn
+	bne	L(return)
 
 	/* Align src1 and adjust src2 with bytes not yet done.  */
 	sub	src1, src1, tmp1
 	sub	src2, src2, tmp1
 
 	subs	limit, limit, 8
-	b.ls	.Llast_bytes
+	b.ls	L(last_bytes)
 
 	/* Loop performing 8 bytes per iteration using aligned src1.
 	   Limit is pre-decremented by 8 and must be larger than zero.
 	   Exit if <= 8 bytes left to do or if the data is not equal.  */
 	.p2align 4
-.Lloop8:
+L(loop8):
 	ldr	data1, [src1], 8
 	ldr	data2, [src2], 8
 	subs	limit, limit, 8
 	ccmp	data1, data2, 0, hi  /* NZCV = 0b0000.  */
-	b.eq	.Lloop8
+	b.eq	L(loop8)
 
 	cmp	data1, data2
-	bne	.Lreturn
+	bne	L(return)
 
 	/* Compare last 1-8 bytes using unaligned access.  */
-.Llast_bytes:
+L(last_bytes):
 	ldr	data1, [src1, limit]
 	ldr	data2, [src2, limit]
 
 	/* Compare data bytes and set return value to 0, -1 or 1.  */
-.Lreturn:
+L(return):
 #ifndef __AARCH64EB__
 	rev	data1, data1
 	rev	data2, data2
 #endif
 	cmp     data1, data2
-.Lret_eq:
+L(ret_eq):
 	cset	result, ne
 	cneg	result, result, lo
 	ret
 
 	.p2align 4
 	/* Compare up to 8 bytes.  Limit is [-8..-1].  */
-.Lless8:
+L(less8):
 	adds	limit, limit, 4
-	b.lo	.Lless4
+	b.lo	L(less4)
 	ldr	data1w, [src1], 4
 	ldr	data2w, [src2], 4
 	cmp	data1w, data2w
-	b.ne	.Lreturn
+	b.ne	L(return)
 	sub	limit, limit, 4
-.Lless4:
+L(less4):
 	adds	limit, limit, 4
-	beq	.Lret_eq
-.Lbyte_loop:
+	beq	L(ret_eq)
+L(byte_loop):
 	ldrb	data1w, [src1], 1
 	ldrb	data2w, [src2], 1
 	subs	limit, limit, 1
 	ccmp	data1w, data2w, 0, ne	/* NZCV = 0b0000.  */
-	b.eq	.Lbyte_loop
+	b.eq	L(byte_loop)
 	sub	result, data1w, data2w
 	ret
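For readers less used to AArch64 assembly, here is a hedged C sketch of the short-input path above, the flow from L(less8) through L(less4) to L(byte_loop). Names and structure are mine and purely illustrative; the real routine also uses ccmp to fold the loop-termination test and the data comparison into a single conditional compare.

	#include <stdint.h>
	#include <string.h>

	/* Illustrative only: compare the remaining n (0 <= n < 8) bytes the
	   way the L(less8)/L(less4)/L(byte_loop) paths do -- a 4-byte chunk
	   first when possible, then byte by byte.  */
	static int
	compare_tail (const unsigned char *s1, const unsigned char *s2, size_t n)
	{
	  if (n >= 4)
	    {
	      uint32_t a, b;
	      memcpy (&a, s1, 4);      /* ldr data1w, [src1], 4 */
	      memcpy (&b, s2, 4);      /* ldr data2w, [src2], 4 */
	      if (a != b)
	        n = 4;                 /* mismatch lies in this word; rescan it */
	      else
	        {
	          s1 += 4;
	          s2 += 4;
	          n -= 4;
	        }
	    }
	  while (n-- > 0)
	    {
	      int diff = *s1++ - *s2++;  /* sub result, data1w, data2w */
	      if (diff != 0)
	        return diff;
	    }
	  return 0;
	}

One difference worth noting: on a word mismatch the assembly branches to L(return), byte-reverses on little-endian, and compares the values as integers, while the sketch simply rescans the word byte by byte. Both yield a result whose sign is decided by the first differing byte.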