author    | Siddhesh Poyarekar <siddhesh@sourceware.org> | 2018-02-02 10:15:20 +0530
----------|----------------------------------------------|--------------------------
committer | Siddhesh Poyarekar <siddhesh@sourceware.org> | 2018-02-02 10:15:21 +0530
commit    | 84c94d2fd90d84ae7e67657ee8e22c2d1b796f63     |
tree      | 22a78bfeda9685e888f3e0845b3d228f5bc57c4d     |
parent    | 96e6a7167e127d5e65000f2724e074f1c026e1f1     |
aarch64: Use the L() macro for labels in memcmp
The L() macro makes the assembly a bit more readable.

	* sysdeps/aarch64/memcmp.S: Use L() macro for labels.
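For context, L() is glibc's helper macro for local assembly labels: on ELF targets it pastes the `.L` local-label prefix onto the name, so `L(less8)` still assembles to `.Lless8` while reading more like a function call. Below is a minimal sketch of the idea; it is modelled on the definition in glibc's sysdep.h rather than quoted from it.

```
/* Sketch only: modelled on the L() definition in glibc's sysdep.h,
   which may differ in detail across ports and glibc versions.  */
#ifndef L
# define L(name) .L##name	/* L(less8) preprocesses to .Lless8  */
#endif

	/* Usage in a .S file: the assembled label is unchanged, only
	   the source text becomes easier to read and grep.  */
	subs	limit, limit, 8
	b.lo	L(less8)	/* identical to: b.lo .Lless8  */
```

Because the expansion keeps the `.L` prefix, the labels remain local to the object file and do not appear in the symbol table, exactly as before this change.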
-rw-r--r-- | ChangeLog                |  2
-rw-r--r-- | sysdeps/aarch64/memcmp.S | 32

2 files changed, 18 insertions, 16 deletions
```diff
diff --git a/ChangeLog b/ChangeLog
index 4b3be4ba84..6c6dac46e4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,7 @@
 2018-02-02  Siddhesh Poyarekar  <siddhesh@sourceware.org>
 
+	* sysdeps/aarch64/memcmp.S: Use L() macro for labels.
+
 	* benchtests/bench-memcmp.c: Print json instead of plain text.
 
 	* benchtests/bench-memcmp.c (do_test): Call realloc_buf for
diff --git a/sysdeps/aarch64/memcmp.S b/sysdeps/aarch64/memcmp.S
index ccc795adeb..ecd12061b2 100644
--- a/sysdeps/aarch64/memcmp.S
+++ b/sysdeps/aarch64/memcmp.S
@@ -44,7 +44,7 @@ ENTRY_ALIGN (memcmp, 6)
 	DELOUSE (2)
 
 	subs	limit, limit, 8
-	b.lo	.Lless8
+	b.lo	L(less8)
 
 	/* Limit >= 8, so check first 8 bytes using unaligned loads.  */
 	ldr	data1, [src1], 8
@@ -52,65 +52,65 @@ ENTRY_ALIGN (memcmp, 6)
 	and	tmp1, src1, 7
 	add	limit, limit, tmp1
 	cmp	data1, data2
-	bne	.Lreturn
+	bne	L(return)
 
 	/* Align src1 and adjust src2 with bytes not yet done.  */
 	sub	src1, src1, tmp1
 	sub	src2, src2, tmp1
 	subs	limit, limit, 8
-	b.ls	.Llast_bytes
+	b.ls	L(last_bytes)
 
 	/* Loop performing 8 bytes per iteration using aligned src1.
 	   Limit is pre-decremented by 8 and must be larger than zero.
 	   Exit if <= 8 bytes left to do or if the data is not equal.  */
 	.p2align 4
-.Lloop8:
+L(loop8):
 	ldr	data1, [src1], 8
 	ldr	data2, [src2], 8
 	subs	limit, limit, 8
 	ccmp	data1, data2, 0, hi  /* NZCV = 0b0000.  */
-	b.eq	.Lloop8
+	b.eq	L(loop8)
 
 	cmp	data1, data2
-	bne	.Lreturn
+	bne	L(return)
 
 	/* Compare last 1-8 bytes using unaligned access.  */
-.Llast_bytes:
+L(last_bytes):
 	ldr	data1, [src1, limit]
 	ldr	data2, [src2, limit]
 
 	/* Compare data bytes and set return value to 0, -1 or 1.  */
-.Lreturn:
+L(return):
 #ifndef __AARCH64EB__
 	rev	data1, data1
 	rev	data2, data2
 #endif
 	cmp	data1, data2
-.Lret_eq:
+L(ret_eq):
 	cset	result, ne
 	cneg	result, result, lo
 	ret
 
 	.p2align 4
 	/* Compare up to 8 bytes.  Limit is [-8..-1].  */
-.Lless8:
+L(less8):
 	adds	limit, limit, 4
-	b.lo	.Lless4
+	b.lo	L(less4)
 	ldr	data1w, [src1], 4
 	ldr	data2w, [src2], 4
 	cmp	data1w, data2w
-	b.ne	.Lreturn
+	b.ne	L(return)
 	sub	limit, limit, 4
-.Lless4:
+L(less4):
 	adds	limit, limit, 4
-	beq	.Lret_eq
-.Lbyte_loop:
+	beq	L(ret_eq)
+L(byte_loop):
 	ldrb	data1w, [src1], 1
 	ldrb	data2w, [src2], 1
 	subs	limit, limit, 1
 	ccmp	data1w, data2w, 0, ne	/* NZCV = 0b0000.  */
-	b.eq	.Lbyte_loop
+	b.eq	L(byte_loop)
 	sub	result, data1w, data2w
 	ret
```