diff options
author | Wilco Dijkstra <wilco.dijkstra@arm.com> | 2023-10-26 16:34:47 +0100 |
---|---|---|
committer | Wilco Dijkstra <wilco.dijkstra@arm.com> | 2024-04-10 14:03:07 +0100 |
commit | 1521237c3211bb0b1a8f7a9c5793d382789b2b68 (patch) | |
tree | ae85b37511cb02b6b9d5061fb2bc283f1338930e /sysdeps/aarch64/multiarch/memset_emag.S | |
parent | 25b66e8c4a75b51b0122089cf6b99860fb05470d (diff) | |
download | glibc-1521237c3211bb0b1a8f7a9c5793d382789b2b68.tar.gz glibc-1521237c3211bb0b1a8f7a9c5793d382789b2b68.tar.xz glibc-1521237c3211bb0b1a8f7a9c5793d382789b2b68.zip |
AArch64: Cleanup emag memset
Cleanup emag memset - merge the memset_base64.S file, remove the unused ZVA code (since it is disabled on emag). Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org> (cherry picked from commit 9627ab99b50d250c6dd3001a3355aa03692f7fe5)
Diffstat (limited to 'sysdeps/aarch64/multiarch/memset_emag.S')
-rw-r--r-- | sysdeps/aarch64/multiarch/memset_emag.S | 98 |
1 file changed, 88 insertions, 10 deletions
diff --git a/sysdeps/aarch64/multiarch/memset_emag.S b/sysdeps/aarch64/multiarch/memset_emag.S index 6fecad4fae..bbfa815925 100644 --- a/sysdeps/aarch64/multiarch/memset_emag.S +++ b/sysdeps/aarch64/multiarch/memset_emag.S @@ -18,17 +18,95 @@ <https://www.gnu.org/licenses/>. */ #include <sysdep.h> +#include "memset-reg.h" -#define MEMSET __memset_emag - -/* - * Using DC ZVA to zero memory does not produce better performance if - * memory size is not very large, especially when there are multiple - * processes/threads contending memory/cache. Here we set threshold to - * zero to disable using DC ZVA, which is good for multi-process/thread - * workloads. +/* Assumptions: + * + * ARMv8-a, AArch64, unaligned accesses + * */ -#define DC_ZVA_THRESHOLD 0 +ENTRY (__memset_emag) + + PTR_ARG (0) + SIZE_ARG (2) + + bfi valw, valw, 8, 8 + bfi valw, valw, 16, 16 + bfi val, val, 32, 32 + + add dstend, dstin, count + + cmp count, 96 + b.hi L(set_long) + cmp count, 16 + b.hs L(set_medium) + + /* Set 0..15 bytes. */ + tbz count, 3, 1f + str val, [dstin] + str val, [dstend, -8] + ret + + .p2align 3 +1: tbz count, 2, 2f + str valw, [dstin] + str valw, [dstend, -4] + ret +2: cbz count, 3f + strb valw, [dstin] + tbz count, 1, 3f + strh valw, [dstend, -2] +3: ret + + .p2align 3 + /* Set 16..96 bytes. */ +L(set_medium): + stp val, val, [dstin] + tbnz count, 6, L(set96) + stp val, val, [dstend, -16] + tbz count, 5, 1f + stp val, val, [dstin, 16] + stp val, val, [dstend, -32] +1: ret + + .p2align 4 + /* Set 64..96 bytes. Write 64 bytes from the start and + 32 bytes from the end. */ +L(set96): + stp val, val, [dstin, 16] + stp val, val, [dstin, 32] + stp val, val, [dstin, 48] + stp val, val, [dstend, -32] + stp val, val, [dstend, -16] + ret + + .p2align 4 +L(set_long): + stp val, val, [dstin] + bic dst, dstin, 15 + /* Small-size or non-zero memset does not use DC ZVA. */ + sub count, dstend, dst + + /* + * Adjust count and bias for loop. 
By subtracting extra 1 from count, + * it is easy to use tbz instruction to check whether loop tailing + * count is less than 33 bytes, so as to bypass 2 unnecessary stps. + */ + sub count, count, 64+16+1 + +1: stp val, val, [dst, 16] + stp val, val, [dst, 32] + stp val, val, [dst, 48] + stp val, val, [dst, 64]! + subs count, count, 64 + b.hs 1b + + tbz count, 5, 1f /* Remaining count is less than 33 bytes? */ + stp val, val, [dst, 16] + stp val, val, [dst, 32] +1: stp val, val, [dstend, -32] + stp val, val, [dstend, -16] + ret -#include "./memset_base64.S" +END (__memset_emag) |