From 0e02b5107e17830d19e83cb2208103f79666af31 Mon Sep 17 00:00:00 2001
From: Siddhesh Poyarekar <siddhesh@sourceware.org>
Date: Wed, 9 Aug 2017 12:57:17 +0530
Subject: memcpy_falkor: Fix code style in comments

---
 ChangeLog                                 |  3 +++
 sysdeps/aarch64/multiarch/memcpy_falkor.S | 11 ++++-------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 0132300249..688403e321 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2017-08-09  Siddhesh Poyarekar  <siddhesh@sourceware.org>
 
+	* sysdeps/aarch64/multiarch/memcpy_falkor.S: Fix code style in
+	comments.
+
 	* manual/tunables.texi (Tunable glibc.tune.cpu): Add falkor.
 	* sysdeps/aarch64/multiarch/Makefile (sysdep_routines): Add
 	memcpy_falkor.
diff --git a/sysdeps/aarch64/multiarch/memcpy_falkor.S b/sysdeps/aarch64/multiarch/memcpy_falkor.S
index 3708281147..dea4f225ee 100644
--- a/sysdeps/aarch64/multiarch/memcpy_falkor.S
+++ b/sysdeps/aarch64/multiarch/memcpy_falkor.S
@@ -20,10 +20,8 @@
 #include <sysdep.h>
 
 /* Assumptions:
- *
- * ARMv8-a, AArch64, falkor, unaligned accesses.
- *
- */
+
+   ARMv8-a, AArch64, falkor, unaligned accesses.  */
 
 #define dstin	x0
 #define src	x1
@@ -53,14 +51,13 @@
    conditionals, since the former would unnecessarily break across multiple
    issue groups.  The medium copy group has been enlarged to 128 bytes since
    bumping up the small copies up to 32 bytes allows us to do that without
-   cost and also allows us the reduce the size of the prep code before loop64.
+   cost and also allows us to reduce the size of the prep code before loop64.
 
    All copies are done only via two registers r6 and r7.  This is to ensure
    that all loads hit a single hardware prefetcher which can get correctly
    trained to prefetch a single stream.
 
-   The non-temporal stores help optimize cache utilization.
-*/
+   The non-temporal stores help optimize cache utilization.  */
 
 #if IS_IN (libc)
 ENTRY_ALIGN (__memcpy_falkor, 6)
-- 
cgit 1.4.1