author     Noah Goldstein <goldstein.w.n@gmail.com>   2022-06-24 09:42:12 -0700
committer  Sunil K Pandey <skpgkp2@gmail.com>         2022-07-18 22:13:57 -0700
commit     17d929ab2eea5fe34e8fc459f1baf28ae8a15ad5 (patch)
tree       bd92734846e184145c078b3089b971528ad3cab3
parent     e063a461a54a05ebdd8234b0d5c68d923f968be6 (diff)
x86: Align entry for memrchr to 64-bytes.
The function was tuned around 64-byte entry alignment and performs
better for all sizes with it.

As well, the different code paths were explicitly written to touch the
minimum number of cache lines, i.e. sizes <= 32 touch only the entry
cache line.

(cherry picked from commit 227afaa67213efcdce6a870ef5086200f1076438)
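For context, a minimal standalone sketch (not part of the commit) of what the one-line change does: replacing ENTRY(MEMRCHR) with ENTRY_P2ALIGN(MEMRCHR, 6) is assumed to emit a .p2align 6 directive ahead of the entry label, so the function entry lands on a 64-byte (one cache line) boundary. The symbol name memrchr_sketch below is hypothetical, used only for illustration, and assembles on its own with GNU as:

	.text
	.p2align 6			/* 2^6 = 64-byte alignment: the entry
					   starts a fresh cache line */
	.globl	memrchr_sketch
	.type	memrchr_sketch, @function
memrchr_sketch:
	cmpq	$32, %rdx		/* dispatch on length */
	jbe	1f			/* sizes <= 32: handled right here,
					   inside the entry's 64-byte line */
	/* larger sizes would fall through to code that may span
	   additional cache lines */
1:
	ret
	.size	memrchr_sketch, .-memrchr_sketch

Because the short-size code sits immediately after the now cache-line-aligned entry, those sizes never pull in a second instruction cache line, which is the behavior the commit message describes.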
-rw-r--r--   sysdeps/x86_64/multiarch/memrchr-avx2.S   2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sysdeps/x86_64/multiarch/memrchr-avx2.S b/sysdeps/x86_64/multiarch/memrchr-avx2.S
index 5f8e0be18c..edd8180ba1 100644
--- a/sysdeps/x86_64/multiarch/memrchr-avx2.S
+++ b/sysdeps/x86_64/multiarch/memrchr-avx2.S
@@ -35,7 +35,7 @@
 # define VEC_SIZE			32
 # define PAGE_SIZE			4096
 	.section SECTION(.text), "ax", @progbits
-ENTRY(MEMRCHR)
+ENTRY_P2ALIGN(MEMRCHR, 6)
 # ifdef __ILP32__
 	/* Clear upper bits.  */
 	and	%RDX_LP, %RDX_LP