author	Noah Goldstein <goldstein.w.n@gmail.com>	2022-06-24 09:42:12 -0700
committer	Noah Goldstein <goldstein.w.n@gmail.com>	2022-06-27 08:35:51 -0700
commit	227afaa67213efcdce6a870ef5086200f1076438 (patch)
tree	ebf7ab75d079ece8cced5978bea3da23d775f5ee
parent	dbb0f06cc09784f6229cc1736c4af8caa687975f (diff)
x86: Align entry for memrchr to 64 bytes.
The function was tuned around 64-byte entry alignment and performs
better for all sizes with it.

As well, the different code paths were explicitly written to touch the
minimum number of cache lines, i.e. sizes <= 32 touch only the entry
cache line.
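
For reference, ENTRY_P2ALIGN(MEMRCHR, 6) in the diff below requests
2^6 = 64-byte alignment, so the function entry lands exactly on a
cache-line boundary and the short-size paths can fit in that first
line.  A minimal standalone sketch of the same idea in GNU assembler
syntax (example_memrchr is a hypothetical symbol for illustration,
not the actual glibc macro expansion):

	.text
	.p2align 6			/* Pad to 2^6 = 64 bytes: entry
					   starts on a fresh cache line.  */
	.globl	example_memrchr
	.type	example_memrchr, @function
example_memrchr:
	/* With 64-byte entry alignment, the code handling sizes <= 32
	   can be laid out to stay within this one cache line.  */
	ret
	.size	example_memrchr, .-example_memrchr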
-rw-r--r--	sysdeps/x86_64/multiarch/memrchr-avx2.S	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sysdeps/x86_64/multiarch/memrchr-avx2.S b/sysdeps/x86_64/multiarch/memrchr-avx2.S
index 9c83c76d3c..f300d7daf4 100644
--- a/sysdeps/x86_64/multiarch/memrchr-avx2.S
+++ b/sysdeps/x86_64/multiarch/memrchr-avx2.S
@@ -35,7 +35,7 @@
 # define VEC_SIZE			32
 # define PAGE_SIZE			4096
 	.section SECTION(.text), "ax", @progbits
-ENTRY(MEMRCHR)
+ENTRY_P2ALIGN(MEMRCHR, 6)
 # ifdef __ILP32__
 	/* Clear upper bits.  */
 	and	%RDX_LP, %RDX_LP
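
Context on the unchanged lines above: under __ILP32__ (the x32 ABI),
RDX_LP expands to the 32-bit register %edx, and on x86-64 any write to
a 32-bit register zero-extends into the full 64-bit register.  A
hedged standalone sketch of the effect (illustrative, not the glibc
source):

	/* x32: the size argument arrives in %edx; a 32-bit self-AND
	   zero-extends it, clearing any stale bits 63:32 of %rdx.  */
	and	%edx, %edx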