author    H.J. Lu <hjl.tools@gmail.com>    2016-03-31 10:04:26 -0700
committer H.J. Lu <hjl.tools@gmail.com>    2016-03-31 10:04:40 -0700
commit    88b57b8ed41d5ecf2e1bdfc19556f9246a665ebb (patch)
tree      798e0ca5f87b073921766fb4a41aa8f9095137d8 /ChangeLog
parent    5cdd1989d1d2f135d02e66250f37ba8e767f9772 (diff)
Add x86-64 memmove with unaligned load/store and rep movsb
Implement x86-64 memmove with unaligned load/store and rep movsb.
Support 16-byte, 32-byte and 64-byte vector register sizes.  When
size <= 8 times the vector register size, there is no check for
address overlap between source and destination.  Since the overhead
of the overlap check is small when size > 8 times the vector register
size, memcpy is an alias of memmove.
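
A minimal C sketch of the overlapping load/store idea (not the glibc
assembly; the vec16 type and the helper name are illustrative) shows
why no overlap check is needed when all source data fits in registers:

  #include <stddef.h>
  #include <string.h>

  /* Stand-in for a 16-byte (XMM) vector register.  */
  typedef struct { unsigned char b[16]; } vec16;

  /* Copy 17..32 bytes: two overlapping 16-byte loads cover the whole
     range, and both loads complete before any store is issued, so the
     copy is correct even when src and dst overlap.  */
  static void
  copy_17_to_32 (unsigned char *dst, const unsigned char *src, size_t n)
  {
    vec16 first, last;
    memcpy (&first, src, sizeof first);
    memcpy (&last, src + n - sizeof last, sizeof last);
    memcpy (dst, &first, sizeof first);
    memcpy (dst + n - sizeof last, &last, sizeof last);
  }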

A single file provides 2 implementations of memmove, one with rep movsb
and the other without rep movsb.  They share the same code when size is
between 2 times the vector register size and REP_MOVSB_THRESHOLD, which
is 2KB for the 16-byte vector register size and is scaled up for larger
vector register sizes.
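
Assuming the scaling is linear in the vector register size, as
described above, the threshold works out as follows (the macro name
here is illustrative):

  /* 2KB for 16-byte vectors, scaled up for larger vectors.  */
  #define REP_MOVSB_THRESHOLD(vec_size) (2048 * ((vec_size) / 16))

  /* 16-byte vectors (SSE2)   -> 2048 bytes
     32-byte vectors (AVX)    -> 4096 bytes
     64-byte vectors (AVX512) -> 8192 bytes  */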

Key features:

1. Use overlapping loads and stores to avoid branches.
2. For size <= 8 times the vector register size, load all source data
into registers and store them together.
3. If there is no address overlap between source and destination, copy
from both ends, 4 times the vector register size at a time.
4. If the address of the destination > the address of the source, copy
backward, 8 times the vector register size at a time.
5. Otherwise, copy forward, 8 times the vector register size at a time
(the copy-direction dispatch is sketched in C after this list).
6. Use rep movsb only for forward copy.  Avoid slow backward rep movsb
by falling back to backward copy, 8 times the vector register size at
a time.
7. Skip the copy when the address of the destination == the address of
the source.
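
The copy-direction dispatch above can be summarized in C (a rough
sketch; the names and structure are illustrative, not glibc's
assembly):

  #include <stddef.h>
  #include <stdint.h>

  enum copy_method
  {
    COPY_NOTHING,          /* 7: dst == src.  */
    COPY_FROM_BOTH_ENDS,   /* 3: no overlap, 4 vectors at a time.  */
    COPY_BACKWARD,         /* 4: dst > src, 8 vectors at a time.  */
    COPY_FORWARD           /* 5/6: 8 vectors at a time, or rep movsb
                              in the ERMS variant.  */
  };

  static enum copy_method
  choose_large_copy (const unsigned char *dst, const unsigned char *src,
                     size_t n)
  {
    uintptr_t d = (uintptr_t) dst, s = (uintptr_t) src;

    if (d == s)
      return COPY_NOTHING;
    if (d + n <= s || s + n <= d)
      return COPY_FROM_BOTH_ENDS;
    if (d > s)
      /* Also used as the fallback instead of a slow backward
         rep movsb (feature 6).  */
      return COPY_BACKWARD;
    return COPY_FORWARD;
  }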

	[BZ #19776]
	* sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Add
	memmove-sse2-unaligned-erms, memmove-avx-unaligned-erms and
	memmove-avx512-unaligned-erms.
	* sysdeps/x86_64/multiarch/ifunc-impl-list.c
	(__libc_ifunc_impl_list): Test
	__memmove_chk_avx512_unaligned_2,
	__memmove_chk_avx512_unaligned_erms,
	__memmove_chk_avx_unaligned_2, __memmove_chk_avx_unaligned_erms,
	__memmove_chk_sse2_unaligned_2,
	__memmove_chk_sse2_unaligned_erms, __memmove_avx_unaligned_2,
	__memmove_avx_unaligned_erms, __memmove_avx512_unaligned_2,
	__memmove_avx512_unaligned_erms, __memmove_erms,
	__memmove_sse2_unaligned_2, __memmove_sse2_unaligned_erms,
	__memcpy_chk_avx512_unaligned_2,
	__memcpy_chk_avx512_unaligned_erms,
	__memcpy_chk_avx_unaligned_2, __memcpy_chk_avx_unaligned_erms,
	__memcpy_chk_sse2_unaligned_2, __memcpy_chk_sse2_unaligned_erms,
	__memcpy_avx_unaligned_2, __memcpy_avx_unaligned_erms,
	__memcpy_avx512_unaligned_2, __memcpy_avx512_unaligned_erms,
	__memcpy_sse2_unaligned_2, __memcpy_sse2_unaligned_erms,
	__memcpy_erms, __mempcpy_chk_avx512_unaligned_2,
	__mempcpy_chk_avx512_unaligned_erms,
	__mempcpy_chk_avx_unaligned_2, __mempcpy_chk_avx_unaligned_erms,
	__mempcpy_chk_sse2_unaligned_2, __mempcpy_chk_sse2_unaligned_erms,
	__mempcpy_avx512_unaligned_2, __mempcpy_avx512_unaligned_erms,
	__mempcpy_avx_unaligned_2, __mempcpy_avx_unaligned_erms,
	__mempcpy_sse2_unaligned_2, __mempcpy_sse2_unaligned_erms and
	__mempcpy_erms.
	* sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S: New
	file.
	* sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S:
	Likewise.
	* sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S:
	Likewise.
	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:
	Likewise.
Diffstat (limited to 'ChangeLog')
-rw-r--r--	ChangeLog	40
1 file changed, 40 insertions, 0 deletions
diff --git a/ChangeLog b/ChangeLog
index 632da3c751..100764f66a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,43 @@
+2016-03-31   H.J. Lu  <hongjiu.lu@intel.com>
+
+	[BZ #19776]
+	* sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Add
+	memmove-sse2-unaligned-erms, memmove-avx-unaligned-erms and
+	memmove-avx512-unaligned-erms.
+	* sysdeps/x86_64/multiarch/ifunc-impl-list.c
+	(__libc_ifunc_impl_list): Test
+	__memmove_chk_avx512_unaligned_2,
+	__memmove_chk_avx512_unaligned_erms,
+	__memmove_chk_avx_unaligned_2, __memmove_chk_avx_unaligned_erms,
+	__memmove_chk_sse2_unaligned_2,
+	__memmove_chk_sse2_unaligned_erms, __memmove_avx_unaligned_2,
+	__memmove_avx_unaligned_erms, __memmove_avx512_unaligned_2,
+	__memmove_avx512_unaligned_erms, __memmove_erms,
+	__memmove_sse2_unaligned_2, __memmove_sse2_unaligned_erms,
+	__memcpy_chk_avx512_unaligned_2,
+	__memcpy_chk_avx512_unaligned_erms,
+	__memcpy_chk_avx_unaligned_2, __memcpy_chk_avx_unaligned_erms,
+	__memcpy_chk_sse2_unaligned_2, __memcpy_chk_sse2_unaligned_erms,
+	__memcpy_avx_unaligned_2, __memcpy_avx_unaligned_erms,
+	__memcpy_avx512_unaligned_2, __memcpy_avx512_unaligned_erms,
+	__memcpy_sse2_unaligned_2, __memcpy_sse2_unaligned_erms,
+	__memcpy_erms, __mempcpy_chk_avx512_unaligned_2,
+	__mempcpy_chk_avx512_unaligned_erms,
+	__mempcpy_chk_avx_unaligned_2, __mempcpy_chk_avx_unaligned_erms,
+	__mempcpy_chk_sse2_unaligned_2, __mempcpy_chk_sse2_unaligned_erms,
+	__mempcpy_avx512_unaligned_2, __mempcpy_avx512_unaligned_erms,
+	__mempcpy_avx_unaligned_2, __mempcpy_avx_unaligned_erms,
+	__mempcpy_sse2_unaligned_2, __mempcpy_sse2_unaligned_erms and
+	__mempcpy_erms.
+	* sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S: New
+	file.
+	* sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S:
+	Likewise.
+	* sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S:
+	Likewise.
+	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:
+	Likewise.
+
 2016-03-31  Stefan Liebler  <stli@linux.vnet.ibm.com>
 
 	* sysdeps/s390/bits/link.h: (La_s390_vr) New typedef.