about summary refs log tree commit diff
path: root/src/string/x86_64
diff options
context:
space:
mode:
authorRich Felker <dalias@aerifal.cx>2013-08-01 21:44:43 -0400
committerRich Felker <dalias@aerifal.cx>2013-08-01 21:44:43 -0400
commit926272ddffa293ee68ffeb01422fc8c792acf428 (patch)
tree8997ae7c582e5d4dbaf0056d940d15702d728258 /src/string/x86_64
parent4a1f55e92fa74ee382909baa96302231f566b5e1 (diff)
downloadmusl-926272ddffa293ee68ffeb01422fc8c792acf428.tar.gz
musl-926272ddffa293ee68ffeb01422fc8c792acf428.tar.xz
musl-926272ddffa293ee68ffeb01422fc8c792acf428.zip
optimized memset asm for i386 and x86_64
the concept of both versions is the same; they differ only in details.
for long runs, they use "rep stosl" or "rep stosq", and for small
runs, they use a trick, writing from both ends towards the middle,
that reduces the number of branches needed. in addition, if memset is
called multiple times with the same length, all branches will be
predicted; there are no loops.

for larger runs, there are likely faster approaches than "rep", at
least on some cpu models. for 32-bit, it's unlikely that there is any
faster approach that does not require non-baseline instructions; doing
anything fancier would require inspecting cpu capabilities. for
64-bit, there may very well be faster versions that work on all
models; further optimization could be explored in the future.

with these changes, memset is anywhere between 50% faster and 6 times
faster, depending on the cpu model and the length and alignment of the
destination buffer.
Diffstat (limited to 'src/string/x86_64')
-rw-r--r--src/string/x86_64/memset.s41
1 files changed, 41 insertions, 0 deletions
diff --git a/src/string/x86_64/memset.s b/src/string/x86_64/memset.s
new file mode 100644
index 00000000..fc06eef8
--- /dev/null
+++ b/src/string/x86_64/memset.s
@@ -0,0 +1,41 @@
+.global memset
+.type memset,@function
+# void *memset(void *dest, int c, size_t n) — SysV AMD64 ABI
+# In:  rdi = dest, esi = c (fill byte), rdx = n
+# Out: rax = dest
+# Strategy: n >= 16 uses rep stosq plus one overlapping tail store;
+# n < 16 writes from both ends toward the middle so the branch count
+# depends only on n (no loop — see commit message above).
+memset:
+	and $0xff,%esi          # keep only the low byte of c
+	mov $0x101010101010101,%rax  # byte-replication multiplier
+	mov %rdx,%rcx           # rcx = n (count for rep / size checks)
+	mov %rdi,%r8            # save dest; rdi is advanced by stosq
+	imul %rsi,%rax          # rax = fill byte repeated in all 8 lanes
+	cmp $16,%rcx
+	jb 1f                   # small sizes take the branchy path below
+
+	# Large case: store the final 8 bytes first (ends exactly at
+	# dest+n), so the n%8 bytes that rep stosq's n/8 quadwords miss
+	# are already covered; the overlap when 8|n is harmless.
+	mov %rax,-8(%rdi,%rcx)
+	shr $3,%rcx             # rcx = n/8 quadwords
+	rep
+	stosq                   # fill 8*(n/8) bytes starting at dest
+	mov %r8,%rax            # return original dest
+	ret
+
+	# Small case (n < 16): paired stores from both ends; each cmp
+	# exits once n is fully covered.
+1:	test %ecx,%ecx
+	jz 1f                   # n == 0: write nothing
+
+	mov %al,(%rdi)          # bytes at [0] and [n-1] (covers n = 1..2)
+	mov %al,-1(%rdi,%rcx)
+	cmp $2,%ecx
+	jbe 1f
+
+	mov %al,1(%rdi)         # bytes at [1] and [n-2] (covers n = 3..4)
+	mov %al,-2(%rdi,%rcx)
+	cmp $4,%ecx
+	jbe 1f
+
+	mov %eax,(%rdi)         # dwords at [0] and [n-4] (covers n = 5..8)
+	mov %eax,-4(%rdi,%rcx)
+	cmp $8,%ecx
+	jbe 1f
+
+	mov %eax,4(%rdi)        # dwords at [4] and [n-8] (covers n = 9..15)
+	mov %eax,-8(%rdi,%rcx)
+
+1:	mov %r8,%rax            # return original dest
+	ret