author    Rich Felker <dalias@aerifal.cx>    2013-08-27 18:08:29 -0400
committer Rich Felker <dalias@aerifal.cx>    2013-08-27 18:08:29 -0400
commit    a543369e3b06a51eacd392c738fc10c5267a195f (patch)
tree      7f7b59c2a45d22ad4a3e66774dcc547ae45b06ac /src/string/memset.c
parent    06ceee8ca34999c5c35e5dfb85133d4b35c9689b (diff)
optimized C memset
this version of memset is optimized both for small and large values of
n, and makes no misaligned writes, so it is usable (and near-optimal)
on all archs. it is capable of filling up to 52 or 56 bytes without
entering a loop and with at most 7 branches, all of which can be fully
predicted if memset is called multiple times with the same size.
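
a standalone sketch of this head/tail technique (illustrative only, not
part of the commit; the name tiny_memset is hypothetical, and the final
loop stands in for the patch's aligned bulk fill):

#include <stddef.h>

/* Overlapping stores from both ends cover every n <= 8 without a
 * loop; each offset is used only after a size check proves it lies
 * inside dest. */
static void *tiny_memset(void *dest, int c, size_t n)
{
	unsigned char *s = dest;

	if (!n) return dest;
	s[0] = s[n-1] = c;       /* n == 1 or 2 fully covered */
	if (n <= 2) return dest;
	s[1] = s[n-2] = c;
	s[2] = s[n-3] = c;       /* n up to 6 fully covered */
	if (n <= 6) return dest;
	s[3] = s[n-4] = c;       /* n up to 8 fully covered */
	if (n <= 8) return dest;

	/* stand-in for the aligned word-at-a-time fill in the patch */
	for (size_t i = 4; i < n-4; i++) s[i] = c;
	return dest;
}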

it also uses the may_alias attribute extension to inform the compiler
that it is violating the aliasing rules, unlike the previous code,
which simply assumed it was safe to violate the aliasing rules since
translation unit boundaries hide the violations from the compiler. for
non-GNUC compilers, 100% portable fallback code in the form of a naive
loop is provided. I intend to eventually apply this approach to all of
the string/memory functions that do word-at-a-time accesses.
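
the pattern in question, sketched minimally (fill_words is an
illustrative name, not part of the patch; it assumes the caller has
already arranged 4-byte alignment and a multiple-of-4 n, as the patch
does before its word stores):

#include <stdint.h>
#include <stddef.h>

void *fill_words(void *dest, int c, size_t n)
{
	unsigned char *s = dest;
#ifdef __GNUC__
	/* may_alias tells the compiler these u32 stores may alias
	 * objects of any type, making the word accesses well-defined */
	typedef uint32_t __attribute__((__may_alias__)) u32;
	u32 c32 = ((u32)-1)/255 * (unsigned char)c; /* replicate byte */
	for (; n >= 4; n -= 4, s += 4) *(u32 *)s = c32;
#else
	/* 100% portable fallback: naive byte loop */
	for (; n; n--, s++) *s = c;
#endif
	return dest;
}
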
Diffstat (limited to 'src/string/memset.c')
-rw-r--r--  src/string/memset.c  89
1 file changed, 77 insertions(+), 12 deletions(-)
diff --git a/src/string/memset.c b/src/string/memset.c
index 20e47c45..f438b073 100644
--- a/src/string/memset.c
+++ b/src/string/memset.c
@@ -1,21 +1,86 @@
 #include <string.h>
-#include <stdlib.h>
 #include <stdint.h>
-#include <limits.h>
-
-#define SS (sizeof(size_t))
-#define ALIGN (sizeof(size_t)-1)
-#define ONES ((size_t)-1/UCHAR_MAX)
 
 void *memset(void *dest, int c, size_t n)
 {
 	unsigned char *s = dest;
-	c = (unsigned char)c;
-	for (; ((uintptr_t)s & ALIGN) && n; n--) *s++ = c;
-	if (n) {
-		size_t *w, k = ONES * c;
-		for (w = (void *)s; n>=SS; n-=SS, w++) *w = k;
-		for (s = (void *)w; n; n--, s++) *s = c;
+	size_t k;
+
+	/* Fill head and tail with minimal branching. Each
+	 * conditional ensures that all the subsequently used
+	 * offsets are well-defined and in the dest region. */
+
+	if (!n) return dest;
+	s[0] = s[n-1] = c;
+	if (n <= 2) return dest;
+	s[1] = s[n-2] = c;
+	s[2] = s[n-3] = c;
+	if (n <= 6) return dest;
+	s[3] = s[n-4] = c;
+	if (n <= 8) return dest;
+
+	/* Advance pointer to align it at a 4-byte boundary,
+	 * and truncate n to a multiple of 4. The previous code
+	 * already took care of any head/tail that get cut off
+	 * by the alignment. */
+
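+	/* (-(uintptr_t)s & 3 equals (4 - (uintptr_t)s % 4) % 4, the
+	 * distance from s to the next 4-byte boundary.) */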
+	k = -(uintptr_t)s & 3;
+	s += k;
+	n -= k;
+	n &= -4;
+
+#ifdef __GNUC__
+	typedef uint32_t __attribute__((__may_alias__)) u32;
+	typedef uint64_t __attribute__((__may_alias__)) u64;
+
+	u32 c32 = ((u32)-1)/255 * (unsigned char)c;
+
+	/* In preparation to fill 32 bytes at a time, aligned on
+	 * an 8-byte boundary, fill head/tail up to 28 bytes each.
+	 * As in the initial byte-based head/tail fill, each
+	 * conditional below ensures that the subsequent offsets
+	 * are valid (e.g. !(n<=24) implies n>=28). */
+
+	*(u32 *)(s+0) = c32;
+	*(u32 *)(s+n-4) = c32;
+	if (n <= 8) return dest;
+	*(u32 *)(s+4) = c32;
+	*(u32 *)(s+8) = c32;
+	*(u32 *)(s+n-12) = c32;
+	*(u32 *)(s+n-8) = c32;
+	if (n <= 24) return dest;
+	*(u32 *)(s+12) = c32;
+	*(u32 *)(s+16) = c32;
+	*(u32 *)(s+20) = c32;
+	*(u32 *)(s+24) = c32;
+	*(u32 *)(s+n-28) = c32;
+	*(u32 *)(s+n-24) = c32;
+	*(u32 *)(s+n-20) = c32;
+	*(u32 *)(s+n-16) = c32;
+
+	/* Align to a multiple of 8 so we can fill 64 bits at a time,
+	 * and avoid, as far as is practical without introducing
+	 * additional branching, writing the same bytes twice. */
+
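+	/* (s is 4-byte aligned here, so s&4 is 0 or 4: skipping 24
+	 * or 28 bytes lands on an 8-byte boundary, and offsets 0-27
+	 * were all filled above, so the skipped bytes are covered.) */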
+	k = 24 + ((uintptr_t)s & 4);
+	s += k;
+	n -= k;
+
+	/* If this loop is reached, 28 tail bytes have already been
+	 * filled, so any remainder when n drops below 32 can be
+	 * safely ignored. */
+
+	u64 c64 = c32 | ((u64)c32 << 32);
+	for (; n >= 32; n-=32, s+=32) {
+		*(u64 *)(s+0) = c64;
+		*(u64 *)(s+8) = c64;
+		*(u64 *)(s+16) = c64;
+		*(u64 *)(s+24) = c64;
 	}
+#else
+	/* Pure C fallback with no aliasing violations. */
+	for (; n; n--, s++) *s = c;
+#endif
+
 	return dest;
 }
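
a possible smoke test for the size boundaries this patch cares about
(0, the 2/6/8-byte byte-fill cutoffs, the 8/24-byte word-fill cutoffs,
and the 32-byte loop); the test is hypothetical, not part of the
commit:

#include <assert.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	unsigned char buf[136];
	for (size_t n = 0; n <= 128; n++) {
		/* poison the whole buffer, fill the first n bytes,
		 * then check that exactly those bytes changed */
		memset(buf, 0xAA, sizeof buf);
		memset(buf, 0x5C, n);
		for (size_t i = 0; i < n; i++) assert(buf[i] == 0x5C);
		for (size_t i = n; i < sizeof buf; i++) assert(buf[i] == 0xAA);
	}
	puts("ok");
	return 0;
}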