about summary refs log tree commit diff
path: root/src/time
diff options
context:
space:
mode:
author Rich Felker <dalias@aerifal.cx> 2019-08-02 14:04:45 -0400
committer Rich Felker <dalias@aerifal.cx> 2019-08-02 14:04:45 -0400
commit 0705fe93566fca735bf0df155f71641df02fc273 (patch)
tree 3ee3ba011e9f09ca9f7ca55a557a12f49b9ec5ce /src/time
parent 006a75a99789f383713e4f47affd7c90e39cc827 (diff)
download musl-0705fe93566fca735bf0df155f71641df02fc273.tar.gz
musl-0705fe93566fca735bf0df155f71641df02fc273.tar.xz
musl-0705fe93566fca735bf0df155f71641df02fc273.zip
clock_gettime: add support for 32-bit vdso with 64-bit time_t
this fixes a major upcoming performance regression introduced by
commit 72f50245d018af0c31b38dec83c557a4e5dd1ea8, whereby 32-bit archs
would lose vdso clock_gettime after switching to 64-bit time_t, unless
the kernel supports time64 and provides a time64 version of the vdso
function. this would incur not just one but two syscalls: first, the
failed time64 syscall, then the fallback time32 one.

overflow of the 32-bit result is detected and triggers a revert to
syscalls. normally, on a system that's not Y2038-ready, this would
still overflow, but if the process has been migrated to a
time64-capable kernel or if the kernel has been hot-patched to add
time64 syscalls, it may conceivably work.
Diffstat (limited to 'src/time')
-rw-r--r-- src/time/clock_gettime.c | 32
1 file changed, 32 insertions, 0 deletions
diff --git a/src/time/clock_gettime.c b/src/time/clock_gettime.c
index 46083759..63e9f9c8 100644
--- a/src/time/clock_gettime.c
+++ b/src/time/clock_gettime.c
@@ -8,9 +8,41 @@
 
 static void *volatile vdso_func;
 
+#ifdef VDSO_CGT32_SYM
+static void *volatile vdso_func_32;
+static int cgt_time32_wrap(clockid_t clk, struct timespec *ts)
+{
+	long ts32[2];
+	int (*f)(clockid_t, long[2]) =
+		(int (*)(clockid_t, long[2]))vdso_func_32;
+	int r = f(clk, ts32);
+	if (!r) {
+		/* Fallback to syscalls if time32 overflowed. Maybe
+		 * we lucked out and somehow migrated to a kernel with
+		 * time64 syscalls available. */
+		if (ts32[0] < 0) {
+			a_cas_p(&vdso_func, (void *)cgt_time32_wrap, 0);
+			return -ENOSYS;
+		}
+		ts->tv_sec = ts32[0];
+		ts->tv_nsec = ts32[1];
+	}
+	return r;
+}
+#endif
+
 static int cgt_init(clockid_t clk, struct timespec *ts)
 {
 	void *p = __vdsosym(VDSO_CGT_VER, VDSO_CGT_SYM);
+#ifdef VDSO_CGT32_SYM
+	if (!p) {
+		void *q = __vdsosym(VDSO_CGT32_VER, VDSO_CGT32_SYM);
+		if (q) {
+			a_cas_p(&vdso_func_32, 0, q);
+			p = cgt_time32_wrap;
+		}
+	}
+#endif
 	int (*f)(clockid_t, struct timespec *) =
 		(int (*)(clockid_t, struct timespec *))p;
 	a_cas_p(&vdso_func, (void *)cgt_init, p);