author     Agustina Arzille <avarzille@riseup.net>         2018-03-18 18:22:55 +0100
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>  2018-03-18 18:23:45 +0100
commit     fb4cc8a0c2cc7ca710a2e1b21d33fcf7c5e3e8ba (patch)
tree       0144983157b1b454724c37fe9e3e2b1e4aaa6629 /hurd/hurdlock.h
parent     542c20a171cbc8cb63155fa6344708d26e9c446b (diff)
hurd: Reimplement libc locks using mach's gsync
	* hurd/Makefile (routines): Add hurdlock.
	* hurd/Versions (GLIBC_PRIVATE): Add new entry to export the above
	interface.
	(HURD_CTHREADS_0.3): Remove __libc_getspecific.
	* hurd/hurdpid.c: Include <lowlevellock.h>.
	(_S_msg_proc_newids): Use lll_wait to synchronize.
	* hurd/hurdsig.c (reauth_proc): Use __mutex_lock and __mutex_unlock.
	* hurd/setauth.c: Include <hurdlock.h>, use integer for synchronization.
	* mach/Makefile (lock-headers): Remove machine-lock.h.
	* mach/lock-intern.h: Include <lowlevellock.h> instead of
	<machine-lock.h>.
	(__spin_lock_t): New type.
	(__SPIN_LOCK_INITIALIZER): New macro.
	(__spin_lock, __spin_unlock, __spin_try_lock, __spin_lock_locked,
	__mutex_init, __mutex_lock_solid, __mutex_unlock_solid, __mutex_lock,
	__mutex_unlock, __mutex_trylock): Use lll to implement locks.
	* mach/mutex-init.c: Include <lowlevellock.h> instead of <cthreads.h>.
	(__mutex_init): Initialize with lll.
	* manual/errno.texi (EOWNERDEAD, ENOTRECOVERABLE): New errno values.
	* sysdeps/mach/Makefile: Add libmachuser as a dependency for libs
	needing lll.
	* sysdeps/mach/hurd/bits/errno.h: Regenerate.
	* sysdeps/mach/hurd/cthreads.c (__libc_getspecific): Remove function.
	* sysdeps/mach/hurd/bits/libc-lock.h: Remove file.
	* sysdeps/mach/hurd/setpgid.c: Include <lowlevellock.h>.
	(__setpgid): Use lll for synchronization.
	* sysdeps/mach/hurd/setsid.c: Likewise with __setsid.
	* sysdeps/mach/bits/libc-lock.h: Include <tls.h> and <lowlevellock.h>
	instead of <cthreads.h>.
	(_IO_lock_inexpensive): New macro.
	(__libc_lock_recursive_t, __rtld_lock_recursive_t): New structures.
	(__libc_lock_self0): New declaration.
	(__libc_lock_owner_self): New macro.
	(__libc_key_t): Remove type.
	(_LIBC_LOCK_INITIALIZER): New macro.
	(__libc_lock_define_initialized, __libc_lock_init, __libc_lock_fini,
	__libc_lock_fini_recursive, __rtld_lock_fini_recursive,
	__libc_lock_lock, __libc_lock_trylock, __libc_lock_unlock,
	__libc_lock_define_initialized_recursive,
	__rtld_lock_define_initialized_recursive,
	__libc_lock_init_recursive, __libc_lock_trylock_recursive,
	__libc_lock_lock_recursive, __libc_lock_unlock_recursive,
	__rtld_lock_initialize, __rtld_lock_trylock_recursive,
	__rtld_lock_lock_recursive, __rtld_lock_unlock_recursive,
	__libc_once_define, __libc_mutex_unlock): Reimplement with lll.
	(__libc_lock_define_recursive, __rtld_lock_define_recursive,
	_LIBC_LOCK_RECURSIVE_INITIALIZER, _RTLD_LOCK_RECURSIVE_INITIALIZER):
	New macros.
	Include <libc-lockP.h> to reimplement libc_key* with pthread_key*.
	* hurd/hurdlock.c: New file.
	* hurd/hurdlock.h: New file.
	* mach/lowlevellock.h: New file.
Diffstat (limited to 'hurd/hurdlock.h')
-rw-r--r--  hurd/hurdlock.h  125
1 file changed, 125 insertions, 0 deletions
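
For context on what the new lll layer amounts to: gsync gives every task
futex-like wait/wake RPCs keyed on a memory address, and the macros added
by this commit are thin wrappers around those RPCs.  The sketch below is
illustrative only and is not part of the commit: it builds a minimal
non-recursive lock directly on the same __gsync_wait/__gsync_wake calls the
new headers use.  The 0/1/2 value protocol and the sketch_* names are
assumptions made for the example, not necessarily what mach/lowlevellock.h
implements, and the snippet is only meaningful inside glibc's Hurd tree
where these internal stubs are visible.

/* Illustrative only: a futex-style lock on top of gsync.
   0 = unlocked, 1 = locked, 2 = locked and possibly contended.  */

#include <mach.h>                /* __mach_task_self, vm_offset_t  */
#include <hurd/hurdlock.h>       /* pulls in the gsync declarations  */

static void
sketch_lock (int *ptr)
{
  int zero = 0;
  /* Fast path: an uncontended 0 -> 1 transition makes us the owner.  */
  if (__atomic_compare_exchange_n (ptr, &zero, 1, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;

  /* Slow path: mark the lock as (possibly) contended and sleep.
     __gsync_wait only blocks while *PTR still equals 2, so a racing
     unlock can never be missed.  */
  while (__atomic_exchange_n (ptr, 2, __ATOMIC_ACQUIRE) != 0)
    __gsync_wait (__mach_task_self (), (vm_offset_t) ptr, 2, 0, 0, 0);
}

static void
sketch_unlock (int *ptr)
{
  /* Release, and wake one sleeper only if someone may be waiting.
     Without GSYNC_MUTATE the wake leaves *PTR untouched.  */
  if (__atomic_exchange_n (ptr, 0, __ATOMIC_RELEASE) == 2)
    __gsync_wake (__mach_task_self (), (vm_offset_t) ptr, 0, 0);
}
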
diff --git a/hurd/hurdlock.h b/hurd/hurdlock.h
new file mode 100644
index 0000000000..af57e25387
--- /dev/null
+++ b/hurd/hurdlock.h
@@ -0,0 +1,125 @@
+/* Low-level lock implementation.  High-level Hurd helpers.
+   Copyright (C) 1999-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _HURD_LOCK_H
+#define _HURD_LOCK_H   1
+
+#include <mach/lowlevellock.h>
+
+struct timespec;
+
+/* Flags for robust locks.  */
+#define LLL_WAITERS      (1U << 31)
+#define LLL_DEAD_OWNER   (1U << 30)
+
+#define LLL_OWNER_MASK   ~(LLL_WAITERS | LLL_DEAD_OWNER)
+
+/* Wait on the 64-bit value at address PTR, without blocking if its
+   contents differ from the pair <LO, HI>.  */
+#define lll_xwait(ptr, lo, hi, flags) \
+  __gsync_wait (__mach_task_self (), \
+    (vm_offset_t)ptr, lo, hi, 0, flags | GSYNC_QUAD)
+
+/* Same as 'lll_wait', but only block for MLSEC milliseconds.  */
+#define lll_timed_wait(ptr, val, mlsec, flags) \
+  __gsync_wait (__mach_task_self (), \
+    (vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED)
+
+/* Same as 'lll_xwait', but only block for MLSEC milliseconds.  */
+#define lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
+  __gsync_wait (__mach_task_self (), (vm_offset_t)ptr, \
+    lo, hi, mlsec, flags | GSYNC_TIMED | GSYNC_QUAD)
+
+/* Same as 'lll_wait', but only block until TSP elapses,
+   using clock CLK.  */
+extern int __lll_abstimed_wait (void *__ptr, int __val,
+  const struct timespec *__tsp, int __flags, int __clk);
+
+/* Same as 'lll_xwait', but only block until TSP elapses,
+   using clock CLK.  */
+extern int __lll_abstimed_xwait (void *__ptr, int __lo, int __hi,
+  const struct timespec *__tsp, int __flags, int __clk);
+
+/* Same as 'lll_lock', but return with an error if TSP elapses,
+   using clock CLK.  */
+extern int __lll_abstimed_lock (void *__ptr,
+  const struct timespec *__tsp, int __flags, int __clk);
+
+/* Acquire the lock at PTR, but return with an error if
+   the process containing the owner thread dies.  */
+extern int __lll_robust_lock (void *__ptr, int __flags);
+
+/* Same as '__lll_robust_lock', but only block until TSP
+   elapses, using clock CLK.  */
+extern int __lll_robust_abstimed_lock (void *__ptr,
+  const struct timespec *__tsp, int __flags, int __clk);
+
+/* Same as '__lll_robust_lock', but return with an error
+   if the lock cannot be acquired without blocking.  */
+extern int __lll_robust_trylock (void *__ptr);
+
+/* Wake one or more threads waiting on address PTR,
+   setting its value to VAL before doing so.  */
+#define lll_set_wake(ptr, val, flags) \
+  __gsync_wake (__mach_task_self (), \
+    (vm_offset_t)ptr, val, flags | GSYNC_MUTATE)
+
+/* Release the robust lock at PTR.  */
+extern void __lll_robust_unlock (void *__ptr, int __flags);
+
+/* Rearrange threads waiting on address SRC to instead wait on
+   DST, waking one of them if WAKE_ONE is non-zero.  */
+#define lll_requeue(src, dst, wake_one, flags) \
+  __gsync_requeue (__mach_task_self (), (vm_offset_t)src, \
+    (vm_offset_t)dst, (boolean_t)wake_one, flags)
+
+/* The following are hacks that simulate optional parameters in C,
+   so that the clock id does not have to be passed explicitly on
+   every one of these calls; it defaults to CLOCK_REALTIME when no
+   argument is given.  */
+
+#define lll_abstimed_wait(ptr, val, tsp, flags, ...)   \
+  ({   \
+     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
+     __lll_abstimed_wait ((ptr), (val), (tsp), (flags),   \
+       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
+   })
+
+#define lll_abstimed_xwait(ptr, lo, hi, tsp, flags, ...)   \
+  ({   \
+     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
+     __lll_abstimed_xwait ((ptr), (lo), (hi), (tsp), (flags),   \
+       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
+   })
+
+#define lll_abstimed_lock(ptr, tsp, flags, ...)   \
+  ({   \
+     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
+     __lll_abstimed_lock ((ptr), (tsp), (flags),   \
+       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
+   })
+
+#define lll_robust_abstimed_lock(ptr, tsp, flags, ...)   \
+  ({   \
+     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
+     __lll_robust_abstimed_lock ((ptr), (tsp), (flags),   \
+       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
+   })
+
+
+#endif
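
To make the default-clock trick concrete: the array initializer
{ CLOCK_REALTIME, ##__VA_ARGS__ } ends with the caller's clock when one is
supplied and with CLOCK_REALTIME otherwise, so indexing the last element
yields the effective clock id.  The usage sketch below is illustrative only
and not part of the commit; the lock words, the zero-on-success assumption,
and the use of lll_set_wake as an unlock are assumptions made for the
example.

/* Illustrative only: acquiring two hypothetical lll-style locks with an
   absolute timeout, once against the default clock and once against an
   explicitly chosen one.  */

#include <time.h>
#include <hurd/hurdlock.h>

static int lock_a;   /* hypothetical lock words, 0 = unlocked  */
static int lock_b;

static int
timed_lock_example (const struct timespec *deadline)
{
  /* No trailing argument: __clk is { CLOCK_REALTIME }, so DEADLINE is
     interpreted against CLOCK_REALTIME.  */
  int err = lll_abstimed_lock (&lock_a, deadline, 0);
  if (err != 0)                /* assuming zero means success  */
    return err;

  /* Trailing argument given: __clk is { CLOCK_REALTIME, CLOCK_MONOTONIC }
     and its last element, CLOCK_MONOTONIC, is the clock actually used.  */
  err = lll_abstimed_lock (&lock_b, deadline, 0, CLOCK_MONOTONIC);
  if (err != 0)
    {
      lll_set_wake (&lock_a, 0, 0);   /* set back to 0 and wake a waiter  */
      return err;
    }

  /* ... critical section guarded by both locks ...  */

  lll_set_wake (&lock_b, 0, 0);
  lll_set_wake (&lock_a, 0, 0);
  return 0;
}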