author    Agustina Arzille <avarzille@riseup.net>  2018-03-18 18:22:55 +0100
committer Samuel Thibault <samuel.thibault@ens-lyon.org>  2018-03-18 18:23:45 +0100
commit    fb4cc8a0c2cc7ca710a2e1b21d33fcf7c5e3e8ba (patch)
tree      0144983157b1b454724c37fe9e3e2b1e4aaa6629 /hurd/hurdlock.c
parent    542c20a171cbc8cb63155fa6344708d26e9c446b (diff)
hurd: Reimplement libc locks using mach's gsync
	* hurd/Makefile (routines): Add hurdlock.
	* hurd/Versions (GLIBC_PRIVATE): Add new entry to export the above
	interface.
	(HURD_CTHREADS_0.3): Remove __libc_getspecific.
	* hurd/hurdpid.c: Include <lowlevellock.h>.
	(_S_msg_proc_newids): Use lll_wait to synchronize.
	* hurd/hurdsig.c (reauth_proc): Use __mutex_lock and __mutex_unlock.
	* hurd/setauth.c: Include <hurdlock.h>, use integer for synchronization.
	* mach/Makefile (lock-headers): Remove machine-lock.h.
	* mach/lock-intern.h: Include <lowlevellock.h> instead of
	<machine-lock.h>.
	(__spin_lock_t): New type.
	(__SPIN_LOCK_INITIALIZER): New macro.
	(__spin_lock, __spin_unlock, __spin_try_lock, __spin_lock_locked,
	__mutex_init, __mutex_lock_solid, __mutex_unlock_solid, __mutex_lock,
	__mutex_unlock, __mutex_trylock): Use lll to implement locks.
	* mach/mutex-init.c: Include <lowlevellock.h> instead of <cthreads.h>.
	(__mutex_init): Initialize with lll.
	* manual/errno.texi (EOWNERDEAD, ENOTRECOVERABLE): New errno values.
	* sysdeps/mach/Makefile: Add libmachuser as a dependency for
	libs needing lll.
	* sysdeps/mach/hurd/bits/errno.h: Regenerate.
	* sysdeps/mach/hurd/cthreads.c (__libc_getspecific): Remove function.
	* sysdeps/mach/hurd/bits/libc-lock.h: Remove file.
	* sysdeps/mach/hurd/setpgid.c: Include <lowlevellock.h>.
	(__setpgid): Use lll for synchronization.
	* sysdeps/mach/hurd/setsid.c: Likewise with __setsid.
	* sysdeps/mach/bits/libc-lock.h: Include <tls.h> and <lowlevellock.h>
	instead of <cthreads.h>.
	(_IO_lock_inexpensive): New macro.
	(__libc_lock_recursive_t, __rtld_lock_recursive_t): New structures.
	(__libc_lock_self0): New declaration.
	(__libc_lock_owner_self): New macro.
	(__libc_key_t): Remove type.
	(_LIBC_LOCK_INITIALIZER): New macro.
	(__libc_lock_define_initialized, __libc_lock_init, __libc_lock_fini,
	__libc_lock_fini_recursive, __rtld_lock_fini_recursive,
	__libc_lock_lock, __libc_lock_trylock, __libc_lock_unlock,
	__libc_lock_define_initialized_recursive,
	__rtld_lock_define_initialized_recursive,
	__libc_lock_init_recursive, __libc_lock_trylock_recursive,
	__libc_lock_lock_recursive, __libc_lock_unlock_recursive,
	__rtld_lock_initialize, __rtld_lock_trylock_recursive,
	__rtld_lock_lock_recursive, __rtld_lock_unlock_recursive,
	__libc_once_define, __libc_mutex_unlock): Reimplement with lll.
	(__libc_lock_define_recursive, __rtld_lock_define_recursive,
	_LIBC_LOCK_RECURSIVE_INITIALIZER, _RTLD_LOCK_RECURSIVE_INITIALIZER):
	New macros.
	Include <libc-lockP.h> to reimplement libc_key* with pthread_key*.
	* hurd/hurdlock.c: New file.
	* hurd/hurdlock.h: New file.
	* mach/lowlevellock.h: New file.
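
In rough terms, the new mach/lowlevellock.h maps the classic futex-style
lock protocol onto gsync.  The sketch below is illustrative only: the
gsync_wait/gsync_wake calls are shown with simplified signatures (the
real Mach RPCs also take a task port and flag arguments), and the exact
macro bodies in the header may differ.

    /* Futex-style lock over gsync; a minimal sketch.
       Lock word states: 0 = unlocked, 1 = locked, 2 = locked, waiters.  */
    static void
    sketch_lock (int *word)
    {
      if (atomic_compare_and_exchange_bool_acq (word, 1, 0) == 0)
        return;                     /* Uncontended fast path.  */
      while (atomic_exchange_acq (word, 2) != 0)
        gsync_wait (word, 2);       /* Sleep while *word == 2.  */
    }

    static void
    sketch_unlock (int *word)
    {
      if (atomic_exchange_rel (word, 0) == 2)
        gsync_wake (word);          /* A waiter may be sleeping.  */
    }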
Diffstat (limited to 'hurd/hurdlock.c')
-rw-r--r--  hurd/hurdlock.c | 215
1 file changed, 215 insertions, 0 deletions
diff --git a/hurd/hurdlock.c b/hurd/hurdlock.c
new file mode 100644
index 0000000000..c06b03cf30
--- /dev/null
+++ b/hurd/hurdlock.c
@@ -0,0 +1,215 @@
+/* Hurd helpers for lowlevellocks.
+   Copyright (C) 1999-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "hurdlock.h"
+#include <hurd.h>
+#include <hurd/hurd.h>
+#include <time.h>
+#include <errno.h>
+#include <unistd.h>
+
+/* Convert an absolute timeout in nanoseconds to a relative
+   timeout in milliseconds.  A negative return value means the
+   timeout has already expired.  */
+static inline int __attribute__ ((gnu_inline))
+compute_reltime (const struct timespec *abstime, clockid_t clk)
+{
+  struct timespec ts;
+  __clock_gettime (clk, &ts);
+
+  ts.tv_sec = abstime->tv_sec - ts.tv_sec;
+  ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
+
+  if (ts.tv_nsec < 0)
+    {
+      --ts.tv_sec;
+      ts.tv_nsec += 1000000000;
+    }
+
+  return (ts.tv_sec < 0 ? -1 :
+    (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000));
+}
+
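+/* Block on *PTR while it contains VAL, or until the absolute timeout
+   TSP, measured against clock CLK, has expired.  */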
+int __lll_abstimed_wait (void *ptr, int val,
+  const struct timespec *tsp, int flags, int clk)
+{
+  int mlsec = compute_reltime (tsp, clk);
+  return (mlsec < 0 ? KERN_TIMEDOUT :
+    lll_timed_wait (ptr, val, mlsec, flags));
+}
+
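+/* Likewise, but block while *PTR matches the two-word pair
+   <LO, HI> (gsync's extended wait).  */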
+int __lll_abstimed_xwait (void *ptr, int lo, int hi,
+  const struct timespec *tsp, int flags, int clk)
+{
+  int mlsec = compute_reltime (tsp, clk);
+  return (mlsec < 0 ? KERN_TIMEDOUT :
+    lll_timed_xwait (ptr, lo, hi, mlsec, flags));
+}
+
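+/* Acquire the lock at PTR, giving up once the absolute timeout TSP,
+   measured against clock CLK, has expired.  */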
+int __lll_abstimed_lock (void *ptr,
+  const struct timespec *tsp, int flags, int clk)
+{
+  if (lll_trylock (ptr) == 0)
+    return (0);
+
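+  /* Contended path.  The lock word follows the usual futex-style
+     protocol: 0 = unlocked, 1 = locked, 2 = locked with waiters.
+     Publishing 2 before sleeping guarantees the owner's unlock
+     will wake us.  */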
+  while (1)
+    {
+      if (atomic_exchange_acq ((int *)ptr, 2) == 0)
+        return (0);
+      else if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
+        return (EINVAL);
+
+      int mlsec = compute_reltime (tsp, clk);
+      if (mlsec < 0 || lll_timed_wait (ptr,
+          2, mlsec, flags) == KERN_TIMEDOUT)
+        return (ETIMEDOUT);
+    }
+}
+
+/* Robust locks.  */
+
+/* Test if a given process id is still valid.  */
+static inline int valid_pid (int pid)
+{
+  task_t task = __pid2task (pid);
+  if (task == MACH_PORT_NULL)
+    return (0);
+
+  __mach_port_deallocate (__mach_task_self (), task);
+  return (1);
+}
+
+/* Robust locks currently have no kernel support; they are simply
+   implemented with periodic polling.  When sleeping, the maximum
+   blocking time is determined by this constant.  */
+#define MAX_WAIT_TIME   1500
+
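+/* Acquire the robust lock at PTR.  The lock word stores the owner's
+   PID (plus LLL_WAITERS under contention) so that a dead owner can
+   be detected.  Returns 0, or EOWNERDEAD if the previous owner died
+   while holding the lock.  */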
+int __lll_robust_lock (void *ptr, int flags)
+{
+  int *iptr = (int *)ptr;
+  int id = __getpid ();
+  int wait_time = 25;
+  unsigned int val;
+
+  /* Try to set the lock word to our PID if it's clear. Otherwise,
+     mark it as having waiters.  */
+  while (1)
+    {
+      val = *iptr;
+      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+      else if (atomic_compare_and_exchange_bool_acq (iptr,
+          val | LLL_WAITERS, val) == 0)
+        break;
+    }
+
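+  /* The lock is held.  Poll it: take the lock if it was released,
+     steal it if the recorded owner has died, and otherwise sleep
+     with exponential backoff capped at MAX_WAIT_TIME milliseconds.  */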
+  for (id |= LLL_WAITERS ; ; )
+    {
+      val = *iptr;
+      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+      else if (val && !valid_pid (val & LLL_OWNER_MASK))
+        {
+          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
+            return (EOWNERDEAD);
+        }
+      else
+        {
+          lll_timed_wait (iptr, val, wait_time, flags);
+          if (wait_time < MAX_WAIT_TIME)
+            wait_time <<= 1;
+        }
+    }
+}
+
+int __lll_robust_abstimed_lock (void *ptr,
+  const struct timespec *tsp, int flags, int clk)
+{
+  int *iptr = (int *)ptr;
+  int id = __getpid ();
+  int wait_time = 25;
+  unsigned int val;
+
+  while (1)
+    {
+      val = *iptr;
+      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+      else if (atomic_compare_and_exchange_bool_acq (iptr,
+          val | LLL_WAITERS, val) == 0)
+        break;
+    }
+
+  for (id |= LLL_WAITERS ; ; )
+    {
+      val = *iptr;
+      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+      else if (val && !valid_pid (val & LLL_OWNER_MASK))
+        {
+          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
+            return (EOWNERDEAD);
+        }
+      else
+        {
+          int mlsec = compute_reltime (tsp, clk);
+          if (mlsec < 0)
+            return (ETIMEDOUT);
+          else if (mlsec > wait_time)
+            mlsec = wait_time;
+
+          int res = lll_timed_wait (iptr, val, mlsec, flags);
+          if (res == KERN_TIMEDOUT)
+            return (ETIMEDOUT);
+          else if (wait_time < MAX_WAIT_TIME)
+            wait_time <<= 1;
+        }
+    }
+}
+
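+/* Try to acquire the robust lock at PTR without blocking.  Returns 0
+   on success, EOWNERDEAD if the previous owner died, EBUSY otherwise.  */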
+int __lll_robust_trylock (void *ptr)
+{
+  int *iptr = (int *)ptr;
+  int id = __getpid ();
+  unsigned int val = *iptr;
+
+  if (!val)
+    {
+      if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+    }
+  else if (!valid_pid (val & LLL_OWNER_MASK) &&
+      atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
+    return (EOWNERDEAD);
+
+  return (EBUSY);
+}
+
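+/* Release the robust lock at PTR, waking waiters if the waiters
+   bit was set.  */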
+void __lll_robust_unlock (void *ptr, int flags)
+{
+  unsigned int val = atomic_load_relaxed ((unsigned int *)ptr);
+  while (1)
+    {
+      if (val & LLL_WAITERS)
+        {
+          lll_set_wake (ptr, 0, flags);
+          break;
+        }
+      else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr, &val, 0))
+        break;
+    }
+}
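
To a caller, the robust variants behave much like POSIX robust mutexes.
A hypothetical cross-process user might look like the sketch below;
recover_shared_state is a placeholder for application recovery code, and
the flags argument (0 here for brevity) would normally request gsync's
shared-memory behaviour (GSYNC_SHARED) for a lock visible to several
tasks.

    int lock_word;   /* Lives in memory shared between processes.  */

    void
    with_robust_lock (void)
    {
      int err = __lll_robust_lock (&lock_word, 0);
      if (err == EOWNERDEAD)
        recover_shared_state ();   /* Previous owner died mid-update.  */
      /* ... critical section ...  */
      __lll_robust_unlock (&lock_word, 0);
    }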