author | Agustina Arzille <avarzille@riseup.net> | 2018-03-18 18:22:55 +0100
---|---|---
committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2018-03-18 18:23:45 +0100
commit | fb4cc8a0c2cc7ca710a2e1b21d33fcf7c5e3e8ba (patch) |
tree | 0144983157b1b454724c37fe9e3e2b1e4aaa6629 /mach |
parent | 542c20a171cbc8cb63155fa6344708d26e9c446b (diff) |
hurd: Reimplement libc locks using mach's gsync
* hurd/Makefile (routines): Add hurdlock.
* hurd/Versions (GLIBC_PRIVATE): Added new entry to export the above
  interface. (HURD_CTHREADS_0.3): Remove __libc_getspecific.
* hurd/hurdpid.c: Include <lowlevellock.h>
  (_S_msg_proc_newids): Use lll_wait to synchronize.
* hurd/hurdsig.c: (reauth_proc): Use __mutex_lock and __mutex_unlock.
* hurd/setauth.c: Include <hurdlock.h>, use integer for synchronization.
* mach/Makefile (lock-headers): Remove machine-lock.h.
* mach/lock-intern.h: Include <lowlevellock.h> instead of <machine-lock.h>.
  (__spin_lock_t): New type.
  (__SPIN_LOCK_INITIALIZER): New macro.
  (__spin_lock, __spin_unlock, __spin_try_lock, __spin_lock_locked,
  __mutex_init, __mutex_lock_solid, __mutex_unlock_solid, __mutex_lock,
  __mutex_unlock, __mutex_trylock): Use lll to implement locks.
* mach/mutex-init.c: Include <lowlevellock.h> instead of <cthreads.h>.
  (__mutex_init): Initialize with lll.
* manual/errno.texi (EOWNERDEAD, ENOTRECOVERABLE): New errno values.
* sysdeps/mach/Makefile: Add libmachuser as dependencies for libs
  needing lll.
* sysdeps/mach/hurd/bits/errno.h: Regenerate.
* sysdeps/mach/hurd/cthreads.c (__libc_getspecific): Remove function.
* sysdeps/mach/hurd/bits/libc-lock.h: Remove file.
* sysdeps/mach/hurd/setpgid.c: Include <lowlevellock.h>.
  (__setpgid): Use lll for synchronization.
* sysdeps/mach/hurd/setsid.c: Likewise with __setsid.
* sysdeps/mach/bits/libc-lock.h: Include <tls.h> and <lowlevellock.h>
  instead of <cthreads.h>.
  (_IO_lock_inexpensive): New macro.
  (__libc_lock_recursive_t, __rtld_lock_recursive_t): New structures.
  (__libc_lock_self0): New declaration.
  (__libc_lock_owner_self): New macro.
  (__libc_key_t): Remove type.
  (_LIBC_LOCK_INITIALIZER): New macro.
  (__libc_lock_define_initialized, __libc_lock_init, __libc_lock_fini,
  __libc_lock_fini_recursive, __rtld_lock_fini_recursive,
  __libc_lock_lock, __libc_lock_trylock, __libc_lock_unlock,
  __libc_lock_define_initialized_recursive,
  __rtld_lock_define_initialized_recursive, __libc_lock_init_recursive,
  __libc_lock_trylock_recursive, __libc_lock_lock_recursive,
  __libc_lock_unlock_recursive, __rtld_lock_initialize,
  __rtld_lock_trylock_recursive, __rtld_lock_lock_recursive,
  __rtld_lock_unlock_recursive, __libc_once_define,
  __libc_mutex_unlock): Reimplement with lll.
  (__libc_lock_define_recursive, __rtld_lock_define_recursive,
  _LIBC_LOCK_RECURSIVE_INITIALIZER, _RTLD_LOCK_RECURSIVE_INITIALIZER):
  New macros.
  Include <libc-lockP.h> to reimplement libc_key* with pthread_key*.
* hurd/hurdlock.c: New file.
* hurd/hurdlock.h: New file.
* mach/lowlevellock.h: New file.
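For readers unfamiliar with the new interface, the sketch below shows roughly how a libc-internal caller would use the gsync-based low-level lock introduced here. It is only an illustration: the lock and function names (`foo_lock`, `__foo_bump`) are invented, and it assumes a GNU/Hurd build of this tree where the new <lowlevellock.h> is on the include path and the kernel is a gnumach with gsync_wait/gsync_wake support.

```c
/* Illustrative only: foo_lock and __foo_bump are hypothetical names,
   not part of this patch.  Assumes a GNU/Hurd glibc build with the
   mach/lowlevellock.h added here and a gnumach kernel providing gsync.  */
#include <lowlevellock.h>

static int foo_lock = LLL_INITIALIZER;  /* a plain int, statically initializable */
static unsigned long foo_counter;

unsigned long
__foo_bump (void)
{
  unsigned long ret;
  lll_lock (&foo_lock, 0);      /* blocks in gsync_wait if another thread holds it */
  ret = ++foo_counter;
  lll_unlock (&foo_lock, 0);    /* issues gsync_wake only if waiters were recorded */
  return ret;
}
```

A lock word is just an int initialized to LLL_INITIALIZER (0), which is why __mutex_init in the diff below reduces to a single store.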
Diffstat (limited to 'mach')
-rw-r--r-- | mach/Makefile       |  2
-rw-r--r-- | mach/lock-intern.h  | 69
-rw-r--r-- | mach/lowlevellock.h | 81
-rw-r--r-- | mach/mutex-init.c   |  7
4 files changed, 129 insertions, 30 deletions
diff --git a/mach/Makefile b/mach/Makefile
index e0d1ffb1fa..2683587b2d 100644
--- a/mach/Makefile
+++ b/mach/Makefile
@@ -23,7 +23,7 @@ headers = mach_init.h mach.h mach_error.h mach-shortcuts.h mach/mach_traps.h \
 	  $(interface-headers) mach/mach.h mach/mig_support.h mach/error.h \
 	  $(lock-headers) machine-sp.h bits/mach/param.h
 lock = spin-solid spin-lock mutex-init mutex-solid
-lock-headers = lock-intern.h machine-lock.h spin-lock.h
+lock-headers = lock-intern.h spin-lock.h
 routines = $(mach-syscalls) $(mach-shortcuts) \
 	   mach_init mig_strncpy msg \
 	   mig-alloc mig-dealloc mig-reply \
diff --git a/mach/lock-intern.h b/mach/lock-intern.h
index f8874c7561..ed8acb6aec 100644
--- a/mach/lock-intern.h
+++ b/mach/lock-intern.h
@@ -19,12 +19,19 @@
 #define _LOCK_INTERN_H
 
 #include <sys/cdefs.h>
-#include <machine-lock.h>
+#if defined __USE_EXTERN_INLINES && defined _LIBC
+# include <lowlevellock.h>
+#endif
 
 #ifndef _EXTERN_INLINE
 #define _EXTERN_INLINE __extern_inline
 #endif
 
+/* The type of a spin lock variable.  */
+typedef unsigned int __spin_lock_t;
+
+/* Static initializer for spinlocks.  */
+#define __SPIN_LOCK_INITIALIZER   LLL_INITIALIZER
 
 /* Initialize LOCK.  */
@@ -50,31 +57,47 @@ extern void __spin_lock (__spin_lock_t *__lock);
 _EXTERN_INLINE void
 __spin_lock (__spin_lock_t *__lock)
 {
-  if (! __spin_try_lock (__lock))
-    __spin_lock_solid (__lock);
+  lll_lock (__lock, 0);
 }
 #endif
-
-/* Name space-clean internal interface to mutex locks.
-   Code internal to the C library uses these functions to lock and unlock
-   mutex locks.  These locks are of type `struct mutex', defined in
-   <cthreads.h>.  The functions here are name space-clean.  If the program
-   is linked with the cthreads library, `__mutex_lock_solid' and
-   `__mutex_unlock_solid' will invoke the corresponding cthreads functions
-   to implement real mutex locks.  If not, simple stub versions just use
-   spin locks.  */
+/* Unlock LOCK.  */
+extern void __spin_unlock (__spin_lock_t *__lock);
+#if defined __USE_EXTERN_INLINES && defined _LIBC
+_EXTERN_INLINE void
+__spin_unlock (__spin_lock_t *__lock)
+{
+  lll_unlock (__lock, 0);
+}
+#endif
 
-/* Initialize the newly allocated mutex lock LOCK for further use.  */
-extern void __mutex_init (void *__lock);
+/* Try to lock LOCK; return nonzero if we locked it, zero if another has.  */
+extern int __spin_try_lock (__spin_lock_t *__lock);
 
-/* Lock LOCK, blocking if we can't get it.  */
-extern void __mutex_lock_solid (void *__lock);
+#if defined __USE_EXTERN_INLINES && defined _LIBC
+_EXTERN_INLINE int
+__spin_try_lock (__spin_lock_t *__lock)
+{
+  return (lll_trylock (__lock) == 0);
+}
+#endif
+
+/* Return nonzero if LOCK is locked.  */
+extern int __spin_lock_locked (__spin_lock_t *__lock);
 
-/* Finish unlocking LOCK, after the spin lock LOCK->held has already been
-   unlocked.  This function will wake up any thread waiting on LOCK.  */
-extern void __mutex_unlock_solid (void *__lock);
+#if defined __USE_EXTERN_INLINES && defined _LIBC
+_EXTERN_INLINE int
+__spin_lock_locked (__spin_lock_t *__lock)
+{
+  return (*(volatile __spin_lock_t *)__lock != 0);
+}
+#endif
+
+/* Name space-clean internal interface to mutex locks.  */
+
+/* Initialize the newly allocated mutex lock LOCK for further use.  */
+extern void __mutex_init (void *__lock);
 
 /* Lock the mutex lock LOCK.  */
@@ -84,8 +107,7 @@ extern void __mutex_lock (void *__lock);
 _EXTERN_INLINE void
 __mutex_lock (void *__lock)
 {
-  if (! __spin_try_lock ((__spin_lock_t *) __lock))
-    __mutex_lock_solid (__lock);
+  __spin_lock ((__spin_lock_t *)__lock);
 }
 #endif
@@ -97,8 +119,7 @@ extern void __mutex_unlock (void *__lock);
 _EXTERN_INLINE void
 __mutex_unlock (void *__lock)
 {
-  __spin_unlock ((__spin_lock_t *) __lock);
-  __mutex_unlock_solid (__lock);
+  __spin_unlock ((__spin_lock_t *)__lock);
 }
 #endif
@@ -109,7 +130,7 @@ extern int __mutex_trylock (void *__lock);
 _EXTERN_INLINE int
 __mutex_trylock (void *__lock)
 {
-  return __spin_try_lock ((__spin_lock_t *) __lock);
+  return (__spin_try_lock ((__spin_lock_t *)__lock));
 }
 #endif
diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h
new file mode 100644
index 0000000000..20c66ca979
--- /dev/null
+++ b/mach/lowlevellock.h
@@ -0,0 +1,81 @@
+/* Low-level lock implementation.  Mach gsync-based version.
+   Copyright (C) 1994-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _MACH_LOWLEVELLOCK_H
+#define _MACH_LOWLEVELLOCK_H   1
+
+#include <mach/gnumach.h>
+#include <atomic.h>
+
+/* Gsync flags.  */
+#ifndef GSYNC_SHARED
+  #define GSYNC_SHARED      0x01
+  #define GSYNC_QUAD        0x02
+  #define GSYNC_TIMED       0x04
+  #define GSYNC_BROADCAST   0x08
+  #define GSYNC_MUTATE      0x10
+#endif
+
+/* Static initializer for low-level locks.  */
+#define LLL_INITIALIZER   0
+
+/* Wait on address PTR, without blocking if its contents
+ * are different from VAL.  */
+#define lll_wait(ptr, val, flags)   \
+  __gsync_wait (__mach_task_self (),   \
+    (vm_offset_t)(ptr), (val), 0, 0, (flags))
+
+/* Wake one or more threads waiting on address PTR.  */
+#define lll_wake(ptr, flags)   \
+  __gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags))
+
+/* Acquire the lock at PTR.  */
+#define lll_lock(ptr, flags)   \
+  ({   \
+     int *__iptr = (int *)(ptr);   \
+     int __flags = (flags);   \
+     if (*__iptr != 0 ||   \
+         atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) != 0)   \
+       while (1)   \
+         {   \
+           if (atomic_exchange_acq (__iptr, 2) == 0)   \
+             break;   \
+           lll_wait (__iptr, 2, __flags);   \
+         }   \
+     (void)0;   \
+   })
+
+/* Try to acquire the lock at PTR, without blocking.
+   Evaluates to zero on success.  */
+#define lll_trylock(ptr)   \
+  ({   \
+     int *__iptr = (int *)(ptr);   \
+     *__iptr == 0 &&   \
+       atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) == 0 ? 0 : -1;   \
+   })
+
+/* Release the lock at PTR.  */
+#define lll_unlock(ptr, flags)   \
+  ({   \
+     int *__iptr = (int *)(ptr);   \
+     if (atomic_exchange_rel (__iptr, 0) == 2)   \
+       lll_wake (__iptr, (flags));   \
+     (void)0;   \
+   })
+
+#endif
diff --git a/mach/mutex-init.c b/mach/mutex-init.c
index cab0a86fec..776fec5506 100644
--- a/mach/mutex-init.c
+++ b/mach/mutex-init.c
@@ -17,13 +17,10 @@
    <http://www.gnu.org/licenses/>.  */
 
 #include <lock-intern.h>
-#include <cthreads.h>
+#include <lowlevellock.h>
 
 void
 __mutex_init (void *lock)
 {
-  /* This happens to be name space-safe because it is a macro.
-     It invokes only spin_lock_init, which is a macro for __spin_lock_init;
-     and cthread_queue_init, which is a macro for some simple code.  */
-  mutex_init ((struct mutex *) lock);
+  *(int *)lock = LLL_INITIALIZER;
 }
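A note on the lll_lock/lll_unlock protocol in mach/lowlevellock.h above: the lock word takes the value 0 (unlocked), 1 (locked, uncontended) or 2 (locked, waiters possible), and the release path only issues a gsync_wake when it observes 2. The sketch below mirrors that state machine with portable C11 atomics, with a spin standing in for gsync_wait/gsync_wake; it is an illustration of the transitions under those assumptions, not the Mach implementation, and the illus_* names are invented.

```c
/* Portable illustration of the 0/1/2 lock-word protocol used by the
   lll_lock/lll_unlock macros above.  The spin loop stands in for
   gsync_wait; no explicit wake is needed because a spinning waiter
   observes the store directly.  Names are invented for this sketch.  */
#include <stdatomic.h>
#include <stdio.h>

static void
illus_wait (atomic_int *word, int val)
{
  /* gsync_wait blocks while *word == val; here we just spin.  */
  while (atomic_load_explicit (word, memory_order_relaxed) == val)
    ;
}

static void
illus_lock (atomic_int *word)
{
  int expected = 0;
  /* Fast path: 0 -> 1 when the lock is free and uncontended.  */
  if (atomic_compare_exchange_strong (word, &expected, 1))
    return;
  /* Slow path: mark the lock contended (2); if the previous value was 0
     we acquired it, otherwise wait until it stops being 2 and retry.  */
  while (atomic_exchange (word, 2) != 0)
    illus_wait (word, 2);
}

static void
illus_unlock (atomic_int *word)
{
  /* Releasing from state 2 is the only case where lll_unlock calls
     gsync_wake; the spinning stand-in needs no explicit wake.  */
  if (atomic_exchange (word, 0) == 2)
    {
      /* gsync_wake (word, flags) would go here.  */
    }
}

int
main (void)
{
  atomic_int lock = 0;  /* LLL_INITIALIZER */

  illus_lock (&lock);
  printf ("after lock:   %d\n", (int) atomic_load (&lock));  /* prints 1 */
  illus_unlock (&lock);
  printf ("after unlock: %d\n", (int) atomic_load (&lock));  /* prints 0 */
  return 0;
}
```

On the Hurd the same transitions apply, except that a contended waiter parks in the kernel via gsync_wait and is woken by gsync_wake instead of spinning.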