author | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2020-12-13 10:37:24 +0000 |
---|---|---|
committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2020-12-16 01:58:33 +0100 |
commit | bec412424e949c900b01767ce32b6743bdaaac93 (patch) | |
tree | d4244ee3b644f5e8e68dfb5f22aeeb30065bca75 /sysdeps | |
parent | 18c2ab9a094f6a6cb3a107d66dafaf32f8f969f0 (diff) | |
hurd: make lll_* take a variable instead of a ptr
To be consistent with the other ports, make the lll_* macros take the lock variable itself rather than a pointer, and rename the variants that keep taking a pointer to __lll_*.
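In other words, the pointer-taking primitives keep living under the `__lll_*` names, while the unprefixed `lll_*` macros accept the lock variable and take its address internally. Below is a minimal standalone sketch of that wrapper pattern, with toy spin-lock bodies standing in for the real Hurd primitives; the names and implementations are illustrative only, not the actual glibc definitions:

```c
#include <stdio.h>

/* Toy pointer-taking primitives standing in for the real __lll_* routines
   (the actual Hurd ones block in the kernel; these just spin).  */
static inline void
__lll_lock (int *lock, int flags)
{
  (void) flags;
  while (__sync_lock_test_and_set (lock, 1))
    ;                           /* placeholder for a real wait */
}

static inline void
__lll_unlock (int *lock, int flags)
{
  (void) flags;
  __sync_lock_release (lock);
}

/* Variable-taking wrappers in the style this commit adopts: the caller
   names the lock variable and the macro takes its address.  */
#define lll_lock(var, flags)    __lll_lock (&(var), flags)
#define lll_unlock(var, flags)  __lll_unlock (&(var), flags)

int
main (void)
{
  int lock = 0;

  lll_lock (lock, 0);           /* before this change: lll_lock (&lock, 0) */
  puts ("in the critical section");
  lll_unlock (lock, 0);
  return 0;
}
```

With a wrapper layer of this shape, a call site such as `lll_lock (mtxp->__lock, flags)` expands to the same code as the old `lll_lock (&mtxp->__lock, flags)`, so the diff below is a naming/convention cleanup rather than a behavioral change.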
Diffstat (limited to 'sysdeps')
-rw-r--r-- | sysdeps/mach/hurd/htl/pt-mutex-lock.c | 8
-rw-r--r-- | sysdeps/mach/hurd/htl/pt-mutex-timedlock.c | 6
-rw-r--r-- | sysdeps/mach/hurd/htl/pt-mutex-trylock.c | 8
-rw-r--r-- | sysdeps/mach/hurd/htl/pt-mutex-unlock.c | 8
-rw-r--r-- | sysdeps/mach/hurd/htl/pt-mutex.h | 2
-rw-r--r-- | sysdeps/mach/hurd/setpgid.c | 2
-rw-r--r-- | sysdeps/mach/hurd/setsid.c | 2
-rw-r--r-- | sysdeps/mach/hurd/tls.h | 4
-rw-r--r-- | sysdeps/mach/libc-lock.h | 12 |
9 files changed, 26 insertions, 26 deletions
```diff
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-lock.c b/sysdeps/mach/hurd/htl/pt-mutex-lock.c
index 22510701d8..ed1f6c13a1 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-lock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-lock.c
@@ -33,7 +33,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      lll_lock (&mtxp->__lock, flags);
+      lll_lock (mtxp->__lock, flags);
       break;
 
     case PT_MTX_RECURSIVE:
@@ -47,7 +47,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
          return ret;
        }
 
-      lll_lock (&mtxp->__lock, flags);
+      lll_lock (mtxp->__lock, flags);
       mtx_set_owner (mtxp, self, flags);
       mtxp->__cnt = 1;
       break;
@@ -57,7 +57,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
       if (mtx_owned_p (mtxp, self, flags))
        return EDEADLK;
 
-      lll_lock (&mtxp->__lock, flags);
+      lll_lock (mtxp->__lock, flags);
       mtx_set_owner (mtxp, self, flags);
       break;
@@ -65,7 +65,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
     case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
     case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
       self = _pthread_self ();
-      ROBUST_LOCK (self, mtxp, __lll_robust_lock, flags);
+      ROBUST_LOCK (self, mtxp, lll_robust_lock, flags);
       break;
 
     default:
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c b/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c
index 198b340429..965e8b24fb 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c
@@ -34,7 +34,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid);
+      ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid);
       break;
 
     case PT_MTX_RECURSIVE:
@@ -47,7 +47,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
          ++mtxp->__cnt;
          ret = 0;
        }
-      else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid)) == 0)
+      else if ((ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid)) == 0)
        {
          mtx_set_owner (mtxp, self, flags);
          mtxp->__cnt = 1;
@@ -59,7 +59,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
       self = _pthread_self ();
       if (mtx_owned_p (mtxp, self, flags))
        ret = EDEADLK;
-      else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid)) == 0)
+      else if ((ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid)) == 0)
        mtx_set_owner (mtxp, self, flags);
 
       break;
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-trylock.c b/sysdeps/mach/hurd/htl/pt-mutex-trylock.c
index f883ec3f30..62183b0299 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-trylock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-trylock.c
@@ -32,7 +32,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      ret = lll_trylock (&mtxp->__lock);
+      ret = lll_trylock (mtxp->__lock);
       if (ret)
        ret = EBUSY;
       break;
@@ -47,7 +47,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
          ++mtxp->__cnt;
          ret = 0;
        }
-      else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+      else if ((ret = lll_trylock (mtxp->__lock)) == 0)
        {
          mtx_set_owner (mtxp, self, mtxp->__flags);
          mtxp->__cnt = 1;
@@ -59,7 +59,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
     case PT_MTX_ERRORCHECK:
       self = _pthread_self ();
 
-      if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+      if ((ret = lll_trylock (mtxp->__lock)) == 0)
        mtx_set_owner (mtxp, self, mtxp->__flags);
       else
        ret = EBUSY;
@@ -69,7 +69,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
     case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
     case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
       self = _pthread_self ();
-      ROBUST_LOCK (self, mtxp, __lll_robust_trylock);
+      ROBUST_LOCK (self, mtxp, lll_robust_trylock);
       break;
 
     default:
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-unlock.c b/sysdeps/mach/hurd/htl/pt-mutex-unlock.c
index aabe9eafbb..f2e87a7c6f 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-unlock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-unlock.c
@@ -32,7 +32,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      lll_unlock (&mtxp->__lock, flags);
+      lll_unlock (mtxp->__lock, flags);
       break;
 
     case PT_MTX_RECURSIVE:
@@ -42,7 +42,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
       else if (--mtxp->__cnt == 0)
        {
          mtxp->__owner_id = mtxp->__shpid = 0;
-         lll_unlock (&mtxp->__lock, flags);
+         lll_unlock (mtxp->__lock, flags);
        }
 
       break;
@@ -54,7 +54,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
       else
        {
          mtxp->__owner_id = mtxp->__shpid = 0;
-         lll_unlock (&mtxp->__lock, flags);
+         lll_unlock (mtxp->__lock, flags);
        }
 
       break;
@@ -74,7 +74,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
           * state, mark it as irrecoverable.  */
          mtxp->__owner_id = ((mtxp->__lock & LLL_DEAD_OWNER)
                              ? NOTRECOVERABLE_ID : 0);
-         __lll_robust_unlock (&mtxp->__lock, flags);
+         lll_robust_unlock (mtxp->__lock, flags);
        }
 
       break;
diff --git a/sysdeps/mach/hurd/htl/pt-mutex.h b/sysdeps/mach/hurd/htl/pt-mutex.h
index 578478fcaf..ead7c91c4f 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex.h
+++ b/sysdeps/mach/hurd/htl/pt-mutex.h
@@ -42,7 +42,7 @@
        return EDEADLK; \
       } \
   \
-    ret = cb (&mtxp->__lock, ##__VA_ARGS__); \
+    ret = cb (mtxp->__lock, ##__VA_ARGS__); \
     if (ret == 0 || ret == EOWNERDEAD) \
       { \
        if (mtxp->__owner_id == ENOTRECOVERABLE) \
diff --git a/sysdeps/mach/hurd/setpgid.c b/sysdeps/mach/hurd/setpgid.c
index 4bb90c48c7..41562b77e5 100644
--- a/sysdeps/mach/hurd/setpgid.c
+++ b/sysdeps/mach/hurd/setpgid.c
@@ -39,7 +39,7 @@ __setpgid (pid_t pid, pid_t pgid)
   /* Synchronize with the signal thread to make sure we have received and
      processed proc_newids before returning to the user.  */
   while (_hurd_pids_changed_stamp == stamp)
-    lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
+    lll_wait (_hurd_pids_changed_stamp, stamp, 0);
 
   return 0;
diff --git a/sysdeps/mach/hurd/setsid.c b/sysdeps/mach/hurd/setsid.c
index b297473a86..f5c95a334e 100644
--- a/sysdeps/mach/hurd/setsid.c
+++ b/sysdeps/mach/hurd/setsid.c
@@ -56,7 +56,7 @@ __setsid (void)
         returned by `getpgrp ()' in other threads) has been updated before
         we return.  */
       while (_hurd_pids_changed_stamp == stamp)
-       lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
+       lll_wait (_hurd_pids_changed_stamp, stamp, 0);
     }
   HURD_CRITICAL_END;
diff --git a/sysdeps/mach/hurd/tls.h b/sysdeps/mach/hurd/tls.h
index a6a3586785..a0d70c2026 100644
--- a/sysdeps/mach/hurd/tls.h
+++ b/sysdeps/mach/hurd/tls.h
@@ -60,7 +60,7 @@
 #define THREAD_GSCOPE_RESET_FLAG() \
   do \
     if (atomic_exchange_and_add_rel (&GL(dl_thread_gscope_count), -1) == 1) \
-      lll_wake (&GL(dl_thread_gscope_count), 0); \
+      lll_wake (GL(dl_thread_gscope_count), 0); \
   while (0)
 #define THREAD_GSCOPE_WAIT() \
   do \
@@ -68,7 +68,7 @@
       int count; \
       atomic_write_barrier (); \
       while ((count = GL(dl_thread_gscope_count))) \
-       lll_wait (&GL(dl_thread_gscope_count), count, 0); \
+       lll_wait (GL(dl_thread_gscope_count), count, 0); \
     } \
   while (0)
diff --git a/sysdeps/mach/libc-lock.h b/sysdeps/mach/libc-lock.h
index 3993a57b26..d9a2c42ebe 100644
--- a/sysdeps/mach/libc-lock.h
+++ b/sysdeps/mach/libc-lock.h
@@ -74,14 +74,14 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
 
 /* Lock the named lock variable.  */
 #define __libc_lock_lock(NAME) \
-  ({ lll_lock (&(NAME), 0); 0; })
+  ({ lll_lock ((NAME), 0); 0; })
 
 /* Lock the named lock variable.  */
-#define __libc_lock_trylock(NAME) lll_trylock (&(NAME))
+#define __libc_lock_trylock(NAME) lll_trylock (NAME)
 
 /* Unlock the named lock variable.  */
 #define __libc_lock_unlock(NAME) \
-  ({ lll_unlock (&(NAME), 0); 0; })
+  ({ lll_unlock ((NAME), 0); 0; })
 
 #define __libc_lock_define_recursive(CLASS,NAME) \
   CLASS __libc_lock_recursive_t NAME;
@@ -111,7 +111,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
     int __r = 0; \
     if (__self == __lock->owner) \
       ++__lock->cnt; \
-    else if ((__r = lll_trylock (&__lock->lock)) == 0) \
+    else if ((__r = lll_trylock (__lock->lock)) == 0) \
      __lock->owner = __self, __lock->cnt = 1; \
     __r; \
   })
@@ -122,7 +122,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
     void *__self = __libc_lock_owner_self (); \
     if (__self != __lock->owner) \
       { \
-       lll_lock (&__lock->lock, 0); \
+       lll_lock (__lock->lock, 0); \
        __lock->owner = __self; \
       } \
     ++__lock->cnt; \
@@ -135,7 +135,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
     if (--__lock->cnt == 0) \
       { \
        __lock->owner = 0; \
-       lll_unlock (&__lock->lock, 0); \
+       lll_unlock (__lock->lock, 0); \
       } \
   })
```
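The libc-lock.h hunks show the same rule applied to the recursive __libc_lock_* macros: they now hand the lll_* layer the lock member as a variable, with the address-taking hidden one level down. The sketch below mimics that ownership/count logic in standalone form; the struct, helper names, and toy lll_* macros are hypothetical stand-ins, not the glibc internals:

```c
#include <stdio.h>
#include <stddef.h>

/* Toy variable-taking lock macros in the style of the Hurd lll_* wrappers;
   the real ones block in the kernel, these just use GCC atomic builtins.  */
#define lll_lock(var, flags) \
  do { while (__sync_lock_test_and_set (&(var), 1)); } while (0)
#define lll_unlock(var, flags) \
  __sync_lock_release (&(var))

/* A recursive lock shaped roughly like __libc_lock_recursive_t:
   owner token, recursion count, plain int low-level lock.  */
struct rec_lock
{
  void *owner;
  int cnt;
  int lock;
};

/* Mirrors the __libc_lock_lock_recursive pattern after this commit: the
   lll_lock macro receives the lock *variable* (l->lock), not its address.  */
static void
rec_lock_lock (struct rec_lock *l, void *self)
{
  if (self != l->owner)
    {
      lll_lock (l->lock, 0);
      l->owner = self;
    }
  ++l->cnt;
}

/* Mirrors __libc_lock_unlock_recursive: only the last unlock by the owner
   releases the low-level lock.  */
static void
rec_lock_unlock (struct rec_lock *l)
{
  if (--l->cnt == 0)
    {
      l->owner = NULL;
      lll_unlock (l->lock, 0);
    }
}

int
main (void)
{
  struct rec_lock l = { NULL, 0, 0 };
  void *self = &l;   /* any per-thread token; glibc uses __libc_lock_owner_self () */

  rec_lock_lock (&l, self);
  rec_lock_lock (&l, self);   /* recursive acquisition: only bumps the count */
  puts ("held twice");
  rec_lock_unlock (&l);
  rec_lock_unlock (&l);       /* count reaches zero: low-level lock released */
  return 0;
}
```

As in the macros in the diff above, a repeated acquisition by the current owner never touches the low-level lock; only the count changes, so the behaviour of the rename-only change stays identical to the pointer-taking version.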