Diffstat (limited to 'nptl')
-rw-r--r--   nptl/pthread_mutex_init.c                |  8
-rw-r--r--   nptl/pthread_mutex_setprioceiling.c      | 15
-rw-r--r--   nptl/pthread_mutexattr_getprioceiling.c  |  8
-rw-r--r--   nptl/pthread_mutexattr_setprioceiling.c  | 15
-rw-r--r--   nptl/tpp.c                               | 54
5 files changed, 68 insertions(+), 32 deletions(-)
diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
index 9f28b8d8dc..38597ab0f8 100644
--- a/nptl/pthread_mutex_init.c
+++ b/nptl/pthread_mutex_init.c
@@ -22,6 +22,7 @@
 #include <string.h>
 #include <kernel-features.h>
 #include "pthreadP.h"
+#include <atomic.h>
 #include <stap-probe.h>
 
 
@@ -117,10 +118,11 @@ __pthread_mutex_init (mutex, mutexattr)
 			>> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
 	  if (! ceiling)
 	    {
-	      if (__sched_fifo_min_prio == -1)
+	      /* See __init_sched_fifo_prio.  */
+	      if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
 		__init_sched_fifo_prio ();
-	      if (ceiling < __sched_fifo_min_prio)
-		ceiling = __sched_fifo_min_prio;
+	      if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
+		ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
 	    }
 	  mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
 	  break;
diff --git a/nptl/pthread_mutex_setprioceiling.c b/nptl/pthread_mutex_setprioceiling.c
index 52f65a0fcf..63f11bbf44 100644
--- a/nptl/pthread_mutex_setprioceiling.c
+++ b/nptl/pthread_mutex_setprioceiling.c
@@ -20,6 +20,7 @@
 #include <stdbool.h>
 #include <errno.h>
 #include <pthreadP.h>
+#include <atomic.h>
 
 
 int
@@ -33,15 +34,19 @@ pthread_mutex_setprioceiling (mutex, prioceiling, old_ceiling)
   if ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0)
     return EINVAL;
 
-  if (__sched_fifo_min_prio == -1)
+  /* See __init_sched_fifo_prio.  */
+  if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1
+      || atomic_load_relaxed (&__sched_fifo_max_prio) == -1)
     __init_sched_fifo_prio ();
 
-  if (__builtin_expect (prioceiling < __sched_fifo_min_prio, 0)
-      || __builtin_expect (prioceiling > __sched_fifo_max_prio, 0)
-      || __builtin_expect ((prioceiling
+  if (__glibc_unlikely (prioceiling
+			< atomic_load_relaxed (&__sched_fifo_min_prio))
+      || __glibc_unlikely (prioceiling
+			   > atomic_load_relaxed (&__sched_fifo_max_prio))
+      || __glibc_unlikely ((prioceiling
 			    & (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
 			       >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT))
-			   != prioceiling, 0))
+			   != prioceiling))
     return EINVAL;
 
   /* Check whether we already hold the mutex.  */
diff --git a/nptl/pthread_mutexattr_getprioceiling.c b/nptl/pthread_mutexattr_getprioceiling.c
index c3e93fa655..df265e7410 100644
--- a/nptl/pthread_mutexattr_getprioceiling.c
+++ b/nptl/pthread_mutexattr_getprioceiling.c
@@ -18,6 +18,7 @@
    <http://www.gnu.org/licenses/>.  */
 
 #include <pthreadP.h>
+#include <atomic.h>
 
 
 int
@@ -35,10 +36,11 @@ pthread_mutexattr_getprioceiling (attr, prioceiling)
 
   if (! ceiling)
     {
-      if (__sched_fifo_min_prio == -1)
+      /* See __init_sched_fifo_prio.  */
+      if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
 	__init_sched_fifo_prio ();
-      if (ceiling < __sched_fifo_min_prio)
-	ceiling = __sched_fifo_min_prio;
+      if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
+	ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
     }
 
   *prioceiling = ceiling;
diff --git a/nptl/pthread_mutexattr_setprioceiling.c b/nptl/pthread_mutexattr_setprioceiling.c
index d10e51cbfa..d155bf0fa8 100644
--- a/nptl/pthread_mutexattr_setprioceiling.c
+++ b/nptl/pthread_mutexattr_setprioceiling.c
@@ -19,6 +19,7 @@
 
 #include <errno.h>
 #include <pthreadP.h>
+#include <atomic.h>
 
 
 int
@@ -26,15 +27,19 @@ pthread_mutexattr_setprioceiling (attr, prioceiling)
      pthread_mutexattr_t *attr;
      int prioceiling;
 {
-  if (__sched_fifo_min_prio == -1)
+  /* See __init_sched_fifo_prio.  */
+  if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1
+      || atomic_load_relaxed (&__sched_fifo_max_prio) == -1)
     __init_sched_fifo_prio ();
 
-  if (__builtin_expect (prioceiling < __sched_fifo_min_prio, 0)
-      || __builtin_expect (prioceiling > __sched_fifo_max_prio, 0)
-      || __builtin_expect ((prioceiling
+  if (__glibc_unlikely (prioceiling
+			< atomic_load_relaxed (&__sched_fifo_min_prio))
+      || __glibc_unlikely (prioceiling
+			   > atomic_load_relaxed (&__sched_fifo_max_prio))
+      || __glibc_unlikely ((prioceiling
 			    & (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
 			       >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT))
-			   != prioceiling, 0))
+			   != prioceiling))
     return EINVAL;
 
   struct pthread_mutexattr *iattr = (struct pthread_mutexattr *) attr;
diff --git a/nptl/tpp.c b/nptl/tpp.c
index ee9a2fe0d0..9cfeea188c 100644
--- a/nptl/tpp.c
+++ b/nptl/tpp.c
@@ -23,17 +23,29 @@
 #include <pthreadP.h>
 #include <sched.h>
 #include <stdlib.h>
+#include <atomic.h>
 
 int __sched_fifo_min_prio = -1;
 int __sched_fifo_max_prio = -1;
 
+/* We only want to initialize __sched_fifo_min_prio and __sched_fifo_max_prio
+   once.  The standard solution would be similar to pthread_once, but then
+   readers would need to use an acquire fence.  In this specific case,
+   initialization is comprised of just idempotent writes to two variables
+   that have an initial value of -1.  Therefore, we can treat each variable as
+   a separate, at-least-once initialized value.  This enables using just
+   relaxed MO loads and stores, but requires that consumers check for
+   initialization of each value that is to be used; see
+   __pthread_tpp_change_priority for an example.
+ */
 void
 __init_sched_fifo_prio (void)
 {
-  __sched_fifo_max_prio = sched_get_priority_max (SCHED_FIFO);
-  atomic_write_barrier ();
-  __sched_fifo_min_prio = sched_get_priority_min (SCHED_FIFO);
+  atomic_store_relaxed (&__sched_fifo_max_prio,
+			sched_get_priority_max (SCHED_FIFO));
+  atomic_store_relaxed (&__sched_fifo_min_prio,
+			sched_get_priority_min (SCHED_FIFO));
 }
 
 int
@@ -41,49 +53,59 @@ __pthread_tpp_change_priority (int previous_prio, int new_prio)
 {
   struct pthread *self = THREAD_SELF;
   struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);
+  int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
+  int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
 
   if (tpp == NULL)
     {
-      if (__sched_fifo_min_prio == -1)
-	__init_sched_fifo_prio ();
+      /* See __init_sched_fifo_prio.  We need both the min and max prio,
+         so need to check both, and run initialization if either one is
+         not initialized.  The memory model's write-read coherence rule
+         makes this work.  */
+      if (fifo_min_prio == -1 || fifo_max_prio == -1)
+	{
+	  __init_sched_fifo_prio ();
+	  fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
+	  fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
+	}
 
       size_t size = sizeof *tpp;
-      size += (__sched_fifo_max_prio - __sched_fifo_min_prio + 1)
+      size += (fifo_max_prio - fifo_min_prio + 1)
 	      * sizeof (tpp->priomap[0]);
       tpp = calloc (size, 1);
       if (tpp == NULL)
 	return ENOMEM;
-      tpp->priomax = __sched_fifo_min_prio - 1;
+      tpp->priomax = fifo_min_prio - 1;
       THREAD_SETMEM (self, tpp, tpp);
     }
 
   assert (new_prio == -1
-	  || (new_prio >= __sched_fifo_min_prio
-	      && new_prio <= __sched_fifo_max_prio));
+	  || (new_prio >= fifo_min_prio
+	      && new_prio <= fifo_max_prio));
   assert (previous_prio == -1
-	  || (previous_prio >= __sched_fifo_min_prio
-	      && previous_prio <= __sched_fifo_max_prio));
+	  || (previous_prio >= fifo_min_prio
+	      && previous_prio <= fifo_max_prio));
 
   int priomax = tpp->priomax;
   int newpriomax = priomax;
   if (new_prio != -1)
     {
-      if (tpp->priomap[new_prio - __sched_fifo_min_prio] + 1 == 0)
+      if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0)
 	return EAGAIN;
-      ++tpp->priomap[new_prio - __sched_fifo_min_prio];
+      ++tpp->priomap[new_prio - fifo_min_prio];
       if (new_prio > priomax)
 	newpriomax = new_prio;
     }
 
   if (previous_prio != -1)
     {
-      if (--tpp->priomap[previous_prio - __sched_fifo_min_prio] == 0
+      if (--tpp->priomap[previous_prio - fifo_min_prio] == 0
 	  && priomax == previous_prio
 	  && previous_prio > new_prio)
 	{
 	  int i;
-	  for (i = previous_prio - 1; i >= __sched_fifo_min_prio; --i)
-	    if (tpp->priomap[i - __sched_fifo_min_prio])
+	  for (i = previous_prio - 1; i >= fifo_min_prio; --i)
+	    if (tpp->priomap[i - fifo_min_prio])
 	      break;
 	  newpriomax = i;
 	}
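The new comment in tpp.c describes the pattern this patch relies on: because initialization consists only of idempotent writes to variables whose initial value is -1, each variable can be treated as an independent, at-least-once initialized value, so relaxed-MO atomics are enough as long as every reader checks each variable it actually needs and re-reads after running the initializer. The standalone sketch below (not part of the patch) restates that pattern using C11 <stdatomic.h> in place of glibc's internal atomic_load_relaxed/atomic_store_relaxed macros; the names fifo_min_prio, fifo_max_prio, init_fifo_prio, and use_both_bounds are illustrative only.

/* Sketch of the at-least-once, relaxed-MO initialization pattern used in
   tpp.c, expressed with standard C11 atomics.  Racing initializers store
   the same final values, so the writes are idempotent.  */
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int fifo_min_prio = -1;
static _Atomic int fifo_max_prio = -1;

static void
init_fifo_prio (void)
{
  /* Idempotent, relaxed stores: no ordering between the two is needed
     because each value is checked separately by consumers.  */
  atomic_store_explicit (&fifo_max_prio,
                         sched_get_priority_max (SCHED_FIFO),
                         memory_order_relaxed);
  atomic_store_explicit (&fifo_min_prio,
                         sched_get_priority_min (SCHED_FIFO),
                         memory_order_relaxed);
}

/* A consumer that needs both bounds checks both and reloads after
   initializing, mirroring __pthread_tpp_change_priority above.  */
static void
use_both_bounds (void)
{
  int min = atomic_load_explicit (&fifo_min_prio, memory_order_relaxed);
  int max = atomic_load_explicit (&fifo_max_prio, memory_order_relaxed);
  if (min == -1 || max == -1)
    {
      init_fifo_prio ();
      min = atomic_load_explicit (&fifo_min_prio, memory_order_relaxed);
      max = atomic_load_explicit (&fifo_max_prio, memory_order_relaxed);
    }
  printf ("SCHED_FIFO priority range: %d..%d\n", min, max);
}

int
main (void)
{
  use_both_bounds ();
  return 0;
}

The point of the check-then-reload shape is that no acquire/release pairing is required: per-variable write-read coherence guarantees that once a thread has observed a value other than -1, it keeps observing the initialized value, which is why the patch can drop the old atomic_write_barrier in __init_sched_fifo_prio.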