author | Stefan Liebler <stli@linux.ibm.com> | 2019-02-07 15:18:36 +0100 |
---|---|---|
committer | Stefan Liebler <stli@linux.ibm.com> | 2019-02-07 15:39:15 +0100 |
commit | a9f60b1571cc7821cd6bc4dbeba12194d484e7f5 (patch) | |
tree | 0812ab66742709e91d50e9b86c9c588f53bac9d3 | |
parent | 85224b0290ed9302fc3289b0e388792be557f073 (diff) | |
download | glibc-a9f60b1571cc7821cd6bc4dbeba12194d484e7f5.tar.gz glibc-a9f60b1571cc7821cd6bc4dbeba12194d484e7f5.tar.xz glibc-a9f60b1571cc7821cd6bc4dbeba12194d484e7f5.zip |
Add compiler barriers around modifications of the robust mutex list for pthread_mutex_trylock. [BZ #24180]
While debugging a kernel warning, Thomas Gleixner, Sebastian Sewior and Heiko Carstens found a bug in pthread_mutex_trylock due to misordered instructions:

```
   140:   a5 1b 00 01             oill    %r1,1
   144:   e5 48 a0 f0 00 00       mvghi   240(%r10),0   <--- THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
   14a:   e3 10 a0 e0 00 24       stg     %r1,224(%r10) <--- last THREAD_SETMEM of ENQUEUE_MUTEX_PI
```

vs (with compiler barriers):

```
   140:   a5 1b 00 01             oill    %r1,1
   144:   e3 10 a0 e0 00 24       stg     %r1,224(%r10)
   14a:   e5 48 a0 f0 00 00       mvghi   240(%r10),0
```

Please have a look at the discussion: "Re: WARN_ON_ONCE(!new_owner) within wake_futex_pi() triggerede" (https://lore.kernel.org/lkml/20190202112006.GB3381@osiris/)

This patch introduces the same compiler barriers and comments for pthread_mutex_trylock as were introduced for pthread_mutex_lock and pthread_mutex_timedlock by commit 8f9450a0b7a9e78267e8ae1ab1000ebca08e473e "Add compiler barriers around modifications of the robust mutex list."

ChangeLog:

	[BZ #24180]
	* nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock):
	Add compiler barriers and comments.

(cherry picked from commit 823624bdc47f1f80109c9c52dee7939b9386d708)
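To make the failure mode concrete, here is a minimal, self-contained sketch (not from the commit; the struct, field, and function names below are simplified stand-ins for glibc's `robust_head` / `ENQUEUE_MUTEX` machinery). An empty `asm` with a `"memory"` clobber emits no instructions, but it forbids the compiler from moving memory accesses across it, which is exactly the ordering the robust-list protocol needs:

```c
#include <stddef.h>

/* Simplified stand-in for the per-thread robust mutex list head.  */
struct robust_head
{
  void *list_op_pending;   /* operation currently in progress, or NULL  */
  void *list;              /* head of the list of owned robust mutexes  */
};

static __thread struct robust_head rh;

/* Compiler barrier: generates no code, but GCC must not reorder
   memory accesses across it.  */
#define compiler_barrier() __asm ("" ::: "memory")

static void
enqueue_robust (void *list_entry)
{
  /* Announce the pending operation before performing it...  */
  rh.list_op_pending = list_entry;
  compiler_barrier ();
  /* ...actually enqueue (stand-in for ENQUEUE_MUTEX/ENQUEUE_MUTEX_PI)...  */
  rh.list = list_entry;
  compiler_barrier ();
  /* ...and only then clear op_pending.  Without the barriers, the
     compiler may sink this store above the enqueue, as in the s390x
     disassembly shown above.  */
  rh.list_op_pending = NULL;
}
```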
-rw-r--r-- | ChangeLog | 6
-rw-r--r-- | nptl/pthread_mutex_trylock.c | 57
2 files changed, 59 insertions, 4 deletions
```diff
diff --git a/ChangeLog b/ChangeLog
index cb119f36ac..82802f3367 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,9 @@
+2019-02-07  Stefan Liebler  <stli@linux.ibm.com>
+
+	[BZ #24180]
+	* nptl/pthread_mutex_trylock.c (__pthread_mutex_trylock):
+	Add compiler barriers and comments.
+
 2019-02-04  Florian Weimer  <fweimer@redhat.com>
 
 	[BZ #20018]
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index fa90c1d1e6..8e01113b0f 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -94,6 +94,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
     case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 		     &mutex->__data.__list.__next);
+      /* We need to set op_pending before starting the operation.  Also
+	 see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
 
       oldval = mutex->__data.__lock;
       do
@@ -119,7 +122,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 	      /* But it is inconsistent unless marked otherwise.  */
 	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+	      /* We must not enqueue the mutex before we have acquired it.
+		 Also see comments at ENQUEUE_MUTEX.  */
+	      __asm ("" ::: "memory");
 	      ENQUEUE_MUTEX (mutex);
+	      /* We need to clear op_pending after we enqueue the mutex.  */
+	      __asm ("" ::: "memory");
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	      /* Note that we deliberately exist here.  If we fall
@@ -135,6 +143,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 	      int kind = PTHREAD_MUTEX_TYPE (mutex);
 	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
 		{
+		  /* We do not need to ensure ordering wrt another memory
+		     access.  Also see comments at ENQUEUE_MUTEX.  */
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
 		  return EDEADLK;
@@ -142,6 +152,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 
 	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
 		{
+		  /* We do not need to ensure ordering wrt another memory
+		     access.  */
 		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
 				 NULL);
 
@@ -160,6 +172,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 						     id, 0);
 	  if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
 	    {
+	      /* We haven't acquired the lock as it is already acquired by
+		 another owner.  We do not need to ensure ordering wrt another
+		 memory access.  */
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	      return EBUSY;
@@ -173,13 +188,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 	      if (oldval == id)
 		lll_unlock (mutex->__data.__lock,
 			    PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
+	      /* FIXME This violates the mutex destruction requirements.  See
+		 __pthread_mutex_unlock_full.  */
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	      return ENOTRECOVERABLE;
 	    }
 	}
       while ((oldval & FUTEX_OWNER_DIED) != 0);
 
+      /* We must not enqueue the mutex before we have acquired it.
+	 Also see comments at ENQUEUE_MUTEX.  */
+      __asm ("" ::: "memory");
       ENQUEUE_MUTEX (mutex);
+      /* We need to clear op_pending after we enqueue the mutex.  */
+      __asm ("" ::: "memory");
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
       mutex->__data.__owner = id;
@@ -211,10 +233,15 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 	}
 
 	if (robust)
-	  /* Note: robust PI futexes are signaled by setting bit 0.  */
-	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
-			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
-				   | 1));
+	  {
+	    /* Note: robust PI futexes are signaled by setting bit 0.  */
+	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+			   (void *) (((uintptr_t) &mutex->__data.__list.__next)
+				     | 1));
+	    /* We need to set op_pending before starting the operation.  Also
+	       see comments at ENQUEUE_MUTEX.  */
+	    __asm ("" ::: "memory");
+	  }
 
 	oldval = mutex->__data.__lock;
 
@@ -223,12 +250,16 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 	  {
 	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
 	      {
+		/* We do not need to ensure ordering wrt another memory
+		   access.  */
 		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 		return EDEADLK;
 	      }
 
 	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
 	      {
+		/* We do not need to ensure ordering wrt another memory
+		   access.  */
 		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 		/* Just bump the counter.  */
@@ -250,6 +281,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 	  {
 	    if ((oldval & FUTEX_OWNER_DIED) == 0)
 	      {
+		/* We haven't acquired the lock as it is already acquired by
+		   another owner.  We do not need to ensure ordering wrt another
+		   memory access.  */
 		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 		return EBUSY;
@@ -270,6 +304,9 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
 		&& INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
 	      {
+		/* The kernel has not yet finished the mutex owner death.
+		   We do not need to ensure ordering wrt another memory
+		   access.  */
 		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 		return EBUSY;
@@ -287,7 +324,12 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 	    /* But it is inconsistent unless marked otherwise.  */
 	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
+	    /* We must not enqueue the mutex before we have acquired it.
+	       Also see comments at ENQUEUE_MUTEX.  */
+	    __asm ("" ::: "memory");
 	    ENQUEUE_MUTEX (mutex);
+	    /* We need to clear op_pending after we enqueue the mutex.  */
+	    __asm ("" ::: "memory");
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 
 	    /* Note that we deliberately exit here.  If we fall
@@ -310,13 +352,20 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
 					  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
 			      0, 0);
 
+	    /* To the kernel, this will be visible after the kernel has
+	       acquired the mutex in the syscall.  */
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	    return ENOTRECOVERABLE;
 	  }
 
 	if (robust)
 	  {
+	    /* We must not enqueue the mutex before we have acquired it.
+	       Also see comments at ENQUEUE_MUTEX.  */
+	    __asm ("" ::: "memory");
 	    ENQUEUE_MUTEX_PI (mutex);
+	    /* We need to clear op_pending after we enqueue the mutex.  */
+	    __asm ("" ::: "memory");
 	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	  }
```
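A note on the design choice: a compiler barrier, rather than a CPU memory fence, suffices here because the other observer of the robust list is the kernel acting on behalf of this same thread when it dies, so only the program order of the stores as emitted by the compiler matters. In ISO C11 terms the same compiler-only ordering could be written with `atomic_signal_fence`, which, like the empty asm, emits no fence instruction. A hedged sketch (not what glibc uses; `a` and `b` are hypothetical):

```c
#include <stdatomic.h>

extern int a, b;

void
ordered_stores (void)
{
  a = 1;
  /* Compiler-only fence: no instruction is emitted, but the store to A
     may not be reordered after the store to B, matching the effect of
     __asm ("" ::: "memory").  */
  atomic_signal_fence (memory_order_seq_cst);
  b = 1;
}
```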