author     Torvald Riegel <triegel@redhat.com>   2015-07-14 21:58:34 +0200
committer  Torvald Riegel <triegel@redhat.com>   2015-12-23 18:44:53 +0100
commit     389fdf78b2e606387ce9d51f29e5c0a22ad9ad2a (patch)
tree       3c665e20903d53ded06618a794b10d0d28292cc9
parent     7962541a32eff5597bc4207e781cfac8d1bb0d87 (diff)
Do not violate mutex destruction requirements.
POSIX and C++11 require that a thread can destroy a mutex if no other
thread owns the mutex, is blocked on the mutex, or will try to acquire
it in the future.  After destroying the mutex, the thread can reuse or unmap
the underlying memory.  Thus, we must not access a mutex's memory after
releasing it.  Currently, we can load the private flag after releasing
the mutex; this patch fixes that.
See https://sourceware.org/bugzilla/show_bug.cgi?id=13690 for more
background.

We need to call futex_wake on the lock after releasing it, however.  This
is by design, and can lead to spurious wake-ups on unrelated futex words
(e.g., when the mutex memory is reused for another mutex).  This behavior
is documented in the glibc-internal futex API and in recent drafts of the
Linux kernel's futex documentation (see the draft_futex branch of
git://git.kernel.org/pub/scm/docs/man-pages/man-pages.git).
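
As a rough illustration of the requirement (not part of this patch; the names
below are made up and use C11 atomics plus the raw futex syscall instead of
the glibc-internal helpers), an unlock path must read everything it still
needs before the releasing store, because that store may be the last access
this thread is allowed to make to the mutex memory:

#include <stdatomic.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical sketch, not the glibc implementation.  */
static void
unlock_sketch (atomic_int *futex_word, int is_private)
{
  /* Read the private flag *before* the release: afterwards another thread
     may lock, unlock, destroy, and free or reuse the mutex.  */
  int op = is_private ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE;

  /* Releasing exchange; this is the last permitted access to the word.  */
  int old = atomic_exchange_explicit (futex_word, 0, memory_order_release);

  /* Waking after the release is fine by design; at worst it causes a
     spurious wake-up if the memory has been reused for another futex.  */
  if (old > 1)
    syscall (SYS_futex, futex_word, op, 1, NULL, NULL, 0);
}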
-rw-r--r--  ChangeLog                                      12
-rw-r--r--  nptl/pthread_mutex_unlock.c                    12
-rw-r--r--  sysdeps/nptl/lowlevellock.h                    18
-rw-r--r--  sysdeps/unix/sysv/linux/lowlevellock-futex.h    9
-rw-r--r--  sysdeps/unix/sysv/linux/sparc/lowlevellock.h    6
5 files changed, 44 insertions(+), 13 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 9063848e27..4ba6ee4a13 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2015-12-23  Torvald Riegel  <triegel@redhat.com>
+
+	[BZ #13690]
+	* sysdeps/nptl/lowlevellock.h (__lll_unlock): Do not access the lock
+	after releasing it.
+	(__lll_robust_unlock): Likewise.
+	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
+	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_unlock): Likewise.
+	(lll_robust_unlock): Likewise.
+	* sysdeps/unix/sysv/linux/lowlevellock-futex.h (__lll_private_flag):
+	Prevent warnings in callers.
+
 2015-12-23  Florian Weimer  <fweimer@redhat.com>
 
 	* malloc/arena.c (list_lock): Update comment.
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index c078f7ebe3..e2cd52411f 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -230,16 +230,18 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
 	/* One less user.  */
 	--mutex->__data.__nusers;
 
-      /* Unlock.  */
+      /* Unlock.  Load all necessary mutex data before releasing the mutex
+	 to not violate the mutex destruction requirements (see
+	 lll_unlock).  */
+      int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+      int private = (robust
+		     ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
+		     : PTHREAD_MUTEX_PSHARED (mutex));
       if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
 	  || atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock, 0,
 						   THREAD_GETMEM (THREAD_SELF,
 								  tid)))
 	{
-	  int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
-	  int private = (robust
-			 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
-			 : PTHREAD_MUTEX_PSHARED (mutex));
 	  INTERNAL_SYSCALL_DECL (__err);
 	  INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
 			    __lll_private_flag (FUTEX_UNLOCK_PI, private));
diff --git a/sysdeps/nptl/lowlevellock.h b/sysdeps/nptl/lowlevellock.h
index 27f41424ea..7d41ef0335 100644
--- a/sysdeps/nptl/lowlevellock.h
+++ b/sysdeps/nptl/lowlevellock.h
@@ -191,14 +191,21 @@ extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
    that's cast to void.  */
 /* Unconditionally set FUTEX to 0 (not acquired), releasing the lock.  If FUTEX
    was >1 (acquired, possibly with waiters), then wake any waiters.  The waiter
-   that acquires the lock will set FUTEX to >1.  */
+   that acquires the lock will set FUTEX to >1.
+   Evaluate PRIVATE before releasing the lock so that we do not violate the
+   mutex destruction requirements.  Specifically, we need to ensure that
+   another thread can destroy the mutex (and reuse its memory) once it
+   acquires the lock and when there will be no further lock acquisitions;
+   thus, we must not access the lock after releasing it, or those accesses
+   could be concurrent with mutex destruction or reuse of the memory.  */
 #define __lll_unlock(futex, private)                    \
   ((void)                                               \
    ({                                                   \
      int *__futex = (futex);                            \
+     int __private = (private);                         \
      int __oldval = atomic_exchange_rel (__futex, 0);   \
      if (__glibc_unlikely (__oldval > 1))               \
-       lll_futex_wake (__futex, 1, private);            \
+       lll_futex_wake (__futex, 1, __private);          \
    }))
 #define lll_unlock(futex, private)	\
   __lll_unlock (&(futex), private)
@@ -209,14 +216,17 @@ extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
    that's cast to void.  */
 /* Unconditionally set FUTEX to 0 (not acquired), releasing the lock.  If FUTEX
    had FUTEX_WAITERS set then wake any waiters.  The waiter that acquires the
-   lock will set FUTEX_WAITERS.  */
+   lock will set FUTEX_WAITERS.
+   Evaluate PRIVATE before releasing the lock so that we do not violate the
+   mutex destruction requirements (see __lll_unlock).  */
 #define __lll_robust_unlock(futex, private)             \
   ((void)                                               \
    ({                                                   \
      int *__futex = (futex);                            \
+     int __private = (private);                         \
      int __oldval = atomic_exchange_rel (__futex, 0);   \
      if (__glibc_unlikely (__oldval & FUTEX_WAITERS))	\
-       lll_futex_wake (__futex, 1, private);            \
+       lll_futex_wake (__futex, 1, __private);          \
    }))
 #define lll_robust_unlock(futex, private)       \
   __lll_robust_unlock (&(futex), private)
diff --git a/sysdeps/unix/sysv/linux/lowlevellock-futex.h b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
index 59f6627bdb..40825f0306 100644
--- a/sysdeps/unix/sysv/linux/lowlevellock-futex.h
+++ b/sysdeps/unix/sysv/linux/lowlevellock-futex.h
@@ -54,8 +54,13 @@
 #if IS_IN (libc) || IS_IN (rtld)
 /* In libc.so or ld.so all futexes are private.  */
 # ifdef __ASSUME_PRIVATE_FUTEX
-#  define __lll_private_flag(fl, private) \
-  ((fl) | FUTEX_PRIVATE_FLAG)
+#  define __lll_private_flag(fl, private)			\
+  ({								\
+    /* Prevent warnings in callers of this macro.  */		\
+    int __lll_private_flag_priv __attribute__ ((unused));	\
+    __lll_private_flag_priv = (private);			\
+    ((fl) | FUTEX_PRIVATE_FLAG);				\
+  })
 # else
 #  define __lll_private_flag(fl, private) \
   ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
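
The statement expression added above only serves to evaluate the private
argument even though the __ASSUME_PRIVATE_FUTEX shortcut ignores it, so
callers that now cache the flag in a local variable (as __lll_unlock does)
do not trip unused-variable warnings.  A minimal standalone sketch of the
same idiom, with hypothetical names:

/* Hypothetical illustration: a macro whose result ignores PRIV still
   evaluates it, so a caller's local that is only passed to this macro
   counts as used and stays warning-free.  */
#define FLAG_IGNORING_PRIV(fl, priv)                          \
  ({                                                          \
    int flag_priv_dummy __attribute__ ((unused)) = (priv);    \
    (fl) | 0x80;                                              \
  })

static int
caller_sketch (int want_private)
{
  int priv = want_private ? 1 : 0;   /* only consumed by the macro */
  return FLAG_IGNORING_PRIV (0x1, priv);
}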
diff --git a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
index 7608c01d17..9fa73371fc 100644
--- a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
@@ -126,17 +126,19 @@ __lll_robust_timedlock (int *futex, const struct timespec *abstime,
 #define lll_unlock(lock, private) \
   ((void) ({								      \
     int *__futex = &(lock);						      \
+    int __private = (private);						      \
     int __val = atomic_exchange_24_rel (__futex, 0);			      \
     if (__glibc_unlikely (__val > 1))					      \
-      lll_futex_wake (__futex, 1, private);				      \
+      lll_futex_wake (__futex, 1, __private);				      \
   }))
 
 #define lll_robust_unlock(lock, private) \
   ((void) ({								      \
     int *__futex = &(lock);						      \
+    int __private = (private);						      \
     int __val = atomic_exchange_rel (__futex, 0);			      \
     if (__glibc_unlikely (__val & FUTEX_WAITERS))			      \
-      lll_futex_wake (__futex, 1, private);				      \
+      lll_futex_wake (__futex, 1, __private);				      \
   }))
 
 #define lll_islocked(futex) \