author    Rich Felker <dalias@aerifal.cx>  2020-11-19 16:09:16 -0500
committer Rich Felker <dalias@aerifal.cx>  2020-11-19 16:09:16 -0500
commit    d26e0774a59bb7245b205bc8e7d8b35cc2037095
tree      1f9921e83c473b5b1c595c99c20801d7df4d4564 /src/thread/pthread_create.c
parent    167390f05564e0a4d3fcb4329377fd7743267560
pthread_exit: don't __vm_wait under thread list lock
the __vm_wait operation can delay forward progress arbitrarily long if
a thread holding the lock is interrupted by a signal. in a worst case
this can deadlock. any critical section holding the thread list lock
must respect lock ordering contracts and must not take any lock which
is not AS-safe.

to fix, move the determination of thread joinable/detached state to
take place before the killlock and thread list lock are taken. this
requires reverting the atomic state transition if we determine that
the exiting thread is the last thread and must call exit, but that's
easy to do since it's a single-threaded context with application
signals blocked.
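
as a rough illustration of the reordering described above, here is a
minimal C11 sketch of the CAS-then-revert pattern. this is not musl's
code: the stub names, the C11 atomics standing in for musl's a_cas, and
the single lock_stub standing in for killlock plus the thread list lock
are all simplifications for this example.

#include <stdatomic.h>

/* illustrative stand-ins; real musl uses its own a_cas, killlock,
 * and thread list lock primitives */
enum { DT_JOINABLE, DT_EXITING, DT_DETACHED };
static atomic_int detach_state = DT_JOINABLE;

static void vm_wait_stub(void) { /* may block arbitrarily long */ }
static void lock_stub(void) { }
static void unlock_stub(void) { }

static void exit_path_sketch(int is_last_thread, int has_map)
{
	/* resolve joinable/detached state before taking any AS-safe
	 * lock, so the potentially unbounded wait runs with no locks
	 * held */
	int state = DT_JOINABLE;
	atomic_compare_exchange_strong(&detach_state, &state, DT_EXITING);
	/* on success or failure, state now holds the prior value,
	 * matching what a_cas returns */

	if (state == DT_DETACHED && has_map)
		vm_wait_stub(); /* safe: no locks taken yet */

	lock_stub(); /* killlock, then thread list lock, in the patch */

	if (is_last_thread) {
		unlock_stub();
		/* revert the state transition: this is now a
		 * single-threaded context with app signals blocked,
		 * so a plain store cannot race */
		atomic_store(&detach_state, state);
		return; /* the real code calls exit(0) here */
	}

	unlock_stub();
	/* ... unlink from the thread list, unmap the stack, SYS_exit ... */
}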
Diffstat (limited to 'src/thread/pthread_create.c')
 -rw-r--r--  src/thread/pthread_create.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 55744155..250cd0a4 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -69,12 +69,25 @@ _Noreturn void __pthread_exit(void *result)
 
 	__pthread_tsd_run_dtors();
 
+	__block_app_sigs(&set);
+
+	/* This atomic potentially competes with a concurrent pthread_detach
+	 * call; the loser is responsible for freeing thread resources. */
+	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
+
+	if (state==DT_DETACHED && self->map_base) {
+		/* Since __unmapself bypasses the normal munmap code path,
+		 * explicitly wait for vmlock holders first. This must be
+		 * done before any locks are taken, to avoid lock ordering
+		 * issues that could lead to deadlock. */
+		__vm_wait();
+	}
+
 	/* Access to target the exiting thread with syscalls that use
 	 * its kernel tid is controlled by killlock. For detached threads,
 	 * any use past this point would have undefined behavior, but for
 	 * joinable threads it's a valid usage that must be handled.
 	 * Signals must be blocked since pthread_kill must be AS-safe. */
-	__block_app_sigs(&set);
 	LOCK(self->killlock);
 
 	/* The thread list lock must be AS-safe, and thus depends on
@@ -87,6 +100,7 @@ _Noreturn void __pthread_exit(void *result)
 	if (self->next == self) {
 		__tl_unlock();
 		UNLOCK(self->killlock);
+		self->detach_state = state;
 		__restore_sigs(&set);
 		exit(0);
 	}
@@ -125,10 +139,6 @@ _Noreturn void __pthread_exit(void *result)
 	self->prev->next = self->next;
 	self->prev = self->next = self;
 
-	/* This atomic potentially competes with a concurrent pthread_detach
-	 * call; the loser is responsible for freeing thread resources. */
-	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
-
 	if (state==DT_DETACHED && self->map_base) {
 		/* Detached threads must block even implementation-internal
 		 * signals, since they will not have a stack in their last
@@ -140,10 +150,6 @@ _Noreturn void __pthread_exit(void *result)
 		if (self->robust_list.off)
 			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));
 
-		/* Since __unmapself bypasses the normal munmap code path,
-		 * explicitly wait for vmlock holders first. */
-		__vm_wait();
-
 		/* The following call unmaps the thread's stack mapping
 		 * and then exits without touching the stack. */
 		__unmapself(self->map_base, self->map_size);
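
the competing CAS mentioned in the comment lives in pthread_detach.
a hedged sketch of that side of the race follows; detach_sketch,
struct tcb, and join_and_reap_stub are hypothetical names for this
example, not musl identifiers, and the fallback behavior is a
simplified reading of musl's pthread_detach.

#include <stdatomic.h>

enum { DT_JOINABLE, DT_EXITING, DT_DETACHED };

/* hypothetical per-thread record, standing in for musl's pthread_t */
struct tcb { atomic_int detach_state; };

/* placeholder for the join-and-reap fallback; returns 0 like a join */
static int join_and_reap_stub(struct tcb *t) { (void)t; return 0; }

static int detach_sketch(struct tcb *t)
{
	int expected = DT_JOINABLE;
	if (atomic_compare_exchange_strong(&t->detach_state, &expected,
	                                   DT_DETACHED))
		return 0; /* won the race: the exiting thread sees
		           * DT_DETACHED and frees its own resources */
	/* lost the race: the target already moved to DT_EXITING in
	 * pthread_exit, so the caller reaps it instead (musl's real
	 * pthread_detach falls back to joining the thread here) */
	return join_and_reap_stub(t);
}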