author    Ulrich Drepper <drepper@redhat.com>    2006-03-28 04:25:17 +0000
committer Ulrich Drepper <drepper@redhat.com>    2006-03-28 04:25:17 +0000
commit 0f6699ea0554a667de301d46fcfe1baf0d53d094
tree   c9360cea0ccc7c79d4235f43a661f51ffda5492f
parent 5b20043897accf32d33ae775af7413098cd0cec2
* sysdeps/unix/sysv/linux/kernel-features.h: Add
	__ASSUME_SET_ROBUST_LIST.
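
For background, this change builds on the set_robust_list system call added in Linux 2.6.17: each thread registers a struct robust_list_head with the kernel, and if the thread dies while holding robust mutexes, the kernel itself walks the list and wakes waiters. The sketch below shows that registration in isolation, assuming the UAPI definitions from <linux/futex.h>; it is illustrative only, since glibc performs the same registration internally (via INTERNAL_SYSCALL, as in the first hunk below) and a real program must not register a second list.

#include <linux/futex.h>   /* struct robust_list, struct robust_list_head */
#include <stddef.h>
#include <sys/syscall.h>   /* SYS_set_robust_list */
#include <unistd.h>        /* syscall */

/* One robust_list_head per thread.  'list' is a circular list of the
   robust mutexes the thread currently holds, 'futex_offset' is the byte
   offset from a list entry to its futex word, and 'list_op_pending'
   covers the race window while a lock is being acquired or released.  */
static __thread struct robust_list_head robust_head;

static long
register_robust_list (void)
{
  robust_head.list.next = &robust_head.list;   /* empty: points at itself */
  robust_head.futex_offset = 0;                /* assumed layout: futex word first */
  robust_head.list_op_pending = NULL;
  /* Old kernels fail with ENOSYS; callers then fall back to walking the
     list in user space at thread exit, as the second hunk below does.  */
  return syscall (SYS_set_robust_list, &robust_head,
                  sizeof (struct robust_list_head));
}

__ASSUME_SET_ROBUST_LIST records, per architecture and minimum kernel version, that this call is known to be available, so both the runtime probe (__set_robust_list_avail) and the user-space fallback can be compiled out.
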
Diffstat (limited to 'nptl/pthread_create.c')
-rw-r--r--  nptl/pthread_create.c | 46
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index f3d90ecebf..71365a17e8 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -229,6 +229,19 @@ start_thread (void *arg)
   /* Initialize resolver state pointer.  */
   __resp = &pd->res;
 
+#ifdef __NR_set_robust_list
+# ifndef __ASSUME_SET_ROBUST_LIST
+  if (__set_robust_list_avail >= 0)
+# endif
+    {
+      INTERNAL_SYSCALL_DECL (err);
+      /* This call should never fail because the initial call in init.c
+	 succeeded.  */
+      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
+			sizeof (struct robust_list_head));
+    }
+#endif
+
   /* This is where the try/finally block should be created.  For
      compilers without that support we do use setjmp.  */
   struct pthread_unwind_buf unwind_buf;
@@ -310,35 +323,34 @@ start_thread (void *arg)
      the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
   atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
 
+#ifndef __ASSUME_SET_ROBUST_LIST
   /* If this thread has any robust mutexes locked, handle them now.  */
-#if __WORDSIZE == 64
-  __pthread_list_t *robust = pd->robust_list.__next;
-#else
+# if __WORDSIZE == 64
+  void *robust = pd->robust_head.list;
+# else
   __pthread_slist_t *robust = pd->robust_list.__next;
-#endif
-  if (__builtin_expect (robust != &pd->robust_list, 0))
+# endif
+  /* We let the kernel do the notification if it is able to do so.  */
+  if (__set_robust_list_avail < 0
+      && __builtin_expect (robust != &pd->robust_head, 0))
     {
       do
 	{
 	  struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
-	    ((char *) robust - offsetof (struct __pthread_mutex_s, __list));
-	  robust = robust->__next;
+	    ((char *) robust - offsetof (struct __pthread_mutex_s,
+					 __list.__next));
+	  robust = *((void **) robust);
 
-	  this->__list.__next = NULL;
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
+# ifdef __PTHREAD_MUTEX_HAVE_PREV
 	  this->__list.__prev = NULL;
-#endif
+# endif
+	  this->__list.__next = NULL;
 
 	  lll_robust_mutex_dead (this->__lock);
 	}
-      while (robust != &pd->robust_list);
-
-      /* Clean up so that the thread descriptor can be reused.  */
-      pd->robust_list.__next = &pd->robust_list;
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
-      pd->robust_list.__prev = &pd->robust_list;
-#endif
+      while (robust != &pd->robust_head);
     }
+#endif
 
   /* If the thread is detached free the TCB.  */
   if (IS_DETACHED (pd))
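
To see why thread exit must tidy the robust list at all, here is a hedged sketch of the application-level protocol these lists implement: a robust mutex whose owner died reports EOWNERDEAD to the next locker instead of deadlocking it. The names below are the POSIX ones standardized later (pthread_mutexattr_setrobust, pthread_mutex_consistent); glibc of this vintage exported them with an _np suffix. Compile with -pthread.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m;

static void
lock_and_recover (void)
{
  int r = pthread_mutex_lock (&m);
  if (r == EOWNERDEAD)
    {
      /* The previous owner died holding the mutex; either the kernel
         (set_robust_list) or the exit-time walk in start_thread marked
         the lock dead.  Repair the protected data, then declare the
         mutex usable again.  */
      fputs ("recovering state from dead owner\n", stderr);
      pthread_mutex_consistent (&m);
    }
  else if (r != 0)
    return;   /* e.g. ENOTRECOVERABLE */

  /* ... critical section ... */
  pthread_mutex_unlock (&m);
}

int
main (void)
{
  pthread_mutexattr_t a;
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &a);
  pthread_mutexattr_destroy (&a);
  lock_and_recover ();
  return 0;
}

The reason the patch prefers kernel notification (the user-space walk now runs only when __set_robust_list_avail < 0, i.e. when the syscall proved unavailable) is coverage: the exit-time loop in start_thread runs only on an orderly thread exit, while the kernel can mark held mutexes even when the thread or the whole process dies without running any user-space cleanup.
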