author     Ulrich Drepper <drepper@redhat.com>  2006-03-28 04:25:17 +0000
committer  Ulrich Drepper <drepper@redhat.com>  2006-03-28 04:25:17 +0000
commit     0f6699ea0554a667de301d46fcfe1baf0d53d094
tree       c9360cea0ccc7c79d4235f43a661f51ffda5492f /nptl/allocatestack.c
parent     5b20043897accf32d33ae775af7413098cd0cec2
	* sysdeps/unix/sysv/linux/kernel-features.h: Add
	__ASSUME_SET_ROBUST_LIST.
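An __ASSUME_* macro in kernel-features.h records that the minimum supported kernel provides a given syscall, so glibc can drop the runtime fallback path. Below is a minimal sketch of how __ASSUME_SET_ROBUST_LIST could gate a set_robust_list registration; it uses the public syscall(2) wrapper instead of glibc's internal syscall macros, and register_robust_list_sketch is a hypothetical helper, not glibc code:

#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Kernel robust-futex list ABI (matches <linux/futex.h>).  */
struct robust_list { struct robust_list *next; };
struct robust_list_head
{
  struct robust_list list;
  long futex_offset;
  struct robust_list *list_op_pending;
};

/* Hypothetical helper: register HEAD for the calling thread.  */
static int
register_robust_list_sketch (struct robust_list_head *head)
{
#ifdef __ASSUME_SET_ROBUST_LIST
  /* The minimum kernel is known to provide the syscall, so no
     ENOSYS fallback needs to be compiled in.  */
  return syscall (SYS_set_robust_list, head, sizeof (*head));
#else
  /* Older kernels may lack the syscall; the caller would have to
     keep a user-space path for robust-mutex cleanup.  */
  long ret = syscall (SYS_set_robust_list, head, sizeof (*head));
  return ret == -1 ? -1 : 0;
#endif
}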
Diffstat (limited to 'nptl/allocatestack.c')
 nptl/allocatestack.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 046a2470fc..a3ed1a33d3 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -365,12 +365,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 	  /* The process ID is also the same as that of the caller.  */
 	  pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
 
-	  /* List of robust mutexes.  */
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
-	  pd->robust_list.__prev = &pd->robust_list;
-#endif
-	  pd->robust_list.__next = &pd->robust_list;
-
 	  /* Allocate the DTV for this thread.  */
 	  if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
 	    {
@@ -505,12 +499,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 	  /* The process ID is also the same as that of the caller.  */
 	  pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
 
-	  /* List of robust mutexes.  */
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
-	  pd->robust_list.__prev = &pd->robust_list;
-#endif
-	  pd->robust_list.__next = &pd->robust_list;
-
 	  /* Allocate the DTV for this thread.  */
 	  if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
 	    {
@@ -634,6 +622,18 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
      stillborn thread could be canceled while the lock is taken.  */
   pd->lock = LLL_LOCK_INITIALIZER;
 
+  /* The robust mutex lists also need to be initialized
+     unconditionally because the cleanup for the previous stack owner
+     might have happened in the kernel.  */
+  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
+				  - offsetof (pthread_mutex_t,
+					      __data.__list.__next));
+  pd->robust_head.list_op_pending = NULL;
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+  pd->robust_prev = &pd->robust_head;
+#endif
+  pd->robust_head.list = &pd->robust_head;
+
   /* We place the thread descriptor at the end of the stack.  */
   *pdp = pd;
 
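The offsetof arithmetic in the last hunk stores, in robust_head.futex_offset, the distance from each list link (__data.__list.__next) to the mutex's futex word (__data.__lock), so the kernel can locate and wake the futex when a robust-mutex owner dies. Moving the initialization to where pd->lock is reset makes it run on the cached-stack reuse path too, which matters because the kernel may already have walked the previous stack owner's list on exit. The following self-contained illustration shows the same computation using a simplified stand-in layout, not glibc's real pthread_mutex_t:

#include <stddef.h>
#include <stdio.h>

struct robust_list { struct robust_list *next; };

/* Simplified stand-in for a robust mutex.  */
struct mock_mutex
{
  int lock;                 /* futex word the kernel must wake */
  unsigned int count;
  int owner;
  struct robust_list list;  /* links the mutex into the thread's list */
};

int
main (void)
{
  /* Same arithmetic as the diff: distance from the list link to the
     futex word.  It is negative here because 'lock' precedes 'list'.  */
  long futex_offset = (long) offsetof (struct mock_mutex, lock)
                      - (long) offsetof (struct mock_mutex, list.next);

  /* The kernel walks the per-thread list and adds futex_offset to
     each entry's address to find the futex word.  */
  struct mock_mutex m;
  int *futex = (int *) ((char *) &m.list.next + futex_offset);

  printf ("futex_offset = %ld, points at lock: %s\n",
          futex_offset, futex == &m.lock ? "yes" : "no");
  return 0;
}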