-rw-r--r-- | nptl/ChangeLog                                          | 12
-rw-r--r-- | nptl/allocatestack.c                                     | 15
-rw-r--r-- | nptl/descr.h                                             | 43
-rw-r--r-- | nptl/init.c                                              |  6
-rw-r--r-- | nptl/pthread_create.c                                    | 22
-rw-r--r-- | nptl/pthread_mutex_unlock.c                              |  9
-rw-r--r-- | nptl/sysdeps/pthread/pthread.h                           |  8
-rw-r--r-- | nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h    |  8
-rw-r--r-- | nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h  | 19

9 files changed, 96 insertions, 46 deletions
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index 2477d8aa71..073b3649b7 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,5 +1,17 @@
 2006-02-12  Ulrich Drepper  <drepper@redhat.com>
 
+	* allocatestack.c (allocate_stack): Initialize robust_list.
+	* init.c (__pthread_initialize_minimal_internal): Likewise.
+	* descr.h (struct xid_command): Pretty printing.
+	(struct pthread): Use __pthread_list_t or __pthread_slist_t for
+	robust_list.  Adjust macros.
+	* pthread_create.c (start_thread): Adjust robust_list handling.
+	* pthread_mutex_unlock.c: Don't allow unlocking from any thread
+	but the owner for all robust mutex types.
+	* sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h: Define
+	__pthread_list_t and __pthread_slist_t.  Use them in pthread_mutex_t.
+	* sysdeps/pthread/pthread.h: Adjust mutex initializers.
+
 	* sysdeps/unix/sysv/linux/i386/not-cancel.h: Define openat_not_cancel,
 	openat_not_cancel_3, openat64_not_cancel, and
 	openat64_not_cancel_3.
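The first two ChangeLog entries describe one idea applied in two places: a thread's robust_list stops being a nullable head pointer and becomes a list head that points back at itself while the list is empty. The standalone sketch below uses illustrative names (list_node, robust_list_init) rather than the real glibc internals; it only shows the self-referential empty state that allocate_stack and __pthread_initialize_minimal_internal now establish and that the walker in start_thread later relies on.

#include <assert.h>

/* Simplified stand-in for the new __pthread_list_t (illustrative only).  */
typedef struct list_node
{
  struct list_node *prev;
  struct list_node *next;
} list_node;

/* Mirror of what the patch does in allocate_stack and
   __pthread_initialize_minimal_internal: an empty robust list is a
   head whose links point at the head itself.  */
static void
robust_list_init (list_node *head)
{
  head->prev = head;
  head->next = head;
}

/* The list is empty exactly when the head is its own successor; this is
   what the new "robust != &pd->robust_list" checks reduce to.  */
static int
robust_list_empty (const list_node *head)
{
  return head->next == head;
}

int
main (void)
{
  list_node robust_list;
  robust_list_init (&robust_list);
  assert (robust_list_empty (&robust_list));
  return 0;
}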
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index e6bcc2170f..046a2470fc 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -1,5 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005
-   Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -366,6 +365,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       /* The process ID is also the same as that of the caller.  */
       pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
 
+      /* List of robust mutexes.  */
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+      pd->robust_list.__prev = &pd->robust_list;
+#endif
+      pd->robust_list.__next = &pd->robust_list;
+
       /* Allocate the DTV for this thread.  */
       if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
         {
@@ -500,6 +505,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       /* The process ID is also the same as that of the caller.  */
       pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
 
+      /* List of robust mutexes.  */
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+      pd->robust_list.__prev = &pd->robust_list;
+#endif
+      pd->robust_list.__next = &pd->robust_list;
+
       /* Allocate the DTV for this thread.  */
       if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
         {
diff --git a/nptl/descr.h b/nptl/descr.h
index 6dcc574c24..6984138537 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -97,7 +97,7 @@ struct pthread_unwind_buf
 struct xid_command
 {
   int syscall_no;
-  long id[3];
+  long int id[3];
   volatile int cntr;
 };
 
@@ -135,46 +135,45 @@ struct pthread
   pid_t pid;
 
   /* List of robust mutexes the thread is holding.  */
-  struct __pthread_mutex_s *robust_list;
-
 #ifdef __PTHREAD_MUTEX_HAVE_PREV
+  __pthread_list_t robust_list;
+
 # define ENQUEUE_MUTEX(mutex) \
   do { \
-    mutex->__data.__next = THREAD_GETMEM (THREAD_SELF, robust_list); \
-    THREAD_SETMEM (THREAD_SELF, robust_list, &mutex->__data); \
-    if (mutex->__data.__next != NULL) \
-      mutex->__data.__next->__prev = &mutex->__data; \
-    mutex->__data.__prev = NULL; \
+    __pthread_list_t *next = THREAD_GETMEM (THREAD_SELF, robust_list.__next);\
+    next->__prev = &mutex->__data.__list; \
+    mutex->__data.__list.__next = next; \
+    mutex->__data.__list.__prev = &THREAD_SELF->robust_list; \
+    THREAD_SETMEM (THREAD_SELF, robust_list.__next, &mutex->__data.__list); \
   } while (0)
 # define DEQUEUE_MUTEX(mutex) \
   do { \
-    if (mutex->__data.__prev == NULL) \
-      THREAD_SETMEM (THREAD_SELF, robust_list, mutex->__data.__next); \
-    else \
-      mutex->__data.__prev->__next = mutex->__data.__next; \
-    if (mutex->__data.__next != NULL) \
-      mutex->__data.__next->__prev = mutex->__data.__prev; \
-    mutex->__data.__prev = NULL; \
-    mutex->__data.__next = NULL; \
+    mutex->__data.__list.__next->__prev = mutex->__data.__list.__prev; \
+    mutex->__data.__list.__prev->__next = mutex->__data.__list.__next; \
+    mutex->__data.__list.__prev = NULL; \
+    mutex->__data.__list.__next = NULL; \
   } while (0)
 #else
+  __pthread_slist_t robust_list;
+
 # define ENQUEUE_MUTEX(mutex) \
   do { \
-    mutex->__data.__next = THREAD_GETMEM (THREAD_SELF, robust_list); \
-    THREAD_SETMEM (THREAD_SELF, robust_list, &mutex->__data); \
+    mutex->__data.__list.__next \
+      = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
+    THREAD_SETMEM (THREAD_SELF, robust_list.__next, &mutex->__data.__list); \
   } while (0)
 # define DEQUEUE_MUTEX(mutex) \
   do { \
-    struct __pthread_mutex_s *runp = THREAD_GETMEM (THREAD_SELF, robust_list);\
-    if (runp == &mutex->__data) \
+    __pthread_slist_t *runp = THREAD_GETMEM (THREAD_SELF, robust_list.__next);\
+    if (runp == &mutex->__data.__list) \
       THREAD_SETMEM (THREAD_SELF, robust_list, runp->__next); \
     else \
       { \
-	while (runp->__next != &mutex->__data) \
+	while (runp->__next != &mutex->__data.__list) \
	  runp = runp->__next; \
	 \
	runp->__next = runp->__next->__next; \
-	mutex->__data.__next = NULL; \
+	mutex->__data.__list.__next = NULL; \
       } \
   } while (0)
 #endif
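The heart of the descr.h change is that ENQUEUE_MUTEX and DEQUEUE_MUTEX now operate on a circular, intrusively linked list whose head lives in the thread descriptor. The sketch below restates the doubly linked (__PTHREAD_MUTEX_HAVE_PREV) variant as plain functions over a simplified node type, leaving out the THREAD_GETMEM/THREAD_SETMEM accessors; the names node, enqueue and dequeue are illustrative, not glibc's. The prev link is what lets DEQUEUE_MUTEX unlink a mutex in constant time, whereas the singly linked variant kept for the 32-bit layout still walks from the head.

#include <assert.h>
#include <stddef.h>

/* Simplified stand-in for __pthread_list_t.  */
typedef struct node
{
  struct node *prev;
  struct node *next;
} node;

/* Insert N right behind the list head, as the new ENQUEUE_MUTEX does
   with the thread's robust_list head.  */
static void
enqueue (node *head, node *n)
{
  node *next = head->next;
  next->prev = n;
  n->next = next;
  n->prev = head;
  head->next = n;
}

/* Unlink N without knowing where the head is; this constant-time removal
   is what the new DEQUEUE_MUTEX gets from keeping a prev pointer.  */
static void
dequeue (node *n)
{
  n->next->prev = n->prev;
  n->prev->next = n->next;
  n->prev = NULL;
  n->next = NULL;
}

int
main (void)
{
  node head = { &head, &head };
  node a, b;

  enqueue (&head, &a);
  enqueue (&head, &b);          /* head -> b -> a -> head */
  dequeue (&a);                 /* no walk from the head is needed */

  assert (head.next == &b && b.next == &head && b.prev == &head);
  return 0;
}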
diff --git a/nptl/init.c b/nptl/init.c
index 1f79eba62a..cb63ff7a6d 100644
--- a/nptl/init.c
+++ b/nptl/init.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -247,6 +247,10 @@ __pthread_initialize_minimal_internal (void)
   struct pthread *pd = THREAD_SELF;
   INTERNAL_SYSCALL_DECL (err);
   pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+  pd->robust_list.__prev = &pd->robust_list;
+#endif
+  pd->robust_list.__next = &pd->robust_list;
   THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
   THREAD_SETMEM (pd, user_stack, true);
   if (LLL_LOCK_INITIALIZER != 0)
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 94d424b1f2..b1253b2243 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -311,12 +311,17 @@ start_thread (void *arg)
   atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
 
   /* If this thread has any robust mutexes locked, handle them now.  */
-  struct __pthread_mutex_s *robust = THREAD_GETMEM (pd, robust_list);
-  if (__builtin_expect (robust != NULL, 0))
+#if __WORDSIZE == 64
+  __pthread_list_t *robust = pd->robust_list.__next;
+#else
+  __pthread_slist_t *robust = pd->robust_list.__next;
+#endif
+  if (__builtin_expect (robust != &pd->robust_list, 0))
     {
       do
         {
-          struct __pthread_mutex_s *this = robust;
+          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
+            ((char *) robust - offsetof (struct __pthread_mutex_s, __list));
           robust = robust->__next;
 
           assert (lll_mutex_islocked (this->__lock));
@@ -324,17 +329,20 @@ start_thread (void *arg)
           --this->__nusers;
           assert (this->__owner != PTHREAD_MUTEX_NOTRECOVERABLE);
           this->__owner = PTHREAD_MUTEX_OWNERDEAD;
-          this->__next = NULL;
+          this->__list.__next = NULL;
 #ifdef __PTHREAD_MUTEX_HAVE_PREV
-          this->__prev = NULL;
+          this->__list.__prev = NULL;
 #endif
 
           lll_mutex_unlock (this->__lock);
         }
-      while (robust != NULL);
+      while (robust != &pd->robust_list);
 
       /* Clean up so that the thread descriptor can be reused.  */
-      THREAD_SETMEM (pd, robust_list, NULL);
+      pd->robust_list.__next = &pd->robust_list;
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+      pd->robust_list.__prev = &pd->robust_list;
+#endif
     }
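With the list node embedded in the mutex, start_thread only ever holds a pointer to that node, so the patch recovers the enclosing __pthread_mutex_s with offsetof arithmetic. Below is a reduced sketch of that container-of step; struct fake_mutex and its members are made up for illustration and do not reflect the real pthread_mutex_t layout.

#include <assert.h>
#include <stddef.h>

/* Stand-in for the singly linked __pthread_slist_t.  */
typedef struct slist
{
  struct slist *next;
} slist;

/* Made-up structure with an embedded list node, standing in for
   struct __pthread_mutex_s and its new __list member.  */
struct fake_mutex
{
  int lock;
  unsigned int count;
  int owner;
  slist list;
};

int
main (void)
{
  struct fake_mutex m = { 0, 0, 0, { NULL } };

  /* The robust-list walker only has the address of the embedded node...  */
  slist *robust = &m.list;

  /* ...and gets back to the mutex the same way the patched start_thread
     does: subtract the member offset from the node address.  */
  struct fake_mutex *this_mutex = (struct fake_mutex *)
    ((char *) robust - offsetof (struct fake_mutex, list));

  assert (this_mutex == &m);
  return 0;
}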
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index babce51a6f..4d87381065 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -76,15 +76,12 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
       goto robust;
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-      /* Error checking mutex.  */
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
       if (abs (mutex->__data.__owner) != THREAD_GETMEM (THREAD_SELF, tid)
           || ! lll_mutex_islocked (mutex->__data.__lock))
         return EPERM;
-      /* FALLTHROUGH */
-
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
 
       /* If the previous owner died and the caller did not succeed in
          making the state consistent, mark the mutex as unrecoverable
         and make all waiters.  */
diff --git a/nptl/sysdeps/pthread/pthread.h b/nptl/sysdeps/pthread/pthread.h
index d887e37aee..f4935e07b4 100644
--- a/nptl/sysdeps/pthread/pthread.h
+++ b/nptl/sysdeps/pthread/pthread.h
@@ -74,14 +74,14 @@ enum
 
 /* Mutex initializers.  */
 #if __WORDSIZE == 64
 # define PTHREAD_MUTEX_INITIALIZER \
-  { { 0, 0, 0, 0, 0, 0, 0, 0 } }
+  { { 0, 0, 0, 0, 0, 0, { 0, 0 } } }
 # ifdef __USE_GNU
 #  define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
-  { { 0, 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, 0, 0, 0 } }
+  { { 0, 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, 0, { 0, 0 } } }
 #  define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
-  { { 0, 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, 0, 0, 0 } }
+  { { 0, 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, 0, { 0, 0 } } }
 #  define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
-  { { 0, 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, 0, 0, 0 } }
+  { { 0, 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, 0, { 0, 0 } } }
 # endif
 #else
 # define PTHREAD_MUTEX_INITIALIZER \
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h b/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
index 662f3e54a8..f53d0e5a72 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
+++ b/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
@@ -43,6 +43,12 @@ typedef union
 } pthread_attr_t;
 
 
+typedef struct __pthread_internal_slist
+{
+  struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is not exposed on purpose.  */
 typedef union
@@ -59,7 +65,7 @@ typedef union
     __extension__ union
     {
       int __spins;
-      struct __pthread_mutex_s *__next;
+      __pthread_slist_t __list;
     };
   } __data;
   char __size[__SIZEOF_PTHREAD_MUTEX_T];
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h b/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h
index 81942bc86c..693387a266 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/bits/pthreadtypes.h
@@ -57,6 +57,20 @@ typedef union
 } pthread_attr_t;
 
 
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+  struct __pthread_internal_list *__prev;
+  struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+  struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is not exposed on purpose.  */
 typedef union
@@ -74,15 +88,14 @@ typedef union
     int __kind;
 #if __WORDSIZE == 64
     int __spins;
-    struct __pthread_mutex_s *__next;
-    struct __pthread_mutex_s *__prev;
+    __pthread_list_t __list;
 # define __PTHREAD_MUTEX_HAVE_PREV 1
 #else
     unsigned int __nusers;
     __extension__ union
     {
       int __spins;
-      struct __pthread_mutex_s *__next;
+      __pthread_slist_t __list;
     };
 #endif
   } __data;
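The reshaped initializers in pthread.h follow directly from the 64-bit layout change in bits/pthreadtypes.h: the trailing __next/__prev pointer pair becomes a single struct-valued __list member, so its two zeros gain their own brace level while the char __size array that fixes the union's size is untouched. A reduced model of that aggregate-initialization pattern, with illustrative member names only:

/* Stand-ins for __pthread_list_t and the 64-bit __pthread_mutex_s;
   the member names and widths here are illustrative, not the real ABI.  */
typedef struct
{
  void *prev;
  void *next;
} list_t;

typedef struct
{
  int lock;
  unsigned int count;
  int owner;
  unsigned int nusers;
  int kind;
  int spins;
  list_t list;
} mutex_data;

/* Shaped like the new PTHREAD_MUTEX_INITIALIZER: the last two zeros now
   initialize the embedded list struct, hence the extra braces.  */
static mutex_data initializer = { 0, 0, 0, 0, 0, 0, { 0, 0 } };

int
main (void)
{
  (void) initializer;
  return 0;
}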