author | Ulrich Drepper <drepper@redhat.com> | 2000-01-12 11:39:14 +0000 |
---|---|---|
committer | Ulrich Drepper <drepper@redhat.com> | 2000-01-12 11:39:14 +0000 |
commit | ce75c139ac162cf8d7d4a7598503bc3592328b2c (patch) | |
tree | 604f258eac5abfa916634aa002e6fd6bcc35fa82 /linuxthreads | |
parent | 2e8048e533cf8f9ce23400c261cd8528cf37af5e (diff) | |
Update.
2000-01-12  Ulrich Drepper  <drepper@cygnus.com>

	* iconvdata/gconv-modules: Add aliases ISO-IR-199 and ISO-IR-203.
	Reported by Bruno Haible <haible@ilog.fr>.

2000-01-11  Andreas Schwab  <schwab@suse.de>

	* sysdeps/i386/fpu/libm-test-ulps: Adjust some epsilons.

2000-01-10  Thorsten Kukuk  <kukuk@suse.de>

	* nss/getent.c: Add ipv6 support for hosts.

2000-01-05  Philip Blundell  <pb@futuretv.com>

	* sysdeps/unix/sysv/linux/arm/Versions: Add getrlimit, setrlimit,
	getrlimit64, setrlimit64 for GLIBC_2.1.3.
	* sysdeps/unix/sysv/linux/arm/syscalls.list: Add oldgetrlimit,
	oldsetrlimit.
	* sysdeps/unix/sysv/linux/arm/oldsetrlimit64.c: New file.
	* sysdeps/unix/sysv/linux/arm/oldgetrlimit64.c: Likewise.
	* sysdeps/unix/sysv/linux/arm/setrlimit64.c: Likewise.
	* sysdeps/unix/sysv/linux/arm/getrlimit64.c: Likewise.
	* sysdeps/unix/sysv/linux/arm/setrlimit.c: Likewise.
	* sysdeps/unix/sysv/linux/arm/getrlimit.c: Likewise.
	* sysdeps/unix/sysv/linux/arm/Makefile [subdir=resource]
	(sysdep_routines): Add oldgetrlimit64, oldsetrlimit64.
	[subdir=misc] (sysdep_headers): Add sys/elf.h.

2000-01-09  Andreas Jaeger  <aj@suse.de>

	* manual/install.texi (Tools for Compilation): Update required
	compiler version.
	(Configuring and compiling): Restore old comments about
	configparms; modify to reflect current usage.

2000-01-09  Philip Blundell  <philb@gnu.org>

	* sysdeps/posix/getaddrinfo.c (gaih_inet): Don't attempt name
	resolution if the hints included AI_NUMERICHOST.
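As a rough illustration of the getaddrinfo() change noted above (AI_NUMERICHOST suppressing name resolution), here is a minimal sketch of the intended behavior. The exact error code for a non-numeric node name is an assumption on my part (EAI_NONAME is the typical result); only the standard <netdb.h> interfaces are used.

```c
/* Sketch: with AI_NUMERICHOST set, getaddrinfo() must not fall back to the
   resolver, so a non-numeric node name fails quickly instead of triggering
   a DNS lookup.  */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>

int main (void)
{
  struct addrinfo hints, *res;
  int err;

  memset (&hints, 0, sizeof hints);
  hints.ai_family = AF_UNSPEC;
  hints.ai_flags = AI_NUMERICHOST;   /* accept only numeric addresses */

  err = getaddrinfo ("127.0.0.1", NULL, &hints, &res);   /* numeric: succeeds */
  if (err == 0)
    freeaddrinfo (res);

  err = getaddrinfo ("www.gnu.org", NULL, &hints, &res); /* non-numeric: fails,
                                                            no name resolution */
  if (err != 0)
    printf ("non-numeric name rejected: %s\n", gai_strerror (err));

  return 0;
}
```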
Diffstat (limited to 'linuxthreads')
-rw-r--r-- | linuxthreads/internals.h                         |  19
-rw-r--r-- | linuxthreads/manager.c                           |  19
-rw-r--r-- | linuxthreads/pthread.c                           |  10
-rw-r--r-- | linuxthreads/queue.h                             |   5
-rw-r--r-- | linuxthreads/rwlock.c                            | 246
-rw-r--r-- | linuxthreads/sysdeps/pthread/bits/pthreadtypes.h |   2
-rw-r--r-- | linuxthreads/sysdeps/pthread/pthread.h           |   1
7 files changed, 281 insertions, 21 deletions
diff --git a/linuxthreads/internals.h b/linuxthreads/internals.h
index a9e262b484..3fcec42630 100644
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -107,6 +107,22 @@ struct pthread_atomic {
   int p_spinlock;
 };
 
+/* Context info for read write locks.  The pthread_rwlock_info structure
+   is information about a lock that has been read-locked by the thread
+   in whose list this structure appears.  The pthread_rwlock_context
+   is embedded in the thread context and contains a pointer to the
+   head of the list of lock info structures, as well as a count of
+   read locks that are untracked, because no info structure could be
+   allocated for them.  */
+
+struct _pthread_rwlock_t;
+
+typedef struct _pthread_rwlock_info {
+  struct _pthread_rwlock_info *pr_next;
+  struct _pthread_rwlock_t *pr_lock;
+  int pr_lock_count;
+} pthread_readlock_info;
+
 struct _pthread_descr_struct {
   pthread_descr p_nextlive, p_prevlive;
                                 /* Double chaining of active threads */
@@ -149,6 +165,9 @@ struct _pthread_descr_struct {
                                    called on thread */
   char p_woken_by_cancel;       /* cancellation performed wakeup */
   pthread_extricate_if *p_extricate; /* See above */
+  pthread_readlock_info *p_readlock_list;  /* List of readlock info structs */
+  pthread_readlock_info *p_readlock_free;  /* Free list of structs */
+  int p_untracked_readlock_count;       /* Readlocks not tracked by list */
   struct __res_state *p_resp;   /* Pointer to resolver state */
   struct __res_state p_res;     /* per-thread resolver state */
   /* New elements must be added at the end.  */
diff --git a/linuxthreads/manager.c b/linuxthreads/manager.c
index 78d4aaaaca..3a6f085bbf 100644
--- a/linuxthreads/manager.c
+++ b/linuxthreads/manager.c
@@ -497,6 +497,8 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
 static void pthread_free(pthread_descr th)
 {
   pthread_handle handle;
+  pthread_readlock_info *iter, *next;
+
   ASSERT(th->p_exited);
   /* Make the handle invalid */
   handle = thread_handle(th->p_tid);
@@ -509,6 +511,23 @@ static void pthread_free(pthread_descr th)
 #endif
   /* One fewer threads in __pthread_handles */
   __pthread_handles_num--;
+
+  /* Destroy read lock list, and list of free read lock structures.
+     If the former is not empty, it means the thread exited while
+     holding read locks! */
+
+  for (iter = th->p_readlock_list; iter != NULL; iter = next)
+    {
+      next = iter->pr_next;
+      free(iter);
+    }
+
+  for (iter = th->p_readlock_free; iter != NULL; iter = next)
+    {
+      next = iter->pr_next;
+      free(iter);
+    }
+
   /* If initial thread, nothing to free */
   if (th == &__pthread_initial_thread) return;
   if (!th->p_userstack)
diff --git a/linuxthreads/pthread.c b/linuxthreads/pthread.c
index 34d1de180c..5f544c75d5 100644
--- a/linuxthreads/pthread.c
+++ b/linuxthreads/pthread.c
@@ -74,7 +74,10 @@ struct _pthread_descr_struct __pthread_initial_thread = {
   {{{0, }}, 0, NULL},           /* td_eventbuf_t p_eventbuf */
   ATOMIC_INITIALIZER,           /* struct pthread_atomic p_resume_count */
   0,                            /* char p_woken_by_cancel */
-  NULL                          /* struct pthread_extricate_if *p_extricate */
+  NULL,                         /* struct pthread_extricate_if *p_extricate */
+  NULL,                         /* pthread_readlock_info *p_readlock_list; */
+  NULL,                         /* pthread_readlock_info *p_readlock_free; */
+  0                             /* int p_untracked_readlock_count; */
 };
 
 /* Descriptor of the manager thread; none of this is used but the error
@@ -122,7 +125,10 @@ struct _pthread_descr_struct __pthread_manager_thread = {
   {{{0, }}, 0, NULL},           /* td_eventbuf_t p_eventbuf */
   ATOMIC_INITIALIZER,           /* struct pthread_atomic p_resume_count */
   0,                            /* char p_woken_by_cancel */
-  NULL                          /* struct pthread_extricate_if *p_extricate */
+  NULL,                         /* struct pthread_extricate_if *p_extricate */
+  NULL,                         /* pthread_readlock_info *p_readlock_list; */
+  NULL,                         /* pthread_readlock_info *p_readlock_free; */
+  0                             /* int p_untracked_readlock_count; */
 };
 
 /* Pointer to the main thread (the father of the thread manager thread) */
diff --git a/linuxthreads/queue.h b/linuxthreads/queue.h
index f87322f84a..28bd75531c 100644
--- a/linuxthreads/queue.h
+++ b/linuxthreads/queue.h
@@ -54,3 +54,8 @@ static inline int remove_from_queue(pthread_descr * q, pthread_descr th)
   }
   return 0;
 }
+
+static inline int queue_is_empty(pthread_descr * q)
+{
+  return *q == NULL;
+}
diff --git a/linuxthreads/rwlock.c b/linuxthreads/rwlock.c
index 1d78b78cdf..7b472e284c 100644
--- a/linuxthreads/rwlock.c
+++ b/linuxthreads/rwlock.c
@@ -1,5 +1,5 @@
 /* Read-write lock implementation.
-   Copyright (C) 1998 Free Software Foundation, Inc.
+   Copyright (C) 1998, 2000 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Xavier Leroy <Xavier.Leroy@inria.fr>
    and Ulrich Drepper <drepper@cygnus.com>, 1998.
@@ -21,11 +21,167 @@
 
 #include <errno.h>
 #include <pthread.h>
+#include <stdlib.h>
 #include "internals.h"
 #include "queue.h"
 #include "spinlock.h"
 #include "restart.h"
 
+/*
+ * Check whether the calling thread already owns one or more read locks on the
+ * specified lock. If so, return a pointer to the read lock info structure
+ * corresponding to that lock.
+ */
+
+static pthread_readlock_info *
+rwlock_is_in_list(pthread_descr self, pthread_rwlock_t *rwlock)
+{
+  pthread_readlock_info *info;
+
+  for (info = self->p_readlock_list; info != NULL; info = info->pr_next)
+    {
+      if (info->pr_lock == rwlock)
+        return info;
+    }
+
+  return NULL;
+}
+
+/*
+ * Add a new lock to the thread's list of locks for which it has a read lock.
+ * A new info node must be allocated for this, which is taken from the thread's
+ * free list, or by calling malloc. If malloc fails, a null pointer is
+ * returned. Otherwise the lock info structure is initialized and pushed
+ * onto the thread's list.
+ */
+
+static pthread_readlock_info *
+rwlock_add_to_list(pthread_descr self, pthread_rwlock_t *rwlock)
+{
+  pthread_readlock_info *info = self->p_readlock_free;
+
+  if (info != NULL)
+    self->p_readlock_free = info->pr_next;
+  else
+    info = malloc(sizeof *info);
+
+  if (info == NULL)
+    return NULL;
+
+  info->pr_lock_count = 1;
+  info->pr_lock = rwlock;
+  info->pr_next = self->p_readlock_list;
+  self->p_readlock_list = info;
+
+  return info;
+}
+
+/*
+ * If the thread owns a read lock over the given pthread_rwlock_t,
+ * and this read lock is tracked in the thread's lock list,
+ * this function returns a pointer to the info node in that list.
+ * It also decrements the lock count within that node, and if
+ * it reaches zero, it removes the node from the list.
+ * If nothing is found, it returns a null pointer.
+ */
+
+static pthread_readlock_info *
+rwlock_remove_from_list(pthread_descr self, pthread_rwlock_t *rwlock)
+{
+  pthread_readlock_info **pinfo;
+
+  for (pinfo = &self->p_readlock_list; *pinfo != NULL; pinfo = &(*pinfo)->pr_next)
+    {
+      if ((*pinfo)->pr_lock == rwlock)
+        {
+          pthread_readlock_info *info = *pinfo;
+          if (--info->pr_lock_count == 0)
+            *pinfo = info->pr_next;
+          return info;
+        }
+    }
+
+  return NULL;
+}
+
+/*
+ * This function checks whether the conditions are right to place a read lock.
+ * It returns 1 if so, otherwise zero. The rwlock's internal lock must be
+ * locked upon entry.
+ */
+
+static int
+rwlock_can_rdlock(pthread_rwlock_t *rwlock, int have_lock_already)
+{
+  /* Can't readlock; it is write locked. */
+  if (rwlock->__rw_writer != NULL)
+    return 0;
+
+  /* Lock prefers readers; get it. */
+  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
+    return 1;
+
+  /* Lock prefers writers, but none are waiting. */
+  if (queue_is_empty(&rwlock->__rw_write_waiting))
+    return 1;
+
+  /* Writers are waiting, but this thread already has a read lock */
+  if (have_lock_already)
+    return 1;
+
+  /* Writers are waiting, and this is a new lock */
+  return 0;
+}
+
+/*
+ * This function helps support brain-damaged recursive read locking
+ * semantics required by Unix 98, while maintaining write priority.
+ * This basically determines whether this thread already holds a read lock
+ * already. It returns 1 if so, otherwise it returns 0.
+ *
+ * If the thread has any ``untracked read locks'' then it just assumes
+ * that this lock is among them, just to be safe, and returns 1.
+ *
+ * Also, if it finds the thread's lock in the list, it sets the pointer
+ * referenced by pexisting to refer to the list entry.
+ *
+ * If the thread has no untracked locks, and the lock is not found
+ * in its list, then it is added to the list. If this fails,
+ * then *pout_of_mem is set to 1.
+ */
+
+static int
+rwlock_have_already(pthread_descr *pself, pthread_rwlock_t *rwlock,
+    pthread_readlock_info **pexisting, int *pout_of_mem)
+{
+  pthread_readlock_info *existing = NULL;
+  int out_of_mem = 0, have_lock_already = 0;
+  pthread_descr self = *pself;
+
+  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
+    {
+      if (!self)
+        self = thread_self();
+
+      existing = rwlock_is_in_list(self, rwlock);
+
+      if (existing != NULL || self->p_untracked_readlock_count > 0)
+        have_lock_already = 1;
+      else
+        {
+          existing = rwlock_add_to_list(self, rwlock);
+          if (existing == NULL)
+            out_of_mem = 1;
+        }
+    }
+
+  *pout_of_mem = out_of_mem;
+  *pexisting = existing;
+  *pself = self;
+
+  return have_lock_already;
+}
+
 int
 pthread_rwlock_init (pthread_rwlock_t *rwlock,
                      const pthread_rwlockattr_t *attr)
@@ -68,24 +224,26 @@ pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
   return 0;
 }
 
-
 int
 pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
 {
   pthread_descr self = NULL;
+  pthread_readlock_info *existing;
+  int out_of_mem, have_lock_already;
 
-  while (1)
+  have_lock_already = rwlock_have_already(&self, rwlock,
+      &existing, &out_of_mem);
+
+  for (;;)
     {
+      if (self == NULL)
+        self = thread_self ();
+
       __pthread_lock (&rwlock->__rw_lock, self);
-      if (rwlock->__rw_writer == NULL
-          || (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
-              && rwlock->__rw_readers != 0))
-        /* We can add a reader lock.  */
+
+      if (rwlock_can_rdlock(rwlock, have_lock_already))
         break;
-      /* Suspend ourselves, then try again */
-      if (self == NULL)
-        self = thread_self ();
+
       enqueue (&rwlock->__rw_read_waiting, self);
       __pthread_unlock (&rwlock->__rw_lock);
       suspend (self); /* This is not a cancellation point */
@@ -94,26 +252,56 @@ pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
   ++rwlock->__rw_readers;
   __pthread_unlock (&rwlock->__rw_lock);
 
+  if (have_lock_already || out_of_mem)
+    {
+      if (existing != NULL)
+        existing->pr_lock_count++;
+      else
+        self->p_untracked_readlock_count++;
+    }
+
   return 0;
 }
 
-
 int
 pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
 {
-  int result = EBUSY;
+  pthread_descr self = thread_self();
+  pthread_readlock_info *existing;
+  int out_of_mem, have_lock_already;
+  int retval = EBUSY;
 
-  __pthread_lock (&rwlock->__rw_lock, NULL);
-  if (rwlock->__rw_writer == NULL
-      || (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
-          && rwlock->__rw_readers != 0))
+  have_lock_already = rwlock_have_already(&self, rwlock,
+      &existing, &out_of_mem);
+
+  __pthread_lock (&rwlock->__rw_lock, self);
+
+  /* 0 is passed to here instead of have_lock_already.
+     This is to meet Single Unix Spec requirements:
+     if writers are waiting, pthread_rwlock_tryrdlock
+     does not acquire a read lock, even if the caller has
+     one or more read locks already. */
+
+  if (rwlock_can_rdlock(rwlock, 0))
     {
       ++rwlock->__rw_readers;
-      result = 0;
+      retval = 0;
     }
+
   __pthread_unlock (&rwlock->__rw_lock);
 
-  return result;
+  if (retval == 0)
+    {
+      if (have_lock_already || out_of_mem)
+        {
+          if (existing != NULL)
+            existing->pr_lock_count++;
+          else
+            self->p_untracked_readlock_count++;
+        }
+    }
+
+  return retval;
 }
 
@@ -210,6 +398,28 @@ pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
       __pthread_unlock (&rwlock->__rw_lock);
       if (th != NULL)
         restart (th);
+
+      /* Recursive lock fixup */
+
+      if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
+        {
+          pthread_descr self = thread_self();
+          pthread_readlock_info *victim = rwlock_remove_from_list(self, rwlock);
+
+          if (victim != NULL)
+            {
+              if (victim->pr_lock_count == 0)
+                {
+                  victim->pr_next = self->p_readlock_free;
+                  self->p_readlock_free = victim;
+                }
+            }
+          else
+            {
+              if (self->p_untracked_readlock_count > 0)
+                self->p_untracked_readlock_count--;
+            }
+        }
     }
 
   return 0;
diff --git a/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h b/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
index fbb10ed5c7..db4c3790ce 100644
--- a/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
+++ b/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
@@ -95,7 +95,7 @@ typedef int pthread_once_t;
 
 #ifdef __USE_UNIX98
 /* Read-write locks.  */
-typedef struct
+typedef struct _pthread_rwlock_t
 {
   struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
   int __rw_readers;             /* Number of readers */
diff --git a/linuxthreads/sysdeps/pthread/pthread.h b/linuxthreads/sysdeps/pthread/pthread.h
index b3606f7d08..925db09b86 100644
--- a/linuxthreads/sysdeps/pthread/pthread.h
+++ b/linuxthreads/sysdeps/pthread/pthread.h
@@ -97,6 +97,7 @@ enum
 {
   PTHREAD_RWLOCK_PREFER_READER_NP,
   PTHREAD_RWLOCK_PREFER_WRITER_NP,
+  PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,
   PTHREAD_RWLOCK_DEFAULT_NP = PTHREAD_RWLOCK_PREFER_WRITER_NP
 };
 #endif  /* Unix98 */
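For context, the rwlock changes above are what make the following usage pattern safe under the default writer-preferring lock kind: a thread that already holds a read lock may take another read lock without deadlocking behind a queued writer, while pthread_rwlock_tryrdlock() still refuses new read locks while writers are waiting. This is only an illustrative single-threaded sketch (no competing writer thread is created); pthread_rwlockattr_setkind_np() and the *_NP constants are the non-portable glibc interfaces visible in the pthread.h hunk, and building it typically needs a feature-test macro such as _GNU_SOURCE plus -lpthread.

```c
/* Sketch: recursive read locking on a writer-preferring rwlock.
   Per Unix 98, the second pthread_rwlock_rdlock() by the same thread must
   succeed even if a writer queued in between; pthread_rwlock_tryrdlock()
   would instead return EBUSY while writers are waiting.  */
#define _GNU_SOURCE             /* expose the *_NP rwlock kinds */
#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t lock;

int main (void)
{
  pthread_rwlockattr_t attr;

  pthread_rwlockattr_init (&attr);
  /* Writer preference is already the glibc default (PTHREAD_RWLOCK_DEFAULT_NP);
     it is set explicitly here for clarity.  */
  pthread_rwlockattr_setkind_np (&attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
  pthread_rwlock_init (&lock, &attr);
  pthread_rwlockattr_destroy (&attr);

  pthread_rwlock_rdlock (&lock);   /* first read lock */
  pthread_rwlock_rdlock (&lock);   /* recursive read lock: must not block,
                                      even if a writer is now waiting */

  if (pthread_rwlock_tryrdlock (&lock) == 0)  /* would be EBUSY if writers wait */
    pthread_rwlock_unlock (&lock);

  pthread_rwlock_unlock (&lock);
  pthread_rwlock_unlock (&lock);
  pthread_rwlock_destroy (&lock);
  return 0;
}
```

Each successful rdlock (including a successful tryrdlock) must be balanced by its own unlock; the per-thread p_readlock_list added in internals.h is what lets pthread_rwlock_unlock() tell a recursive release apart from the final one.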