author     Ulrich Drepper <drepper@redhat.com>    2000-04-11 17:03:03 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2000-04-11 17:03:03 +0000
commit     de26253715b91e7fd4a9854fe836baef86dbc7af (patch)
tree       fad01a7fbfc9ebc7121551e0d8877c3fed149011 /linuxthreads/spinlock.c
parent     e7c036b39ef12abc7ff131982df75e3ec35c0f31 (diff)
Update.
2000-04-11  Ulrich Drepper  <drepper@redhat.com>

	* internals.h: Define MEMORY_BARRIER as empty if not defined already.
	* spinlock.c (__pthread_lock): Add memory barriers.
	(__pthread_unlock): Likewise.
	* sysdeps/alpha/pt-machine.h (MEMORY_BARRIER): Define using mb
	instruction.
	(RELEASE): Not needed anymore.
	(__compare_and_swap): Mark asm as modifying memory.
	* sysdeps/powerpc/pt-machine.h (sync): Remove.  Replace with definition
	of MEMORY_BARRIER.
	(__compare_and_swap): Use MEMORY_BARRIER instead of sync.
	* sysdeps/sparc/sparc32/pt-machine.h (RELEASE): Not needed anymore.
	(MEMORY_BARRIER): Define using stbar.
	* sysdeps/sparc/sparc64/pt-machine.h (MEMORY_BARRIER): Define using
	stbar.
	(__compare_and_swap): Use MEMORY_BARRIER to ensure ordering.
	Patch by Xavier Leroy <Xavier.Leroy@inria.fr> based on comments by
	Mike Burrows <m3b@pa.dec.com>.
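To make the pt-machine.h side of the entry concrete, here is a minimal, illustrative sketch (not the actual glibc headers) of how MEMORY_BARRIER could be defined per architecture with GCC-style inline assembly, using the mb, sync, and stbar instructions named above; the fallback mirrors the empty default in internals.h but is written as a compiler barrier so the sketch compiles anywhere.

/* Illustrative sketch only -- the real definitions live in the
   architecture-specific pt-machine.h headers and in internals.h.  */
#if defined(__alpha__)
/* Alpha: the mb instruction is a full memory barrier.  */
# define MEMORY_BARRIER() __asm__ __volatile__ ("mb" : : : "memory")
#elif defined(__powerpc__)
/* PowerPC: sync orders all prior storage accesses before later ones.  */
# define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory")
#elif defined(__sparc__)
/* SPARC: stbar orders earlier stores before later stores.  */
# define MEMORY_BARRIER() __asm__ __volatile__ ("stbar" : : : "memory")
#else
/* internals.h defines MEMORY_BARRIER as empty when a port provides
   none; a pure compiler barrier is used here so the sketch builds.  */
# define MEMORY_BARRIER() __asm__ __volatile__ ("" : : : "memory")
#endif

int main(void)
{
  MEMORY_BARRIER();
  return 0;
}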
Diffstat (limited to 'linuxthreads/spinlock.c')
-rw-r--r--   linuxthreads/spinlock.c   17
1 file changed, 17 insertions, 0 deletions
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
index b1a99d9753..e1c40c05d0 100644
--- a/linuxthreads/spinlock.c
+++ b/linuxthreads/spinlock.c
@@ -54,6 +54,9 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
     if (self != NULL) {
       ASSERT(self->p_nextlock == NULL);
       THREAD_SETMEM(self, p_nextlock, (pthread_descr) oldstatus);
+      /* Make sure the store in p_nextlock completes before performing
+         the compare-and-swap */
+      MEMORY_BARRIER();
     }
   } while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
                              &lock->__spinlock));
@@ -108,8 +111,17 @@ again:
       maxprio = thr->p_priority;
     }
     ptr = &(thr->p_nextlock);
+    /* Prevent reordering of the load of lock->__status above and the
+       load of *ptr below, as well as reordering of *ptr between
+       several iterations of the while loop.  Some processors (e.g.
+       multiprocessor Alphas) could perform such reordering even though
+       the loads are dependent. */
+    MEMORY_BARRIER();
     thr = *ptr;
   }
+  /* Prevent reordering of the load of lock->__status above and
+     thr->p_nextlock below */
+  MEMORY_BARRIER();
   /* Remove max prio thread from waiting list. */
   if (maxptr == (pthread_descr *) &lock->__status) {
     /* If max prio thread is at head, remove it with compare-and-swap
@@ -124,6 +136,9 @@ again:
     thr = *maxptr;
     *maxptr = thr->p_nextlock;
   }
+  /* Prevent reordering of store to *maxptr above and store to thr->p_nextlock
+     below */
+  MEMORY_BARRIER();
   /* Wake up the selected waiting thread */
   thr->p_nextlock = NULL;
   restart(thr);
@@ -149,6 +164,8 @@ int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
   } else {
     res = 0;
   }
+  /* Prevent reordering of store to *ptr above and store to *spinlock below */
+  MEMORY_BARRIER();
   *spinlock = 0;
   return res;
 }
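The first hunk's requirement, that the store linking the waiter into the list become visible before the compare-and-swap that publishes it, can be condensed into a standalone sketch. The names (node, lock_word, cas, enqueue) are made up for illustration, GCC's __sync_bool_compare_and_swap stands in for glibc's compare_and_swap, and MEMORY_BARRIER is reduced to a compiler barrier so the example builds on any host.

#include <stdio.h>

/* Compiler barrier only; a real port would emit mb/sync/stbar as in
   the patch above.  */
#define MEMORY_BARRIER() __asm__ __volatile__ ("" : : : "memory")

struct node { struct node *next; };

static long lock_word;   /* 0 = free, otherwise head of the waiter list */

/* Stand-in for glibc's compare_and_swap, built on a GCC builtin.  */
static int cas(long *p, long oldval, long newval)
{
  return __sync_bool_compare_and_swap(p, oldval, newval);
}

/* Mirror of the __pthread_lock enqueue loop: link behind the current
   head, then publish with a compare-and-swap.  The barrier keeps the
   store to self->next from being reordered past the CAS.  */
static void enqueue(struct node *self)
{
  long oldstatus;
  do {
    oldstatus = lock_word;
    self->next = (struct node *) oldstatus;
    MEMORY_BARRIER();
  } while (!cas(&lock_word, oldstatus, (long) self));
}

int main(void)
{
  struct node n = { 0 };
  enqueue(&n);
  printf("waiter published at %p\n", (void *) lock_word);
  return 0;
}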