Diffstat (limited to 'linuxthreads')
-rw-r--r--  linuxthreads/condvar.c                             |   4
-rw-r--r--  linuxthreads/internals.h                           |   3
-rw-r--r--  linuxthreads/mutex.c                               |  14
-rw-r--r--  linuxthreads/pthread.c                             |  20
-rw-r--r--  linuxthreads/spinlock.c                            | 134
-rw-r--r--  linuxthreads/spinlock.h                            |  14
-rw-r--r--  linuxthreads/sysdeps/pthread/bits/pthreadtypes.h   |   5
-rw-r--r--  linuxthreads/sysdeps/pthread/pthread.h             |   4
8 files changed, 147 insertions, 51 deletions
diff --git a/linuxthreads/condvar.c b/linuxthreads/condvar.c
index 3bc672e909..5e1dd712d7 100644
--- a/linuxthreads/condvar.c
+++ b/linuxthreads/condvar.c
@@ -63,7 +63,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 
   /* Check whether the mutex is locked and owned by this thread.  */
   if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP  
-      && mutex->__m_kind != PTHREAD_MUTEX_FAST_NP
+      && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
       && mutex->__m_owner != self)
     return EINVAL;
 
@@ -124,7 +124,7 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
 
   /* Check whether the mutex is locked and owned by this thread.  */
   if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP  
-      && mutex->__m_kind != PTHREAD_MUTEX_FAST_NP
+      && mutex->__m_kind != PTHREAD_MUTEX_ADAPTIVE_NP
       && mutex->__m_owner != self)
     return EINVAL;
 
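
The mutex-kind check above changes because this patch renames the old PTHREAD_MUTEX_FAST_NP kind to PTHREAD_MUTEX_ADAPTIVE_NP and reorders the kind enum so that PTHREAD_MUTEX_TIMED_NP takes the first slot (see the pthread.h hunk at the end of the patch). A minimal sketch of requesting the adaptive kind through the attribute interface; hypothetical usage for illustration, not part of the patch:

    #define _GNU_SOURCE
    #include <pthread.h>

    static pthread_mutex_t m;

    static void init_adaptive_mutex(void)
    {
      pthread_mutexattr_t attr;

      pthread_mutexattr_init(&attr);
      /* Request the adaptive kind introduced by this patch (the renamed
         "fast" kind).  Hypothetical usage, not patch code.  */
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
      pthread_mutex_init(&m, &attr);
      pthread_mutexattr_destroy(&attr);
    }
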
diff --git a/linuxthreads/internals.h b/linuxthreads/internals.h
index 405af3c0df..e3fbf8c521 100644
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -287,6 +287,9 @@ extern volatile td_thr_events_t __pthread_threads_events;
 /* Pointer to descriptor of thread with last event.  */
 extern volatile pthread_descr __pthread_last_event;
 
+/* Flag which tells whether we are executing on an SMP kernel. */
+extern int __pthread_smp_kernel;
+
 /* Return the handle corresponding to a thread id */
 
 static inline pthread_handle thread_handle(pthread_t id)

diff --git a/linuxthreads/mutex.c b/linuxthreads/mutex.c
index 8b137043b2..9b9bcee9b9 100644
--- a/linuxthreads/mutex.c
+++ b/linuxthreads/mutex.c
@@ -38,7 +38,7 @@ strong_alias (__pthread_mutex_init, pthread_mutex_init)
 
 int __pthread_mutex_destroy(pthread_mutex_t * mutex)
 {
-  if (mutex->__m_lock.__status != 0) return EBUSY;
+  if ((mutex->__m_lock.__status & 1) != 0) return EBUSY;
   return 0;
 }
 strong_alias (__pthread_mutex_destroy, pthread_mutex_destroy)
@@ -49,7 +49,7 @@ int __pthread_mutex_trylock(pthread_mutex_t * mutex)
   int retcode;
 
   switch(mutex->__m_kind) {
-  case PTHREAD_MUTEX_FAST_NP:
+  case PTHREAD_MUTEX_ADAPTIVE_NP:
     retcode = __pthread_trylock(&mutex->__m_lock);
     return retcode;
   case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -84,7 +84,7 @@ int __pthread_mutex_lock(pthread_mutex_t * mutex)
   pthread_descr self;
 
   switch(mutex->__m_kind) {
-  case PTHREAD_MUTEX_FAST_NP:
+  case PTHREAD_MUTEX_ADAPTIVE_NP:
     __pthread_lock(&mutex->__m_lock, NULL);
     return 0;
   case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -122,7 +122,7 @@ int __pthread_mutex_timedlock (pthread_mutex_t *mutex,
     return EINVAL;
 
   switch(mutex->__m_kind) {
-  case PTHREAD_MUTEX_FAST_NP:
+  case PTHREAD_MUTEX_ADAPTIVE_NP:
     __pthread_lock(&mutex->__m_lock, NULL);
     return 0;
   case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -158,7 +158,7 @@ strong_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)
 int __pthread_mutex_unlock(pthread_mutex_t * mutex)
 {
   switch (mutex->__m_kind) {
-  case PTHREAD_MUTEX_FAST_NP:
+  case PTHREAD_MUTEX_ADAPTIVE_NP:
     __pthread_unlock(&mutex->__m_lock);
     return 0;
   case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -170,7 +170,7 @@ int __pthread_mutex_unlock(pthread_mutex_t * mutex)
     __pthread_unlock(&mutex->__m_lock);
     return 0;
   case PTHREAD_MUTEX_ERRORCHECK_NP:
-    if (mutex->__m_owner != thread_self() || mutex->__m_lock.__status == 0)
+    if (mutex->__m_owner != thread_self() || (mutex->__m_lock.__status & 1) == 0)
       return EPERM;
     mutex->__m_owner = NULL;
     __pthread_alt_unlock(&mutex->__m_lock);
@@ -199,7 +199,7 @@ strong_alias (__pthread_mutexattr_destroy, pthread_mutexattr_destroy)
 
 int __pthread_mutexattr_settype(pthread_mutexattr_t *attr, int kind)
 {
-  if (kind != PTHREAD_MUTEX_FAST_NP
+  if (kind != PTHREAD_MUTEX_ADAPTIVE_NP
       && kind != PTHREAD_MUTEX_RECURSIVE_NP
       && kind != PTHREAD_MUTEX_ERRORCHECK_NP
       && kind != PTHREAD_MUTEX_TIMED_NP)
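
The EBUSY/EPERM tests in __pthread_mutex_destroy and __pthread_mutex_unlock now mask out everything but the low bit of __m_lock.__status because, under the new encoding introduced in spinlock.c below, the word can be nonzero while the lock is free (released with waiters still queued). A tiny self-contained illustration of the encoding, using a hypothetical stand-in for a thread descriptor:

    #include <assert.h>

    struct fake_descr { int dummy; };   /* hypothetical thread descriptor */

    int main(void)
    {
      static struct fake_descr waiter;  /* aligned, so its low address bit is 0 */

      long free_no_waiters  = 0;                    /* free, empty queue */
      long taken_no_waiters = 1;                    /* taken, empty queue */
      long taken_waiters    = (long) &waiter | 1;   /* taken, &waiter queued */
      long free_waiters     = (long) &waiter;       /* released, &waiter still queued */

      /* The busy test the mutex code now uses: only the low bit matters. */
      assert((free_no_waiters  & 1) == 0);
      assert((taken_no_waiters & 1) != 0);
      assert((taken_waiters    & 1) != 0);
      assert((free_waiters     & 1) == 0);
      return 0;
    }
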
diff --git a/linuxthreads/pthread.c b/linuxthreads/pthread.c
index 1e650f4e46..a3f829f9ac 100644
--- a/linuxthreads/pthread.c
+++ b/linuxthreads/pthread.c
@@ -150,7 +150,7 @@ pthread_descr __pthread_main_thread = &__pthread_initial_thread;
 /* Limit between the stack of the initial thread (above) and the
    stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
 
-char *__pthread_initial_thread_bos = NULL;
+char *__pthread_initial_thread_bos;
 
 /* File descriptor for sending requests to the thread manager. */
 /* Initially -1, meaning that the thread manager is not running. */
@@ -163,13 +163,17 @@ int __pthread_manager_reader;
 
 /* Limits of the thread manager stack */
 
-char *__pthread_manager_thread_bos = NULL;
-char *__pthread_manager_thread_tos = NULL;
+char *__pthread_manager_thread_bos;
+char *__pthread_manager_thread_tos;
 
 /* For process-wide exit() */
 
-int __pthread_exit_requested = 0;
-int __pthread_exit_code = 0;
+int __pthread_exit_requested;
+int __pthread_exit_code;
+
+/* Nonzero if the machine has more than one processor.  */
+int __pthread_smp_kernel;
+
 
 #if !__ASSUME_REALTIME_SIGNALS
 /* Pointers that select new or old suspend/resume functions
@@ -212,7 +216,7 @@ static int current_rtmin = -1;
 static int current_rtmax = -1;
 int __pthread_sig_restart = SIGUSR1;
 int __pthread_sig_cancel = SIGUSR2;
-int __pthread_sig_debug = 0;
+int __pthread_sig_debug;
 #else
 static int current_rtmin;
 static int current_rtmax;
@@ -224,7 +228,7 @@ int __pthread_sig_debug = __SIGRTMIN + 2;
 #else
 int __pthread_sig_restart = SIGUSR1;
 int __pthread_sig_cancel = SIGUSR2;
-int __pthread_sig_debug = 0;
+int __pthread_sig_debug;
 #endif
 
 static int rtsigs_initialized;
@@ -399,6 +403,8 @@ static void pthread_initialize(void)
     __cxa_atexit((void (*) (void *)) pthread_exit_process, NULL, __dso_handle);
   else
     __on_exit (pthread_exit_process, NULL);
+  /* How many processors.  */
+  __pthread_smp_kernel = sysconf (_SC_NPROCESSORS_ONLN) > 1;
 }
 
 void __pthread_initialize(void)
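
pthread_initialize now records whether more than one processor is online; __pthread_lock only spins when this flag is set, since spinning on a uniprocessor merely wastes the lock holder's timeslice. A standalone sketch of the same probe (hypothetical program, not patch code):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
      /* More than one online processor means spinning can pay off. */
      long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
      int smp = ncpus > 1;

      printf("%ld processor(s) online, smp=%d\n", ncpus, smp);
      return 0;
    }
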
diff --git a/linuxthreads/spinlock.c b/linuxthreads/spinlock.c
index c91a7cfa84..02ab9a9613 100644
--- a/linuxthreads/spinlock.c
+++ b/linuxthreads/spinlock.c
@@ -24,12 +24,20 @@
 #include "spinlock.h"
 #include "restart.h"
 
-/* The status field of a spinlock has the following meaning:
-     0: spinlock is free
-     1: spinlock is taken, no thread is waiting on it
-  ADDR: psinlock is taken, ADDR is address of thread descriptor for
-        first waiting thread, other waiting threads are linked via
-        their p_nextlock field.
+/* The status field of a spinlock is a pointer whose least significant
+   bit is a locked flag.
+
+   Thus the field values have the following meanings:
+
+   status == 0:       spinlock is free
+   status == 1:       spinlock is taken; no thread is waiting on it
+
+   (status & 1) == 1: spinlock is taken and (status & ~1L) is a
+                      pointer to the first waiting thread; other
+		      waiting threads are linked via the p_nextlock 
+		      field.
+   (status & 1) == 0: same as above, but spinlock is not taken.
+
    The waiting list is not sorted by priority order.
    Actually, we always insert at top of list (sole insertion mode
    that can be performed without locking).
@@ -38,29 +46,70 @@
    This is safe because there are no concurrent __pthread_unlock
    operations -- only the thread that locked the mutex can unlock it. */
 
+
 void internal_function __pthread_lock(struct _pthread_fastlock * lock,
 				      pthread_descr self)
 {
+#if defined HAS_COMPARE_AND_SWAP
   long oldstatus, newstatus;
-  int spurious_wakeup_count = 0;
+  int successful_seizure, spurious_wakeup_count = 0;
+  int spin_count = 0;
+#endif
+
+#if defined TEST_FOR_COMPARE_AND_SWAP
+  if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP
+  {
+    __pthread_acquire(&lock->__spinlock);
+    return;
+  }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
+again:
+
+  /* On SMP, try spinning to get the lock. */
+
+  if (__pthread_smp_kernel) {
+    int max_count = lock->__spinlock * 2 + 10;
+
+    for (spin_count = 0; spin_count < max_count; spin_count++) {
+      if (((oldstatus = lock->__status) & 1) == 0) {
+	if(__compare_and_swap(&lock->__status, oldstatus, oldstatus | 1))
+	{
+	  if (spin_count)
+	    lock->__spinlock += (spin_count - lock->__spinlock) / 8;
+	  return;
+	}
+      }
+    }
+
+    lock->__spinlock += (spin_count - lock->__spinlock) / 8;
+  }
+
+  /* No luck, try once more or suspend. */
 
   do {
     oldstatus = lock->__status;
-    if (oldstatus == 0) {
-      newstatus = 1;
+    successful_seizure = 0;
+
+    if ((oldstatus & 1) == 0) {
+      newstatus = oldstatus | 1;
+      successful_seizure = 1;
     } else {
       if (self == NULL)
 	self = thread_self();
-      newstatus = (long) self;
+      newstatus = (long) self | 1;
     }
+
     if (self != NULL) {
-      THREAD_SETMEM(self, p_nextlock, (pthread_descr) oldstatus);
+      THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus & ~1L));
       /* Make sure the store in p_nextlock completes before performing
          the compare-and-swap */
       MEMORY_BARRIER();
     }
-  } while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
-                             &lock->__spinlock));
+  } while(! __compare_and_swap(&lock->__status, oldstatus, newstatus));
 
   /* Suspend with guard against spurious wakeup.
      This can happen in pthread_cond_timedwait_relative, when the thread
@@ -68,7 +117,7 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
      locks the queue to remove itself. At that point it may still be on the
      queue, and may be resumed by a condition signal. */
 
-  if (oldstatus != 0) {
+  if (!successful_seizure) {
     for (;;) {
       suspend(self);
       if (self->p_nextlock != NULL) {
@@ -78,37 +127,50 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
       }
       break;
     }
+    goto again;
   }
 
   /* Put back any resumes we caught that don't belong to us. */
   while (spurious_wakeup_count--)
     restart(self);
+#endif
 }
 
 int __pthread_unlock(struct _pthread_fastlock * lock)
 {
+#if defined HAS_COMPARE_AND_SWAP
   long oldstatus;
   pthread_descr thr, * ptr, * maxptr;
   int maxprio;
+#endif
 
+#if defined TEST_FOR_COMPARE_AND_SWAP
+  if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP
+  {
+    lock->__spinlock = 0;
+    WRITE_MEMORY_BARRIER();
+    return 0;
+  }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
 again:
   oldstatus = lock->__status;
-  if (oldstatus == 0 || oldstatus == 1) {
-    /* No threads are waiting for this lock.  Please note that we also
-       enter this case if the lock is not taken at all.  If this wouldn't
-       be done here we would crash further down.  */
-    if (! compare_and_swap_with_release_semantics (&lock->__status,
-						   oldstatus, 0,
-						   &lock->__spinlock))
-      goto again;
-    return 0;
+
+  while ((oldstatus = lock->__status) == 1) {
+    if (__compare_and_swap_with_release_semantics(&lock->__status, 
+	oldstatus, 0))
+      return 0;
   }
+
   /* Find thread in waiting queue with maximal priority */
   ptr = (pthread_descr *) &lock->__status;
-  thr = (pthread_descr) oldstatus;
+  thr = (pthread_descr) (oldstatus & ~1L);
   maxprio = 0;
   maxptr = ptr;
-  while (thr != (pthread_descr) 1) {
+  while (thr != 0) {
     if (thr->p_priority >= maxprio) {
       maxptr = ptr;
       maxprio = thr->p_priority;
@@ -128,16 +190,25 @@ again:
   /* Remove max prio thread from waiting list. */
   if (maxptr == (pthread_descr *) &lock->__status) {
     /* If max prio thread is at head, remove it with compare-and-swap
-       to guard against concurrent lock operation */
-    thr = (pthread_descr) oldstatus;
-    if (! compare_and_swap_with_release_semantics
-	    (&lock->__status, oldstatus, (long)(thr->p_nextlock),
-	     &lock->__spinlock))
+       to guard against concurrent lock operation. This removal
+       also has the side effect of marking the lock as released
+       because the new status comes from thr->p_nextlock whose
+       least significant bit is clear. */
+    thr = (pthread_descr) (oldstatus & ~1L);
+    if (! __compare_and_swap_with_release_semantics
+	    (&lock->__status, oldstatus, (long)(thr->p_nextlock)))
       goto again;
   } else {
-    /* No risk of concurrent access, remove max prio thread normally */
+    /* No risk of concurrent access, remove max prio thread normally.
+       But in this case we must also flip the least significant bit
+       of the status to mark the lock as released. */
     thr = *maxptr;
     *maxptr = thr->p_nextlock;
+
+    do {
+      oldstatus = lock->__status;
+    } while (!__compare_and_swap_with_release_semantics(&lock->__status,
+	     oldstatus, oldstatus & ~1L));
   }
   /* Prevent reordering of store to *maxptr above and store to thr->p_nextlock
      below */
@@ -147,6 +218,7 @@ again:
   restart(thr);
 
   return 0;
+#endif
 }
 
 /*
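
The spin limit in __pthread_lock is adaptive: on machines with compare-and-swap, the otherwise unused __spinlock field holds a running estimate of how long the lock tends to stay held, each acquisition spins for at most twice that estimate plus a floor of ten iterations, and the observed spin count is fed back with a weight of 1/8, i.e. an exponential moving average. A reduced model of just that bookkeeping, with hypothetical names:

    /* Hypothetical standalone model of the adaptive spin estimate. */
    static int spin_estimate;           /* plays the role of lock->__spinlock */

    static int next_spin_budget(void)
    {
      return spin_estimate * 2 + 10;    /* max_count formula from the patch */
    }

    static void record_spin(int spin_count)
    {
      /* estimate += (sample - estimate) / 8: move 1/8 of the way
         toward each new sample, so bursts decay smoothly.  */
      spin_estimate += (spin_count - spin_estimate) / 8;
    }
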
diff --git a/linuxthreads/spinlock.h b/linuxthreads/spinlock.h
index 1145c72636..96f39551dd 100644
--- a/linuxthreads/spinlock.h
+++ b/linuxthreads/spinlock.h
@@ -81,6 +81,7 @@ static inline int compare_and_swap(long * ptr, long oldval, long newval,
 
 #ifndef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
 #define compare_and_swap_with_release_semantics compare_and_swap
+#define __compare_and_swap_with_release_semantics __compare_and_swap
 #endif
 
 /* Internal locks */
@@ -97,13 +98,26 @@ static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
 
 static inline int __pthread_trylock (struct _pthread_fastlock * lock)
 {
+#if defined HAS_COMPARE_AND_SWAP
   long oldstatus;
+#endif
 
+#if defined TEST_FOR_COMPARE_AND_SWAP
+  if (!__pthread_has_cas)
+#endif
+#if !defined HAS_COMPARE_AND_SWAP
+  {
+    return (testandset(&lock->__spinlock) ? EBUSY : 0);
+  }
+#endif
+
+#if defined HAS_COMPARE_AND_SWAP
   do {
     oldstatus = lock->__status;
     if (oldstatus != 0) return EBUSY;
   } while(! compare_and_swap(&lock->__status, 0, 1, &lock->__spinlock));
   return 0;
+#endif
 }
 
 /* Variation of internal lock used for pthread_mutex_t, supporting 
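
On configurations without compare-and-swap, the lock degenerates into a plain test-and-set spinlock on the __spinlock word: trylock is a single testandset, unlock stores zero and issues a write barrier, and blocking acquisition goes through __pthread_acquire. A reduced sketch of that fallback protocol, using GCC __sync builtins as stand-ins for the LinuxThreads testandset() and barrier macros (an assumption for illustration, not the patch's actual primitives):

    #include <errno.h>

    static int fallback_spinlock;       /* plays the role of __spinlock */

    static int fallback_trylock(void)
    {
      /* Atomic exchange returns the previous value; nonzero means held. */
      return __sync_lock_test_and_set(&fallback_spinlock, 1) ? EBUSY : 0;
    }

    static void fallback_unlock(void)
    {
      /* Store zero with release semantics, like the __spinlock = 0 and
         WRITE_MEMORY_BARRIER() pair in __pthread_unlock.  */
      __sync_lock_release(&fallback_spinlock);
    }
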
diff --git a/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h b/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
index a53889f5f0..2bb661fea3 100644
--- a/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
+++ b/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
@@ -25,8 +25,9 @@
 /* Fast locks (not abstract because mutexes and conditions aren't abstract). */
 struct _pthread_fastlock
 {
-  long int __status;            /* "Free" or "taken" or head of waiting list */
-  int __spinlock;               /* For compare-and-swap emulation */
+  long int __status;   /* "Free" or "taken" or head of waiting list */
+  int __spinlock;      /* Used by compare_and_swap emulation. Also,
+			  adaptive SMP lock stores spin count here. */
 };
 
 #ifndef _PTHREAD_DESCR_DEFINED
diff --git a/linuxthreads/sysdeps/pthread/pthread.h b/linuxthreads/sysdeps/pthread/pthread.h
index 0e1cbe891f..f9e1fd9787 100644
--- a/linuxthreads/sysdeps/pthread/pthread.h
+++ b/linuxthreads/sysdeps/pthread/pthread.h
@@ -77,10 +77,10 @@ enum
 
 enum
 {
-  PTHREAD_MUTEX_FAST_NP,
+  PTHREAD_MUTEX_TIMED_NP,
   PTHREAD_MUTEX_RECURSIVE_NP,
   PTHREAD_MUTEX_ERRORCHECK_NP,
-  PTHREAD_MUTEX_TIMED_NP
+  PTHREAD_MUTEX_ADAPTIVE_NP
 #ifdef __USE_UNIX98
   ,
   PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,