author	Ulrich Drepper <drepper@redhat.com>	2000-04-15 17:15:10 +0000
committer	Ulrich Drepper <drepper@redhat.com>	2000-04-15 17:15:10 +0000
commit	ef187474bc32c56121db454e023197f2037e601d (patch)
tree	37d50aa26f48b0eda1886665c4389831bc78a8b2
parent	a5a6f9262eeffab9f78622258fae306d1bf99d04 (diff)
Update.
	* sysdeps/alpha/dl-machine.h (RTLD_START): Rewrite for new init
	function interface.  Patch by Richard Henderson <rth@cygnus.com>.
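
In C terms, the change this refers to looks roughly like the following. This is a hedged sketch reconstructed from the RTLD_START diff below; the exact glibc-internal declarations are not reproduced, and both prototypes here are assumptions.

/* Sketch of the old vs. new initializer interface, as assumed from the
   RTLD_START changes in this patch.  Types are simplified stand-ins.  */

struct link_map;                /* opaque here */

/* Old scheme: RTLD_START asked the dynamic linker for one initializer
   address at a time and called each until none were left.  */
extern void *_dl_init_next (void *main_searchlist);   /* assumed shape */

static void
run_initializers_old (void *main_searchlist)
{
  void (*init) (void);
  /* Loop until _dl_init_next reports that nothing is left to run.  */
  while ((init = (void (*) (void)) _dl_init_next (main_searchlist)) != 0)
    (*init) ();
}

/* New scheme: a single call runs every initializer, and it also receives
   argc/argv/envp so constructors can see the program arguments.  */
extern void _dl_init (struct link_map *main_map, int argc, char **argv,
		      char **env);

static void
run_initializers_new (struct link_map *main_map, int argc, char **argv,
		      char **env)
{
  _dl_init (main_map, argc, argv, env);   /* what the new RTLD_START does */
}
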
-rw-r--r--	ChangeLog	3
-rw-r--r--	linuxthreads/ChangeLog	16
-rw-r--r--	linuxthreads/condvar.c	210
-rw-r--r--	linuxthreads/internals.h	5
-rw-r--r--	linuxthreads/pthread.c	141
-rw-r--r--	linuxthreads/restart.h	6
-rw-r--r--	linuxthreads/semaphore.c	55
-rw-r--r--	sysdeps/alpha/dl-machine.h	71
-rw-r--r--	sysdeps/unix/sysv/linux/alpha/adjtime.c	2
9 files changed, 219 insertions, 290 deletions
diff --git a/ChangeLog b/ChangeLog
index 02eff57bdf..c590d90a74 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2000-04-15  Ulrich Drepper  <drepper@redhat.com>
 
+	* sysdeps/alpha/dl-machine.h (RTLD_START): Rewrite for new init
+	function interface.  Patch by Richard Henderson <rth@cygnus.com>.
+
 	* posix/Makefile (headers): Add spawn.h.
 	(distribute): Add spawn_int.h.
 	(routines): Add spawn_faction_init, spawn_faction_destroy,
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index 50fa766fc6..9732297adc 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,5 +1,21 @@
 2000-04-15  Ulrich Drepper  <drepper@redhat.com>
 
+	* condvar.c: Remove all the special code to handle cond_timedwait.
+	Use timedsuspend instead.
+	* internals.h: Declare __pthread_timedsuspend_old,
+	__pthread_timedsuspend_new, and __pthread_timedsuspend.
+	Remove declaration of __pthread_init_condvar.
+	* pthread.c: Define __pthread_timedsuspend variable.
+	(__pthread_timedsuspend_old): New function.  Timed suspension
+	implementation for old Linux kernels.
+	(__pthread_timedsuspend_new): New function.  Timed suspension
+	implementation for new Linux kernels.
+	* restart.h (timedsuspend): New function.  Call appropriate
+	suspension function through __pthread_timedsuspend.
+	* semaphore.c (sem_timedwait): Use timedsuspend, don't duplicate
+	the code.
+	Patch by Kaz Kylheku <kaz@ashi.footprints.net>.
+
 	* internals.h (WRITE_MEMORY_BARRIER): Define as MEMORY_BARRIER if
 	undefined.
 	* spinlock.c: Use WRITE_MEMORY_BARRIER instead of MEMORY_BARRIER
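
The entry above boils down to a dispatch-through-a-function-pointer pattern: a single timed-suspend pointer that defaults to the pre-RT-signal implementation and is switched at startup once queued realtime signals are known to be available, plus a small inline wrapper for callers. A minimal, self-contained sketch of that pattern follows; the type and function names are illustrative stand-ins, not the LinuxThreads identifiers.

#include <time.h>

typedef void *thread_handle;            /* stand-in for pthread_descr */

/* Two implementations; the real bodies differ by kernel capability.  */
static int
timedsuspend_old (thread_handle self, const struct timespec *abstime)
{
  (void) self; (void) abstime;
  return 0;                             /* stub for the sketch */
}

static int
timedsuspend_new (thread_handle self, const struct timespec *abstime)
{
  (void) self; (void) abstime;
  return 0;                             /* stub for the sketch */
}

/* Global pointer, analogous to __pthread_timedsuspend in pthread.c,
   initialized to the conservative (old-kernel) implementation.  */
static int (*timedsuspend_impl) (thread_handle, const struct timespec *)
  = timedsuspend_old;

/* Run once at startup, analogous to init_rtsigs() in pthread.c.  */
static void
pick_timedsuspend (int have_queued_rt_signals)
{
  if (have_queued_rt_signals)
    timedsuspend_impl = timedsuspend_new;
}

/* Inline wrapper, analogous to timedsuspend() in restart.h.  */
static inline int
timedsuspend (thread_handle self, const struct timespec *abstime)
{
  return timedsuspend_impl (self, abstime);
}
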
diff --git a/linuxthreads/condvar.c b/linuxthreads/condvar.c
index e1c8119231..a06517c83f 100644
--- a/linuxthreads/condvar.c
+++ b/linuxthreads/condvar.c
@@ -25,22 +25,6 @@
 #include "queue.h"
 #include "restart.h"
 
-static int pthread_cond_timedwait_relative_old(pthread_cond_t *,
-    pthread_mutex_t *, const struct timespec *);
-
-static int pthread_cond_timedwait_relative_new(pthread_cond_t *,
-    pthread_mutex_t *, const struct timespec *);
-
-static int (*pthread_cond_tw_rel)(pthread_cond_t *, pthread_mutex_t *,
-    const struct timespec *) = pthread_cond_timedwait_relative_old;
-
-/* initialize this module */
-void __pthread_init_condvar(int rt_sig_available)
-{
-  if (rt_sig_available)
-    pthread_cond_tw_rel = pthread_cond_timedwait_relative_new;
-}
-
 int pthread_cond_init(pthread_cond_t *cond,
                       const pthread_condattr_t *cond_attr)
 {
@@ -127,151 +111,13 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
   return 0;
 }
 
-/* The following function is used on kernels that don't have rt signals.
-   SIGUSR1 is used as the restart signal. The different code is needed
-   because that ordinary signal does not queue. */
-
 static int
-pthread_cond_timedwait_relative_old(pthread_cond_t *cond,
+pthread_cond_timedwait_relative(pthread_cond_t *cond,
 				pthread_mutex_t *mutex,
 				const struct timespec * abstime)
 {
   volatile pthread_descr self = thread_self();
-  sigset_t unblock, initial_mask;
   int already_canceled = 0;
-  int was_signalled = 0;
-  sigjmp_buf jmpbuf;
-  pthread_extricate_if extr;
-
-  /* Check whether the mutex is locked and owned by this thread.  */
-  if (mutex->__m_kind != PTHREAD_MUTEX_FAST_NP && mutex->__m_owner != self)
-    return EINVAL;
-
-  /* Set up extrication interface */
-  extr.pu_object = cond;
-  extr.pu_extricate_func = cond_extricate_func;
-
-  /* Register extrication interface */
-  __pthread_set_own_extricate_if(self, &extr);
-
-  /* Enqueue to wait on the condition and check for cancellation. */
-  __pthread_lock(&cond->__c_lock, self);
-  if (!(THREAD_GETMEM(self, p_canceled)
-      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
-    enqueue(&cond->__c_waiting, self);
-  else
-    already_canceled = 1;
-  __pthread_spin_unlock(&cond->__c_lock);
-
-  if (already_canceled) {
-    __pthread_set_own_extricate_if(self, 0);
-    pthread_exit(PTHREAD_CANCELED);
-  }
-
-  pthread_mutex_unlock(mutex);
-
-  if (atomic_decrement(&self->p_resume_count) == 0) {
-    /* Set up a longjmp handler for the restart signal, unblock
-       the signal and sleep. */
-
-    if (sigsetjmp(jmpbuf, 1) == 0) {
-      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
-      THREAD_SETMEM(self, p_signal, 0);
-      /* Unblock the restart signal */
-      sigemptyset(&unblock);
-      sigaddset(&unblock, __pthread_sig_restart);
-      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
-
-      while (1) {
-	struct timeval now;
-	struct timespec reltime;
-
-	/* Compute a time offset relative to now.  */
-	__gettimeofday (&now, NULL);
-	reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
-	reltime.tv_sec = abstime->tv_sec - now.tv_sec;
-	if (reltime.tv_nsec < 0) {
-	  reltime.tv_nsec += 1000000000;
-	  reltime.tv_sec -= 1;
-	}
-
-	/* Sleep for the required duration. If woken by a signal,
-	   resume waiting as required by Single Unix Specification.  */
-	if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
-	  break;
-      }
-
-      /* Block the restart signal again */
-      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
-      was_signalled = 0;
-    } else {
-      was_signalled = 1;
-    }
-    THREAD_SETMEM(self, p_signal_jmp, NULL);
-  }
-
-  /* Now was_signalled is true if we exited the above code
-     due to the delivery of a restart signal.  In that case,
-     we know we have been dequeued and resumed and that the
-     resume count is balanced.  Otherwise, there are some
-     cases to consider. First, try to bump up the resume count
-     back to zero. If it goes to 1, it means restart() was
-     invoked on this thread. The signal must be consumed
-     and the count bumped down and everything is cool.
-     Otherwise, no restart was delivered yet, so we remove
-     the thread from the queue. If this succeeds, it's a clear
-     case of timeout. If we fail to remove from the queue, then we
-     must wait for a restart. */
-
-  if (!was_signalled) {
-    if (atomic_increment(&self->p_resume_count) != -1) {
-      __pthread_wait_for_restart_signal(self);
-      atomic_decrement(&self->p_resume_count); /* should be zero now! */
-    } else {
-      int was_on_queue;
-      __pthread_lock(&cond->__c_lock, self);
-      was_on_queue = remove_from_queue(&cond->__c_waiting, self);
-      __pthread_spin_unlock(&cond->__c_lock);
-
-      if (was_on_queue) {
-	__pthread_set_own_extricate_if(self, 0);
-	pthread_mutex_lock(mutex);
-	return ETIMEDOUT;
-      }
-
-      suspend(self);
-    }
-  }
-
-  __pthread_set_own_extricate_if(self, 0);
-
-  /* The remaining logic is the same as in other cancellable waits,
-     such as pthread_join sem_wait or pthread_cond wait. */
-
-  if (THREAD_GETMEM(self, p_woken_by_cancel)
-      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
-    THREAD_SETMEM(self, p_woken_by_cancel, 0);
-    pthread_mutex_lock(mutex);
-    pthread_exit(PTHREAD_CANCELED);
-  }
-
-  pthread_mutex_lock(mutex);
-  return 0;
-}
-
-/* The following function is used on new (late 2.1 and 2.2 and higher) kernels
-   that have rt signals which queue. */
-
-static int
-pthread_cond_timedwait_relative_new(pthread_cond_t *cond,
-				pthread_mutex_t *mutex,
-				const struct timespec * abstime)
-{
-  volatile pthread_descr self = thread_self();
-  sigset_t unblock, initial_mask;
-  int already_canceled = 0;
-  int was_signalled = 0;
-  sigjmp_buf jmpbuf;
   pthread_extricate_if extr;
 
   /* Check whether the mutex is locked and owned by this thread.  */
@@ -279,7 +125,6 @@ pthread_cond_timedwait_relative_new(pthread_cond_t *cond,
     return EINVAL;
 
   already_canceled = 0;
-  was_signalled = 0;
 
   /* Set up extrication interface */
   extr.pu_object = cond;
@@ -304,56 +149,7 @@ pthread_cond_timedwait_relative_new(pthread_cond_t *cond,
 
   pthread_mutex_unlock(mutex);
 
-  /* Set up a longjmp handler for the restart signal, unblock
-     the signal and sleep. */
-
-  if (sigsetjmp(jmpbuf, 1) == 0) {
-    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
-    THREAD_SETMEM(self, p_signal, 0);
-    /* Unblock the restart signal */
-    sigemptyset(&unblock);
-    sigaddset(&unblock, __pthread_sig_restart);
-    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
-
-      while (1) {
-	struct timeval now;
-	struct timespec reltime;
-
-	/* Compute a time offset relative to now.  */
-	__gettimeofday (&now, NULL);
-	reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
-	reltime.tv_sec = abstime->tv_sec - now.tv_sec;
-	if (reltime.tv_nsec < 0) {
-	  reltime.tv_nsec += 1000000000;
-	  reltime.tv_sec -= 1;
-	}
-
-	/* Sleep for the required duration. If woken by a signal,
-	   resume waiting as required by Single Unix Specification.  */
-	if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
-	  break;
-      }
-
-    /* Block the restart signal again */
-    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
-    was_signalled = 0;
-  } else {
-    was_signalled = 1;
-  }
-  THREAD_SETMEM(self, p_signal_jmp, NULL);
-
-  /* Now was_signalled is true if we exited the above code
-     due to the delivery of a restart signal.  In that case,
-     everything is cool. We have been removed from the queue
-     by the other thread, and consumed its signal.
-
-     Otherwise we this thread woke up spontaneously, or due to a signal other
-     than restart. The next thing to do is to try to remove the thread
-     from the queue. This may fail due to a race against another thread
-     trying to do the same. In the failed case, we know we were signalled,
-     and we may also have to consume a restart signal. */
-
-  if (!was_signalled) {
+  if (timedsuspend(self, abstime) == 0) {
     int was_on_queue;
 
     /* __pthread_lock will queue back any spurious restarts that
@@ -393,7 +189,7 @@ int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
                            const struct timespec * abstime)
 {
   /* Indirect call through pointer! */
-  return pthread_cond_tw_rel(cond, mutex, abstime);
+  return pthread_cond_timedwait_relative(cond, mutex, abstime);
 }
 
 int pthread_cond_signal(pthread_cond_t *cond)
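
After this change, condvar.c and semaphore.c share the same caller-side protocol around timedsuspend(): enqueue, sleep against the deadline, and on a 0 return (no restart consumed) either withdraw from the wait queue and report ETIMEDOUT, or, if the withdrawal loses the race against a waker, absorb the restart that is already in flight. The compilable sketch below restates that protocol; the queue, lock, and suspend primitives are hypothetical scaffolding, not LinuxThreads internals.

#include <errno.h>
#include <time.h>

struct waiter { struct waiter *next; };
struct waitqueue { struct waiter *head; };

/* Hypothetical stand-ins for the LinuxThreads primitives.  */
static void lock_queue (struct waitqueue *q) { (void) q; }
static void unlock_queue (struct waitqueue *q) { (void) q; }
static void suspend (struct waiter *self) { (void) self; }
static int
timedsuspend (struct waiter *self, const struct timespec *abstime)
{
  (void) self; (void) abstime;
  return 0;                     /* 1 = restart consumed, 0 = not */
}

static void
enqueue (struct waitqueue *q, struct waiter *self)
{
  self->next = q->head;
  q->head = self;
}

/* Returns nonzero if SELF was still queued and has been removed.  */
static int
remove_from_queue (struct waitqueue *q, struct waiter *self)
{
  struct waiter **p;
  for (p = &q->head; *p != 0; p = &(*p)->next)
    if (*p == self)
      {
        *p = self->next;
        return 1;
      }
  return 0;
}

static int
timed_wait (struct waitqueue *q, struct waiter *self,
            const struct timespec *abstime)
{
  lock_queue (q);
  enqueue (q, self);
  unlock_queue (q);

  if (timedsuspend (self, abstime) == 0)
    {
      /* No restart was consumed: we timed out or woke for an unrelated
         reason.  Try to withdraw from the queue; if we are already off
         it, a waker has committed to restarting us, and that restart
         must be absorbed before returning.  */
      int was_on_queue;

      lock_queue (q);
      was_on_queue = remove_from_queue (q, self);
      unlock_queue (q);

      if (was_on_queue)
        return ETIMEDOUT;

      suspend (self);
    }
  return 0;
}
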
diff --git a/linuxthreads/internals.h b/linuxthreads/internals.h
index 17c8e4d4d6..e6b58df3ca 100644
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -431,18 +431,19 @@ extern void __pthread_kill_other_threads_np (void);
 
 void __pthread_restart_old(pthread_descr th);
 void __pthread_suspend_old(pthread_descr self);
+int __pthread_timedsuspend_old(pthread_descr self, const struct timespec *abs);
 
 void __pthread_restart_new(pthread_descr th);
 void __pthread_suspend_new(pthread_descr self);
+int __pthread_timedsuspend_new(pthread_descr self, const struct timespec *abs);
 
 void __pthread_wait_for_restart_signal(pthread_descr self);
 
-void __pthread_init_condvar(int rt_sig_available);
-
 /* Global pointers to old or new suspend functions */
 
 extern void (*__pthread_restart)(pthread_descr);
 extern void (*__pthread_suspend)(pthread_descr);
+extern int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *);
 
 /* Prototypes for the function without cancelation support when the
    normal version has it.  */
diff --git a/linuxthreads/pthread.c b/linuxthreads/pthread.c
index d3b851e5a2..e8ec26d33c 100644
--- a/linuxthreads/pthread.c
+++ b/linuxthreads/pthread.c
@@ -166,6 +166,7 @@ int __pthread_exit_code = 0;
 
 void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
 void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
+int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
 
 /* Communicate relevant LinuxThreads constants to gdb */
 
@@ -238,7 +239,6 @@ init_rtsigs (void)
       __pthread_sig_cancel = SIGUSR2;
       __pthread_sig_debug = 0;
 #endif
-      __pthread_init_condvar(0);
     }
   else
     {
@@ -246,10 +246,9 @@ init_rtsigs (void)
       current_rtmin = __SIGRTMIN + 3;
       __pthread_restart = __pthread_restart_new;
       __pthread_suspend = __pthread_wait_for_restart_signal;
-      __pthread_init_condvar(1);
+      __pthread_timedsuspend = __pthread_timedsuspend_new;
 #else
       current_rtmin = __SIGRTMIN;
-      __pthread_init_condvar(0);
 #endif
 
       current_rtmax = __SIGRTMAX;
@@ -826,7 +825,8 @@ void __pthread_wait_for_restart_signal(pthread_descr self)
   } while (self->p_signal !=__pthread_sig_restart );
 }
 
-/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT signals.
+/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
+   signals.
    On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
    Since the restart signal does not queue, we use an atomic counter to create
    queuing semantics. This is needed to resolve a rare race condition in
@@ -844,6 +844,83 @@ void __pthread_suspend_old(pthread_descr self)
     __pthread_wait_for_restart_signal(self);
 }
 
+int 
+__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
+{
+  sigset_t unblock, initial_mask;
+  int was_signalled = 0;
+  sigjmp_buf jmpbuf;
+
+  if (atomic_decrement(&self->p_resume_count) == 0) {
+    /* Set up a longjmp handler for the restart signal, unblock
+       the signal and sleep. */
+
+    if (sigsetjmp(jmpbuf, 1) == 0) {
+      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
+      THREAD_SETMEM(self, p_signal, 0);
+      /* Unblock the restart signal */
+      sigemptyset(&unblock);
+      sigaddset(&unblock, __pthread_sig_restart);
+      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
+
+      while (1) {
+	struct timeval now;
+	struct timespec reltime;
+
+	/* Compute a time offset relative to now.  */
+	__gettimeofday (&now, NULL);
+	reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
+	reltime.tv_sec = abstime->tv_sec - now.tv_sec;
+	if (reltime.tv_nsec < 0) {
+	  reltime.tv_nsec += 1000000000;
+	  reltime.tv_sec -= 1;
+	}
+
+	/* Sleep for the required duration. If woken by a signal,
+	   resume waiting as required by Single Unix Specification.  */
+	if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
+	  break;
+      }
+
+      /* Block the restart signal again */
+      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
+      was_signalled = 0;
+    } else {
+      was_signalled = 1;
+    }
+    THREAD_SETMEM(self, p_signal_jmp, NULL);
+  }
+
+  /* Now was_signalled is true if we exited the above code
+     due to the delivery of a restart signal.  In that case,
+     we know we have been dequeued and resumed and that the
+     resume count is balanced.  Otherwise, there are some
+     cases to consider. First, try to bump up the resume count
+     back to zero. If it goes to 1, it means restart() was
+     invoked on this thread. The signal must be consumed
+     and the count bumped down and everything is cool. We
+     can return a 1 to the caller.
+     Otherwise, no restart was delivered yet, so a potential
+     race exists; we return a 0 to the caller which must deal
+     with this race in an appropriate way; for example by
+     atomically removing the thread from consideration for a 
+     wakeup---if such a thing fails, it means a restart is
+     being delivered. */
+
+  if (!was_signalled) {
+    if (atomic_increment(&self->p_resume_count) != -1) {
+      __pthread_wait_for_restart_signal(self);
+      atomic_decrement(&self->p_resume_count); /* should be zero now! */
+      /* woke spontaneously and consumed restart signal */
+      return 1;
+    }
+    /* woke spontaneously but did not consume restart---caller must resolve */
+    return 0;
+  }
+  /* woken due to restart signal */
+  return 1;
+}
+
 void __pthread_restart_new(pthread_descr th)
 {
     kill(th->p_pid, __pthread_sig_restart);
@@ -852,6 +929,62 @@ void __pthread_restart_new(pthread_descr th)
 /* There is no __pthread_suspend_new because it would just
    be a wasteful wrapper for __pthread_wait_for_restart_signal */
 
+int 
+__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
+{
+  sigset_t unblock, initial_mask;
+  int was_signalled = 0;
+  sigjmp_buf jmpbuf;
+
+  if (sigsetjmp(jmpbuf, 1) == 0) {
+    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
+    THREAD_SETMEM(self, p_signal, 0);
+    /* Unblock the restart signal */
+    sigemptyset(&unblock);
+    sigaddset(&unblock, __pthread_sig_restart);
+    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
+
+    while (1) {
+      struct timeval now;
+      struct timespec reltime;
+
+      /* Compute a time offset relative to now.  */
+      __gettimeofday (&now, NULL);
+      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
+      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
+      if (reltime.tv_nsec < 0) {
+	reltime.tv_nsec += 1000000000;
+	reltime.tv_sec -= 1;
+      }
+
+      /* Sleep for the required duration. If woken by a signal,
+	 resume waiting as required by Single Unix Specification.  */
+      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
+	break;
+    }
+
+    /* Block the restart signal again */
+    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
+    was_signalled = 0;
+  } else {
+    was_signalled = 1;
+  }
+  THREAD_SETMEM(self, p_signal_jmp, NULL);
+
+  /* Now was_signalled is true if we exited the above code
+     due to the delivery of a restart signal.  In that case,
+     everything is cool. We have been removed from whatever
+     we were waiting on by the other thread, and consumed its signal.
+
+     Otherwise this thread woke up spontaneously, or due to a signal other
+     than restart. This is an ambiguous case that must be resolved by
+     the caller; the thread is still eligible for a restart wakeup,
+     so there is a race. */
+
+  return was_signalled;
+}
+
+
 /* Debugging aid */
 
 #ifdef DEBUG
diff --git a/linuxthreads/restart.h b/linuxthreads/restart.h
index 702d7d15c6..0a69309c5b 100644
--- a/linuxthreads/restart.h
+++ b/linuxthreads/restart.h
@@ -25,3 +25,9 @@ static inline void suspend(pthread_descr self)
 {
   __pthread_suspend(self); /* see pthread.c */
 }
+
+static inline int timedsuspend(pthread_descr self, 
+		const struct timespec *abstime)
+{
+   return __pthread_timedsuspend(self, abstime); /* see pthread.c */
+}
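
The heart of both __pthread_timedsuspend_old and __pthread_timedsuspend_new above is the same deadline handling: convert the absolute CLOCK_REALTIME deadline into a relative interval, sleep, and recompute and retry if an unrelated signal interrupts the sleep. A standalone restatement of just that piece, using the public gettimeofday and nanosleep entry points rather than the internal __ aliases:

#include <stddef.h>
#include <sys/time.h>
#include <time.h>

/* Sleep until the absolute CLOCK_REALTIME deadline ABSTIME has passed,
   restarting the sleep if an unrelated signal interrupts it.  */
static int
sleep_until_abstime (const struct timespec *abstime)
{
  for (;;)
    {
      struct timeval now;
      struct timespec reltime;

      /* Compute the remaining interval relative to now.  */
      gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0)
        {
          reltime.tv_nsec += 1000000000;
          reltime.tv_sec -= 1;
        }

      /* Deadline already reached, or the full interval elapsed.  */
      if (reltime.tv_sec < 0 || nanosleep (&reltime, NULL) == 0)
        return 0;

      /* Interrupted by a signal: recompute and resume waiting, as the
         Single Unix Specification requires.  */
    }
}
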
diff --git a/linuxthreads/semaphore.c b/linuxthreads/semaphore.c
index 7775b5a7d3..35bb79679b 100644
--- a/linuxthreads/semaphore.c
+++ b/linuxthreads/semaphore.c
@@ -195,10 +195,6 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
   pthread_descr self = thread_self();
   pthread_extricate_if extr;
   int already_canceled = 0;
-  int was_signalled = 0;
-  sigjmp_buf jmpbuf;
-  sigset_t unblock;
-  sigset_t initial_mask;
 
   __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
@@ -233,56 +229,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
     pthread_exit(PTHREAD_CANCELED);
   }
 
-  /* Set up a longjmp handler for the restart signal, unblock
-     the signal and sleep. */
-
-  if (sigsetjmp(jmpbuf, 1) == 0) {
-    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
-    THREAD_SETMEM(self, p_signal, 0);
-    /* Unblock the restart signal */
-    sigemptyset(&unblock);
-    sigaddset(&unblock, __pthread_sig_restart);
-    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);
-
-    while (1) {
-        struct timeval now;
-        struct timespec reltime;
-
-        /* Compute a time offset relative to now.  */
-        __gettimeofday (&now, NULL);
-        reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
-        reltime.tv_sec = abstime->tv_sec - now.tv_sec;
-        if (reltime.tv_nsec < 0) {
-          reltime.tv_nsec += 1000000000;
-          reltime.tv_sec -= 1;
-        }
-
-        /* Sleep for the required duration. If woken by a signal,
-           resume waiting as required by Single Unix Specification.  */
-        if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
-          break;
-      }
-
-    /* Block the restart signal again */
-    sigprocmask(SIG_SETMASK, &initial_mask, NULL);
-    was_signalled = 0;
-  } else {
-    was_signalled = 1;
-  }
-  THREAD_SETMEM(self, p_signal_jmp, NULL);
-
-  /* Now was_signalled is true if we exited the above code
-     due to the delivery of a restart signal.  In that case,
-     everything is cool. We have been removed from the queue
-     by the other thread, and consumed its signal.
-
-     Otherwise we this thread woke up spontaneously, or due to a signal other
-     than restart. The next thing to do is to try to remove the thread
-     from the queue. This may fail due to a race against another thread
-     trying to do the same. In the failed case, we know we were signalled,
-     and we may also have to consume a restart signal. */
-
-  if (!was_signalled) {
+  if (timedsuspend(self, abstime) == 0) {
     int was_on_queue;
 
     /* __pthread_lock will queue back any spurious restarts that
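
For reference, a hedged caller-side example of the sem_timedwait interface exercised above: the deadline argument is absolute, so the caller builds it from the current time (gettimeofday here, to stay close to the timekeeping used in this patch), and a timeout is reported through the usual sem_* convention of a nonzero return with errno set to ETIMEDOUT.

#include <errno.h>
#include <semaphore.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

/* Wait on SEM for at most two seconds.  Returns 0 on success, -1 on
   timeout or error.  */
static int
wait_up_to_two_seconds (sem_t *sem)
{
  struct timeval now;
  struct timespec deadline;

  gettimeofday (&now, NULL);
  deadline.tv_sec = now.tv_sec + 2;        /* absolute deadline: now + 2s */
  deadline.tv_nsec = now.tv_usec * 1000;

  if (sem_timedwait (sem, &deadline) == 0)
    return 0;                              /* semaphore acquired */

  if (errno == ETIMEDOUT)
    fprintf (stderr, "timed out waiting for semaphore\n");
  return -1;
}
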
diff --git a/sysdeps/alpha/dl-machine.h b/sysdeps/alpha/dl-machine.h
index ad79ef669c..b16e1c884c 100644
--- a/sysdeps/alpha/dl-machine.h
+++ b/sysdeps/alpha/dl-machine.h
@@ -289,37 +289,64 @@ _dl_start_user:
 	/* Store the highest stack address.  */
 	stq	$30, __libc_stack_end
 	/* See if we were run as a command with the executable file
-	   name as an extra leading argument.  If so, adjust the stack
-	   pointer to skip _dl_skip_args words.  */
+	   name as an extra leading argument.  */
 	ldl	$1, _dl_skip_args
-	beq	$1, 0f
-	ldq	$2, 0($sp)
-	subq	$2, $1, $2
-	s8addq	$1, $sp, $sp
-	stq	$2, 0($sp)
-	/* Load _dl_main_searchlist into s1 to pass to _dl_init_next.  */
-0:	ldq	$10, _dl_main_searchlist
-	/* Call _dl_init_next to return the address of an initializer
-	   function to run.  */
-1:	mov	$10, $16
-	jsr	$26, _dl_init_next
-	ldgp	$gp, 0($26)
-	beq	$0, 2f
-	mov	$0, $27
-	jsr	$26, ($0)
-	ldgp	$gp, 0($26)
-	br	1b
-2:	/* Clear the startup flag.  */
-	stl	$31, _dl_starting_up
+	bne	$1, $fixup_stack
+$fixup_stack_ret:
+	/* The special initializer gets called with the stack just
+	   as the application's entry point will see it; it can
+	   switch stacks if it moves these contents over.  */
+" RTLD_START_SPECIAL_INIT "
+	/* Call _dl_init(_dl_loaded, argc, argv, envp) to run initializers.  */
+	ldq	$16, _dl_loaded
+	ldq	$17, 0($sp)
+	lda	$18, 8($sp)
+	s8addq	$17, 8, $19
+	addq	$19, $18, $19
+	jsr	$26, _dl_init
 	/* Pass our finalizer function to the user in $0. */
 	lda	$0, _dl_fini
 	/* Jump to the user's entry point.  */
 	mov	$9, $27
 	jmp	($9)
+$fixup_stack:
+	/* Adjust the stack pointer to skip _dl_skip_args words.  This
+	   involves copying everything down, since the stack pointer must
+	   always be 16-byte aligned.  */
+	ldq	$2, 0($sp)
+	subq	$2, $1, $2
+	mov	$sp, $4
+	s8addq	$1, $sp, $3
+	stq	$2, 0($sp)
+	/* Copy down argv.  */
+0:	ldq	$5, 8($3)
+	addq	$4, 8, $4
+	addq	$3, 8, $3
+	stq	$5, 0($4)
+	bne	$5, 0b
+	/* Copy down envp.  */
+1:	ldq	$5, 8($3)
+	addq	$4, 8, $4
+	addq	$3, 8, $3
+	stq	$5, 0($4)
+	bne	$5, 1b
+	/* Copy down auxiliary table.  */
+2:	ldq	$5, 8($3)
+	ldq	$6, 16($3)
+	addq	$4, 16, $4
+	addq	$3, 16, $3
+	stq	$5, -8($4)
+	stq	$6, 0($4)
+	bne	$5, 2b
+	br	$fixup_stack_ret
 	.end _dl_start_user
 	.set noat
 .previous");
 
+#ifndef RTLD_START_SPECIAL_INIT
+#define RTLD_START_SPECIAL_INIT /* nothing */
+#endif
+
 /* Nonzero iff TYPE describes relocation of a PLT entry, so
    PLT entries should not be allowed to define the value.  */
 #define elf_machine_lookup_noplt_p(type)  ((type) == R_ALPHA_JMP_SLOT)
@@ -350,7 +377,7 @@ elf_machine_fixup_plt(struct link_map *l, const Elf64_Rela *reloc,
   /* Recover the PLT entry address by calculating reloc's index into the
      .rela.plt, and finding that entry in the .plt.  */
   rela_plt = (void *) D_PTR (l, l_info[DT_JMPREL]);
-  plte = (void *) (D_PTR (l, [DT_PLTGOT]) + 32);
+  plte = (void *) (D_PTR (l, l_info[DT_PLTGOT]) + 32);
   plte += 3 * (reloc - rela_plt);
 
   /* Find the displacement from the plt entry to the function.  */
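
A C-level restatement of the new $fixup_stack sequence may help: the leading _dl_skip_args words are dropped by copying argc, argv, envp, and the auxiliary vector down in place rather than by bumping $sp, so the required 16-byte stack alignment is preserved. The function below is an illustrative sketch, not glibc code; sp stands for the initial stack pointer and skip for the value of _dl_skip_args.

static void
fixup_stack_copy_down (long *sp, long skip)
{
  long *dst = sp;
  long *src = sp + skip;
  long a_type;

  /* argc shrinks by the number of skipped words.  */
  dst[0] = sp[0] - skip;

  /* Copy down argv, including its terminating NULL.  */
  do
    *++dst = *++src;
  while (*dst != 0);

  /* Copy down envp, including its terminating NULL.  */
  do
    *++dst = *++src;
  while (*dst != 0);

  /* Copy down the auxiliary vector, two words per entry, stopping once
     the AT_NULL entry (a_type == 0) has been copied.  */
  do
    {
      a_type = src[1];
      dst[1] = a_type;
      dst[2] = src[2];
      dst += 2;
      src += 2;
    }
  while (a_type != 0);
}
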
diff --git a/sysdeps/unix/sysv/linux/alpha/adjtime.c b/sysdeps/unix/sysv/linux/alpha/adjtime.c
index 63cc66daf2..560cb2771c 100644
--- a/sysdeps/unix/sysv/linux/alpha/adjtime.c
+++ b/sysdeps/unix/sysv/linux/alpha/adjtime.c
@@ -68,7 +68,7 @@ extern int ADJTIMEX (struct TIMEX *);
 #include <sysdeps/unix/sysv/linux/adjtime.c>
 
 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_1)
-compat_symbol (libc, __adjtime_tv32, adjtime, GLIBC_2.0);
+compat_symbol (libc, __adjtime_tv32, adjtime, GLIBC_2_0);
 #endif
 
 #undef TIMEVAL