about summary refs log tree commit diff
path: root/nptl/sysdeps/i386
diff options
context:
space:
mode:
Diffstat (limited to 'nptl/sysdeps/i386')
-rw-r--r--  nptl/sysdeps/i386/i686/bits/atomic.h           | 340
-rw-r--r--  nptl/sysdeps/i386/i686/pthread_spin_trylock.S  |  34
-rw-r--r--  nptl/sysdeps/i386/i686/tls.h                   |  36
-rw-r--r--  nptl/sysdeps/i386/pthread_sigmask.c            |  34
-rw-r--r--  nptl/sysdeps/i386/pthread_spin_destroy.c       |  29
-rw-r--r--  nptl/sysdeps/i386/pthread_spin_init.c          |  20
-rw-r--r--  nptl/sysdeps/i386/pthread_spin_lock.c          |  48
-rw-r--r--  nptl/sysdeps/i386/pthread_spin_unlock.S        |  32
-rw-r--r--  nptl/sysdeps/i386/pthreaddef.h                 |  55
-rw-r--r--  nptl/sysdeps/i386/tls.h                        | 332
10 files changed, 960 insertions, 0 deletions
diff --git a/nptl/sysdeps/i386/i686/bits/atomic.h b/nptl/sysdeps/i386/i686/bits/atomic.h
new file mode 100644
index 0000000000..7eb7573265
--- /dev/null
+++ b/nptl/sysdeps/i386/i686/bits/atomic.h
@@ -0,0 +1,340 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <stdint.h>
+
+
+/* Type aliases expected by the generic atomic-operation framework:
+   exact-width and "fast" variants for each operand size.  */
+typedef int8_t atomic8_t;
+typedef uint8_t uatomic8_t;
+typedef int_fast8_t atomic_fast8_t;
+typedef uint_fast8_t uatomic_fast8_t;
+
+typedef int16_t atomic16_t;
+typedef uint16_t uatomic16_t;
+typedef int_fast16_t atomic_fast16_t;
+typedef uint_fast16_t uatomic_fast16_t;
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+typedef int_fast32_t atomic_fast32_t;
+typedef uint_fast32_t uatomic_fast32_t;
+
+typedef int64_t atomic64_t;
+typedef uint64_t uatomic64_t;
+typedef int_fast64_t atomic_fast64_t;
+typedef uint_fast64_t uatomic_fast64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+
+/* The "lock" instruction prefix is only needed for SMP; a uniprocessor
+   (UP) build can omit it.  */
+#ifndef LOCK
+# ifdef UP
+#  define LOCK	/* nothing */
+# else
+#  define LOCK "lock;"
+# endif
+#endif
+
+
+/* Compare-and-exchange primitives for 1-, 2- and 4-byte objects.
+   NOTE(review): the result comes from SETNE, i.e. these evaluate to
+   *nonzero on failure* and zero on success; callers must compare
+   against 0, not treat the value as a boolean "succeeded".  */
+#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
+  ({ unsigned char ret;							      \
+     __asm __volatile (LOCK "cmpxchgb %2, %1; setne %0"			      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "q" (newval), "1" (*mem), "0" (oldval));	      \
+     ret; })
+
+#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
+  ({ unsigned char ret;							      \
+     __asm __volatile (LOCK "cmpxchgw %2, %1; setne %0"			      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "r" (newval), "1" (*mem), "0" (oldval));	      \
+     ret; })
+
+#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
+  ({ unsigned char ret;							      \
+     __asm __volatile (LOCK "cmpxchgl %2, %1; setne %0"			      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "r" (newval), "1" (*mem), "0" (oldval));	      \
+     ret; })
+
+/* XXX We do not really need 64-bit compare-and-exchange.  At least
+   not at the moment.  Using it would mean causing portability
+   problems since not many other 32-bit architectures have support for
+   such an operation.  So don't define any code for now.  If it is
+   really going to be used the code below can be used.  */
+#if 1
+# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+  (abort (), 0)
+#else
+/* NOTE(review): dead code.  "cmpxchg8b %2, %1" is written with two
+   operands, but CMPXCHG8B takes a single m64 operand; the "DS"
+   constraint in the PIC variant also looks suspect (should likely be
+   a plain register class).  Audit before ever enabling this path.  */
+# ifdef __PIC__
+#  define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+  ({ unsigned char ret;							      \
+     int ignore;							      \
+     __asm __volatile ("xchgl %3, %%ebx\n\t"				      \
+		       LOCK "cmpxchg8b %2, %1\n\t"			      \
+		       "setne %0\n\t"					      \
+		       "xchgl %3, %%ebx"				      \
+		       : "=a" (ret), "=m" (*mem), "=d" (ignore)		      \
+		       : "DS" (((unsigned long long int) (newval))	      \
+			       & 0xffffffff),				      \
+			 "c" (((unsigned long long int) (newval)) >> 32),     \
+			 "1" (*mem), "0" (((unsigned long long int) (oldval)) \
+					  & 0xffffffff),		      \
+			 "2" (((unsigned long long int) (oldval)) >> 32));    \
+     ret; })
+# else
+#  define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
+  ({ unsigned char ret;							      \
+     int ignore;							      \
+     __asm __volatile (LOCK "cmpxchg8b %2, %1; setne %0"		      \
+		       : "=a" (ret), "=m" (*mem), "=d" (ignore)		      \
+		       : "b" (((unsigned long long int) (newval))	      \
+			      & 0xffffffff),				      \
+			  "c" (((unsigned long long int) (newval)) >> 32),    \
+			 "1" (*mem), "0" (((unsigned long long int) (oldval)) \
+					  & 0xffffffff),		      \
+			 "2" (((unsigned long long int) (oldval)) >> 32));    \
+     ret; })
+# endif
+#endif
+
+
+/* Atomically add VALUE to *MEM and return the value *MEM held before
+   the addition.  Uses XADD for 1-, 2- and 4-byte objects.
+   NOTE(review): the 8-byte fallback loops "while (!CAS)", but the CAS
+   macros above return *nonzero on failure*, so the loop sense looks
+   inverted.  Currently unreachable because the 64-bit CAS is defined
+   as abort() -- confirm the sense before enabling 64-bit CAS.  */
+#define atomic_exchange_and_add(mem, value) \
+  ({ __typeof (*mem) result;						      \
+     if (sizeof (*mem) == 1)						      \
+       __asm __volatile (LOCK "xaddb %0, %1"				      \
+			 : "=r" (result), "=m" (*mem)			      \
+			 : "0" (value), "1" (*mem));			      \
+     else if (sizeof (*mem) == 2)					      \
+       __asm __volatile (LOCK "xaddw %0, %1"				      \
+			 : "=r" (result), "=m" (*mem)			      \
+			 : "0" (value), "1" (*mem));			      \
+     else if (sizeof (*mem) == 4)					      \
+       __asm __volatile (LOCK "xaddl %0, %1"				      \
+			 : "=r" (result), "=m" (*mem)			      \
+			 : "0" (value), "1" (*mem));			      \
+     else								      \
+       {								      \
+	 __typeof (value) addval = (value);				      \
+	 __typeof (*mem) oldval;					      \
+	 __typeof (mem) memp = (mem);					      \
+	 do								      \
+	   result = (oldval = *memp) + addval;				      \
+	 while (! __arch_compare_and_exchange_64_acq (memp, result, oldval)); \
+       }								      \
+     result; })
+
+
+/* Atomically add VALUE to *MEM.  Compile-time constants +1 and -1 are
+   dispatched to the cheaper increment/decrement operations.  BUGFIX:
+   the decrement branch previously repeated the test "(value) == 1",
+   so a constant -1 never reached atomic_decrement; it must test -1.  */
+#define atomic_add(mem, value) \
+  (void) ({ if (__builtin_constant_p (value) && (value) == 1)		      \
+	      atomic_increment (mem);					      \
+	    else if (__builtin_constant_p (value) && (value) == -1)	      \
+	      atomic_decrement (mem);					      \
+	    else if (sizeof (*mem) == 1)				      \
+	      __asm __volatile (LOCK "addb %1, %0"			      \
+				: "=m" (*mem)				      \
+				: "ir" (value), "0" (*mem));		      \
+	    else if (sizeof (*mem) == 2)				      \
+	      __asm __volatile (LOCK "addw %1, %0"			      \
+				: "=m" (*mem)				      \
+				: "ir" (value), "0" (*mem));		      \
+	    else if (sizeof (*mem) == 4)				      \
+	      __asm __volatile (LOCK "addl %1, %0"			      \
+				: "=m" (*mem)				      \
+				: "ir" (value), "0" (*mem));		      \
+	    else							      \
+	      {								      \
+		__typeof (value) addval = (value);			      \
+		__typeof (*mem) oldval;					      \
+		__typeof (mem) memp = (mem);				      \
+		do							      \
+		  oldval = *memp;					      \
+		while (! __arch_compare_and_exchange_64_acq (memp,	      \
+							     oldval + addval, \
+							     oldval));	      \
+	      }								      \
+	    })
+
+
+/* Atomically add VALUE to *MEM; evaluates to nonzero iff the new value
+   of *MEM is negative (SF after the ADD).  Aborts for 8-byte objects.  */
+#define atomic_add_negative(mem, value) \
+  ({ unsigned char __result;						      \
+     if (sizeof (*mem) == 1)						      \
+       __asm __volatile (LOCK "addb %2, %0; sets %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "ir" (value), "0" (*mem));			      \
+     else if (sizeof (*mem) == 2)					      \
+       __asm __volatile (LOCK "addw %2, %0; sets %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "ir" (value), "0" (*mem));			      \
+     else if (sizeof (*mem) == 4)					      \
+       __asm __volatile (LOCK "addl %2, %0; sets %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "ir" (value), "0" (*mem));			      \
+     else								      \
+       abort ();							      \
+     __result; })
+
+
+/* Atomically add VALUE to *MEM; evaluates to nonzero iff the new value
+   of *MEM is zero (ZF after the ADD).  Aborts for 8-byte objects.  */
+#define atomic_add_zero(mem, value) \
+  ({ unsigned char __result;						      \
+     if (sizeof (*mem) == 1)						      \
+       __asm __volatile (LOCK "addb %2, %0; setz %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "ir" (value), "0" (*mem));			      \
+     else if (sizeof (*mem) == 2)					      \
+       __asm __volatile (LOCK "addw %2, %0; setz %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "ir" (value), "0" (*mem));			      \
+     else if (sizeof (*mem) == 4)					      \
+       __asm __volatile (LOCK "addl %2, %0; setz %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "ir" (value), "0" (*mem));			      \
+     else								      \
+       abort ();							      \
+     __result; })
+
+
+/* Atomically add 1 to *MEM.  The 8-byte case falls back to the 64-bit
+   CAS loop (which currently aborts; see note at
+   __arch_compare_and_exchange_64_acq regarding the loop sense).  */
+#define atomic_increment(mem) \
+  (void) ({ if (sizeof (*mem) == 1)					      \
+	      __asm __volatile (LOCK "incb %0"				      \
+				: "=m" (*mem)				      \
+				: "0" (*mem));				      \
+	    else if (sizeof (*mem) == 2)				      \
+	      __asm __volatile (LOCK "incw %0"				      \
+				: "=m" (*mem)				      \
+				: "0" (*mem));				      \
+	    else if (sizeof (*mem) == 4)				      \
+	      __asm __volatile (LOCK "incl %0"				      \
+				: "=m" (*mem)				      \
+				: "0" (*mem));				      \
+	    else							      \
+	      {								      \
+		__typeof (*mem) oldval;					      \
+		__typeof (mem) memp = (mem);				      \
+		do							      \
+		  oldval = *memp;					      \
+		while (! __arch_compare_and_exchange_64_acq (memp,	      \
+							     oldval + 1,      \
+							     oldval));	      \
+	      }								      \
+	    })
+
+
+/* Atomically add 1 to *MEM; evaluates to nonzero iff the new value is
+   zero.  Aborts for 8-byte objects.  */
+#define atomic_increment_and_test(mem) \
+  ({ unsigned char __result;						      \
+     if (sizeof (*mem) == 1)						      \
+       __asm __volatile (LOCK "incb %0; sete %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "0" (*mem));					      \
+     else if (sizeof (*mem) == 2)					      \
+       __asm __volatile (LOCK "incw %0; sete %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "0" (*mem));					      \
+     else if (sizeof (*mem) == 4)					      \
+       __asm __volatile (LOCK "incl %0; sete %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "0" (*mem));					      \
+     else								      \
+       abort ();							      \
+     __result; })
+
+
+/* Atomically subtract 1 from *MEM.  8-byte case as for
+   atomic_increment.  */
+#define atomic_decrement(mem) \
+  (void) ({ if (sizeof (*mem) == 1)					      \
+	      __asm __volatile (LOCK "decb %0"				      \
+				: "=m" (*mem)				      \
+				: "0" (*mem));				      \
+	    else if (sizeof (*mem) == 2)				      \
+	      __asm __volatile (LOCK "decw %0"				      \
+				: "=m" (*mem)				      \
+				: "0" (*mem));				      \
+	    else if (sizeof (*mem) == 4)				      \
+	      __asm __volatile (LOCK "decl %0"				      \
+				: "=m" (*mem)				      \
+				: "0" (*mem));				      \
+	    else							      \
+	      {								      \
+		__typeof (*mem) oldval;					      \
+		__typeof (mem) memp = (mem);				      \
+		do							      \
+		  oldval = *memp;					      \
+		while (! __arch_compare_and_exchange_64_acq (memp,	      \
+							     oldval - 1,      \
+							     oldval));	      \
+	      }								      \
+	    })
+
+
+/* Atomically subtract 1 from *MEM; evaluates to nonzero iff the new
+   value is zero.  Aborts for 8-byte objects.  */
+#define atomic_decrement_and_test(mem) \
+  ({ unsigned char __result;						      \
+     if (sizeof (*mem) == 1)						      \
+       __asm __volatile (LOCK "decb %0; sete %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "0" (*mem));					      \
+     else if (sizeof (*mem) == 2)					      \
+       __asm __volatile (LOCK "decw %0; sete %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "0" (*mem));					      \
+     else if (sizeof (*mem) == 4)					      \
+       __asm __volatile (LOCK "decl %0; sete %1"			      \
+			 : "=m" (*mem), "=qm" (__result)		      \
+			 : "0" (*mem));					      \
+     else								      \
+       abort ();							      \
+     __result; })
+
+
+/* Atomically OR (1 << BIT) into *MEM.  NOTE(review): the "i"
+   constraint requires BIT to be a compile-time constant -- confirm no
+   caller passes a runtime bit number.  Aborts for 8-byte objects.  */
+#define atomic_bit_set(mem, bit) \
+  (void) ({ if (sizeof (*mem) == 1)					      \
+	      __asm __volatile (LOCK "orb %2, %0"			      \
+				: "=m" (*mem)				      \
+				: "0" (*mem), "i" (1 << (bit)));	      \
+	    else if (sizeof (*mem) == 2)				      \
+	      __asm __volatile (LOCK "orw %2, %0"			      \
+				: "=m" (*mem)				      \
+				: "0" (*mem), "i" (1 << (bit)));	      \
+	    else if (sizeof (*mem) == 4)				      \
+	      __asm __volatile (LOCK "orl %2, %0"			      \
+				: "=m" (*mem)				      \
+				: "0" (*mem), "i" (1 << (bit)));	      \
+	    else							      \
+	      abort ();							      \
+	    })
+
+
+/* Atomically set bit BIT of *MEM and evaluate to its previous value
+   (via BTS + SETC).  BIT must again be a compile-time constant.  */
+#define atomic_bit_test_set(mem, bit) \
+  ({ unsigned char __result;						      \
+     if (sizeof (*mem) == 1)						      \
+       __asm __volatile (LOCK "btsb %3, %1; setc %0"			      \
+			 : "=q" (__result), "=m" (*mem)			      \
+			 : "1" (*mem), "i" (bit));			      \
+     else if (sizeof (*mem) == 2)					      \
+       __asm __volatile (LOCK "btsw %3, %1; setc %0"			      \
+			 : "=q" (__result), "=m" (*mem)			      \
+			 : "1" (*mem), "i" (bit));			      \
+     else if (sizeof (*mem) == 4)					      \
+       __asm __volatile (LOCK "btsl %3, %1; setc %0"			      \
+			 : "=q" (__result), "=m" (*mem)			      \
+			 : "1" (*mem), "i" (bit));			      \
+     else							      	      \
+       abort ();							      \
+     __result; })
diff --git a/nptl/sysdeps/i386/i686/pthread_spin_trylock.S b/nptl/sysdeps/i386/i686/pthread_spin_trylock.S
new file mode 100644
index 0000000000..881976c4fe
--- /dev/null
+++ b/nptl/sysdeps/i386/i686/pthread_spin_trylock.S
@@ -0,0 +1,34 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Kernel value of EBUSY; <errno.h> is not usable from plain assembly.  */
+#define EBUSY	16
+
+	.globl	pthread_spin_trylock
+	.type	pthread_spin_trylock,@function
+	.align	16
+/* Lock convention: 1 == free, <= 0 == taken.  */
+pthread_spin_trylock:
+	movl	4(%esp), %edx		/* EDX = lock pointer.  */
+	movl	$1, %eax		/* EAX = expected "free" value.  */
+	xorl	%ecx, %ecx		/* ECX = 0 = "taken" value.  */
+	cmpxchgl %ecx, (%edx)		/* Take lock iff *lock == 1 (sets ZF).  */
+	movl	$EBUSY, %ecx
+	movl	$0, %eax		/* MOV does not clobber flags.  */
+	cmovne	%ecx, %eax		/* Return EBUSY if the CAS failed.  */
+	ret
+	.size	pthread_spin_trylock,.-pthread_spin_trylock
diff --git a/nptl/sysdeps/i386/i686/tls.h b/nptl/sysdeps/i386/i686/tls.h
new file mode 100644
index 0000000000..4025ed8d21
--- /dev/null
+++ b/nptl/sysdeps/i386/i686/tls.h
@@ -0,0 +1,36 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef _TLS_H
+
+/* Additional definitions for <tls.h> on i686 and up.  */
+
+
+/* Macros to load from and store into segment registers.  We can use
+   the 32-bit instructions (moving a segment register with MOVL is
+   valid on i686; the generic i386 version below uses MOVW).  These
+   are defined before including ../tls.h so its #ifndef defaults do
+   not kick in.  */
+#define TLS_GET_GS() \
+  ({ int __seg; __asm ("movl %%gs, %0" : "=q" (__seg)); __seg; })
+#define TLS_SET_GS(val) \
+  __asm ("movl %0, %%gs" :: "q" (val))
+
+
+/* Get the full set of definitions.  */
+#include "../tls.h"
+
+#endif	/* tls.h */
diff --git a/nptl/sysdeps/i386/pthread_sigmask.c b/nptl/sysdeps/i386/pthread_sigmask.c
new file mode 100644
index 0000000000..2ae9198c02
--- /dev/null
+++ b/nptl/sysdeps/i386/pthread_sigmask.c
@@ -0,0 +1,34 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <signal.h>
+#include <pthreadP.h>
+#include <tls.h>
+#include <sysdep.h>
+
+
+/* Change the signal mask of the calling thread by forwarding directly
+   to the sigprocmask system call (which on Linux operates per-thread).
+   NOTE(review): NEWMASK is passed through unfiltered; nothing stops a
+   caller from blocking implementation-internal signals such as
+   SIGCANCEL -- confirm whether those should be masked out here.  */
+int
+pthread_sigmask (how, newmask, oldmask)
+     int how;
+     const sigset_t *newmask;
+     sigset_t *oldmask;
+{
+  return INLINE_SYSCALL (sigprocmask, 3, how, newmask, oldmask);
+}
diff --git a/nptl/sysdeps/i386/pthread_spin_destroy.c b/nptl/sysdeps/i386/pthread_spin_destroy.c
new file mode 100644
index 0000000000..4d0109cf02
--- /dev/null
+++ b/nptl/sysdeps/i386/pthread_spin_destroy.c
@@ -0,0 +1,29 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include "pthreadP.h"
+
+
+/* Destroy a spin lock.  The i386 implementation keeps no state outside
+   the lock word itself, so there is nothing to release.  */
+int
+pthread_spin_destroy (lock)
+     pthread_spinlock_t *lock;
+{
+  /* Nothing to do.  */
+  return 0;
+}
diff --git a/nptl/sysdeps/i386/pthread_spin_init.c b/nptl/sysdeps/i386/pthread_spin_init.c
new file mode 100644
index 0000000000..0a47981aa2
--- /dev/null
+++ b/nptl/sysdeps/i386/pthread_spin_init.c
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Not needed.  pthread_spin_init is an alias for pthread_spin_unlock.  */
diff --git a/nptl/sysdeps/i386/pthread_spin_lock.c b/nptl/sysdeps/i386/pthread_spin_lock.c
new file mode 100644
index 0000000000..43a1831131
--- /dev/null
+++ b/nptl/sysdeps/i386/pthread_spin_lock.c
@@ -0,0 +1,48 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include "pthreadP.h"
+
+
+/* "lock" prefix only needed on SMP.  */
+#ifdef UP
+# define LOCK
+#else
+# define LOCK "lock;"
+#endif
+
+
+/* Acquire the spin lock.  Lock word convention: 1 == free, <= 0 ==
+   taken.  Fast path: one locked DECL; it leaves 0 (ZF set) iff the
+   lock was free.  The contention loop is placed in a subsection so the
+   uncontended path stays compact, and spins with plain reads ("rep;
+   nop" is the PAUSE instruction) until the word goes positive again.  */
+int
+pthread_spin_lock (lock)
+     pthread_spinlock_t *lock;
+{
+  asm ("\n"
+       "1:\t" LOCK "decl %0\n\t"
+       "jne 2f\n\t"
+       ".subsection 1\n\t"
+       ".align 16\n"
+       "2:\trep; nop\n\t"
+       "cmpl $0, %0\n\t"
+       "jg 1b\n\t"		/* Word > 0 again: retry the locked DECL.  */
+       "jmp 2b\n\t"
+       ".previous"
+       : "=m" (*lock)
+       : "0" (*lock));
+
+  return 0;
+}
diff --git a/nptl/sysdeps/i386/pthread_spin_unlock.S b/nptl/sysdeps/i386/pthread_spin_unlock.S
new file mode 100644
index 0000000000..d94f1e7b8c
--- /dev/null
+++ b/nptl/sysdeps/i386/pthread_spin_unlock.S
@@ -0,0 +1,32 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+	.globl	pthread_spin_unlock
+	.type	pthread_spin_unlock,@function
+	.align	16
+/* Release the lock by storing the "free" value 1; a plain store
+   suffices on IA-32, no locked instruction needed.  Always returns 0.  */
+pthread_spin_unlock:
+	movl	4(%esp), %eax
+	movl	$1, (%eax)		/* 1 == unlocked.  */
+	xorl	%eax, %eax		/* Return 0.  */
+	ret
+	.size	pthread_spin_unlock,.-pthread_spin_unlock
+
+	/* The implementation of pthread_spin_init is identical:
+	   initializing is the same store of the "free" value.  */
+	.globl	pthread_spin_init
+pthread_spin_init = pthread_spin_unlock
diff --git a/nptl/sysdeps/i386/pthreaddef.h b/nptl/sysdeps/i386/pthreaddef.h
new file mode 100644
index 0000000000..6efa1b6d82
--- /dev/null
+++ b/nptl/sysdeps/i386/pthreaddef.h
@@ -0,0 +1,55 @@
+/* Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* Default stack size.  */
+#define ARCH_STACK_DEFAULT_SIZE	(2 * 1024 * 1024)
+
+/* Required stack pointer alignment at beginning.  SSE requires 16
+   bytes.  */
+#define STACK_ALIGN		16
+
+/* Minimal stack size after allocating thread descriptor and guard size.  */
+#define MINIMAL_REST_STACK	2048
+
+/* Alignment requirement for TCB.  */
+#define TCB_ALIGNMENT		16
+
+/* The signal used for asynchronous cancelation.  */
+#define SIGCANCEL		__SIGRTMIN
+
+
+/* Location of current stack frame.  */
+#define CURRENT_STACK_FRAME	__builtin_frame_address (0)
+
+
+/* XXX Until we have a better place keep the definitions here.  */
+
+/* Terminate the current thread via a raw "int $0x80" exit syscall;
+   never returns.  While there is no dedicated thread-exit syscall.
+   NOTE(review): both asm forms modify EBX (the PIC register) without
+   listing it as an operand or clobber -- confirm this is safe in PIC
+   builds.  */
+#define __exit_thread_inline(val) \
+  while (1) {								      \
+    if (__builtin_constant_p (val) && (val) == 0)			      \
+      asm volatile ("xorl %%ebx, %%ebx; int $0x80" :: "a" (__NR_exit));	      \
+    else								      \
+      asm volatile ("movl %1, %%ebx; int $0x80"				      \
+		    :: "a" (__NR_exit), "r" (val));			      \
+  }
+
+/* Raw gettid syscall; avoids the libc wrapper and errno handling.  */
+#define gettid()							      \
+  ({ int tid;								      \
+     __asm __volatile ("int $0x80" : "=a" (tid) : "0" (__NR_gettid));	      \
+     tid; })
diff --git a/nptl/sysdeps/i386/tls.h b/nptl/sysdeps/i386/tls.h
new file mode 100644
index 0000000000..244a487bcb
--- /dev/null
+++ b/nptl/sysdeps/i386/tls.h
@@ -0,0 +1,332 @@
+/* Definition for thread-local data handling.  linuxthreads/i386 version.
+   Copyright (C) 2002 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef _TLS_H
+#define _TLS_H	1
+
+#ifndef __ASSEMBLER__
+# include <stddef.h>
+# include <stdint.h>
+
+
+/* Type for one entry of the dtv (dynamic thread vector): either a
+   module counter (slot -1) or a pointer to a module's TLS block.  */
+typedef union dtv
+{
+  size_t counter;
+  void *pointer;
+} dtv_t;
+
+
+/* Minimal TCB header the dynamic linker relies on; the full thread
+   descriptor (struct pthread) starts at the same address.  */
+typedef struct
+{
+  void *tcb;		/* Pointer to the TCB.  Not necessarily the
+			   thread descriptor used by libpthread.  */
+  dtv_t *dtv;
+  void *self;		/* Pointer to the thread descriptor.  */
+} tcbhead_t;
+#endif
+
+
+/* We require TLS support in the tools.  */
+#ifndef HAVE_TLS_SUPPORT
+# error "TLS support is required."
+#endif
+
+/* Signal that TLS support is available.  */
+#define USE_TLS	1
+
+/* Alignment requirement for the stack.  For IA-32 this is governed by
+   the SSE memory functions.  */
+#define STACK_ALIGN	16
+
+
+#ifndef __ASSEMBLER__
+/* Get system call information.  */
+# include <sysdep.h>
+
+/* The old way: using LDT.  */
+
+/* Structure passed to `modify_ldt', 'set_thread_area', and 'clone'
+   calls.  Layout must match the kernel's struct user_desc exactly.  */
+struct user_desc
+{
+  unsigned int entry_number;
+  unsigned long int base_addr;
+  unsigned int limit;
+  unsigned int seg_32bit:1;
+  unsigned int contents:2;
+  unsigned int read_exec_only:1;
+  unsigned int limit_in_pages:1;
+  unsigned int seg_not_present:1;
+  unsigned int useable:1;
+  unsigned int empty:25;
+};
+
+/* Initializing bit fields is slow.  We speed it up by using a union
+   whose vals[] words overlay the struct, letting TLS_INIT_TP store
+   whole 32-bit words instead of individual bit fields.  */
+union user_desc_init
+{
+  struct user_desc desc;
+  unsigned int vals[4];
+};
+
+
+/* Get the thread descriptor definition.  */
+# include <nptl/descr.h>
+
+/* This is the size of the initial TCB.  */
+# define TLS_INIT_TCB_SIZE sizeof (tcbhead_t)
+
+/* Alignment requirements for the initial TCB.  */
+# define TLS_INIT_TCB_ALIGN __alignof__ (tcbhead_t)
+
+/* This is the size of the TCB.  */
+# define TLS_TCB_SIZE sizeof (struct pthread)
+
+/* Alignment requirements for the TCB.  */
+# define TLS_TCB_ALIGN __alignof__ (struct pthread)
+
+/* The TCB can have any size and the memory following the address the
+   thread pointer points to is unspecified.  Allocate the TCB there.  */
+# define TLS_TCB_AT_TP	1
+
+
+/* Install the dtv pointer.  The pointer passed is to the element with
+   index -1 which contain the length.  */
+# define INSTALL_DTV(descr, dtvp) \
+  ((tcbhead_t *) (descr))->dtv = dtvp + 1
+
+/* Install new dtv for current thread.  */
+# define INSTALL_NEW_DTV(dtv) \
+  ({ struct pthread *__pd;						      \
+     THREAD_SETMEM (__pd, header.data.dtvp, dtv); })
+
+/* Return dtv of given thread descriptor.  */
+# define GET_DTV(descr) \
+  (((tcbhead_t *) (descr))->dtv)
+
+
+/* Macros to load from and store into segment registers.  Generic i386
+   versions using 16-bit moves; <i686/tls.h> overrides these with the
+   32-bit forms before including this file.  */
+# ifndef TLS_GET_GS
+#  define TLS_GET_GS() \
+  ({ int __seg; __asm ("movw %%gs, %w0" : "=q" (__seg)); __seg & 0xffff; })
+# endif
+# ifndef TLS_SET_GS
+#  define TLS_SET_GS(val) \
+  __asm ("movw %w0, %%gs" :: "q" (val))
+# endif
+
+
+/* Fallback syscall number for kernels/headers predating
+   set_thread_area.  */
+# ifndef __NR_set_thread_area
+#  define __NR_set_thread_area 243
+# endif
+# ifndef TLS_FLAG_WRITABLE
+#  define TLS_FLAG_WRITABLE		0x00000001
+# endif
+
+// XXX Enable for the real world.
+#if 0
+# ifndef __ASSUME_SET_THREAD_AREA
+#  error "we need set_thread_area"
+# endif
+#endif
+
+/* In PIC code EBX holds the GOT pointer and cannot be named as an asm
+   input, so the syscall argument is swapped in and out with XCHG.  */
+# ifdef __PIC__
+#  define TLS_EBX_ARG "r"
+#  define TLS_LOAD_EBX "xchgl %3, %%ebx\n\t"
+# else
+#  define TLS_EBX_ARG "b"
+#  define TLS_LOAD_EBX
+# endif
+
+/* Code to initially initialize the thread pointer.  This might need
+   special attention since 'errno' is not yet available and if the
+   operation can cause a failure 'errno' must not be touched.
+   Evaluates to 0 on success, or the raw (negative) kernel error from
+   set_thread_area on failure -- errno is deliberately not set.  */
+# define TLS_INIT_TP(thrdescr, secondcall) \
+  ({ void *_thrdescr = (thrdescr);					      \
+     tcbhead_t *_head = _thrdescr;					      \
+     union user_desc_init _segdescr;					      \
+     int _result;							      \
+									      \
+     _head->tcb = _thrdescr;						      \
+     /* For now the thread descriptor is at the same address.  */	      \
+     _head->self = _thrdescr;						      \
+									      \
+     /* The 'entry_number' field.  Let the kernel pick a value, or on  \
+	the second call reuse the GDT slot already in %gs.  */	      \
+     if (secondcall)							      \
+       _segdescr.vals[0] = TLS_GET_GS () >> 3;				      \
+     else								      \
+       _segdescr.vals[0] = -1;						      \
+     /* The 'base_addr' field.  Pointer to the TCB.  */			      \
+     _segdescr.vals[1] = (unsigned long int) _thrdescr;			      \
+     /* The 'limit' field.  We use 4GB which is 0xfffff pages.  */	      \
+     _segdescr.vals[2] = 0xfffff;					      \
+     /* Collapsed value of the bitfield:				      \
+	  .seg_32bit = 1						      \
+	  .contents = 0							      \
+	  .read_exec_only = 0						      \
+	  .limit_in_pages = 1						      \
+	  .seg_not_present = 0						      \
+	  .useable = 1 */						      \
+     _segdescr.vals[3] = 0x51;						      \
+									      \
+     /* Install the TLS.  Raw int $0x80 so errno stays untouched.  */	      \
+     asm volatile (TLS_LOAD_EBX						      \
+		   "int $0x80\n\t"					      \
+		   TLS_LOAD_EBX						      \
+		   : "=a" (_result), "=m" (_segdescr.desc.entry_number)	      \
+		   : "0" (__NR_set_thread_area),			      \
+		     TLS_EBX_ARG (&_segdescr.desc), "m" (_segdescr.desc));    \
+									      \
+     if (_result == 0)							      \
+       /* We know the index in the GDT, now load the segment register.	      \
+	  The use of the GDT is described by the value 3 in the lower	      \
+	  three bits of the segment descriptor value.			      \
+									      \
+	  Note that we have to do this even if the numeric value of	      \
+	  the descriptor does not change.  Loading the segment register	      \
+	  causes the segment information from the GDT to be loaded	      \
+	  which is necessary since we have changed it.   */		      \
+       TLS_SET_GS (_segdescr.desc.entry_number * 8 + 3);		      \
+									      \
+     _result; })
+
+
+/* Return the address of the dtv for the current thread.  Reads the
+   header.data.dtvp field of the current thread's descriptor through
+   the %gs segment (via THREAD_GETMEM).  */
+# define THREAD_DTV() \
+  ({ struct pthread *__pd;						      \
+     THREAD_GETMEM (__pd, header.data.dtvp); })
+
+
+/* Return the thread descriptor for the current thread.  Loads the
+   header.data.self field through the %gs segment, i.e. from the TCB
+   that TLS_INIT_TP installed.
+
+   The contained asm must *not* be marked volatile since otherwise
+   assignments like
+        pthread_descr self = thread_self();
+   do not get optimized away.  */
+# define THREAD_SELF \
+  ({ struct pthread *__self;						      \
+     asm ("movl %%gs:%c1,%0" : "=r" (__self)				      \
+	  : "i" (offsetof (struct pthread, header.data.self))); 	      \
+     __self;})
+
+
+/* Read member of the thread descriptor directly.  The member offset
+   must be a compile-time constant ("i" constraint); the load goes
+   through the %gs segment, so DESCR itself is not actually
+   dereferenced — the value comes from the current thread's descriptor.
+   Dispatches on the member size: only 1-, 4- and 8-byte members are
+   supported (8-byte values are read as two 4-byte loads into
+   %eax/%edx, the "A" register pair).  For the 1-byte case the
+   "0" (0) input ties the output register to a zero-initialized value
+   so the bits above the loaded byte are well defined.  */
+# define THREAD_GETMEM(descr, member) \
+  ({ __typeof (descr->member) __value;					      \
+     if (sizeof (__value) == 1)						      \
+       asm ("movb %%gs:%P2,%b0"						      \
+	    : "=q" (__value)						      \
+	    : "0" (0), "i" (offsetof (struct pthread, member)));	      \
+     else if (sizeof (__value) == 4)					      \
+       asm ("movl %%gs:%P1,%0"						      \
+	    : "=r" (__value)						      \
+	    : "i" (offsetof (struct pthread, member)));			      \
+     else								      \
+       {								      \
+	 if (sizeof (__value) != 8)					      \
+	   /* There should not be any value with a size other than 1,	      \
+	      4 or 8.  */						      \
+	   abort ();							      \
+									      \
+	 asm ("movl %%gs:%P1,%%eax\n\t"					      \
+	      "movl %%gs:%P2,%%edx"					      \
+	      : "=A" (__value)						      \
+	      : "i" (offsetof (struct pthread, member)),		      \
+		"i" (offsetof (struct pthread, member) + 4));		      \
+       }								      \
+     __value; })
+
+
+/* Same as THREAD_GETMEM, but the member offset can be non-constant:
+   the offset is passed in a register and used with a %gs-relative
+   indirect addressing mode.  For the 8-byte case the output uses the
+   early-clobber "=&A" constraint because %eax is written by the first
+   load while the offset register is still needed for the second.  */
+# define THREAD_GETMEM_NC(descr, member) \
+  ({ __typeof (descr->member) __value;					      \
+     if (sizeof (__value) == 1)						      \
+       asm ("movb %%gs:(%2),%b0"					      \
+	    : "=q" (__value)						      \
+	    : "0" (0), "r" (offsetof (struct pthread, member)));	      \
+     else if (sizeof (__value) == 4)					      \
+       asm ("movl %%gs:(%1),%0"						      \
+	    : "=r" (__value)						      \
+	    : "r" (offsetof (struct pthread, member)));			      \
+     else								      \
+       {								      \
+	 if (sizeof (__value) != 8)					      \
+	   /* There should not be any value with a size other than 1,	      \
+	      4 or 8.  */						      \
+	   abort ();							      \
+									      \
+	 asm ("movl %%gs:(%1),%%eax\n\t"				      \
+	      "movl %%gs:4(%1),%%edx"					      \
+	      : "=&A" (__value)						      \
+	      : "r" (offsetof (struct pthread, member)));		      \
+       }								      \
+     __value; })
+
+
+/* Set member of the thread descriptor directly.  (The previous comment
+   here — "Same as THREAD_SETMEM, but the member offset can be
+   non-constant" — belonged on THREAD_SETMEM_NC below; the two were
+   swapped.)  The member offset must be a compile-time constant; the
+   store goes through the %gs segment, so DESCR itself is not
+   dereferenced — VALUE is written into the current thread's
+   descriptor.  Only 1-, 4- and 8-byte members are supported; 8-byte
+   values are passed in the %eax/%edx ("A") register pair and stored
+   with two 4-byte moves.  */
+# define THREAD_SETMEM(descr, member, value) \
+  ({ if (sizeof (value) == 1)						      \
+       asm volatile ("movb %0,%%gs:%P1" :				      \
+		     : "iq" (value),					      \
+		       "i" (offsetof (struct pthread, member)));	      \
+     else if (sizeof (value) == 4)					      \
+       asm volatile ("movl %0,%%gs:%P1" :				      \
+		     : "ir" (value),					      \
+		       "i" (offsetof (struct pthread, member)));	      \
+     else								      \
+       {								      \
+	 if (sizeof (value) != 8)					      \
+	   /* There should not be any value with a size other than 1,	      \
+	      4 or 8.  */						      \
+	   abort ();							      \
+									      \
+	 /* Was "\n\n" (stray blank line in the emitted assembly);	      \
+	    use the usual "\n\t" instruction separator.  */		      \
+	 asm volatile ("movl %%eax,%%gs:%P1\n\t"			      \
+		       "movl %%edx,%%gs:%P2" :				      \
+		       : "A" (value),					      \
+			 "i" (offsetof (struct pthread, member)),	      \
+			 "i" (offsetof (struct pthread, member) + 4));	      \
+       }})
+
+
+/* Same as THREAD_SETMEM, but the member offset can be non-constant:
+   the offset is passed in a register and used with a %gs-relative
+   indirect addressing mode.  (The previous comment here — "Set member
+   of the thread descriptor directly" — belonged on THREAD_SETMEM
+   above; the two were swapped.)  */
+# define THREAD_SETMEM_NC(descr, member, value) \
+  ({ if (sizeof (value) == 1)						      \
+       asm volatile ("movb %0,%%gs:(%1)" :				      \
+		     : "iq" (value),					      \
+		       "r" (offsetof (struct pthread, member)));	      \
+     else if (sizeof (value) == 4)					      \
+       asm volatile ("movl %0,%%gs:(%1)" :				      \
+		     : "ir" (value),					      \
+		       "r" (offsetof (struct pthread, member)));	      \
+     else								      \
+       {								      \
+	 if (sizeof (value) != 8)					      \
+	   /* There should not be any value with a size other than 1,	      \
+	      4 or 8.  */						      \
+	   abort ();							      \
+									      \
+	 asm volatile ("movl %%eax,%%gs:(%1)\n\t"			      \
+		       "movl %%edx,%%gs:4(%1)" :			      \
+		       : "A" (value),					      \
+			 "r" (offsetof (struct pthread, member)));	      \
+       }})
+
+
+#endif /* __ASSEMBLER__ */
+
+#endif	/* tls.h */