summary refs log tree commit diff
path: root/linuxthreads
diff options
context:
space:
mode:
author	Ulrich Drepper <drepper@redhat.com>	2002-08-20 11:10:37 +0000
committer	Ulrich Drepper <drepper@redhat.com>	2002-08-20 11:10:37 +0000
commit	69681bdee0226cef026ab761738373955393422b (patch)
tree	d104408a000f02c27dcbc06f2855c83e78e6b17e /linuxthreads
parent	d78ce054e2a3b23fed53f42ea7aa6e1c2ff6d748 (diff)
download	glibc-69681bdee0226cef026ab761738373955393422b.tar.gz
	glibc-69681bdee0226cef026ab761738373955393422b.tar.xz
	glibc-69681bdee0226cef026ab761738373955393422b.zip
Update.
	* include/unistd.h: Don't hide _exit.
Diffstat (limited to 'linuxthreads')
-rw-r--r--linuxthreads/ChangeLog6
-rw-r--r--linuxthreads/sysdeps/i386/tls.h6
-rw-r--r--linuxthreads/sysdeps/i386/useldt.h14
3 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index 93a71d74e6..86848a685e 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,5 +1,11 @@
 2002-08-20  Ulrich Drepper  <drepper@redhat.com>
 
+	* sysdeps/i386/tls.h: Use 32-bit operations when handling segment
+	registers.  No need to mask upper 16 bits in this case.
+	* sysdeps/i386/useldt.h: Likewise.
+	(DO_SET_THREAD_AREA): We have to load %gs again even if the value
+	is the same since the GDT content changed.
+
 	* sysdeps/i386/tls.h (TLS_INIT_TP): Add new parameter and pass it on
 	to TLS_SETUP_GS_SEGMENT.
 	(TLS_SETUP_GS_SEGMENT): Add new parameter and pass it on to
diff --git a/linuxthreads/sysdeps/i386/tls.h b/linuxthreads/sysdeps/i386/tls.h
index 098b11caa0..39b4b63da3 100644
--- a/linuxthreads/sysdeps/i386/tls.h
+++ b/linuxthreads/sysdeps/i386/tls.h
@@ -123,8 +123,8 @@ typedef struct
   int result;								      \
   if (!firstcall)							      \
     ldt_entry.entry_number = ({ int _gs;				      \
-				asm ("movw %%gs, %w0" : "=q" (_gs));	      \
-				(_gs & 0xffff) >> 3; });		      \
+				asm ("movl %%gs, %0" : "=q" (_gs));	      \
+				_gs >> 3; });				      \
   asm volatile (TLS_LOAD_EBX						      \
 		"int $0x80\n\t"						      \
 		TLS_LOAD_EBX						      \
@@ -164,7 +164,7 @@ typedef struct
     __gs = TLS_SETUP_GS_SEGMENT (_descr, firstcall);			      \
     if (__builtin_expect (__gs, 7) != -1)				      \
       {									      \
-	asm ("movw %w0, %%gs" : : "q" (__gs));				      \
+	asm ("movl %0, %%gs" : : "q" (__gs));				      \
 	__gs = 0;							      \
       }									      \
     __gs;								      \
diff --git a/linuxthreads/sysdeps/i386/useldt.h b/linuxthreads/sysdeps/i386/useldt.h
index c0285330e9..ff42853668 100644
--- a/linuxthreads/sysdeps/i386/useldt.h
+++ b/linuxthreads/sysdeps/i386/useldt.h
@@ -72,7 +72,7 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
       1, 0, 0, 0, 0, 1, 0 };						      \
   if (__modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0)		      \
     abort ();								      \
-  asm ("movw %w0, %%gs" : : "q" (nr * 8 + 7));				      \
+  asm ("movl %0, %%gs" : : "q" (nr * 8 + 7));				      \
 })
 
 /* When using the new set_thread_area call, we don't need to change %gs
@@ -83,11 +83,15 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
 ({									      \
   int __gs;								      \
   struct modify_ldt_ldt_s ldt_entry =					      \
-    { ({ asm ("movw %%gs, %w0" : "=q" (__gs)); __gs >> 3; }),		      \
+    { ({ asm ("movl %%gs, %0" : "=q" (__gs)); __gs >> 3; }),		      \
       (unsigned long int) descr, sizeof (struct _pthread_descr_struct),	      \
       1, 0, 0, 0, 0, 1, 0 };						      \
-  __builtin_expect (INLINE_SYSCALL (set_thread_area, 1, &ldt_entry) == 0, 1)  \
-  ? __gs : -1;	      							      \
+  if (__builtin_expect (INLINE_SYSCALL (set_thread_area, 1, &ldt_entry) == 0, \
+      			1))						      \
+    asm ("movl %0, %%gs" :: "q" (__gs));				      \
+  else								      \
+    __gs = -1;								      \
+  __gs;		      							      \
 })
 
 #if defined __ASSUME_SET_THREAD_AREA_SYSCALL && defined HAVE_TLS_SUPPORT
@@ -113,7 +117,7 @@ extern int __have_no_set_thread_area;
 #define FREE_THREAD(descr, nr) \
 {									      \
   int __gs;								      \
-  __asm__ __volatile__ ("movw %%gs, %w0" : "=q" (__gs));		      \
+  __asm__ __volatile__ ("movl %%gs, %0" : "=q" (__gs));			      \
   if (__builtin_expect (__gs & 4, 0))					      \
     {									      \
       struct modify_ldt_ldt_s ldt_entry =				      \