-rw-r--r--  ChangeLog                                         |  4
-rw-r--r--  nptl/ChangeLog                                    | 12
-rw-r--r--  nptl/sysdeps/pthread/pthread-functions.h          |  1
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c  |  6
-rw-r--r--  nptl/sysdeps/x86_64/tls.h                         |  5
-rw-r--r--  sysdeps/x86_64/cacheinfo.c                        | 37
6 files changed, 36 insertions, 29 deletions
diff --git a/ChangeLog b/ChangeLog
index 4b582368b8..d799ba831a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2007-05-21  Ulrich Drepper  <drepper@redhat.com>
 
+	* sysdeps/x86_64/cacheinfo.c (init_cacheinfo): Pass correct value
+	as second parameter to handle_intel.
+
 	* nscd/aicache.c (addhstaiX): If reported TTL is zero don't cache
 	the entry.
 
@@ -51,6 +54,7 @@
 	Define for kernel >= 2.6.22.
 
 2007-05-18  Ulrich Drepper  <drepper@redhat.com>
+
 	* elf/dl-close.c (_dl_close_worker): When removing object from
 	global scope, wait for all lookups to finish afterwards.
 	* elf/dl-open.c (add_to_global): When global scope array must
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index ed00f55a22..2d4f50b73b 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,15 @@
+2007-05-21  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/pthread/pthread-functions.h (struct pthread_functions):
+	Remove ptr_wait_lookup_done again.
+	* init.c (pthread_functions): Don't add .ptr_wait_lookup_done here.
+	(__pthread_initialize_minimal_internal): Initialize
+	_dl_wait_lookup_done pointer in _rtld_global directly.
+	* sysdeps/unix/sysv/linux/libc_pthread_init.c (__libc_pthread_init):
+	Remove code to set _dl_wait_lookup_done.
+	* sysdeps/x86_64/tls.h (THREAD_GSCOPE_WAIT): The pointer is not
+	encrypted for now.
+
 2007-05-21  Jakub Jelinek  <jakub@redhat.com>
 
 	* tst-robust9.c (do_test): Don't fail if ENABLE_PI and
diff --git a/nptl/sysdeps/pthread/pthread-functions.h b/nptl/sysdeps/pthread/pthread-functions.h
index f0eddd3053..0c404fcbb3 100644
--- a/nptl/sysdeps/pthread/pthread-functions.h
+++ b/nptl/sysdeps/pthread/pthread-functions.h
@@ -97,7 +97,6 @@ struct pthread_functions
   void (*ptr__nptl_deallocate_tsd) (void);
   int (*ptr__nptl_setxid) (struct xid_command *);
   void (*ptr_freeres) (void);
-  void (*ptr_wait_lookup_done) (void);
 };
 
 /* Variable in libc.so.  */
diff --git a/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c b/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c
index 25509eb3d6..4b614bd1a6 100644
--- a/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c
+++ b/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c
@@ -71,12 +71,6 @@ __libc_pthread_init (ptr, reclaim, functions)
       dest->parr[cnt] = p;
     }
   __libc_pthread_functions_init = 1;
-
-# ifdef RTLD_NOT_MANGLED
-  GL(dl_wait_lookup_done) = functions->ptr_wait_lookup_done;
-# else
-  GL(dl_wait_lookup_done) = __libc_pthread_functions.ptr_wait_lookup_done;
-# endif
 #endif
 
 #ifndef TLS_MULTIPLE_THREADS_IN_TCB
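
The assignment deleted above does not simply vanish: per the nptl/ChangeLog
entry, __pthread_initialize_minimal_internal in nptl/init.c (not part of this
diff) now stores the pointer into _rtld_global directly. A minimal sketch of
that direct initialization, assuming the libpthread-internal waiter is named
__wait_lookup_done (a name that does not appear in this commit):

    /* Sketch only, per the ChangeLog entry; __wait_lookup_done is an
       assumed name, and the rest of the function is elided.  */
    void
    __pthread_initialize_minimal_internal (void)
    {
      /* ... existing TCB and signal setup ... */

      /* Point _rtld_global at the waiter directly instead of routing
	 it through struct pthread_functions.  */
      GL(dl_wait_lookup_done) = __wait_lookup_done;
    }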
diff --git a/nptl/sysdeps/x86_64/tls.h b/nptl/sysdeps/x86_64/tls.h
index 00c9abbfcb..3a69e0428a 100644
--- a/nptl/sysdeps/x86_64/tls.h
+++ b/nptl/sysdeps/x86_64/tls.h
@@ -357,10 +357,7 @@ typedef struct
 #define THREAD_GSCOPE_SET_FLAG() \
   THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
 #define THREAD_GSCOPE_WAIT() \
-  do { void (*ptr) (void) = GL(dl_wait_lookup_done);			      \
-       PTR_DEMANGLE (ptr);						      \
-       ptr ();								      \
-  } while (0)
+  GL(dl_wait_lookup_done) ()
 
 
 #endif /* __ASSEMBLER__ */
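
For context on the tls.h hunk above: the removed form went through glibc's
pointer-mangling scheme, in which function pointers kept in writable data are
combined with a per-process guard value (an XOR plus a rotate on x86_64) and
must be demangled before the call. With the pointer now stored unmangled
("not encrypted for now", as the ChangeLog puts it), the call collapses to a
plain indirect call. Illustration only; PTR_MANGLE/PTR_DEMANGLE are
per-architecture macros not shown in this commit:

    /* Old form: demangle the guarded pointer before calling it.  */
    void (*ptr) (void) = GL(dl_wait_lookup_done);
    PTR_DEMANGLE (ptr);		/* Undo the guard XOR/rotate.  */
    ptr ();

    /* New form: the stored pointer is usable as-is.  */
    GL(dl_wait_lookup_done) ();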
diff --git a/sysdeps/x86_64/cacheinfo.c b/sysdeps/x86_64/cacheinfo.c
index 65277389dd..f8217a1757 100644
--- a/sysdeps/x86_64/cacheinfo.c
+++ b/sysdeps/x86_64/cacheinfo.c
@@ -1,6 +1,4 @@
-/*
-   x86_64 cache info.
-
+/* x86_64 cache info.
    Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
@@ -202,7 +200,7 @@ handle_intel (int name, unsigned int maxidx)
   long int result = 0;
   bool no_level_2_or_3 = false;
   bool has_level_2 = false;
-  
+
   while (cnt++ < max)
     {
       unsigned int eax;
@@ -349,7 +347,8 @@ __cache_sysconf (int name)
 /* Half the core cache size for use in memory and string routines, typically
    L1 size. */
 long int __x86_64_core_cache_size_half attribute_hidden = 32 * 1024 / 2;
-/* Shared cache size for use in memory and string routines, typically L2 or L3 size. */
+/* Shared cache size for use in memory and string routines, typically
+   L2 or L3 size. */
 long int __x86_64_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
 /* PREFETCHW support flag for use in memory and string routines. */
 int __x86_64_prefetchw attribute_hidden;
@@ -378,24 +377,25 @@ init_cacheinfo (void)
   /* This spells out "GenuineIntel".  */
   if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
     {
-      core = handle_intel (_SC_LEVEL1_DCACHE_SIZE, eax);
-      
+      core = handle_intel (_SC_LEVEL1_DCACHE_SIZE, max_cpuid);
+
       /* Try L3 first. */
       level  = 3;
-      shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, eax);
-      
+      shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, max_cpuid);
+
       if (shared <= 0)
         {
 	  /* Try L2 otherwise. */
           level  = 2;
-          shared = handle_intel (_SC_LEVEL2_CACHE_SIZE, eax);
+          shared = handle_intel (_SC_LEVEL2_CACHE_SIZE, max_cpuid);
 	}
-	
-      /* Figure out the number of logical threads that share the highest cache level. */	
+
+      /* Figure out the number of logical threads that share the
+	 highest cache level. */
       if (max_cpuid >= 4)
         {
 	  int i = 0;
-	  
+
 	  /* Query until desired cache level is enumerated. */
 	  do
 	    {
@@ -404,7 +404,7 @@ init_cacheinfo (void)
 		            : "0" (4), "2" (i++));
 	    }
           while (((eax >> 5) & 0x7) != level);
-			
+
 	  threads = ((eax >> 14) & 0x3ff) + 1;
 	}
       else
@@ -413,11 +413,12 @@ init_cacheinfo (void)
           asm volatile ("cpuid"
 		        : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
 		        : "0" (1));
-			
+
 	  threads = (ebx >> 16) & 0xff;
 	}
-	 
-      /* Cap usage of highest cache level to the number of supported threads. */
+
+      /* Cap usage of highest cache level to the number of supported
+	 threads. */
       if (shared > 0 && threads > 0)
         shared /= threads;
     }
@@ -426,7 +427,7 @@ init_cacheinfo (void)
     {
       core   = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
       shared = handle_amd (_SC_LEVEL2_CACHE_SIZE);
-      
+
       asm volatile ("cpuid"
 		    : "=a" (max_cpuid_ex), "=b" (ebx), "=c" (ecx), "=d" (edx)
 		    : "0" (0x80000000));