author     Ulrich Drepper <drepper@gmail.com>  2012-01-26 09:45:54 -0500
committer  Ulrich Drepper <drepper@gmail.com>  2012-01-26 09:45:54 -0500
commit     08cf777f9e7f6d826658a99c7d77a359f73a45bf (patch)
tree       89cdc5e4339c060b4e0ccaab79a2924c4989ab9c /sysdeps/x86_64/multiarch
parent     afc5ed09cbce5d6fd48b3a8c5ec427b31f996880 (diff)
Really fix AVX tests
There is no problem with strcmp; it doesn't use the YMM registers.
The math routines might, since gcc may generate such code for them.
Introduce bit_YMM_Usable and use it in the math routines.
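
For reference, a minimal standalone sketch of the same usability check
(the helper name ymm_usable is hypothetical and not part of this commit):
AVX code may only touch the YMM registers when the CPU reports AVX,
OSXSAVE is enabled, and XCR0 shows that the OS saves both the XMM and
YMM state components on context switch.

    /* Hypothetical standalone check mirroring the logic below:
       CPUID.1:ECX bit 28 = AVX, bit 27 = OSXSAVE; XGETBV with ECX = 0
       reads XCR0, whose bits 1 (XMM) and 2 (YMM) must both be set.  */
    #include <cpuid.h>
    #include <stdio.h>

    static int
    ymm_usable (void)
    {
      unsigned int eax, ebx, ecx, edx;

      if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx)
          || (ecx & (1u << 28)) == 0
          || (ecx & (1u << 27)) == 0)
        return 0;

      unsigned int xcrlow, xcrhigh;
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
      return (xcrlow & 6) == 6;
    }

    int
    main (void)
    {
      printf ("YMM usable: %d\n", ymm_usable ());
      return 0;
    }
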
Diffstat (limited to 'sysdeps/x86_64/multiarch')
 sysdeps/x86_64/multiarch/init-arch.c | 14 +++++++-------
 sysdeps/x86_64/multiarch/init-arch.h | 26 +++++++++++++-------------
 2 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/init-arch.c b/sysdeps/x86_64/multiarch/init-arch.c
index 4fabbee06d..76d146c1f0 100644
--- a/sysdeps/x86_64/multiarch/init-arch.c
+++ b/sysdeps/x86_64/multiarch/init-arch.c
@@ -147,13 +147,13 @@ __init_cpu_features (void)
   if (__cpu_features.cpuid[COMMON_CPUID_INDEX_1].ecx & bit_AVX)
     {
       /* Reset the AVX bit in case OSXSAVE is disabled.  */
-      if ((__cpu_features.cpuid[COMMON_CPUID_INDEX_1].ecx & bit_OSXSAVE) == 0
-	  || ({ unsigned int xcrlow;
-	      unsigned int xcrhigh;
-	      asm ("xgetbv"
-		   : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
-	      (xcrlow & 6) != 6; }))
-	__cpu_features.cpuid[COMMON_CPUID_INDEX_1].ecx &= ~bit_AVX;
+      if ((__cpu_features.cpuid[COMMON_CPUID_INDEX_1].ecx & bit_OSXSAVE) != 0
+	  && ({ unsigned int xcrlow;
+		unsigned int xcrhigh;
+		asm ("xgetbv"
+		     : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
+		(xcrlow & 6) == 6; }))
+	__cpu_features.feature[index_YMM_Usable] |= bit_YMM_Usable;
     }
 
   __cpu_features.family = family;
diff --git a/sysdeps/x86_64/multiarch/init-arch.h b/sysdeps/x86_64/multiarch/init-arch.h
index 408e5aeb89..2dc75ab37b 100644
--- a/sysdeps/x86_64/multiarch/init-arch.h
+++ b/sysdeps/x86_64/multiarch/init-arch.h
@@ -22,6 +22,7 @@
 #define bit_Prefer_SSE_for_memop	(1 << 3)
 #define bit_Fast_Unaligned_Load		(1 << 4)
 #define bit_Prefer_PMINUB_for_stringop	(1 << 5)
+#define bit_YMM_Usable			(1 << 6)
 
 #define bit_SSE2	(1 << 26)
 #define bit_SSSE3	(1 << 9)
@@ -49,6 +50,7 @@
 # define index_Prefer_SSE_for_memop	FEATURE_INDEX_1*FEATURE_SIZE
 # define index_Fast_Unaligned_Load	FEATURE_INDEX_1*FEATURE_SIZE
 # define index_Prefer_PMINUB_for_stringop FEATURE_INDEX_1*FEATURE_SIZE
+# define index_YMM_Usable		FEATURE_INDEX_1*FEATURE_SIZE
 
 #else	/* __ASSEMBLER__ */
 
@@ -93,7 +95,7 @@ extern struct cpu_features
 
 
 extern void __init_cpu_features (void) attribute_hidden;
-#define INIT_ARCH()\
+# define INIT_ARCH() \
   do							\
     if (__cpu_features.kind == arch_kind_unknown)	\
       __init_cpu_features ();				\
@@ -126,23 +128,21 @@ extern const struct cpu_features *__get_cpu_features (void)
 # define index_Slow_BSF			FEATURE_INDEX_1
 # define index_Prefer_SSE_for_memop	FEATURE_INDEX_1
 # define index_Fast_Unaligned_Load	FEATURE_INDEX_1
+# define index_YMM_Usable		FEATURE_INDEX_1
 
-#define HAS_ARCH_FEATURE(idx, bit) \
-  ((__get_cpu_features ()->feature[idx] & (bit)) != 0)
+# define HAS_ARCH_FEATURE(name) \
+  ((__get_cpu_features ()->feature[index_##name] & (bit_##name)) != 0)
 
-#define HAS_FAST_REP_STRING \
-  HAS_ARCH_FEATURE (index_Fast_Rep_String, bit_Fast_Rep_String)
+# define HAS_FAST_REP_STRING	HAS_ARCH_FEATURE (Fast_Rep_String)
 
-#define HAS_FAST_COPY_BACKWARD \
-  HAS_ARCH_FEATURE (index_Fast_Copy_Backward, bit_Fast_Copy_Backward)
+# define HAS_FAST_COPY_BACKWARD	HAS_ARCH_FEATURE (Fast_Copy_Backward)
 
-#define HAS_SLOW_BSF \
-  HAS_ARCH_FEATURE (index_Slow_BSF, bit_Slow_BSF)
+# define HAS_SLOW_BSF		HAS_ARCH_FEATURE (Slow_BSF)
 
-#define HAS_PREFER_SSE_FOR_MEMOP \
-  HAS_ARCH_FEATURE (index_Prefer_SSE_for_memop, bit_Prefer_SSE_for_memop)
+# define HAS_PREFER_SSE_FOR_MEMOP HAS_ARCH_FEATURE (Prefer_SSE_for_memop)
 
-#define HAS_FAST_UNALIGNED_LOAD \
-  HAS_ARCH_FEATURE (index_Fast_Unaligned_Load, bit_Fast_Unaligned_Load)
+# define HAS_FAST_UNALIGNED_LOAD HAS_ARCH_FEATURE (Fast_Unaligned_Load)
+
+# define HAS_YMM_USABLE		HAS_ARCH_FEATURE (YMM_Usable)
 
 #endif	/* __ASSEMBLER__ */
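
The reworked HAS_ARCH_FEATURE macro now takes a single feature name and
derives the matching index_*/bit_* pair itself. A hedged sketch of how a
multiarch caller might select an AVX variant through the new
HAS_YMM_USABLE wrapper (the variant names below are made up for
illustration, not taken from this commit):

    /* Hypothetical selection helper; __exp_avx and __exp_sse2 are
       placeholder implementation names.  */
    #include "init-arch.h"

    extern double __exp_avx (double);
    extern double __exp_sse2 (double);

    /* Pick the AVX implementation only when the OS actually saves the
       YMM state, as recorded by __init_cpu_features.  */
    static double (*select_exp (void)) (double)
    {
      INIT_ARCH ();
      return HAS_YMM_USABLE ? __exp_avx : __exp_sse2;
    }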