author     H.J. Lu <hjl.tools@gmail.com>   2020-06-29 16:36:08 -0700
committer  H.J. Lu <hjl.tools@gmail.com>   2020-07-13 06:05:16 -0700
commit     107e6a3c2212ba7a3a4ec7cae8d82d73f7c95d0b (patch)
tree       b23f1c5ba166bd28f519fa36f225a31407b45270 /sysdeps/x86_64
parent     10b01bd4529336bffc2c398ce43a171ed94aacc7 (diff)
x86: Support usable check for all CPU features
Support usable check for all CPU features with the following changes:

1. Change struct cpu_features to

struct cpuid_features
{
  struct cpuid_registers cpuid;
  struct cpuid_registers usable;
};

struct cpu_features
{
  struct cpu_features_basic basic;
  struct cpuid_features features[COMMON_CPUID_INDEX_MAX];
  unsigned int preferred[PREFERRED_FEATURE_INDEX_MAX];
...
};

so that there is a usable bit for each cpuid bit.
2. After the cpuid bits have been initialized, copy the known bits to the
usable bits.  EAX/EBX from INDEX_1 and EAX from INDEX_7 aren't used for
CPU feature detection.
3. Clear the usable bits which require OS support.
4. If the feature is supported by the OS, copy its cpuid bit to its usable
bit (steps 2-4 are sketched after this list).
5. Replace HAS_CPU_FEATURE and CPU_FEATURES_CPU_P with CPU_FEATURE_USABLE
and CPU_FEATURE_USABLE_P to check if a feature is usable.
6. Add DEPR_FPU_CS_DS for INDEX_7_EBX_13.
7. Unset MPX feature since it has been deprecated.
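
A minimal, self-contained C sketch of steps 2-4 follows.  It is illustrative
only: init_usable, os_supports_avx_state and bit_cpu_AVX are stand-ins for
the purpose of this example, not the actual glibc identifiers.

#include <stdio.h>

struct cpuid_registers
{
  unsigned int eax, ebx, ecx, edx;
};

struct cpuid_features
{
  struct cpuid_registers cpuid;
  struct cpuid_registers usable;
};

/* CPUID.1:ECX bit 28 advertises AVX (illustrative example feature).  */
#define bit_cpu_AVX (1u << 28)

/* Hypothetical stand-in for the XGETBV/XCR0 check the loader performs.  */
static int
os_supports_avx_state (void)
{
  return 1;   /* Pretend the kernel enabled YMM state for this sketch.  */
}

static void
init_usable (struct cpuid_features *f)
{
  /* Step 2: start from the raw CPUID bits.  */
  f->usable = f->cpuid;

  /* Step 3: clear the usable bits which also require OS support.  */
  f->usable.ecx &= ~bit_cpu_AVX;

  /* Step 4: copy the cpuid bit back only when the OS enabled the state.  */
  if ((f->cpuid.ecx & bit_cpu_AVX) && os_supports_avx_state ())
    f->usable.ecx |= bit_cpu_AVX;
}

int
main (void)
{
  struct cpuid_features f = { .cpuid = { .ecx = bit_cpu_AVX } };
  init_usable (&f);
  printf ("AVX usable: %d\n", (f.usable.ecx & bit_cpu_AVX) != 0);
  return 0;
}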

The results are

1. If the feature is known and doesn't require OS support, its usable bit
is copied from the cpuid bit.
2. Otherwise, its usable bit is copied from the cpuid bit only if the
feature is known to be supported by the OS.
3. CPU_FEATURE_USABLE/CPU_FEATURE_USABLE_P are used to check if the
feature can be used.
4. HAS_CPU_FEATURE/CPU_FEATURE_CPU_P are used to check if the CPU supports
the feature.
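
For example, an ifunc selector now picks an AVX2 implementation only when
AVX2 is both reported by CPUID and enabled by the OS.  The following is a
sketch modeled on the selectors changed below; the plain void * return type
is a simplification of the usual __typeof (REDIRECT_NAME) * declaration.

static inline void *
IFUNC_SELECTOR (void)
{
  const struct cpu_features* cpu_features = __get_cpu_features ();

  /* Usable: the CPUID bit is set and any required OS support (here the
     XSAVE-enabled YMM state) is present.  */
  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
    return OPTIMIZE (avx2);

  /* CPU_FEATURE_CPU_P (cpu_features, AVX2) would only say whether the CPU
     advertises AVX2; it is not sufficient to select the AVX2 version.  */
  return OPTIMIZE (sse2);
}
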
Diffstat (limited to 'sysdeps/x86_64')
-rw-r--r--  sysdeps/x86_64/Makefile                              |   6
-rw-r--r--  sysdeps/x86_64/dl-machine.h                          |   6
-rw-r--r--  sysdeps/x86_64/fpu/math-tests-arch.h                 |   6
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/ifunc-avx-fma4.h        |   8
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/ifunc-fma.h             |   4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/ifunc-fma4.h            |   6
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx2.h    |   4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx512.h  |   4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-sse4_1.h  |   2
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/ifunc-sse4_1.h          |   2
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/s_fma.c                 |   4
-rw-r--r--  sysdeps/x86_64/fpu/multiarch/s_fmaf.c                |   4
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-avx2.h                |   2
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-impl-list.c           | 228
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-memcmp.h              |   8
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-memmove.h             |  10
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-memset.h              |  10
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-sse4_2.h              |   2
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-strcasecmp.h          |   6
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-strcpy.h              |   4
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-wmemset.h             |   4
-rw-r--r--  sysdeps/x86_64/multiarch/sched_cpucount.c            |   2
-rw-r--r--  sysdeps/x86_64/multiarch/strchr.c                    |   2
-rw-r--r--  sysdeps/x86_64/multiarch/strcmp.c                    |   4
-rw-r--r--  sysdeps/x86_64/multiarch/strncmp.c                   |   6
-rw-r--r--  sysdeps/x86_64/multiarch/test-multiarch.c            |  24
-rw-r--r--  sysdeps/x86_64/multiarch/wcscpy.c                    |   2
-rw-r--r--  sysdeps/x86_64/multiarch/wcsnlen.c                   |   4
28 files changed, 187 insertions, 187 deletions
diff --git a/sysdeps/x86_64/Makefile b/sysdeps/x86_64/Makefile
index e3bb45d788..42b97c5cc7 100644
--- a/sysdeps/x86_64/Makefile
+++ b/sysdeps/x86_64/Makefile
@@ -57,7 +57,7 @@ modules-names += x86_64/tst-x86_64mod-1
 LDFLAGS-tst-x86_64mod-1.so = -Wl,-soname,tst-x86_64mod-1.so
 ifneq (no,$(have-tunables))
 # Test the state size for XSAVE when XSAVEC is disabled.
-tst-x86_64-1-ENV = GLIBC_TUNABLES=glibc.cpu.hwcaps=-XSAVEC_Usable
+tst-x86_64-1-ENV = GLIBC_TUNABLES=glibc.cpu.hwcaps=-XSAVEC
 endif
 
 $(objpfx)tst-x86_64-1: $(objpfx)x86_64/tst-x86_64mod-1.so
@@ -71,10 +71,10 @@ CFLAGS-tst-platformmod-2.c = -mno-avx
 LDFLAGS-tst-platformmod-2.so = -Wl,-soname,tst-platformmod-2.so
 $(objpfx)tst-platform-1: $(objpfx)tst-platformmod-1.so
 $(objpfx)tst-platform-1.out: $(objpfx)x86_64/tst-platformmod-2.so
-# Turn off AVX512F_Usable and AVX2_Usable so that GLRO(dl_platform) is
+# Turn off AVX512F and AVX2 so that GLRO(dl_platform) is
 # always set to x86_64.
 tst-platform-1-ENV = LD_PRELOAD=$(objpfx)\$$PLATFORM/tst-platformmod-2.so \
-	GLIBC_TUNABLES=glibc.cpu.hwcaps=-AVX512F_Usable,-AVX2_Usable
+	GLIBC_TUNABLES=glibc.cpu.hwcaps=-AVX512F,-AVX2
 endif
 
 tests += tst-audit3 tst-audit4 tst-audit5 tst-audit6 tst-audit7 \
diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
index 8e9baffeb4..ca73d8fef9 100644
--- a/sysdeps/x86_64/dl-machine.h
+++ b/sysdeps/x86_64/dl-machine.h
@@ -99,9 +99,9 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
 	 end in this function.  */
       if (__glibc_unlikely (profile))
 	{
-	  if (HAS_ARCH_FEATURE (AVX512F_Usable))
+	  if (CPU_FEATURE_USABLE (AVX512F))
 	    *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile_avx512;
-	  else if (HAS_ARCH_FEATURE (AVX_Usable))
+	  else if (CPU_FEATURE_USABLE (AVX))
 	    *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile_avx;
 	  else
 	    *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile_sse;
@@ -119,7 +119,7 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
 	     the resolved address.  */
 	  if (GLRO(dl_x86_cpu_features).xsave_state_size != 0)
 	    *(ElfW(Addr) *) (got + 2)
-	      = (HAS_ARCH_FEATURE (XSAVEC_Usable)
+	      = (CPU_FEATURE_USABLE (XSAVEC)
 		 ? (ElfW(Addr)) &_dl_runtime_resolve_xsavec
 		 : (ElfW(Addr)) &_dl_runtime_resolve_xsave);
 	  else
diff --git a/sysdeps/x86_64/fpu/math-tests-arch.h b/sysdeps/x86_64/fpu/math-tests-arch.h
index 435ddad991..33ea763de2 100644
--- a/sysdeps/x86_64/fpu/math-tests-arch.h
+++ b/sysdeps/x86_64/fpu/math-tests-arch.h
@@ -24,7 +24,7 @@
 # define CHECK_ARCH_EXT                                        \
   do                                                           \
     {                                                          \
-      if (!HAS_ARCH_FEATURE (AVX_Usable)) return;              \
+      if (!CPU_FEATURE_USABLE (AVX)) return;                   \
     }                                                          \
   while (0)
 
@@ -34,7 +34,7 @@
 # define CHECK_ARCH_EXT                                        \
   do                                                           \
     {                                                          \
-      if (!HAS_ARCH_FEATURE (AVX2_Usable)) return;             \
+      if (!CPU_FEATURE_USABLE (AVX2)) return;                  \
     }                                                          \
   while (0)
 
@@ -44,7 +44,7 @@
 # define CHECK_ARCH_EXT                                        \
   do                                                           \
     {                                                          \
-      if (!HAS_ARCH_FEATURE (AVX512F_Usable)) return;          \
+      if (!CPU_FEATURE_USABLE (AVX512F)) return;               \
     }                                                          \
   while (0)
 
diff --git a/sysdeps/x86_64/fpu/multiarch/ifunc-avx-fma4.h b/sysdeps/x86_64/fpu/multiarch/ifunc-avx-fma4.h
index 86835eebc1..95fe2f4d70 100644
--- a/sysdeps/x86_64/fpu/multiarch/ifunc-avx-fma4.h
+++ b/sysdeps/x86_64/fpu/multiarch/ifunc-avx-fma4.h
@@ -29,14 +29,14 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, FMA_Usable)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
+  if (CPU_FEATURE_USABLE_P (cpu_features, FMA)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2))
     return OPTIMIZE (fma);
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, FMA4_Usable))
+  if (CPU_FEATURE_USABLE_P (cpu_features, FMA4))
     return OPTIMIZE (fma4);
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, AVX_Usable))
+  if (CPU_FEATURE_USABLE_P (cpu_features, AVX))
     return OPTIMIZE (avx);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/fpu/multiarch/ifunc-fma.h b/sysdeps/x86_64/fpu/multiarch/ifunc-fma.h
index 2242d97de0..0a25a44ab0 100644
--- a/sysdeps/x86_64/fpu/multiarch/ifunc-fma.h
+++ b/sysdeps/x86_64/fpu/multiarch/ifunc-fma.h
@@ -26,8 +26,8 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, FMA_Usable)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
+  if (CPU_FEATURE_USABLE_P (cpu_features, FMA)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2))
     return OPTIMIZE (fma);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/fpu/multiarch/ifunc-fma4.h b/sysdeps/x86_64/fpu/multiarch/ifunc-fma4.h
index 03adf86b9b..7659758972 100644
--- a/sysdeps/x86_64/fpu/multiarch/ifunc-fma4.h
+++ b/sysdeps/x86_64/fpu/multiarch/ifunc-fma4.h
@@ -28,11 +28,11 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, FMA_Usable)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
+  if (CPU_FEATURE_USABLE_P (cpu_features, FMA)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2))
     return OPTIMIZE (fma);
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, FMA4_Usable))
+  if (CPU_FEATURE_USABLE_P (cpu_features, FMA4))
     return OPTIMIZE (fma4);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx2.h b/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx2.h
index 9c5c6f1476..2655e55444 100644
--- a/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx2.h
+++ b/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx2.h
@@ -31,8 +31,8 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, FMA_Usable)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
+  if (CPU_FEATURE_USABLE_P (cpu_features, FMA)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2))
     return OPTIMIZE (avx2);
 
   return OPTIMIZE (sse_wrapper);
diff --git a/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx512.h b/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx512.h
index 70e22c53bf..5f8326503b 100644
--- a/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx512.h
+++ b/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-avx512.h
@@ -34,10 +34,10 @@ IFUNC_SELECTOR (void)
 
   if (!CPU_FEATURES_ARCH_P (cpu_features, MathVec_Prefer_No_AVX512))
     {
-      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512DQ_Usable))
+      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512DQ))
 	return OPTIMIZE (skx);
 
-      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable))
+      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F))
 	return OPTIMIZE (knl);
     }
 
diff --git a/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-sse4_1.h b/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-sse4_1.h
index 63005c0af4..7240e554c9 100644
--- a/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-sse4_1.h
+++ b/sysdeps/x86_64/fpu/multiarch/ifunc-mathvec-sse4_1.h
@@ -31,7 +31,7 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSE4_1))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1))
     return OPTIMIZE (sse4);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/fpu/multiarch/ifunc-sse4_1.h b/sysdeps/x86_64/fpu/multiarch/ifunc-sse4_1.h
index 7f26215da6..e5d8a6f932 100644
--- a/sysdeps/x86_64/fpu/multiarch/ifunc-sse4_1.h
+++ b/sysdeps/x86_64/fpu/multiarch/ifunc-sse4_1.h
@@ -26,7 +26,7 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSE4_1))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1))
     return OPTIMIZE (sse41);
 
   return OPTIMIZE (c);
diff --git a/sysdeps/x86_64/fpu/multiarch/s_fma.c b/sysdeps/x86_64/fpu/multiarch/s_fma.c
index 9992a1e97a..0d8c0b7911 100644
--- a/sysdeps/x86_64/fpu/multiarch/s_fma.c
+++ b/sysdeps/x86_64/fpu/multiarch/s_fma.c
@@ -41,8 +41,8 @@ __fma_fma4 (double x, double y, double z)
 }
 
 
-libm_ifunc (__fma, HAS_ARCH_FEATURE (FMA_Usable)
-	    ? __fma_fma3 : (HAS_ARCH_FEATURE (FMA4_Usable)
+libm_ifunc (__fma, CPU_FEATURE_USABLE (FMA)
+	    ? __fma_fma3 : (CPU_FEATURE_USABLE (FMA4)
 			    ? __fma_fma4 : __fma_sse2));
 libm_alias_double (__fma, fma)
 
diff --git a/sysdeps/x86_64/fpu/multiarch/s_fmaf.c b/sysdeps/x86_64/fpu/multiarch/s_fmaf.c
index 4cbcf1f61b..c01e5a21d4 100644
--- a/sysdeps/x86_64/fpu/multiarch/s_fmaf.c
+++ b/sysdeps/x86_64/fpu/multiarch/s_fmaf.c
@@ -40,8 +40,8 @@ __fmaf_fma4 (float x, float y, float z)
 }
 
 
-libm_ifunc (__fmaf, HAS_ARCH_FEATURE (FMA_Usable)
-	    ? __fmaf_fma3 : (HAS_ARCH_FEATURE (FMA4_Usable)
+libm_ifunc (__fmaf, CPU_FEATURE_USABLE (FMA)
+	    ? __fmaf_fma3 : (CPU_FEATURE_USABLE (FMA4)
 			     ? __fmaf_fma4 : __fmaf_sse2));
 libm_alias_float (__fma, fma)
 
diff --git a/sysdeps/x86_64/multiarch/ifunc-avx2.h b/sysdeps/x86_64/multiarch/ifunc-avx2.h
index 69f30398ae..f4e311d470 100644
--- a/sysdeps/x86_64/multiarch/ifunc-avx2.h
+++ b/sysdeps/x86_64/multiarch/ifunc-avx2.h
@@ -28,7 +28,7 @@ IFUNC_SELECTOR (void)
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
   if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     return OPTIMIZE (avx2);
 
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index ce7eb1eecf..f93ec39d98 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -41,19 +41,19 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/memchr.c.  */
   IFUNC_IMPL (i, name, memchr,
 	      IFUNC_IMPL_ADD (array, i, memchr,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __memchr_avx2)
 	      IFUNC_IMPL_ADD (array, i, memchr, 1, __memchr_sse2))
 
   /* Support sysdeps/x86_64/multiarch/memcmp.c.  */
   IFUNC_IMPL (i, name, memcmp,
 	      IFUNC_IMPL_ADD (array, i, memcmp,
-			      (HAS_ARCH_FEATURE (AVX2_Usable)
-			       && HAS_CPU_FEATURE (MOVBE)),
+			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (MOVBE)),
 			      __memcmp_avx2_movbe)
-	      IFUNC_IMPL_ADD (array, i, memcmp, HAS_CPU_FEATURE (SSE4_1),
+	      IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSE4_1),
 			      __memcmp_sse4_1)
-	      IFUNC_IMPL_ADD (array, i, memcmp, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSSE3),
 			      __memcmp_ssse3)
 	      IFUNC_IMPL_ADD (array, i, memcmp, 1, __memcmp_sse2))
 
@@ -61,25 +61,25 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/memmove_chk.c.  */
   IFUNC_IMPL (i, name, __memmove_chk,
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memmove_chk_avx512_no_vzeroupper)
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memmove_chk_avx512_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memmove_chk_avx512_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __memmove_chk_avx_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __memmove_chk_avx_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __memmove_chk_ssse3_back)
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __memmove_chk_ssse3)
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk, 1,
 			      __memmove_chk_sse2_unaligned)
@@ -92,23 +92,23 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/memmove.c.  */
   IFUNC_IMPL (i, name, memmove,
 	      IFUNC_IMPL_ADD (array, i, memmove,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __memmove_avx_unaligned)
 	      IFUNC_IMPL_ADD (array, i, memmove,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __memmove_avx_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, memmove,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memmove_avx512_no_vzeroupper)
 	      IFUNC_IMPL_ADD (array, i, memmove,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memmove_avx512_unaligned)
 	      IFUNC_IMPL_ADD (array, i, memmove,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memmove_avx512_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memmove, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, memmove, CPU_FEATURE_USABLE (SSSE3),
 			      __memmove_ssse3_back)
-	      IFUNC_IMPL_ADD (array, i, memmove, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, memmove, CPU_FEATURE_USABLE (SSSE3),
 			      __memmove_ssse3)
 	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_erms)
 	      IFUNC_IMPL_ADD (array, i, memmove, 1,
@@ -119,7 +119,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/memrchr.c.  */
   IFUNC_IMPL (i, name, memrchr,
 	      IFUNC_IMPL_ADD (array, i, memrchr,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __memrchr_avx2)
 	      IFUNC_IMPL_ADD (array, i, memrchr, 1, __memrchr_sse2))
 
@@ -133,19 +133,19 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
 			      __memset_chk_sse2_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __memset_chk_avx2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __memset_chk_avx2_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memset_chk_avx512_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memset_chk_avx512_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memset_chk_avx512_no_vzeroupper)
 	      )
 #endif
@@ -158,48 +158,48 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			      __memset_sse2_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, memset, 1, __memset_erms)
 	      IFUNC_IMPL_ADD (array, i, memset,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __memset_avx2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, memset,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __memset_avx2_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, memset,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memset_avx512_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, memset,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memset_avx512_unaligned)
 	      IFUNC_IMPL_ADD (array, i, memset,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memset_avx512_no_vzeroupper)
 	     )
 
   /* Support sysdeps/x86_64/multiarch/rawmemchr.c.  */
   IFUNC_IMPL (i, name, rawmemchr,
 	      IFUNC_IMPL_ADD (array, i, rawmemchr,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __rawmemchr_avx2)
 	      IFUNC_IMPL_ADD (array, i, rawmemchr, 1, __rawmemchr_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strlen.c.  */
   IFUNC_IMPL (i, name, strlen,
 	      IFUNC_IMPL_ADD (array, i, strlen,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __strlen_avx2)
 	      IFUNC_IMPL_ADD (array, i, strlen, 1, __strlen_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strnlen.c.  */
   IFUNC_IMPL (i, name, strnlen,
 	      IFUNC_IMPL_ADD (array, i, strnlen,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __strnlen_avx2)
 	      IFUNC_IMPL_ADD (array, i, strnlen, 1, __strnlen_sse2))
 
   /* Support sysdeps/x86_64/multiarch/stpncpy.c.  */
   IFUNC_IMPL (i, name, stpncpy,
-	      IFUNC_IMPL_ADD (array, i, stpncpy, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, stpncpy, CPU_FEATURE_USABLE (SSSE3),
 			      __stpncpy_ssse3)
-	      IFUNC_IMPL_ADD (array, i, stpncpy, HAS_ARCH_FEATURE (AVX2_Usable),
+	      IFUNC_IMPL_ADD (array, i, stpncpy, CPU_FEATURE_USABLE (AVX2),
 			      __stpncpy_avx2)
 	      IFUNC_IMPL_ADD (array, i, stpncpy, 1,
 			      __stpncpy_sse2_unaligned)
@@ -207,9 +207,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/x86_64/multiarch/stpcpy.c.  */
   IFUNC_IMPL (i, name, stpcpy,
-	      IFUNC_IMPL_ADD (array, i, stpcpy, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, stpcpy, CPU_FEATURE_USABLE (SSSE3),
 			      __stpcpy_ssse3)
-	      IFUNC_IMPL_ADD (array, i, stpcpy, HAS_ARCH_FEATURE (AVX2_Usable),
+	      IFUNC_IMPL_ADD (array, i, stpcpy, CPU_FEATURE_USABLE (AVX2),
 			      __stpcpy_avx2)
 	      IFUNC_IMPL_ADD (array, i, stpcpy, 1, __stpcpy_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, stpcpy, 1, __stpcpy_sse2))
@@ -217,35 +217,35 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/strcasecmp_l.c.  */
   IFUNC_IMPL (i, name, strcasecmp,
 	      IFUNC_IMPL_ADD (array, i, strcasecmp,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __strcasecmp_avx)
 	      IFUNC_IMPL_ADD (array, i, strcasecmp,
-			      HAS_CPU_FEATURE (SSE4_2),
+			      CPU_FEATURE_USABLE (SSE4_2),
 			      __strcasecmp_sse42)
 	      IFUNC_IMPL_ADD (array, i, strcasecmp,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __strcasecmp_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strcasecmp, 1, __strcasecmp_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strcasecmp_l.c.  */
   IFUNC_IMPL (i, name, strcasecmp_l,
 	      IFUNC_IMPL_ADD (array, i, strcasecmp_l,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __strcasecmp_l_avx)
 	      IFUNC_IMPL_ADD (array, i, strcasecmp_l,
-			      HAS_CPU_FEATURE (SSE4_2),
+			      CPU_FEATURE_USABLE (SSE4_2),
 			      __strcasecmp_l_sse42)
 	      IFUNC_IMPL_ADD (array, i, strcasecmp_l,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __strcasecmp_l_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strcasecmp_l, 1,
 			      __strcasecmp_l_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strcat.c.  */
   IFUNC_IMPL (i, name, strcat,
-	      IFUNC_IMPL_ADD (array, i, strcat, HAS_ARCH_FEATURE (AVX2_Usable),
+	      IFUNC_IMPL_ADD (array, i, strcat, CPU_FEATURE_USABLE (AVX2),
 			      __strcat_avx2)
-	      IFUNC_IMPL_ADD (array, i, strcat, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, strcat, CPU_FEATURE_USABLE (SSSE3),
 			      __strcat_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strcat, 1, __strcat_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, strcat, 1, __strcat_sse2))
@@ -253,7 +253,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/strchr.c.  */
   IFUNC_IMPL (i, name, strchr,
 	      IFUNC_IMPL_ADD (array, i, strchr,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __strchr_avx2)
 	      IFUNC_IMPL_ADD (array, i, strchr, 1, __strchr_sse2_no_bsf)
 	      IFUNC_IMPL_ADD (array, i, strchr, 1, __strchr_sse2))
@@ -261,54 +261,54 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/strchrnul.c.  */
   IFUNC_IMPL (i, name, strchrnul,
 	      IFUNC_IMPL_ADD (array, i, strchrnul,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __strchrnul_avx2)
 	      IFUNC_IMPL_ADD (array, i, strchrnul, 1, __strchrnul_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strrchr.c.  */
   IFUNC_IMPL (i, name, strrchr,
 	      IFUNC_IMPL_ADD (array, i, strrchr,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __strrchr_avx2)
 	      IFUNC_IMPL_ADD (array, i, strrchr, 1, __strrchr_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strcmp.c.  */
   IFUNC_IMPL (i, name, strcmp,
 	      IFUNC_IMPL_ADD (array, i, strcmp,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __strcmp_avx2)
-	      IFUNC_IMPL_ADD (array, i, strcmp, HAS_CPU_FEATURE (SSE4_2),
+	      IFUNC_IMPL_ADD (array, i, strcmp, CPU_FEATURE_USABLE (SSE4_2),
 			      __strcmp_sse42)
-	      IFUNC_IMPL_ADD (array, i, strcmp, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, strcmp, CPU_FEATURE_USABLE (SSSE3),
 			      __strcmp_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strcmp, 1, __strcmp_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, strcmp, 1, __strcmp_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strcpy.c.  */
   IFUNC_IMPL (i, name, strcpy,
-	      IFUNC_IMPL_ADD (array, i, strcpy, HAS_ARCH_FEATURE (AVX2_Usable),
+	      IFUNC_IMPL_ADD (array, i, strcpy, CPU_FEATURE_USABLE (AVX2),
 			      __strcpy_avx2)
-	      IFUNC_IMPL_ADD (array, i, strcpy, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, strcpy, CPU_FEATURE_USABLE (SSSE3),
 			      __strcpy_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strcpy, 1, __strcpy_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, strcpy, 1, __strcpy_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strcspn.c.  */
   IFUNC_IMPL (i, name, strcspn,
-	      IFUNC_IMPL_ADD (array, i, strcspn, HAS_CPU_FEATURE (SSE4_2),
+	      IFUNC_IMPL_ADD (array, i, strcspn, CPU_FEATURE_USABLE (SSE4_2),
 			      __strcspn_sse42)
 	      IFUNC_IMPL_ADD (array, i, strcspn, 1, __strcspn_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strncase_l.c.  */
   IFUNC_IMPL (i, name, strncasecmp,
 	      IFUNC_IMPL_ADD (array, i, strncasecmp,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __strncasecmp_avx)
 	      IFUNC_IMPL_ADD (array, i, strncasecmp,
-			      HAS_CPU_FEATURE (SSE4_2),
+			      CPU_FEATURE_USABLE (SSE4_2),
 			      __strncasecmp_sse42)
 	      IFUNC_IMPL_ADD (array, i, strncasecmp,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __strncasecmp_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strncasecmp, 1,
 			      __strncasecmp_sse2))
@@ -316,22 +316,22 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/strncase_l.c.  */
   IFUNC_IMPL (i, name, strncasecmp_l,
 	      IFUNC_IMPL_ADD (array, i, strncasecmp_l,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __strncasecmp_l_avx)
 	      IFUNC_IMPL_ADD (array, i, strncasecmp_l,
-			      HAS_CPU_FEATURE (SSE4_2),
+			      CPU_FEATURE_USABLE (SSE4_2),
 			      __strncasecmp_l_sse42)
 	      IFUNC_IMPL_ADD (array, i, strncasecmp_l,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __strncasecmp_l_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strncasecmp_l, 1,
 			      __strncasecmp_l_sse2))
 
   /* Support sysdeps/x86_64/multiarch/strncat.c.  */
   IFUNC_IMPL (i, name, strncat,
-	      IFUNC_IMPL_ADD (array, i, strncat, HAS_ARCH_FEATURE (AVX2_Usable),
+	      IFUNC_IMPL_ADD (array, i, strncat, CPU_FEATURE_USABLE (AVX2),
 			      __strncat_avx2)
-	      IFUNC_IMPL_ADD (array, i, strncat, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, strncat, CPU_FEATURE_USABLE (SSSE3),
 			      __strncat_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strncat, 1,
 			      __strncat_sse2_unaligned)
@@ -339,9 +339,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/x86_64/multiarch/strncpy.c.  */
   IFUNC_IMPL (i, name, strncpy,
-	      IFUNC_IMPL_ADD (array, i, strncpy, HAS_ARCH_FEATURE (AVX2_Usable),
+	      IFUNC_IMPL_ADD (array, i, strncpy, CPU_FEATURE_USABLE (AVX2),
 			      __strncpy_avx2)
-	      IFUNC_IMPL_ADD (array, i, strncpy, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, strncpy, CPU_FEATURE_USABLE (SSSE3),
 			      __strncpy_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strncpy, 1,
 			      __strncpy_sse2_unaligned)
@@ -349,14 +349,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/x86_64/multiarch/strpbrk.c.  */
   IFUNC_IMPL (i, name, strpbrk,
-	      IFUNC_IMPL_ADD (array, i, strpbrk, HAS_CPU_FEATURE (SSE4_2),
+	      IFUNC_IMPL_ADD (array, i, strpbrk, CPU_FEATURE_USABLE (SSE4_2),
 			      __strpbrk_sse42)
 	      IFUNC_IMPL_ADD (array, i, strpbrk, 1, __strpbrk_sse2))
 
 
   /* Support sysdeps/x86_64/multiarch/strspn.c.  */
   IFUNC_IMPL (i, name, strspn,
-	      IFUNC_IMPL_ADD (array, i, strspn, HAS_CPU_FEATURE (SSE4_2),
+	      IFUNC_IMPL_ADD (array, i, strspn, CPU_FEATURE_USABLE (SSE4_2),
 			      __strspn_sse42)
 	      IFUNC_IMPL_ADD (array, i, strspn, 1, __strspn_sse2))
 
@@ -368,70 +368,70 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/wcschr.c.  */
   IFUNC_IMPL (i, name, wcschr,
 	      IFUNC_IMPL_ADD (array, i, wcschr,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __wcschr_avx2)
 	      IFUNC_IMPL_ADD (array, i, wcschr, 1, __wcschr_sse2))
 
   /* Support sysdeps/x86_64/multiarch/wcsrchr.c.  */
   IFUNC_IMPL (i, name, wcsrchr,
 	      IFUNC_IMPL_ADD (array, i, wcsrchr,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __wcsrchr_avx2)
 	      IFUNC_IMPL_ADD (array, i, wcsrchr, 1, __wcsrchr_sse2))
 
   /* Support sysdeps/x86_64/multiarch/wcscmp.c.  */
   IFUNC_IMPL (i, name, wcscmp,
 	      IFUNC_IMPL_ADD (array, i, wcscmp,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __wcscmp_avx2)
 	      IFUNC_IMPL_ADD (array, i, wcscmp, 1, __wcscmp_sse2))
 
   /* Support sysdeps/x86_64/multiarch/wcsncmp.c.  */
   IFUNC_IMPL (i, name, wcsncmp,
 	      IFUNC_IMPL_ADD (array, i, wcsncmp,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __wcsncmp_avx2)
 	      IFUNC_IMPL_ADD (array, i, wcsncmp, 1, __wcsncmp_sse2))
 
   /* Support sysdeps/x86_64/multiarch/wcscpy.c.  */
   IFUNC_IMPL (i, name, wcscpy,
-	      IFUNC_IMPL_ADD (array, i, wcscpy, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, wcscpy, CPU_FEATURE_USABLE (SSSE3),
 			      __wcscpy_ssse3)
 	      IFUNC_IMPL_ADD (array, i, wcscpy, 1, __wcscpy_sse2))
 
   /* Support sysdeps/x86_64/multiarch/wcslen.c.  */
   IFUNC_IMPL (i, name, wcslen,
 	      IFUNC_IMPL_ADD (array, i, wcslen,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __wcslen_avx2)
 	      IFUNC_IMPL_ADD (array, i, wcslen, 1, __wcslen_sse2))
 
   /* Support sysdeps/x86_64/multiarch/wcsnlen.c.  */
   IFUNC_IMPL (i, name, wcsnlen,
 	      IFUNC_IMPL_ADD (array, i, wcsnlen,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __wcsnlen_avx2)
 	      IFUNC_IMPL_ADD (array, i, wcsnlen,
-			      HAS_CPU_FEATURE (SSE4_1),
+			      CPU_FEATURE_USABLE (SSE4_1),
 			      __wcsnlen_sse4_1)
 	      IFUNC_IMPL_ADD (array, i, wcsnlen, 1, __wcsnlen_sse2))
 
   /* Support sysdeps/x86_64/multiarch/wmemchr.c.  */
   IFUNC_IMPL (i, name, wmemchr,
 	      IFUNC_IMPL_ADD (array, i, wmemchr,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __wmemchr_avx2)
 	      IFUNC_IMPL_ADD (array, i, wmemchr, 1, __wmemchr_sse2))
 
   /* Support sysdeps/x86_64/multiarch/wmemcmp.c.  */
   IFUNC_IMPL (i, name, wmemcmp,
 	      IFUNC_IMPL_ADD (array, i, wmemcmp,
-			      (HAS_ARCH_FEATURE (AVX2_Usable)
-			       && HAS_CPU_FEATURE (MOVBE)),
+			      (CPU_FEATURE_USABLE (AVX2)
+			       && CPU_FEATURE_USABLE (MOVBE)),
 			      __wmemcmp_avx2_movbe)
-	      IFUNC_IMPL_ADD (array, i, wmemcmp, HAS_CPU_FEATURE (SSE4_1),
+	      IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSE4_1),
 			      __wmemcmp_sse4_1)
-	      IFUNC_IMPL_ADD (array, i, wmemcmp, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSSE3),
 			      __wmemcmp_ssse3)
 	      IFUNC_IMPL_ADD (array, i, wmemcmp, 1, __wmemcmp_sse2))
 
@@ -440,35 +440,35 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, wmemset, 1,
 			      __wmemset_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, wmemset,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __wmemset_avx2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, wmemset,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __wmemset_avx512_unaligned))
 
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/memcpy_chk.c.  */
   IFUNC_IMPL (i, name, __memcpy_chk,
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memcpy_chk_avx512_no_vzeroupper)
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memcpy_chk_avx512_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memcpy_chk_avx512_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __memcpy_chk_avx_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __memcpy_chk_avx_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __memcpy_chk_ssse3_back)
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __memcpy_chk_ssse3)
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk, 1,
 			      __memcpy_chk_sse2_unaligned)
@@ -481,23 +481,23 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/memcpy.c.  */
   IFUNC_IMPL (i, name, memcpy,
 	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __memcpy_avx_unaligned)
 	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __memcpy_avx_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, memcpy, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, memcpy, CPU_FEATURE_USABLE (SSSE3),
 			      __memcpy_ssse3_back)
-	      IFUNC_IMPL_ADD (array, i, memcpy, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, memcpy, CPU_FEATURE_USABLE (SSSE3),
 			      __memcpy_ssse3)
 	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memcpy_avx512_no_vzeroupper)
 	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memcpy_avx512_unaligned)
 	      IFUNC_IMPL_ADD (array, i, memcpy,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __memcpy_avx512_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1,
@@ -508,25 +508,25 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/mempcpy_chk.c.  */
   IFUNC_IMPL (i, name, __mempcpy_chk,
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __mempcpy_chk_avx512_no_vzeroupper)
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __mempcpy_chk_avx512_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __mempcpy_chk_avx512_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __mempcpy_chk_avx_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __mempcpy_chk_avx_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __mempcpy_chk_ssse3_back)
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
-			      HAS_CPU_FEATURE (SSSE3),
+			      CPU_FEATURE_USABLE (SSSE3),
 			      __mempcpy_chk_ssse3)
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk, 1,
 			      __mempcpy_chk_sse2_unaligned)
@@ -539,23 +539,23 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/mempcpy.c.  */
   IFUNC_IMPL (i, name, mempcpy,
 	      IFUNC_IMPL_ADD (array, i, mempcpy,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __mempcpy_avx512_no_vzeroupper)
 	      IFUNC_IMPL_ADD (array, i, mempcpy,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __mempcpy_avx512_unaligned)
 	      IFUNC_IMPL_ADD (array, i, mempcpy,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __mempcpy_avx512_unaligned_erms)
 	      IFUNC_IMPL_ADD (array, i, mempcpy,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __mempcpy_avx_unaligned)
 	      IFUNC_IMPL_ADD (array, i, mempcpy,
-			      HAS_ARCH_FEATURE (AVX_Usable),
+			      CPU_FEATURE_USABLE (AVX),
 			      __mempcpy_avx_unaligned_erms)
-	      IFUNC_IMPL_ADD (array, i, mempcpy, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, mempcpy, CPU_FEATURE_USABLE (SSSE3),
 			      __mempcpy_ssse3_back)
-	      IFUNC_IMPL_ADD (array, i, mempcpy, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, mempcpy, CPU_FEATURE_USABLE (SSSE3),
 			      __mempcpy_ssse3)
 	      IFUNC_IMPL_ADD (array, i, mempcpy, 1,
 			      __mempcpy_sse2_unaligned)
@@ -566,11 +566,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/x86_64/multiarch/strncmp.c.  */
   IFUNC_IMPL (i, name, strncmp,
 	      IFUNC_IMPL_ADD (array, i, strncmp,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __strncmp_avx2)
-	      IFUNC_IMPL_ADD (array, i, strncmp, HAS_CPU_FEATURE (SSE4_2),
+	      IFUNC_IMPL_ADD (array, i, strncmp, CPU_FEATURE_USABLE (SSE4_2),
 			      __strncmp_sse42)
-	      IFUNC_IMPL_ADD (array, i, strncmp, HAS_CPU_FEATURE (SSSE3),
+	      IFUNC_IMPL_ADD (array, i, strncmp, CPU_FEATURE_USABLE (SSSE3),
 			      __strncmp_ssse3)
 	      IFUNC_IMPL_ADD (array, i, strncmp, 1, __strncmp_sse2))
 
@@ -580,10 +580,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, __wmemset_chk, 1,
 			      __wmemset_chk_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-			      HAS_ARCH_FEATURE (AVX2_Usable),
+			      CPU_FEATURE_USABLE (AVX2),
 			      __wmemset_chk_avx2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-			      HAS_ARCH_FEATURE (AVX512F_Usable),
+			      CPU_FEATURE_USABLE (AVX512F),
 			      __wmemset_chk_avx512_unaligned))
 #endif
 
diff --git a/sysdeps/x86_64/multiarch/ifunc-memcmp.h b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
index c14db39cf4..0e21b3a628 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memcmp.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memcmp.h
@@ -30,15 +30,15 @@ IFUNC_SELECTOR (void)
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
   if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
-      && CPU_FEATURES_CPU_P (cpu_features, MOVBE)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
+      && CPU_FEATURE_USABLE_P (cpu_features, MOVBE)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     return OPTIMIZE (avx2_movbe);
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSE4_1))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1))
     return OPTIMIZE (sse4_1);
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSSE3))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3))
     return OPTIMIZE (ssse3);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/multiarch/ifunc-memmove.h b/sysdeps/x86_64/multiarch/ifunc-memmove.h
index 81673d2019..9ada03aa43 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memmove.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memmove.h
@@ -45,13 +45,13 @@ IFUNC_SELECTOR (void)
       || CPU_FEATURES_ARCH_P (cpu_features, Prefer_FSRM))
     return OPTIMIZE (erms);
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
+  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
       && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
     {
       if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
 	return OPTIMIZE (avx512_no_vzeroupper);
 
-      if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+      if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	return OPTIMIZE (avx512_unaligned_erms);
 
       return OPTIMIZE (avx512_unaligned);
@@ -59,16 +59,16 @@ IFUNC_SELECTOR (void)
 
   if (CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     {
-      if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+      if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	return OPTIMIZE (avx_unaligned_erms);
 
       return OPTIMIZE (avx_unaligned);
     }
 
-  if (!CPU_FEATURES_CPU_P (cpu_features, SSSE3)
+  if (!CPU_FEATURE_USABLE_P (cpu_features, SSSE3)
       || CPU_FEATURES_ARCH_P (cpu_features, Fast_Unaligned_Copy))
     {
-      if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+      if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	return OPTIMIZE (sse2_unaligned_erms);
 
       return OPTIMIZE (sse2_unaligned);
diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
index d690293385..f52613d372 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memset.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
@@ -42,27 +42,27 @@ IFUNC_SELECTOR (void)
   if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_ERMS))
     return OPTIMIZE (erms);
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
+  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
       && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
     {
       if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
 	return OPTIMIZE (avx512_no_vzeroupper);
 
-      if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+      if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	return OPTIMIZE (avx512_unaligned_erms);
 
       return OPTIMIZE (avx512_unaligned);
     }
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
+  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
     {
-      if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+      if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
 	return OPTIMIZE (avx2_unaligned_erms);
       else
 	return OPTIMIZE (avx2_unaligned);
     }
 
-  if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
     return OPTIMIZE (sse2_unaligned_erms);
 
   return OPTIMIZE (sse2_unaligned);
diff --git a/sysdeps/x86_64/multiarch/ifunc-sse4_2.h b/sysdeps/x86_64/multiarch/ifunc-sse4_2.h
index 082179c89a..cbf18385d3 100644
--- a/sysdeps/x86_64/multiarch/ifunc-sse4_2.h
+++ b/sysdeps/x86_64/multiarch/ifunc-sse4_2.h
@@ -27,7 +27,7 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSE4_2))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_2))
     return OPTIMIZE (sse42);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/multiarch/ifunc-strcasecmp.h b/sysdeps/x86_64/multiarch/ifunc-strcasecmp.h
index f349ee70fd..0818333931 100644
--- a/sysdeps/x86_64/multiarch/ifunc-strcasecmp.h
+++ b/sysdeps/x86_64/multiarch/ifunc-strcasecmp.h
@@ -29,14 +29,14 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURES_ARCH_P (cpu_features, AVX_Usable))
+  if (CPU_FEATURE_USABLE_P (cpu_features, AVX))
     return OPTIMIZE (avx);
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSE4_2)
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_2)
       && !CPU_FEATURES_ARCH_P (cpu_features, Slow_SSE4_2))
     return OPTIMIZE (sse42);
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSSE3))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3))
     return OPTIMIZE (ssse3);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/multiarch/ifunc-strcpy.h b/sysdeps/x86_64/multiarch/ifunc-strcpy.h
index ae4f451803..63b0dc0d96 100644
--- a/sysdeps/x86_64/multiarch/ifunc-strcpy.h
+++ b/sysdeps/x86_64/multiarch/ifunc-strcpy.h
@@ -32,14 +32,14 @@ IFUNC_SELECTOR (void)
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
   if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     return OPTIMIZE (avx2);
 
   if (CPU_FEATURES_ARCH_P (cpu_features, Fast_Unaligned_Load))
     return OPTIMIZE (sse2_unaligned);
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSSE3))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3))
     return OPTIMIZE (ssse3);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/multiarch/ifunc-wmemset.h b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
index 583f6310a1..8cfce562fc 100644
--- a/sysdeps/x86_64/multiarch/ifunc-wmemset.h
+++ b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
@@ -28,10 +28,10 @@ IFUNC_SELECTOR (void)
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
   if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     {
-      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
+      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
 	  && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
 	return OPTIMIZE (avx512_unaligned);
       else
diff --git a/sysdeps/x86_64/multiarch/sched_cpucount.c b/sysdeps/x86_64/multiarch/sched_cpucount.c
index 686fe0779c..074c663cf6 100644
--- a/sysdeps/x86_64/multiarch/sched_cpucount.c
+++ b/sysdeps/x86_64/multiarch/sched_cpucount.c
@@ -33,4 +33,4 @@
 #undef __sched_cpucount
 
 libc_ifunc (__sched_cpucount,
-	    HAS_CPU_FEATURE (POPCNT) ? popcount_cpucount : generic_cpucount);
+	    CPU_FEATURE_USABLE (POPCNT) ? popcount_cpucount : generic_cpucount);
diff --git a/sysdeps/x86_64/multiarch/strchr.c b/sysdeps/x86_64/multiarch/strchr.c
index f27980dd36..8df4609bf8 100644
--- a/sysdeps/x86_64/multiarch/strchr.c
+++ b/sysdeps/x86_64/multiarch/strchr.c
@@ -36,7 +36,7 @@ IFUNC_SELECTOR (void)
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
   if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     return OPTIMIZE (avx2);
 
diff --git a/sysdeps/x86_64/multiarch/strcmp.c b/sysdeps/x86_64/multiarch/strcmp.c
index 4db7332ac1..16ae72a4c8 100644
--- a/sysdeps/x86_64/multiarch/strcmp.c
+++ b/sysdeps/x86_64/multiarch/strcmp.c
@@ -37,14 +37,14 @@ IFUNC_SELECTOR (void)
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
   if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     return OPTIMIZE (avx2);
 
   if (CPU_FEATURES_ARCH_P (cpu_features, Fast_Unaligned_Load))
     return OPTIMIZE (sse2_unaligned);
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSSE3))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3))
     return OPTIMIZE (ssse3);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/multiarch/strncmp.c b/sysdeps/x86_64/multiarch/strncmp.c
index 6b63b0ac29..3c94b3ffd9 100644
--- a/sysdeps/x86_64/multiarch/strncmp.c
+++ b/sysdeps/x86_64/multiarch/strncmp.c
@@ -37,15 +37,15 @@ IFUNC_SELECTOR (void)
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
   if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     return OPTIMIZE (avx2);
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSE4_2)
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_2)
       && !CPU_FEATURES_ARCH_P (cpu_features, Slow_SSE4_2))
     return OPTIMIZE (sse42);
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSSE3))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3))
     return OPTIMIZE (ssse3);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/multiarch/test-multiarch.c b/sysdeps/x86_64/multiarch/test-multiarch.c
index 317373ceda..2782803e73 100644
--- a/sysdeps/x86_64/multiarch/test-multiarch.c
+++ b/sysdeps/x86_64/multiarch/test-multiarch.c
@@ -75,18 +75,18 @@ do_test (int argc, char **argv)
   int fails;
 
   get_cpuinfo ();
-  fails = check_proc ("avx", HAS_ARCH_FEATURE (AVX_Usable),
-		      "HAS_ARCH_FEATURE (AVX_Usable)");
-  fails += check_proc ("fma4", HAS_ARCH_FEATURE (FMA4_Usable),
-		       "HAS_ARCH_FEATURE (FMA4_Usable)");
-  fails += check_proc ("sse4_2", HAS_CPU_FEATURE (SSE4_2),
-		       "HAS_CPU_FEATURE (SSE4_2)");
-  fails += check_proc ("sse4_1", HAS_CPU_FEATURE (SSE4_1)
-		       , "HAS_CPU_FEATURE (SSE4_1)");
-  fails += check_proc ("ssse3", HAS_CPU_FEATURE (SSSE3),
-		       "HAS_CPU_FEATURE (SSSE3)");
-  fails += check_proc ("popcnt", HAS_CPU_FEATURE (POPCNT),
-		       "HAS_CPU_FEATURE (POPCNT)");
+  fails = check_proc ("avx", CPU_FEATURE_USABLE (AVX),
+		      "CPU_FEATURE_USABLE (AVX)");
+  fails += check_proc ("fma4", CPU_FEATURE_USABLE (FMA4),
+		       "CPU_FEATURE_USABLE (FMA4)");
+  fails += check_proc ("sse4_2", CPU_FEATURE_USABLE (SSE4_2),
+		       "CPU_FEATURE_USABLE (SSE4_2)");
+  fails += check_proc ("sse4_1", CPU_FEATURE_USABLE (SSE4_1)
+		       , "CPU_FEATURE_USABLE (SSE4_1)");
+  fails += check_proc ("ssse3", CPU_FEATURE_USABLE (SSSE3),
+		       "CPU_FEATURE_USABLE (SSSE3)");
+  fails += check_proc ("popcnt", CPU_FEATURE_USABLE (POPCNT),
+		       "CPU_FEATURE_USABLE (POPCNT)");
 
   printf ("%d differences between /proc/cpuinfo and glibc code.\n", fails);
 
diff --git a/sysdeps/x86_64/multiarch/wcscpy.c b/sysdeps/x86_64/multiarch/wcscpy.c
index 0dd2a9a34b..e08536c593 100644
--- a/sysdeps/x86_64/multiarch/wcscpy.c
+++ b/sysdeps/x86_64/multiarch/wcscpy.c
@@ -34,7 +34,7 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSSE3))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSSE3))
     return OPTIMIZE (ssse3);
 
   return OPTIMIZE (sse2);
diff --git a/sysdeps/x86_64/multiarch/wcsnlen.c b/sysdeps/x86_64/multiarch/wcsnlen.c
index 8c1fc1a574..52e7e5d4f3 100644
--- a/sysdeps/x86_64/multiarch/wcsnlen.c
+++ b/sysdeps/x86_64/multiarch/wcsnlen.c
@@ -36,11 +36,11 @@ IFUNC_SELECTOR (void)
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
   if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+      && CPU_FEATURE_USABLE_P (cpu_features, AVX2)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     return OPTIMIZE (avx2);
 
-  if (CPU_FEATURES_CPU_P (cpu_features, SSE4_1))
+  if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1))
     return OPTIMIZE (sse4_1);
 
   return OPTIMIZE (sse2);