author    H.J. Lu <hjl.tools@gmail.com>  2020-09-18 07:55:14 -0700
committer H.J. Lu <hjl.tools@gmail.com>  2021-01-14 11:38:45 -0800
commit    2d651eb9265d1366d7b9e881bfddd46db9c1ecc4 (patch)
tree      e7ab45e6e14b7be7729b8ae06aa911f97d446d37 /sysdeps/x86/dl-cacheinfo.h
parent    d18f59bf9223e9342be16baa2411ef3acc3f7ea4 (diff)
x86: Move x86 processor cache info to cpu_features
1. Move x86 processor cache info to _dl_x86_cpu_features in ld.so.
2. Update tunable bounds with TUNABLE_SET_WITH_BOUNDS.
3. Move x86 cache info initialization to dl-cacheinfo.h and initialize
x86 cache info in init_cpu_features ().
4. Put x86 cache info for libc in cacheinfo.h, which is included in
libc-start.c in libc.a and in cacheinfo.c in libc.so.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Diffstat (limited to 'sysdeps/x86/dl-cacheinfo.h')
-rw-r--r--  sysdeps/x86/dl-cacheinfo.h  460
1 file changed, 460 insertions(+), 0 deletions(-)
diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h
index 6adce4147c..96c49f2411 100644
--- a/sysdeps/x86/dl-cacheinfo.h
+++ b/sysdeps/x86/dl-cacheinfo.h
@@ -476,3 +476,463 @@ handle_zhaoxin (int name)
   /* Nothing found.  */
   return 0;
 }
+
+static void
+get_common_cache_info (long int *shared_ptr, unsigned int *threads_ptr,
+                long int core)
+{
+  unsigned int eax;
+  unsigned int ebx;
+  unsigned int ecx;
+  unsigned int edx;
+
+  /* Number of logical processors sharing L2 cache.  */
+  int threads_l2;
+
+  /* Number of logical processors sharing L3 cache.  */
+  int threads_l3;
+
+  const struct cpu_features *cpu_features = __get_cpu_features ();
+  int max_cpuid = cpu_features->basic.max_cpuid;
+  unsigned int family = cpu_features->basic.family;
+  unsigned int model = cpu_features->basic.model;
+  long int shared = *shared_ptr;
+  unsigned int threads = *threads_ptr;
+  bool inclusive_cache = true;
+  bool support_count_mask = true;
+
+  /* Try L3 first.  */
+  unsigned int level = 3;
+
+  if (cpu_features->basic.kind == arch_kind_zhaoxin && family == 6)
+    support_count_mask = false;
+
+  if (shared <= 0)
+    {
+      /* Try L2 otherwise.  */
+      level  = 2;
+      shared = core;
+      threads_l2 = 0;
+      threads_l3 = -1;
+    }
+  else
+    {
+      threads_l2 = 0;
+      threads_l3 = 0;
+    }
+
+  /* A value of 0 for the HTT bit indicates there is only a single
+     logical processor.  */
+  if (HAS_CPU_FEATURE (HTT))
+    {
+      /* Figure out the number of logical threads that share the
+         highest cache level.  */
+      if (max_cpuid >= 4)
+        {
+          int i = 0;
+
+          /* Query until cache levels 2 and 3 are enumerated.  */
+          int check = 0x1 | (threads_l3 == 0) << 1;
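+          /* Bit 0 of CHECK requests the L2 thread count and bit 1 the
+             L3 thread count.  THREADS_L3 is -1 when no shared L3 was
+             found above, so bit 1 is set only if an L3 exists.  */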
+          do
+            {
+              __cpuid_count (4, i++, eax, ebx, ecx, edx);
+
+              /* There seems to be a bug in at least some Pentium Ds
+                 which sometimes fail to iterate all cache parameters.
+                 Do not loop indefinitely here; stop in this case and
+                 assume there is no such information.  */
+              if (cpu_features->basic.kind == arch_kind_intel
+                  && (eax & 0x1f) == 0)
+                goto intel_bug_no_cache_info;
+
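+              /* Bits 5-7 of EAX give the level of the cache described
+                 by this CPUID leaf 4 subleaf.  */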
+              switch ((eax >> 5) & 0x7)
+                {
+                  default:
+                    break;
+                  case 2:
+                    if ((check & 0x1))
+                      {
+                        /* Get maximum number of logical processors
+                           sharing L2 cache.  */
+                        threads_l2 = (eax >> 14) & 0x3ff;
+                        check &= ~0x1;
+                      }
+                    break;
+                  case 3:
+                    if ((check & (0x1 << 1)))
+                      {
+                        /* Get maximum number of logical processors
+                           sharing L3 cache.  */
+                        threads_l3 = (eax >> 14) & 0x3ff;
+
+                        /* Check if L2 and L3 caches are inclusive.  */
+                        inclusive_cache = (edx & 0x2) != 0;
+                        check &= ~(0x1 << 1);
+                      }
+                    break;
+                }
+            }
+          while (check);
+
+          /* If max_cpuid >= 11, THREADS_L2/THREADS_L3 are the maximum
+             numbers of addressable IDs for logical processors sharing
+             the cache, instead of the maximum number of threads
+             sharing the cache.  */
+          if (max_cpuid >= 11 && support_count_mask)
+            {
+              /* Find the number of logical processors shipped in
+                 one core and apply count mask.  */
+              i = 0;
+
+              /* Count SMT only if there is L3 cache.  Always count
+                 core if there is no L3 cache.  */
+              int count = ((threads_l2 > 0 && level == 3)
+                           | ((threads_l3 > 0
+                               || (threads_l2 > 0 && level == 2)) << 1));
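+              /* Bit 0 of COUNT selects the SMT pass of the loop below
+                 (mask THREADS_L2) and bit 1 the core pass (mask
+                 THREADS_L3, or THREADS_L2 when L2 is the highest
+                 level).  */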
+
+              while (count)
+                {
+                  __cpuid_count (11, i++, eax, ebx, ecx, edx);
+
+                  int shipped = ebx & 0xff;
+                  int type = ecx & 0xff00;
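+                  /* ECX bits 8-15 give the level type: 1 (0x100 after
+                     masking) is SMT, 2 (0x200) is core.  EBX holds the
+                     number of logical processors at this level.  */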
+                  if (shipped == 0 || type == 0)
+                    break;
+                  else if (type == 0x100)
+                    {
+                      /* Count SMT.  */
+                      if ((count & 0x1))
+                        {
+                          int count_mask;
+
+                          /* Compute count mask.  */
+                          asm ("bsr %1, %0"
+                               : "=r" (count_mask) : "g" (threads_l2));
+                          count_mask = ~(-1 << (count_mask + 1));
+                          threads_l2 = (shipped - 1) & count_mask;
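+                          /* E.g. THREADS_L2 == 3 (4 addressable IDs):
+                             bsr yields 1, the mask is 0x3 and
+                             SHIPPED - 1 is reduced modulo 4.  */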
+                          count &= ~0x1;
+                        }
+                    }
+                  else if (type == 0x200)
+                    {
+                      /* Count core.  */
+                      if ((count & (0x1 << 1)))
+                        {
+                          int count_mask;
+                          int threads_core
+                            = (level == 2 ? threads_l2 : threads_l3);
+
+                          /* Compute count mask.  */
+                          asm ("bsr %1, %0"
+                               : "=r" (count_mask) : "g" (threads_core));
+                          count_mask = ~(-1 << (count_mask + 1));
+                          threads_core = (shipped - 1) & count_mask;
+                          if (level == 2)
+                            threads_l2 = threads_core;
+                          else
+                            threads_l3 = threads_core;
+                          count &= ~(0x1 << 1);
+                        }
+                    }
+                }
+            }
+          if (threads_l2 > 0)
+            threads_l2 += 1;
+          if (threads_l3 > 0)
+            threads_l3 += 1;
+          if (level == 2)
+            {
+              if (threads_l2)
+                {
+                  threads = threads_l2;
+                  if (cpu_features->basic.kind == arch_kind_intel
+                      && threads > 2
+                      && family == 6)
+                    switch (model)
+                      {
+                        case 0x37:
+                        case 0x4a:
+                        case 0x4d:
+                        case 0x5a:
+                        case 0x5d:
+                          /* Silvermont has L2 cache shared by 2 cores.  */
+                          threads = 2;
+                          break;
+                        default:
+                          break;
+                      }
+                }
+            }
+          else if (threads_l3)
+            threads = threads_l3;
+        }
+      else
+        {
+intel_bug_no_cache_info:
+          /* Assume that all logical threads share the highest cache
+             level.  */
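+          /* CPUID leaf 1 EBX bits 16-23 give the maximum number of
+             addressable logical processor IDs in the package.  */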
+          threads
+            = ((cpu_features->features[COMMON_CPUID_INDEX_1].cpuid.ebx
+                >> 16) & 0xff);
+        }
+
+      /* Cap usage of highest cache level to the number of supported
+         threads.  */
+      if (shared > 0 && threads > 0)
+        shared /= threads;
+    }
+
+  /* Account for non-inclusive L2 and L3 caches.  */
+  if (!inclusive_cache)
+    {
+      if (threads_l2 > 0)
+        core /= threads_l2;
+      shared += core;
+    }
+
+  *shared_ptr = shared;
+  *threads_ptr = threads;
+}
+
+static void
+dl_init_cacheinfo (struct cpu_features *cpu_features)
+{
+  /* Find out the brand of the processor.  */
+  unsigned int ebx;
+  unsigned int ecx;
+  unsigned int edx;
+  int max_cpuid_ex;
+  long int data = -1;
+  long int shared = -1;
+  long int core;
+  unsigned int threads = 0;
+  unsigned long int level1_icache_size = -1;
+  unsigned long int level1_dcache_size = -1;
+  unsigned long int level1_dcache_assoc = -1;
+  unsigned long int level1_dcache_linesize = -1;
+  unsigned long int level2_cache_size = -1;
+  unsigned long int level2_cache_assoc = -1;
+  unsigned long int level2_cache_linesize = -1;
+  unsigned long int level3_cache_size = -1;
+  unsigned long int level3_cache_assoc = -1;
+  unsigned long int level3_cache_linesize = -1;
+  unsigned long int level4_cache_size = -1;
+
+  if (cpu_features->basic.kind == arch_kind_intel)
+    {
+      data = handle_intel (_SC_LEVEL1_DCACHE_SIZE, cpu_features);
+      core = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
+      shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
+
+      level1_icache_size
+	= handle_intel (_SC_LEVEL1_ICACHE_SIZE, cpu_features);
+      level1_dcache_size = data;
+      level1_dcache_assoc
+	= handle_intel (_SC_LEVEL1_DCACHE_ASSOC, cpu_features);
+      level1_dcache_linesize
+	= handle_intel (_SC_LEVEL1_DCACHE_LINESIZE, cpu_features);
+      level2_cache_size = core;
+      level2_cache_assoc
+	= handle_intel (_SC_LEVEL2_CACHE_ASSOC, cpu_features);
+      level2_cache_linesize
+	= handle_intel (_SC_LEVEL2_CACHE_LINESIZE, cpu_features);
+      level3_cache_size = shared;
+      level3_cache_assoc
+	= handle_intel (_SC_LEVEL3_CACHE_ASSOC, cpu_features);
+      level3_cache_linesize
+	= handle_intel (_SC_LEVEL3_CACHE_LINESIZE, cpu_features);
+      level4_cache_size
+	= handle_intel (_SC_LEVEL4_CACHE_SIZE, cpu_features);
+
+      get_common_cache_info (&shared, &threads, core);
+    }
+  else if (cpu_features->basic.kind == arch_kind_zhaoxin)
+    {
+      data = handle_zhaoxin (_SC_LEVEL1_DCACHE_SIZE);
+      core = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
+      shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE);
+
+      level1_icache_size = handle_zhaoxin (_SC_LEVEL1_ICACHE_SIZE);
+      level1_dcache_size = data;
+      level1_dcache_assoc = handle_zhaoxin (_SC_LEVEL1_DCACHE_ASSOC);
+      level1_dcache_linesize = handle_zhaoxin (_SC_LEVEL1_DCACHE_LINESIZE);
+      level2_cache_size = core;
+      level2_cache_assoc = handle_zhaoxin (_SC_LEVEL2_CACHE_ASSOC);
+      level2_cache_linesize = handle_zhaoxin (_SC_LEVEL2_CACHE_LINESIZE);
+      level3_cache_size = shared;
+      level3_cache_assoc = handle_zhaoxin (_SC_LEVEL3_CACHE_ASSOC);
+      level3_cache_linesize = handle_zhaoxin (_SC_LEVEL3_CACHE_LINESIZE);
+
+      get_common_cache_info (&shared, &threads, core);
+    }
+  else if (cpu_features->basic.kind == arch_kind_amd)
+    {
+      data = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
+      core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
+      shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);
+
+      level1_icache_size = handle_amd (_SC_LEVEL1_ICACHE_SIZE);
+      level1_dcache_size = data;
+      level1_dcache_assoc = handle_amd (_SC_LEVEL1_DCACHE_ASSOC);
+      level1_dcache_linesize = handle_amd (_SC_LEVEL1_DCACHE_LINESIZE);
+      level2_cache_size = core;
+      level2_cache_assoc = handle_amd (_SC_LEVEL2_CACHE_ASSOC);
+      level2_cache_linesize = handle_amd (_SC_LEVEL2_CACHE_LINESIZE);
+      level3_cache_size = shared;
+      level3_cache_assoc = handle_amd (_SC_LEVEL3_CACHE_ASSOC);
+      level3_cache_linesize = handle_amd (_SC_LEVEL3_CACHE_LINESIZE);
+
+      /* Get maximum extended function.  */
+      __cpuid (0x80000000, max_cpuid_ex, ebx, ecx, edx);
+
+      if (shared <= 0)
+	/* No shared L3 cache.  All we have is the L2 cache.  */
+	shared = core;
+      else
+	{
+	  /* Figure out the number of logical threads that share L3.  */
+	  if (max_cpuid_ex >= 0x80000008)
+	    {
+	      /* Get width of APIC ID.  */
+	      __cpuid (0x80000008, max_cpuid_ex, ebx, ecx, edx);
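+	      /* ECX bits 12-15 of leaf 0x80000008 give the APIC ID
+		 width; 1 << width is the maximum number of logical
+		 processors in the package.  */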
+	      threads = 1 << ((ecx >> 12) & 0x0f);
+	    }
+
+	  if (threads == 0 || cpu_features->basic.family >= 0x17)
+	    {
+	      /* If the APIC ID width is not available, or on family
+		 0x17 (Zen) and later, use the logical processor
+		 count instead.  */
+	      __cpuid (0x00000001, max_cpuid_ex, ebx, ecx, edx);
+
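+	      /* EDX bit 28 is the HTT flag; if it is set, EBX bits
+		 16-23 give the maximum number of addressable logical
+		 processor IDs.  */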
+	      if ((edx & (1 << 28)) != 0)
+		threads = (ebx >> 16) & 0xff;
+	    }
+
+	  /* Cap usage of highest cache level to the number of
+	     supported threads.  */
+	  if (threads > 0)
+	    shared /= threads;
+
+	  /* Get shared cache per CCX for Zen architectures.  */
+	  if (cpu_features->basic.family >= 0x17)
+	    {
+	      unsigned int eax;
+
+	      /* Get the number of threads sharing the L3 cache in a CCX.  */
+	      __cpuid_count (0x8000001D, 0x3, eax, ebx, ecx, edx);
+
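+	      /* EAX bits 14-25 of leaf 0x8000001D hold the number of
+		 logical processors sharing this cache, minus 1.  */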
+	      unsigned int threads_per_ccx = ((eax >> 14) & 0xfff) + 1;
+	      shared *= threads_per_ccx;
+	    }
+	  else
+	    {
+	      /* Account for exclusive L2 and L3 caches.  */
+	      shared += core;
+	    }
+	}
+    }
+
+  cpu_features->level1_icache_size = level1_icache_size;
+  cpu_features->level1_dcache_size = level1_dcache_size;
+  cpu_features->level1_dcache_assoc = level1_dcache_assoc;
+  cpu_features->level1_dcache_linesize = level1_dcache_linesize;
+  cpu_features->level2_cache_size = level2_cache_size;
+  cpu_features->level2_cache_assoc = level2_cache_assoc;
+  cpu_features->level2_cache_linesize = level2_cache_linesize;
+  cpu_features->level3_cache_size = level3_cache_size;
+  cpu_features->level3_cache_assoc = level3_cache_assoc;
+  cpu_features->level3_cache_linesize = level3_cache_linesize;
+  cpu_features->level4_cache_size = level4_cache_size;
+
+  /* The default setting for the non_temporal threshold is 3/4 of one
+     thread's share of the chip's cache. For most Intel and AMD processors
+     with an initial release date between 2017 and 2020, a thread's typical
+     share of the cache is from 500 KBytes to 2 MBytes. Using the 3/4
+     threshold leaves 125 KBytes to 500 KBytes of the thread's data
+     in cache after a maximum temporal copy, which will maintain
+     in cache a reasonable portion of the thread's stack and other
+     active data. If the threshold is set higher than one thread's
+     share of the cache, it has a substantial risk of negatively
+     impacting the performance of other threads running on the chip. */
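+  /* E.g. with a 2 MByte per-thread share the threshold is 1.5 MBytes,
+     leaving about 500 KBytes of the thread's data in cache after a
+     maximal temporal copy.  */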
+  unsigned long int non_temporal_threshold = shared * 3 / 4;
+
+#if HAVE_TUNABLES
+  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
+  unsigned int minimum_rep_movsb_threshold;
+#endif
+  /* NB: The default REP MOVSB threshold is 2048 * (VEC_SIZE / 16).  */
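+  /* With VEC_SIZE of 64 (ZMM), 32 (YMM) and 16 (XMM) bytes this gives
+     defaults of 8 KB, 4 KB and 2 KB respectively, matching the three
+     cases below.  */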
+  unsigned int rep_movsb_threshold;
+  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
+      && !CPU_FEATURE_PREFERRED_P (cpu_features, Prefer_No_AVX512))
+    {
+      rep_movsb_threshold = 2048 * (64 / 16);
+#if HAVE_TUNABLES
+      minimum_rep_movsb_threshold = 64 * 8;
+#endif
+    }
+  else if (CPU_FEATURE_PREFERRED_P (cpu_features,
+				    AVX_Fast_Unaligned_Load))
+    {
+      rep_movsb_threshold = 2048 * (32 / 16);
+#if HAVE_TUNABLES
+      minimum_rep_movsb_threshold = 32 * 8;
+#endif
+    }
+  else
+    {
+      rep_movsb_threshold = 2048 * (16 / 16);
+#if HAVE_TUNABLES
+      minimum_rep_movsb_threshold = 16 * 8;
+#endif
+    }
+
+  /* The default threshold to use Enhanced REP STOSB.  */
+  unsigned long int rep_stosb_threshold = 2048;
+
+#if HAVE_TUNABLES
+  long int tunable_size;
+
+  tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
+  /* NB: Ignore the default value 0.  */
+  if (tunable_size != 0)
+    data = tunable_size;
+
+  tunable_size = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
+  /* NB: Ignore the default value 0.  */
+  if (tunable_size != 0)
+    shared = tunable_size;
+
+  tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
+  /* NB: Ignore the default value 0.  */
+  if (tunable_size != 0)
+    non_temporal_threshold = tunable_size;
+
+  tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
+  if (tunable_size > minimum_rep_movsb_threshold)
+    rep_movsb_threshold = tunable_size;
+
+  /* NB: The default value of the x86_rep_stosb_threshold tunable is the
+     same as the default value of __x86_rep_stosb_threshold and the
+     minimum value is fixed.  */
+  rep_stosb_threshold = TUNABLE_GET (x86_rep_stosb_threshold,
+				     long int, NULL);
+
+  TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, long int, data,
+			   0, (long int) -1);
+  TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, long int, shared,
+			   0, (long int) -1);
+  TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, long int,
+			   non_temporal_threshold, 0, (long int) -1);
+  TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, long int,
+			   rep_movsb_threshold,
+			   minimum_rep_movsb_threshold, (long int) -1);
+  TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, long int,
+			   rep_stosb_threshold, 1, (long int) -1);
+#endif
+
+  cpu_features->data_cache_size = data;
+  cpu_features->shared_cache_size = shared;
+  cpu_features->non_temporal_threshold = non_temporal_threshold;
+  cpu_features->rep_movsb_threshold = rep_movsb_threshold;
+  cpu_features->rep_stosb_threshold = rep_stosb_threshold;
+}