author     Adhemerval Zanella <adhemerval.zanella@linaro.org>  2024-03-26 10:47:42 -0300
committer  Adhemerval Zanella <adhemerval.zanella@linaro.org>  2024-04-22 15:39:48 -0300
commit     25b191f6d33cda5770a18fd18be86cce0ebb3228 (patch)
tree       f850b3f06d97674d0014ead9e211507659fbb12d
parent     f6d18bea387676e774e18ce410ace8c33a5c3511 (diff)
elf: Do not check for loader mmap on tst-decorate-maps (BZ 31553)
On some architectures, and depending on the page size, the loader can
also allocate memory while loading dependencies, and those pages are
marked as 'loader malloc'.  However, if the system page size is large
enough, the initial data page covers all the required allocations and
no extra loader mmap takes place.  To avoid false negatives, the test
no longer checks for such pages.

Checked on powerpc64le-linux-gnu with a 64k page size.
Reviewed-by: Simon Chopin <simon.chopin@canonical.com>
-rw-r--r--  elf/tst-decorate-maps.c  | 12
 1 file changed, 6 insertions(+), 6 deletions(-)
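
The '[anon: glibc: ...]' decorations the test looks for come from the
Linux PR_SET_VMA_ANON_NAME prctl (available since Linux 5.17 with
CONFIG_ANON_VMA_NAME), which glibc uses to label its anonymous
mappings.  As a minimal sketch of the mechanism, not glibc's code (the
"example" label below is hypothetical), naming a mapping and then
finding it in /proc/self/maps looks like this:

/* Minimal sketch: name an anonymous mapping and locate it in
   /proc/self/maps.  Requires Linux >= 5.17 with CONFIG_ANON_VMA_NAME;
   the "example" label is made up for illustration.  */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
# define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
#endif

int
main (void)
{
  size_t len = 1 << 20;
  void *p = mmap (NULL, len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;

  /* The region now shows up as "[anon:example]" in /proc/self/maps.  */
  if (prctl (PR_SET_VMA, PR_SET_VMA_ANON_NAME,
             (unsigned long) p, len, (unsigned long) "example") != 0)
    return 2;

  FILE *f = fopen ("/proc/self/maps", "r");
  char line[512];
  while (fgets (line, sizeof line, f) != NULL)
    if (strstr (line, "[anon:example]") != NULL)
      fputs (line, stdout);
  fclose (f);
  return 0;
}
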
diff --git a/elf/tst-decorate-maps.c b/elf/tst-decorate-maps.c
index 85ba5ce939..6d04344ba2 100644
--- a/elf/tst-decorate-maps.c
+++ b/elf/tst-decorate-maps.c
@@ -56,7 +56,6 @@ struct proc_maps_t
   int n_user_threads;
   int n_arenas;
   int n_malloc_mmap;
-  int n_loader_malloc_mmap;
 };
 
 static struct proc_maps_t
@@ -82,8 +81,12 @@ read_proc_maps (void)
 	r.n_arenas++;
       else if (strstr (line, "[anon: glibc: malloc]") != NULL)
 	r.n_malloc_mmap++;
-      else if (strstr (line, "[anon: glibc: loader malloc]") != NULL)
-	r.n_loader_malloc_mmap++;
+      /* On some architectures, and depending on the page size, the loader
+	 can also allocate memory while loading dependencies, and those
+	 pages are marked as 'loader malloc'.  However, if the system page
+	 size is large enough, the initial data page covers all required
+	 allocations and no extra loader mmap takes place.  To avoid false
+	 negatives, the test does not check for such pages.  */
     }
   free (line);
   xfclose (f);
@@ -148,8 +151,6 @@ do_test_threads (bool set_guard)
     TEST_COMPARE (r.n_user_threads, num_user_threads);
     TEST_COMPARE (r.n_arenas, expected_n_arenas);
     TEST_COMPARE (r.n_malloc_mmap, 1);
-    /* On some architectures the loader might use more than one page.  */
-    TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
   }
 
   /* Let the threads finish.  */
@@ -164,7 +165,6 @@ do_test_threads (bool set_guard)
     TEST_COMPARE (r.n_user_threads, 0);
     TEST_COMPARE (r.n_arenas, expected_n_arenas);
     TEST_COMPARE (r.n_malloc_mmap, 1);
-    TEST_VERIFY (r.n_loader_malloc_mmap >= 1);
   }
 
   free (p);
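
For context, here is a reduced sketch of the counting scheme the test
is left with after this change.  It is a paraphrase, not the actual
glibc source: the "malloc arena" label is assumed from surrounding code
the diff does not show, and the glibc test helpers (xfclose etc.) are
replaced with plain stdio.  The point is that loader-malloc pages are
simply not tallied, so seeing zero of them is a legitimate outcome on
large-page systems.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct counts
{
  int n_arenas;
  int n_malloc_mmap;
};

/* Count the glibc-decorated anonymous mappings of the running process.
   "[anon: glibc: loader malloc]" entries are deliberately ignored: with
   a large enough page size the loader may never issue such a mmap, so
   any fixed expected count would be unreliable.  */
static struct counts
count_decorated_maps (void)
{
  struct counts c = { 0, 0 };
  FILE *f = fopen ("/proc/self/maps", "r");
  char *line = NULL;
  size_t len = 0;
  if (f == NULL)
    return c;
  while (getline (&line, &len, f) != -1)
    {
      /* The arena check must come first: the plain "[anon: glibc:
	 malloc]" pattern ends with ']' and so cannot match an arena
	 line, but keeping the more specific test first mirrors the
	 test's own ordering.  */
      if (strstr (line, "[anon: glibc: malloc arena]") != NULL)
	c.n_arenas++;
      else if (strstr (line, "[anon: glibc: malloc]") != NULL)
	c.n_malloc_mmap++;
    }
  free (line);
  fclose (f);
  return c;
}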