author     Paul Pluzhnikov <ppluzhnikov@google.com>   2023-05-20 13:37:47 +0000
committer  Paul Pluzhnikov <ppluzhnikov@google.com>   2023-06-02 01:39:48 +0000
commit     7f0d9e61f40c669fca3cfd1e342fa8236c7220b7 (patch)
tree       e02ce0ba813f2cb4f20643988ec030292784cab6 /malloc
parent     5013f6fc6c44160e8ec6bcd34ba676e85d9d6ab6 (diff)
Fix all the remaining misspellings -- BZ 25337
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/arena.c            2
-rw-r--r--  malloc/malloc-debug.c     2
-rw-r--r--  malloc/malloc.c          16
-rw-r--r--  malloc/tst-mallocfork2.c  4
-rw-r--r--  malloc/tst-mallocfork3.c  2
-rw-r--r--  malloc/tst-mallocstate.c  2
6 files changed, 14 insertions, 14 deletions
diff --git a/malloc/arena.c b/malloc/arena.c
index e98b779dbb..6f03955ff2 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -39,7 +39,7 @@
    mmap threshold, so that requests with a size just below that
    threshold can be fulfilled without creating too many heaps.  */
 
-/* When huge pages are used to create new arenas, the maximum and minumum
+/* When huge pages are used to create new arenas, the maximum and minimum
    size are based on the runtime defined huge page size.  */
 
 static inline size_t
diff --git a/malloc/malloc-debug.c b/malloc/malloc-debug.c
index da9d2340d3..f9d131d22f 100644
--- a/malloc/malloc-debug.c
+++ b/malloc/malloc-debug.c
@@ -588,7 +588,7 @@ malloc_set_state (void *msptr)
   __malloc_debug_disable (MALLOC_CHECK_HOOK);
 
   /* We do not need to perform locking here because malloc_set_state
-     must be called before the first call into the malloc subsytem (usually via
+     must be called before the first call into the malloc subsystem (usually via
      __malloc_initialize_hook).  pthread_create always calls calloc and thus
      must be called only afterwards, so there cannot be more than one thread
      when we reach this point.  Also handle initialization if either we ended
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 5d8b61d66c..b8c0f4f580 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -271,7 +271,7 @@
   is fairly extensive, and will slow down execution
   noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
   will attempt to check every non-mmapped allocated and free chunk in
-  the course of computing the summmaries. (By nature, mmapped regions
+  the course of computing the summaries. (By nature, mmapped regions
   cannot be checked very much automatically.)
 
   Setting MALLOC_DEBUG may also be helpful if you are trying to modify
@@ -672,7 +672,7 @@ void*  __libc_valloc(size_t);
   arena:     current total non-mmapped bytes allocated from system
   ordblks:   the number of free chunks
   smblks:    the number of fastbin blocks (i.e., small chunks that
-	       have been freed but not use resused or consolidated)
+	       have been freed but not reused or consolidated)
   hblks:     current number of mmapped regions
   hblkhd:    total bytes held in mmapped regions
   usmblks:   always 0
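
[Note: the fields documented above are visible to applications. A minimal sketch, ordinary user code rather than glibc internals, that reads them through mallinfo2 (the replacement for the legacy mallinfo interface since glibc 2.33) and then calls malloc_stats, which the MALLOC_DEBUG comment earlier also mentions:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  void *p = malloc (1000);	/* Put something in the main arena.  */

  struct mallinfo2 mi = mallinfo2 ();
  printf ("arena   (non-mmapped bytes from system): %zu\n", mi.arena);
  printf ("ordblks (number of free chunks):         %zu\n", mi.ordblks);
  printf ("smblks  (number of fastbin blocks):      %zu\n", mi.smblks);
  printf ("hblks   (number of mmapped regions):     %zu\n", mi.hblks);
  printf ("hblkhd  (bytes held in mmapped regions): %zu\n", mi.hblkhd);

  malloc_stats ();	/* Prints a per-arena summary to stderr.  */

  free (p);
  return 0;
}
]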
@@ -1017,7 +1017,7 @@ libc_hidden_proto (__libc_mallopt)
 
   In 2001, the kernel had a maximum size for brk() which was about 800
   megabytes on 32 bit x86, at that point brk() would hit the first
-  mmaped shared libaries and couldn't expand anymore. With current 2.6
+  mmaped shared libraries and couldn't expand anymore. With current 2.6
   kernels, the VA space layout is different and brk() and mmap
   both can span the entire heap at will.
 
@@ -1486,7 +1486,7 @@ tag_new_usable (void *ptr)
     and consolidated sets of chunks, which is what these bins hold, so
     they can be found quickly.  All procedures maintain the invariant
     that no consolidated chunk physically borders another one, so each
-    chunk in a list is known to be preceeded and followed by either
+    chunk in a list is known to be preceded and followed by either
     inuse chunks or the ends of memory.
 
     Chunks in bins are kept in size order, with ties going to the
@@ -2475,7 +2475,7 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
 /*
    Allocate memory using mmap() based on S and NB requested size, aligning to
    PAGESIZE if required.  The EXTRA_FLAGS is used on mmap() call.  If the call
-   succeedes S is updated with the allocated size.  This is used as a fallback
+   succeeds S is updated with the allocated size.  This is used as a fallback
    if MORECORE fails.
  */
 static void *
@@ -2557,7 +2557,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
       char *mm;
       if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
 	{
-	  /* There is no need to isse the THP madvise call if Huge Pages are
+	  /* There is no need to issue the THP madvise call if Huge Pages are
 	     used directly.  */
 	  mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
 	  if (mm != MAP_FAILED)
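
[Note: the distinction the corrected comment draws, advising the kernel about transparent huge pages versus mapping huge pages directly, looks roughly like this from the mmap side. An illustrative Linux sketch with an assumed 2 MiB huge page size, not glibc's actual sysmalloc code:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int
main (void)
{
  size_t len = 2 * 1024 * 1024;	/* Assumed huge page size (2 MiB).  */

  /* THP path: map ordinary anonymous pages, then advise the kernel to
     back them with transparent huge pages.  */
  void *thp = mmap (NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (thp != MAP_FAILED)
    madvise (thp, len, MADV_HUGEPAGE);

  /* Direct path: MAP_HUGETLB hands out huge pages up front (and fails
     if none are configured), so no madvise call is needed.  This is
     the case the corrected comment describes.  */
  void *htlb = mmap (NULL, len, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (htlb == MAP_FAILED)
    perror ("mmap (MAP_HUGETLB)");

  return 0;
}
]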
@@ -3439,7 +3439,7 @@ __libc_realloc (void *oldmem, size_t bytes)
     }
 
   /* Little security check which won't hurt performance: the allocator
-     never wrapps around at the end of the address space.  Therefore
+     never wraps around at the end of the address space.  Therefore
      we can exclude some size values which might appear here by
      accident or by "design" from some intruder.  */
   if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
@@ -4486,7 +4486,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   size = chunksize (p);
 
   /* Little security check which won't hurt performance: the
-     allocator never wrapps around at the end of the address space.
+     allocator never wraps around at the end of the address space.
      Therefore we can exclude some size values which might appear
      here by accident or by "design" from some intruder.  */
   if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
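
[Note: the two hunks above touch the same wraparound check, in __libc_realloc and _int_free respectively. The reasoning: on an N-bit machine, (uintptr_t) -size equals 2^N - size, so a pointer greater than that would place the chunk's end past the top of the address space. A hypothetical standalone restatement, not glibc code (glibc also never sees size == 0 here, since chunks have a minimum size):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Return true if a chunk claiming SIZE bytes at P would wrap around
   the address space, i.e. if P + SIZE overflows a uintptr_t.  Such a
   (P, SIZE) pair can only arise from corruption or an attack.  */
static bool
chunk_wraps (const void *p, size_t size)
{
  return (uintptr_t) p > (uintptr_t) -size;
}
]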
diff --git a/malloc/tst-mallocfork2.c b/malloc/tst-mallocfork2.c
index 6ef0f180e9..8a2979ad07 100644
--- a/malloc/tst-mallocfork2.c
+++ b/malloc/tst-mallocfork2.c
@@ -147,7 +147,7 @@ do_test (void)
 {
   atexit (kill_children);
 
-  /* shared->barrier is intialized along with sigusr1_sender_pids
+  /* shared->barrier is initialized along with sigusr1_sender_pids
      below.  */
   shared = support_shared_allocate (sizeof (*shared));
 
@@ -175,7 +175,7 @@ do_test (void)
     signal_sender (SIGUSR2, true);
 
   /* Send SIGUSR1 signals from several processes.  Hopefully, one
-     signal will hit one of the ciritical functions.  Use a barrier to
+     signal will hit one of the critical functions.  Use a barrier to
      avoid sending signals while not running fork/free/malloc.  */
   {
     pthread_barrierattr_t attr;
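
[Note: the hunk ends just as the test declares its barrier attribute object. For context, a barrier shared between forked signal-sender processes is typically initialized along these lines; an illustrative sketch, not the test's actual code, with nprocs standing in for the participant count:

#include <pthread.h>

/* BARRIER must live in memory visible to every participating process,
   e.g. inside a MAP_SHARED region such as the test's "shared" block.  */
static void
init_shared_barrier (pthread_barrier_t *barrier, unsigned int nprocs)
{
  pthread_barrierattr_t attr;
  pthread_barrierattr_init (&attr);
  pthread_barrierattr_setpshared (&attr, PTHREAD_PROCESS_SHARED);
  pthread_barrier_init (barrier, &attr, nprocs);
  pthread_barrierattr_destroy (&attr);
}
]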
diff --git a/malloc/tst-mallocfork3.c b/malloc/tst-mallocfork3.c
index 254f71d2a1..80fc2dd06c 100644
--- a/malloc/tst-mallocfork3.c
+++ b/malloc/tst-mallocfork3.c
@@ -142,7 +142,7 @@ do_test (void)
   sigusr2_sender = xpthread_create (NULL, signal_sender, &sigusr2_args);
 
   /* Send SIGUSR1 signals from several threads.  Hopefully, one
-     signal will hit one of the ciritical functions.  Use a barrier to
+     signal will hit one of the critical functions.  Use a barrier to
      avoid sending signals while not running fork/free/malloc.  */
   struct signal_send_args sigusr1_args = { self, SIGUSR1, false };
   xpthread_barrier_init (&barrier, NULL,
diff --git a/malloc/tst-mallocstate.c b/malloc/tst-mallocstate.c
index 704ec88a6e..340a394d1a 100644
--- a/malloc/tst-mallocstate.c
+++ b/malloc/tst-mallocstate.c
@@ -45,7 +45,7 @@ enum allocation_action
     action_free,                /* Dumped and freed.  */
     action_realloc,             /* Dumped and realloc'ed.  */
     action_realloc_same,        /* Dumped and realloc'ed, same size.  */
-    action_realloc_smaller,     /* Dumped and realloc'ed, shrinked.  */
+    action_realloc_smaller,     /* Dumped and realloc'ed, shrunk.  */
     action_count
   };