author     Jakub Jelinek <jakub@redhat.com>   2004-12-13 23:32:37 +0000
committer  Jakub Jelinek <jakub@redhat.com>   2004-12-13 23:32:37 +0000
commit     f9626feb2d8a692e27d1c020beba198ec52a705a (patch)
tree       80e2799fe980f5c79a6bce099548081def04da41 /malloc
parent     e797f2e35cbf7edf2c7de7f79442bda550917f07 (diff)
download   glibc-f9626feb2d8a692e27d1c020beba198ec52a705a.tar.gz
           glibc-f9626feb2d8a692e27d1c020beba198ec52a705a.tar.xz
           glibc-f9626feb2d8a692e27d1c020beba198ec52a705a.zip
Updated to fedora-glibc-20041213T2323 cvs/fedora-glibc-2_3_3-91
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/arena.c   33
-rw-r--r--  malloc/malloc.c  45
2 files changed, 49 insertions(+), 29 deletions(-)
diff --git a/malloc/arena.c b/malloc/arena.c
index 024e191b9e..026f2c7822 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -782,9 +782,12 @@ arena_get2(a_tsd, size) mstate a_tsd; size_t size;
   }
 
   /* Check the global, circularly linked list for available arenas. */
+  bool retried = false;
  repeat:
   do {
     if(!mutex_trylock(&a->mutex)) {
+      if (retried)
+	(void)mutex_unlock(&list_lock);
       THREAD_STAT(++(a->stat_lock_loop));
       tsd_setspecific(arena_key, (Void_t *)a);
       return a;
@@ -796,29 +799,33 @@ arena_get2(a_tsd, size) mstate a_tsd; size_t size;
      happen during `atfork', or for example on systems where thread
      creation makes it temporarily impossible to obtain _any_
      locks. */
-  if(mutex_trylock(&list_lock)) {
+  if(!retried && mutex_trylock(&list_lock)) {
+    /* We will block to not run in a busy loop.  */
+    (void)mutex_lock(&list_lock);
+
+    /* Since we blocked there might be an arena available now.  */
+    retried = true;
     a = a_tsd;
     goto repeat;
   }
-  (void)mutex_unlock(&list_lock);
 
   /* Nothing immediately available, so generate a new arena.  */
   a = _int_new_arena(size);
-  if(!a)
-    return 0;
+  if(a)
+    {
+      tsd_setspecific(arena_key, (Void_t *)a);
+      mutex_init(&a->mutex);
+      mutex_lock(&a->mutex); /* remember result */
 
-  tsd_setspecific(arena_key, (Void_t *)a);
-  mutex_init(&a->mutex);
-  mutex_lock(&a->mutex); /* remember result */
+      /* Add the new arena to the global list.  */
+      a->next = main_arena.next;
+      atomic_write_barrier ();
+      main_arena.next = a;
 
-  /* Add the new arena to the global list.  */
-  (void)mutex_lock(&list_lock);
-  a->next = main_arena.next;
-  atomic_write_barrier ();
-  main_arena.next = a;
+      THREAD_STAT(++(a->stat_lock_loop));
+    }
   (void)mutex_unlock(&list_lock);
 
-  THREAD_STAT(++(a->stat_lock_loop));
   return a;
 }
 
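The arena.c hunk above removes a busy retry: previously, when every arena mutex and the list lock were contended, arena_get2 jumped straight back to the scan and spun. With the patch it blocks on list_lock once, rescans the arena list with the lock held, and only then creates a new arena, still under list_lock, so the insertion no longer needs a second lock/unlock pair. The standalone sketch below (plain pthreads, simplified types; not glibc code, and acquire_arena/arena_list are invented names for illustration) shows the same trylock-then-block-and-retry pattern:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct arena {
  pthread_mutex_t mutex;
  struct arena *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct arena *arena_list;   /* circular list; assumed set up elsewhere */

struct arena *
acquire_arena (void)
{
  bool retried = false;
  struct arena *a = arena_list;

 repeat:
  do {
    if (pthread_mutex_trylock (&a->mutex) == 0)
      {
	/* On the retry pass list_lock is still held; release it.  */
	if (retried)
	  pthread_mutex_unlock (&list_lock);
	return a;
      }
    a = a->next;
  } while (a != arena_list);

  if (!retried && pthread_mutex_trylock (&list_lock) != 0)
    {
      /* Everything is contended: block once instead of spinning,
	 then rescan the list while holding list_lock.  */
      pthread_mutex_lock (&list_lock);
      retried = true;
      a = arena_list;
      goto repeat;
    }

  /* list_lock is now held, either via the successful trylock or via the
     blocking retry.  Create and publish a new arena under the lock.  */
  struct arena *fresh = calloc (1, sizeof *fresh);
  if (fresh != NULL)
    {
      pthread_mutex_init (&fresh->mutex, NULL);
      pthread_mutex_lock (&fresh->mutex);
      fresh->next = arena_list->next;
      arena_list->next = fresh;
    }
  pthread_mutex_unlock (&list_lock);
  return fresh;
}

The key property is that list_lock stays held from the blocking retry (or the successful trylock) until the new arena has been linked into the list, which is why the patched glibc code can drop the separate mutex_lock/mutex_unlock around the list insertion.
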
diff --git a/malloc/malloc.c b/malloc/malloc.c
index cf1b935ffd..e3ccbde7b5 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4256,7 +4256,7 @@ _int_free(mstate av, Void_t* mem)
 	|| __builtin_expect (chunksize (chunk_at_offset (p, size))
 			     >= av->system_mem, 0))
       {
-	errstr = "invalid next size (fast)";
+	errstr = "free(): invalid next size (fast)";
 	goto errout;
       }
 
@@ -4306,7 +4306,7 @@ _int_free(mstate av, Void_t* mem)
     if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
 	|| __builtin_expect (nextsize >= av->system_mem, 0))
       {
-	errstr = "invalid next size (normal)";
+	errstr = "free(): invalid next size (normal)";
 	goto errout;
       }
 
@@ -4550,27 +4550,42 @@ _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
   INTERNAL_SIZE_T* s;               /* copy source */
   INTERNAL_SIZE_T* d;               /* copy destination */
 
+  const char *errstr = NULL;
 
-#if REALLOC_ZERO_BYTES_FREES
-  if (bytes == 0) {
-    if (oldmem != 0)
-      _int_free(av, oldmem);
-    return 0;
-  }
-#endif
-
-  /* realloc of null is supposed to be same as malloc */
-  if (oldmem == 0) return _int_malloc(av, bytes);
 
   checked_request2size(bytes, nb);
 
   oldp    = mem2chunk(oldmem);
   oldsize = chunksize(oldp);
 
+  /* Simple tests for old block integrity.  */
+  if (__builtin_expect ((uintptr_t) oldp & MALLOC_ALIGN_MASK, 0))
+    {
+      errstr = "realloc(): invalid pointer";
+    errout:
+      malloc_printerr (check_action, errstr, oldmem);
+      return NULL;
+    }
+  if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
+      || __builtin_expect (oldsize >= av->system_mem, 0))
+    {
+      errstr = "realloc(): invalid size";
+      goto errout;
+    }
+
   check_inuse_chunk(av, oldp);
 
   if (!chunk_is_mmapped(oldp)) {
 
+    next = chunk_at_offset(oldp, oldsize);
+    INTERNAL_SIZE_T nextsize = chunksize(next);
+    if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
+	|| __builtin_expect (nextsize >= av->system_mem, 0))
+      {
+	errstr = "realloc(): invalid next size";
+	goto errout;
+      }
+
     if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
       /* already big enough; split below */
       newp = oldp;
@@ -4578,11 +4593,9 @@ _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
     }
 
     else {
-      next = chunk_at_offset(oldp, oldsize);
-
       /* Try to expand forward into top */
       if (next == av->top &&
-          (unsigned long)(newsize = oldsize + chunksize(next)) >=
+          (unsigned long)(newsize = oldsize + nextsize) >=
           (unsigned long)(nb + MINSIZE)) {
         set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
         av->top = chunk_at_offset(oldp, nb);
@@ -4594,7 +4607,7 @@ _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
       /* Try to expand forward into next chunk;  split off remainder below */
       else if (next != av->top &&
                !inuse(next) &&
-               (unsigned long)(newsize = oldsize + chunksize(next)) >=
+               (unsigned long)(newsize = oldsize + nextsize) >=
                (unsigned long)(nb)) {
         newp = oldp;
         unlink(next, bck, fwd);