author    Maxim Kuvyrkov <maxim@kugelworks.com>  2013-12-24 09:44:50 +1300
committer Maxim Kuvyrkov <maxim@kugelworks.com>  2014-01-05 15:02:11 +1300
commit    9875bb22212391e39d9d2c29b4b5d5e9e1f83beb (patch)
tree      9571693b689aff22ac298bf89c9038e3ced21d48
parent    53fa2b6063a484e19a6c48c4efff00a6491c0f4e (diff)
Fix race in free() of fastbin chunk: BZ #15073
Perform the sanity check only if we have_lock.  Due to the lockless nature of
fastbins we need to be careful when dereferencing pointers to fastbin entries
(chunksize(old) in this case) in multithreaded environments.

The fix is to add have_lock to the if-condition checks.  The rest of the patch
only makes the code more readable.

	* malloc/malloc.c (_int_free): Perform sanity check only if we
	have_lock.

Conflicts:

	ChangeLog
	NEWS
-rw-r--r--  ChangeLog        |  7
-rw-r--r--  NEWS             |  2
-rw-r--r--  malloc/malloc.c  | 20
3 files changed, 20 insertions, 9 deletions
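
To make the race concrete, here is a minimal standalone sketch of the lockless
fastbin push that _int_free performs.  It is not glibc code: struct chunk,
fastbin_push and the size-based check are hypothetical stand-ins, and C11
atomics replace glibc's catomic_compare_and_exchange_val_rel.  The commented
dereference is the one the patch guards with have_lock.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a malloc chunk sitting in a fastbin.  */
struct chunk
{
  size_t size;          /* plays the role of chunksize (p)        */
  struct chunk *fd;     /* next chunk in the singly-linked bin    */
};

/* Push P onto the fastbin headed by *FB, mirroring the CAS loop in
   _int_free.  HAVE_LOCK says whether the arena lock is held.  */
static void
fastbin_push (_Atomic (struct chunk *) *fb, struct chunk *p, int have_lock)
{
  struct chunk *old = atomic_load_explicit (fb, memory_order_relaxed);
  size_t old_size = 0;

  do
    {
      /* Double-free check: the current top must not be P itself.  */
      if (old == p)
        {
          fprintf (stderr, "double free or corruption (fasttop)\n");
          abort ();
        }

      /* The dereference BZ #15073 is about: without the lock, another
         thread may already have popped OLD and handed it back to the
         application, so OLD->size can be arbitrary bytes.  Read it only
         when the lock is held.  */
      if (have_lock && old != NULL)
        old_size = old->size;

      p->fd = old;
    }
  /* On failure the CAS stores the current top back into OLD and we retry.  */
  while (!atomic_compare_exchange_weak_explicit (fb, &old, p,
                                                 memory_order_release,
                                                 memory_order_relaxed));

  /* The real code compares fastbin indices here; the comparison is only
     meaningful when the lock was held for the read above.  */
  if (have_lock && old != NULL && old_size != p->size)
    {
      fprintf (stderr, "invalid fastbin entry (free)\n");
      abort ();
    }
}

int
main (void)
{
  static _Atomic (struct chunk *) bin = NULL;
  struct chunk a = { .size = 32, .fd = NULL };
  struct chunk b = { .size = 32, .fd = NULL };

  fastbin_push (&bin, &a, 1);   /* with the lock: size check performed  */
  fastbin_push (&bin, &b, 0);   /* lockless path: dereference skipped   */
  return 0;
}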
diff --git a/ChangeLog b/ChangeLog
index dd64c05234..92b313878b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2014-01-04  Maxim Kuvyrkov  <maxim@kugelworks.com>
+	    Ondřej Bílka  <neleai@seznam.cz>
+
+	[BZ #15073]
+	* malloc/malloc.c (_int_free): Perform sanity check only if we
+        have_lock.
+
 2012-01-28  Chris Metcalf  <cmetcalf@tilera.com>
 
 	* sysdeps/unix/sysv/linux/faccessat.c (faccessat): Call __fxstatat64.
diff --git a/NEWS b/NEWS
index 00b447a472..1f631ed8d6 100644
--- a/NEWS
+++ b/NEWS
@@ -11,7 +11,7 @@ Version 2.15.1
 
   411, 2547, 2548, 11261, 11365, 11494, 13583, 13731, 13732, 13733, 13747,
   13748, 13749, 13753, 13754, 13771, 13773, 13774, 13786, 14048, 14059,
-  14167, 14273, 14459, 14621, 14648, 14040
+  14167, 14273, 14459, 14621, 14648, 14040, 15073
 
 Version 2.15
 
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 8608083adb..aeda31d27f 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3989,25 +3989,29 @@ _int_free(mstate av, mchunkptr p, int have_lock)
     unsigned int idx = fastbin_index(size);
     fb = &fastbin (av, idx);
 
-    mchunkptr fd;
-    mchunkptr old = *fb;
+    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
+    mchunkptr old = *fb, old2;
     unsigned int old_idx = ~0u;
     do
       {
-	/* Another simple check: make sure the top of the bin is not the
-	   record we are going to add (i.e., double free).  */
+	/* Check that the top of the bin is not the record we are going to add
+	   (i.e., double free).  */
 	if (__builtin_expect (old == p, 0))
 	  {
 	    errstr = "double free or corruption (fasttop)";
 	    goto errout;
 	  }
-	if (old != NULL)
+	/* Check that size of fastbin chunk at the top is the same as
+	   size of the chunk that we are adding.  We can dereference OLD
+	   only if we have the lock, otherwise it might have already been
+	   deallocated.  See use of OLD_IDX below for the actual check.  */
+	if (have_lock && old != NULL)
 	  old_idx = fastbin_index(chunksize(old));
-	p->fd = fd = old;
+	p->fd = old2 = old;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
+    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
 
-    if (fd != NULL && __builtin_expect (old_idx != idx, 0))
+    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
       {
 	errstr = "invalid fastbin entry (free)";
 	goto errout;
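
For completeness, a matching sketch of the consumer side (again hypothetical,
not glibc's actual _int_malloc code) shows where the racing thread comes from:
once the top chunk has been popped and returned to the application it may be
overwritten at any time, which is why the unlocked free () path above must not
read its size field.

/* Pop the top chunk from the fastbin headed by *FB.  In glibc the pop in
   _int_malloc runs with the arena lock held; only the free () side may
   execute locklessly.  */
static struct chunk *
fastbin_pop (_Atomic (struct chunk *) *fb)
{
  struct chunk *old = atomic_load_explicit (fb, memory_order_acquire);

  while (old != NULL
         && !atomic_compare_exchange_weak_explicit (fb, &old, old->fd,
                                                    memory_order_acquire,
                                                    memory_order_acquire))
    continue;

  /* From here on the caller owns OLD and will typically overwrite it,
     invalidating any concurrent read of OLD->size in fastbin_push.  */
  return old;
}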