author	Rich Felker <dalias@aerifal.cx>	2015-04-10 02:27:52 -0400
committer	Rich Felker <dalias@aerifal.cx>	2015-04-10 02:27:52 -0400
commit	f08ab9e61a147630497198fe3239149275c0a3f4 (patch)
tree	65f0898637a5306485e665ec95c753b99f4e3740 /src/mman
parent	4e98cce1c529a304d7b55b5455078b9532f93e9b (diff)
redesign and simplify vmlock system
this global lock allows certain unlock-type primitives to exclude
mmap/munmap operations which could change the identity of virtual
addresses while references to them still exist.

the original design mistakenly assumed mmap/munmap would conversely
need to exclude the same operations which exclude mmap/munmap, so the
vmlock was implemented as a sort of 'symmetric recursive rwlock'. this
turned out to be unnecessary.

commit 25d12fc0fc51f1fae0f85b4649a6463eb805aa8f already shortened the
interval during which mmap/munmap held their side of the lock, but
left the inappropriate lock design and some inefficiency.

the new design uses a separate function, __vm_wait, which does not
hold any lock itself and only waits for lock users which were already
present when it was called to release the lock. this is sufficient
because of the way operations that need to be excluded are sequenced:
the "unlock-type" operations using the vmlock need only block
mmap/munmap operations that are precipitated by (and thus sequenced
after) the atomic-unlock they perform while holding the vmlock.

this allows for a spectacular lack of synchronization in the __vm_wait
function itself.
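
as a reference point, the following is a minimal sketch of how such a
wait-only primitive can be structured, assuming musl's internal
__wait/__wake futex helpers and a_* atomics; the real lock lives
outside src/mman (and outside this diffstat), so details may differ:

	/* sketch only: vmlock[0] counts current holders, vmlock[1]
	 * counts waiters that need a futex wake. */
	static volatile int vmlock[2];

	void __vm_wait()
	{
		int tmp;
		/* wait only for holders already present at the time of
		 * the call; takes no lock itself, so later lockers are
		 * never blocked by a waiter. */
		while ((tmp=vmlock[0]))
			__wait(vmlock, vmlock+1, tmp, 1);
	}

	void __vm_lock()
	{
		a_inc(vmlock);
	}

	void __vm_unlock()
	{
		/* wake waiters only when the last holder releases */
		if (a_fetch_add(vmlock, -1)==1 && vmlock[1])
			__wake(vmlock, -1, 1);
	}
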
Diffstat (limited to 'src/mman')
-rw-r--r--	src/mman/mmap.c	9
-rw-r--r--	src/mman/munmap.c	13
2 files changed, 7 insertions(+), 15 deletions(-)
diff --git a/src/mman/mmap.c b/src/mman/mmap.c
index 56e39a7a..b85f25ca 100644
--- a/src/mman/mmap.c
+++ b/src/mman/mmap.c
@@ -6,10 +6,8 @@
 #include "syscall.h"
 #include "libc.h"
 
-static void dummy1(int x) { }
-static void dummy0(void) { }
-weak_alias(dummy1, __vm_lock);
-weak_alias(dummy0, __vm_unlock);
+static void dummy(void) { }
+weak_alias(dummy, __vm_wait);
 
 #define UNIT SYSCALL_MMAP2_UNIT
 #define OFF_MASK ((-0x2000ULL << (8*sizeof(long)-1)) | (UNIT-1))
@@ -25,8 +23,7 @@ void *__mmap(void *start, size_t len, int prot, int flags, int fd, off_t off)
 		return MAP_FAILED;
 	}
 	if (flags & MAP_FIXED) {
-		__vm_lock(-1);
-		__vm_unlock();
+		__vm_wait();
 	}
 #ifdef SYS_mmap2
 	return (void *)syscall(SYS_mmap2, start, len, prot, flags, fd, off/UNIT);
diff --git a/src/mman/munmap.c b/src/mman/munmap.c
index 359c691f..3f711ee5 100644
--- a/src/mman/munmap.c
+++ b/src/mman/munmap.c
@@ -2,18 +2,13 @@
 #include "syscall.h"
 #include "libc.h"
 
-static void dummy1(int x) { }
-static void dummy0(void) { }
-weak_alias(dummy1, __vm_lock);
-weak_alias(dummy0, __vm_unlock);
+static void dummy(void) { }
+weak_alias(dummy, __vm_wait);
 
 int __munmap(void *start, size_t len)
 {
-	int ret;
-	__vm_lock(-1);
-	__vm_unlock();
-	ret = syscall(SYS_munmap, start, len);
-	return ret;
+	__vm_wait();
+	return syscall(SYS_munmap, start, len);
 }
 
 weak_alias(__munmap, munmap);
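
for illustration, a hedged sketch of the caller side the commit message
describes: an unlock-type primitive holds the vmlock only across its
atomic unlock, so any mmap/munmap precipitated by that unlock is
sequenced after it and its __vm_wait() call blocks only for holders
that were already present. the name unlock_and_publish and its argument
are hypothetical, not an actual musl function:

	/* hypothetical unlock-type primitive; mirrors the pattern in
	 * the commit message, not any specific musl interface. */
	void unlock_and_publish(volatile int *futex_addr)
	{
		__vm_lock();
		/* the atomic unlock: after this store, another thread may
		 * take ownership and could munmap the memory backing
		 * futex_addr. */
		a_store(futex_addr, 0);
		/* releasing the vmlock lets __vm_wait() in any such
		 * mmap/munmap proceed. */
		__vm_unlock();
		__wake(futex_addr, 1, 1);
	}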