author    Torvald Riegel <triegel@redhat.com>  2015-06-24 14:37:32 +0200
committer Torvald Riegel <triegel@redhat.com>  2016-01-15 21:20:34 +0100
commit    b02840bacdefde318d2ad2f920e50785b9b25d69 (patch)
tree      dcf8ee01d1e4bdb42686d890c1d00bf3249fbcaf /sysdeps/unix/sysv
parent    a3e5b4feeb54cb92657ec2bc6d9be1fcef9e8575 (diff)
New pthread_barrier algorithm to fulfill barrier destruction requirements.
The previous barrier implementation did not fulfill the POSIX requirements
for when a barrier can be destroyed.  Specifically, it was possible for
threads that had not yet noticed that their round was complete to still
access the barrier's memory, and for those accesses to happen after the
barrier had been legally destroyed.
The new algorithm does not have this issue, and it avoids using a lock
internally.
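
To illustrate the requirement in question: POSIX allows a barrier to be
destroyed as soon as pthread_barrier_wait has returned in all threads of the
current round, so the thread that receives PTHREAD_BARRIER_SERIAL_THREAD may
destroy the barrier immediately, even if the other waiters have not been
scheduled again yet.  A minimal, self-contained usage sketch of that pattern
(not part of this commit; the thread count and names are illustrative):

#include <pthread.h>
#include <stddef.h>

#define NTHREADS 4

static pthread_barrier_t barrier;

static void *
worker (void *arg)
{
  (void) arg;
  /* All NTHREADS threads meet here; exactly one of them gets
     PTHREAD_BARRIER_SERIAL_THREAD, the others get 0.  */
  if (pthread_barrier_wait (&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
    /* Legal per POSIX: the round is complete, so the barrier may be
       destroyed even though the other threads might still be inside
       their (already satisfied) pthread_barrier_wait calls.  The
       implementation must guarantee that none of them touches the
       barrier's memory after this point.  */
    pthread_barrier_destroy (&barrier);
  return NULL;
}

int
main (void)
{
  pthread_t t[NTHREADS];
  pthread_barrier_init (&barrier, NULL, NTHREADS);
  for (int i = 0; i < NTHREADS; i++)
    pthread_create (&t[i], NULL, worker, NULL);
  for (int i = 0; i < NTHREADS; i++)
    pthread_join (&t[i], NULL);
  return 0;
}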
Diffstat (limited to 'sysdeps/unix/sysv')
-rw-r--r--  sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S   | 187 ----
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S | 161 ----
2 files changed, 0 insertions(+), 348 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S b/sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S
deleted file mode 100644
index 1f5910c472..0000000000
--- a/sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S
+++ /dev/null
@@ -1,187 +0,0 @@
-/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <lowlevellock.h>
-#include <lowlevelbarrier.h>
-
-	.text
-
-	.globl	__pthread_barrier_wait
-	.type	__pthread_barrier_wait,@function
-	.align	16
-__pthread_barrier_wait:
-	cfi_startproc
-	pushl	%ebx
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%ebx, -8)
-
-	movl	8(%esp), %ebx
-
-	/* Get the mutex.  */
-	movl	$1, %edx
-	xorl	%eax, %eax
-	LOCK
-	cmpxchgl %edx, MUTEX(%ebx)
-	jnz	1f
-
-	/* One less waiter.  If this was the last one needed wake
-	   everybody.  */
-2:	subl	$1, LEFT(%ebx)
-	je	3f
-
-	/* There are more threads to come.  */
-	pushl	%esi
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%esi, -12)
-
-#if CURR_EVENT == 0
-	movl	(%ebx), %edx
-#else
-	movl	CURR_EVENT(%ebx), %edx
-#endif
-
-	/* Release the mutex.  */
-	LOCK
-	subl	$1, MUTEX(%ebx)
-	jne	6f
-
-	/* Wait for the remaining threads.  The call will return immediately
-	   if the CURR_EVENT memory has meanwhile been changed.  */
-7:
-#if FUTEX_WAIT == 0
-	movl	PRIVATE(%ebx), %ecx
-#else
-	movl	$FUTEX_WAIT, %ecx
-	orl	PRIVATE(%ebx), %ecx
-#endif
-	xorl	%esi, %esi
-8:	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-	/* Don't return on spurious wakeups.  The syscall does not change
-	   any register except %eax so there is no need to reload any of
-	   them.  */
-#if CURR_EVENT == 0
-	cmpl	%edx, (%ebx)
-#else
-	cmpl	%edx, CURR_EVENT(%ebx)
-#endif
-	je	8b
-
-	/* Increment LEFT.  If this brings the count back to the
-	   initial count unlock the object.  */
-	movl	$1, %edx
-	movl	INIT_COUNT(%ebx), %ecx
-	LOCK
-	xaddl	%edx, LEFT(%ebx)
-	subl	$1, %ecx
-	cmpl	%ecx, %edx
-	jne	10f
-
-	/* Release the mutex.  We cannot release the lock before
-	   waking the waiting threads since otherwise a new thread might
-	   arrive and get woken up, too.  */
-	LOCK
-	subl	$1, MUTEX(%ebx)
-	jne	9f
-
-	/* Note: %esi is still zero.  */
-10:	movl	%esi, %eax		/* != PTHREAD_BARRIER_SERIAL_THREAD */
-
-	popl	%esi
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%esi)
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	ret
-
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%ebx, -8)
-
-	/* The necessary number of threads arrived.  */
-3:
-#if CURR_EVENT == 0
-	addl	$1, (%ebx)
-#else
-	addl	$1, CURR_EVENT(%ebx)
-#endif
-
-	/* Wake up all waiters.  The count is a signed number in the kernel
-	   so 0x7fffffff is the highest value.  */
-	movl	$0x7fffffff, %edx
-	movl	$FUTEX_WAKE, %ecx
-	orl	PRIVATE(%ebx), %ecx
-	movl	$SYS_futex, %eax
-	ENTER_KERNEL
-
-	/* Increment LEFT.  If this brings the count back to the
-	   initial count unlock the object.  */
-	movl	$1, %edx
-	movl	INIT_COUNT(%ebx), %ecx
-	LOCK
-	xaddl	%edx, LEFT(%ebx)
-	subl	$1, %ecx
-	cmpl	%ecx, %edx
-	jne	5f
-
-	/* Release the mutex.  We cannot release the lock before
-	   waking the waiting threads since otherwise a new thread might
-	   arrive and get woken up, too.  */
-	LOCK
-	subl	$1, MUTEX(%ebx)
-	jne	4f
-
-5:	orl	$-1, %eax		/* == PTHREAD_BARRIER_SERIAL_THREAD */
-
-	popl	%ebx
-	cfi_adjust_cfa_offset(-4)
-	cfi_restore(%ebx)
-	ret
-
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%ebx, -8)
-1:	movl	PRIVATE(%ebx), %ecx
-	leal	MUTEX(%ebx), %edx
-	xorl	$LLL_SHARED, %ecx
-	call	__lll_lock_wait
-	jmp	2b
-
-4:	movl	PRIVATE(%ebx), %ecx
-	leal	MUTEX(%ebx), %eax
-	xorl	$LLL_SHARED, %ecx
-	call	__lll_unlock_wake
-	jmp	5b
-
-	cfi_adjust_cfa_offset(4)
-	cfi_offset(%esi, -12)
-6:	movl	PRIVATE(%ebx), %ecx
-	leal	MUTEX(%ebx), %eax
-	xorl	$LLL_SHARED, %ecx
-	call	__lll_unlock_wake
-	jmp	7b
-
-9:	movl	PRIVATE(%ebx), %ecx
-	leal	MUTEX(%ebx), %eax
-	xorl	$LLL_SHARED, %ecx
-	call	__lll_unlock_wake
-	jmp	10b
-	cfi_endproc
-	.size	__pthread_barrier_wait,.-__pthread_barrier_wait
-weak_alias (__pthread_barrier_wait, pthread_barrier_wait)
diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S b/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
deleted file mode 100644
index 650f567b59..0000000000
--- a/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
+++ /dev/null
@@ -1,161 +0,0 @@
-/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <sysdep.h>
-#include <lowlevellock.h>
-#include <lowlevelbarrier.h>
-
-
-	.text
-
-	.globl	__pthread_barrier_wait
-	.type	__pthread_barrier_wait,@function
-	.align	16
-__pthread_barrier_wait:
-	/* Get the mutex.  */
-	xorl	%eax, %eax
-	movl	$1, %esi
-	LOCK
-	cmpxchgl %esi, MUTEX(%rdi)
-	jnz	1f
-
-	/* One less waiter.  If this was the last one needed wake
-	   everybody.  */
-2:	decl	LEFT(%rdi)
-	je	3f
-
-	/* There are more threads to come.  */
-#if CURR_EVENT == 0
-	movl	(%rdi), %edx
-#else
-	movl	CURR_EVENT(%rdi), %edx
-#endif
-
-	/* Release the mutex.  */
-	LOCK
-	decl	MUTEX(%rdi)
-	jne	6f
-
-	/* Wait for the remaining threads.  The call will return immediately
-	   if the CURR_EVENT memory has meanwhile been changed.  */
-7:
-#if FUTEX_WAIT == 0
-	movl	PRIVATE(%rdi), %esi
-#else
-	movl	$FUTEX_WAIT, %esi
-	orl	PRIVATE(%rdi), %esi
-#endif
-	xorq	%r10, %r10
-8:	movl	$SYS_futex, %eax
-	syscall
-
-	/* Don't return on spurious wakeups.  The syscall does not change
-	   any register except %eax so there is no need to reload any of
-	   them.  */
-#if CURR_EVENT == 0
-	cmpl	%edx, (%rdi)
-#else
-	cmpl	%edx, CURR_EVENT(%rdi)
-#endif
-	je	8b
-
-	/* Increment LEFT.  If this brings the count back to the
-	   initial count unlock the object.  */
-	movl	$1, %edx
-	movl	INIT_COUNT(%rdi), %eax
-	LOCK
-	xaddl	%edx, LEFT(%rdi)
-	subl	$1, %eax
-	cmpl	%eax, %edx
-	jne,pt	10f
-
-	/* Release the mutex.  We cannot release the lock before
-	   waking the waiting threads since otherwise a new thread might
-	   arrive and get woken up, too.  */
-	LOCK
-	decl	MUTEX(%rdi)
-	jne	9f
-
-10:	xorl	%eax, %eax		/* != PTHREAD_BARRIER_SERIAL_THREAD */
-
-	retq
-
-	/* The necessary number of threads arrived.  */
-3:
-#if CURR_EVENT == 0
-	incl	(%rdi)
-#else
-	incl	CURR_EVENT(%rdi)
-#endif
-
-	/* Wake up all waiters.  The count is a signed number in the kernel
-	   so 0x7fffffff is the highest value.  */
-	movl	$0x7fffffff, %edx
-	movl	$FUTEX_WAKE, %esi
-	orl	PRIVATE(%rdi), %esi
-	movl	$SYS_futex, %eax
-	syscall
-
-	/* Increment LEFT.  If this brings the count back to the
-	   initial count unlock the object.  */
-	movl	$1, %edx
-	movl	INIT_COUNT(%rdi), %eax
-	LOCK
-	xaddl	%edx, LEFT(%rdi)
-	subl	$1, %eax
-	cmpl	%eax, %edx
-	jne,pt	5f
-
-	/* Release the mutex.  We cannot release the lock before
-	   waking the waiting threads since otherwise a new thread might
-	   arrive and get woken up, too.  */
-	LOCK
-	decl	MUTEX(%rdi)
-	jne	4f
-
-5:	orl	$-1, %eax		/* == PTHREAD_BARRIER_SERIAL_THREAD */
-
-	retq
-
-1:	movl	PRIVATE(%rdi), %esi
-	addq	$MUTEX, %rdi
-	xorl	$LLL_SHARED, %esi
-	callq	__lll_lock_wait
-	subq	$MUTEX, %rdi
-	jmp	2b
-
-4:	movl	PRIVATE(%rdi), %esi
-	addq	$MUTEX, %rdi
-	xorl	$LLL_SHARED, %esi
-	callq	__lll_unlock_wake
-	jmp	5b
-
-6:	movl	PRIVATE(%rdi), %esi
-	addq	$MUTEX, %rdi
-	xorl	$LLL_SHARED, %esi
-	callq	__lll_unlock_wake
-	subq	$MUTEX, %rdi
-	jmp	7b
-
-9:	movl	PRIVATE(%rdi), %esi
-	addq	$MUTEX, %rdi
-	xorl	$LLL_SHARED, %esi
-	callq	__lll_unlock_wake
-	jmp	10b
-	.size	__pthread_barrier_wait,.-__pthread_barrier_wait
-weak_alias (__pthread_barrier_wait, pthread_barrier_wait)
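
For readers following the commit message without tracing the assembly, the two
deleted files implement essentially the same algorithm.  Below is a rough,
simplified C rendering of it, for illustration only: field names follow the
offsets used above (CURR_EVENT, MUTEX, LEFT, INIT_COUNT), the spin lock stands
in for glibc's futex-based low-level lock, and the futex-private flag is
omitted.  It shows why waiters could still write to LEFT and MUTEX after the
serial thread had already returned, which is the destruction problem this
commit fixes.

#define _GNU_SOURCE
#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

struct old_barrier
{
  unsigned int curr_event;	/* CURR_EVENT: round counter (futex word).  */
  atomic_int mutex;		/* MUTEX: low-level lock.  */
  atomic_uint left;		/* LEFT: arrivals still missing this round.  */
  unsigned int init_count;	/* INIT_COUNT: count given at init time.  */
};

/* Stand-ins for glibc's lll_lock/lll_unlock (a simple spin lock here).  */
static void
lock (atomic_int *m)
{
  int expected = 0;
  while (!atomic_compare_exchange_weak (m, &expected, 1))
    expected = 0;
}

static void
unlock (atomic_int *m)
{
  atomic_store (m, 0);
}

static int
old_barrier_wait (struct old_barrier *b)
{
  int result = 0;

  lock (&b->mutex);
  if (atomic_fetch_sub (&b->left, 1) == 1)
    {
      /* Last arrival: start the next round and wake all waiters.  The
	 mutex stays locked unless this thread is also the last one to
	 re-increment LEFT below.  */
      ++b->curr_event;
      syscall (SYS_futex, &b->curr_event, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
      result = PTHREAD_BARRIER_SERIAL_THREAD;
    }
  else
    {
      unsigned int event = b->curr_event;
      unlock (&b->mutex);
      /* Block until the round counter changes; ignore spurious wakeups.  */
      do
	syscall (SYS_futex, &b->curr_event, FUTEX_WAIT, event, NULL, NULL, 0);
      while (b->curr_event == event);
    }

  /* Every thread re-increments LEFT on its way out, and whichever thread
     brings it back to INIT_COUNT releases the mutex.  These late writes to
     LEFT and MUTEX are exactly the accesses that could race with a legal
     pthread_barrier_destroy issued by the serial thread.  */
  if (atomic_fetch_add (&b->left, 1) == b->init_count - 1)
    unlock (&b->mutex);

  return result;
}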