author     Ulrich Drepper <drepper@redhat.com>  2009-08-04 11:03:46 -0700
committer  Ulrich Drepper <drepper@redhat.com>  2009-08-04 11:03:46 -0700
commit     421665c40ae002f74130eb4e0b19cb22d97860cf (patch)
tree       78e155aac7fd869030ddad548afbf9dfaed6df0c
parent     4a1377672ca5472fac9096b4da72d26226b4aa59 (diff)
Optimize x86-64 syscall cancellation handling.
The syscall wrappers had to save and restore the syscall parameter
values and the return value around the calls to the functions that
enable and disable cancellation.  Not anymore.  The called functions
are special and don't modify any unexpected registers.
-rw-r--r--  nptl/ChangeLog                                             12
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S        114
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S    22
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S   22
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h        55
-rw-r--r--  nptl/sysdeps/x86_64/tcb-offsets.sym                        10
6 files changed, 191 insertions, 44 deletions
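
For context, each cancellable syscall wrapper enables asynchronous cancellation,
performs the system call, and then disables asynchronous cancellation again,
handing the previous state through.  Before this change the wrapper also had to
spill the argument registers around those two calls; now it relies on the
enable/disable functions clobbering only %rax and %r11.  The C sketch below
models just that control flow; the sketch_* helpers and the single-threaded
test are illustrative stand-ins, not glibc's internal API.

/* Minimal control-flow sketch; the sketch_* helpers are stand-ins for
   glibc internals (__libc_enable_asynccancel etc.), not real symbols.  */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sketch_single_threaded (void) { return 0; }      /* stub */
static int sketch_enable_asynccancel (void) { return 0; }   /* stub: old flags */
static void sketch_disable_asynccancel (int old) { (void) old; }

static long
cancellable_read (int fd, void *buf, size_t len)
{
  if (sketch_single_threaded ())
    /* Fast path: no cancellation bookkeeping at all.  */
    return syscall (SYS_read, fd, buf, len);

  int old = sketch_enable_asynccancel ();   /* may not return if canceled */
  long ret = syscall (SYS_read, fd, buf, len);
  sketch_disable_asynccancel (old);         /* may block until canceled */
  return ret;
}

int
main (void)
{
  char c;
  printf ("read returned %ld\n", cancellable_read (0, &c, 0));
  return 0;
}
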
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index 3ebe841452..3a74b5fecd 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,15 @@
+2009-08-04  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/unix/sysv/linux/x86_64/cancellation.S: New file.
+	* sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S: New file.
+	* sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S: New file.
+	* sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h (PSEUDO): Optimize
+	since we can assume the special __*_{en,dis}able_asynccancel
+	functions.
+	(PUSHARGS_*, POPARGS_*, SAVESTK_*, RESTSTK_*): Removed.
+	* sysdeps/x86_64/tcb-offsets.sym: Add cancellation-related bits
+	and PTHREAD_CANCELED.
+
 2009-07-31  Ulrich Drepper  <drepper@redhat.com>
 
 	* descr.h: Better definition of *_BITMASK macros for cancellation.
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S b/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S
new file mode 100644
index 0000000000..a51df3eacf
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/cancellation.S
@@ -0,0 +1,114 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <tcb-offsets.h>
+#include <kernel-features.h>
+
+#ifdef IS_IN_libpthread
+# ifdef SHARED
+#  define __pthread_unwind __GI___pthread_unwind
+# endif
+#else
+# ifndef SHARED
+	.weak __pthread_unwind
+# endif
+#endif
+
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%fs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
+# endif
+#endif
+
+/* It is crucial that the functions in this file don't modify registers
+   other than %rax and %r11.  The syscall wrapper code depends on this
+   because it doesn't explicitly save the other registers which hold
+   relevant values.  */
+	.text
+
+	.hidden __pthread_enable_asynccancel
+ENTRY(__pthread_enable_asynccancel)
+	movl	%fs:CANCELHANDLING, %eax
+2:	movl	%eax, %r11d
+	orl	$TCB_CANCELTYPE_BITMASK, %r11d
+	cmpl	%eax, %r11d
+	je	1f
+
+	lock
+	cmpxchgl %r11d, %fs:CANCELHANDLING
+	jnz	2b
+
+	andl	$(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
+	cmpl	$(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
+	je	3f
+
+1:	ret
+
+3:	movq	$TCB_PTHREAD_CANCELED, %fs:RESULT
+	lock
+	orl	$TCB_EXITING_BITMASK, %fs:CANCELHANDLING
+	movq	%fs:CLEANUP_JMP_BUF, %rdi
+#ifdef SHARED
+	call	__pthread_unwind@PLT
+#else
+	call	__pthread_unwind
+#endif
+	hlt
+END(__pthread_enable_asynccancel)
+
+
+	.hidden __pthread_disable_asynccancel
+ENTRY(__pthread_disable_asynccancel)
+	testl	$TCB_CANCELTYPE_BITMASK, %edi
+	jnz	1f
+
+	movl	%fs:CANCELHANDLING, %eax
+2:	movl	%eax, %r11d
+	andl	$~TCB_CANCELTYPE_BITMASK, %r11d
+	lock
+	cmpxchgl %r11d, %fs:CANCELHANDLING
+	jnz	2b
+
+3:	movl	%r11d, %eax
+	andl	$(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
+	cmpl	$TCB_CANCELING_BITMASK, %eax
+	je	4f
+1:	ret
+
+	/* Performance doesn't matter in this loop.  We will
+	   delay until the thread is canceled.  And it is unlikely
+	   that we enter the loop twice.  */
+4:	movq	%fs:0, %rdi
+	movl	$__NR_futex, %eax
+	xorq	%r10, %r10
+	addq	$CANCELHANDLING, %rdi
+	LOAD_PRIVATE_FUTEX_WAIT (%esi)
+	syscall
+	jmp	3b
+END(__pthread_disable_asynccancel)
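
The enable path above is a compare-and-swap loop on the thread's cancelhandling
word: set the async canceltype bit and, if cancellation is already pending and
enabled, unwind right away.  The standalone C model below captures that loop in
simplified form; the bit values and the plain global are illustrative stand-ins
for the per-thread TCB word, and where the real code calls __pthread_unwind the
sketch merely prints a message.

/* Simplified model of the CAS loop; mask values are hypothetical and the
   full mask check of the real code is reduced to the CANCELED bit.  */
#include <stdatomic.h>
#include <stdio.h>

#define SKETCH_CANCELTYPE_BIT 0x02   /* async cancellation enabled */
#define SKETCH_CANCELED_BIT   0x08   /* cancellation request pending */

static _Atomic int cancelhandling;   /* stands in for %fs:CANCELHANDLING */

static int
sketch_enable_asynccancel (void)
{
  int old = atomic_load (&cancelhandling);
  for (;;)
    {
      int new_val = old | SKETCH_CANCELTYPE_BIT;
      if (new_val == old)
        return old;                  /* bit already set: nothing to do */
      if (atomic_compare_exchange_weak (&cancelhandling, &old, new_val))
        {
          if ((new_val & SKETCH_CANCELED_BIT) != 0)
            /* The real code sets the result to PTHREAD_CANCELED and
               calls __pthread_unwind here.  */
            fprintf (stderr, "would act on pending cancellation now\n");
          return old;
        }
      /* CAS failed: 'old' now holds the current value; retry.  */
    }
}

int
main (void)
{
  printf ("previous flags: %#x\n", sketch_enable_asynccancel ());
  return 0;
}
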
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S b/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
new file mode 100644
index 0000000000..1100588502
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/libc-cancellation.S
@@ -0,0 +1,22 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#define __pthread_enable_asynccancel __libc_enable_asynccancel
+#define __pthread_disable_asynccancel __libc_disable_asynccancel
+#include "cancellation.S"
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S b/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
new file mode 100644
index 0000000000..ce4192b5d3
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/librt-cancellation.S
@@ -0,0 +1,22 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#define __pthread_enable_asynccancel __librt_enable_asynccancel
+#define __pthread_disable_asynccancel __librt_disable_asynccancel
+#include "cancellation.S"
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h b/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h
index 3e741da794..1e92de1dcc 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/sysdep-cancel.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2006, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
 
@@ -25,6 +25,10 @@
 
 #if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
 
+/* The code to disable cancellation depends on the fact that the called
+   functions are special.  They don't modify registers other than %rax
+   and %r11 if they return.  Therefore we don't have to preserve other
+   registers around these calls.  */
 # undef PSEUDO
 # define PSEUDO(name, syscall_name, args)				      \
   .text;								      \
@@ -40,60 +44,23 @@
     ret;								      \
   .size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel;	      \
   L(pseudo_cancel):							      \
-    /* Save registers that might get destroyed.  */			      \
-    SAVESTK_##args							      \
-    PUSHARGS_##args							      \
+    /* We always have to align the stack before calling a function.  */	      \
+    subq $8, %rsp; cfi_adjust_cfa_offset (8);				      \
     CENABLE								      \
-    /* Restore registers.  */						      \
-    POPARGS_##args							      \
     /* The return value from CENABLE is argument for CDISABLE.  */	      \
     movq %rax, (%rsp);							      \
-    movl $SYS_ify (syscall_name), %eax;					      \
-    syscall;								      \
+    DO_CALL (syscall_name, args);					      \
     movq (%rsp), %rdi;							      \
     /* Save %rax since it's the error code from the syscall.  */	      \
-    movq %rax, 8(%rsp);							      \
+    movq %rax, %rdx;							      \
     CDISABLE								      \
-    movq 8(%rsp), %rax;							      \
-    RESTSTK_##args							      \
+    movq %rdx, %rax;							      \
+    addq $8,%rsp; cfi_adjust_cfa_offset (-8);				      \
     cmpq $-4095, %rax;							      \
     jae SYSCALL_ERROR_LABEL;						      \
   L(pseudo_end):
 
 
-# define PUSHARGS_0	/* Nothing.  */
-# define PUSHARGS_1	PUSHARGS_0 movq %rdi, 8(%rsp);
-# define PUSHARGS_2	PUSHARGS_1 movq %rsi, 16(%rsp);
-# define PUSHARGS_3	PUSHARGS_2 movq %rdx, 24(%rsp);
-# define PUSHARGS_4	PUSHARGS_3 movq %rcx, 32(%rsp);
-# define PUSHARGS_5	PUSHARGS_4 movq %r8, 40(%rsp);
-# define PUSHARGS_6	PUSHARGS_5 movq %r9, 48(%rsp);
-
-# define POPARGS_0	/* Nothing.  */
-# define POPARGS_1	POPARGS_0 movq 8(%rsp), %rdi;
-# define POPARGS_2	POPARGS_1 movq 16(%rsp), %rsi;
-# define POPARGS_3	POPARGS_2 movq 24(%rsp), %rdx;
-# define POPARGS_4	POPARGS_3 movq 32(%rsp), %r10;
-# define POPARGS_5	POPARGS_4 movq 40(%rsp), %r8;
-# define POPARGS_6	POPARGS_5 movq 48(%rsp), %r9;
-
-/* We always have to align the stack before calling a function.  */
-# define SAVESTK_0	subq $24, %rsp; cfi_adjust_cfa_offset (24);
-# define SAVESTK_1	SAVESTK_0
-# define SAVESTK_2	SAVESTK_1
-# define SAVESTK_3	subq $40, %rsp; cfi_adjust_cfa_offset (40);
-# define SAVESTK_4	SAVESTK_3
-# define SAVESTK_5	subq $56, %rsp; cfi_adjust_cfa_offset (56);
-# define SAVESTK_6	SAVESTK_5
-
-# define RESTSTK_0	addq $24,%rsp; cfi_adjust_cfa_offset (-24);
-# define RESTSTK_1	RESTSTK_0
-# define RESTSTK_2	RESTSTK_1
-# define RESTSTK_3	addq $40, %rsp; cfi_adjust_cfa_offset (-40);
-# define RESTSTK_4	RESTSTK_3
-# define RESTSTK_5	addq $56, %rsp; cfi_adjust_cfa_offset (-56);
-# define RESTSTK_6	RESTSTK_5
-
 # ifdef IS_IN_libpthread
 #  define CENABLE	call __pthread_enable_asynccancel;
 #  define CDISABLE	call __pthread_disable_asynccancel;
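
The cmpq $-4095, %rax; jae SYSCALL_ERROR_LABEL tail that the macro keeps relies
on the standard Linux convention: a raw syscall return value in the range
[-4095, -1] is a negated errno code.  The snippet below only illustrates that
check in C; glibc's real error handling sits behind SYSCALL_ERROR_LABEL.

/* Illustration of the -4095 convention only, not glibc's error path.  */
#include <errno.h>
#include <stdio.h>

static long
sketch_check_result (long raw)
{
  /* Mirrors 'cmpq $-4095, %rax; jae ...' (an unsigned comparison).  */
  if ((unsigned long) raw >= (unsigned long) -4095L)
    {
      errno = (int) -raw;
      return -1;
    }
  return raw;
}

int
main (void)
{
  long r = sketch_check_result (-9L);          /* raw -EBADF */
  printf ("result %ld, errno %d\n", r, errno);
  printf ("result %ld\n", sketch_check_result (42L));
  return 0;
}
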
diff --git a/nptl/sysdeps/x86_64/tcb-offsets.sym b/nptl/sysdeps/x86_64/tcb-offsets.sym
index 51f35c61cf..cf863752ee 100644
--- a/nptl/sysdeps/x86_64/tcb-offsets.sym
+++ b/nptl/sysdeps/x86_64/tcb-offsets.sym
@@ -16,3 +16,13 @@ VGETCPU_CACHE_OFFSET	offsetof (tcbhead_t, vgetcpu_cache)
 PRIVATE_FUTEX		offsetof (tcbhead_t, private_futex)
 #endif
 RTLD_SAVESPACE_SSE	offsetof (tcbhead_t, rtld_savespace_sse)
+
+-- Not strictly offsets, but these values are also used in the TCB.
+TCB_CANCELSTATE_BITMASK	 CANCELSTATE_BITMASK
+TCB_CANCELTYPE_BITMASK	 CANCELTYPE_BITMASK
+TCB_CANCELING_BITMASK	 CANCELING_BITMASK
+TCB_CANCELED_BITMASK	 CANCELED_BITMASK
+TCB_EXITING_BITMASK	 EXITING_BITMASK
+TCB_CANCEL_RESTMASK	 CANCEL_RESTMASK
+TCB_TERMINATED_BITMASK	 TERMINATED_BITMASK
+TCB_PTHREAD_CANCELED	 PTHREAD_CANCELED
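
The .sym file is run through glibc's constant-generation step at build time,
producing an assembler-visible header so the code above can refer to names like
CANCELHANDLING and TCB_CANCELTYPE_BITMASK instead of raw numbers.  The snippet
below only illustrates the offsetof() half of that idea with a made-up
structure; the real layouts of tcbhead_t and struct pthread are not reproduced
here.

/* Hypothetical stand-in structure; real offsets come from glibc's
   struct pthread / tcbhead_t, not from this sketch.  */
#include <stddef.h>
#include <stdio.h>

struct sketch_tcb
{
  void *self;
  int multiple_threads;
  int cancelhandling;        /* the word the CAS loops above operate on */
  void *cleanup_jmp_buf;
};

int
main (void)
{
  /* The generated header would carry values like these as constants
     usable from assembler sources.  */
  printf ("CANCELHANDLING  %zu\n", offsetof (struct sketch_tcb, cancelhandling));
  printf ("CLEANUP_JMP_BUF %zu\n", offsetof (struct sketch_tcb, cleanup_jmp_buf));
  return 0;
}
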