Diffstat (limited to 'nptl/cancellation.c')
-rw-r--r-- | nptl/cancellation.c | 127 |
1 files changed, 73 insertions, 54 deletions
diff --git a/nptl/cancellation.c b/nptl/cancellation.c
index 7ce60e70d0..e71008b58b 100644
--- a/nptl/cancellation.c
+++ b/nptl/cancellation.c
@@ -18,74 +18,93 @@
 #include <setjmp.h>
 #include <stdlib.h>
 #include "pthreadP.h"
-#include <futex-internal.h>
-
-/* The next two functions are similar to pthread_setcanceltype() but
-   more specialized for the use in the cancelable functions like write().
-   They do not need to check parameters etc.  These functions must be
-   AS-safe, with the exception of the actual cancellation, because they
-   are called by wrappers around AS-safe functions like write().  */
-int
-__pthread_enable_asynccancel (void)
+/* Called by the INTERNAL_SYSCALL_CANCEL macro; checks for cancellation and
+   returns the syscall value or its negative error code.  */
+long int
+__internal_syscall_cancel (__syscall_arg_t a1, __syscall_arg_t a2,
+                           __syscall_arg_t a3, __syscall_arg_t a4,
+                           __syscall_arg_t a5, __syscall_arg_t a6,
+                           __SYSCALL_CANCEL7_ARG_DEF
+                           __syscall_arg_t nr)
 {
-  struct pthread *self = THREAD_SELF;
-  int oldval = atomic_load_relaxed (&self->cancelhandling);
+  long int result;
+  struct pthread *pd = THREAD_SELF;
 
-  while (1)
+  /* If cancellation is not enabled, call the syscall directly; do the same
+     during thread termination to avoid calling __syscall_do_cancel while
+     executing cleanup handlers.  */
+  int ch = atomic_load_relaxed (&pd->cancelhandling);
+  if (SINGLE_THREAD_P || !cancel_enabled (ch) || cancel_exiting (ch))
     {
-      int newval = oldval | CANCELTYPE_BITMASK;
-
-      if (newval == oldval)
-	break;
+      result = INTERNAL_SYSCALL_NCS_CALL (nr, a1, a2, a3, a4, a5, a6
+                                          __SYSCALL_CANCEL7_ARCH_ARG7);
+      if (INTERNAL_SYSCALL_ERROR_P (result))
+	return -INTERNAL_SYSCALL_ERRNO (result);
+      return result;
+    }
 
-      if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
-						&oldval, newval))
-	{
-	  if (cancel_enabled_and_canceled_and_async (newval))
-	    {
-	      self->result = PTHREAD_CANCELED;
-	      __do_cancel ();
-	    }
+  /* Call the arch-specific entry point that contains the global markers
+     to be checked by the SIGCANCEL handler.  */
+  result = __syscall_cancel_arch (&pd->cancelhandling, nr, a1, a2, a3, a4, a5,
+                                  a6 __SYSCALL_CANCEL7_ARCH_ARG7);
 
-	  break;
-	}
-    }
+  /* If the cancellable syscall was interrupted by SIGCANCEL and it has no
+     side effects, cancel the thread if cancellation is enabled.  */
+  ch = atomic_load_relaxed (&pd->cancelhandling);
+  /* The behaviour here assumes that EINTR is returned only if there are no
+     visible side effects.  POSIX Issue 7 has not yet provided any stronger
+     language for close, and in theory the close syscall could return EINTR
+     and leave the file descriptor open (which is conforming but leaks the
+     descriptor).  It is expected that no such kernel is used with glibc.  */
+  if (result == -EINTR && cancel_enabled_and_canceled (ch))
+    __syscall_do_cancel ();
 
-  return oldval;
+  return result;
 }
-libc_hidden_def (__pthread_enable_asynccancel)
 
-/* See the comment for __pthread_enable_asynccancel regarding
-   the AS-safety of this function.  */
-void
-__pthread_disable_asynccancel (int oldtype)
+/* Called by the SYSCALL_CANCEL macro; checks for cancellation and returns
+   the expected syscall success value (usually 0) or, in case of failure,
+   -1 with errno set to the syscall error code.  */
+long int
+__syscall_cancel (__syscall_arg_t a1, __syscall_arg_t a2,
+                  __syscall_arg_t a3, __syscall_arg_t a4,
+                  __syscall_arg_t a5, __syscall_arg_t a6,
+                  __SYSCALL_CANCEL7_ARG_DEF __syscall_arg_t nr)
 {
-  /* If asynchronous cancellation was enabled before we do not have
-     anything to do.  */
-  if (oldtype & CANCELTYPE_BITMASK)
-    return;
+  int r = __internal_syscall_cancel (a1, a2, a3, a4, a5, a6,
+                                     __SYSCALL_CANCEL7_ARG nr);
+  return __glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (r))
+	 ? SYSCALL_ERROR_LABEL (INTERNAL_SYSCALL_ERRNO (r))
+	 : r;
+}
 
+/* Called by __syscall_cancel_arch or the functions above to start thread
+   cancellation.  */
+_Noreturn void
+__syscall_do_cancel (void)
+{
   struct pthread *self = THREAD_SELF;
-  int newval;
+
+  /* Disable thread cancellation to avoid cancellable entrypoints calling
+     __syscall_do_cancel recursively.  A relaxed atomic load is enough to
+     check the state of cancelhandling; there is no particular ordering
+     requirement between the syscall and another thread setting our
+     cancelhandling with an atomic acquire store.
+
+     POSIX Issue 7 notes that the cancellation occurs asynchronously on the
+     target thread, which implies there are no ordering requirements, so a
+     release MO store is not needed here.  */
   int oldval = atomic_load_relaxed (&self->cancelhandling);
-  do
+  while (1)
     {
-      newval = oldval & ~CANCELTYPE_BITMASK;
+      int newval = oldval | CANCELSTATE_BITMASK;
+      if (oldval == newval)
+	break;
+      if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
+						&oldval, newval))
+	break;
     }
-  while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
-						&oldval, newval));
 
-  /* We cannot return when we are being canceled.  Upon return the
-     thread might be things which would have to be undone.  The
-     following loop should loop until the cancellation signal is
-     delivered.  */
-  while (__glibc_unlikely ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
-			   == CANCELING_BITMASK))
-    {
-      futex_wait_simple ((unsigned int *) &self->cancelhandling, newval,
-			 FUTEX_PRIVATE);
-      newval = atomic_load_relaxed (&self->cancelhandling);
-    }
+  __do_cancel (PTHREAD_CANCELED);
 }
-libc_hidden_def (__pthread_disable_asynccancel)
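
For context (not part of the patch): __internal_syscall_cancel and __syscall_cancel back the INTERNAL_SYSCALL_CANCEL / SYSCALL_CANCEL macros used by the cancellable syscall wrappers, and __syscall_do_cancel performs the actual cancellation once a blocked syscall is known to have had no side effects. The standalone sketch below uses only the public POSIX threads API and illustrates the user-visible behaviour this mechanism implements: a thread blocked in a cancellation point such as read() is cancelled by pthread_cancel, its cleanup handler runs, and pthread_join reports PTHREAD_CANCELED. The file name and helper names in the sketch are illustrative, not part of glibc.

/* cancel-example.c (illustrative): build with `gcc -O2 -pthread cancel-example.c'.  */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int fds[2];

static void
cleanup (void *arg)
{
  /* Runs when the thread is cancelled while blocked in read ().  */
  puts ("cleanup handler ran");
}

static void *
worker (void *arg)
{
  char buf[1];
  pthread_cleanup_push (cleanup, NULL);
  /* read () is a cancellation point; the thread blocks here because the
     write side of the pipe never produces data.  */
  read (fds[0], buf, sizeof buf);
  pthread_cleanup_pop (0);
  return NULL;
}

int
main (void)
{
  if (pipe (fds) != 0)
    return 1;

  pthread_t thr;
  if (pthread_create (&thr, NULL, worker, NULL) != 0)
    return 1;

  sleep (1);            /* Let the worker block in read ().  */
  pthread_cancel (thr);

  void *res;
  pthread_join (thr, &res);
  printf ("thread %s\n",
          res == PTHREAD_CANCELED ? "was cancelled" : "exited normally");
  return 0;
}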