path: root/nptl/cancellation.c
/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <setjmp.h>
#include <stdlib.h>
#include "pthreadP.h"

/* Called by the INTERNAL_SYSCALL_CANCEL macro: check for cancellation and
   return the syscall value or its negative error code.  */
long int
__internal_syscall_cancel (__syscall_arg_t a1, __syscall_arg_t a2,
			   __syscall_arg_t a3, __syscall_arg_t a4,
			   __syscall_arg_t a5, __syscall_arg_t a6,
			   __syscall_arg_t nr)
{
  long int result;
  struct pthread *pd = THREAD_SELF;

  /* If cancellation is not enabled, call the syscall directly; do the same
     if the thread is terminating, to avoid calling __syscall_do_cancel
     while cleanup handlers are executing.  */
  int ch = atomic_load_relaxed (&pd->cancelhandling);
  if (SINGLE_THREAD_P || !cancel_enabled (ch) || cancel_exiting (ch))
    {
      result = INTERNAL_SYSCALL_NCS_CALL (nr, a1, a2, a3, a4, a5, a6);
      if (INTERNAL_SYSCALL_ERROR_P (result))
	return -INTERNAL_SYSCALL_ERRNO (result);
      return result;
    }

  /* Call the arch-specific entry point that contains the global markers
     checked by the SIGCANCEL handler.  */
  result = __syscall_cancel_arch (&pd->cancelhandling, nr, a1, a2, a3, a4, a5,
			          a6);

  /* If the cancellable syscall was interrupted by SIGCANCEL before it had
     any side effects, cancel the thread if cancellation is enabled.  */
  ch = atomic_load_relaxed (&pd->cancelhandling);
  if (result == -EINTR && cancel_enabled_and_canceled (ch))
    __syscall_do_cancel ();

  return result;
}
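
/* Usage sketch (illustrative only, not part of this file): cancellable
   syscall implementations elsewhere in glibc reach
   __internal_syscall_cancel through the INTERNAL_SYSCALL_CANCEL macro and
   handle the negative error code convention themselves, roughly:

     int r = INTERNAL_SYSCALL_CANCEL (clock_nanosleep_time64, clock_id,
                                      flags, &ts64, &tr64);
     if (r < 0)
       return -r;

   where a negative return is the negated error code and errno itself is
   left untouched.  The syscall name and arguments above are just an
   example.  */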

/* Called by the SYSCALL_CANCEL macro: check for cancellation and return the
   syscall's expected success value (usually 0) or, in case of failure, -1
   with errno set to the syscall error code.  */
long int
__syscall_cancel (__syscall_arg_t a1, __syscall_arg_t a2,
		  __syscall_arg_t a3, __syscall_arg_t a4,
		  __syscall_arg_t a5, __syscall_arg_t a6,
		  __syscall_arg_t nr)
{
  int r = __internal_syscall_cancel (a1, a2, a3, a4, a5, a6, nr);
  return __glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (r))
	 ? SYSCALL_ERROR_LABEL (INTERNAL_SYSCALL_ERRNO (r))
	 : r;
}
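
/* Usage sketch (illustrative only, not part of this file): most
   cancellable system call wrappers go through the SYSCALL_CANCEL macro,
   which expands to a call to __syscall_cancel and therefore already
   provides the usual -1/errno failure convention, for example:

     ssize_t
     __libc_read (int fd, void *buf, size_t nbytes)
     {
       return SYSCALL_CANCEL (read, fd, buf, nbytes);
     }

   This mirrors the Linux read wrapper and is reproduced here only to
   show the calling convention.  */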

/* Called by __syscall_cancel_arch or by the functions above to start the
   thread cancellation.  */
_Noreturn void
__syscall_do_cancel (void)
{
  struct pthread *self = THREAD_SELF;

  /* Disable thread cancellation so that cancellable entry points do not
     call __syscall_do_cancel recursively.  */
  int oldval = atomic_load_relaxed (&self->cancelhandling);
  while (1)
    {
      int newval = oldval | CANCELSTATE_BITMASK;
      if (oldval == newval)
	break;
      if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
						&oldval, newval))
	break;
    }

  __do_cancel (PTHREAD_CANCELED);
}
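
/* Illustrative note (not part of this file): the machinery above is what
   makes pthread_cancel take effect at blocking system calls.  With
   deferred cancellation (the default), a thread blocked in a cancellation
   point such as read is interrupted by SIGCANCEL and, if the syscall has
   produced no side effects, ends up in __syscall_do_cancel; a joining
   thread then observes PTHREAD_CANCELED:

     pthread_cancel (tid);
     pthread_join (tid, &res);
     assert (res == PTHREAD_CANCELED);

   tid and res here stand for hypothetical application-side variables.  */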