Diffstat (limited to 'nptl')
 -rw-r--r--  nptl/Makefile                  |  10
 -rw-r--r--  nptl/cancellation.c            | 127
 -rw-r--r--  nptl/cleanup_defer.c           |   5
 -rw-r--r--  nptl/descr-const.sym           |   6
 -rw-r--r--  nptl/descr.h                   |  18
 -rw-r--r--  nptl/libc-cleanup.c            |   5
 -rw-r--r--  nptl/pthread_cancel.c          |  78
 -rw-r--r--  nptl/pthread_exit.c            |   4
 -rw-r--r--  nptl/pthread_setcancelstate.c  |   2
 -rw-r--r--  nptl/pthread_setcanceltype.c   |   2
 -rw-r--r--  nptl/pthread_testcancel.c      |   5
 -rw-r--r--  nptl/tst-cancel31.c            | 100
 12 files changed, 240 insertions(+), 122 deletions(-)
diff --git a/nptl/Makefile b/nptl/Makefile
index c4c27e0d23..bf4c29b6ed 100644
--- a/nptl/Makefile
+++ b/nptl/Makefile
@@ -204,6 +204,7 @@ routines = \
   sem_timedwait \
   sem_unlink \
   sem_wait \
+  syscall_cancel \
   tpp \
   unwind \
   vars \
@@ -235,7 +236,8 @@ CFLAGS-pthread_setcanceltype.c += -fexceptions -fasynchronous-unwind-tables
 
 # These are internal functions with similar functionality to setcancelstate
 # and setcanceltype.
-CFLAGS-cancellation.c += -fasynchronous-unwind-tables
+CFLAGS-cancellation.c += -fexceptions -fasynchronous-unwind-tables
+CFLAGS-syscall_cancel.c += -fexceptions -fasynchronous-unwind-tables
 
 # Calling pthread_exit() must cause the registered cancel handlers to
 # be executed.  Therefore exceptions have to be thrown through this
@@ -279,6 +281,7 @@ tests = \
   tst-cancel7 \
   tst-cancel17 \
   tst-cancel24 \
+  tst-cancel31 \
   tst-cond26 \
   tst-context1 \
   tst-default-attr \
@@ -404,7 +407,10 @@ xtests += tst-eintr1
 
 test-srcs = tst-oddstacklimit
 
-gen-as-const-headers = unwindbuf.sym
+gen-as-const-headers = \
+  descr-const.sym \
+  unwindbuf.sym \
+  # gen-as-const-headers
 
 gen-py-const-headers := nptl_lock_constants.pysym
 pretty-printers := nptl-printers.py
diff --git a/nptl/cancellation.c b/nptl/cancellation.c
index 7ce60e70d0..e71008b58b 100644
--- a/nptl/cancellation.c
+++ b/nptl/cancellation.c
@@ -18,74 +18,93 @@
 #include <setjmp.h>
 #include <stdlib.h>
 #include "pthreadP.h"
-#include <futex-internal.h>
 
-
-/* The next two functions are similar to pthread_setcanceltype() but
-   more specialized for the use in the cancelable functions like write().
-   They do not need to check parameters etc.  These functions must be
-   AS-safe, with the exception of the actual cancellation, because they
-   are called by wrappers around AS-safe functions like write().*/
-int
-__pthread_enable_asynccancel (void)
+/* Called by the INTERNAL_SYSCALL_CANCEL macro, checks for cancellation and
+   returns the syscall value or its negative error code.  */
+long int
+__internal_syscall_cancel (__syscall_arg_t a1, __syscall_arg_t a2,
+			   __syscall_arg_t a3, __syscall_arg_t a4,
+			   __syscall_arg_t a5, __syscall_arg_t a6,
+			   __SYSCALL_CANCEL7_ARG_DEF
+			   __syscall_arg_t nr)
 {
-  struct pthread *self = THREAD_SELF;
-  int oldval = atomic_load_relaxed (&self->cancelhandling);
+  long int result;
+  struct pthread *pd = THREAD_SELF;
 
-  while (1)
+  /* If cancellation is not enabled, call the syscall directly.  Do the same
+     during thread termination, to avoid calling __syscall_do_cancel while
+     executing cleanup handlers.  */
+  int ch = atomic_load_relaxed (&pd->cancelhandling);
+  if (SINGLE_THREAD_P || !cancel_enabled (ch) || cancel_exiting (ch))
     {
-      int newval = oldval | CANCELTYPE_BITMASK;
-
-      if (newval == oldval)
-	break;
+      result = INTERNAL_SYSCALL_NCS_CALL (nr, a1, a2, a3, a4, a5, a6
+					  __SYSCALL_CANCEL7_ARCH_ARG7);
+      if (INTERNAL_SYSCALL_ERROR_P (result))
+	return -INTERNAL_SYSCALL_ERRNO (result);
+      return result;
+    }
 
-      if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
-						&oldval, newval))
-	{
-	  if (cancel_enabled_and_canceled_and_async (newval))
-	    {
-	      self->result = PTHREAD_CANCELED;
-	      __do_cancel ();
-	    }
+  /* Call the arch-specific entry point that contains the global markers
+     to be checked by the SIGCANCEL handler.  */
+  result = __syscall_cancel_arch (&pd->cancelhandling, nr, a1, a2, a3, a4, a5,
+			          a6 __SYSCALL_CANCEL7_ARCH_ARG7);
 
-	  break;
-	}
-    }
+  /* If the cancellable syscall was interrupted by SIGCANCEL and it had no
+     side effects, cancel the thread if cancellation is enabled.  */
+  ch = atomic_load_relaxed (&pd->cancelhandling);
+  /* The behaviour here assumes that EINTR is returned only if there are no
+     visible side effects.  POSIX Issue 7 has not yet provided any stronger
+     language for close, and in theory the close syscall could return EINTR
+     and leave the file descriptor open (conforming, but leaking the
+     descriptor).  We assume no such kernel is used with glibc.  */
+  if (result == -EINTR && cancel_enabled_and_canceled (ch))
+    __syscall_do_cancel ();
 
-  return oldval;
+  return result;
 }
-libc_hidden_def (__pthread_enable_asynccancel)
 
-/* See the comment for __pthread_enable_asynccancel regarding
-   the AS-safety of this function.  */
-void
-__pthread_disable_asynccancel (int oldtype)
+/* Called by the SYSCALL_CANCEL macro, checks for cancellation and returns
+   the syscall's expected success value (usually 0) or, in case of failure,
+   -1 with errno set to the syscall error value.  */
+long int
+__syscall_cancel (__syscall_arg_t a1, __syscall_arg_t a2,
+		  __syscall_arg_t a3, __syscall_arg_t a4,
+		  __syscall_arg_t a5, __syscall_arg_t a6,
+		  __SYSCALL_CANCEL7_ARG_DEF __syscall_arg_t nr)
 {
-  /* If asynchronous cancellation was enabled before we do not have
-     anything to do.  */
-  if (oldtype & CANCELTYPE_BITMASK)
-    return;
+  int r = __internal_syscall_cancel (a1, a2, a3, a4, a5, a6,
+				     __SYSCALL_CANCEL7_ARG nr);
+  return __glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (r))
+	 ? SYSCALL_ERROR_LABEL (INTERNAL_SYSCALL_ERRNO (r))
+	 : r;
+}
 
+/* Called by __syscall_cancel_arch or the functions above to start the
+   thread cancellation.  */
+_Noreturn void
+__syscall_do_cancel (void)
+{
   struct pthread *self = THREAD_SELF;
-  int newval;
+
+  /* Disable thread cancellation to avoid cancellable entrypoints calling
+     __syscall_do_cancel recursively.  We use a relaxed atomic load to check
+     the state of cancelhandling; there is no particular ordering requirement
+     between the syscall and the other thread setting our cancelhandling with
+     an acquire atomic store.
+
+     POSIX Issue 7 notes that the cancellation occurs asynchronously on the
+     target thread, which implies there are no ordering requirements.  A
+     release MO store is not needed here.  */
   int oldval = atomic_load_relaxed (&self->cancelhandling);
-  do
+  while (1)
     {
-      newval = oldval & ~CANCELTYPE_BITMASK;
+      int newval = oldval | CANCELSTATE_BITMASK;
+      if (oldval == newval)
+	break;
+      if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
+						&oldval, newval))
+	break;
     }
-  while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
-						&oldval, newval));
 
-  /* We cannot return when we are being canceled.  Upon return the
-     thread might be things which would have to be undone.  The
-     following loop should loop until the cancellation signal is
-     delivered.  */
-  while (__glibc_unlikely ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
-			   == CANCELING_BITMASK))
-    {
-      futex_wait_simple ((unsigned int *) &self->cancelhandling, newval,
-			 FUTEX_PRIVATE);
-      newval = atomic_load_relaxed (&self->cancelhandling);
-    }
+  __do_cancel (PTHREAD_CANCELED);
 }
-libc_hidden_def (__pthread_disable_asynccancel)
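
For reference, a minimal, self-contained POSIX C sketch (not taken from the
patch; all names are illustrative, compile with -pthread) of the behaviour
the new __internal_syscall_cancel path implements for the no-side-effect
case: a thread blocked in read() on a pipe that is never written to is
cancelled, its cleanup handler runs, and pthread_join reports
PTHREAD_CANCELED.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void
cleanup (void *arg)
{
  /* Runs while the thread is being cancelled out of the blocked read().  */
  puts ("cleanup handler ran");
}

static void *
blocked_reader (void *arg)
{
  int *fds = arg;
  char c;
  pthread_cleanup_push (cleanup, NULL);
  /* read() is a cancellation point; nothing is ever written to the pipe,
     so the thread stays blocked in the syscall until it is cancelled.  */
  if (read (fds[0], &c, 1) < 0)
    perror ("read");
  pthread_cleanup_pop (0);
  return NULL;
}

int
main (void)
{
  int fds[2];
  if (pipe (fds) != 0)
    return 1;
  pthread_t t;
  pthread_create (&t, NULL, blocked_reader, fds);
  sleep (1);                    /* Give the thread time to block in read().  */
  pthread_cancel (t);
  void *res;
  pthread_join (t, &res);
  printf ("thread %s\n", res == PTHREAD_CANCELED ? "was cancelled" : "exited");
  return 0;
}
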
diff --git a/nptl/cleanup_defer.c b/nptl/cleanup_defer.c
index dc08eda003..db32c4fc26 100644
--- a/nptl/cleanup_defer.c
+++ b/nptl/cleanup_defer.c
@@ -82,10 +82,7 @@ ___pthread_unregister_cancel_restore (__pthread_unwind_buf_t *buf)
 						    &cancelhandling, newval));
 
       if (cancel_enabled_and_canceled (cancelhandling))
-	{
-	  self->result = PTHREAD_CANCELED;
-	  __do_cancel ();
-	}
+	__do_cancel (PTHREAD_CANCELED);
     }
 }
 versioned_symbol (libc, ___pthread_unregister_cancel_restore,
diff --git a/nptl/descr-const.sym b/nptl/descr-const.sym
new file mode 100644
index 0000000000..8608248354
--- /dev/null
+++ b/nptl/descr-const.sym
@@ -0,0 +1,6 @@
+#include <tls.h>
+
+-- Not strictly offsets; these values are used for thread cancellation by the
+-- arch-specific cancellation entrypoints.
+TCB_CANCELED_BIT	 CANCELED_BIT
+TCB_CANCELED_BITMASK	 CANCELED_BITMASK
diff --git a/nptl/descr.h b/nptl/descr.h
index 8cef95810c..65d3baaee3 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -426,6 +426,24 @@ struct pthread
 } __attribute ((aligned (TCB_ALIGNMENT)));
 
 static inline bool
+cancel_enabled (int value)
+{
+  return (value & CANCELSTATE_BITMASK) == 0;
+}
+
+static inline bool
+cancel_async_enabled (int value)
+{
+  return (value & CANCELTYPE_BITMASK) != 0;
+}
+
+static inline bool
+cancel_exiting (int value)
+{
+  return (value & EXITING_BITMASK) != 0;
+}
+
+static inline bool
 cancel_enabled_and_canceled (int value)
 {
   return (value & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
diff --git a/nptl/libc-cleanup.c b/nptl/libc-cleanup.c
index fe042c85aa..20d746cbb7 100644
--- a/nptl/libc-cleanup.c
+++ b/nptl/libc-cleanup.c
@@ -69,10 +69,7 @@ __libc_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer)
 						    &cancelhandling, newval));
 
       if (cancel_enabled_and_canceled (cancelhandling))
-	{
-	  self->result = PTHREAD_CANCELED;
-	  __do_cancel ();
-	}
+	__do_cancel (PTHREAD_CANCELED);
     }
 }
 libc_hidden_def (__libc_cleanup_pop_restore)
diff --git a/nptl/pthread_cancel.c b/nptl/pthread_cancel.c
index 69701db6f9..012c4ebeb8 100644
--- a/nptl/pthread_cancel.c
+++ b/nptl/pthread_cancel.c
@@ -23,6 +23,7 @@
 #include <sysdep.h>
 #include <unistd.h>
 #include <unwind-link.h>
+#include <cancellation-pc-check.h>
 #include <stdio.h>
 #include <gnu/lib-names.h>
 #include <sys/single_threaded.h>
@@ -40,31 +41,16 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx)
       || si->si_code != SI_TKILL)
     return;
 
+  /* Check if asynchronous cancellation mode is set or if the interrupted
+     instruction pointer falls within the cancellable syscall bridge.  For
+     interruptible syscalls with external side effects (e.g. partial reads),
+     the kernel will set the IP to after __syscall_cancel_arch_end, thus
+     disabling the cancellation and allowing the process to handle such
+     conditions.  */
   struct pthread *self = THREAD_SELF;
-
   int oldval = atomic_load_relaxed (&self->cancelhandling);
-  while (1)
-    {
-      /* We are canceled now.  When canceled by another thread this flag
-	 is already set but if the signal is directly send (internally or
-	 from another process) is has to be done here.  */
-      int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
-
-      if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
-	/* Already canceled or exiting.  */
-	break;
-
-      if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
-						&oldval, newval))
-	{
-	  self->result = PTHREAD_CANCELED;
-
-	  /* Make sure asynchronous cancellation is still enabled.  */
-	  if ((oldval & CANCELTYPE_BITMASK) != 0)
-	    /* Run the registered destructors and terminate the thread.  */
-	    __do_cancel ();
-	}
-    }
+  if (cancel_async_enabled (oldval) || cancellation_pc_check (ctx))
+    __syscall_do_cancel ();
 }
 
 int
@@ -106,15 +92,13 @@ __pthread_cancel (pthread_t th)
   /* Some syscalls are never restarted after being interrupted by a signal
      handler, regardless of the use of SA_RESTART (they always fail with
      EINTR).  So pthread_cancel cannot send SIGCANCEL unless the cancellation
-     is enabled and set as asynchronous (in this case the cancellation will
-     be acted in the cancellation handler instead by the syscall wrapper).
-     Otherwise the target thread is set as 'cancelling' (CANCELING_BITMASK)
+     is enabled.
+     In this case the target thread is set as 'cancelled' (CANCELED_BITMASK)
      by atomically setting 'cancelhandling', and the cancellation will be
      acted upon at the next cancellation entrypoint in the target thread.
 
-     It also requires to atomically check if cancellation is enabled and
-     asynchronous, so both cancellation state and type are tracked on
-     'cancelhandling'.  */
+     It also requires atomically checking whether cancellation is enabled, so
+     the state is also tracked in 'cancelhandling'.  */
 
   int result = 0;
   int oldval = atomic_load_relaxed (&pd->cancelhandling);
@@ -122,19 +106,17 @@ __pthread_cancel (pthread_t th)
   do
     {
     again:
-      newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
+      newval = oldval | CANCELED_BITMASK;
       if (oldval == newval)
 	break;
 
-      /* If the cancellation is handled asynchronously just send a
-	 signal.  We avoid this if possible since it's more
-	 expensive.  */
-      if (cancel_enabled_and_canceled_and_async (newval))
+      /* Only send the SIGCANCEL signal if cancellation is enabled, since some
+	 syscalls are never restarted even with SA_RESTART.  The signal
+	 will act iff async cancellation is enabled.  */
+      if (cancel_enabled (newval))
 	{
-	  /* Mark the cancellation as "in progress".  */
-	  int newval2 = oldval | CANCELING_BITMASK;
 	  if (!atomic_compare_exchange_weak_acquire (&pd->cancelhandling,
-						     &oldval, newval2))
+						     &oldval, newval))
 	    goto again;
 
 	  if (pd == THREAD_SELF)
@@ -143,9 +125,8 @@ __pthread_cancel (pthread_t th)
 	       pthread_create, so the signal handler may not have been
 	       set up for a self-cancel.  */
 	    {
-	      pd->result = PTHREAD_CANCELED;
-	      if ((newval & CANCELTYPE_BITMASK) != 0)
-		__do_cancel ();
+	      if (cancel_async_enabled (newval))
+		__do_cancel (PTHREAD_CANCELED);
 	    }
 	  else
 	    /* The cancellation handler will take care of marking the
@@ -154,19 +135,18 @@ __pthread_cancel (pthread_t th)
 
 	  break;
 	}
-
-	/* A single-threaded process should be able to kill itself, since
-	   there is nothing in the POSIX specification that says that it
-	   cannot.  So we set multiple_threads to true so that cancellation
-	   points get executed.  */
-	THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
-#ifndef TLS_MULTIPLE_THREADS_IN_TCB
-	__libc_single_threaded_internal = 0;
-#endif
     }
   while (!atomic_compare_exchange_weak_acquire (&pd->cancelhandling, &oldval,
 						newval));
 
+  /* A single-threaded process should be able to kill itself, since there is
+     nothing in the POSIX specification that says that it cannot.  So we set
+     multiple_threads to true so that cancellation points get executed.  */
+  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
+#ifndef TLS_MULTIPLE_THREADS_IN_TCB
+  __libc_single_threaded_internal = 0;
+#endif
+
   return result;
 }
 versioned_symbol (libc, __pthread_cancel, pthread_cancel, GLIBC_2_34);
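
The user-visible guarantee behind this change (BZ #12683) is that a deferred
cancellation request around a cancellation point either cancels the thread
with no side effect, or lets the syscall's side effect become visible so the
application can release the resource.  A minimal sketch of how an application
relies on that guarantee (not from the patch; names are illustrative, public
POSIX API only, compile with -pthread):

#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>

static void
close_fd (void *arg)
{
  int fd = *(int *) arg;
  if (fd >= 0)
    close (fd);                 /* The descriptor is visible here, so it can
                                   be released.  */
}

static void *
worker (void *arg)
{
  int fd = -1;
  pthread_cleanup_push (close_fd, &fd);
  /* open() is a cancellation point.  Either the thread is cancelled before
     the descriptor is allocated, or the return value is stored into fd and
     the cleanup handler above closes it once cancellation is acted upon.  */
  fd = open ((const char *) arg, O_RDONLY);
  pthread_testcancel ();        /* A later cancellation point acts on the
                                   pending request.  */
  pthread_cleanup_pop (1);      /* Also run the handler on normal exit.  */
  return NULL;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, worker, (void *) "/dev/null");
  pthread_cancel (t);
  pthread_join (t, NULL);
  return 0;
}
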
diff --git a/nptl/pthread_exit.c b/nptl/pthread_exit.c
index dc2635f827..600ab036d9 100644
--- a/nptl/pthread_exit.c
+++ b/nptl/pthread_exit.c
@@ -31,9 +31,7 @@ __pthread_exit (void *value)
                     " must be installed for pthread_exit to work\n");
   }
 
-  THREAD_SETMEM (THREAD_SELF, result, value);
-
-  __do_cancel ();
+  __do_cancel (value);
 }
 libc_hidden_def (__pthread_exit)
 weak_alias (__pthread_exit, pthread_exit)
diff --git a/nptl/pthread_setcancelstate.c b/nptl/pthread_setcancelstate.c
index 18fb42a5c4..787c45a32b 100644
--- a/nptl/pthread_setcancelstate.c
+++ b/nptl/pthread_setcancelstate.c
@@ -48,7 +48,7 @@ __pthread_setcancelstate (int state, int *oldstate)
 						&oldval, newval))
 	{
 	  if (cancel_enabled_and_canceled_and_async (newval))
-	    __do_cancel ();
+	    __do_cancel (PTHREAD_CANCELED);
 
 	  break;
 	}
diff --git a/nptl/pthread_setcanceltype.c b/nptl/pthread_setcanceltype.c
index cf441cef91..3b5b0346ef 100644
--- a/nptl/pthread_setcanceltype.c
+++ b/nptl/pthread_setcanceltype.c
@@ -48,7 +48,7 @@ __pthread_setcanceltype (int type, int *oldtype)
 	  if (cancel_enabled_and_canceled_and_async (newval))
 	    {
 	      THREAD_SETMEM (self, result, PTHREAD_CANCELED);
-	      __do_cancel ();
+	      __do_cancel (PTHREAD_CANCELED);
 	    }
 
 	  break;
diff --git a/nptl/pthread_testcancel.c b/nptl/pthread_testcancel.c
index a0197b5312..be0e8d5a78 100644
--- a/nptl/pthread_testcancel.c
+++ b/nptl/pthread_testcancel.c
@@ -25,10 +25,7 @@ ___pthread_testcancel (void)
   struct pthread *self = THREAD_SELF;
   int cancelhandling = atomic_load_relaxed (&self->cancelhandling);
   if (cancel_enabled_and_canceled (cancelhandling))
-    {
-      self->result = PTHREAD_CANCELED;
-      __do_cancel ();
-    }
+    __do_cancel (PTHREAD_CANCELED);
 }
 versioned_symbol (libc, ___pthread_testcancel, pthread_testcancel, GLIBC_2_34);
 libc_hidden_ver (___pthread_testcancel, __pthread_testcancel)
diff --git a/nptl/tst-cancel31.c b/nptl/tst-cancel31.c
new file mode 100644
index 0000000000..f9cc8245e1
--- /dev/null
+++ b/nptl/tst-cancel31.c
@@ -0,0 +1,100 @@
+/* Verify side-effects of cancellable syscalls (BZ #12683).
+   Copyright (C) 2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+/* This testcase checks whether a resource is leaked when the cancellation
+   signal arrives after the syscall has returned from kernelspace but before
+   userspace saves the return value.  The 'leaker' thread should be able to
+   close the file descriptor if the resource is already allocated, meaning
+   that if the cancellation signal arrives *after* the open syscall returns
+   from the kernel, the side effect should be visible to the application.  */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#include <support/xunistd.h>
+#include <support/xthread.h>
+#include <support/check.h>
+#include <support/temp_file.h>
+#include <support/support.h>
+#include <support/descriptors.h>
+
+static void *
+writeopener (void *arg)
+{
+  int fd;
+  for (;;)
+    {
+      fd = open (arg, O_WRONLY);
+      xclose (fd);
+    }
+  return NULL;
+}
+
+static void *
+leaker (void *arg)
+{
+  int fd = open (arg, O_RDONLY);
+  TEST_VERIFY_EXIT (fd > 0);
+  pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, 0);
+  xclose (fd);
+  return NULL;
+}
+
+static int
+do_test (void)
+{
+  enum {
+    iter_count = 1000
+  };
+
+  char *dir = support_create_temp_directory ("tst-cancel31");
+  char *name = xasprintf ("%s/fifo", dir);
+  TEST_COMPARE (mkfifo (name, 0600), 0);
+  add_temp_file (name);
+
+  struct support_descriptors *descrs = support_descriptors_list ();
+
+  srand (1);
+
+  xpthread_create (NULL, writeopener, name);
+  for (int i = 0; i < iter_count; i++)
+    {
+      pthread_t td = xpthread_create (NULL, leaker, name);
+      struct timespec ts =
+	{ .tv_nsec = rand () % 100000, .tv_sec = 0 };
+      nanosleep (&ts, NULL);
+      /* Ignore the pthread_cancel result because it might be the case
+	 that pthread_cancel is called when the thread has already
+	 exited.  */
+      pthread_cancel (td);
+      xpthread_join (td);
+    }
+
+  support_descriptors_check (descrs);
+
+  support_descriptors_free (descrs);
+
+  free (name);
+
+  return 0;
+}
+
+#include <support/test-driver.c>
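
The new test exercises exactly this window by repeatedly opening a FIFO in a
thread that is cancelled at a random point and then checking that no file
descriptor leaked.  Assuming the usual glibc build-tree conventions, it should
be possible to run it on its own from the build directory with something like:

  make test t=nptl/tst-cancel31
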