Diffstat (limited to 'nptl/allocatestack.c')
-rw-r--r--  nptl/allocatestack.c | 63
1 file changed, 1 insertion(+), 62 deletions(-)
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 46089163f4..12cd1058d4 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -291,31 +291,6 @@ queue_stack (struct pthread *stack)
     free_stacks (stack_cache_maxsize);
 }
 
-
-static int
-change_stack_perm (struct pthread *pd)
-{
-#ifdef NEED_SEPARATE_REGISTER_STACK
-  size_t pagemask = __getpagesize () - 1;
-  void *stack = (pd->stackblock
-		 + (((((pd->stackblock_size - pd->guardsize) / 2)
-		      & pagemask) + pd->guardsize) & pagemask));
-  size_t len = pd->stackblock + pd->stackblock_size - stack;
-#elif _STACK_GROWS_DOWN
-  void *stack = pd->stackblock + pd->guardsize;
-  size_t len = pd->stackblock_size - pd->guardsize;
-#elif _STACK_GROWS_UP
-  void *stack = pd->stackblock;
-  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
-#else
-# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
-#endif
-  if (__mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
-    return errno;
-
-  return 0;
-}
-
 /* Return the guard page position on allocated stack.  */
 static inline char *
 __attribute ((always_inline))
@@ -625,7 +600,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 	  if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
 				&& (prot & PROT_EXEC) == 0, 0))
 	    {
-	      int err = change_stack_perm (pd);
+	      int err = __nptl_change_stack_perm (pd);
 	      if (err != 0)
 		{
 		  /* Free the stack memory we just allocated.  */
@@ -780,42 +755,6 @@ __deallocate_stack (struct pthread *pd)
   lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
 }
 
-
-int
-__make_stacks_executable (void **stack_endp)
-{
-  /* First the main thread's stack.  */
-  int err = _dl_make_stack_executable (stack_endp);
-  if (err != 0)
-    return err;
-
-  lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
-
-  list_t *runp;
-  list_for_each (runp, &GL (dl_stack_used))
-    {
-      err = change_stack_perm (list_entry (runp, struct pthread, list));
-      if (err != 0)
-	break;
-    }
-
-  /* Also change the permission for the currently unused stacks.  This
-     might be wasted time but better spend it here than adding a check
-     in the fast path.  */
-  if (err == 0)
-    list_for_each (runp, &GL (dl_stack_cache))
-      {
-	err = change_stack_perm (list_entry (runp, struct pthread, list));
-	if (err != 0)
-	  break;
-      }
-
-  lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
-
-  return err;
-}
-
-
 /* In case of a fork() call the memory allocation in the child will be
    the same but only one thread is running.  All stacks except that of
    the one running thread are not used anymore.  We have to recycle