author    Adhemerval Zanella <adhemerval.zanella@linaro.org>  2017-08-28 11:24:35 -0300
committer Adhemerval Zanella <adhemerval.zanella@linaro.org>  2017-08-29 13:29:19 -0300
commit    01b87c656f670863ce437421b8e9278200965d38 (patch)
tree      1c51fd932a33a1b7d0b1590b55b603fbd41024fd /nptl/allocatestack.c
parent    16f138a49ad1815e113d2b5b7a87f26999ade811 (diff)
ia64: Fix thread stack allocation permission set (BZ #21672)
This patch fixes ia64 failures on thread exit by madvising the required
area while taking its disjoint stacks (NEED_SEPARATE_REGISTER_STACK)
into consideration.  The snippet that sets up the madvise call, which
advises the kernel that the area will not be used again in the near
future, is also moved to allocatestack.c (for consistency, so that all
stack management functions live in one place).
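As a minimal standalone sketch of the underlying mechanism (this is not the
glibc code itself; the region size and the amount kept below are illustrative
assumptions), MADV_DONTNEED leaves the mapping in place but lets the kernel
drop the backing pages, which later fault back in as zero-filled pages:

    /* Sketch: release the unused, page-aligned part of a mapped "stack"
       back to the kernel with madvise (hypothetical sizes).  */
    #include <assert.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main (void)
    {
      size_t pagesize = (size_t) sysconf (_SC_PAGESIZE);
      size_t stacksize = 64 * pagesize;   /* hypothetical stack size */
      size_t keep = 4 * pagesize;         /* hypothetical part still in use */

      /* Anonymous mapping standing in for a thread stack.  */
      void *mem = mmap (NULL, stacksize, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mem == MAP_FAILED)
        return 1;

      /* Advise the kernel that the low part of the region will not be
         needed soon; the pages stay mapped but may be reclaimed.  */
      size_t freesize = stacksize - keep;
      assert ((freesize & (pagesize - 1)) == 0);
      if (madvise (mem, freesize, MADV_DONTNEED) != 0)
        return 1;

      munmap (mem, stacksize);
      return 0;
    }

Because dropped pages read back as zeros, such advice may only be applied to
the part of the allocation that is not still in use by live frames or the TCB,
which is what advise_stack_range below is careful about.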

Checked on x86_64-linux-gnu and i686-linux-gnu for sanity (since no
code changes are expected for architectures that do not define
NEED_SEPARATE_REGISTER_STACK), and Sergei Trofimovich
<slyfox@gentoo.org> reported that it fixes the ia64-linux-gnu
failures.

	[BZ #21672]
	* nptl/allocatestack.c [_STACK_GROWS_DOWN] (setup_stack_prot):
	Set to use !NEED_SEPARATE_REGISTER_STACK as well.
	(advise_stack_range): New function.
	* nptl/pthread_create.c (START_THREAD_DEFN): Move the logic that
	marks the stack as no longer required to advise_stack_range in
	allocatestack.c.
Diffstat (limited to 'nptl/allocatestack.c')
-rw-r--r--  nptl/allocatestack.c  29
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 6d1bcaa294..8766debde5 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -356,7 +356,7 @@ setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
 		  const int prot)
 {
   char *guardend = guard + guardsize;
-#if _STACK_GROWS_DOWN
+#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
   /* As defined at guard_position, for architectures with downward stack
      the guard page is always at start of the allocated area.  */
   if (__mprotect (guardend, size - guardsize, prot) != 0)
@@ -372,6 +372,33 @@ setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
   return 0;
 }
 
+/* Mark the memory of the stack as usable to the kernel.  It frees everything
+   except for the space used for the TCB itself.  */
+static inline void
+__always_inline
+advise_stack_range (void *mem, size_t size, uintptr_t pd, size_t guardsize)
+{
+  uintptr_t sp = (uintptr_t) CURRENT_STACK_FRAME;
+  size_t pagesize_m1 = __getpagesize () - 1;
+#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
+  size_t freesize = (sp - (uintptr_t) mem) & ~pagesize_m1;
+  assert (freesize < size);
+  if (freesize > PTHREAD_STACK_MIN)
+    __madvise (mem, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
+#else
+  /* Page aligned start of memory to free (higher than or equal
+     to current sp plus the minimum stack size).  */
+  uintptr_t freeblock = (sp + PTHREAD_STACK_MIN + pagesize_m1) & ~pagesize_m1;
+  uintptr_t free_end = (pd - guardsize) & ~pagesize_m1;
+  if (free_end > freeblock)
+    {
+      size_t freesize = free_end - freeblock;
+      assert (freesize < size);
+      __madvise ((void*) freeblock, freesize, MADV_DONTNEED);
+    }
+#endif
+}
+
 /* Returns a usable stack for a new thread either by allocating a
    new stack or reusing a cached stack of sufficient size.
    ATTR must be non-NULL and point to a valid pthread_attr.