path: root/nptl/nptl-stack.c
/* Stack cache management for NPTL.
   Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <nptl-stack.h>
#include <ldsodefs.h>

/* Maximum size of the cache in bytes.  40 MiB by default.  */
static const size_t stack_cache_maxsize = 40 * 1024 * 1024;
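/* Editorial note (not in the original file): with this 40 MiB limit
   and a typical 8 MiB default thread stack, roughly five stacks of
   exited threads can sit in the cache before __nptl_free_stacks is
   asked to trim it back below the limit.  */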

void
__nptl_stack_list_del (list_t *elem)
{
  GL (dl_in_flight_stack) = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  GL (dl_in_flight_stack) = 0;
}
libc_hidden_def (__nptl_stack_list_del)

void
__nptl_stack_list_add (list_t *elem, list_t *list)
{
  GL (dl_in_flight_stack) = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  GL (dl_in_flight_stack) = 0;
}
libc_hidden_def (__nptl_stack_list_add)
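
/* Illustrative sketch (editorial addition, not part of the original
   file): the marker written by the two helpers above lets code that
   may observe a half-updated list -- for example the child of a
   fork () that interrupted one of the helpers -- find out which
   element was in flight.  The low bit, set only by
   __nptl_stack_list_add, distinguishes an interrupted add from an
   interrupted delete.  The function name below is hypothetical and
   the actual repair logic, which lives elsewhere in NPTL, is
   omitted.  */
#if 0
static void
decode_in_flight_stack_sketch (void)
{
  uintptr_t in_flight = GL (dl_in_flight_stack);
  if (in_flight != 0)
    {
      bool add_in_progress = (in_flight & 1) != 0;
      list_t *elem = (list_t *) (in_flight & ~(uintptr_t) 1);
      /* 'elem' is the node being inserted (low bit set) or removed
         (low bit clear); repair it according to the invariants that
         list_add and list_del maintain.  */
      (void) add_in_progress;
      (void) elem;
    }
}
#endif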

void
__nptl_free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &GL (dl_stack_cache))
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (__nptl_stack_in_use (curr))
	{
	  /* Unlink the block.  */
	  __nptl_stack_list_del (entry);

	  /* Account for the freed memory.  */
	  GL (dl_stack_cache_actsize) -= curr->stackblock_size;

	  /* Free the memory associated with the ELF TLS.  */
	  _dl_deallocate_tls (TLS_TPADJ (curr), false);

	  /* Remove this block.  This should never fail.  If it does
	     something is really wrong.  */
	  if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
	    abort ();

	  /* Maybe we have freed enough.  */
	  if (GL (dl_stack_cache_actsize) <= limit)
	    break;
	}
    }
}
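
/* Illustrative sketch (editorial addition, not part of the original
   file): callers pass the cache size they want to trim down to.
   queue_stack below passes stack_cache_maxsize; a shutdown or
   memory-release hook could pass 0 to release every cached stack
   whose thread has already exited.  The function name below is
   hypothetical.  */
#if 0
static void
drop_cached_stacks_sketch (void)
{
  lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
  __nptl_free_stacks (0);
  lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
}
#endif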

/* Add a stack which is not used anymore to the stack cache.  Must be
   called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  __nptl_stack_list_add (&stack->list, &GL (dl_stack_cache));

  GL (dl_stack_cache_actsize) += stack->stackblock_size;
  if (__glibc_unlikely (GL (dl_stack_cache_actsize) > stack_cache_maxsize))
    __nptl_free_stacks (stack_cache_maxsize);
}
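
/* Editorial note (not in the original file): "the kernel marks the
   stack as not used anymore" refers to the CLONE_CHILD_CLEARTID
   mechanism: when the thread exits, the kernel zeroes the 'tid'
   field in its pthread descriptor and wakes futex waiters on it;
   only stacks whose 'tid' has been cleared are treated as free for
   reuse or unmapping.  */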

void
__nptl_deallocate_stack (struct pthread *pd)
{
  lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  __nptl_stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (! pd->user_stack))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
}
libc_hidden_def (__nptl_deallocate_stack)
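
/* Illustrative usage sketch (editorial addition, not part of the
   original file): a thread created with a caller-supplied stack via
   pthread_attr_setstack has 'user_stack' set in its descriptor, so
   __nptl_deallocate_stack only frees the TLS data and never hands
   the application's memory to the cache; the caller remains
   responsible for releasing it.  */
#if 0
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

static void *
thread_func (void *arg)
{
  return arg;
}

static void
user_stack_example (void)
{
  size_t size = 2 * 1024 * 1024;        /* 2 MiB, caller-owned.  */
  void *stack = NULL;
  if (posix_memalign (&stack, sysconf (_SC_PAGESIZE), size) != 0)
    return;

  pthread_attr_t attr;
  pthread_attr_init (&attr);
  pthread_attr_setstack (&attr, stack, size);

  pthread_t thr;
  if (pthread_create (&thr, &attr, thread_func, NULL) == 0)
    pthread_join (&thr, NULL);

  pthread_attr_destroy (&attr);
  /* The library never munmap()s or caches this block; free it once
     the thread has been joined.  */
  free (stack);
}
#endif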