/* Compatibility code for malloc debugging and state management.
   Copyright (C) 2001-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#ifndef weak_variable
# define weak_variable weak_function
#endif
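
/* SHLIB_COMPAT (libc, INTRO, OBS) is true only while binaries linked against
   symbol versions in the range [INTRO, OBS) still have to be supported, and
   compat_symbol exports the guarded definition at that old version rather
   than as the current default symbol.  */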
#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
void (*__malloc_initialize_hook) (void);
compat_symbol (libc, __malloc_initialize_hook,
               __malloc_initialize_hook, GLIBC_2_0);
#endif
#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_34)
void weak_variable (*__after_morecore_hook) (void) = NULL;
compat_symbol (libc, __after_morecore_hook, __after_morecore_hook, GLIBC_2_0);
void *(*__morecore)(ptrdiff_t);
compat_symbol (libc, __morecore, __morecore, GLIBC_2_0);
#endif
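
/* Illustrative sketch, not part of this file: while __morecore was still a
   public interface, a program could point malloc at a custom sbrk-style
   memory source (my_pool_morecore and pool_extend are hypothetical names):

     extern void *(*__morecore) (ptrdiff_t);

     static void *
     my_pool_morecore (ptrdiff_t increment)
     {
       // Grow (or, for negative increments, shrink) a private region by
       // "increment" bytes and return a pointer to the new space; glibc's
       // default implementation wraps sbrk.
       return pool_extend (increment);
     }

     ...
     __morecore = my_pool_morecore;

   __after_morecore_hook, if non-NULL, was called after malloc had invoked
   __morecore to adjust the heap.  */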
static void *malloc_hook_ini (size_t, const void *) __THROW;
static void *realloc_hook_ini (void *, size_t, const void *) __THROW;
static void *memalign_hook_ini (size_t, size_t, const void *) __THROW;
void weak_variable (*__free_hook) (void *, const void *) = NULL;
void *weak_variable (*__malloc_hook)
  (size_t, const void *) = malloc_hook_ini;
void *weak_variable (*__realloc_hook)
  (void *, size_t, const void *) = realloc_hook_ini;
void *weak_variable (*__memalign_hook)
  (size_t, size_t, const void *) = memalign_hook_ini;

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

/* These hooks will get executed only through the interposed allocator
   functions in libc_malloc_debug.so.  This means that the calls to malloc,
   realloc, etc. will lead back into the interposed functions, which is what
   we want.

   These initial hooks are assumed to be called in a single-threaded context,
   so it is safe to reset all hooks at once upon initialization.  */

static void
generic_hook_ini (void)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __memalign_hook = NULL;
  ptmalloc_init ();
#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
#endif
  __malloc_initialized = 1;
}

static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  generic_hook_ini ();
  return malloc (sz);
}

static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  generic_hook_ini ();
  return realloc (ptr, sz);
}

static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  generic_hook_ini ();
  return memalign (alignment, sz);
}
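
/* Illustrative sketch, not part of this file: while the hooks above were
   still a public interface, an application could install a tracing
   allocator roughly like this (my_init_hook, my_malloc_hook and
   old_malloc_hook are hypothetical names):

     #include <stdio.h>
     #include <malloc.h>

     static void *(*old_malloc_hook) (size_t, const void *);

     static void *
     my_malloc_hook (size_t size, const void *caller)
     {
       void *result;
       __malloc_hook = old_malloc_hook;   // step aside to avoid recursion
       result = malloc (size);
       old_malloc_hook = __malloc_hook;   // malloc may have changed the hook
       fprintf (stderr, "malloc (%zu) returns %p\n", size, result);
       __malloc_hook = my_malloc_hook;    // reinstall the tracing hook
       return result;
     }

     static void
     my_init_hook (void)
     {
       old_malloc_hook = __malloc_hook;
       __malloc_hook = my_malloc_hook;
     }

     void (*__malloc_initialize_hook) (void) = my_init_hook;

   The *_hook_ini functions above exist so that the very first allocation
   call still runs ptmalloc_init (via generic_hook_ini) before any such
   user-installed hook takes over.  */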
#include "malloc-check.c"
#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)
/* Support for restoring dumped heaps contained in historic Emacs
   executables.  The heap saving feature (malloc_get_state) is no
   longer implemented in this version of glibc, but we have a heap
   rewriter in malloc_set_state which transforms the heap into a
   version compatible with current malloc.  */
#define MALLOC_STATE_MAGIC 0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */

struct malloc_save_state
{
  long magic;
  long version;
  mbinptr av[NBINS * 2 + 2];
  char *sbrk_base;
  int sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
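
/* Illustrative sketch, not part of this file: the historical usage pattern
   this structure supports looked roughly as follows in a dumping
   application such as old Emacs (names and error handling are
   hypothetical):

     void *state = malloc_get_state ();   // snapshot the allocator state
     // ... write the process image, including *state, to disk ...

     // later, on startup of the dumped executable:
     if (malloc_set_state (state) != 0)
       abort ();                          // dump from an incompatible malloc

   In this glibc, malloc_get_state below always fails with ENOSYS, and
   malloc_set_state no longer reinstates the saved state; it only patches
   the already-mapped dumped heap so that its chunks are treated as fake
   mmapped chunks, as described further down.  */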

/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);

int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high.  */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;
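
  /* Worked example: MALLOC_STATE_VERSION is 0x005 (major 0, minor 5), so a
     dump recording any major-0 version such as 0x004 passes the check above,
     while a hypothetical 0x100 (major 1) is rejected, because ~0xffl masks
     away the minor byte before the comparison.  */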

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking).  */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  */
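  /* The scan below assumes (as the dump format appears to guarantee) that
     the region may begin with zero words and that the first non-zero word
     is the size field of that lowest chunk, whose user data therefore
     starts one word later (hence mem2chunk (candidate + 1)).  */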
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path.  */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range.  */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;
  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
#endif /* SHLIB_COMPAT */
/*
* Local variables:
* c-basic-offset: 2
* End:
*/