1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
|
/* Malloc implementation for multiple threads without lock contention.
Copyright (C) 2001-2020 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; see the file COPYING.LIB. If
not, see <https://www.gnu.org/licenses/>. */
/* What to do if the standard debugging hooks are in place and a
corrupt pointer is detected: do nothing (0), print an error message
(1), or call abort() (2). */
/* Hooks for debugging versions. The initial hooks just call the
initialization routine, then do the normal work. */
/* First-call hook for malloc: clear the hook so later calls go
   straight to __libc_malloc, run the one-time ptmalloc
   initialization, then satisfy this request.  CALLER is unused.  */
static void *
malloc_hook_ini (size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  ptmalloc_init ();
  return __libc_malloc (sz);
}
/* First-call hook for realloc: clear both the malloc and realloc
   hooks, run the one-time ptmalloc initialization, then forward to
   __libc_realloc.  CALLER is unused.  */
static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init ();
  return __libc_realloc (ptr, sz);
}
/* First-call hook for memalign: clear the hook, run the one-time
   ptmalloc initialization, then forward to __libc_memalign.
   CALLER is unused.  */
static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
  __memalign_hook = NULL;
  ptmalloc_init ();
  return __libc_memalign (alignment, sz);
}
/* Whether we are using malloc checking.  Set to 1 by
   __malloc_check_init below and reset to 0 by malloc_set_state.  */
static int using_malloc_checking;
/* Activate a standard set of debugging hooks.  All allocation entry
   points are redirected to the *_check wrappers below, which append a
   magic byte to each allocation to catch overruns and invalid or
   double frees.  */
void
__malloc_check_init (void)
{
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
}
/* When memory is tagged, the checking data is stored in the user part
   of the chunk.  We can't rely on the user not having modified the
   tags, so fetch the tag at each location before dereferencing
   it.
   NOTE (review): TAG_AT presumably degenerates to the plain address
   when memory tagging is disabled -- confirm against the
   malloc-internal headers.  */
#define SAFE_CHAR_OFFSET(p,offset) \
  ((unsigned char *) TAG_AT (((unsigned char *) p) + offset))
/* A simple, standard set of debugging hooks. Overhead is `only' one
byte per chunk; still this will catch most cases of double frees or
overruns. The goal here is to avoid obscure crashes due to invalid
usage, unlike in the MALLOC_DEBUG code. */
/* Derive the per-chunk magic byte from the chunk address P by mixing
   two shifted copies of the address.  The value 1 is reserved as a
   block-length byte (see mem2mem_check), so it is never returned;
   2 is produced in its place.  */
static unsigned char
magicbyte (const void *p)
{
  uintptr_t addr = (uintptr_t) p;
  unsigned char magic = (unsigned char) (((addr >> 3) ^ (addr >> 11)) & 0xFF);

  /* Do not return 1.  See the comment in mem2mem_check().  */
  return magic == 1 ? 2 : magic;
}
/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  Returns the user-visible (requested) size of chunk P;
   aborts via malloc_printerr on a broken length chain.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = magicbyte (p);

  /* Only meaningful while the checking hooks are installed.  */
  assert (using_malloc_checking == 1);

  /* Walk the chain of block-length bytes backwards from the top of the
     chunk until the magic byte is found.  */
  for (size = CHUNK_AVAILABLE_SIZE (p) - 1;
       (c = *SAFE_CHAR_OFFSET (p, size)) != magic;
       size -= c)
    {
      /* A zero-length byte or a step that would cross into the chunk
         header means the chain is corrupt.  */
      if (c <= 0 || size < (c + CHUNK_HDR_SZ))
        malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size. */
  return size - CHUNK_HDR_SZ;
}
/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz.  Writes the magic
   byte immediately after the REQ_SZ user bytes and fills the slack
   above it with a chain of block-length bytes that
   malloc_check_get_size / mem2chunk_check can walk back down.
   Returns PTR unchanged (or NULL if PTR is NULL).  */
static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ;

  /* Lay down the length chain from the top of the chunk downwards;
     each byte records the distance (at most 0xff) to the next one.  */
  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      *SAFE_CHAR_OFFSET (m_ptr, i) = block_sz;
    }

  /* The magic byte sits right after the requested region.  */
  *SAFE_CHAR_OFFSET (m_ptr, req_sz) = magic;
  return (void *) m_ptr;
}
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL.  On
   success the magic byte is inverted (so a second free/realloc of the
   same pointer fails the check) and, if MAGIC_P is non-NULL, the
   address of the magic byte is stored there so the caller can undo
   the inversion.  */
static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory. */
      int contig = contiguous (&main_arena);
      /* Reject pointers outside the sbrk region (for a contiguous
         arena), undersized or misaligned chunks, chunks not marked in
         use, and chunks whose previous-neighbour linkage is
         inconsistent.  */
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      /* Walk the block-length chain down to the magic byte; a broken
         chain means the pointer is bogus or the chunk was overrun.  */
      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
           (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
           sz -= c)
        {
          if (c == 0 || sz < (c + CHUNK_HDR_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first. */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      /* Same length-chain walk as the conventional-heap case.  */
      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
           (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
           sz -= c)
        {
          if (c == 0 || sz < (c + CHUNK_HDR_SZ))
            return NULL;
        }
    }

  /* Invert the magic byte; see the function comment above.  */
  unsigned char* safe_p = SAFE_CHAR_OFFSET (p, sz);
  *safe_p ^= 0xFF;
  if (magic_p)
    *magic_p = safe_p;
  return p;
}
/* Check for corruption of the top chunk of the main arena; abort via
   malloc_printerr if it looks damaged.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  /* The initial (pre-sbrk) top chunk is trivially fine.  */
  if (t == initial_top (&main_arena))
    return;

  /* A healthy top chunk is not mmapped, is at least MINSIZE, has its
     prev-inuse bit set, and -- for a contiguous arena -- ends exactly
     at the current end of the sbrk region.  */
  if (!chunk_is_mmapped (t)
      && chunksize (t) >= MINSIZE
      && prev_inuse (t)
      && (!contiguous (&main_arena)
          || (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}
/* malloc () entry point used while malloc checking is active.
   Allocates one extra byte for the trailing magic marker and
   instruments the result via mem2mem_check.  CALLER is unused.  */
static void *
malloc_check (size_t sz, const void *caller)
{
  size_t nb;

  /* Reserve room for the magic byte; reject requests for which
     SZ + 1 would wrap.  */
  if (__builtin_add_overflow (sz, 1, &nb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  void *mem = _int_malloc (&main_arena, nb);
  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (TAG_NEW_USABLE (mem), sz);
}
/* free () entry point used while malloc checking is active.
   Validates MEM via mem2chunk_check (aborting on an invalid or double
   free) before releasing it.  CALLER is unused.  */
static void
free_check (void *mem, const void *caller)
{
  mchunkptr p;

  if (!mem)
    return;

  /* Save errno so this free does not clobber it; restored below.  */
  int err = errno;

#ifdef USE_MTAG
  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection.  */
  *(volatile char *)mem;
#endif

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      /* Drop the arena lock first; the chunk is handed to
         munmap_chunk.  */
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
    }
  else
    {
      /* Mark the chunk as belonging to the library again. */
      (void)TAG_REGION (chunk2rawmem (p), CHUNK_AVAILABLE_SIZE (p)
                                          - CHUNK_HDR_SZ);
      _int_free (&main_arena, p, 1);
      __libc_lock_unlock (main_arena.mutex);
    }
  __set_errno (err);
}
/* realloc () entry point used while malloc checking is active.
   Validates OLDMEM via mem2chunk_check (aborting on an invalid
   pointer), resizes, and re-instruments the result with a trailing
   magic byte.  Returns NULL with the old block intact on failure.
   CALLER is unused.  */
static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T chnb;
  void *newmem = 0;
  unsigned char *magic_p;
  size_t rb;

  /* Account for the extra magic byte; fail if BYTES + 1 wraps.  */
  if (__builtin_add_overflow (bytes, 1, &rb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes, NULL);

  /* realloc (p, 0) behaves as free (p).  */
  if (bytes == 0)
    {
      free_check (oldmem, NULL);
      return NULL;
    }

#ifdef USE_MTAG
  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection.  */
  *(volatile char *)oldmem;
#endif

  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  /* BUG FIX: take the arena lock before the request-size check.
     Previously "goto invert" on checked_request2size failure jumped
     straight to the exit path, which modified *magic_p and called
     __libc_lock_unlock without the lock ever having been taken on
     this path.  Also report ENOMEM like the other failure paths.  */
  __libc_lock_lock (main_arena.mutex);
  if (!checked_request2size (rb, &chnb))
    {
      __set_errno (ENOMEM);
      goto invert;
    }

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, chnb);
      if (newp)
        newmem = chunk2mem (newp);
      else
#endif
      {
        /* Note the extra SIZE_SZ overhead. */
        if (oldsize - SIZE_SZ >= chnb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free. */
            top_check ();
            newmem = _int_malloc (&main_arena, rb);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      newmem = _int_realloc (&main_arena, oldp, oldsize, chnb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns about magic_p may be used uninitialized.  But we never
     reach here if magic_p is uninitialized.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
 invert:
  if (newmem == NULL)
    *magic_p ^= 0xFF;

  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (TAG_NEW_USABLE (newmem), bytes);
}
/* memalign () entry point used while malloc checking is active.
   CALLER is unused.  */
static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  /* Plain malloc already satisfies alignments up to
     MALLOC_ALIGNMENT.  */
  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow. */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  Round up to the next power of
     two, starting from 2 * MALLOC_ALIGNMENT.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  /* Allocate one extra byte for the trailing magic marker.  */
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (TAG_NEW_USABLE (mem), bytes);
}
#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)
/* Support for restoring dumped heaps contained in historic Emacs
executables. The heap saving feature (malloc_get_state) is no
longer implemented in this version of glibc, but we have a heap
rewriter in malloc_set_state which transforms the heap into a
version compatible with current malloc. */
#define MALLOC_STATE_MAGIC 0x444c4541l
#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
/* Layout of the heap state dumped by historic malloc_get_state
   implementations.  Only consumed (never produced) here, by
   malloc_set_state below.  */
struct malloc_save_state
{
  long magic;                   /* Must equal MALLOC_STATE_MAGIC.  */
  long version;                 /* MALLOC_STATE_VERSION at dump time.  */
  mbinptr av[NBINS * 2 + 2];    /* Saved bins; av[2] is the top chunk.  */
  char *sbrk_base;              /* Start of the dumped sbrk heap.  */
  int sbrked_mem_bytes;         /* Size of the dumped sbrk heap.  */
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int n_mmaps_max;
  unsigned long mmap_threshold;
  int check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;  /* Always 0, for backwards compatibility. */
  unsigned int n_mmaps;
  unsigned int max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int using_malloc_checking;
  unsigned long max_fast;
  unsigned long arena_test;
  unsigned long arena_max;
  unsigned long narenas;
};
/* Dummy implementation which always fails.  We need to provide this
   symbol so that existing Emacs binaries continue to work with
   BIND_NOW.  Heap dumping is no longer supported, so fail with
   ENOSYS.  */
void *
attribute_compat_text_section
malloc_get_state (void)
{
  __set_errno (ENOSYS);
  return NULL;
}
compat_symbol (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
/* Rewrite a heap previously dumped by malloc_get_state (see the
   comment above struct malloc_save_state) so that it can coexist with
   the current malloc.  Returns 0 on success, -1 on a bad magic value,
   -2 on an unsupported major version.  */
int
attribute_compat_text_section
malloc_set_state (void *msptr)
{
  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;

  if (ms->magic != MALLOC_STATE_MAGIC)
    return -1;

  /* Must fail if the major version is too high. */
  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
    return -2;

  /* We do not need to perform locking here because malloc_set_state
     must be called before the first call into the malloc subsystem
     (usually via __malloc_initialize_hook).  pthread_create always
     calls calloc and thus must be called only afterwards, so there
     cannot be more than one thread when we reach this point.  */

  /* Disable the malloc hooks (and malloc checking). */
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  __free_hook = NULL;
  __memalign_hook = NULL;
  using_malloc_checking = 0;

  /* Patch the dumped heap.  We no longer try to integrate into the
     existing heap.  Instead, we mark the existing chunks as mmapped.
     Together with the update to dumped_main_arena_start and
     dumped_main_arena_end, realloc and free will recognize these
     chunks as dumped fake mmapped chunks and never free them.  */

  /* Find the chunk with the lowest address within the heap.  Skip any
     leading zero words; the first non-zero word is taken to be a
     chunk size field.  */
  mchunkptr chunk = NULL;
  {
    size_t *candidate = (size_t *) ms->sbrk_base;
    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
    while (candidate < end)
      if (*candidate != 0)
        {
          chunk = mem2chunk ((void *) (candidate + 1));
          break;
        }
      else
        ++candidate;
  }
  if (chunk == NULL)
    return 0;

  /* Iterate over the dumped heap and patch the chunks so that they
     are treated as fake mmapped chunks.  */
  mchunkptr top = ms->av[2];
  while (chunk < top)
    {
      if (inuse (chunk))
        {
          /* Mark chunk as mmapped, to trigger the fallback path. */
          size_t size = chunksize (chunk);
          set_head (chunk, size | IS_MMAPPED);
        }
      chunk = next_chunk (chunk);
    }

  /* The dumped fake mmapped chunks all lie in this address range. */
  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
  dumped_main_arena_end = top;
  return 0;
}
compat_symbol (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
#endif /* SHLIB_COMPAT */
/*
* Local variables:
* c-basic-offset: 2
* End:
*/
|