/* Repeating a memory blob, with alias mapping optimization.
   Copyright (C) 2018-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <support/blob_repeat.h>
#include <support/check.h>
#include <support/test-driver.h>
#include <support/support.h>
#include <support/xunistd.h>
#include <sys/mman.h>
#include <unistd.h>
#include <wchar.h>

/* Small allocations should use malloc directly instead of the mmap
   optimization because mappings carry a lot of overhead.  */
static const size_t maximum_small_size = 4 * 1024 * 1024;
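
/* Illustrative usage sketch (hypothetical element values; not part
   of this file's interface): a test that needs a large repeating
   buffer could write

     struct support_blob_repeat blob
       = support_blob_repeat_allocate ("12345", 5, 1000 * 1000);
     if (blob.start == NULL)
       FAIL_EXIT1 ("repeat allocation failed: %m");
     ...use blob.size bytes at blob.start...
     support_blob_repeat_free (&blob);

   The allocation transparently uses malloc or alias mappings,
   depending on the total size.  */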

/* Internal helper for fill.  */
static void
fill0 (char *target, const char *element, size_t element_size,
       size_t count)
{
  while (count > 0)
    {
      memcpy (target, element, element_size);
      target += element_size;
      --count;
    }
}

/* Fill the buffer at TARGET with COUNT copies of the ELEMENT_SIZE
   bytes starting at ELEMENT.  */
static void
fill (char *target, const char *element, size_t element_size,
      size_t count)
{
  if (element_size == 0 || count == 0)
    return;
  else if (element_size == 1)
    memset (target, element[0], count);
  else if (element_size == sizeof (wchar_t))
    {
      wchar_t wc;
      memcpy (&wc, element, sizeof (wc));
      wmemset ((wchar_t *) target, wc, count);
    }
  else if (element_size < 1024 && count > 4096)
    {
      /* Use larger copies for really small element sizes.  */
      char buffer[8192];
      size_t buffer_count = sizeof (buffer) / element_size;
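      /* Since element_size is less than 1024, at least eight whole
         elements fit into the 8192-byte buffer, so buffer_count is
         never zero.  */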
      fill0 (buffer, element, element_size, buffer_count);
      while (count > 0)
        {
          size_t copy_count = buffer_count;
          if (copy_count > count)
            copy_count = count;
          size_t copy_bytes = copy_count * element_size;
          memcpy (target, buffer, copy_bytes);
          target += copy_bytes;
          count -= copy_count;
        }
    }
  else
    fill0 (target, element, element_size, count);
}

/* Use malloc instead of mmap for small allocations and unusual size
   combinations.  */
static struct support_blob_repeat
allocate_malloc (size_t total_size, const void *element, size_t element_size,
                 size_t count)
{
  void *buffer = malloc (total_size);
  if (buffer == NULL)
    return (struct support_blob_repeat) { 0 };
  fill (buffer, element, element_size, count);
  return (struct support_blob_repeat)
    {
      .start = buffer,
      .size = total_size,
      .use_malloc = true
    };
}

/* Return the least common multiple of PAGE_SIZE and ELEMENT_SIZE,
   avoiding overflow.  This assumes that PAGE_SIZE is a power of
   two.  */
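/* For example, with PAGE_SIZE == 4096 and ELEMENT_SIZE == 6, the two
   sizes share one trailing zero bit, so the result is
   (4096 >> 1) * 6 == 12288 == lcm (4096, 6).  */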
static size_t
minimum_stride_size (size_t page_size, size_t element_size)
{
  TEST_VERIFY_EXIT (page_size > 0);
  TEST_VERIFY_EXIT (element_size > 0);

  /* Compute the number of trailing zeros common to both sizes.  */
  unsigned int common_zeros = __builtin_ctzll (page_size | element_size);

  /* In the product, this power of two appears twice, but in the least
     common multiple, it appears only once.  Therefore, shift one
     factor.  */
  size_t multiple;
  if (__builtin_mul_overflow (page_size >> common_zeros, element_size,
			      &multiple))
    return 0;
  return multiple;
}

/* Allocations larger than maximum_small_size potentially use mmap
   with alias mappings.  If SHARED, the alias mappings are created
   using MAP_SHARED instead of MAP_PRIVATE.  */
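/* For instance, a 64 MiB allocation with a 4 MiB stride is backed by
   16 alias mappings of the same file range [0, 4 MiB), which all
   share the same page-cache pages (until a MAP_PRIVATE alias is
   written to).  */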
static struct support_blob_repeat
allocate_big (size_t total_size, const void *element, size_t element_size,
              size_t count, bool shared)
{
  unsigned long page_size = xsysconf (_SC_PAGESIZE);
  size_t stride_size = minimum_stride_size (page_size, element_size);
  if (stride_size == 0)
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }

  /* Ensure that the stride size is at least maximum_small_size.  This
     is necessary to reduce the number of distinct mappings.  */
  if (stride_size < maximum_small_size)
    stride_size
      = ((maximum_small_size + stride_size - 1) / stride_size) * stride_size;
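  /* For example, a 4096-byte page and a 6-byte element give a
     12288-byte minimum stride, which the rounding above turns into
     342 * 12288 == 4202496 bytes.  */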

  if (stride_size > total_size)
    /* The mmap optimization would not save anything.  */
    return allocate_malloc (total_size, element, element_size, count);

  /* Reserve the memory region.  If we cannot create the mapping,
     there is no reason to set up the backing file.  */
  void *target = mmap (NULL, total_size, PROT_NONE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (target == MAP_FAILED)
    return (struct support_blob_repeat) { 0 };

  /* Create the backing file for the repeated mapping.  Call mkstemp
     directly and unlink the file immediately, so that the resources
     backing the temporary file are released as soon as
     support_blob_repeat_free removes the last mapping.  Using
     create_temp_file would result in a warning during post-test
     cleanup.  */
  int fd;
  {
    char *temppath = xasprintf ("%s/support_blob_repeat-XXXXXX", test_dir);
    fd = mkstemp (temppath);
    if (fd < 0)
      FAIL_EXIT1 ("mkstemp (\"%s\"): %m", temppath);
    xunlink (temppath);
    free (temppath);
  }

  /* Make sure that there is backing storage, so that the fill
     operation will not fault.  */
  int ret = posix_fallocate (fd, 0, stride_size);
  if (ret != 0)
    FAIL_EXIT1 ("posix_fallocate (%zu): %s", stride_size,
                strerror (ret));

  /* The stride size must still be a multiple of the page size and
     element size.  */
  TEST_VERIFY_EXIT ((stride_size % page_size) == 0);
  TEST_VERIFY_EXIT ((stride_size % element_size) == 0);

  /* Fill the backing store.  */
  {
    void *ptr = mmap (target, stride_size, PROT_READ | PROT_WRITE,
                      MAP_FIXED | MAP_FILE | MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED)
      {
        int saved_errno = errno;
        xmunmap (target, total_size);
        xclose (fd);
        errno = saved_errno;
        return (struct support_blob_repeat) { 0 };
      }
    if (ptr != target)
      FAIL_EXIT1 ("mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);

    /* Write the repeating data.  */
    fill (target, element, element_size, stride_size / element_size);

    /* Return to a PROT_NONE mapping, just to be on the safe side.  */
    ptr = mmap (target, stride_size, PROT_NONE,
                MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr == MAP_FAILED)
      FAIL_EXIT1 ("Failed to reinstate PROT_NONE mapping: %m");
    if (ptr != target)
      FAIL_EXIT1 ("PROT_NONE mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);
  }

  /* Create the alias mappings.  */
  {
    size_t remaining_size = total_size;
    char *current = target;
    int flags = MAP_FIXED | MAP_FILE;
    if (shared)
      flags |= MAP_SHARED;
    else
      flags |= MAP_PRIVATE;
#ifdef MAP_NORESERVE
    flags |= MAP_NORESERVE;
#endif
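    /* MAP_NORESERVE, where available, avoids reserving swap space
       for each alias of the same stride.  */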
    while (remaining_size > 0)
      {
        size_t to_map = stride_size;
        if (to_map > remaining_size)
          to_map = remaining_size;
        void *ptr = mmap (current, to_map, PROT_READ | PROT_WRITE,
                          flags, fd, 0);
        if (ptr == MAP_FAILED)
          {
            int saved_errno = errno;
            xmunmap (target, total_size);
            xclose (fd);
            errno = saved_errno;
            return (struct support_blob_repeat) { 0 };
          }
        if (ptr != current)
          FAIL_EXIT1 ("alias mapping of %zu bytes moved from %p to %p",
                      to_map, current, ptr);
        remaining_size -= to_map;
        current += to_map;
      }
  }

  xclose (fd);

  return (struct support_blob_repeat)
    {
      .start = target,
      .size = total_size,
      .use_malloc = false
    };
}

/* Common implementation of support_blob_repeat_allocate and
   support_blob_repeat_allocate_shared below.  */
static struct support_blob_repeat
repeat_allocate (const void *element, size_t element_size,
		 size_t count, bool shared)
{
  size_t total_size;
  if (__builtin_mul_overflow (element_size, count, &total_size))
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }
  if (total_size <= maximum_small_size)
    return allocate_malloc (total_size, element, element_size, count);
  else
    return allocate_big (total_size, element, element_size, count, shared);
}

struct support_blob_repeat
support_blob_repeat_allocate (const void *element, size_t element_size,
                              size_t count)
{
  return repeat_allocate (element, element_size, count, false);
}

struct support_blob_repeat
support_blob_repeat_allocate_shared (const void *element, size_t element_size,
				     size_t count)
{
  return repeat_allocate (element, element_size, count, true);
}

void
support_blob_repeat_free (struct support_blob_repeat *blob)
{
  if (blob->size > 0)
    {
      int saved_errno = errno;
      if (blob->use_malloc)
        free (blob->start);
      else
        xmunmap (blob->start, blob->size);
      errno = saved_errno;
    }
  *blob = (struct support_blob_repeat) { 0 };
}