author     Rongwei Wang <rongwei.wang@linux.alibaba.com>   2021-12-10 20:39:10 +0800
committer  H.J. Lu <hjl.tools@gmail.com>                   2021-12-10 11:25:37 -0800
commit     718fdd87b1b98ef88e883a37d9c18867256fa5a4 (patch)
tree       a6448e52abdd289a43acd8a0d77efc645a0668f7 /elf
parent     2e75604f8337fa4332977f72a8f6726309679edf (diff)
elf: Properly align PT_LOAD segments [BZ #28676]
When the PT_LOAD segment alignment is larger than the page size, allocate
enough space to ensure that the segment can be properly aligned.  This
change makes it simple for code segments to use huge pages.

This fixes [BZ #28676].

Signed-off-by: Xu Yu <xuyu@linux.alibaba.com>
Signed-off-by: Rongwei Wang <rongwei.wang@linux.alibaba.com>
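
For reference, here is a minimal user-space sketch of the same
over-allocate-and-trim idea that the patch applies in _dl_map_segment
below.  It is not glibc code: map_file_aligned, the PROT_READ
protection and the error handling are illustrative assumptions, and
align is assumed to be a power of two with offset page-aligned, as
they are for a PT_LOAD segment.

/* Sketch of over-allocate-and-trim: reserve extra address space with
   PROT_NONE, place the real file mapping at the first suitably
   aligned address inside the reservation with MAP_FIXED, then unmap
   the unused head and tail.  */
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>

static void *
map_file_aligned (int fd, off_t offset, size_t length, size_t align)
{
  /* Reserve enough room that an aligned start always fits.  */
  size_t reserve = length >= align ? length + align : 2 * align;
  char *start = mmap (NULL, reserve, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (start == MAP_FAILED)
    return MAP_FAILED;

  uintptr_t aligned = ((uintptr_t) start + align - 1) & ~(uintptr_t) (align - 1);
  char *mapping = mmap ((void *) aligned, length, PROT_READ,
                        MAP_PRIVATE | MAP_FIXED, fd, offset);
  if (mapping == MAP_FAILED)
    {
      munmap (start, reserve);
      return MAP_FAILED;
    }

  /* Return the unused head and tail of the reservation to the kernel.  */
  if (mapping > start)
    munmap (start, mapping - start);
  size_t tail = (start + reserve) - (mapping + length);
  if (tail != 0)
    munmap (mapping + length, tail);
  return mapping;
}

In the patch itself the same steps use glibc's internal __mmap and
__munmap wrappers with MAP_COPY|MAP_FILE, and the fast path keeps the
original single-mmap behaviour whenever p_align does not exceed the
page size.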
Diffstat (limited to 'elf')
-rw-r--r--  elf/dl-load.c          |  2
-rw-r--r--  elf/dl-load.h          |  3
-rw-r--r--  elf/dl-map-segments.h  | 50
3 files changed, 50 insertions(+), 5 deletions(-)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index bf8957e73c..721593135e 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1,5 +1,6 @@
 /* Map in a shared object's segments from the file.
    Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   Copyright The GNU Toolchain Authors.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -1150,6 +1151,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
 	  c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
 	  c->dataend = ph->p_vaddr + ph->p_filesz;
 	  c->allocend = ph->p_vaddr + ph->p_memsz;
+	  c->mapalign = ph->p_align;
 	  c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));
 
 	  /* Determine whether there is a gap between the last segment
diff --git a/elf/dl-load.h b/elf/dl-load.h
index e329d49a81..e6dabcb336 100644
--- a/elf/dl-load.h
+++ b/elf/dl-load.h
@@ -1,5 +1,6 @@
 /* Map in a shared object's segments from the file.
    Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   Copyright The GNU Toolchain Authors.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -74,7 +75,7 @@ ELF_PREFERRED_ADDRESS_DATA;
    Its details have been expanded out and converted.  */
 struct loadcmd
 {
-  ElfW(Addr) mapstart, mapend, dataend, allocend;
+  ElfW(Addr) mapstart, mapend, dataend, allocend, mapalign;
   ElfW(Off) mapoff;
   int prot;                             /* PROT_* bits.  */
 };
diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
index f9fb110ee3..70a4c40695 100644
--- a/elf/dl-map-segments.h
+++ b/elf/dl-map-segments.h
@@ -1,5 +1,6 @@
 /* Map in a shared object's segments.  Generic version.
    Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   Copyright The GNU Toolchain Authors.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +19,50 @@
 
 #include <dl-load.h>
 
+/* Map a segment and align it properly.  */
+
+static __always_inline ElfW(Addr)
+_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
+		 const size_t maplength, int fd)
+{
+  if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
+    return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
+				MAP_COPY|MAP_FILE, fd, c->mapoff);
+
+  /* If the segment alignment > the page size, allocate enough space to
+     ensure that the segment can be properly aligned.  */
+  ElfW(Addr) maplen = (maplength >= c->mapalign
+		       ? (maplength + c->mapalign)
+		       : (2 * c->mapalign));
+  ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
+					      PROT_NONE,
+					      MAP_ANONYMOUS|MAP_PRIVATE,
+					      -1, 0);
+  if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
+    return map_start;
+
+  ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
+  map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
+					   maplength, c->prot,
+					   MAP_COPY|MAP_FILE|MAP_FIXED,
+					   fd, c->mapoff);
+  if (__glibc_unlikely ((void *) map_start_aligned == MAP_FAILED))
+    __munmap ((void *) map_start, maplen);
+  else
+    {
+      /* Unmap the unused regions.  */
+      ElfW(Addr) delta = map_start_aligned - map_start;
+      if (delta)
+	__munmap ((void *) map_start, delta);
+      ElfW(Addr) map_end = map_start_aligned + maplength;
+      delta = map_start + maplen - map_end;
+      if (delta)
+	__munmap ((void *) map_end, delta);
+    }
+
+  return map_start_aligned;
+}
+
 /* This implementation assumes (as does the corresponding implementation
    of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
    are always laid out with all segments contiguous (or with gaps
@@ -53,10 +98,7 @@ _dl_map_segments (struct link_map *l, int fd,
            - MAP_BASE_ADDR (l));
 
       /* Remember which part of the address space this object uses.  */
-      l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
-                                            c->prot,
-                                            MAP_COPY|MAP_FILE,
-                                            fd, c->mapoff);
+      l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
       if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
         return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
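
A rough way to observe the effect (this is not glibc's own test for
BZ #28676): build a shared object whose PT_LOAD segments request, say,
2 MiB alignment, e.g. by linking it with -Wl,-z,max-page-size=0x200000,
load it, and walk the program headers with dl_iterate_phdr.
check_load_align below is an illustrative name, not part of the patch.

/* Report every loaded PT_LOAD segment whose p_align exceeds the page
   size and whether its load bias (dlpi_addr) is a multiple of that
   alignment.  That follows from the aligned mapping start when the
   object's first PT_LOAD begins at virtual address 0, the usual case
   for a shared object.  */
#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>
#include <unistd.h>

static int
check_load_align (struct dl_phdr_info *info, size_t size, void *data)
{
  (void) size;
  (void) data;
  unsigned long pagesize = (unsigned long) sysconf (_SC_PAGESIZE);
  for (size_t i = 0; i < info->dlpi_phnum; i++)
    {
      const ElfW(Phdr) *ph = &info->dlpi_phdr[i];
      if (ph->p_type == PT_LOAD && ph->p_align > pagesize)
        printf ("%s: p_align %#lx, load bias %#lx (%s)\n",
                info->dlpi_name[0] != '\0' ? info->dlpi_name : "main program",
                (unsigned long) ph->p_align,
                (unsigned long) info->dlpi_addr,
                info->dlpi_addr % ph->p_align == 0 ? "aligned" : "misaligned");
    }
  return 0;
}

int
main (void)
{
  return dl_iterate_phdr (check_load_align, NULL);
}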