author     Richard Earnshaw <rearnsha@arm.com>  2020-12-21 15:03:03 +0000
committer  Richard Earnshaw <rearnsha@arm.com>  2020-12-21 15:25:25 +0000
commit     bde4949b6b342641681a22cf6092dbc2f9d1d2c7 (patch)
tree       b6cb43ddcf9bf866afc095863da036cae1d6003c /sysdeps/unix/sysv/linux/aarch64/cpu-features.c
parent     0d1bafdcb62187a1535618c71b840672308ba07d (diff)
aarch64: Add sysv specific enabling code for memory tagging
Add various defines and stubs for enabling MTE on AArch64 sysv-like
systems such as Linux.  The HWCAP feature bit is copied over in the
same way as other feature bits.  Similarly, we add a new wrapper header
for mman.h to define the PROT_MTE flag that can be used with mmap and
related functions.
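
As a sketch of how the new protection flag would be used (assuming
<sys/mman.h> pulls in the new wrapper so PROT_MTE is visible, and an
MTE-capable kernel; the helper name here is illustrative):

  #include <stddef.h>
  #include <sys/mman.h>

  /* Map an anonymous region whose granules carry allocation tags, so
     that accesses through pointers with mismatched tags are checked.  */
  static void *
  map_tagged (size_t size)
  {
    return mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_MTE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  }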

We add a new field to struct cpu_features that can be used, for
example, to check whether or not certain ifunc'd routines should be
bound to MTE-safe versions.
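
A minimal sketch of that kind of check (all names below are
illustrative, not glibc's actual identifiers; a real resolver would be
handed the cpu_features data by the dynamic loader):

  #include <string.h>

  /* Stand-ins for the generic and MTE-safe variants; a real MTE-safe
     routine avoids reading beyond the 16-byte tag granules it owns,
     while the plain library call stands in here.  */
  static char *
  strcpy_generic (char *dst, const char *src)
  {
    return strcpy (dst, src);
  }

  static char *
  strcpy_mte_safe (char *dst, const char *src)
  {
    return strcpy (dst, src);
  }

  typedef char *(*strcpy_fn) (char *, const char *);

  /* Bind to the MTE-safe variant whenever tagging is enabled.  */
  static strcpy_fn
  select_strcpy (unsigned int mte_state)
  {
    return mte_state != 0 ? strcpy_mte_safe : strcpy_generic;
  }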

Finally, if we detect that MTE should be enabled (i.e. via the glibc
tunable), we enable MTE during startup as required.
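
For example, since the tunable read below is TUNABLE_GET (glibc, mem,
tagging, ...), running a program with GLIBC_TUNABLES=glibc.mem.tagging=3
should enable tagging with synchronous tag-check faults, while a value
of 1 selects the asynchronous mode (per the prctl logic in the hunk
below).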

Support in the Linux kernel was added in version 5.10.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
Diffstat (limited to 'sysdeps/unix/sysv/linux/aarch64/cpu-features.c')
-rw-r--r--  sysdeps/unix/sysv/linux/aarch64/cpu-features.c | 30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
index b9ab827aca..bd899c4b09 100644
--- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
+++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
@@ -19,10 +19,17 @@
 #include <cpu-features.h>
 #include <sys/auxv.h>
 #include <elf/dl-hwcaps.h>
+#include <sys/prctl.h>
 
 #define DCZID_DZP_MASK (1 << 4)
 #define DCZID_BS_MASK (0xf)
 
+/* The maximal set of permitted tags that the MTE random tag generation
+   instruction may use.  We exclude tag 0 because a) we want to reserve
+   that for the libc heap structures and b) it makes it easier to see
+   when pointers have been correctly tagged.  */
+#define MTE_ALLOWED_TAGS (0xfffe << PR_MTE_TAG_SHIFT)
+
 #if HAVE_TUNABLES
 struct cpu_list
 {
@@ -86,4 +93,27 @@ init_cpu_features (struct cpu_features *cpu_features)
 
   /* Check if BTI is supported.  */
   cpu_features->bti = GLRO (dl_hwcap2) & HWCAP2_BTI;
+
+  /* Set up memory tagging support if the HW and kernel support it, and if
+     the user has requested it.  */
+  cpu_features->mte_state = 0;
+
+#ifdef USE_MTAG
+# if HAVE_TUNABLES
+  int mte_state = TUNABLE_GET (glibc, mem, tagging, unsigned, 0);
+  cpu_features->mte_state = (GLRO (dl_hwcap2) & HWCAP2_MTE) ? mte_state : 0;
+  /* If we lack the MTE feature, disable the tunable, since it will
+     otherwise cause instructions that won't run on this CPU to be used.  */
+  TUNABLE_SET (glibc, mem, tagging, unsigned, cpu_features->mte_state);
+# endif
+
+  if (cpu_features->mte_state & 2)
+    __prctl (PR_SET_TAGGED_ADDR_CTRL,
+	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | MTE_ALLOWED_TAGS),
+	     0, 0, 0);
+  else if (cpu_features->mte_state)
+    __prctl (PR_SET_TAGGED_ADDR_CTRL,
+	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC | MTE_ALLOWED_TAGS),
+	     0, 0, 0);
+#endif
 }
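
As a quick check of what the startup code above actually enabled (a
sketch, not part of the patch; it assumes kernel headers new enough to
define the PR_MTE_* constants), a test program can read back the
tagged-address control word:

  #include <stdio.h>
  #include <sys/prctl.h>

  int
  main (void)
  {
    /* Returns the current PR_SET_TAGGED_ADDR_CTRL setting, or -1.  */
    int ctrl = prctl (PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
    if (ctrl < 0)
      {
        perror ("PR_GET_TAGGED_ADDR_CTRL");
        return 1;
      }
    printf ("tagged addressing: %s, sync faults: %s, async faults: %s\n",
            (ctrl & PR_TAGGED_ADDR_ENABLE) ? "on" : "off",
            (ctrl & PR_MTE_TCF_SYNC) ? "yes" : "no",
            (ctrl & PR_MTE_TCF_ASYNC) ? "yes" : "no");
    return 0;
  }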