about summary refs log tree commit diff
path: root/sysdeps
diff options
context:
space:
mode:
authorSzabolcs Nagy <szabolcs.nagy@arm.com>2022-09-29 17:40:58 +0100
committerSzabolcs Nagy <szabolcs.nagy@arm.com>2022-11-22 14:31:25 +0000
commitbd8fac4e2846e0621f87c675d7c609385a4d932d (patch)
tree494018b11bd6d9cf632561fc5846b2f9093b3174 /sysdeps
parentd7d9ee6edcb81380645de095a41f0f301c1bf5e1 (diff)
downloadglibc-bd8fac4e2846e0621f87c675d7c609385a4d932d.tar.gz
glibc-bd8fac4e2846e0621f87c675d7c609385a4d932d.tar.xz
glibc-bd8fac4e2846e0621f87c675d7c609385a4d932d.zip
cheri: malloc: Capability narrowing using internal lookup table
Add more cap_ hooks to implement narrowing without depending on a
global capability covering the heap.  Either recording every
narrowed capability in a lookup table or recording every mapping
used for the heap are supported.  The morello implementation uses
a lookup table for now.

The lookup table adds memory overhead, failure paths and locks.
Recording and removing entries from the lookup table must be done
carefully in realloc so on failure the old pointer is usable and
on success the old pointer is immediately reusable concurrently.
The locks require fork hooks so malloc works in multi-threaded
fork child.
Diffstat (limited to 'sysdeps')
-rw-r--r--sysdeps/aarch64/morello/libc-cap.h295
-rw-r--r--sysdeps/generic/libc-cap.h9
2 files changed, 301 insertions, 3 deletions
diff --git a/sysdeps/aarch64/morello/libc-cap.h b/sysdeps/aarch64/morello/libc-cap.h
index d772e36dee..19ccc47ada 100644
--- a/sysdeps/aarch64/morello/libc-cap.h
+++ b/sysdeps/aarch64/morello/libc-cap.h
@@ -19,6 +19,268 @@
 #ifndef _AARCH64_MORELLO_LIBC_CAP_H
 #define _AARCH64_MORELLO_LIBC_CAP_H 1
 
+#include <stdint.h>
+#include <sys/mman.h>
+#include <libc-lock.h>
+
+/* Hash table for __libc_cap_widen.  */
+
+/* Initial table length: one 64KiB mapping worth of entries.  */
+#define HT_MIN_LEN (65536 / sizeof (struct htentry))
+/* Upper bound on the table length.  */
+#define HT_MAX_LEN (1UL << 58)
+
+struct htentry
+{
+  uint64_t key;    /* Address of the allocation; 0: empty, -1: deleted.  */
+  uint64_t unused; /* Padding; presumably keeps value 16-byte aligned — TODO confirm.  */
+  void *value;     /* The original wide capability for this address.  */
+};
+
+struct ht
+{
+  __libc_lock_define(,mutex); /* Serializes all table operations.  */
+  size_t mask;    /* Length - 1, note: length is a power of 2.  */
+  size_t fill;    /* Used + deleted entries.  */
+  size_t used;    /* Entries holding a live key/value mapping.  */
+  size_t reserve; /* Planned adds.  */
+  struct htentry *tab; /* Array of mask + 1 entries, mmap allocated.  */
+};
+
+/* True if E never held a mapping: key 0 is the empty marker.  */
+static inline bool
+htentry_isempty (struct htentry *e)
+{
+  return e->key == 0;
+}
+
+/* True if E is a tombstone: key -1 marks a removed mapping.  */
+static inline bool
+htentry_isdeleted (struct htentry *e)
+{
+  return e->key == -1;
+}
+
+/* True if E currently holds a mapping, i.e. it is neither the empty
+   marker (0) nor the deleted marker (-1).  */
+static inline bool
+htentry_isused (struct htentry *e)
+{
+  return !htentry_isempty (e) && !htentry_isdeleted (e);
+}
+
+/* Mix KEY into a hash value.  The low 4 bits are dropped — presumably
+   because allocation addresses are at least 16-byte aligned so those
+   bits carry no entropy; TODO confirm the alignment assumption.  */
+static inline uint64_t
+ht_key_hash (uint64_t key)
+{
+  return (key >> 4) ^ (key >> 18);
+}
+
+/* Map a zero initialized table of N entries; the byte size must be a
+   non-zero multiple of 64KiB.  Returns NULL on mmap failure.  */
+static struct htentry *
+ht_tab_alloc (size_t n)
+{
+  size_t size = n * sizeof (struct htentry);
+  assert (size && (size & 65535) == 0);
+  void *p = __mmap (0, size, PROT_READ|PROT_WRITE,
+		    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+  if (p == MAP_FAILED)
+    return NULL;
+  return p;
+}
+
+/* Unmap a table of N entries obtained from ht_tab_alloc.  */
+static void
+ht_tab_free (struct htentry *tab, size_t n)
+{
+  int r = __munmap (tab, n * sizeof (struct htentry));
+  assert (r == 0); /* Unmapping a valid mapping should not fail.  */
+}
+
+/* Initialize HT with an empty table of HT_MIN_LEN entries.  Returns
+   false if the initial table cannot be mapped.  */
+static bool
+ht_init (struct ht *ht)
+{
+  __libc_lock_init (ht->mutex);
+  ht->mask = HT_MIN_LEN - 1;
+  ht->fill = 0;
+  ht->used = 0;
+  ht->reserve = 0;
+  ht->tab = ht_tab_alloc (ht->mask + 1);
+  return ht->tab != NULL;
+}
+
+/* Find the entry for KEY, where HASH == ht_key_hash (KEY).  Returns
+   the used entry matching KEY if present, otherwise a free (empty or
+   deleted) entry where KEY can be inserted.  The caller must ensure
+   the table always has at least one empty entry, otherwise the probe
+   loop cannot terminate.  */
+static struct htentry *
+ht_lookup (struct ht *ht, uint64_t key, uint64_t hash)
+{
+  size_t mask = ht->mask;
+  size_t i = hash;
+  size_t j;
+  struct htentry *e = ht->tab + (i & mask);
+  struct htentry *del;
+
+  if (e->key == key || htentry_isempty (e))
+    return e;
+  if (htentry_isdeleted (e))
+    del = e;
+  else
+    del = NULL;
+
+  /* Quadratic probing, remembering the first deleted entry so it can
+     be reused for an insert.  */
+  for (j = 1, i += j++; ; i += j++)
+    {
+      e = ht->tab + (i & mask);
+      if (e->key == key)
+	return e;
+      if (htentry_isempty (e))
+	return del != NULL ? del : e;
+      if (del == NULL && htentry_isdeleted (e))
+	del = e;
+    }
+}
+
+/* Rehash HT into a new table sized for the used + reserved entries.
+   Deleted entries are dropped.  On failure the old table is left
+   intact and false is returned.  */
+static bool
+ht_resize (struct ht *ht)
+{
+  size_t len;
+  size_t used = ht->used;
+  size_t n = ht->used + ht->reserve;
+  size_t oldlen = ht->mask + 1;
+
+  /* Smallest power-of-2 length that keeps the table at most half full.  */
+  if (2 * n >= HT_MAX_LEN)
+    len = HT_MAX_LEN;
+  else
+    for (len = HT_MIN_LEN; len < 2 * n; len *= 2);
+  struct htentry *newtab = ht_tab_alloc (len);
+  struct htentry *oldtab = ht->tab;
+  struct htentry *e;
+  if (newtab == NULL)
+    return false;
+
+  ht->tab = newtab;
+  ht->mask = len - 1;
+  ht->fill = ht->used; /* No deleted entries after a rehash.  */
+  for (e = oldtab; used > 0; e++)
+    {
+      if (htentry_isused (e))
+	{
+	  uint64_t hash = ht_key_hash (e->key);
+	  used--;
+	  *ht_lookup (ht, e->key, hash) = *e;
+	}
+    }
+  ht_tab_free (oldtab, oldlen);
+  return true;
+}
+
+/* Reserve room for one planned add; must be paired with a later
+   ht_add or ht_unreserve.  Grows the table if needed and returns
+   false if that fails, leaving the reservation count unchanged.  */
+static bool
+ht_reserve (struct ht *ht)
+{
+  bool r = true;
+  __libc_lock_lock (ht->mutex);
+  ht->reserve++;
+  size_t future_fill = ht->fill + ht->reserve;
+  size_t future_used = ht->used + ht->reserve;
+  /* Resize at 3/4 fill or if there are many deleted entries.  */
+  if (future_fill > ht->mask - ht->mask / 4
+      || future_fill > future_used * 4)
+    r = ht_resize (ht);
+  if (!r)
+    ht->reserve--; /* Failed: give the reservation back.  */
+  __libc_lock_unlock (ht->mutex);
+  return r;
+}
+
+/* Return a reservation taken by a successful ht_reserve without
+   performing the planned add.  */
+static void
+ht_unreserve (struct ht *ht)
+{
+  __libc_lock_lock (ht->mutex);
+  assert (ht->reserve > 0);
+  ht->reserve--;
+  __libc_lock_unlock (ht->mutex);
+}
+
+/* Insert the KEY -> VALUE mapping, consuming one reservation taken
+   via ht_reserve.  KEY must not be 0 or -1 (reserved as the empty and
+   deleted markers).  Returns false if KEY was already present; in
+   that case the existing entry's value is still overwritten.  */
+static bool
+ht_add (struct ht *ht, uint64_t key, void *value)
+{
+  __libc_lock_lock (ht->mutex);
+  assert (ht->reserve > 0);
+  ht->reserve--;
+  uint64_t hash = ht_key_hash (key);
+  struct htentry *e = ht_lookup (ht, key, hash);
+  bool r = false;
+  if (!htentry_isused (e))
+    {
+      if (htentry_isempty (e))
+        ht->fill++; /* Reusing a tombstone does not raise the fill.  */
+      ht->used++;
+      e->key = key;
+      r = true;
+    }
+  e->value = value;
+  __libc_lock_unlock (ht->mutex);
+  return r;
+}
+
+/* Remove KEY.  Returns false if it was not present.  The slot becomes
+   a tombstone; fill is only reclaimed by a later resize.  */
+static bool
+ht_del (struct ht *ht, uint64_t key)
+{
+  __libc_lock_lock (ht->mutex);
+  struct htentry *e = ht_lookup (ht, key, ht_key_hash (key));
+  bool r = htentry_isused (e);
+  if (r)
+    {
+      ht->used--;
+      e->key = -1; /* Tombstone, so later probes keep walking.  */
+    }
+  __libc_lock_unlock (ht->mutex);
+  return r;
+}
+
+/* Look up KEY and return its value, or NULL if not present.  */
+static void *
+ht_get (struct ht *ht, uint64_t key)
+{
+  __libc_lock_lock (ht->mutex);
+  struct htentry *e = ht_lookup (ht, key, ht_key_hash (key));
+  void *v = htentry_isused (e) ? e->value : NULL;
+  __libc_lock_unlock (ht->mutex);
+  return v;
+}
+
+/* Capability narrowing APIs.  */
+
+/* Global table mapping the address of each narrowed capability to the
+   original wide capability.  */
+static struct ht __libc_cap_ht;
+
+/* Initialize the narrowing state; returns false if the lookup table
+   cannot be allocated.  */
+static __always_inline bool
+__libc_cap_init (void)
+{
+  return ht_init (&__libc_cap_ht);
+}
+
+/* Taken around fork so the table is in a consistent state in the
+   child (see the commit message: malloc in a multi-threaded fork
+   child needs these hooks).  */
+static __always_inline void
+__libc_cap_fork_lock (void)
+{
+  __libc_lock_lock (__libc_cap_ht.mutex);
+}
+
+/* Parent side of the fork hook: simply release the lock.  */
+static __always_inline void
+__libc_cap_fork_unlock_parent (void)
+{
+  __libc_lock_unlock (__libc_cap_ht.mutex);
+}
+
+/* Child side of the fork hook: reinitialize rather than unlock, since
+   the lock owner does not exist in the child.  */
+static __always_inline void
+__libc_cap_fork_unlock_child (void)
+{
+  __libc_lock_init (__libc_cap_ht.mutex);
+}
+
+/* Record a new mapping P used for the heap.  The lookup-table based
+   narrowing needs no per-mapping state, so this only sanity checks P
+   and always succeeds.  */
+static __always_inline bool
+__libc_cap_map_add (void *p)
+{
+  assert (p != NULL);
+// TODO: depends on pcuabi
+//  assert (__builtin_cheri_base_get (p) == (uint64_t) p);
+  return true;
+}
+
+/* Forget a heap mapping P; a no-op beyond the sanity check, matching
+   __libc_cap_map_add.  */
+static __always_inline void
+__libc_cap_map_del (void *p)
+{
+  assert (p != NULL);
+//  assert (__builtin_cheri_base_get (p) == (uint64_t) p);
+}
+
 /* No special alignment is needed for n <= __CAP_ALIGN_THRESHOLD
    allocations, i.e. __libc_cap_align (n) <= MALLOC_ALIGNMENT.  */
 #define __CAP_ALIGN_THRESHOLD 32759
@@ -51,7 +313,11 @@ __libc_cap_align (size_t n)
 static __always_inline void *
 __libc_cap_narrow (void *p, size_t n)
 {
-  return __builtin_cheri_bounds_set_exact (p, n);
+  assert (p != NULL);
+  /* Record the wide capability first so __libc_cap_widen can recover
+     it.  Keep the ht_add call outside assert: the side effect must
+     survive even if asserts are compiled out (NDEBUG).  */
+  bool ok = ht_add (&__libc_cap_ht, (uint64_t)(uintptr_t) p, p);
+  assert (ok);
+  return __builtin_cheri_bounds_set_exact (p, n);
 }
 
 /* Given a p with narrowed bound (output of __libc_cap_narrow) return
@@ -59,8 +325,31 @@ __libc_cap_narrow (void *p, size_t n)
 static __always_inline void *
 __libc_cap_widen (void *p)
 {
-  void *cap = __builtin_cheri_global_data_get ();
-  return __builtin_cheri_address_set (cap, p);
+  /* P must be a valid capability pointing at the start of an
+     allocation previously recorded by __libc_cap_narrow.  */
+  assert (__builtin_cheri_tag_get (p) && __builtin_cheri_offset_get (p) == 0);
+  uint64_t key = (uint64_t)(uintptr_t) p;
+  void *cap = ht_get (&__libc_cap_ht, key);
+  assert (cap == p); /* NULL here means P was never narrowed.  */
+  return cap;
 }
+
+/* Reserve lookup table space for one upcoming __libc_cap_narrow;
+   returns false if the table cannot be grown.  */
+static __always_inline bool
+__libc_cap_reserve (void)
+{
+  return ht_reserve (&__libc_cap_ht);
+}
+
+/* Release a reservation made by __libc_cap_reserve that will not be
+   used for a narrow after all.  */
+static __always_inline void
+__libc_cap_unreserve (void)
+{
+  ht_unreserve (&__libc_cap_ht);
+}
+
+/* Forget the narrowing record for allocation P (e.g. on free).  */
+static __always_inline void
+__libc_cap_drop (void *p)
+{
+  assert (p != NULL);
+  /* Keep the ht_del call outside assert: the entry must be removed
+     even if asserts are compiled out (NDEBUG).  */
+  bool ok = ht_del (&__libc_cap_ht, (uint64_t)(uintptr_t) p);
+  assert (ok);
 }
 
 #endif
diff --git a/sysdeps/generic/libc-cap.h b/sysdeps/generic/libc-cap.h
index 85ff2a6b61..9d93d61c9e 100644
--- a/sysdeps/generic/libc-cap.h
+++ b/sysdeps/generic/libc-cap.h
@@ -26,9 +26,18 @@
 void __libc_cap_link_error (void);
 
 #define __libc_cap_fail(rtype) (__libc_cap_link_error (), (rtype) 0)
+#define __libc_cap_init() __libc_cap_fail (bool)
+#define __libc_cap_fork_lock() __libc_cap_fail (void)
+#define __libc_cap_fork_unlock_parent() __libc_cap_fail (void)
+#define __libc_cap_fork_unlock_child() __libc_cap_fail (void)
+#define __libc_cap_map_add(p) __libc_cap_fail (bool)
+#define __libc_cap_map_del(p) __libc_cap_fail (void)
 #define __libc_cap_roundup(n) __libc_cap_fail (size_t)
 #define __libc_cap_align(n) __libc_cap_fail (size_t)
 #define __libc_cap_narrow(p, n) __libc_cap_fail (void *)
 #define __libc_cap_widen(p) __libc_cap_fail (void *)
+/* Match the morello signatures: reserve/unreserve take no argument.  */
+#define __libc_cap_reserve() __libc_cap_fail (bool)
+#define __libc_cap_unreserve() __libc_cap_fail (void)
+#define __libc_cap_drop(p) __libc_cap_fail (void)
 
 #endif