author    | Palmer Dabbelt <palmer@dabbelt.com> | 2018-01-29 10:27:17 -0800
committer | Palmer Dabbelt <palmer@dabbelt.com> | 2018-01-29 10:27:17 -0800
commit    | d1c09b247130c1aedd11b913f579c7764b1b5ae1 (patch)
tree      | f7b2eb5361ae69af3490074320178fb04f7e3fd7 /sysdeps/unix/sysv
parent    | b2cb5e0298e08b486190610a9e82356ccb6f564b (diff)
RISC-V: Atomic and Locking Routines
This patch implements various atomic and locking routines on RISC-V.  We
mandate the A extension on Linux-capable RISC-V systems, so this can rely
on always having the various atomic instructions available.

2018-01-29  Palmer Dabbelt  <palmer@sifive.com>

	* sysdeps/riscv/nptl/bits/pthreadtypes-arch.h: New file.
	* sysdeps/riscv/nptl/bits/semaphore.h: Likewise.
	* sysdeps/riscv/nptl/libc-lowlevellock.c: Likewise.
	* sysdeps/unix/sysv/linux/riscv/atomic-machine.h: Likewise.
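Editor's note (not part of the commit): because the A extension is guaranteed
on Linux-capable RISC-V, the compiler atomic builtins this header wraps can be
lowered to single AMO instructions with no library fallback path.  A minimal
sketch, assuming GCC targeting rv64gc; the function name is invented for
illustration:

    #include <stdint.h>

    #ifndef __riscv_atomic
    # error "A extension required, mirroring the #error branch in this patch"
    #endif

    /* With the A extension this compiles to a single
       "amoadd.w a0, a1, (a0)"; it returns the value *counter held
       before the addition.  */
    int32_t
    example_fetch_add (int32_t *counter, int32_t delta)
    {
      return __atomic_fetch_add (counter, delta, __ATOMIC_RELAXED);
    }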
Diffstat (limited to 'sysdeps/unix/sysv')
-rw-r--r-- | sysdeps/unix/sysv/linux/riscv/atomic-machine.h | 194
1 file changed, 194 insertions(+), 0 deletions(-)
diff --git a/sysdeps/unix/sysv/linux/riscv/atomic-machine.h b/sysdeps/unix/sysv/linux/riscv/atomic-machine.h
new file mode 100644
index 0000000000..3d967d3b08
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/riscv/atomic-machine.h
@@ -0,0 +1,194 @@
+/* Low-level functions for atomic operations.  RISC-V version.
+   Copyright (C) 2014-2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _LINUX_RISCV_BITS_ATOMIC_H
+#define _LINUX_RISCV_BITS_ATOMIC_H 1
+
+#include <stdint.h>
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+
+typedef int64_t atomic64_t;
+typedef uint64_t uatomic64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+#define atomic_full_barrier() __sync_synchronize ()
+
+#ifdef __riscv_atomic
+
+# define __HAVE_64B_ATOMICS (__riscv_xlen >= 64)
+# define USE_ATOMIC_COMPILER_BUILTINS 1
+# define ATOMIC_EXCHANGE_USES_CAS 0
+
+/* Compare and exchange.
+   For all "bool" routines, we return FALSE if exchange successful.  */
+
+# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                  model, __ATOMIC_RELAXED); \
+  })
+
+# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                  model, __ATOMIC_RELAXED); \
+  })
+
+# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                  model, __ATOMIC_RELAXED); \
+  })
+
+# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                  model, __ATOMIC_RELAXED); \
+  })
+
+# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                 model, __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                 model, __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                 model, __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                 model, __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+/* Atomic compare and exchange.  */
+
+# define atomic_compare_and_exchange_bool_acq(mem, new, old) \
+  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
+                        mem, new, old, __ATOMIC_ACQUIRE)
+
+# define atomic_compare_and_exchange_val_acq(mem, new, old) \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
+                       mem, new, old, __ATOMIC_ACQUIRE)
+
+# define atomic_compare_and_exchange_val_rel(mem, new, old) \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
+                       mem, new, old, __ATOMIC_RELEASE)
+
+/* Atomic exchange (without compare).  */
+
+# define __arch_exchange_8_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+# define __arch_exchange_16_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+# define __arch_exchange_32_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+# define __arch_exchange_64_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+# define atomic_exchange_acq(mem, value) \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
+
+# define atomic_exchange_rel(mem, value) \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
+
+/* Atomically add value and return the previous (unincremented) value.  */
+
+# define __arch_exchange_and_add_8_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+# define __arch_exchange_and_add_16_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+# define __arch_exchange_and_add_32_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+# define __arch_exchange_and_add_64_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+# define atomic_exchange_and_add_acq(mem, value) \
+  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+                       __ATOMIC_ACQUIRE)
+
+# define atomic_exchange_and_add_rel(mem, value) \
+  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+                       __ATOMIC_RELEASE)
+
+/* Miscellaneous.  */
+
+# define asm_amo(which, ordering, mem, value) ({ \
+  __atomic_check_size (mem); \
+  typeof (*mem) __tmp; \
+  if (sizeof (__tmp) == 4) \
+    asm volatile (which ".w" ordering "\t%0, %z2, %1" \
+                  : "=r" (__tmp), "+A" (* (mem)) \
+                  : "rJ" (value)); \
+  else if (sizeof (__tmp) == 8) \
+    asm volatile (which ".d" ordering "\t%0, %z2, %1" \
+                  : "=r" (__tmp), "+A" (* (mem)) \
+                  : "rJ" (value)); \
+  else \
+    abort (); \
+  __tmp; })
+
+# define atomic_max(mem, value) asm_amo ("amomaxu", ".aq", mem, value)
+# define atomic_min(mem, value) asm_amo ("amominu", ".aq", mem, value)
+
+# define atomic_bit_test_set(mem, bit) \
+  ({ typeof (*mem) __mask = (typeof (*mem))1 << (bit); \
+     asm_amo ("amoor", ".aq", mem, __mask) & __mask; })
+
+# define catomic_exchange_and_add(mem, value) \
+  atomic_exchange_and_add (mem, value)
+# define catomic_max(mem, value) atomic_max (mem, value)
+
+#else /* __riscv_atomic */
+# error "ISAs that do not subsume the A extension are not supported"
+#endif /* !__riscv_atomic */
+
+#endif /* bits/atomic.h */
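Editor's note (not part of the commit): glibc's locking code consumes these
macros through its internal atomic.h wrappers.  A toy sketch of how the
acquire/release exchange operations defined above compose into a
test-and-set spinlock; this assumes the macros are in scope via glibc's
internal headers, and the names toy_lock/toy_unlock are invented for this
example:

    /* atomic_exchange_acq expands, via __atomic_exchange_n with
       __ATOMIC_ACQUIRE, to an amoswap.w.aq on RISC-V.  */
    static void
    toy_lock (int *l)
    {
      while (atomic_exchange_acq (l, 1) != 0)
        ;  /* Spin until the previous value was 0 (unlocked).  */
    }

    /* The release exchange (amoswap.w.rl) orders the critical
       section's stores before the lock is observed as free.  */
    static void
    toy_unlock (int *l)
    {
      atomic_exchange_rel (l, 0);
    }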