From de071d199a8578055edf2722114788ae749823aa Mon Sep 17 00:00:00 2001
From: Joseph Myers
Date: Fri, 11 Sep 2015 20:00:19 +0000
Subject: Move bits/atomic.h to atomic-machine.h (bug 14912).

It was noted that the bits/*.h naming scheme should only be used for
installed headers.  This patch renames bits/atomic.h to
atomic-machine.h to follow that convention.  This is the only change
in this series that needs to change the filename rather than simply
removing a directory level (because both atomic.h and bits/atomic.h
exist at present).

Tested for x86_64 (testsuite, and that installed stripped shared
libraries are unchanged by the patch).

	[BZ #14912]
	* sysdeps/aarch64/bits/atomic.h: Move to ...
	* sysdeps/aarch64/atomic-machine.h: ...here.
	(_AARCH64_BITS_ATOMIC_H): Rename macro to
	_AARCH64_ATOMIC_MACHINE_H.
	* sysdeps/alpha/bits/atomic.h: Move to ...
	* sysdeps/alpha/atomic-machine.h: ...here.
	* sysdeps/arm/bits/atomic.h: Move to ...
	* sysdeps/arm/atomic-machine.h: ...here.  Update comments.
	* bits/atomic.h: Move to ...
	* sysdeps/generic/atomic-machine.h: ...here.
	(_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
	* sysdeps/i386/bits/atomic.h: Move to ...
	* sysdeps/i386/atomic-machine.h: ...here.
	* sysdeps/ia64/bits/atomic.h: Move to ...
	* sysdeps/ia64/atomic-machine.h: ...here.
	* sysdeps/m68k/coldfire/bits/atomic.h: Move to ...
	* sysdeps/m68k/coldfire/atomic-machine.h: ...here.
	(_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
	* sysdeps/m68k/m680x0/m68020/bits/atomic.h: Move to ...
	* sysdeps/m68k/m680x0/m68020/atomic-machine.h: ...here.
	* sysdeps/microblaze/bits/atomic.h: Move to ...
	* sysdeps/microblaze/atomic-machine.h: ...here.
	* sysdeps/mips/bits/atomic.h: Move to ...
	* sysdeps/mips/atomic-machine.h: ...here.
	(_MIPS_BITS_ATOMIC_H): Rename macro to _MIPS_ATOMIC_MACHINE_H.
	* sysdeps/powerpc/bits/atomic.h: Move to ...
	* sysdeps/powerpc/atomic-machine.h: ...here.  Update comments.
	* sysdeps/powerpc/powerpc32/bits/atomic.h: Move to ...
	* sysdeps/powerpc/powerpc32/atomic-machine.h: ...here.  Update
	comments.  Include <atomic-machine.h> instead of <bits/atomic.h>.
	* sysdeps/powerpc/powerpc64/bits/atomic.h: Move to ...
	* sysdeps/powerpc/powerpc64/atomic-machine.h: ...here.  Include
	<atomic-machine.h> instead of <bits/atomic.h>.
	* sysdeps/s390/bits/atomic.h: Move to ...
	* sysdeps/s390/atomic-machine.h: ...here.
	* sysdeps/sparc/sparc32/bits/atomic.h: Move to ...
	* sysdeps/sparc/sparc32/atomic-machine.h: ...here.
	(_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
	* sysdeps/sparc/sparc32/sparcv9/bits/atomic.h: Move to ...
	* sysdeps/sparc/sparc32/sparcv9/atomic-machine.h: ...here.
	* sysdeps/sparc/sparc64/bits/atomic.h: Move to ...
	* sysdeps/sparc/sparc64/atomic-machine.h: ...here.
	* sysdeps/tile/bits/atomic.h: Move to ...
	* sysdeps/tile/atomic-machine.h: ...here.
	* sysdeps/tile/tilegx/bits/atomic.h: Move to ...
	* sysdeps/tile/tilegx/atomic-machine.h: ...here.  Include
	<atomic-machine.h> instead of <bits/atomic.h>.
	(_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
	* sysdeps/tile/tilepro/bits/atomic.h: Move to ...
	* sysdeps/tile/tilepro/atomic-machine.h: ...here.  Include
	<atomic-machine.h> instead of <bits/atomic.h>.
	(_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
	* sysdeps/unix/sysv/linux/arm/bits/atomic.h: Move to ...
	* sysdeps/unix/sysv/linux/arm/atomic-machine.h: ...here.
	Include <atomic-machine.h> instead of <bits/atomic.h>.
	* sysdeps/unix/sysv/linux/hppa/bits/atomic.h: Move to ...
	* sysdeps/unix/sysv/linux/hppa/atomic-machine.h: ...here.
	(_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
	* sysdeps/unix/sysv/linux/m68k/coldfire/bits/atomic.h: Move to ...
	* sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h: ...here.
	(_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
	* sysdeps/unix/sysv/linux/nios2/bits/atomic.h: Move to ...
	* sysdeps/unix/sysv/linux/nios2/atomic-machine.h: ...here.
	(_NIOS2_BITS_ATOMIC_H): Rename macro to _NIOS2_ATOMIC_MACHINE_H.
	* sysdeps/unix/sysv/linux/sh/bits/atomic.h: Move to ...
	* sysdeps/unix/sysv/linux/sh/atomic-machine.h: ...here.
	* sysdeps/x86_64/bits/atomic.h: Move to ...
	* sysdeps/x86_64/atomic-machine.h: ...here.
	* include/atomic.h: Include <atomic-machine.h> instead of
	<bits/atomic.h>.
---
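The include/atomic.h change in the last ChangeLog entry is the only
consumer-side edit; every architecture then supplies its own
atomic-machine.h through the normal sysdeps search path.  A sketch of
that one-line change (the include/atomic.h hunk itself is not quoted
below, since the diff shown here covers only sysdeps/):

-#include <bits/atomic.h>
+#include <atomic-machine.h>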
 sysdeps/aarch64/atomic-machine.h                   | 173 +++++++
 sysdeps/aarch64/bits/atomic.h                      | 173 -------
 sysdeps/alpha/atomic-machine.h                     | 371 ++++++++++++++
 sysdeps/alpha/bits/atomic.h                        | 371 --------------
 sysdeps/arm/atomic-machine.h                       | 162 ++++++
 sysdeps/arm/bits/atomic.h                          | 162 ------
 sysdeps/generic/atomic-machine.h                   |  42 ++
 sysdeps/i386/atomic-machine.h                      | 544 +++++++++++++++++++++
 sysdeps/i386/bits/atomic.h                         | 544 ---------------------
 sysdeps/ia64/atomic-machine.h                      | 119 +++++
 sysdeps/ia64/bits/atomic.h                         | 119 -----
 sysdeps/m68k/coldfire/atomic-machine.h             |  72 +++
 sysdeps/m68k/coldfire/bits/atomic.h                |  72 ---
 sysdeps/m68k/m680x0/m68020/atomic-machine.h        | 256 ++++++++++
 sysdeps/m68k/m680x0/m68020/bits/atomic.h           | 256 ----------
 sysdeps/microblaze/atomic-machine.h                | 272 +++++++++++
 sysdeps/microblaze/bits/atomic.h                   | 272 -----------
 sysdeps/mips/atomic-machine.h                      | 503 +++++++++++++++++++
 sysdeps/mips/bits/atomic.h                         | 503 -------------------
 sysdeps/powerpc/atomic-machine.h                   | 345 +++++++++++++
 sysdeps/powerpc/bits/atomic.h                      | 345 -------------
 sysdeps/powerpc/powerpc32/atomic-machine.h         | 144 ++++++
 sysdeps/powerpc/powerpc32/bits/atomic.h            | 144 ------
 sysdeps/powerpc/powerpc64/atomic-machine.h         | 274 +++++++++++
 sysdeps/powerpc/powerpc64/bits/atomic.h            | 274 -----------
 sysdeps/s390/atomic-machine.h                      | 120 +++++
 sysdeps/s390/bits/atomic.h                         | 120 -----
 sysdeps/sparc/sparc32/atomic-machine.h             | 360 ++++++++++++++
 sysdeps/sparc/sparc32/bits/atomic.h                | 360 --------------
 sysdeps/sparc/sparc32/sparcv9/atomic-machine.h     | 105 ++++
 sysdeps/sparc/sparc32/sparcv9/bits/atomic.h        | 105 ----
 sysdeps/sparc/sparc64/atomic-machine.h             | 126 +++++
 sysdeps/sparc/sparc64/bits/atomic.h                | 126 -----
 sysdeps/tile/atomic-machine.h                      |  86 ++++
 sysdeps/tile/bits/atomic.h                         |  86 ----
 sysdeps/tile/tilegx/atomic-machine.h               |  60 +++
 sysdeps/tile/tilegx/bits/atomic.h                  |  60 ---
 sysdeps/tile/tilepro/atomic-machine.h              |  88 ++++
 sysdeps/tile/tilepro/bits/atomic.h                 |  88 ----
 sysdeps/unix/sysv/linux/arm/atomic-machine.h       | 107 ++++
 sysdeps/unix/sysv/linux/arm/bits/atomic.h          | 107 ----
 sysdeps/unix/sysv/linux/hppa/atomic-machine.h      | 103 ++++
 sysdeps/unix/sysv/linux/hppa/bits/atomic.h         | 103 ----
 .../unix/sysv/linux/m68k/coldfire/atomic-machine.h | 106 ++++
 .../unix/sysv/linux/m68k/coldfire/bits/atomic.h    | 106 ----
 sysdeps/unix/sysv/linux/nios2/atomic-machine.h     |  92 ++++
 sysdeps/unix/sysv/linux/nios2/bits/atomic.h        |  92 ----
 sysdeps/unix/sysv/linux/sh/atomic-machine.h        | 428 ++++++++++++++++
 sysdeps/unix/sysv/linux/sh/bits/atomic.h           | 428 ----------------
 sysdeps/x86_64/atomic-machine.h                    | 481 ++++++++++++++++++
 sysdeps/x86_64/bits/atomic.h                       | 481 ------------------
 51 files changed, 5539 insertions(+), 5497 deletions(-)
 create mode 100644 sysdeps/aarch64/atomic-machine.h
 delete mode 100644 sysdeps/aarch64/bits/atomic.h
 create mode 100644 sysdeps/alpha/atomic-machine.h
 delete mode 100644 sysdeps/alpha/bits/atomic.h
 create mode 100644 sysdeps/arm/atomic-machine.h
 delete mode 100644 sysdeps/arm/bits/atomic.h
 create mode 100644 sysdeps/generic/atomic-machine.h
 create mode 100644 sysdeps/i386/atomic-machine.h
 delete mode 100644 sysdeps/i386/bits/atomic.h
 create mode 100644 sysdeps/ia64/atomic-machine.h
 delete mode 100644 sysdeps/ia64/bits/atomic.h
 create mode 100644 sysdeps/m68k/coldfire/atomic-machine.h
 delete mode 100644 sysdeps/m68k/coldfire/bits/atomic.h
 create mode 100644 sysdeps/m68k/m680x0/m68020/atomic-machine.h
 delete mode 100644 sysdeps/m68k/m680x0/m68020/bits/atomic.h
 create mode 100644 sysdeps/microblaze/atomic-machine.h
 delete mode 100644 sysdeps/microblaze/bits/atomic.h
 create mode 100644 sysdeps/mips/atomic-machine.h
 delete mode 100644 sysdeps/mips/bits/atomic.h
 create mode 100644 sysdeps/powerpc/atomic-machine.h
 delete mode 100644 sysdeps/powerpc/bits/atomic.h
 create mode 100644 sysdeps/powerpc/powerpc32/atomic-machine.h
 delete mode 100644 sysdeps/powerpc/powerpc32/bits/atomic.h
 create mode 100644 sysdeps/powerpc/powerpc64/atomic-machine.h
 delete mode 100644 sysdeps/powerpc/powerpc64/bits/atomic.h
 create mode 100644 sysdeps/s390/atomic-machine.h
 delete mode 100644 sysdeps/s390/bits/atomic.h
 create mode 100644 sysdeps/sparc/sparc32/atomic-machine.h
 delete mode 100644 sysdeps/sparc/sparc32/bits/atomic.h
 create mode 100644 sysdeps/sparc/sparc32/sparcv9/atomic-machine.h
 delete mode 100644 sysdeps/sparc/sparc32/sparcv9/bits/atomic.h
 create mode 100644 sysdeps/sparc/sparc64/atomic-machine.h
 delete mode 100644 sysdeps/sparc/sparc64/bits/atomic.h
 create mode 100644 sysdeps/tile/atomic-machine.h
 delete mode 100644 sysdeps/tile/bits/atomic.h
 create mode 100644 sysdeps/tile/tilegx/atomic-machine.h
 delete mode 100644 sysdeps/tile/tilegx/bits/atomic.h
 create mode 100644 sysdeps/tile/tilepro/atomic-machine.h
 delete mode 100644 sysdeps/tile/tilepro/bits/atomic.h
 create mode 100644 sysdeps/unix/sysv/linux/arm/atomic-machine.h
 delete mode 100644 sysdeps/unix/sysv/linux/arm/bits/atomic.h
 create mode 100644 sysdeps/unix/sysv/linux/hppa/atomic-machine.h
 delete mode 100644 sysdeps/unix/sysv/linux/hppa/bits/atomic.h
 create mode 100644 sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h
 delete mode 100644 sysdeps/unix/sysv/linux/m68k/coldfire/bits/atomic.h
 create mode 100644 sysdeps/unix/sysv/linux/nios2/atomic-machine.h
 delete mode 100644 sysdeps/unix/sysv/linux/nios2/bits/atomic.h
 create mode 100644 sysdeps/unix/sysv/linux/sh/atomic-machine.h
 delete mode 100644 sysdeps/unix/sysv/linux/sh/bits/atomic.h
 create mode 100644 sysdeps/x86_64/atomic-machine.h
 delete mode 100644 sysdeps/x86_64/bits/atomic.h

(limited to 'sysdeps')

diff --git a/sysdeps/aarch64/atomic-machine.h b/sysdeps/aarch64/atomic-machine.h
new file mode 100644
index 0000000000..3758dd7b7e
--- /dev/null
+++ b/sysdeps/aarch64/atomic-machine.h
@@ -0,0 +1,173 @@
+/* Copyright (C) 2003-2015 Free Software Foundation, Inc.
+
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _AARCH64_ATOMIC_MACHINE_H
+#define _AARCH64_ATOMIC_MACHINE_H	1
+
+#include <stdint.h>
+
+typedef int8_t atomic8_t;
+typedef int16_t atomic16_t;
+typedef int32_t atomic32_t;
+typedef int64_t atomic64_t;
+
+typedef uint8_t uatomic8_t;
+typedef uint16_t uatomic16_t;
+typedef uint32_t uatomic32_t;
+typedef uint64_t uatomic64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+#define __HAVE_64B_ATOMICS 1
+#define USE_ATOMIC_COMPILER_BUILTINS 1
+
+/* Compare and exchange.
+   For all "bool" routines, we return FALSE if exchange successful.  */
+
+# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                  model, __ATOMIC_RELAXED); \
+  })
+
+# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                  model, __ATOMIC_RELAXED); \
+  })
+
+# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                  model, __ATOMIC_RELAXED); \
+  })
+
+# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                  model, __ATOMIC_RELAXED); \
+  })
+
+# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                 model, __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                 model, __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                 model, __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                 model, __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+
+/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */
+
+# define atomic_compare_and_exchange_bool_acq(mem, new, old) \
+  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
+                        mem, new, old, __ATOMIC_ACQUIRE)
+
+# define atomic_compare_and_exchange_val_acq(mem, new, old) \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
+                       mem, new, old, __ATOMIC_ACQUIRE)
+
+/* Compare and exchange with "release" semantics, i.e. barrier before.  */
+
+# define atomic_compare_and_exchange_bool_rel(mem, new, old) \
+  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
+                        mem, new, old, __ATOMIC_RELEASE)
+
+# define atomic_compare_and_exchange_val_rel(mem, new, old) \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
+                       mem, new, old, __ATOMIC_RELEASE)
+
+
+/* Atomic exchange (without compare).  */
+
+# define __arch_exchange_8_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+# define __arch_exchange_16_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+# define __arch_exchange_32_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+# define __arch_exchange_64_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+# define atomic_exchange_acq(mem, value) \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
+
+# define atomic_exchange_rel(mem, value) \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
+
+
+/* Atomically add value and return the previous (unincremented) value.  */
+
+# define __arch_exchange_and_add_8_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+# define __arch_exchange_and_add_16_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+# define __arch_exchange_and_add_32_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+# define __arch_exchange_and_add_64_int(mem, value, model) \
+  __atomic_fetch_add (mem, value, model)
+
+# define atomic_exchange_and_add_acq(mem, value) \
+  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+                       __ATOMIC_ACQUIRE)
+
+# define atomic_exchange_and_add_rel(mem, value) \
+  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+                       __ATOMIC_RELEASE)
+
+/* Barrier macro.  */
+#define atomic_full_barrier() __sync_synchronize()
+
+#endif
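As a quick aside on the convention the header above documents: the
"bool" macros return FALSE (0) when the exchange succeeds, i.e. the
negation of what __atomic_compare_exchange_n itself returns.  A
minimal standalone sketch using only the GCC builtins the header
wraps (nothing below is glibc-internal; names are illustrative):

#include <stdio.h>

int
main (void)
{
  int mem = 42;
  int expected = 42;

  /* mem == expected, so the store happens and the builtin returns
     true; the glibc-style "failed" flag is therefore 0.  */
  int failed = !__atomic_compare_exchange_n (&mem, &expected, 7, 0,
                                             __ATOMIC_ACQUIRE,
                                             __ATOMIC_RELAXED);
  printf ("failed=%d mem=%d\n", failed, mem);   /* failed=0 mem=7 */

  /* mem is now 7, not 42: no store happens, EXPECTED is updated to
     the value actually observed, and the flag is 1.  */
  expected = 42;
  failed = !__atomic_compare_exchange_n (&mem, &expected, 9, 0,
                                         __ATOMIC_ACQUIRE,
                                         __ATOMIC_RELAXED);
  printf ("failed=%d expected=%d\n", failed, expected);  /* 1 and 7 */
  return 0;
}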
diff --git a/sysdeps/aarch64/bits/atomic.h b/sysdeps/aarch64/bits/atomic.h
deleted file mode 100644
index fdd5eaa517..0000000000
--- a/sysdeps/aarch64/bits/atomic.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/* Copyright (C) 2003-2015 Free Software Foundation, Inc.
-
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library.  If not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#ifndef _AARCH64_BITS_ATOMIC_H
-#define _AARCH64_BITS_ATOMIC_H	1
-
-#include <stdint.h>
-
-typedef int8_t atomic8_t;
-typedef int16_t atomic16_t;
-typedef int32_t atomic32_t;
-typedef int64_t atomic64_t;
-
-typedef uint8_t uatomic8_t;
-typedef uint16_t uatomic16_t;
-typedef uint32_t uatomic32_t;
-typedef uint64_t uatomic64_t;
-
-typedef intptr_t atomicptr_t;
-typedef uintptr_t uatomicptr_t;
-typedef intmax_t atomic_max_t;
-typedef uintmax_t uatomic_max_t;
-
-#define __HAVE_64B_ATOMICS 1
-#define USE_ATOMIC_COMPILER_BUILTINS 1
-
-/* Compare and exchange.
-   For all "bool" routines, we return FALSE if exchange succesful.
*/ - -# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - }) - -# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - }) - -# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - }) - -# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - }) - -# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - __oldval; \ - }) - -# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - __oldval; \ - }) - -# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - __oldval; \ - }) - -# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - __oldval; \ - }) - - -/* Compare and exchange with "acquire" semantics, ie barrier after. */ - -# define atomic_compare_and_exchange_bool_acq(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, __ATOMIC_ACQUIRE) - -# define atomic_compare_and_exchange_val_acq(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, __ATOMIC_ACQUIRE) - -/* Compare and exchange with "release" semantics, ie barrier before. */ - -# define atomic_compare_and_exchange_bool_rel(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, __ATOMIC_RELEASE) - -# define atomic_compare_and_exchange_val_rel(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, __ATOMIC_RELEASE) - - -/* Atomic exchange (without compare). */ - -# define __arch_exchange_8_int(mem, newval, model) \ - __atomic_exchange_n (mem, newval, model) - -# define __arch_exchange_16_int(mem, newval, model) \ - __atomic_exchange_n (mem, newval, model) - -# define __arch_exchange_32_int(mem, newval, model) \ - __atomic_exchange_n (mem, newval, model) - -# define __arch_exchange_64_int(mem, newval, model) \ - __atomic_exchange_n (mem, newval, model) - -# define atomic_exchange_acq(mem, value) \ - __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE) - -# define atomic_exchange_rel(mem, value) \ - __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE) - - -/* Atomically add value and return the previous (unincremented) value. 
*/ - -# define __arch_exchange_and_add_8_int(mem, value, model) \ - __atomic_fetch_add (mem, value, model) - -# define __arch_exchange_and_add_16_int(mem, value, model) \ - __atomic_fetch_add (mem, value, model) - -# define __arch_exchange_and_add_32_int(mem, value, model) \ - __atomic_fetch_add (mem, value, model) - -# define __arch_exchange_and_add_64_int(mem, value, model) \ - __atomic_fetch_add (mem, value, model) - -# define atomic_exchange_and_add_acq(mem, value) \ - __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ - __ATOMIC_ACQUIRE) - -# define atomic_exchange_and_add_rel(mem, value) \ - __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ - __ATOMIC_RELEASE) - -/* Barrier macro. */ -#define atomic_full_barrier() __sync_synchronize() - -#endif diff --git a/sysdeps/alpha/atomic-machine.h b/sysdeps/alpha/atomic-machine.h new file mode 100644 index 0000000000..7adb162c35 --- /dev/null +++ b/sysdeps/alpha/atomic-machine.h @@ -0,0 +1,371 @@ +/* Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#include + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 1 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +#ifdef UP +# define __MB /* nothing */ +#else +# define __MB " mb\n" +#endif + + +/* Compare and exchange. For all of the "xxx" routines, we expect a + "__prev" and a "__cmp" variable to be provided by the enclosing scope, + in which values are returned. 
*/ + +#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2) \ +({ \ + unsigned long __tmp, __snew, __addr64; \ + __asm__ __volatile__ ( \ + mb1 \ + " andnot %[__addr8],7,%[__addr64]\n" \ + " insbl %[__new],%[__addr8],%[__snew]\n" \ + "1: ldq_l %[__tmp],0(%[__addr64])\n" \ + " extbl %[__tmp],%[__addr8],%[__prev]\n" \ + " cmpeq %[__prev],%[__old],%[__cmp]\n" \ + " beq %[__cmp],2f\n" \ + " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \ + " or %[__snew],%[__tmp],%[__tmp]\n" \ + " stq_c %[__tmp],0(%[__addr64])\n" \ + " beq %[__tmp],1b\n" \ + mb2 \ + "2:" \ + : [__prev] "=&r" (__prev), \ + [__snew] "=&r" (__snew), \ + [__tmp] "=&r" (__tmp), \ + [__cmp] "=&r" (__cmp), \ + [__addr64] "=&r" (__addr64) \ + : [__addr8] "r" (mem), \ + [__old] "Ir" ((uint64_t)(uint8_t)(uint64_t)(old)), \ + [__new] "r" (new) \ + : "memory"); \ +}) + +#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \ +({ \ + unsigned long __tmp, __snew, __addr64; \ + __asm__ __volatile__ ( \ + mb1 \ + " andnot %[__addr16],7,%[__addr64]\n" \ + " inswl %[__new],%[__addr16],%[__snew]\n" \ + "1: ldq_l %[__tmp],0(%[__addr64])\n" \ + " extwl %[__tmp],%[__addr16],%[__prev]\n" \ + " cmpeq %[__prev],%[__old],%[__cmp]\n" \ + " beq %[__cmp],2f\n" \ + " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \ + " or %[__snew],%[__tmp],%[__tmp]\n" \ + " stq_c %[__tmp],0(%[__addr64])\n" \ + " beq %[__tmp],1b\n" \ + mb2 \ + "2:" \ + : [__prev] "=&r" (__prev), \ + [__snew] "=&r" (__snew), \ + [__tmp] "=&r" (__tmp), \ + [__cmp] "=&r" (__cmp), \ + [__addr64] "=&r" (__addr64) \ + : [__addr16] "r" (mem), \ + [__old] "Ir" ((uint64_t)(uint16_t)(uint64_t)(old)), \ + [__new] "r" (new) \ + : "memory"); \ +}) + +#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \ +({ \ + __asm__ __volatile__ ( \ + mb1 \ + "1: ldl_l %[__prev],%[__mem]\n" \ + " cmpeq %[__prev],%[__old],%[__cmp]\n" \ + " beq %[__cmp],2f\n" \ + " mov %[__new],%[__cmp]\n" \ + " stl_c %[__cmp],%[__mem]\n" \ + " beq %[__cmp],1b\n" \ + mb2 \ + "2:" \ + : [__prev] "=&r" (__prev), \ + [__cmp] "=&r" (__cmp) \ + : [__mem] "m" (*(mem)), \ + [__old] "Ir" ((uint64_t)(atomic32_t)(uint64_t)(old)), \ + [__new] "Ir" (new) \ + : "memory"); \ +}) + +#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \ +({ \ + __asm__ __volatile__ ( \ + mb1 \ + "1: ldq_l %[__prev],%[__mem]\n" \ + " cmpeq %[__prev],%[__old],%[__cmp]\n" \ + " beq %[__cmp],2f\n" \ + " mov %[__new],%[__cmp]\n" \ + " stq_c %[__cmp],%[__mem]\n" \ + " beq %[__cmp],1b\n" \ + mb2 \ + "2:" \ + : [__prev] "=&r" (__prev), \ + [__cmp] "=&r" (__cmp) \ + : [__mem] "m" (*(mem)), \ + [__old] "Ir" ((uint64_t)(old)), \ + [__new] "Ir" (new) \ + : "memory"); \ +}) + +/* For all "bool" routines, we return FALSE if exchange succesful. 
*/ + +#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \ +({ unsigned long __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \ + !__cmp; }) + +#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \ +({ unsigned long __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \ + !__cmp; }) + +#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \ +({ unsigned long __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \ + !__cmp; }) + +#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \ +({ unsigned long __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \ + !__cmp; }) + +/* For all "val" routines, return the old value whether exchange + successful or not. */ + +#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2) \ +({ unsigned long __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \ + (typeof (*mem))__prev; }) + +#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \ +({ unsigned long __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \ + (typeof (*mem))__prev; }) + +#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \ +({ unsigned long __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \ + (typeof (*mem))__prev; }) + +#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \ +({ unsigned long __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \ + (typeof (*mem))__prev; }) + +/* Compare and exchange with "acquire" semantics, ie barrier after. */ + +#define atomic_compare_and_exchange_bool_acq(mem, new, old) \ + __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ + mem, new, old, "", __MB) + +#define atomic_compare_and_exchange_val_acq(mem, new, old) \ + __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ + mem, new, old, "", __MB) + +/* Compare and exchange with "release" semantics, ie barrier before. */ + +#define atomic_compare_and_exchange_bool_rel(mem, new, old) \ + __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ + mem, new, old, __MB, "") + +#define atomic_compare_and_exchange_val_rel(mem, new, old) \ + __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ + mem, new, old, __MB, "") + + +/* Atomically store value and return the previous value. 
*/ + +#define __arch_exchange_8_int(mem, value, mb1, mb2) \ +({ \ + unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \ + __asm__ __volatile__ ( \ + mb1 \ + " andnot %[__addr8],7,%[__addr64]\n" \ + " insbl %[__value],%[__addr8],%[__sval]\n" \ + "1: ldq_l %[__tmp],0(%[__addr64])\n" \ + " extbl %[__tmp],%[__addr8],%[__ret]\n" \ + " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \ + " or %[__sval],%[__tmp],%[__tmp]\n" \ + " stq_c %[__tmp],0(%[__addr64])\n" \ + " beq %[__tmp],1b\n" \ + mb2 \ + : [__ret] "=&r" (__ret), \ + [__sval] "=&r" (__sval), \ + [__tmp] "=&r" (__tmp), \ + [__addr64] "=&r" (__addr64) \ + : [__addr8] "r" (mem), \ + [__value] "r" (value) \ + : "memory"); \ + __ret; }) + +#define __arch_exchange_16_int(mem, value, mb1, mb2) \ +({ \ + unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \ + __asm__ __volatile__ ( \ + mb1 \ + " andnot %[__addr16],7,%[__addr64]\n" \ + " inswl %[__value],%[__addr16],%[__sval]\n" \ + "1: ldq_l %[__tmp],0(%[__addr64])\n" \ + " extwl %[__tmp],%[__addr16],%[__ret]\n" \ + " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \ + " or %[__sval],%[__tmp],%[__tmp]\n" \ + " stq_c %[__tmp],0(%[__addr64])\n" \ + " beq %[__tmp],1b\n" \ + mb2 \ + : [__ret] "=&r" (__ret), \ + [__sval] "=&r" (__sval), \ + [__tmp] "=&r" (__tmp), \ + [__addr64] "=&r" (__addr64) \ + : [__addr16] "r" (mem), \ + [__value] "r" (value) \ + : "memory"); \ + __ret; }) + +#define __arch_exchange_32_int(mem, value, mb1, mb2) \ +({ \ + signed int __tmp; __typeof(*mem) __ret; \ + __asm__ __volatile__ ( \ + mb1 \ + "1: ldl_l %[__ret],%[__mem]\n" \ + " mov %[__val],%[__tmp]\n" \ + " stl_c %[__tmp],%[__mem]\n" \ + " beq %[__tmp],1b\n" \ + mb2 \ + : [__ret] "=&r" (__ret), \ + [__tmp] "=&r" (__tmp) \ + : [__mem] "m" (*(mem)), \ + [__val] "Ir" (value) \ + : "memory"); \ + __ret; }) + +#define __arch_exchange_64_int(mem, value, mb1, mb2) \ +({ \ + unsigned long __tmp; __typeof(*mem) __ret; \ + __asm__ __volatile__ ( \ + mb1 \ + "1: ldq_l %[__ret],%[__mem]\n" \ + " mov %[__val],%[__tmp]\n" \ + " stq_c %[__tmp],%[__mem]\n" \ + " beq %[__tmp],1b\n" \ + mb2 \ + : [__ret] "=&r" (__ret), \ + [__tmp] "=&r" (__tmp) \ + : [__mem] "m" (*(mem)), \ + [__val] "Ir" (value) \ + : "memory"); \ + __ret; }) + +#define atomic_exchange_acq(mem, value) \ + __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB) + +#define atomic_exchange_rel(mem, value) \ + __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "") + + +/* Atomically add value and return the previous (unincremented) value. 
*/ + +#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \ + ({ __builtin_trap (); 0; }) + +#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \ + ({ __builtin_trap (); 0; }) + +#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2) \ +({ \ + signed int __tmp; __typeof(*mem) __ret; \ + __asm__ __volatile__ ( \ + mb1 \ + "1: ldl_l %[__ret],%[__mem]\n" \ + " addl %[__ret],%[__val],%[__tmp]\n" \ + " stl_c %[__tmp],%[__mem]\n" \ + " beq %[__tmp],1b\n" \ + mb2 \ + : [__ret] "=&r" (__ret), \ + [__tmp] "=&r" (__tmp) \ + : [__mem] "m" (*(mem)), \ + [__val] "Ir" ((signed int)(value)) \ + : "memory"); \ + __ret; }) + +#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2) \ +({ \ + unsigned long __tmp; __typeof(*mem) __ret; \ + __asm__ __volatile__ ( \ + mb1 \ + "1: ldq_l %[__ret],%[__mem]\n" \ + " addq %[__ret],%[__val],%[__tmp]\n" \ + " stq_c %[__tmp],%[__mem]\n" \ + " beq %[__tmp],1b\n" \ + mb2 \ + : [__ret] "=&r" (__ret), \ + [__tmp] "=&r" (__tmp) \ + : [__mem] "m" (*(mem)), \ + [__val] "Ir" ((unsigned long)(value)) \ + : "memory"); \ + __ret; }) + +/* ??? Barrier semantics for atomic_exchange_and_add appear to be + undefined. Use full barrier for now, as that's safe. */ +#define atomic_exchange_and_add(mem, value) \ + __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB) + + +/* ??? Blah, I'm lazy. Implement these later. Can do better than the + compare-and-exchange loop provided by generic code. + +#define atomic_decrement_if_positive(mem) +#define atomic_bit_test_set(mem, bit) + +*/ + +#ifndef UP +# define atomic_full_barrier() __asm ("mb" : : : "memory"); +# define atomic_read_barrier() __asm ("mb" : : : "memory"); +# define atomic_write_barrier() __asm ("wmb" : : : "memory"); +#endif diff --git a/sysdeps/alpha/bits/atomic.h b/sysdeps/alpha/bits/atomic.h deleted file mode 100644 index 7adb162c35..0000000000 --- a/sysdeps/alpha/bits/atomic.h +++ /dev/null @@ -1,371 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . 
*/ - -#include - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 1 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -#ifdef UP -# define __MB /* nothing */ -#else -# define __MB " mb\n" -#endif - - -/* Compare and exchange. For all of the "xxx" routines, we expect a - "__prev" and a "__cmp" variable to be provided by the enclosing scope, - in which values are returned. */ - -#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2) \ -({ \ - unsigned long __tmp, __snew, __addr64; \ - __asm__ __volatile__ ( \ - mb1 \ - " andnot %[__addr8],7,%[__addr64]\n" \ - " insbl %[__new],%[__addr8],%[__snew]\n" \ - "1: ldq_l %[__tmp],0(%[__addr64])\n" \ - " extbl %[__tmp],%[__addr8],%[__prev]\n" \ - " cmpeq %[__prev],%[__old],%[__cmp]\n" \ - " beq %[__cmp],2f\n" \ - " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \ - " or %[__snew],%[__tmp],%[__tmp]\n" \ - " stq_c %[__tmp],0(%[__addr64])\n" \ - " beq %[__tmp],1b\n" \ - mb2 \ - "2:" \ - : [__prev] "=&r" (__prev), \ - [__snew] "=&r" (__snew), \ - [__tmp] "=&r" (__tmp), \ - [__cmp] "=&r" (__cmp), \ - [__addr64] "=&r" (__addr64) \ - : [__addr8] "r" (mem), \ - [__old] "Ir" ((uint64_t)(uint8_t)(uint64_t)(old)), \ - [__new] "r" (new) \ - : "memory"); \ -}) - -#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \ -({ \ - unsigned long __tmp, __snew, __addr64; \ - __asm__ __volatile__ ( \ - mb1 \ - " andnot %[__addr16],7,%[__addr64]\n" \ - " inswl %[__new],%[__addr16],%[__snew]\n" \ - "1: ldq_l %[__tmp],0(%[__addr64])\n" \ - " extwl %[__tmp],%[__addr16],%[__prev]\n" \ - " cmpeq %[__prev],%[__old],%[__cmp]\n" \ - " beq %[__cmp],2f\n" \ - " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \ - " or %[__snew],%[__tmp],%[__tmp]\n" \ - " stq_c %[__tmp],0(%[__addr64])\n" \ - " beq %[__tmp],1b\n" \ - mb2 \ - "2:" \ - : [__prev] "=&r" (__prev), \ - [__snew] "=&r" (__snew), \ - [__tmp] "=&r" (__tmp), \ - [__cmp] "=&r" (__cmp), \ - [__addr64] "=&r" (__addr64) \ - : [__addr16] "r" (mem), \ - [__old] "Ir" ((uint64_t)(uint16_t)(uint64_t)(old)), \ - [__new] "r" (new) \ - : "memory"); \ -}) - -#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \ -({ \ - __asm__ __volatile__ ( \ - mb1 \ - "1: ldl_l %[__prev],%[__mem]\n" \ - " cmpeq %[__prev],%[__old],%[__cmp]\n" \ - " beq %[__cmp],2f\n" \ - " mov %[__new],%[__cmp]\n" \ - " stl_c %[__cmp],%[__mem]\n" \ - " beq %[__cmp],1b\n" \ - mb2 \ - "2:" \ - : [__prev] "=&r" (__prev), \ - [__cmp] "=&r" (__cmp) \ - : [__mem] "m" (*(mem)), \ - [__old] "Ir" ((uint64_t)(atomic32_t)(uint64_t)(old)), \ - [__new] "Ir" (new) \ - : "memory"); \ -}) - -#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \ -({ \ - __asm__ __volatile__ ( \ - mb1 \ - "1: ldq_l %[__prev],%[__mem]\n" \ - " cmpeq %[__prev],%[__old],%[__cmp]\n" \ - " beq %[__cmp],2f\n" \ - " mov %[__new],%[__cmp]\n" \ - " stq_c %[__cmp],%[__mem]\n" \ - " beq %[__cmp],1b\n" \ - mb2 \ 
- "2:" \ - : [__prev] "=&r" (__prev), \ - [__cmp] "=&r" (__cmp) \ - : [__mem] "m" (*(mem)), \ - [__old] "Ir" ((uint64_t)(old)), \ - [__new] "Ir" (new) \ - : "memory"); \ -}) - -/* For all "bool" routines, we return FALSE if exchange succesful. */ - -#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \ -({ unsigned long __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \ - !__cmp; }) - -#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \ -({ unsigned long __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \ - !__cmp; }) - -#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \ -({ unsigned long __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \ - !__cmp; }) - -#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \ -({ unsigned long __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \ - !__cmp; }) - -/* For all "val" routines, return the old value whether exchange - successful or not. */ - -#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2) \ -({ unsigned long __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \ - (typeof (*mem))__prev; }) - -#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \ -({ unsigned long __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \ - (typeof (*mem))__prev; }) - -#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \ -({ unsigned long __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \ - (typeof (*mem))__prev; }) - -#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \ -({ unsigned long __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \ - (typeof (*mem))__prev; }) - -/* Compare and exchange with "acquire" semantics, ie barrier after. */ - -#define atomic_compare_and_exchange_bool_acq(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, "", __MB) - -#define atomic_compare_and_exchange_val_acq(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, "", __MB) - -/* Compare and exchange with "release" semantics, ie barrier before. */ - -#define atomic_compare_and_exchange_bool_rel(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, __MB, "") - -#define atomic_compare_and_exchange_val_rel(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, __MB, "") - - -/* Atomically store value and return the previous value. 
*/ - -#define __arch_exchange_8_int(mem, value, mb1, mb2) \ -({ \ - unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \ - __asm__ __volatile__ ( \ - mb1 \ - " andnot %[__addr8],7,%[__addr64]\n" \ - " insbl %[__value],%[__addr8],%[__sval]\n" \ - "1: ldq_l %[__tmp],0(%[__addr64])\n" \ - " extbl %[__tmp],%[__addr8],%[__ret]\n" \ - " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \ - " or %[__sval],%[__tmp],%[__tmp]\n" \ - " stq_c %[__tmp],0(%[__addr64])\n" \ - " beq %[__tmp],1b\n" \ - mb2 \ - : [__ret] "=&r" (__ret), \ - [__sval] "=&r" (__sval), \ - [__tmp] "=&r" (__tmp), \ - [__addr64] "=&r" (__addr64) \ - : [__addr8] "r" (mem), \ - [__value] "r" (value) \ - : "memory"); \ - __ret; }) - -#define __arch_exchange_16_int(mem, value, mb1, mb2) \ -({ \ - unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \ - __asm__ __volatile__ ( \ - mb1 \ - " andnot %[__addr16],7,%[__addr64]\n" \ - " inswl %[__value],%[__addr16],%[__sval]\n" \ - "1: ldq_l %[__tmp],0(%[__addr64])\n" \ - " extwl %[__tmp],%[__addr16],%[__ret]\n" \ - " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \ - " or %[__sval],%[__tmp],%[__tmp]\n" \ - " stq_c %[__tmp],0(%[__addr64])\n" \ - " beq %[__tmp],1b\n" \ - mb2 \ - : [__ret] "=&r" (__ret), \ - [__sval] "=&r" (__sval), \ - [__tmp] "=&r" (__tmp), \ - [__addr64] "=&r" (__addr64) \ - : [__addr16] "r" (mem), \ - [__value] "r" (value) \ - : "memory"); \ - __ret; }) - -#define __arch_exchange_32_int(mem, value, mb1, mb2) \ -({ \ - signed int __tmp; __typeof(*mem) __ret; \ - __asm__ __volatile__ ( \ - mb1 \ - "1: ldl_l %[__ret],%[__mem]\n" \ - " mov %[__val],%[__tmp]\n" \ - " stl_c %[__tmp],%[__mem]\n" \ - " beq %[__tmp],1b\n" \ - mb2 \ - : [__ret] "=&r" (__ret), \ - [__tmp] "=&r" (__tmp) \ - : [__mem] "m" (*(mem)), \ - [__val] "Ir" (value) \ - : "memory"); \ - __ret; }) - -#define __arch_exchange_64_int(mem, value, mb1, mb2) \ -({ \ - unsigned long __tmp; __typeof(*mem) __ret; \ - __asm__ __volatile__ ( \ - mb1 \ - "1: ldq_l %[__ret],%[__mem]\n" \ - " mov %[__val],%[__tmp]\n" \ - " stq_c %[__tmp],%[__mem]\n" \ - " beq %[__tmp],1b\n" \ - mb2 \ - : [__ret] "=&r" (__ret), \ - [__tmp] "=&r" (__tmp) \ - : [__mem] "m" (*(mem)), \ - [__val] "Ir" (value) \ - : "memory"); \ - __ret; }) - -#define atomic_exchange_acq(mem, value) \ - __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB) - -#define atomic_exchange_rel(mem, value) \ - __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "") - - -/* Atomically add value and return the previous (unincremented) value. 
*/ - -#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \ - ({ __builtin_trap (); 0; }) - -#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \ - ({ __builtin_trap (); 0; }) - -#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2) \ -({ \ - signed int __tmp; __typeof(*mem) __ret; \ - __asm__ __volatile__ ( \ - mb1 \ - "1: ldl_l %[__ret],%[__mem]\n" \ - " addl %[__ret],%[__val],%[__tmp]\n" \ - " stl_c %[__tmp],%[__mem]\n" \ - " beq %[__tmp],1b\n" \ - mb2 \ - : [__ret] "=&r" (__ret), \ - [__tmp] "=&r" (__tmp) \ - : [__mem] "m" (*(mem)), \ - [__val] "Ir" ((signed int)(value)) \ - : "memory"); \ - __ret; }) - -#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2) \ -({ \ - unsigned long __tmp; __typeof(*mem) __ret; \ - __asm__ __volatile__ ( \ - mb1 \ - "1: ldq_l %[__ret],%[__mem]\n" \ - " addq %[__ret],%[__val],%[__tmp]\n" \ - " stq_c %[__tmp],%[__mem]\n" \ - " beq %[__tmp],1b\n" \ - mb2 \ - : [__ret] "=&r" (__ret), \ - [__tmp] "=&r" (__tmp) \ - : [__mem] "m" (*(mem)), \ - [__val] "Ir" ((unsigned long)(value)) \ - : "memory"); \ - __ret; }) - -/* ??? Barrier semantics for atomic_exchange_and_add appear to be - undefined. Use full barrier for now, as that's safe. */ -#define atomic_exchange_and_add(mem, value) \ - __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB) - - -/* ??? Blah, I'm lazy. Implement these later. Can do better than the - compare-and-exchange loop provided by generic code. - -#define atomic_decrement_if_positive(mem) -#define atomic_bit_test_set(mem, bit) - -*/ - -#ifndef UP -# define atomic_full_barrier() __asm ("mb" : : : "memory"); -# define atomic_read_barrier() __asm ("mb" : : : "memory"); -# define atomic_write_barrier() __asm ("wmb" : : : "memory"); -#endif diff --git a/sysdeps/arm/atomic-machine.h b/sysdeps/arm/atomic-machine.h new file mode 100644 index 0000000000..2a89a73f5b --- /dev/null +++ b/sysdeps/arm/atomic-machine.h @@ -0,0 +1,162 @@ +/* Atomic operations. Pure ARM version. + Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#include + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +void __arm_link_error (void); + +#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 +# define atomic_full_barrier() __sync_synchronize () +#else +# define atomic_full_barrier() __arm_assisted_full_barrier () +#endif + +/* An OS-specific atomic-machine.h file will define this macro if + the OS can provide something. 
+   If not, we'll fail to build
+   with a compiler that doesn't supply the operation.  */
+#ifndef __arm_assisted_full_barrier
+# define __arm_assisted_full_barrier() __arm_link_error()
+#endif
+
+/* Use the atomic builtins provided by GCC in case the backend provides
+   a pattern to do this efficiently.  */
+#if __GNUC_PREREQ (4, 7) && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+
+# define atomic_exchange_acq(mem, value) \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
+
+# define atomic_exchange_rel(mem, value) \
+  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
+
+/* Atomic exchange (without compare).  */
+
+# define __arch_exchange_8_int(mem, newval, model) \
+  (__arm_link_error (), (typeof (*mem)) 0)
+
+# define __arch_exchange_16_int(mem, newval, model) \
+  (__arm_link_error (), (typeof (*mem)) 0)
+
+# define __arch_exchange_32_int(mem, newval, model) \
+  __atomic_exchange_n (mem, newval, model)
+
+# define __arch_exchange_64_int(mem, newval, model) \
+  (__arm_link_error (), (typeof (*mem)) 0)
+
+/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */
+
+# define atomic_compare_and_exchange_bool_acq(mem, new, old) \
+  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
+                        mem, new, old, __ATOMIC_ACQUIRE)
+
+# define atomic_compare_and_exchange_val_acq(mem, new, old) \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
+                       mem, new, old, __ATOMIC_ACQUIRE)
+
+/* Compare and exchange with "release" semantics, i.e. barrier before.  */
+
+# define atomic_compare_and_exchange_bool_rel(mem, new, old) \
+  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
+                        mem, new, old, __ATOMIC_RELEASE)
+
+# define atomic_compare_and_exchange_val_rel(mem, new, old) \
+  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
+                       mem, new, old, __ATOMIC_RELEASE)
+
+/* Compare and exchange.
+   For all "bool" routines, we return FALSE if exchange successful.  */
+
+# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
+  ({ __arm_link_error (); 0; })
+
+# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
+  ({ __arm_link_error (); 0; })
+
+# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                  model, __ATOMIC_RELAXED); \
+  })
+
+# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
+  ({ __arm_link_error (); 0; })
+
+# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
+  ({ __arm_link_error (); oldval; })
+
+# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
+  ({ __arm_link_error (); oldval; })
+
+# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
+  ({ \
+    typeof (*mem) __oldval = (oldval); \
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
+                                 model, __ATOMIC_RELAXED); \
+    __oldval; \
+  })
+
+# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
+  ({ __arm_link_error (); oldval; })
+
+#elif defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+/* Atomic compare and exchange.  */
+# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  __sync_val_compare_and_swap ((mem), (oldval), (newval))
+#else
+# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  __arm_assisted_compare_and_exchange_val_32_acq ((mem), (newval), (oldval))
+#endif
+
+#if !__GNUC_PREREQ (4, 7) || !defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+/* We don't support atomic operations on any non-word types.
+   So make them link errors.  */
+# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+  ({ __arm_link_error (); oldval; })
+
+# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+  ({ __arm_link_error (); oldval; })
+
+# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+  ({ __arm_link_error (); oldval; })
+#endif
+
+/* An OS-specific atomic-machine.h file will define this macro if
+   the OS can provide something.  If not, we'll fail to build
+   with a compiler that doesn't supply the operation.  */
+#ifndef __arm_assisted_compare_and_exchange_val_32_acq
+# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  ({ __arm_link_error (); oldval; })
+#endif
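A standalone sketch of the link-error idiom the ARM header relies on,
for readers who have not seen it: an unsupported operation expands to
a call to a function that is declared but deliberately never defined,
so any accidental use is caught at link time instead of miscompiling
silently.  All names below are illustrative, not glibc's:

void __example_link_error (void);      /* Declared, never defined.  */

/* 64-bit exchange is "unsupported" in this sketch: any use compiles,
   but linking then fails with an undefined reference to
   __example_link_error.  */
#define example_exchange_64(mem, newval) \
  (__example_link_error (), (__typeof__ (*(mem))) 0)

int
main (void)
{
  int x = 0;
  __atomic_exchange_n (&x, 1, __ATOMIC_ACQUIRE);  /* 32-bit: fine.  */
  /* long long y = 0;
     example_exchange_64 (&y, 2LL);   <- would break the link.  */
  return 0;
}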
diff --git a/sysdeps/arm/bits/atomic.h b/sysdeps/arm/bits/atomic.h
deleted file mode 100644
index 772a354d44..0000000000
--- a/sysdeps/arm/bits/atomic.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/* Atomic operations.  Pure ARM version.
-   Copyright (C) 2002-2015 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library.  If not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <stdint.h>
-
-typedef int8_t atomic8_t;
-typedef uint8_t uatomic8_t;
-typedef int_fast8_t atomic_fast8_t;
-typedef uint_fast8_t uatomic_fast8_t;
-
-typedef int32_t atomic32_t;
-typedef uint32_t uatomic32_t;
-typedef int_fast32_t atomic_fast32_t;
-typedef uint_fast32_t uatomic_fast32_t;
-
-typedef intptr_t atomicptr_t;
-typedef uintptr_t uatomicptr_t;
-typedef intmax_t atomic_max_t;
-typedef uintmax_t uatomic_max_t;
-
-#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
-
-void __arm_link_error (void);
-
-#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
-# define atomic_full_barrier() __sync_synchronize ()
-#else
-# define atomic_full_barrier() __arm_assisted_full_barrier ()
-#endif
-
-/* An OS-specific bits/atomic.h file will define this macro if
-   the OS can provide something.  If not, we'll fail to build
-   with a compiler that doesn't supply the operation.  */
-#ifndef __arm_assisted_full_barrier
-# define __arm_assisted_full_barrier() __arm_link_error()
-#endif
-
-/* Use the atomic builtins provided by GCC in case the backend provides
-   a pattern to do this efficiently.
*/ -#if __GNUC_PREREQ (4, 7) && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 - -# define atomic_exchange_acq(mem, value) \ - __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE) - -# define atomic_exchange_rel(mem, value) \ - __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE) - -/* Atomic exchange (without compare). */ - -# define __arch_exchange_8_int(mem, newval, model) \ - (__arm_link_error (), (typeof (*mem)) 0) - -# define __arch_exchange_16_int(mem, newval, model) \ - (__arm_link_error (), (typeof (*mem)) 0) - -# define __arch_exchange_32_int(mem, newval, model) \ - __atomic_exchange_n (mem, newval, model) - -# define __arch_exchange_64_int(mem, newval, model) \ - (__arm_link_error (), (typeof (*mem)) 0) - -/* Compare and exchange with "acquire" semantics, ie barrier after. */ - -# define atomic_compare_and_exchange_bool_acq(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, __ATOMIC_ACQUIRE) - -# define atomic_compare_and_exchange_val_acq(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, __ATOMIC_ACQUIRE) - -/* Compare and exchange with "release" semantics, ie barrier before. */ - -# define atomic_compare_and_exchange_bool_rel(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, __ATOMIC_RELEASE) - -# define atomic_compare_and_exchange_val_rel(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, __ATOMIC_RELEASE) - -/* Compare and exchange. - For all "bool" routines, we return FALSE if exchange succesful. */ - -# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \ - ({__arm_link_error (); 0; }) - -# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \ - ({__arm_link_error (); 0; }) - -# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - }) - -# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \ - ({__arm_link_error (); 0; }) - -# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \ - ({__arm_link_error (); oldval; }) - -# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \ - ({__arm_link_error (); oldval; }) - -# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - __oldval; \ - }) - -# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \ - ({__arm_link_error (); oldval; }) - -#elif defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 -/* Atomic compare and exchange. */ -# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - __sync_val_compare_and_swap ((mem), (oldval), (newval)) -#else -# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - __arm_assisted_compare_and_exchange_val_32_acq ((mem), (newval), (oldval)) -#endif - -#if !__GNUC_PREREQ (4, 7) || !defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) -/* We don't support atomic operations on any non-word types. - So make them link errors. 
-   */
-# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
-  ({ __arm_link_error (); oldval; })
-
-# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
-  ({ __arm_link_error (); oldval; })
-
-# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
-  ({ __arm_link_error (); oldval; })
-#endif
-
-/* An OS-specific bits/atomic.h file will define this macro if
-   the OS can provide something.  If not, we'll fail to build
-   with a compiler that doesn't supply the operation.  */
-#ifndef __arm_assisted_compare_and_exchange_val_32_acq
-# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-  ({ __arm_link_error (); oldval; })
-#endif

diff --git a/sysdeps/generic/atomic-machine.h b/sysdeps/generic/atomic-machine.h
new file mode 100644
index 0000000000..c51f3ef5b0
--- /dev/null
+++ b/sysdeps/generic/atomic-machine.h
@@ -0,0 +1,42 @@
+/* Copyright (C) 2003-2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _ATOMIC_MACHINE_H
+#define _ATOMIC_MACHINE_H	1
+
+/* We have by default no support for atomic operations.  So define
+   them non-atomic.  If this is a problem somebody will have to come
+   up with real definitions.  */
+
+/* The only basic operation needed is compare and exchange.  */
+#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  ({ __typeof (mem) __gmemp = (mem); \
+     __typeof (*mem) __gret = *__gmemp; \
+     __typeof (*mem) __gnewval = (newval); \
+ \
+     if (__gret == (oldval)) \
+       *__gmemp = __gnewval; \
+     __gret; })
+
+#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  ({ __typeof (mem) __gmemp = (mem); \
+     __typeof (*mem) __gnewval = (newval); \
+ \
+     *__gmemp == (oldval) ? (*__gmemp = __gnewval, 0) : 1; })
+
+#endif	/* atomic-machine.h */
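The generic fallback above is deliberately not atomic; it only gives
configurations without real definitions something that compiles.  A
small single-threaded demonstration of its value-returning CAS
semantics (the macro body is copied in essence from the header into a
standalone program; the names are illustrative):

#include <stdio.h>

#define generic_cas_val_acq(mem, newval, oldval) \
  ({ __typeof (mem) __gmemp = (mem); \
     __typeof (*(mem)) __gret = *__gmemp; \
     __typeof (*(mem)) __gnewval = (newval); \
 \
     if (__gret == (oldval)) \
       *__gmemp = __gnewval; \
     __gret; })

int
main (void)
{
  int v = 5;
  int prev = generic_cas_val_acq (&v, 6, 5);  /* Matches: store.  */
  printf ("prev=%d v=%d\n", prev, v);         /* prev=5 v=6 */
  prev = generic_cas_val_acq (&v, 9, 5);      /* Stale: no store.  */
  printf ("prev=%d v=%d\n", prev, v);         /* prev=6 v=6 */
  return 0;
}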
*/ + + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + + +#ifndef LOCK_PREFIX +# ifdef UP +# define LOCK_PREFIX /* nothing */ +# else +# define LOCK_PREFIX "lock;" +# endif +#endif + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + __sync_val_compare_and_swap (mem, oldval, newval) +#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + (! __sync_bool_compare_and_swap (mem, oldval, newval)) + + +#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \ + "je 0f\n\t" \ + "lock\n" \ + "0:\tcmpxchgb %b2, %1" \ + : "=a" (ret), "=m" (*mem) \ + : "q" (newval), "m" (*mem), "0" (oldval), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + ret; }) + +#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \ + "je 0f\n\t" \ + "lock\n" \ + "0:\tcmpxchgw %w2, %1" \ + : "=a" (ret), "=m" (*mem) \ + : "r" (newval), "m" (*mem), "0" (oldval), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + ret; }) + +#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \ + "je 0f\n\t" \ + "lock\n" \ + "0:\tcmpxchgl %2, %1" \ + : "=a" (ret), "=m" (*mem) \ + : "r" (newval), "m" (*mem), "0" (oldval), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + ret; }) + +/* XXX We do not really need 64-bit compare-and-exchange. At least + not in the moment. Using it would mean causing portability + problems since not many other 32-bit architectures have support for + such an operation. So don't define any code for now. If it is + really going to be used the code below can be used on Intel Pentium + and later, but NOT on i486. 
*/ +#if 1 +# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret = *(mem); \ + abort (); \ + ret = (newval); \ + ret = (oldval); \ + ret; }) +# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret = *(mem); \ + abort (); \ + ret = (newval); \ + ret = (oldval); \ + ret; }) +#else +# ifdef __PIC__ +# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("xchgl %2, %%ebx\n\t" \ + LOCK_PREFIX "cmpxchg8b %1\n\t" \ + "xchgl %2, %%ebx" \ + : "=A" (ret), "=m" (*mem) \ + : "DS" (((unsigned long long int) (newval)) \ + & 0xffffffff), \ + "c" (((unsigned long long int) (newval)) >> 32), \ + "m" (*mem), "a" (((unsigned long long int) (oldval)) \ + & 0xffffffff), \ + "d" (((unsigned long long int) (oldval)) >> 32)); \ + ret; }) + +# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("xchgl %2, %%ebx\n\t" \ + "cmpl $0, %%gs:%P7\n\t" \ + "je 0f\n\t" \ + "lock\n" \ + "0:\tcmpxchg8b %1\n\t" \ + "xchgl %2, %%ebx" \ + : "=A" (ret), "=m" (*mem) \ + : "DS" (((unsigned long long int) (newval)) \ + & 0xffffffff), \ + "c" (((unsigned long long int) (newval)) >> 32), \ + "m" (*mem), "a" (((unsigned long long int) (oldval)) \ + & 0xffffffff), \ + "d" (((unsigned long long int) (oldval)) >> 32), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + ret; }) +# else +# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile (LOCK_PREFIX "cmpxchg8b %1" \ + : "=A" (ret), "=m" (*mem) \ + : "b" (((unsigned long long int) (newval)) \ + & 0xffffffff), \ + "c" (((unsigned long long int) (newval)) >> 32), \ + "m" (*mem), "a" (((unsigned long long int) (oldval)) \ + & 0xffffffff), \ + "d" (((unsigned long long int) (oldval)) >> 32)); \ + ret; }) + +# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("cmpl $0, %%gs:%P7\n\t" \ + "je 0f\n\t" \ + "lock\n" \ + "0:\tcmpxchg8b %1" \ + : "=A" (ret), "=m" (*mem) \ + : "b" (((unsigned long long int) (newval)) \ + & 0xffffffff), \ + "c" (((unsigned long long int) (newval)) >> 32), \ + "m" (*mem), "a" (((unsigned long long int) (oldval)) \ + & 0xffffffff), \ + "d" (((unsigned long long int) (oldval)) >> 32), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + ret; }) +# endif +#endif + + +/* Note that we need no lock prefix. 
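
One line of background for the comment above: on x86, XCHG with a memory operand asserts the bus lock implicitly, so the swap is atomic (and acts as a full barrier) even without LOCK_PREFIX. A standalone sketch of the 32-bit case (exchange_int is an illustrative name, not glibc API):

/* Sketch: xchg with a memory operand needs no lock prefix; it is
   implicitly locked and also serves as a full memory barrier.  */
static inline int
exchange_int (int *mem, int newval)
{
  int result = newval;
  __asm __volatile ("xchgl %0, %1"
		    : "+r" (result), "+m" (*mem)
		    : : "memory");
  return result;	/* the value previously stored in *mem */
}
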
*/ +#define atomic_exchange_acq(mem, newvalue) \ + ({ __typeof (*mem) result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile ("xchgb %b0, %1" \ + : "=q" (result), "=m" (*mem) \ + : "0" (newvalue), "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile ("xchgw %w0, %1" \ + : "=r" (result), "=m" (*mem) \ + : "0" (newvalue), "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile ("xchgl %0, %1" \ + : "=r" (result), "=m" (*mem) \ + : "0" (newvalue), "m" (*mem)); \ + else \ + { \ + result = 0; \ + abort (); \ + } \ + result; }) + + +#define __arch_exchange_and_add_body(lock, pfx, mem, value) \ + ({ __typeof (*mem) __result; \ + __typeof (value) __addval = (value); \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "xaddb %b0, %1" \ + : "=q" (__result), "=m" (*mem) \ + : "0" (__addval), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "xaddw %w0, %1" \ + : "=r" (__result), "=m" (*mem) \ + : "0" (__addval), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "xaddl %0, %1" \ + : "=r" (__result), "=m" (*mem) \ + : "0" (__addval), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + { \ + __typeof (mem) __memp = (mem); \ + __typeof (*mem) __tmpval; \ + __result = *__memp; \ + do \ + __tmpval = __result; \ + while ((__result = pfx##_compare_and_exchange_val_64_acq \ + (__memp, __result + __addval, __result)) == __tmpval); \ + } \ + __result; }) + +#define atomic_exchange_and_add(mem, value) \ + __sync_fetch_and_add (mem, value) + +#define __arch_exchange_and_add_cprefix \ + "cmpl $0, %%gs:%P4\n\tje 0f\n\tlock\n0:\t" + +#define catomic_exchange_and_add(mem, value) \ + __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c, \ + mem, value) + + +#define __arch_add_body(lock, pfx, mem, value) \ + do { \ + if (__builtin_constant_p (value) && (value) == 1) \ + atomic_increment (mem); \ + else if (__builtin_constant_p (value) && (value) == -1) \ + atomic_decrement (mem); \ + else if (sizeof (*mem) == 1) \ + __asm __volatile (lock "addb %b1, %0" \ + : "=m" (*mem) \ + : "iq" (value), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "addw %w1, %0" \ + : "=m" (*mem) \ + : "ir" (value), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "addl %1, %0" \ + : "=m" (*mem) \ + : "ir" (value), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + { \ + __typeof (value) __addval = (value); \ + __typeof (mem) __memp = (mem); \ + __typeof (*mem) __oldval = *__memp; \ + __typeof (*mem) __tmpval; \ + do \ + __tmpval = __oldval; \ + while ((__oldval = pfx##_compare_and_exchange_val_64_acq \ + (__memp, __oldval + __addval, __oldval)) == __tmpval); \ + } \ + } while (0) + +#define atomic_add(mem, value) \ + __arch_add_body (LOCK_PREFIX, __arch, mem, value) + +#define __arch_add_cprefix \ + "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t" + +#define catomic_add(mem, value) \ + __arch_add_body (__arch_add_cprefix, __arch_c, mem, value) + + +#define atomic_add_negative(mem, value) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "iq" (value), "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \ + : 
"=m" (*mem), "=qm" (__result) \ + : "ir" (value), "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "ir" (value), "m" (*mem)); \ + else \ + abort (); \ + __result; }) + + +#define atomic_add_zero(mem, value) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "iq" (value), "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "ir" (value), "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "ir" (value), "m" (*mem)); \ + else \ + abort (); \ + __result; }) + + +#define __arch_increment_body(lock, pfx, mem) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "incb %b0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "incw %w0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "incl %0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + { \ + __typeof (mem) __memp = (mem); \ + __typeof (*mem) __oldval = *__memp; \ + __typeof (*mem) __tmpval; \ + do \ + __tmpval = __oldval; \ + while ((__oldval = pfx##_compare_and_exchange_val_64_acq \ + (__memp, __oldval + 1, __oldval)) == __tmpval); \ + } \ + } while (0) + +#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem) + +#define __arch_increment_cprefix \ + "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t" + +#define catomic_increment(mem) \ + __arch_increment_body (__arch_increment_cprefix, __arch_c, mem) + + +#define atomic_increment_and_test(mem) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "incb %0; sete %b1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "incw %0; sete %w1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else \ + abort (); \ + __result; }) + + +#define __arch_decrement_body(lock, pfx, mem) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "decb %b0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "decw %w0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "decl %0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + { \ + __typeof (mem) __memp = (mem); \ + __typeof (*mem) __oldval = *__memp; \ + __typeof (*mem) __tmpval; \ + do \ + __tmpval = __oldval; \ + while ((__oldval = pfx##_compare_and_exchange_val_64_acq \ + (__memp, __oldval - 1, __oldval)) == __tmpval); \ + } \ + } while (0) + +#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem) + +#define __arch_decrement_cprefix \ + "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t" + +#define catomic_decrement(mem) \ + __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem) + + 
+#define atomic_decrement_and_test(mem) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else \ + abort (); \ + __result; }) + + +#define atomic_bit_set(mem, bit) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "orb %b2, %0" \ + : "=m" (*mem) \ + : "m" (*mem), "iq" (1 << (bit))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "orw %w2, %0" \ + : "=m" (*mem) \ + : "m" (*mem), "ir" (1 << (bit))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "orl %2, %0" \ + : "=m" (*mem) \ + : "m" (*mem), "ir" (1 << (bit))); \ + else \ + abort (); \ + } while (0) + + +#define atomic_bit_test_set(mem, bit) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \ + : "=q" (__result), "=m" (*mem) \ + : "m" (*mem), "ir" (bit)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \ + : "=q" (__result), "=m" (*mem) \ + : "m" (*mem), "ir" (bit)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \ + : "=q" (__result), "=m" (*mem) \ + : "m" (*mem), "ir" (bit)); \ + else \ + abort (); \ + __result; }) + + +#define atomic_spin_nop() asm ("rep; nop") + + +#define __arch_and_body(lock, mem, mask) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "andb %b1, %0" \ + : "=m" (*mem) \ + : "iq" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "andw %w1, %0" \ + : "=m" (*mem) \ + : "ir" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "andl %1, %0" \ + : "=m" (*mem) \ + : "ir" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + abort (); \ + } while (0) + +#define __arch_cprefix \ + "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t" + +#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask) + +#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask) + + +#define __arch_or_body(lock, mem, mask) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "orb %b1, %0" \ + : "=m" (*mem) \ + : "iq" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "orw %w1, %0" \ + : "=m" (*mem) \ + : "ir" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "orl %1, %0" \ + : "=m" (*mem) \ + : "ir" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + abort (); \ + } while (0) + +#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask) + +#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask) + +/* We don't use mfence because it is supposedly slower due to having to + provide stronger guarantees (e.g., regarding self-modifying code). 
*/ +#define atomic_full_barrier() \ + __asm __volatile (LOCK_PREFIX "orl $0, (%%esp)" ::: "memory") +#define atomic_read_barrier() __asm ("" ::: "memory") +#define atomic_write_barrier() __asm ("" ::: "memory") diff --git a/sysdeps/i386/bits/atomic.h b/sysdeps/i386/bits/atomic.h deleted file mode 100644 index 59f3d34871..0000000000 --- a/sysdeps/i386/bits/atomic.h +++ /dev/null @@ -1,544 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include -#include /* For tcbhead_t. */ - - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - - -#ifndef LOCK_PREFIX -# ifdef UP -# define LOCK_PREFIX /* nothing */ -# else -# define LOCK_PREFIX "lock;" -# endif -#endif - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - __sync_val_compare_and_swap (mem, oldval, newval) -#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ - (! __sync_bool_compare_and_swap (mem, oldval, newval)) - - -#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - ({ __typeof (*mem) ret; \ - __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \ - "je 0f\n\t" \ - "lock\n" \ - "0:\tcmpxchgb %b2, %1" \ - : "=a" (ret), "=m" (*mem) \ - : "q" (newval), "m" (*mem), "0" (oldval), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - ret; }) - -#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - ({ __typeof (*mem) ret; \ - __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \ - "je 0f\n\t" \ - "lock\n" \ - "0:\tcmpxchgw %w2, %1" \ - : "=a" (ret), "=m" (*mem) \ - : "r" (newval), "m" (*mem), "0" (oldval), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - ret; }) - -#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - ({ __typeof (*mem) ret; \ - __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \ - "je 0f\n\t" \ - "lock\n" \ - "0:\tcmpxchgl %2, %1" \ - : "=a" (ret), "=m" (*mem) \ - : "r" (newval), "m" (*mem), "0" (oldval), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - ret; }) - -/* XXX We do not really need 64-bit compare-and-exchange. At least - not in the moment. 
Using it would mean causing portability - problems since not many other 32-bit architectures have support for - such an operation. So don't define any code for now. If it is - really going to be used the code below can be used on Intel Pentium - and later, but NOT on i486. */ -#if 1 -# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - ({ __typeof (*mem) ret = *(mem); \ - abort (); \ - ret = (newval); \ - ret = (oldval); \ - ret; }) -# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - ({ __typeof (*mem) ret = *(mem); \ - abort (); \ - ret = (newval); \ - ret = (oldval); \ - ret; }) -#else -# ifdef __PIC__ -# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - ({ __typeof (*mem) ret; \ - __asm __volatile ("xchgl %2, %%ebx\n\t" \ - LOCK_PREFIX "cmpxchg8b %1\n\t" \ - "xchgl %2, %%ebx" \ - : "=A" (ret), "=m" (*mem) \ - : "DS" (((unsigned long long int) (newval)) \ - & 0xffffffff), \ - "c" (((unsigned long long int) (newval)) >> 32), \ - "m" (*mem), "a" (((unsigned long long int) (oldval)) \ - & 0xffffffff), \ - "d" (((unsigned long long int) (oldval)) >> 32)); \ - ret; }) - -# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - ({ __typeof (*mem) ret; \ - __asm __volatile ("xchgl %2, %%ebx\n\t" \ - "cmpl $0, %%gs:%P7\n\t" \ - "je 0f\n\t" \ - "lock\n" \ - "0:\tcmpxchg8b %1\n\t" \ - "xchgl %2, %%ebx" \ - : "=A" (ret), "=m" (*mem) \ - : "DS" (((unsigned long long int) (newval)) \ - & 0xffffffff), \ - "c" (((unsigned long long int) (newval)) >> 32), \ - "m" (*mem), "a" (((unsigned long long int) (oldval)) \ - & 0xffffffff), \ - "d" (((unsigned long long int) (oldval)) >> 32), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - ret; }) -# else -# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - ({ __typeof (*mem) ret; \ - __asm __volatile (LOCK_PREFIX "cmpxchg8b %1" \ - : "=A" (ret), "=m" (*mem) \ - : "b" (((unsigned long long int) (newval)) \ - & 0xffffffff), \ - "c" (((unsigned long long int) (newval)) >> 32), \ - "m" (*mem), "a" (((unsigned long long int) (oldval)) \ - & 0xffffffff), \ - "d" (((unsigned long long int) (oldval)) >> 32)); \ - ret; }) - -# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - ({ __typeof (*mem) ret; \ - __asm __volatile ("cmpl $0, %%gs:%P7\n\t" \ - "je 0f\n\t" \ - "lock\n" \ - "0:\tcmpxchg8b %1" \ - : "=A" (ret), "=m" (*mem) \ - : "b" (((unsigned long long int) (newval)) \ - & 0xffffffff), \ - "c" (((unsigned long long int) (newval)) >> 32), \ - "m" (*mem), "a" (((unsigned long long int) (oldval)) \ - & 0xffffffff), \ - "d" (((unsigned long long int) (oldval)) >> 32), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - ret; }) -# endif -#endif - - -/* Note that we need no lock prefix. 
*/ -#define atomic_exchange_acq(mem, newvalue) \ - ({ __typeof (*mem) result; \ - if (sizeof (*mem) == 1) \ - __asm __volatile ("xchgb %b0, %1" \ - : "=q" (result), "=m" (*mem) \ - : "0" (newvalue), "m" (*mem)); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile ("xchgw %w0, %1" \ - : "=r" (result), "=m" (*mem) \ - : "0" (newvalue), "m" (*mem)); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile ("xchgl %0, %1" \ - : "=r" (result), "=m" (*mem) \ - : "0" (newvalue), "m" (*mem)); \ - else \ - { \ - result = 0; \ - abort (); \ - } \ - result; }) - - -#define __arch_exchange_and_add_body(lock, pfx, mem, value) \ - ({ __typeof (*mem) __result; \ - __typeof (value) __addval = (value); \ - if (sizeof (*mem) == 1) \ - __asm __volatile (lock "xaddb %b0, %1" \ - : "=q" (__result), "=m" (*mem) \ - : "0" (__addval), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (lock "xaddw %w0, %1" \ - : "=r" (__result), "=m" (*mem) \ - : "0" (__addval), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (lock "xaddl %0, %1" \ - : "=r" (__result), "=m" (*mem) \ - : "0" (__addval), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else \ - { \ - __typeof (mem) __memp = (mem); \ - __typeof (*mem) __tmpval; \ - __result = *__memp; \ - do \ - __tmpval = __result; \ - while ((__result = pfx##_compare_and_exchange_val_64_acq \ - (__memp, __result + __addval, __result)) == __tmpval); \ - } \ - __result; }) - -#define atomic_exchange_and_add(mem, value) \ - __sync_fetch_and_add (mem, value) - -#define __arch_exchange_and_add_cprefix \ - "cmpl $0, %%gs:%P4\n\tje 0f\n\tlock\n0:\t" - -#define catomic_exchange_and_add(mem, value) \ - __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c, \ - mem, value) - - -#define __arch_add_body(lock, pfx, mem, value) \ - do { \ - if (__builtin_constant_p (value) && (value) == 1) \ - atomic_increment (mem); \ - else if (__builtin_constant_p (value) && (value) == -1) \ - atomic_decrement (mem); \ - else if (sizeof (*mem) == 1) \ - __asm __volatile (lock "addb %b1, %0" \ - : "=m" (*mem) \ - : "iq" (value), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (lock "addw %w1, %0" \ - : "=m" (*mem) \ - : "ir" (value), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (lock "addl %1, %0" \ - : "=m" (*mem) \ - : "ir" (value), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else \ - { \ - __typeof (value) __addval = (value); \ - __typeof (mem) __memp = (mem); \ - __typeof (*mem) __oldval = *__memp; \ - __typeof (*mem) __tmpval; \ - do \ - __tmpval = __oldval; \ - while ((__oldval = pfx##_compare_and_exchange_val_64_acq \ - (__memp, __oldval + __addval, __oldval)) == __tmpval); \ - } \ - } while (0) - -#define atomic_add(mem, value) \ - __arch_add_body (LOCK_PREFIX, __arch, mem, value) - -#define __arch_add_cprefix \ - "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t" - -#define catomic_add(mem, value) \ - __arch_add_body (__arch_add_cprefix, __arch_c, mem, value) - - -#define atomic_add_negative(mem, value) \ - ({ unsigned char __result; \ - if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \ - : "=m" (*mem), "=qm" (__result) \ - : "iq" (value), "m" (*mem)); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \ - : 
"=m" (*mem), "=qm" (__result) \ - : "ir" (value), "m" (*mem)); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \ - : "=m" (*mem), "=qm" (__result) \ - : "ir" (value), "m" (*mem)); \ - else \ - abort (); \ - __result; }) - - -#define atomic_add_zero(mem, value) \ - ({ unsigned char __result; \ - if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \ - : "=m" (*mem), "=qm" (__result) \ - : "iq" (value), "m" (*mem)); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \ - : "=m" (*mem), "=qm" (__result) \ - : "ir" (value), "m" (*mem)); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \ - : "=m" (*mem), "=qm" (__result) \ - : "ir" (value), "m" (*mem)); \ - else \ - abort (); \ - __result; }) - - -#define __arch_increment_body(lock, pfx, mem) \ - do { \ - if (sizeof (*mem) == 1) \ - __asm __volatile (lock "incb %b0" \ - : "=m" (*mem) \ - : "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (lock "incw %w0" \ - : "=m" (*mem) \ - : "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (lock "incl %0" \ - : "=m" (*mem) \ - : "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else \ - { \ - __typeof (mem) __memp = (mem); \ - __typeof (*mem) __oldval = *__memp; \ - __typeof (*mem) __tmpval; \ - do \ - __tmpval = __oldval; \ - while ((__oldval = pfx##_compare_and_exchange_val_64_acq \ - (__memp, __oldval + 1, __oldval)) == __tmpval); \ - } \ - } while (0) - -#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem) - -#define __arch_increment_cprefix \ - "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t" - -#define catomic_increment(mem) \ - __arch_increment_body (__arch_increment_cprefix, __arch_c, mem) - - -#define atomic_increment_and_test(mem) \ - ({ unsigned char __result; \ - if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "incb %0; sete %b1" \ - : "=m" (*mem), "=qm" (__result) \ - : "m" (*mem)); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "incw %0; sete %w1" \ - : "=m" (*mem), "=qm" (__result) \ - : "m" (*mem)); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \ - : "=m" (*mem), "=qm" (__result) \ - : "m" (*mem)); \ - else \ - abort (); \ - __result; }) - - -#define __arch_decrement_body(lock, pfx, mem) \ - do { \ - if (sizeof (*mem) == 1) \ - __asm __volatile (lock "decb %b0" \ - : "=m" (*mem) \ - : "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (lock "decw %w0" \ - : "=m" (*mem) \ - : "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (lock "decl %0" \ - : "=m" (*mem) \ - : "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else \ - { \ - __typeof (mem) __memp = (mem); \ - __typeof (*mem) __oldval = *__memp; \ - __typeof (*mem) __tmpval; \ - do \ - __tmpval = __oldval; \ - while ((__oldval = pfx##_compare_and_exchange_val_64_acq \ - (__memp, __oldval - 1, __oldval)) == __tmpval); \ - } \ - } while (0) - -#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem) - -#define __arch_decrement_cprefix \ - "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t" - -#define catomic_decrement(mem) \ - __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem) - - 
-#define atomic_decrement_and_test(mem) \ - ({ unsigned char __result; \ - if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \ - : "=m" (*mem), "=qm" (__result) \ - : "m" (*mem)); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \ - : "=m" (*mem), "=qm" (__result) \ - : "m" (*mem)); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \ - : "=m" (*mem), "=qm" (__result) \ - : "m" (*mem)); \ - else \ - abort (); \ - __result; }) - - -#define atomic_bit_set(mem, bit) \ - do { \ - if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "orb %b2, %0" \ - : "=m" (*mem) \ - : "m" (*mem), "iq" (1 << (bit))); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "orw %w2, %0" \ - : "=m" (*mem) \ - : "m" (*mem), "ir" (1 << (bit))); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "orl %2, %0" \ - : "=m" (*mem) \ - : "m" (*mem), "ir" (1 << (bit))); \ - else \ - abort (); \ - } while (0) - - -#define atomic_bit_test_set(mem, bit) \ - ({ unsigned char __result; \ - if (sizeof (*mem) == 1) \ - __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \ - : "=q" (__result), "=m" (*mem) \ - : "m" (*mem), "ir" (bit)); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \ - : "=q" (__result), "=m" (*mem) \ - : "m" (*mem), "ir" (bit)); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \ - : "=q" (__result), "=m" (*mem) \ - : "m" (*mem), "ir" (bit)); \ - else \ - abort (); \ - __result; }) - - -#define atomic_spin_nop() asm ("rep; nop") - - -#define __arch_and_body(lock, mem, mask) \ - do { \ - if (sizeof (*mem) == 1) \ - __asm __volatile (lock "andb %b1, %0" \ - : "=m" (*mem) \ - : "iq" (mask), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (lock "andw %w1, %0" \ - : "=m" (*mem) \ - : "ir" (mask), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (lock "andl %1, %0" \ - : "=m" (*mem) \ - : "ir" (mask), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else \ - abort (); \ - } while (0) - -#define __arch_cprefix \ - "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t" - -#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask) - -#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask) - - -#define __arch_or_body(lock, mem, mask) \ - do { \ - if (sizeof (*mem) == 1) \ - __asm __volatile (lock "orb %b1, %0" \ - : "=m" (*mem) \ - : "iq" (mask), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 2) \ - __asm __volatile (lock "orw %w1, %0" \ - : "=m" (*mem) \ - : "ir" (mask), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else if (sizeof (*mem) == 4) \ - __asm __volatile (lock "orl %1, %0" \ - : "=m" (*mem) \ - : "ir" (mask), "m" (*mem), \ - "i" (offsetof (tcbhead_t, multiple_threads))); \ - else \ - abort (); \ - } while (0) - -#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask) - -#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask) - -/* We don't use mfence because it is supposedly slower due to having to - provide stronger guarantees (e.g., regarding self-modifying code). 
*/ -#define atomic_full_barrier() \ - __asm __volatile (LOCK_PREFIX "orl $0, (%%esp)" ::: "memory") -#define atomic_read_barrier() __asm ("" ::: "memory") -#define atomic_write_barrier() __asm ("" ::: "memory") diff --git a/sysdeps/ia64/atomic-machine.h b/sysdeps/ia64/atomic-machine.h new file mode 100644 index 0000000000..4c2b54094c --- /dev/null +++ b/sysdeps/ia64/atomic-machine.h @@ -0,0 +1,119 @@ +/* Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 1 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ + (!__sync_bool_compare_and_swap ((mem), (int) (long) (oldval), \ + (int) (long) (newval))) + +#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ + (!__sync_bool_compare_and_swap ((mem), (long) (oldval), \ + (long) (newval))) + +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + __sync_val_compare_and_swap ((mem), (int) (long) (oldval), \ + (int) (long) (newval)) + +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + __sync_val_compare_and_swap ((mem), (long) (oldval), (long) (newval)) + +/* Atomically store newval and return the old value. 
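
GCC documents __sync_lock_test_and_set as an acquire barrier only, which is why the _rel variant below has to prepend a full __sync_synchronize. The builtin's classic use is a test-and-set spinlock; a minimal sketch (illustrative only, not part of this patch):

/* Atomically store 1 and get the old value, with acquire semantics;
   spin on plain reads while the lock is held.  */
static inline void
spin_lock (int *lock)
{
  while (__sync_lock_test_and_set (lock, 1))
    while (*lock)
      ;
}

static inline void
spin_unlock (int *lock)
{
  __sync_lock_release (lock);	/* release barrier, then store 0 */
}
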
*/ +#define atomic_exchange_acq(mem, value) \ + __sync_lock_test_and_set (mem, value) + +#define atomic_exchange_rel(mem, value) \ + (__sync_synchronize (), __sync_lock_test_and_set (mem, value)) + +#define atomic_exchange_and_add(mem, value) \ + __sync_fetch_and_add ((mem), (value)) + +#define atomic_decrement_if_positive(mem) \ + ({ __typeof (*mem) __oldval, __val; \ + __typeof (mem) __memp = (mem); \ + \ + __val = (*__memp); \ + do \ + { \ + __oldval = __val; \ + if (__builtin_expect (__val <= 0, 0)) \ + break; \ + __val = atomic_compare_and_exchange_val_acq (__memp, __oldval - 1, \ + __oldval); \ + } \ + while (__builtin_expect (__val != __oldval, 0)); \ + __oldval; }) + +#define atomic_bit_test_set(mem, bit) \ + ({ __typeof (*mem) __oldval, __val; \ + __typeof (mem) __memp = (mem); \ + __typeof (*mem) __mask = ((__typeof (*mem)) 1 << (bit)); \ + \ + __val = (*__memp); \ + do \ + { \ + __oldval = __val; \ + __val = atomic_compare_and_exchange_val_acq (__memp, \ + __oldval | __mask, \ + __oldval); \ + } \ + while (__builtin_expect (__val != __oldval, 0)); \ + __oldval & __mask; }) + +#define atomic_full_barrier() __sync_synchronize () diff --git a/sysdeps/ia64/bits/atomic.h b/sysdeps/ia64/bits/atomic.h deleted file mode 100644 index 4c2b54094c..0000000000 --- a/sysdeps/ia64/bits/atomic.h +++ /dev/null @@ -1,119 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ - -#include -#include - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 1 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ - (!__sync_bool_compare_and_swap ((mem), (int) (long) (oldval), \ - (int) (long) (newval))) - -#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ - (!__sync_bool_compare_and_swap ((mem), (long) (oldval), \ - (long) (newval))) - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - __sync_val_compare_and_swap ((mem), (int) (long) (oldval), \ - (int) (long) (newval)) - -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - __sync_val_compare_and_swap ((mem), (long) (oldval), (long) (newval)) - -/* Atomically store newval and return the old value. */ -#define atomic_exchange_acq(mem, value) \ - __sync_lock_test_and_set (mem, value) - -#define atomic_exchange_rel(mem, value) \ - (__sync_synchronize (), __sync_lock_test_and_set (mem, value)) - -#define atomic_exchange_and_add(mem, value) \ - __sync_fetch_and_add ((mem), (value)) - -#define atomic_decrement_if_positive(mem) \ - ({ __typeof (*mem) __oldval, __val; \ - __typeof (mem) __memp = (mem); \ - \ - __val = (*__memp); \ - do \ - { \ - __oldval = __val; \ - if (__builtin_expect (__val <= 0, 0)) \ - break; \ - __val = atomic_compare_and_exchange_val_acq (__memp, __oldval - 1, \ - __oldval); \ - } \ - while (__builtin_expect (__val != __oldval, 0)); \ - __oldval; }) - -#define atomic_bit_test_set(mem, bit) \ - ({ __typeof (*mem) __oldval, __val; \ - __typeof (mem) __memp = (mem); \ - __typeof (*mem) __mask = ((__typeof (*mem)) 1 << (bit)); \ - \ - __val = (*__memp); \ - do \ - { \ - __oldval = __val; \ - __val = atomic_compare_and_exchange_val_acq (__memp, \ - __oldval | __mask, \ - __oldval); \ - } \ - while (__builtin_expect (__val != __oldval, 0)); \ - __oldval & __mask; }) - -#define atomic_full_barrier() __sync_synchronize () diff --git a/sysdeps/m68k/coldfire/atomic-machine.h b/sysdeps/m68k/coldfire/atomic-machine.h new file mode 100644 index 0000000000..48899ce5ed --- /dev/null +++ b/sysdeps/m68k/coldfire/atomic-machine.h @@ -0,0 +1,72 @@ +/* Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _ATOMIC_MACHINE_H
+#define _ATOMIC_MACHINE_H	1
+
+#include <stdint.h>
+
+/* Coldfire has no atomic compare-and-exchange operation, and the
+   kernel provides no userspace atomicity operations.  Here we just
+   use generic non-atomic implementations instead.  */
+
+typedef int8_t atomic8_t;
+typedef uint8_t uatomic8_t;
+typedef int_fast8_t atomic_fast8_t;
+typedef uint_fast8_t uatomic_fast8_t;
+
+typedef int16_t atomic16_t;
+typedef uint16_t uatomic16_t;
+typedef int_fast16_t atomic_fast16_t;
+typedef uint_fast16_t uatomic_fast16_t;
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+typedef int_fast32_t atomic_fast32_t;
+typedef uint_fast32_t uatomic_fast32_t;
+
+typedef int64_t atomic64_t;
+typedef uint64_t uatomic64_t;
+typedef int_fast64_t atomic_fast64_t;
+typedef uint_fast64_t uatomic_fast64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+/* If we have just non-atomic operations, we can as well make them wide.  */
+#define __HAVE_64B_ATOMICS 1
+#define USE_ATOMIC_COMPILER_BUILTINS 0
+
+/* The only basic operation needed is compare and exchange.  */
+#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  ({ __typeof (mem) __gmemp = (mem);		\
+     __typeof (*mem) __gret = *__gmemp;		\
+     __typeof (*mem) __gnewval = (newval);	\
+						\
+     if (__gret == (oldval))			\
+       *__gmemp = __gnewval;			\
+     __gret; })
+
+#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  ({ __typeof (mem) __gmemp = (mem);		\
+     __typeof (*mem) __gnewval = (newval);	\
+						\
+     *__gmemp == (oldval) ? (*__gmemp = __gnewval, 0) : 1; })
+
+#endif
diff --git a/sysdeps/m68k/coldfire/bits/atomic.h b/sysdeps/m68k/coldfire/bits/atomic.h
deleted file mode 100644
index c09bf188ac..0000000000
--- a/sysdeps/m68k/coldfire/bits/atomic.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* Copyright (C) 2003-2015 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library.  If not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#ifndef _BITS_ATOMIC_H
-#define _BITS_ATOMIC_H	1
-
-#include <stdint.h>
-
-/* Coldfire has no atomic compare-and-exchange operation, and the
-   kernel provides no userspace atomicity operations.  Here we just
-   use generic non-atomic implementations instead.
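
Since ColdFire (like the generic fallback earlier in this patch) defines compare-and-exchange without any hardware atomicity, it is worth being explicit about what the macro does and does not guarantee. The same operation written out as a function (a sketch for illustration only; cas_val_sketch is not a glibc name):

static inline int
cas_val_sketch (int *mem, int newval, int oldval)
{
  int ret = *mem;	/* separate load ...                          */
  if (ret == oldval)
    *mem = newval;	/* ... and store: no atomicity whatsoever     */
  return ret;		/* old value; equals oldval on "success"      */
}

Another thread can interleave between the load and the store, so this is only sound where real atomicity is provided by other means or is not required.
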
*/ - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -/* If we have just non-atomic operations, we can as well make them wide. */ -#define __HAVE_64B_ATOMICS 1 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -/* The only basic operation needed is compare and exchange. */ -#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ __typeof (mem) __gmemp = (mem); \ - __typeof (*mem) __gret = *__gmemp; \ - __typeof (*mem) __gnewval = (newval); \ - \ - if (__gret == (oldval)) \ - *__gmemp = __gnewval; \ - __gret; }) - -#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ - ({ __typeof (mem) __gmemp = (mem); \ - __typeof (*mem) __gnewval = (newval); \ - \ - *__gmemp == (oldval) ? (*__gmemp = __gnewval, 0) : 1; }) - -#endif diff --git a/sysdeps/m68k/m680x0/m68020/atomic-machine.h b/sysdeps/m68k/m680x0/m68020/atomic-machine.h new file mode 100644 index 0000000000..9a29022af9 --- /dev/null +++ b/sysdeps/m68k/m680x0/m68020/atomic-machine.h @@ -0,0 +1,256 @@ +/* Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Andreas Schwab , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . 
*/ + +#include + + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 1 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + ({ __typeof (*(mem)) __ret; \ + __asm __volatile ("cas%.b %0,%2,%1" \ + : "=d" (__ret), "+m" (*(mem)) \ + : "d" (newval), "0" (oldval)); \ + __ret; }) + +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + ({ __typeof (*(mem)) __ret; \ + __asm __volatile ("cas%.w %0,%2,%1" \ + : "=d" (__ret), "+m" (*(mem)) \ + : "d" (newval), "0" (oldval)); \ + __ret; }) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ __typeof (*(mem)) __ret; \ + __asm __volatile ("cas%.l %0,%2,%1" \ + : "=d" (__ret), "+m" (*(mem)) \ + : "d" (newval), "0" (oldval)); \ + __ret; }) + +# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __typeof (*(mem)) __ret; \ + __typeof (mem) __memp = (mem); \ + __asm __volatile ("cas2%.l %0:%R0,%1:%R1,(%2):(%3)" \ + : "=d" (__ret) \ + : "d" (newval), "r" (__memp), \ + "r" ((char *) __memp + 4), "0" (oldval) \ + : "memory"); \ + __ret; }) + +#define atomic_exchange_acq(mem, newvalue) \ + ({ __typeof (*(mem)) __result = *(mem); \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("1: cas%.b %0,%2,%1;" \ + " jbne 1b" \ + : "=d" (__result), "+m" (*(mem)) \ + : "d" (newvalue), "0" (__result)); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("1: cas%.w %0,%2,%1;" \ + " jbne 1b" \ + : "=d" (__result), "+m" (*(mem)) \ + : "d" (newvalue), "0" (__result)); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("1: cas%.l %0,%2,%1;" \ + " jbne 1b" \ + : "=d" (__result), "+m" (*(mem)) \ + : "d" (newvalue), "0" (__result)); \ + else \ + { \ + __typeof (mem) __memp = (mem); \ + __asm __volatile ("1: cas2%.l %0:%R0,%1:%R1,(%2):(%3);" \ + " jbne 1b" \ + : "=d" (__result) \ + : "d" (newvalue), "r" (__memp), \ + "r" ((char *) __memp + 4), "0" (__result) \ + : "memory"); \ + } \ + __result; }) + +#define atomic_exchange_and_add(mem, value) \ + ({ __typeof (*(mem)) __result = *(mem); \ + __typeof (*(mem)) __temp; \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("1: move%.b %0,%2;" \ + " add%.b %3,%2;" \ + " cas%.b %0,%2,%1;" \ + " jbne 1b" \ + : "=d" (__result), "+m" (*(mem)), \ + "=&d" (__temp) \ + : "d" (value), "0" (__result)); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("1: move%.w %0,%2;" \ + " add%.w %3,%2;" \ + " cas%.w %0,%2,%1;" \ + " jbne 1b" \ + : "=d" (__result), "+m" (*(mem)), \ + "=&d" (__temp) \ + : "d" (value), "0" (__result)); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("1: move%.l %0,%2;" \ + " add%.l %3,%2;" \ + " cas%.l %0,%2,%1;" \ + " jbne 1b" \ + : "=d" (__result), "+m" (*(mem)), \ + "=&d" (__temp) \ + : "d" (value), "0" (__result)); \ + else \ + { \ + __typeof (mem) __memp = (mem); \ + __asm __volatile ("1: move%.l %0,%1;" \ + " 
move%.l %R0,%R1;" \ + " add%.l %R2,%R1;" \ + " addx%.l %2,%1;" \ + " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \ + " jbne 1b" \ + : "=d" (__result), "=&d" (__temp) \ + : "d" (value), "r" (__memp), \ + "r" ((char *) __memp + 4), "0" (__result) \ + : "memory"); \ + } \ + __result; }) + +#define atomic_add(mem, value) \ + (void) ({ if (sizeof (*(mem)) == 1) \ + __asm __volatile ("add%.b %1,%0" \ + : "+m" (*(mem)) \ + : "id" (value)); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("add%.w %1,%0" \ + : "+m" (*(mem)) \ + : "id" (value)); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("add%.l %1,%0" \ + : "+m" (*(mem)) \ + : "id" (value)); \ + else \ + { \ + __typeof (mem) __memp = (mem); \ + __typeof (*(mem)) __oldval = *__memp; \ + __typeof (*(mem)) __temp; \ + __asm __volatile ("1: move%.l %0,%1;" \ + " move%.l %R0,%R1;" \ + " add%.l %R2,%R1;" \ + " addx%.l %2,%1;" \ + " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \ + " jbne 1b" \ + : "=d" (__oldval), "=&d" (__temp) \ + : "d" (value), "r" (__memp), \ + "r" ((char *) __memp + 4), "0" (__oldval) \ + : "memory"); \ + } \ + }) + +#define atomic_increment_and_test(mem) \ + ({ char __result; \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("addq%.b %#1,%1; seq %0" \ + : "=dm" (__result), "+m" (*(mem))); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("addq%.w %#1,%1; seq %0" \ + : "=dm" (__result), "+m" (*(mem))); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("addq%.l %#1,%1; seq %0" \ + : "=dm" (__result), "+m" (*(mem))); \ + else \ + { \ + __typeof (mem) __memp = (mem); \ + __typeof (*(mem)) __oldval = *__memp; \ + __typeof (*(mem)) __temp; \ + __asm __volatile ("1: move%.l %1,%2;" \ + " move%.l %R1,%R2;" \ + " addq%.l %#1,%R2;" \ + " addx%.l %5,%2;" \ + " seq %0;" \ + " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \ + " jbne 1b" \ + : "=&dm" (__result), "=d" (__oldval), \ + "=&d" (__temp) \ + : "r" (__memp), "r" ((char *) __memp + 4), \ + "d" (0), "1" (__oldval) \ + : "memory"); \ + } \ + __result; }) + +#define atomic_decrement_and_test(mem) \ + ({ char __result; \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("subq%.b %#1,%1; seq %0" \ + : "=dm" (__result), "+m" (*(mem))); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("subq%.w %#1,%1; seq %0" \ + : "=dm" (__result), "+m" (*(mem))); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("subq%.l %#1,%1; seq %0" \ + : "=dm" (__result), "+m" (*(mem))); \ + else \ + { \ + __typeof (mem) __memp = (mem); \ + __typeof (*(mem)) __oldval = *__memp; \ + __typeof (*(mem)) __temp; \ + __asm __volatile ("1: move%.l %1,%2;" \ + " move%.l %R1,%R2;" \ + " subq%.l %#1,%R2;" \ + " subx%.l %5,%2;" \ + " seq %0;" \ + " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \ + " jbne 1b" \ + : "=&dm" (__result), "=d" (__oldval), \ + "=&d" (__temp) \ + : "r" (__memp), "r" ((char *) __memp + 4), \ + "d" (0), "1" (__oldval) \ + : "memory"); \ + } \ + __result; }) + +#define atomic_bit_set(mem, bit) \ + __asm __volatile ("bfset %0{%1,#1}" \ + : "+m" (*(mem)) \ + : "di" (sizeof (*(mem)) * 8 - (bit) - 1)) + +#define atomic_bit_test_set(mem, bit) \ + ({ char __result; \ + __asm __volatile ("bfset %1{%2,#1}; sne %0" \ + : "=dm" (__result), "+m" (*(mem)) \ + : "di" (sizeof (*(mem)) * 8 - (bit) - 1)); \ + __result; }) diff --git a/sysdeps/m68k/m680x0/m68020/bits/atomic.h b/sysdeps/m68k/m680x0/m68020/bits/atomic.h deleted file mode 100644 index 9a29022af9..0000000000 --- a/sysdeps/m68k/m680x0/m68020/bits/atomic.h +++ /dev/null @@ -1,256 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. 
- This file is part of the GNU C Library. - Contributed by Andreas Schwab , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . */ - -#include - - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 1 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - ({ __typeof (*(mem)) __ret; \ - __asm __volatile ("cas%.b %0,%2,%1" \ - : "=d" (__ret), "+m" (*(mem)) \ - : "d" (newval), "0" (oldval)); \ - __ret; }) - -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - ({ __typeof (*(mem)) __ret; \ - __asm __volatile ("cas%.w %0,%2,%1" \ - : "=d" (__ret), "+m" (*(mem)) \ - : "d" (newval), "0" (oldval)); \ - __ret; }) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - ({ __typeof (*(mem)) __ret; \ - __asm __volatile ("cas%.l %0,%2,%1" \ - : "=d" (__ret), "+m" (*(mem)) \ - : "d" (newval), "0" (oldval)); \ - __ret; }) - -# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - ({ __typeof (*(mem)) __ret; \ - __typeof (mem) __memp = (mem); \ - __asm __volatile ("cas2%.l %0:%R0,%1:%R1,(%2):(%3)" \ - : "=d" (__ret) \ - : "d" (newval), "r" (__memp), \ - "r" ((char *) __memp + 4), "0" (oldval) \ - : "memory"); \ - __ret; }) - -#define atomic_exchange_acq(mem, newvalue) \ - ({ __typeof (*(mem)) __result = *(mem); \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("1: cas%.b %0,%2,%1;" \ - " jbne 1b" \ - : "=d" (__result), "+m" (*(mem)) \ - : "d" (newvalue), "0" (__result)); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("1: cas%.w %0,%2,%1;" \ - " jbne 1b" \ - : "=d" (__result), "+m" (*(mem)) \ - : "d" (newvalue), "0" (__result)); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("1: cas%.l %0,%2,%1;" \ - " jbne 1b" \ - : "=d" (__result), "+m" (*(mem)) \ - : "d" (newvalue), "0" (__result)); \ - else \ - { \ - __typeof (mem) __memp = (mem); \ - __asm __volatile ("1: cas2%.l %0:%R0,%1:%R1,(%2):(%3);" \ - " jbne 1b" \ - : "=d" (__result) \ - : "d" (newvalue), "r" (__memp), \ - "r" ((char *) __memp + 4), "0" (__result) \ - : "memory"); \ - } \ - __result; }) - -#define atomic_exchange_and_add(mem, value) \ - ({ __typeof (*(mem)) __result = *(mem); \ - __typeof (*(mem)) __temp; \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("1: 
move%.b %0,%2;" \ - " add%.b %3,%2;" \ - " cas%.b %0,%2,%1;" \ - " jbne 1b" \ - : "=d" (__result), "+m" (*(mem)), \ - "=&d" (__temp) \ - : "d" (value), "0" (__result)); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("1: move%.w %0,%2;" \ - " add%.w %3,%2;" \ - " cas%.w %0,%2,%1;" \ - " jbne 1b" \ - : "=d" (__result), "+m" (*(mem)), \ - "=&d" (__temp) \ - : "d" (value), "0" (__result)); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("1: move%.l %0,%2;" \ - " add%.l %3,%2;" \ - " cas%.l %0,%2,%1;" \ - " jbne 1b" \ - : "=d" (__result), "+m" (*(mem)), \ - "=&d" (__temp) \ - : "d" (value), "0" (__result)); \ - else \ - { \ - __typeof (mem) __memp = (mem); \ - __asm __volatile ("1: move%.l %0,%1;" \ - " move%.l %R0,%R1;" \ - " add%.l %R2,%R1;" \ - " addx%.l %2,%1;" \ - " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \ - " jbne 1b" \ - : "=d" (__result), "=&d" (__temp) \ - : "d" (value), "r" (__memp), \ - "r" ((char *) __memp + 4), "0" (__result) \ - : "memory"); \ - } \ - __result; }) - -#define atomic_add(mem, value) \ - (void) ({ if (sizeof (*(mem)) == 1) \ - __asm __volatile ("add%.b %1,%0" \ - : "+m" (*(mem)) \ - : "id" (value)); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("add%.w %1,%0" \ - : "+m" (*(mem)) \ - : "id" (value)); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("add%.l %1,%0" \ - : "+m" (*(mem)) \ - : "id" (value)); \ - else \ - { \ - __typeof (mem) __memp = (mem); \ - __typeof (*(mem)) __oldval = *__memp; \ - __typeof (*(mem)) __temp; \ - __asm __volatile ("1: move%.l %0,%1;" \ - " move%.l %R0,%R1;" \ - " add%.l %R2,%R1;" \ - " addx%.l %2,%1;" \ - " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \ - " jbne 1b" \ - : "=d" (__oldval), "=&d" (__temp) \ - : "d" (value), "r" (__memp), \ - "r" ((char *) __memp + 4), "0" (__oldval) \ - : "memory"); \ - } \ - }) - -#define atomic_increment_and_test(mem) \ - ({ char __result; \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("addq%.b %#1,%1; seq %0" \ - : "=dm" (__result), "+m" (*(mem))); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("addq%.w %#1,%1; seq %0" \ - : "=dm" (__result), "+m" (*(mem))); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("addq%.l %#1,%1; seq %0" \ - : "=dm" (__result), "+m" (*(mem))); \ - else \ - { \ - __typeof (mem) __memp = (mem); \ - __typeof (*(mem)) __oldval = *__memp; \ - __typeof (*(mem)) __temp; \ - __asm __volatile ("1: move%.l %1,%2;" \ - " move%.l %R1,%R2;" \ - " addq%.l %#1,%R2;" \ - " addx%.l %5,%2;" \ - " seq %0;" \ - " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \ - " jbne 1b" \ - : "=&dm" (__result), "=d" (__oldval), \ - "=&d" (__temp) \ - : "r" (__memp), "r" ((char *) __memp + 4), \ - "d" (0), "1" (__oldval) \ - : "memory"); \ - } \ - __result; }) - -#define atomic_decrement_and_test(mem) \ - ({ char __result; \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("subq%.b %#1,%1; seq %0" \ - : "=dm" (__result), "+m" (*(mem))); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("subq%.w %#1,%1; seq %0" \ - : "=dm" (__result), "+m" (*(mem))); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("subq%.l %#1,%1; seq %0" \ - : "=dm" (__result), "+m" (*(mem))); \ - else \ - { \ - __typeof (mem) __memp = (mem); \ - __typeof (*(mem)) __oldval = *__memp; \ - __typeof (*(mem)) __temp; \ - __asm __volatile ("1: move%.l %1,%2;" \ - " move%.l %R1,%R2;" \ - " subq%.l %#1,%R2;" \ - " subx%.l %5,%2;" \ - " seq %0;" \ - " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \ - " jbne 1b" \ - : "=&dm" (__result), "=d" (__oldval), \ - "=&d" (__temp) \ - : "r" (__memp), "r" ((char *) __memp + 
4), \ - "d" (0), "1" (__oldval) \ - : "memory"); \ - } \ - __result; }) - -#define atomic_bit_set(mem, bit) \ - __asm __volatile ("bfset %0{%1,#1}" \ - : "+m" (*(mem)) \ - : "di" (sizeof (*(mem)) * 8 - (bit) - 1)) - -#define atomic_bit_test_set(mem, bit) \ - ({ char __result; \ - __asm __volatile ("bfset %1{%2,#1}; sne %0" \ - : "=dm" (__result), "+m" (*(mem)) \ - : "di" (sizeof (*(mem)) * 8 - (bit) - 1)); \ - __result; }) diff --git a/sysdeps/microblaze/atomic-machine.h b/sysdeps/microblaze/atomic-machine.h new file mode 100644 index 0000000000..f1cbf43876 --- /dev/null +++ b/sysdeps/microblaze/atomic-machine.h @@ -0,0 +1,272 @@ +/* Copyright (C) 2003-2015 Free Software Foundation, Inc. + + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#include +#include + + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +/* Microblaze does not have byte and halfword forms of load and reserve and + store conditional. So for microblaze we stub out the 8- and 16-bit forms. 
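Real code is provided only for the widths the hardware can reserve; every other width becomes an (abort (), 0) stub below, and the size dispatch happens at the atomic_* layer. A minimal stand-alone sketch of that shape, assuming GCC statement expressions and the __atomic builtins as a substitute for the lwx/swx loops (the demo_* name is illustrative, not part of any header):

#include <stdint.h>
#include <stdlib.h>

/* Word-sized CAS; any other width traps, mirroring the stubs below.
   Returns the previous value, so it equals (oldval) exactly when the
   exchange was performed.  */
#define demo_cas_val_acq(mem, newval, oldval)                          \
  ({ __typeof (*(mem)) __demo_prev = (oldval);                         \
     if (sizeof (*(mem)) != 4)                                         \
       abort ();                                                       \
     __atomic_compare_exchange_n ((mem), &__demo_prev, (newval), 0,    \
                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); \
     __demo_prev; })

Under those assumptions, demo_cas_val_acq (&v, 1, 0) yields 0 exactly when it installed the 1.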
*/ +#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + int test; \ + __asm __volatile ( \ + " addc r0, r0, r0;" \ + "1: lwx %0, %3, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + " cmp %1, %0, %4;" \ + " bnei %1, 2f;" \ + " swx %5, %3, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + "2:" \ + : "=&r" (__tmp), \ + "=&r" (test), \ + "=m" (*__memp) \ + : "r" (__memp), \ + "r" (oldval), \ + "r" (newval) \ + : "cc", "memory"); \ + __tmp; \ + }) + +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \ + else \ + abort (); \ + __result; \ + }) + +#define __arch_atomic_exchange_32_acq(mem, value) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + int test; \ + __asm __volatile ( \ + " addc r0, r0, r0;" \ + "1: lwx %0, %4, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + " swx %3, %4, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + : "=&r" (__tmp), \ + "=&r" (test), \ + "=m" (*__memp) \ + : "r" (value), \ + "r" (__memp) \ + : "cc", "memory"); \ + __tmp; \ + }) + +#define __arch_atomic_exchange_64_acq(mem, newval) \ + (abort (), (__typeof (*mem)) 0) + +#define atomic_exchange_acq(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_32_acq (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_64_acq (mem, value); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_exchange_rel(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_32_acq (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_64_acq (mem, value); \ + else \ + abort (); \ + __result; \ + }) + +#define __arch_atomic_exchange_and_add_32(mem, value) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + int test; \ + __asm __volatile ( \ + " addc r0, r0, r0;" \ + "1: lwx %0, %4, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + " add %1, %3, %0;" \ + " swx %1, %4, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + : "=&r" (__tmp), \ + "=&r" (test), \ + "=m" (*__memp) \ + : "r" (value), \ + "r" (__memp) \ + : "cc", "memory"); \ + __tmp; \ + }) + +#define __arch_atomic_exchange_and_add_64(mem, value) \ + (abort (), (__typeof (*mem)) 0) + +#define atomic_exchange_and_add(mem, value) \ + ({ \ + __typeof (*(mem)) 
__result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_and_add_32 (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_and_add_64 (mem, value); \ + else \ + abort (); \ + __result; \ + }) + +#define __arch_atomic_increment_val_32(mem) \ + ({ \ + __typeof (*(mem)) __val; \ + int test; \ + __asm __volatile ( \ + " addc r0, r0, r0;" \ + "1: lwx %0, %3, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + " addi %0, %0, 1;" \ + " swx %0, %3, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + : "=&r" (__val), \ + "=&r" (test), \ + "=m" (*mem) \ + : "r" (mem), \ + "m" (*mem) \ + : "cc", "memory"); \ + __val; \ + }) + +#define __arch_atomic_increment_val_64(mem) \ + (abort (), (__typeof (*mem)) 0) + +#define atomic_increment_val(mem) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*(mem)) == 4) \ + __result = __arch_atomic_increment_val_32 (mem); \ + else if (sizeof (*(mem)) == 8) \ + __result = __arch_atomic_increment_val_64 (mem); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; }) + +#define __arch_atomic_decrement_val_32(mem) \ + ({ \ + __typeof (*(mem)) __val; \ + int test; \ + __asm __volatile ( \ + " addc r0, r0, r0;" \ + "1: lwx %0, %3, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + " rsubi %0, %0, 1;" \ + " swx %0, %3, r0;" \ + " addic %1, r0, 0;" \ + " bnei %1, 1b;" \ + : "=&r" (__val), \ + "=&r" (test), \ + "=m" (*mem) \ + : "r" (mem), \ + "m" (*mem) \ + : "cc", "memory"); \ + __val; \ + }) + +#define __arch_atomic_decrement_val_64(mem) \ + (abort (), (__typeof (*mem)) 0) + +#define atomic_decrement_val(mem) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*(mem)) == 4) \ + __result = __arch_atomic_decrement_val_32 (mem); \ + else if (sizeof (*(mem)) == 8) \ + __result = __arch_atomic_decrement_val_64 (mem); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; }) diff --git a/sysdeps/microblaze/bits/atomic.h b/sysdeps/microblaze/bits/atomic.h deleted file mode 100644 index f1cbf43876..0000000000 --- a/sysdeps/microblaze/bits/atomic.h +++ /dev/null @@ -1,272 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . 
*/ - -#include -#include - - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -/* Microblaze does not have byte and halfword forms of load and reserve and - store conditional. So for microblaze we stub out the 8- and 16-bit forms. */ -#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - ({ \ - __typeof (*(mem)) __tmp; \ - __typeof (mem) __memp = (mem); \ - int test; \ - __asm __volatile ( \ - " addc r0, r0, r0;" \ - "1: lwx %0, %3, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - " cmp %1, %0, %4;" \ - " bnei %1, 2f;" \ - " swx %5, %3, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - "2:" \ - : "=&r" (__tmp), \ - "=&r" (test), \ - "=m" (*__memp) \ - : "r" (__memp), \ - "r" (oldval), \ - "r" (newval) \ - : "cc", "memory"); \ - __tmp; \ - }) - -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \ - else \ - abort (); \ - __result; \ - }) - -#define __arch_atomic_exchange_32_acq(mem, value) \ - ({ \ - __typeof (*(mem)) __tmp; \ - __typeof (mem) __memp = (mem); \ - int test; \ - __asm __volatile ( \ - " addc r0, r0, r0;" \ - "1: lwx %0, %4, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - " swx %3, %4, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - : "=&r" (__tmp), \ - "=&r" (test), \ - "=m" (*__memp) \ - : "r" (value), \ - "r" (__memp) \ - : "cc", "memory"); \ - __tmp; \ - }) - -#define __arch_atomic_exchange_64_acq(mem, newval) \ - (abort (), (__typeof (*mem)) 0) - -#define atomic_exchange_acq(mem, value) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_atomic_exchange_32_acq (mem, value); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_atomic_exchange_64_acq (mem, value); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_exchange_rel(mem, value) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_atomic_exchange_32_acq (mem, value); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_atomic_exchange_64_acq (mem, value); \ - else \ - abort (); \ - 
__result; \ - }) - -#define __arch_atomic_exchange_and_add_32(mem, value) \ - ({ \ - __typeof (*(mem)) __tmp; \ - __typeof (mem) __memp = (mem); \ - int test; \ - __asm __volatile ( \ - " addc r0, r0, r0;" \ - "1: lwx %0, %4, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - " add %1, %3, %0;" \ - " swx %1, %4, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - : "=&r" (__tmp), \ - "=&r" (test), \ - "=m" (*__memp) \ - : "r" (value), \ - "r" (__memp) \ - : "cc", "memory"); \ - __tmp; \ - }) - -#define __arch_atomic_exchange_and_add_64(mem, value) \ - (abort (), (__typeof (*mem)) 0) - -#define atomic_exchange_and_add(mem, value) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_atomic_exchange_and_add_32 (mem, value); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_atomic_exchange_and_add_64 (mem, value); \ - else \ - abort (); \ - __result; \ - }) - -#define __arch_atomic_increment_val_32(mem) \ - ({ \ - __typeof (*(mem)) __val; \ - int test; \ - __asm __volatile ( \ - " addc r0, r0, r0;" \ - "1: lwx %0, %3, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - " addi %0, %0, 1;" \ - " swx %0, %3, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - : "=&r" (__val), \ - "=&r" (test), \ - "=m" (*mem) \ - : "r" (mem), \ - "m" (*mem) \ - : "cc", "memory"); \ - __val; \ - }) - -#define __arch_atomic_increment_val_64(mem) \ - (abort (), (__typeof (*mem)) 0) - -#define atomic_increment_val(mem) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*(mem)) == 4) \ - __result = __arch_atomic_increment_val_32 (mem); \ - else if (sizeof (*(mem)) == 8) \ - __result = __arch_atomic_increment_val_64 (mem); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; }) - -#define __arch_atomic_decrement_val_32(mem) \ - ({ \ - __typeof (*(mem)) __val; \ - int test; \ - __asm __volatile ( \ - " addc r0, r0, r0;" \ - "1: lwx %0, %3, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - " rsubi %0, %0, 1;" \ - " swx %0, %3, r0;" \ - " addic %1, r0, 0;" \ - " bnei %1, 1b;" \ - : "=&r" (__val), \ - "=&r" (test), \ - "=m" (*mem) \ - : "r" (mem), \ - "m" (*mem) \ - : "cc", "memory"); \ - __val; \ - }) - -#define __arch_atomic_decrement_val_64(mem) \ - (abort (), (__typeof (*mem)) 0) - -#define atomic_decrement_val(mem) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*(mem)) == 4) \ - __result = __arch_atomic_decrement_val_32 (mem); \ - else if (sizeof (*(mem)) == 8) \ - __result = __arch_atomic_decrement_val_64 (mem); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; }) diff --git a/sysdeps/mips/atomic-machine.h b/sysdeps/mips/atomic-machine.h new file mode 100644 index 0000000000..6db8fee1d4 --- /dev/null +++ b/sysdeps/mips/atomic-machine.h @@ -0,0 +1,503 @@ +/* Low-level functions for atomic operations. Mips version. + Copyright (C) 2005-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#ifndef _MIPS_ATOMIC_MACHINE_H +#define _MIPS_ATOMIC_MACHINE_H 1 + +#include +#include +#include + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#if _MIPS_SIM == _ABIO32 && __mips < 2 +#define MIPS_PUSH_MIPS2 ".set mips2\n\t" +#else +#define MIPS_PUSH_MIPS2 +#endif + +#if _MIPS_SIM == _ABIO32 || _MIPS_SIM == _ABIN32 +#define __HAVE_64B_ATOMICS 0 +#else +#define __HAVE_64B_ATOMICS 1 +#endif + +/* See the comments in about the use of the sync instruction. */ +#ifndef MIPS_SYNC +# define MIPS_SYNC sync +#endif + +/* Certain revisions of the R10000 Processor need an LL/SC Workaround + enabled. Revisions before 3.0 misbehave on atomic operations, and + Revs 2.6 and lower deadlock after several seconds due to other errata. + + To quote the R10K Errata: + Workaround: The basic idea is to inhibit the four instructions + from simultaneously becoming active in R10000. Padding all + ll/sc sequences with nops or changing the looping branch in the + routines to a branch likely (which is always predicted taken + by R10000) will work. The nops should go after the loop, and the + number of them should be 28. This number could be decremented for + each additional instruction in the ll/sc loop such as the lock + modifier(s) between the ll and sc, the looping branch and its + delay slot. For typical short routines with one ll/sc loop, any + instructions after the loop could also count as a decrement. The + nop workaround pollutes the cache more but would be a few cycles + faster if all the code is in the cache and the looping branch + is predicted not taken. */ + + +#ifdef _MIPS_ARCH_R10000 +#define R10K_BEQZ_INSN "beqzl" +#else +#define R10K_BEQZ_INSN "beqz" +#endif + +#define MIPS_SYNC_STR_2(X) #X +#define MIPS_SYNC_STR_1(X) MIPS_SYNC_STR_2(X) +#define MIPS_SYNC_STR MIPS_SYNC_STR_1(MIPS_SYNC) + +#if __GNUC_PREREQ (4, 8) || (defined __mips16 && __GNUC_PREREQ (4, 7)) +/* The __atomic_* builtins are available in GCC 4.7 and later, but MIPS + support for their efficient implementation was added only in GCC 4.8. + We still want to use them even with GCC 4.7 for MIPS16 code where we + have no assembly alternative available and want to avoid the __sync_* + builtins if at all possible. */ + +#define USE_ATOMIC_COMPILER_BUILTINS 1 + +/* Compare and exchange. + For all "bool" routines, we return FALSE if exchange successful.
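That inversion is easy to trip over, since the underlying builtin returns true on success. A hedged, stand-alone illustration of how the bool convention maps onto it (names here are invented, not glibc's):

#include <stdint.h>

/* Returns 0 (FALSE) when the exchange happened -- the negation of
   __atomic_compare_exchange_n's own result.  */
static inline int
demo_cas_bool_acq (int32_t *mem, int32_t newval, int32_t oldval)
{
  int32_t expected = oldval;
  return !__atomic_compare_exchange_n (mem, &expected, newval, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

So demo_cas_bool_acq (&x, 1, 0) yields 0 precisely when it installed the 1.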
*/ + +# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \ + (abort (), 0) + +# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \ + (abort (), 0) + +# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \ + ({ \ + typeof (*mem) __oldval = (oldval); \ + !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ + model, __ATOMIC_RELAXED); \ + }) + +# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \ + ({ \ + typeof (*mem) __oldval = (oldval); \ + __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ + model, __ATOMIC_RELAXED); \ + __oldval; \ + }) + +# if _MIPS_SIM == _ABIO32 + /* We can't do an atomic 64-bit operation in O32. */ +# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \ + (abort (), 0) +# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \ + (abort (), (typeof(*mem)) 0) +# else +# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \ + __arch_compare_and_exchange_bool_32_int (mem, newval, oldval, model) +# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \ + __arch_compare_and_exchange_val_32_int (mem, newval, oldval, model) +# endif + +/* Compare and exchange with "acquire" semantics, ie barrier after. */ + +# define atomic_compare_and_exchange_bool_acq(mem, new, old) \ + __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ + mem, new, old, __ATOMIC_ACQUIRE) + +# define atomic_compare_and_exchange_val_acq(mem, new, old) \ + __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ + mem, new, old, __ATOMIC_ACQUIRE) + +/* Compare and exchange with "release" semantics, ie barrier before. */ + +# define atomic_compare_and_exchange_bool_rel(mem, new, old) \ + __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ + mem, new, old, __ATOMIC_RELEASE) + +# define atomic_compare_and_exchange_val_rel(mem, new, old) \ + __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ + mem, new, old, __ATOMIC_RELEASE) + + +/* Atomic exchange (without compare). */ + +# define __arch_exchange_8_int(mem, newval, model) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_exchange_16_int(mem, newval, model) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_exchange_32_int(mem, newval, model) \ + __atomic_exchange_n (mem, newval, model) + +# if _MIPS_SIM == _ABIO32 +/* We can't do an atomic 64-bit operation in O32. */ +# define __arch_exchange_64_int(mem, newval, model) \ + (abort (), (typeof(*mem)) 0) +# else +# define __arch_exchange_64_int(mem, newval, model) \ + __atomic_exchange_n (mem, newval, model) +# endif + +# define atomic_exchange_acq(mem, value) \ + __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE) + +# define atomic_exchange_rel(mem, value) \ + __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE) + + +/* Atomically add value and return the previous (unincremented) value. 
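Returning the unincremented value is what makes the operation composable. A usage sketch, assuming __atomic_fetch_add as a stand-in for the atomic_exchange_and_add_* macros defined here (demo_* names are illustrative):

#include <stdint.h>

static int32_t demo_waiters;

/* True only for the thread that performed the 0 -> 1 transition,
   because the value returned is the one seen *before* the add.  */
static inline int
demo_first_waiter (void)
{
  return __atomic_fetch_add (&demo_waiters, 1, __ATOMIC_ACQUIRE) == 0;
}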
*/ + +# define __arch_exchange_and_add_8_int(mem, value, model) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_exchange_and_add_16_int(mem, value, model) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_exchange_and_add_32_int(mem, value, model) \ + __atomic_fetch_add (mem, value, model) + +# if _MIPS_SIM == _ABIO32 +/* We can't do an atomic 64-bit operation in O32. */ +# define __arch_exchange_and_add_64_int(mem, value, model) \ + (abort (), (typeof(*mem)) 0) +# else +# define __arch_exchange_and_add_64_int(mem, value, model) \ + __atomic_fetch_add (mem, value, model) +# endif + +# define atomic_exchange_and_add_acq(mem, value) \ + __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ + __ATOMIC_ACQUIRE) + +# define atomic_exchange_and_add_rel(mem, value) \ + __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ + __ATOMIC_RELEASE) + +#elif defined __mips16 /* !__GNUC_PREREQ (4, 7) */ +/* This implementation using __sync* builtins will be removed once glibc + requires GCC 4.7 or later to build. */ + +# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + __sync_val_compare_and_swap ((mem), (oldval), (newval)) +# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + (!__sync_bool_compare_and_swap ((mem), (oldval), (newval))) + +# define atomic_exchange_acq(mem, newval) \ + __sync_lock_test_and_set ((mem), (newval)) + +# define atomic_exchange_and_add(mem, val) \ + __sync_fetch_and_add ((mem), (val)) + +# define atomic_bit_test_set(mem, bit) \ + ({ __typeof (bit) __bit = (bit); \ + (__sync_fetch_and_or ((mem), 1 << (__bit)) & (1 << (__bit))); }) + +# define atomic_and(mem, mask) (void) __sync_fetch_and_and ((mem), (mask)) +# define atomic_and_val(mem, mask) __sync_fetch_and_and ((mem), (mask)) + +# define atomic_or(mem, mask) (void) __sync_fetch_and_or ((mem), (mask)) +# define atomic_or_val(mem, mask) __sync_fetch_and_or ((mem), (mask)) + +#else /* !__mips16 && !__GNUC_PREREQ (4, 8) */ +/* This implementation using inline assembly will be removed once glibc + requires GCC 4.8 or later to build. */ + +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +/* Compare and exchange. For all of the "xxx" routines, we expect a + "__prev" and a "__cmp" variable to be provided by the enclosing scope, + in which values are returned. */ + +# define __arch_compare_and_exchange_xxx_8_int(mem, newval, oldval, rel, acq) \ + (abort (), __prev = 0, __cmp = 0, (void) __cmp) + +# define __arch_compare_and_exchange_xxx_16_int(mem, newval, oldval, rel, acq) \ + (abort (), __prev = 0, __cmp = 0, (void) __cmp) + +# define __arch_compare_and_exchange_xxx_32_int(mem, newval, oldval, rel, acq) \ + __asm__ __volatile__ ( \ + ".set push\n\t" \ + MIPS_PUSH_MIPS2 \ + rel "\n" \ + "1:\t" \ + "ll %0,%5\n\t" \ + "move %1,$0\n\t" \ + "bne %0,%3,2f\n\t" \ + "move %1,%4\n\t" \ + "sc %1,%2\n\t" \ + R10K_BEQZ_INSN" %1,1b\n" \ + acq "\n\t" \ + ".set pop\n" \ + "2:\n\t" \ + : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ + : "r" (oldval), "r" (newval), "m" (*mem) \ + : "memory") + +# if _MIPS_SIM == _ABIO32 +/* We can't do an atomic 64-bit operation in O32. 
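Callers are expected to stay off those aborting paths by keying on __HAVE_64B_ATOMICS, which this header already derives from _MIPS_SIM, rather than probing the ABI themselves. A hedged sketch of such a consumer (demo_counter_t is invented for illustration):

#include <stdint.h>

/* Pick a counter width the target can actually update atomically; under
   O32/N32 the 64-bit arm of the dispatch would only ever reach abort ().  */
#if __HAVE_64B_ATOMICS
typedef uint64_t demo_counter_t;
#else
typedef uint32_t demo_counter_t;
#endif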
*/ +# define __arch_compare_and_exchange_xxx_64_int(mem, newval, oldval, rel, acq) \ + (abort (), __prev = 0, __cmp = 0, (void) __cmp) +# else +# define __arch_compare_and_exchange_xxx_64_int(mem, newval, oldval, rel, acq) \ + __asm__ __volatile__ ("\n" \ + ".set push\n\t" \ + MIPS_PUSH_MIPS2 \ + rel "\n" \ + "1:\t" \ + "lld %0,%5\n\t" \ + "move %1,$0\n\t" \ + "bne %0,%3,2f\n\t" \ + "move %1,%4\n\t" \ + "scd %1,%2\n\t" \ + R10K_BEQZ_INSN" %1,1b\n" \ + acq "\n\t" \ + ".set pop\n" \ + "2:\n\t" \ + : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ + : "r" (oldval), "r" (newval), "m" (*mem) \ + : "memory") +# endif + +/* For all "bool" routines, we return FALSE if exchange successful. */ + +# define __arch_compare_and_exchange_bool_8_int(mem, new, old, rel, acq) \ +({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp; \ + __arch_compare_and_exchange_xxx_8_int(mem, new, old, rel, acq); \ + !__cmp; }) + +# define __arch_compare_and_exchange_bool_16_int(mem, new, old, rel, acq) \ +({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp; \ + __arch_compare_and_exchange_xxx_16_int(mem, new, old, rel, acq); \ + !__cmp; }) + +# define __arch_compare_and_exchange_bool_32_int(mem, new, old, rel, acq) \ +({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp; \ + __arch_compare_and_exchange_xxx_32_int(mem, new, old, rel, acq); \ + !__cmp; }) + +# define __arch_compare_and_exchange_bool_64_int(mem, new, old, rel, acq) \ +({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp; \ + __arch_compare_and_exchange_xxx_64_int(mem, new, old, rel, acq); \ + !__cmp; }) + +/* For all "val" routines, return the old value whether exchange + successful or not. */ + +# define __arch_compare_and_exchange_val_8_int(mem, new, old, rel, acq) \ +({ typeof (*mem) __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_8_int(mem, new, old, rel, acq); \ + (typeof (*mem))__prev; }) + +# define __arch_compare_and_exchange_val_16_int(mem, new, old, rel, acq) \ +({ typeof (*mem) __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_16_int(mem, new, old, rel, acq); \ + (typeof (*mem))__prev; }) + +# define __arch_compare_and_exchange_val_32_int(mem, new, old, rel, acq) \ +({ typeof (*mem) __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_32_int(mem, new, old, rel, acq); \ + (typeof (*mem))__prev; }) + +# define __arch_compare_and_exchange_val_64_int(mem, new, old, rel, acq) \ +({ typeof (*mem) __prev; int __cmp; \ + __arch_compare_and_exchange_xxx_64_int(mem, new, old, rel, acq); \ + (typeof (*mem))__prev; }) + +/* Compare and exchange with "acquire" semantics, ie barrier after. */ + +# define atomic_compare_and_exchange_bool_acq(mem, new, old) \ + __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ + mem, new, old, "", MIPS_SYNC_STR) + +# define atomic_compare_and_exchange_val_acq(mem, new, old) \ + __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ + mem, new, old, "", MIPS_SYNC_STR) + +/* Compare and exchange with "release" semantics, ie barrier before. */ + +# define atomic_compare_and_exchange_bool_rel(mem, new, old) \ + __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ + mem, new, old, MIPS_SYNC_STR, "") + +# define atomic_compare_and_exchange_val_rel(mem, new, old) \ + __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ + mem, new, old, MIPS_SYNC_STR, "") + + + +/* Atomic exchange (without compare).
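A compare-free exchange is most often used to consume state in one shot. A sketch built on this header's own macro (demo_pending is an invented name):

static int demo_pending;

/* Read the flag and clear it in a single ll/sc sequence; the _acq form
   places the sync barrier after the swap.  */
static inline int
demo_consume_pending (void)
{
  return atomic_exchange_acq (&demo_pending, 0);
}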
*/ + +# define __arch_exchange_xxx_8_int(mem, newval, rel, acq) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_exchange_xxx_16_int(mem, newval, rel, acq) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_exchange_xxx_32_int(mem, newval, rel, acq) \ +({ typeof (*mem) __prev; int __cmp; \ + __asm__ __volatile__ ("\n" \ + ".set push\n\t" \ + MIPS_PUSH_MIPS2 \ + rel "\n" \ + "1:\t" \ + "ll %0,%4\n\t" \ + "move %1,%3\n\t" \ + "sc %1,%2\n\t" \ + R10K_BEQZ_INSN" %1,1b\n" \ + acq "\n\t" \ + ".set pop\n" \ + "2:\n\t" \ + : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ + : "r" (newval), "m" (*mem) \ + : "memory"); \ + __prev; }) + +# if _MIPS_SIM == _ABIO32 +/* We can't do an atomic 64-bit operation in O32. */ +# define __arch_exchange_xxx_64_int(mem, newval, rel, acq) \ + (abort (), (typeof(*mem)) 0) +# else +# define __arch_exchange_xxx_64_int(mem, newval, rel, acq) \ +({ typeof (*mem) __prev; int __cmp; \ + __asm__ __volatile__ ("\n" \ + ".set push\n\t" \ + MIPS_PUSH_MIPS2 \ + rel "\n" \ + "1:\n" \ + "lld %0,%4\n\t" \ + "move %1,%3\n\t" \ + "scd %1,%2\n\t" \ + R10K_BEQZ_INSN" %1,1b\n" \ + acq "\n\t" \ + ".set pop\n" \ + "2:\n\t" \ + : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ + : "r" (newval), "m" (*mem) \ + : "memory"); \ + __prev; }) +# endif + +# define atomic_exchange_acq(mem, value) \ + __atomic_val_bysize (__arch_exchange_xxx, int, mem, value, "", MIPS_SYNC_STR) + +# define atomic_exchange_rel(mem, value) \ + __atomic_val_bysize (__arch_exchange_xxx, int, mem, value, MIPS_SYNC_STR, "") + + +/* Atomically add value and return the previous (unincremented) value. */ + +# define __arch_exchange_and_add_8_int(mem, newval, rel, acq) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_exchange_and_add_16_int(mem, newval, rel, acq) \ + (abort (), (typeof(*mem)) 0) + +# define __arch_exchange_and_add_32_int(mem, value, rel, acq) \ +({ typeof (*mem) __prev; int __cmp; \ + __asm__ __volatile__ ("\n" \ + ".set push\n\t" \ + MIPS_PUSH_MIPS2 \ + rel "\n" \ + "1:\t" \ + "ll %0,%4\n\t" \ + "addu %1,%0,%3\n\t" \ + "sc %1,%2\n\t" \ + R10K_BEQZ_INSN" %1,1b\n" \ + acq "\n\t" \ + ".set pop\n" \ + "2:\n\t" \ + : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ + : "r" (value), "m" (*mem) \ + : "memory"); \ + __prev; }) + +# if _MIPS_SIM == _ABIO32 +/* We can't do an atomic 64-bit operation in O32. */ +# define __arch_exchange_and_add_64_int(mem, value, rel, acq) \ + (abort (), (typeof(*mem)) 0) +# else +# define __arch_exchange_and_add_64_int(mem, value, rel, acq) \ +({ typeof (*mem) __prev; int __cmp; \ + __asm__ __volatile__ ( \ + ".set push\n\t" \ + MIPS_PUSH_MIPS2 \ + rel "\n" \ + "1:\t" \ + "lld %0,%4\n\t" \ + "daddu %1,%0,%3\n\t" \ + "scd %1,%2\n\t" \ + R10K_BEQZ_INSN" %1,1b\n" \ + acq "\n\t" \ + ".set pop\n" \ + "2:\n\t" \ + : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ + : "r" (value), "m" (*mem) \ + : "memory"); \ + __prev; }) +# endif + +# define atomic_exchange_and_add_acq(mem, value) \ + __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ + "", MIPS_SYNC_STR) + +# define atomic_exchange_and_add_rel(mem, value) \ + __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ + MIPS_SYNC_STR, "") + +#endif /* !__mips16 && !__GNUC_PREREQ (4, 8) */ + +/* TODO: More atomic operations could be implemented efficiently; only the + basic requirements are done. 
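Anything further can be layered on the CAS that is already here; a sketch of the usual retry loop (demo_fetch_or is invented, the CAS macro is this header's):

static inline int
demo_fetch_or (int *mem, int mask)
{
  int old, seen;
  do
    {
      old = *mem;
      /* The CAS returns the value actually found; a mismatch means
         another thread interleaved, so retry with the fresh value.  */
      seen = atomic_compare_and_exchange_val_acq (mem, old | mask, old);
    }
  while (seen != old);
  return old;
}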
*/ + +#ifdef __mips16 +# define atomic_full_barrier() __sync_synchronize () + +#else /* !__mips16 */ +# define atomic_full_barrier() \ + __asm__ __volatile__ (".set push\n\t" \ + MIPS_PUSH_MIPS2 \ + MIPS_SYNC_STR "\n\t" \ + ".set pop" : : : "memory") +#endif /* !__mips16 */ + +#endif /* atomic-machine.h */ diff --git a/sysdeps/mips/bits/atomic.h b/sysdeps/mips/bits/atomic.h deleted file mode 100644 index 375448957c..0000000000 --- a/sysdeps/mips/bits/atomic.h +++ /dev/null @@ -1,503 +0,0 @@ -/* Low-level functions for atomic operations. Mips version. - Copyright (C) 2005-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . */ - -#ifndef _MIPS_BITS_ATOMIC_H -#define _MIPS_BITS_ATOMIC_H 1 - -#include -#include -#include - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#if _MIPS_SIM == _ABIO32 && __mips < 2 -#define MIPS_PUSH_MIPS2 ".set mips2\n\t" -#else -#define MIPS_PUSH_MIPS2 -#endif - -#if _MIPS_SIM == _ABIO32 || _MIPS_SIM == _ABIN32 -#define __HAVE_64B_ATOMICS 0 -#else -#define __HAVE_64B_ATOMICS 1 -#endif - -/* See the comments in about the use of the sync instruction. */ -#ifndef MIPS_SYNC -# define MIPS_SYNC sync -#endif - -/* Certain revisions of the R10000 Processor need an LL/SC Workaround - enabled. Revisions before 3.0 misbehave on atomic operations, and - Revs 2.6 and lower deadlock after several seconds due to other errata. - - To quote the R10K Errata: - Workaround: The basic idea is to inhibit the four instructions - from simultaneously becoming active in R10000. Padding all - ll/sc sequences with nops or changing the looping branch in the - routines to a branch likely (which is always predicted taken - by R10000) will work. The nops should go after the loop, and the - number of them should be 28. This number could be decremented for - each additional instruction in the ll/sc loop such as the lock - modifier(s) between the ll and sc, the looping branch and its - delay slot. For typical short routines with one ll/sc loop, any - instructions after the loop could also count as a decrement. The - nop workaround pollutes the cache more but would be a few cycles - faster if all the code is in the cache and the looping branch - is predicted not taken. 
*/ - - -#ifdef _MIPS_ARCH_R10000 -#define R10K_BEQZ_INSN "beqzl" -#else -#define R10K_BEQZ_INSN "beqz" -#endif - -#define MIPS_SYNC_STR_2(X) #X -#define MIPS_SYNC_STR_1(X) MIPS_SYNC_STR_2(X) -#define MIPS_SYNC_STR MIPS_SYNC_STR_1(MIPS_SYNC) - -#if __GNUC_PREREQ (4, 8) || (defined __mips16 && __GNUC_PREREQ (4, 7)) -/* The __atomic_* builtins are available in GCC 4.7 and later, but MIPS - support for their efficient implementation was added only in GCC 4.8. - We still want to use them even with GCC 4.7 for MIPS16 code where we - have no assembly alternative available and want to avoid the __sync_* - builtins if at all possible. */ - -#define USE_ATOMIC_COMPILER_BUILTINS 1 - -/* Compare and exchange. - For all "bool" routines, we return FALSE if exchange succesful. */ - -# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \ - (abort (), 0) - -# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \ - (abort (), 0) - -# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - }) - -# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \ - ({ \ - typeof (*mem) __oldval = (oldval); \ - __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ - model, __ATOMIC_RELAXED); \ - __oldval; \ - }) - -# if _MIPS_SIM == _ABIO32 - /* We can't do an atomic 64-bit operation in O32. */ -# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \ - (abort (), 0) -# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \ - (abort (), (typeof(*mem)) 0) -# else -# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \ - __arch_compare_and_exchange_bool_32_int (mem, newval, oldval, model) -# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \ - __arch_compare_and_exchange_val_32_int (mem, newval, oldval, model) -# endif - -/* Compare and exchange with "acquire" semantics, ie barrier after. */ - -# define atomic_compare_and_exchange_bool_acq(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, __ATOMIC_ACQUIRE) - -# define atomic_compare_and_exchange_val_acq(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, __ATOMIC_ACQUIRE) - -/* Compare and exchange with "release" semantics, ie barrier before. */ - -# define atomic_compare_and_exchange_bool_rel(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, __ATOMIC_RELEASE) - -# define atomic_compare_and_exchange_val_rel(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, __ATOMIC_RELEASE) - - -/* Atomic exchange (without compare). */ - -# define __arch_exchange_8_int(mem, newval, model) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_exchange_16_int(mem, newval, model) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_exchange_32_int(mem, newval, model) \ - __atomic_exchange_n (mem, newval, model) - -# if _MIPS_SIM == _ABIO32 -/* We can't do an atomic 64-bit operation in O32. 
*/ -# define __arch_exchange_64_int(mem, newval, model) \ - (abort (), (typeof(*mem)) 0) -# else -# define __arch_exchange_64_int(mem, newval, model) \ - __atomic_exchange_n (mem, newval, model) -# endif - -# define atomic_exchange_acq(mem, value) \ - __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE) - -# define atomic_exchange_rel(mem, value) \ - __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE) - - -/* Atomically add value and return the previous (unincremented) value. */ - -# define __arch_exchange_and_add_8_int(mem, value, model) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_exchange_and_add_16_int(mem, value, model) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_exchange_and_add_32_int(mem, value, model) \ - __atomic_fetch_add (mem, value, model) - -# if _MIPS_SIM == _ABIO32 -/* We can't do an atomic 64-bit operation in O32. */ -# define __arch_exchange_and_add_64_int(mem, value, model) \ - (abort (), (typeof(*mem)) 0) -# else -# define __arch_exchange_and_add_64_int(mem, value, model) \ - __atomic_fetch_add (mem, value, model) -# endif - -# define atomic_exchange_and_add_acq(mem, value) \ - __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ - __ATOMIC_ACQUIRE) - -# define atomic_exchange_and_add_rel(mem, value) \ - __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ - __ATOMIC_RELEASE) - -#elif defined __mips16 /* !__GNUC_PREREQ (4, 7) */ -/* This implementation using __sync* builtins will be removed once glibc - requires GCC 4.7 or later to build. */ - -# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - __sync_val_compare_and_swap ((mem), (oldval), (newval)) -# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ - (!__sync_bool_compare_and_swap ((mem), (oldval), (newval))) - -# define atomic_exchange_acq(mem, newval) \ - __sync_lock_test_and_set ((mem), (newval)) - -# define atomic_exchange_and_add(mem, val) \ - __sync_fetch_and_add ((mem), (val)) - -# define atomic_bit_test_set(mem, bit) \ - ({ __typeof (bit) __bit = (bit); \ - (__sync_fetch_and_or ((mem), 1 << (__bit)) & (1 << (__bit))); }) - -# define atomic_and(mem, mask) (void) __sync_fetch_and_and ((mem), (mask)) -# define atomic_and_val(mem, mask) __sync_fetch_and_and ((mem), (mask)) - -# define atomic_or(mem, mask) (void) __sync_fetch_and_or ((mem), (mask)) -# define atomic_or_val(mem, mask) __sync_fetch_and_or ((mem), (mask)) - -#else /* !__mips16 && !__GNUC_PREREQ (4, 8) */ -/* This implementation using inline assembly will be removed once glibc - requires GCC 4.8 or later to build. */ - -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -/* Compare and exchange. For all of the "xxx" routines, we expect a - "__prev" and a "__cmp" variable to be provided by the enclosing scope, - in which values are returned. 
*/ - -# define __arch_compare_and_exchange_xxx_8_int(mem, newval, oldval, rel, acq) \ - (abort (), __prev = 0, __cmp = 0, (void) __cmp) - -# define __arch_compare_and_exchange_xxx_16_int(mem, newval, oldval, rel, acq) \ - (abort (), __prev = 0, __cmp = 0, (void) __cmp) - -# define __arch_compare_and_exchange_xxx_32_int(mem, newval, oldval, rel, acq) \ - __asm__ __volatile__ ( \ - ".set push\n\t" \ - MIPS_PUSH_MIPS2 \ - rel "\n" \ - "1:\t" \ - "ll %0,%5\n\t" \ - "move %1,$0\n\t" \ - "bne %0,%3,2f\n\t" \ - "move %1,%4\n\t" \ - "sc %1,%2\n\t" \ - R10K_BEQZ_INSN" %1,1b\n" \ - acq "\n\t" \ - ".set pop\n" \ - "2:\n\t" \ - : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ - : "r" (oldval), "r" (newval), "m" (*mem) \ - : "memory") - -# if _MIPS_SIM == _ABIO32 -/* We can't do an atomic 64-bit operation in O32. */ -# define __arch_compare_and_exchange_xxx_64_int(mem, newval, oldval, rel, acq) \ - (abort (), __prev = 0, __cmp = 0, (void) __cmp) -# else -# define __arch_compare_and_exchange_xxx_64_int(mem, newval, oldval, rel, acq) \ - __asm__ __volatile__ ("\n" \ - ".set push\n\t" \ - MIPS_PUSH_MIPS2 \ - rel "\n" \ - "1:\t" \ - "lld %0,%5\n\t" \ - "move %1,$0\n\t" \ - "bne %0,%3,2f\n\t" \ - "move %1,%4\n\t" \ - "scd %1,%2\n\t" \ - R10K_BEQZ_INSN" %1,1b\n" \ - acq "\n\t" \ - ".set pop\n" \ - "2:\n\t" \ - : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ - : "r" (oldval), "r" (newval), "m" (*mem) \ - : "memory") -# endif - -/* For all "bool" routines, we return FALSE if exchange succesful. */ - -# define __arch_compare_and_exchange_bool_8_int(mem, new, old, rel, acq) \ -({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp; \ - __arch_compare_and_exchange_xxx_8_int(mem, new, old, rel, acq); \ - !__cmp; }) - -# define __arch_compare_and_exchange_bool_16_int(mem, new, old, rel, acq) \ -({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp; \ - __arch_compare_and_exchange_xxx_16_int(mem, new, old, rel, acq); \ - !__cmp; }) - -# define __arch_compare_and_exchange_bool_32_int(mem, new, old, rel, acq) \ -({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp; \ - __arch_compare_and_exchange_xxx_32_int(mem, new, old, rel, acq); \ - !__cmp; }) - -# define __arch_compare_and_exchange_bool_64_int(mem, new, old, rel, acq) \ -({ typeof (*mem) __prev __attribute__ ((unused)); int __cmp; \ - __arch_compare_and_exchange_xxx_64_int(mem, new, old, rel, acq); \ - !__cmp; }) - -/* For all "val" routines, return the old value whether exchange - successful or not. */ - -# define __arch_compare_and_exchange_val_8_int(mem, new, old, rel, acq) \ -({ typeof (*mem) __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_8_int(mem, new, old, rel, acq); \ - (typeof (*mem))__prev; }) - -# define __arch_compare_and_exchange_val_16_int(mem, new, old, rel, acq) \ -({ typeof (*mem) __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_16_int(mem, new, old, rel, acq); \ - (typeof (*mem))__prev; }) - -# define __arch_compare_and_exchange_val_32_int(mem, new, old, rel, acq) \ -({ typeof (*mem) __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_32_int(mem, new, old, rel, acq); \ - (typeof (*mem))__prev; }) - -# define __arch_compare_and_exchange_val_64_int(mem, new, old, rel, acq) \ -({ typeof (*mem) __prev; int __cmp; \ - __arch_compare_and_exchange_xxx_64_int(mem, new, old, rel, acq); \ - (typeof (*mem))__prev; }) - -/* Compare and exchange with "acquire" semantics, ie barrier after. 
*/ - -# define atomic_compare_and_exchange_bool_acq(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, "", MIPS_SYNC_STR) - -# define atomic_compare_and_exchange_val_acq(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, "", MIPS_SYNC_STR) - -/* Compare and exchange with "release" semantics, ie barrier before. */ - -# define atomic_compare_and_exchange_bool_rel(mem, new, old) \ - __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ - mem, new, old, MIPS_SYNC_STR, "") - -# define atomic_compare_and_exchange_val_rel(mem, new, old) \ - __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ - mem, new, old, MIPS_SYNC_STR, "") - - - -/* Atomic exchange (without compare). */ - -# define __arch_exchange_xxx_8_int(mem, newval, rel, acq) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_exchange_xxx_16_int(mem, newval, rel, acq) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_exchange_xxx_32_int(mem, newval, rel, acq) \ -({ typeof (*mem) __prev; int __cmp; \ - __asm__ __volatile__ ("\n" \ - ".set push\n\t" \ - MIPS_PUSH_MIPS2 \ - rel "\n" \ - "1:\t" \ - "ll %0,%4\n\t" \ - "move %1,%3\n\t" \ - "sc %1,%2\n\t" \ - R10K_BEQZ_INSN" %1,1b\n" \ - acq "\n\t" \ - ".set pop\n" \ - "2:\n\t" \ - : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ - : "r" (newval), "m" (*mem) \ - : "memory"); \ - __prev; }) - -# if _MIPS_SIM == _ABIO32 -/* We can't do an atomic 64-bit operation in O32. */ -# define __arch_exchange_xxx_64_int(mem, newval, rel, acq) \ - (abort (), (typeof(*mem)) 0) -# else -# define __arch_exchange_xxx_64_int(mem, newval, rel, acq) \ -({ typeof (*mem) __prev; int __cmp; \ - __asm__ __volatile__ ("\n" \ - ".set push\n\t" \ - MIPS_PUSH_MIPS2 \ - rel "\n" \ - "1:\n" \ - "lld %0,%4\n\t" \ - "move %1,%3\n\t" \ - "scd %1,%2\n\t" \ - R10K_BEQZ_INSN" %1,1b\n" \ - acq "\n\t" \ - ".set pop\n" \ - "2:\n\t" \ - : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ - : "r" (newval), "m" (*mem) \ - : "memory"); \ - __prev; }) -# endif - -# define atomic_exchange_acq(mem, value) \ - __atomic_val_bysize (__arch_exchange_xxx, int, mem, value, "", MIPS_SYNC_STR) - -# define atomic_exchange_rel(mem, value) \ - __atomic_val_bysize (__arch_exchange_xxx, int, mem, value, MIPS_SYNC_STR, "") - - -/* Atomically add value and return the previous (unincremented) value. */ - -# define __arch_exchange_and_add_8_int(mem, newval, rel, acq) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_exchange_and_add_16_int(mem, newval, rel, acq) \ - (abort (), (typeof(*mem)) 0) - -# define __arch_exchange_and_add_32_int(mem, value, rel, acq) \ -({ typeof (*mem) __prev; int __cmp; \ - __asm__ __volatile__ ("\n" \ - ".set push\n\t" \ - MIPS_PUSH_MIPS2 \ - rel "\n" \ - "1:\t" \ - "ll %0,%4\n\t" \ - "addu %1,%0,%3\n\t" \ - "sc %1,%2\n\t" \ - R10K_BEQZ_INSN" %1,1b\n" \ - acq "\n\t" \ - ".set pop\n" \ - "2:\n\t" \ - : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ - : "r" (value), "m" (*mem) \ - : "memory"); \ - __prev; }) - -# if _MIPS_SIM == _ABIO32 -/* We can't do an atomic 64-bit operation in O32. 
*/ -# define __arch_exchange_and_add_64_int(mem, value, rel, acq) \ - (abort (), (typeof(*mem)) 0) -# else -# define __arch_exchange_and_add_64_int(mem, value, rel, acq) \ -({ typeof (*mem) __prev; int __cmp; \ - __asm__ __volatile__ ( \ - ".set push\n\t" \ - MIPS_PUSH_MIPS2 \ - rel "\n" \ - "1:\t" \ - "lld %0,%4\n\t" \ - "daddu %1,%0,%3\n\t" \ - "scd %1,%2\n\t" \ - R10K_BEQZ_INSN" %1,1b\n" \ - acq "\n\t" \ - ".set pop\n" \ - "2:\n\t" \ - : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem) \ - : "r" (value), "m" (*mem) \ - : "memory"); \ - __prev; }) -# endif - -# define atomic_exchange_and_add_acq(mem, value) \ - __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ - "", MIPS_SYNC_STR) - -# define atomic_exchange_and_add_rel(mem, value) \ - __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ - MIPS_SYNC_STR, "") - -#endif /* !__mips16 && !__GNUC_PREREQ (4, 8) */ - -/* TODO: More atomic operations could be implemented efficiently; only the - basic requirements are done. */ - -#ifdef __mips16 -# define atomic_full_barrier() __sync_synchronize () - -#else /* !__mips16 */ -# define atomic_full_barrier() \ - __asm__ __volatile__ (".set push\n\t" \ - MIPS_PUSH_MIPS2 \ - MIPS_SYNC_STR "\n\t" \ - ".set pop" : : : "memory") -#endif /* !__mips16 */ - -#endif /* bits/atomic.h */ diff --git a/sysdeps/powerpc/atomic-machine.h b/sysdeps/powerpc/atomic-machine.h new file mode 100644 index 0000000000..a056eb18c4 --- /dev/null +++ b/sysdeps/powerpc/atomic-machine.h @@ -0,0 +1,345 @@ +/* Atomic operations. PowerPC Common version. + Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Paul Mackerras , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +/* + * Never include sysdeps/powerpc/atomic-machine.h directly. + * Always use include/atomic.h which will include either + * sysdeps/powerpc/powerpc32/atomic-machine.h + * or + * sysdeps/powerpc/powerpc64/atomic-machine.h + * as appropriate and which in turn include this file. + */ + +#include + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +/* + * Powerpc does not have byte and halfword forms of load and reserve and + * store conditional. So for powerpc we stub out the 8- and 16-bit forms.
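The acquire/release split this file goes on to encode pairs isync (__ARCH_ACQ_INSTR) after a lwarx/stwcx. sequence with sync (__ARCH_REL_INSTR) before one; on UP builds both collapse to nothing. A hedged sketch of a lock built on the macros defined below (demo_* names are illustrative):

static int demo_lock;

static void
demo_acquire (void)
{
  /* Spin until the 0 -> 1 transition wins; the _acq form ends the
     lwarx/stwcx. sequence with isync, so later loads cannot float up
     past the lock acquisition.  */
  while (atomic_compare_and_exchange_val_acq (&demo_lock, 1, 0) != 0)
    ;
}

static void
demo_release (void)
{
  /* The _rel form emits sync before the store that drops the lock,
     publishing everything done inside the critical section.  */
  atomic_exchange_rel (&demo_lock, 0);
}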
+ */ +#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \ + (abort (), 0) + +#ifdef UP +# define __ARCH_ACQ_INSTR "" +# define __ARCH_REL_INSTR "" +#else +# define __ARCH_ACQ_INSTR "isync" +# ifndef __ARCH_REL_INSTR +# define __ARCH_REL_INSTR "sync" +# endif +#endif + +#ifndef MUTEX_HINT_ACQ +# define MUTEX_HINT_ACQ +#endif +#ifndef MUTEX_HINT_REL +# define MUTEX_HINT_REL +#endif + +#define atomic_full_barrier() __asm ("sync" ::: "memory") + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + __asm __volatile ( \ + "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \ + " cmpw %0,%2\n" \ + " bne 2f\n" \ + " stwcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&r" (__tmp) \ + : "b" (__memp), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp; \ + }) + +#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n" \ + " cmpw %0,%2\n" \ + " bne 2f\n" \ + " stwcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " \ + : "=&r" (__tmp) \ + : "b" (__memp), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp; \ + }) + +#define __arch_atomic_exchange_32_acq(mem, value) \ + ({ \ + __typeof (*mem) __val; \ + __asm __volatile ( \ + "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \ + " stwcx. %3,0,%2\n" \ + " bne- 1b\n" \ + " " __ARCH_ACQ_INSTR \ + : "=&r" (__val), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_32_rel(mem, value) \ + ({ \ + __typeof (*mem) __val; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \ + " stwcx. %3,0,%2\n" \ + " bne- 1b" \ + : "=&r" (__val), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_and_add_32(mem, value) \ + ({ \ + __typeof (*mem) __val, __tmp; \ + __asm __volatile ("1: lwarx %0,0,%3\n" \ + " add %1,%0,%4\n" \ + " stwcx. %1,0,%3\n" \ + " bne- 1b" \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_and_add_32_acq(mem, value) \ + ({ \ + __typeof (*mem) __val, __tmp; \ + __asm __volatile ("1: lwarx %0,0,%3" MUTEX_HINT_ACQ "\n" \ + " add %1,%0,%4\n" \ + " stwcx. %1,0,%3\n" \ + " bne- 1b\n" \ + __ARCH_ACQ_INSTR \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_and_add_32_rel(mem, value) \ + ({ \ + __typeof (*mem) __val, __tmp; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: lwarx %0,0,%3" MUTEX_HINT_REL "\n" \ + " add %1,%0,%4\n" \ + " stwcx. %1,0,%3\n" \ + " bne- 1b" \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_increment_val_32(mem) \ + ({ \ + __typeof (*(mem)) __val; \ + __asm __volatile ("1: lwarx %0,0,%2\n" \ + " addi %0,%0,1\n" \ + " stwcx. 
%0,0,%2\n" \ + " bne- 1b" \ + : "=&b" (__val), "=m" (*mem) \ + : "b" (mem), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_decrement_val_32(mem) \ + ({ \ + __typeof (*(mem)) __val; \ + __asm __volatile ("1: lwarx %0,0,%2\n" \ + " subi %0,%0,1\n" \ + " stwcx. %0,0,%2\n" \ + " bne- 1b" \ + : "=&b" (__val), "=m" (*mem) \ + : "b" (mem), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_decrement_if_positive_32(mem) \ + ({ int __val, __tmp; \ + __asm __volatile ("1: lwarx %0,0,%3\n" \ + " cmpwi 0,%0,0\n" \ + " addi %1,%0,-1\n" \ + " ble 2f\n" \ + " stwcx. %1,0,%3\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_compare_and_exchange_val_32_acq(mem, newval, oldval); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_compare_and_exchange_val_64_acq(mem, newval, oldval); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_compare_and_exchange_val_32_rel(mem, newval, oldval); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_compare_and_exchange_val_64_rel(mem, newval, oldval); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_exchange_acq(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_32_acq (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_64_acq (mem, value); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_exchange_rel(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_32_rel (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_64_rel (mem, value); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_exchange_and_add(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_and_add_32 (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_and_add_64 (mem, value); \ + else \ + abort (); \ + __result; \ + }) +#define atomic_exchange_and_add_acq(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_and_add_32_acq (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_and_add_64_acq (mem, value); \ + else \ + abort (); \ + __result; \ + }) +#define atomic_exchange_and_add_rel(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_and_add_32_rel (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_and_add_64_rel (mem, value); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_increment_val(mem) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*(mem)) == 4) \ + __result = __arch_atomic_increment_val_32 (mem); \ + else if (sizeof (*(mem)) == 8) \ + __result = __arch_atomic_increment_val_64 (mem); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; }) + +#define atomic_decrement_val(mem) \ + ({ \ + __typeof (*(mem)) __result; \ + if 
(sizeof (*(mem)) == 4) \ + __result = __arch_atomic_decrement_val_32 (mem); \ + else if (sizeof (*(mem)) == 8) \ + __result = __arch_atomic_decrement_val_64 (mem); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; }) + + +/* Decrement *MEM if it is > 0, and return the old value. */ +#define atomic_decrement_if_positive(mem) \ + ({ __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_decrement_if_positive_32 (mem); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_decrement_if_positive_64 (mem); \ + else \ + abort (); \ + __result; \ + }) diff --git a/sysdeps/powerpc/bits/atomic.h b/sysdeps/powerpc/bits/atomic.h deleted file mode 100644 index 8ca45ee5ee..0000000000 --- a/sysdeps/powerpc/bits/atomic.h +++ /dev/null @@ -1,345 +0,0 @@ -/* Atomic operations. PowerPC Common version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Paul Mackerras , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -/* - * Never include sysdeps/powerpc/bits/atomic.h directly. - * Alway use include/atomic.h which will include either - * sysdeps/powerpc/powerpc32/bits/atomic.h - * or - * sysdeps/powerpc/powerpc64/bits/atomic.h - * as appropriate and which in turn include this file. - */ - -#include - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -/* - * Powerpc does not have byte and halfword forms of load and reserve and - * store conditional. So for powerpc we stub out the 8- and 16-bit forms. - */ -#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \ - (abort (), 0) - -#ifdef UP -# define __ARCH_ACQ_INSTR "" -# define __ARCH_REL_INSTR "" -#else -# define __ARCH_ACQ_INSTR "isync" -# ifndef __ARCH_REL_INSTR -# define __ARCH_REL_INSTR "sync" -# endif -#endif - -#ifndef MUTEX_HINT_ACQ -# define MUTEX_HINT_ACQ -#endif -#ifndef MUTEX_HINT_REL -# define MUTEX_HINT_REL -#endif - -#define atomic_full_barrier() __asm ("sync" ::: "memory") - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - ({ \ - __typeof (*(mem)) __tmp; \ - __typeof (mem) __memp = (mem); \ - __asm __volatile ( \ - "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \ - " cmpw %0,%2\n" \ - " bne 2f\n" \ - " stwcx. 
%3,0,%1\n" \ - " bne- 1b\n" \ - "2: " __ARCH_ACQ_INSTR \ - : "=&r" (__tmp) \ - : "b" (__memp), "r" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp; \ - }) - -#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \ - ({ \ - __typeof (*(mem)) __tmp; \ - __typeof (mem) __memp = (mem); \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n" \ - " cmpw %0,%2\n" \ - " bne 2f\n" \ - " stwcx. %3,0,%1\n" \ - " bne- 1b\n" \ - "2: " \ - : "=&r" (__tmp) \ - : "b" (__memp), "r" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp; \ - }) - -#define __arch_atomic_exchange_32_acq(mem, value) \ - ({ \ - __typeof (*mem) __val; \ - __asm __volatile ( \ - "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \ - " stwcx. %3,0,%2\n" \ - " bne- 1b\n" \ - " " __ARCH_ACQ_INSTR \ - : "=&r" (__val), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_exchange_32_rel(mem, value) \ - ({ \ - __typeof (*mem) __val; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \ - " stwcx. %3,0,%2\n" \ - " bne- 1b" \ - : "=&r" (__val), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_exchange_and_add_32(mem, value) \ - ({ \ - __typeof (*mem) __val, __tmp; \ - __asm __volatile ("1: lwarx %0,0,%3\n" \ - " add %1,%0,%4\n" \ - " stwcx. %1,0,%3\n" \ - " bne- 1b" \ - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_exchange_and_add_32_acq(mem, value) \ - ({ \ - __typeof (*mem) __val, __tmp; \ - __asm __volatile ("1: lwarx %0,0,%3" MUTEX_HINT_ACQ "\n" \ - " add %1,%0,%4\n" \ - " stwcx. %1,0,%3\n" \ - " bne- 1b\n" \ - __ARCH_ACQ_INSTR \ - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_exchange_and_add_32_rel(mem, value) \ - ({ \ - __typeof (*mem) __val, __tmp; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - "1: lwarx %0,0,%3" MUTEX_HINT_REL "\n" \ - " add %1,%0,%4\n" \ - " stwcx. %1,0,%3\n" \ - " bne- 1b" \ - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_increment_val_32(mem) \ - ({ \ - __typeof (*(mem)) __val; \ - __asm __volatile ("1: lwarx %0,0,%2\n" \ - " addi %0,%0,1\n" \ - " stwcx. %0,0,%2\n" \ - " bne- 1b" \ - : "=&b" (__val), "=m" (*mem) \ - : "b" (mem), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_decrement_val_32(mem) \ - ({ \ - __typeof (*(mem)) __val; \ - __asm __volatile ("1: lwarx %0,0,%2\n" \ - " subi %0,%0,1\n" \ - " stwcx. %0,0,%2\n" \ - " bne- 1b" \ - : "=&b" (__val), "=m" (*mem) \ - : "b" (mem), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_decrement_if_positive_32(mem) \ - ({ int __val, __tmp; \ - __asm __volatile ("1: lwarx %0,0,%3\n" \ - " cmpwi 0,%0,0\n" \ - " addi %1,%0,-1\n" \ - " ble 2f\n" \ - " stwcx. 
%1,0,%3\n" \ - " bne- 1b\n" \ - "2: " __ARCH_ACQ_INSTR \ - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ - : "b" (mem), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_compare_and_exchange_val_32_acq(mem, newval, oldval); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_compare_and_exchange_val_64_acq(mem, newval, oldval); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_compare_and_exchange_val_32_rel(mem, newval, oldval); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_compare_and_exchange_val_64_rel(mem, newval, oldval); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_exchange_acq(mem, value) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_atomic_exchange_32_acq (mem, value); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_atomic_exchange_64_acq (mem, value); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_exchange_rel(mem, value) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_atomic_exchange_32_rel (mem, value); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_atomic_exchange_64_rel (mem, value); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_exchange_and_add(mem, value) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_atomic_exchange_and_add_32 (mem, value); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_atomic_exchange_and_add_64 (mem, value); \ - else \ - abort (); \ - __result; \ - }) -#define atomic_exchange_and_add_acq(mem, value) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_atomic_exchange_and_add_32_acq (mem, value); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_atomic_exchange_and_add_64_acq (mem, value); \ - else \ - abort (); \ - __result; \ - }) -#define atomic_exchange_and_add_rel(mem, value) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_atomic_exchange_and_add_32_rel (mem, value); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_atomic_exchange_and_add_64_rel (mem, value); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_increment_val(mem) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*(mem)) == 4) \ - __result = __arch_atomic_increment_val_32 (mem); \ - else if (sizeof (*(mem)) == 8) \ - __result = __arch_atomic_increment_val_64 (mem); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; }) - -#define atomic_decrement_val(mem) \ - ({ \ - __typeof (*(mem)) __result; \ - if (sizeof (*(mem)) == 4) \ - __result = __arch_atomic_decrement_val_32 (mem); \ - else if (sizeof (*(mem)) == 8) \ - __result = __arch_atomic_decrement_val_64 (mem); \ - else \ - abort (); \ - __result; \ - }) - -#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; }) - - -/* Decrement *MEM if it is > 0, and return the old value. 
*/ -#define atomic_decrement_if_positive(mem) \ - ({ __typeof (*(mem)) __result; \ - if (sizeof (*mem) == 4) \ - __result = __arch_atomic_decrement_if_positive_32 (mem); \ - else if (sizeof (*mem) == 8) \ - __result = __arch_atomic_decrement_if_positive_64 (mem); \ - else \ - abort (); \ - __result; \ - }) diff --git a/sysdeps/powerpc/powerpc32/atomic-machine.h b/sysdeps/powerpc/powerpc32/atomic-machine.h new file mode 100644 index 0000000000..9ff70c54d5 --- /dev/null +++ b/sysdeps/powerpc/powerpc32/atomic-machine.h @@ -0,0 +1,144 @@ +/* Atomic operations. PowerPC32 version. + Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Paul Mackerras , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction. + This is a hint to the hardware to expect additional updates adjacent + to the lock word or not. If we are acquiring a Mutex, the hint + should be true. Otherwise we releasing a Mutex or doing a simple + atomic operation. In that case we don't expect additional updates + adjacent to the lock word after the Store Conditional and the hint + should be false. */ + +#if defined _ARCH_PWR6 || defined _ARCH_PWR6X +# define MUTEX_HINT_ACQ ",1" +# define MUTEX_HINT_REL ",0" +#else +# define MUTEX_HINT_ACQ +# define MUTEX_HINT_REL +#endif + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +/* + * The 32-bit exchange_bool is different on powerpc64 because the subf + * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned + * (a load word and zero (high 32) form). So powerpc64 has a slightly + * different version in sysdeps/powerpc/powerpc64/atomic-machine.h. + */ +#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ +({ \ + unsigned int __tmp; \ + __asm __volatile ( \ + "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \ + " subf. %0,%2,%0\n" \ + " bne 2f\n" \ + " stwcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&r" (__tmp) \ + : "b" (mem), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \ +({ \ + unsigned int __tmp; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n" \ + " subf. %0,%2,%0\n" \ + " bne 2f\n" \ + " stwcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " \ + : "=&r" (__tmp) \ + : "b" (mem), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of + load and reserve (ldarx) and store conditional (stdcx.) instructions. + So for powerpc32 we stub out the 64-bit forms. 
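[Editorial aside: note the return convention of the *_bool_32_* macros above. glibc's bool compare-and-exchange returns zero on success (the subtract leaves 0 when the old value matched), which is the opposite sense of the GCC builtin. A minimal sketch of the same semantics, with the __atomic builtin standing in for the lwarx/stwcx. sequence:

static inline int
sketch_cas_bool_acq (unsigned int *mem, unsigned int newval,
                     unsigned int oldval)
{
  /* glibc convention: 0 means the store happened, nonzero means it
     did not; hence the negation of the builtin's result.  */
  return !__atomic_compare_exchange_n (mem, &oldval, newval,
                                       0 /* strong */,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
]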
*/ +#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_atomic_exchange_64_acq(mem, value) \ + ({ abort (); (*mem) = (value); }) + +#define __arch_atomic_exchange_64_rel(mem, value) \ + ({ abort (); (*mem) = (value); }) + +#define __arch_atomic_exchange_and_add_64(mem, value) \ + ({ abort (); (*mem) = (value); }) + +#define __arch_atomic_exchange_and_add_64_acq(mem, value) \ + ({ abort (); (*mem) = (value); }) + +#define __arch_atomic_exchange_and_add_64_rel(mem, value) \ + ({ abort (); (*mem) = (value); }) + +#define __arch_atomic_increment_val_64(mem) \ + ({ abort (); (*mem)++; }) + +#define __arch_atomic_decrement_val_64(mem) \ + ({ abort (); (*mem)--; }) + +#define __arch_atomic_decrement_if_positive_64(mem) \ + ({ abort (); (*mem)--; }) + +#ifdef _ARCH_PWR4 +/* + * Newer powerpc64 processors support the new "light weight" sync (lwsync) + * So if the build is using -mcpu=[power4,power5,power5+,970] we can + * safely use lwsync. + */ +# define atomic_read_barrier() __asm ("lwsync" ::: "memory") +/* + * "light weight" sync can also be used for the release barrier. + */ +# ifndef UP +# define __ARCH_REL_INSTR "lwsync" +# endif +# define atomic_write_barrier() __asm ("lwsync" ::: "memory") +#else +/* + * Older powerpc32 processors don't support the new "light weight" + * sync (lwsync). So the only safe option is to use normal sync + * for all powerpc32 applications. + */ +# define atomic_read_barrier() __asm ("sync" ::: "memory") +# define atomic_write_barrier() __asm ("sync" ::: "memory") +#endif + +/* + * Include the rest of the atomic ops macros which are common to both + * powerpc32 and powerpc64. + */ +#include_next diff --git a/sysdeps/powerpc/powerpc32/bits/atomic.h b/sysdeps/powerpc/powerpc32/bits/atomic.h deleted file mode 100644 index 8cb0627ca9..0000000000 --- a/sysdeps/powerpc/powerpc32/bits/atomic.h +++ /dev/null @@ -1,144 +0,0 @@ -/* Atomic operations. PowerPC32 version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Paul Mackerras , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction. - This is a hint to the hardware to expect additional updates adjacent - to the lock word or not. If we are acquiring a Mutex, the hint - should be true. Otherwise we releasing a Mutex or doing a simple - atomic operation. In that case we don't expect additional updates - adjacent to the lock word after the Store Conditional and the hint - should be false. 
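[Editorial aside: the read/write barriers chosen above (lwsync on POWER4 and newer, plain sync otherwise) exist for the usual publish/consume pattern. A minimal sketch of that use, assuming the atomic_write_barrier/atomic_read_barrier macros defined in this file; 'payload' and 'ready' are hypothetical:

static int payload;
static volatile int ready;

static void
publish (int v)
{
  payload = v;
  atomic_write_barrier ();      /* order the data store before the flag */
  ready = 1;
}

static int
consume (void)
{
  while (!ready)
    ;                           /* spin until published */
  atomic_read_barrier ();       /* order the flag read before the data */
  return payload;
}
]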
*/ - -#if defined _ARCH_PWR6 || defined _ARCH_PWR6X -# define MUTEX_HINT_ACQ ",1" -# define MUTEX_HINT_REL ",0" -#else -# define MUTEX_HINT_ACQ -# define MUTEX_HINT_REL -#endif - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -/* - * The 32-bit exchange_bool is different on powerpc64 because the subf - * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned - * (a load word and zero (high 32) form). So powerpc64 has a slightly - * different version in sysdeps/powerpc/powerpc64/bits/atomic.h. - */ -#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ -({ \ - unsigned int __tmp; \ - __asm __volatile ( \ - "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \ - " subf. %0,%2,%0\n" \ - " bne 2f\n" \ - " stwcx. %3,0,%1\n" \ - " bne- 1b\n" \ - "2: " __ARCH_ACQ_INSTR \ - : "=&r" (__tmp) \ - : "b" (mem), "r" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp != 0; \ -}) - -#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \ -({ \ - unsigned int __tmp; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n" \ - " subf. %0,%2,%0\n" \ - " bne 2f\n" \ - " stwcx. %3,0,%1\n" \ - " bne- 1b\n" \ - "2: " \ - : "=&r" (__tmp) \ - : "b" (mem), "r" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp != 0; \ -}) - -/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of - load and reserve (ldarx) and store conditional (stdcx.) instructions. - So for powerpc32 we stub out the 64-bit forms. */ -#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_atomic_exchange_64_acq(mem, value) \ - ({ abort (); (*mem) = (value); }) - -#define __arch_atomic_exchange_64_rel(mem, value) \ - ({ abort (); (*mem) = (value); }) - -#define __arch_atomic_exchange_and_add_64(mem, value) \ - ({ abort (); (*mem) = (value); }) - -#define __arch_atomic_exchange_and_add_64_acq(mem, value) \ - ({ abort (); (*mem) = (value); }) - -#define __arch_atomic_exchange_and_add_64_rel(mem, value) \ - ({ abort (); (*mem) = (value); }) - -#define __arch_atomic_increment_val_64(mem) \ - ({ abort (); (*mem)++; }) - -#define __arch_atomic_decrement_val_64(mem) \ - ({ abort (); (*mem)--; }) - -#define __arch_atomic_decrement_if_positive_64(mem) \ - ({ abort (); (*mem)--; }) - -#ifdef _ARCH_PWR4 -/* - * Newer powerpc64 processors support the new "light weight" sync (lwsync) - * So if the build is using -mcpu=[power4,power5,power5+,970] we can - * safely use lwsync. - */ -# define atomic_read_barrier() __asm ("lwsync" ::: "memory") -/* - * "light weight" sync can also be used for the release barrier. - */ -# ifndef UP -# define __ARCH_REL_INSTR "lwsync" -# endif -# define atomic_write_barrier() __asm ("lwsync" ::: "memory") -#else -/* - * Older powerpc32 processors don't support the new "light weight" - * sync (lwsync). So the only safe option is to use normal sync - * for all powerpc32 applications. - */ -# define atomic_read_barrier() __asm ("sync" ::: "memory") -# define atomic_write_barrier() __asm ("sync" ::: "memory") -#endif - -/* - * Include the rest of the atomic ops macros which are common to both - * powerpc32 and powerpc64. 
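[Editorial aside: the #include_next that ends both the new and the deleted versions of this file is what chains the per-subarch and common headers together. Roughly, for a powerpc32 build (a sketch of the sysdeps search mechanism, not a literal trace):

/* include/atomic.h
     -> #include <atomic-machine.h>           (most specific sysdeps dir wins)
   sysdeps/powerpc/powerpc32/atomic-machine.h
     -> #include_next <atomic-machine.h>      (resumes the search path)
   sysdeps/powerpc/atomic-machine.h           (common powerpc macros)  */
]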
- */ -#include_next diff --git a/sysdeps/powerpc/powerpc64/atomic-machine.h b/sysdeps/powerpc/powerpc64/atomic-machine.h new file mode 100644 index 0000000000..e1d3e9bb0d --- /dev/null +++ b/sysdeps/powerpc/powerpc64/atomic-machine.h @@ -0,0 +1,274 @@ +/* Atomic operations. PowerPC64 version. + Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Paul Mackerras , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction. + This is a hint to the hardware to expect additional updates adjacent + to the lock word or not. If we are acquiring a Mutex, the hint + should be true. Otherwise we releasing a Mutex or doing a simple + atomic operation. In that case we don't expect additional updates + adjacent to the lock word after the Store Conditional and the hint + should be false. */ + +#if defined _ARCH_PWR6 || defined _ARCH_PWR6X +# define MUTEX_HINT_ACQ ",1" +# define MUTEX_HINT_REL ",0" +#else +# define MUTEX_HINT_ACQ +# define MUTEX_HINT_REL +#endif + +#define __HAVE_64B_ATOMICS 1 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +/* The 32-bit exchange_bool is different on powerpc64 because the subf + does signed 64-bit arithmetic while the lwarx is 32-bit unsigned + (a load word and zero (high 32) form) load. + In powerpc64 register values are 64-bit by default, including oldval. + The value in old val unknown sign extension, lwarx loads the 32-bit + value as unsigned. So we explicitly clear the high 32 bits in oldval. */ +#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ +({ \ + unsigned int __tmp, __tmp2; \ + __asm __volatile (" clrldi %1,%1,32\n" \ + "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \ + " subf. %0,%1,%0\n" \ + " bne 2f\n" \ + " stwcx. %4,0,%2\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&r" (__tmp), "=r" (__tmp2) \ + : "b" (mem), "1" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \ +({ \ + unsigned int __tmp, __tmp2; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + " clrldi %1,%1,32\n" \ + "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \ + " subf. %0,%1,%0\n" \ + " bne 2f\n" \ + " stwcx. %4,0,%2\n" \ + " bne- 1b\n" \ + "2: " \ + : "=&r" (__tmp), "=r" (__tmp2) \ + : "b" (mem), "1" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +/* + * Only powerpc64 processors support Load doubleword and reserve index (ldarx) + * and Store doubleword conditional indexed (stdcx) instructions. So here + * we define the 64-bit forms. + */ +#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ +({ \ + unsigned long __tmp; \ + __asm __volatile ( \ + "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \ + " subf. %0,%2,%0\n" \ + " bne 2f\n" \ + " stdcx. 
%3,0,%1\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&r" (__tmp) \ + : "b" (mem), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \ +({ \ + unsigned long __tmp; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: ldarx %0,0,%1" MUTEX_HINT_REL "\n" \ + " subf. %0,%2,%0\n" \ + " bne 2f\n" \ + " stdcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " \ + : "=&r" (__tmp) \ + : "b" (mem), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + __asm __volatile ( \ + "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \ + " cmpd %0,%2\n" \ + " bne 2f\n" \ + " stdcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&r" (__tmp) \ + : "b" (__memp), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp; \ + }) + +#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: ldarx %0,0,%1" MUTEX_HINT_REL "\n" \ + " cmpd %0,%2\n" \ + " bne 2f\n" \ + " stdcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " \ + : "=&r" (__tmp) \ + : "b" (__memp), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp; \ + }) + +#define __arch_atomic_exchange_64_acq(mem, value) \ + ({ \ + __typeof (*mem) __val; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: ldarx %0,0,%2" MUTEX_HINT_ACQ "\n" \ + " stdcx. %3,0,%2\n" \ + " bne- 1b\n" \ + " " __ARCH_ACQ_INSTR \ + : "=&r" (__val), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_64_rel(mem, value) \ + ({ \ + __typeof (*mem) __val; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: ldarx %0,0,%2" MUTEX_HINT_REL "\n" \ + " stdcx. %3,0,%2\n" \ + " bne- 1b" \ + : "=&r" (__val), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_and_add_64(mem, value) \ + ({ \ + __typeof (*mem) __val, __tmp; \ + __asm __volatile ("1: ldarx %0,0,%3\n" \ + " add %1,%0,%4\n" \ + " stdcx. %1,0,%3\n" \ + " bne- 1b" \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_and_add_64_acq(mem, value) \ + ({ \ + __typeof (*mem) __val, __tmp; \ + __asm __volatile ("1: ldarx %0,0,%3" MUTEX_HINT_ACQ "\n" \ + " add %1,%0,%4\n" \ + " stdcx. %1,0,%3\n" \ + " bne- 1b\n" \ + __ARCH_ACQ_INSTR \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_and_add_64_rel(mem, value) \ + ({ \ + __typeof (*mem) __val, __tmp; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: ldarx %0,0,%3" MUTEX_HINT_REL "\n" \ + " add %1,%0,%4\n" \ + " stdcx. %1,0,%3\n" \ + " bne- 1b" \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_increment_val_64(mem) \ + ({ \ + __typeof (*(mem)) __val; \ + __asm __volatile ("1: ldarx %0,0,%2\n" \ + " addi %0,%0,1\n" \ + " stdcx. 
%0,0,%2\n" \ + " bne- 1b" \ + : "=&b" (__val), "=m" (*mem) \ + : "b" (mem), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_decrement_val_64(mem) \ + ({ \ + __typeof (*(mem)) __val; \ + __asm __volatile ("1: ldarx %0,0,%2\n" \ + " subi %0,%0,1\n" \ + " stdcx. %0,0,%2\n" \ + " bne- 1b" \ + : "=&b" (__val), "=m" (*mem) \ + : "b" (mem), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_decrement_if_positive_64(mem) \ + ({ int __val, __tmp; \ + __asm __volatile ("1: ldarx %0,0,%3\n" \ + " cmpdi 0,%0,0\n" \ + " addi %1,%0,-1\n" \ + " ble 2f\n" \ + " stdcx. %1,0,%3\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +/* + * All powerpc64 processors support the new "light weight" sync (lwsync). + */ +#define atomic_read_barrier() __asm ("lwsync" ::: "memory") +/* + * "light weight" sync can also be used for the release barrier. + */ +#ifndef UP +# define __ARCH_REL_INSTR "lwsync" +#endif +#define atomic_write_barrier() __asm ("lwsync" ::: "memory") + +/* + * Include the rest of the atomic ops macros which are common to both + * powerpc32 and powerpc64. + */ +#include_next diff --git a/sysdeps/powerpc/powerpc64/bits/atomic.h b/sysdeps/powerpc/powerpc64/bits/atomic.h deleted file mode 100644 index 92a2a3df55..0000000000 --- a/sysdeps/powerpc/powerpc64/bits/atomic.h +++ /dev/null @@ -1,274 +0,0 @@ -/* Atomic operations. PowerPC64 version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Paul Mackerras , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -/* POWER6 adds a "Mutex Hint" to the Load and Reserve instruction. - This is a hint to the hardware to expect additional updates adjacent - to the lock word or not. If we are acquiring a Mutex, the hint - should be true. Otherwise we releasing a Mutex or doing a simple - atomic operation. In that case we don't expect additional updates - adjacent to the lock word after the Store Conditional and the hint - should be false. */ - -#if defined _ARCH_PWR6 || defined _ARCH_PWR6X -# define MUTEX_HINT_ACQ ",1" -# define MUTEX_HINT_REL ",0" -#else -# define MUTEX_HINT_ACQ -# define MUTEX_HINT_REL -#endif - -#define __HAVE_64B_ATOMICS 1 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -/* The 32-bit exchange_bool is different on powerpc64 because the subf - does signed 64-bit arithmetic while the lwarx is 32-bit unsigned - (a load word and zero (high 32) form) load. - In powerpc64 register values are 64-bit by default, including oldval. - The value in old val unknown sign extension, lwarx loads the 32-bit - value as unsigned. So we explicitly clear the high 32 bits in oldval. 
*/ -#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ -({ \ - unsigned int __tmp, __tmp2; \ - __asm __volatile (" clrldi %1,%1,32\n" \ - "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \ - " subf. %0,%1,%0\n" \ - " bne 2f\n" \ - " stwcx. %4,0,%2\n" \ - " bne- 1b\n" \ - "2: " __ARCH_ACQ_INSTR \ - : "=&r" (__tmp), "=r" (__tmp2) \ - : "b" (mem), "1" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp != 0; \ -}) - -#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \ -({ \ - unsigned int __tmp, __tmp2; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - " clrldi %1,%1,32\n" \ - "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \ - " subf. %0,%1,%0\n" \ - " bne 2f\n" \ - " stwcx. %4,0,%2\n" \ - " bne- 1b\n" \ - "2: " \ - : "=&r" (__tmp), "=r" (__tmp2) \ - : "b" (mem), "1" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp != 0; \ -}) - -/* - * Only powerpc64 processors support Load doubleword and reserve index (ldarx) - * and Store doubleword conditional indexed (stdcx) instructions. So here - * we define the 64-bit forms. - */ -#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ -({ \ - unsigned long __tmp; \ - __asm __volatile ( \ - "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \ - " subf. %0,%2,%0\n" \ - " bne 2f\n" \ - " stdcx. %3,0,%1\n" \ - " bne- 1b\n" \ - "2: " __ARCH_ACQ_INSTR \ - : "=&r" (__tmp) \ - : "b" (mem), "r" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp != 0; \ -}) - -#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \ -({ \ - unsigned long __tmp; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - "1: ldarx %0,0,%1" MUTEX_HINT_REL "\n" \ - " subf. %0,%2,%0\n" \ - " bne 2f\n" \ - " stdcx. %3,0,%1\n" \ - " bne- 1b\n" \ - "2: " \ - : "=&r" (__tmp) \ - : "b" (mem), "r" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp != 0; \ -}) - -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - ({ \ - __typeof (*(mem)) __tmp; \ - __typeof (mem) __memp = (mem); \ - __asm __volatile ( \ - "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \ - " cmpd %0,%2\n" \ - " bne 2f\n" \ - " stdcx. %3,0,%1\n" \ - " bne- 1b\n" \ - "2: " __ARCH_ACQ_INSTR \ - : "=&r" (__tmp) \ - : "b" (__memp), "r" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp; \ - }) - -#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \ - ({ \ - __typeof (*(mem)) __tmp; \ - __typeof (mem) __memp = (mem); \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - "1: ldarx %0,0,%1" MUTEX_HINT_REL "\n" \ - " cmpd %0,%2\n" \ - " bne 2f\n" \ - " stdcx. %3,0,%1\n" \ - " bne- 1b\n" \ - "2: " \ - : "=&r" (__tmp) \ - : "b" (__memp), "r" (oldval), "r" (newval) \ - : "cr0", "memory"); \ - __tmp; \ - }) - -#define __arch_atomic_exchange_64_acq(mem, value) \ - ({ \ - __typeof (*mem) __val; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - "1: ldarx %0,0,%2" MUTEX_HINT_ACQ "\n" \ - " stdcx. %3,0,%2\n" \ - " bne- 1b\n" \ - " " __ARCH_ACQ_INSTR \ - : "=&r" (__val), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_exchange_64_rel(mem, value) \ - ({ \ - __typeof (*mem) __val; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - "1: ldarx %0,0,%2" MUTEX_HINT_REL "\n" \ - " stdcx. %3,0,%2\n" \ - " bne- 1b" \ - : "=&r" (__val), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_exchange_and_add_64(mem, value) \ - ({ \ - __typeof (*mem) __val, __tmp; \ - __asm __volatile ("1: ldarx %0,0,%3\n" \ - " add %1,%0,%4\n" \ - " stdcx. 
%1,0,%3\n" \ - " bne- 1b" \ - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_exchange_and_add_64_acq(mem, value) \ - ({ \ - __typeof (*mem) __val, __tmp; \ - __asm __volatile ("1: ldarx %0,0,%3" MUTEX_HINT_ACQ "\n" \ - " add %1,%0,%4\n" \ - " stdcx. %1,0,%3\n" \ - " bne- 1b\n" \ - __ARCH_ACQ_INSTR \ - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_exchange_and_add_64_rel(mem, value) \ - ({ \ - __typeof (*mem) __val, __tmp; \ - __asm __volatile (__ARCH_REL_INSTR "\n" \ - "1: ldarx %0,0,%3" MUTEX_HINT_REL "\n" \ - " add %1,%0,%4\n" \ - " stdcx. %1,0,%3\n" \ - " bne- 1b" \ - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ - : "b" (mem), "r" (value), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_increment_val_64(mem) \ - ({ \ - __typeof (*(mem)) __val; \ - __asm __volatile ("1: ldarx %0,0,%2\n" \ - " addi %0,%0,1\n" \ - " stdcx. %0,0,%2\n" \ - " bne- 1b" \ - : "=&b" (__val), "=m" (*mem) \ - : "b" (mem), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_decrement_val_64(mem) \ - ({ \ - __typeof (*(mem)) __val; \ - __asm __volatile ("1: ldarx %0,0,%2\n" \ - " subi %0,%0,1\n" \ - " stdcx. %0,0,%2\n" \ - " bne- 1b" \ - : "=&b" (__val), "=m" (*mem) \ - : "b" (mem), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -#define __arch_atomic_decrement_if_positive_64(mem) \ - ({ int __val, __tmp; \ - __asm __volatile ("1: ldarx %0,0,%3\n" \ - " cmpdi 0,%0,0\n" \ - " addi %1,%0,-1\n" \ - " ble 2f\n" \ - " stdcx. %1,0,%3\n" \ - " bne- 1b\n" \ - "2: " __ARCH_ACQ_INSTR \ - : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ - : "b" (mem), "m" (*mem) \ - : "cr0", "memory"); \ - __val; \ - }) - -/* - * All powerpc64 processors support the new "light weight" sync (lwsync). - */ -#define atomic_read_barrier() __asm ("lwsync" ::: "memory") -/* - * "light weight" sync can also be used for the release barrier. - */ -#ifndef UP -# define __ARCH_REL_INSTR "lwsync" -#endif -#define atomic_write_barrier() __asm ("lwsync" ::: "memory") - -/* - * Include the rest of the atomic ops macros which are common to both - * powerpc32 and powerpc64. - */ -#include_next diff --git a/sysdeps/s390/atomic-machine.h b/sysdeps/s390/atomic-machine.h new file mode 100644 index 0000000000..16c8c54b94 --- /dev/null +++ b/sysdeps/s390/atomic-machine.h @@ -0,0 +1,120 @@ +/* Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Martin Schwidefsky , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +#include + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ __typeof (mem) __archmem = (mem); \ + __typeof (*mem) __archold = (oldval); \ + __asm __volatile ("cs %0,%2,%1" \ + : "+d" (__archold), "=Q" (*__archmem) \ + : "d" (newval), "m" (*__archmem) : "cc", "memory" ); \ + __archold; }) + +#ifdef __s390x__ +# define __HAVE_64B_ATOMICS 1 +# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __typeof (mem) __archmem = (mem); \ + __typeof (*mem) __archold = (oldval); \ + __asm __volatile ("csg %0,%2,%1" \ + : "+d" (__archold), "=Q" (*__archmem) \ + : "d" ((long) (newval)), "m" (*__archmem) : "cc", "memory" ); \ + __archold; }) +#else +# define __HAVE_64B_ATOMICS 0 +/* For 31 bit we do not really need 64-bit compare-and-exchange. We can + implement them by use of the csd instruction. The straightforward + implementation causes warnings so we skip the definition for now. */ +# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) +#endif + +/* Store NEWVALUE in *MEM and return the old value. */ +/* On s390, the atomic_exchange_acq is different from generic implementation, + because the generic one does not use the condition-code of cs-instruction + to determine if looping is needed. Instead it saves the old-value and + compares it against old-value returned by cs-instruction. 
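[Editorial aside: the generic implementation that the comment above contrasts with looks roughly like this; it re-reads *mem and compares old values itself, where the s390 loops below instead branch on the condition code that cs sets (sketch only, with a GCC builtin standing in for cs):

static inline int
sketch_generic_exchange_acq (int *mem, int newvalue)
{
  int old;
  do
    old = *mem;                 /* guess the current value */
  while (__sync_val_compare_and_swap (mem, old, newvalue) != old);
  return old;
}
]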
*/ +#ifdef __s390x__ +# define atomic_exchange_acq(mem, newvalue) \ + ({ __typeof (mem) __atg5_memp = (mem); \ + __typeof (*(mem)) __atg5_oldval = *__atg5_memp; \ + __typeof (*(mem)) __atg5_value = (newvalue); \ + if (sizeof (*mem) == 4) \ + __asm __volatile ("0: cs %0,%2,%1\n" \ + " jl 0b" \ + : "+d" (__atg5_oldval), "=Q" (*__atg5_memp) \ + : "d" (__atg5_value), "m" (*__atg5_memp) \ + : "cc", "memory" ); \ + else if (sizeof (*mem) == 8) \ + __asm __volatile ("0: csg %0,%2,%1\n" \ + " jl 0b" \ + : "+d" ( __atg5_oldval), "=Q" (*__atg5_memp) \ + : "d" ((long) __atg5_value), "m" (*__atg5_memp) \ + : "cc", "memory" ); \ + else \ + abort (); \ + __atg5_oldval; }) +#else +# define atomic_exchange_acq(mem, newvalue) \ + ({ __typeof (mem) __atg5_memp = (mem); \ + __typeof (*(mem)) __atg5_oldval = *__atg5_memp; \ + __typeof (*(mem)) __atg5_value = (newvalue); \ + if (sizeof (*mem) == 4) \ + __asm __volatile ("0: cs %0,%2,%1\n" \ + " jl 0b" \ + : "+d" (__atg5_oldval), "=Q" (*__atg5_memp) \ + : "d" (__atg5_value), "m" (*__atg5_memp) \ + : "cc", "memory" ); \ + else \ + abort (); \ + __atg5_oldval; }) +#endif diff --git a/sysdeps/s390/bits/atomic.h b/sysdeps/s390/bits/atomic.h deleted file mode 100644 index 16c8c54b94..0000000000 --- a/sysdeps/s390/bits/atomic.h +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Martin Schwidefsky , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ - -#include - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - ({ __typeof (mem) __archmem = (mem); \ - __typeof (*mem) __archold = (oldval); \ - __asm __volatile ("cs %0,%2,%1" \ - : "+d" (__archold), "=Q" (*__archmem) \ - : "d" (newval), "m" (*__archmem) : "cc", "memory" ); \ - __archold; }) - -#ifdef __s390x__ -# define __HAVE_64B_ATOMICS 1 -# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - ({ __typeof (mem) __archmem = (mem); \ - __typeof (*mem) __archold = (oldval); \ - __asm __volatile ("csg %0,%2,%1" \ - : "+d" (__archold), "=Q" (*__archmem) \ - : "d" ((long) (newval)), "m" (*__archmem) : "cc", "memory" ); \ - __archold; }) -#else -# define __HAVE_64B_ATOMICS 0 -/* For 31 bit we do not really need 64-bit compare-and-exchange. We can - implement them by use of the csd instruction. The straightforward - implementation causes warnings so we skip the definition for now. */ -# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) -#endif - -/* Store NEWVALUE in *MEM and return the old value. */ -/* On s390, the atomic_exchange_acq is different from generic implementation, - because the generic one does not use the condition-code of cs-instruction - to determine if looping is needed. Instead it saves the old-value and - compares it against old-value returned by cs-instruction. 
*/ -#ifdef __s390x__ -# define atomic_exchange_acq(mem, newvalue) \ - ({ __typeof (mem) __atg5_memp = (mem); \ - __typeof (*(mem)) __atg5_oldval = *__atg5_memp; \ - __typeof (*(mem)) __atg5_value = (newvalue); \ - if (sizeof (*mem) == 4) \ - __asm __volatile ("0: cs %0,%2,%1\n" \ - " jl 0b" \ - : "+d" (__atg5_oldval), "=Q" (*__atg5_memp) \ - : "d" (__atg5_value), "m" (*__atg5_memp) \ - : "cc", "memory" ); \ - else if (sizeof (*mem) == 8) \ - __asm __volatile ("0: csg %0,%2,%1\n" \ - " jl 0b" \ - : "+d" ( __atg5_oldval), "=Q" (*__atg5_memp) \ - : "d" ((long) __atg5_value), "m" (*__atg5_memp) \ - : "cc", "memory" ); \ - else \ - abort (); \ - __atg5_oldval; }) -#else -# define atomic_exchange_acq(mem, newvalue) \ - ({ __typeof (mem) __atg5_memp = (mem); \ - __typeof (*(mem)) __atg5_oldval = *__atg5_memp; \ - __typeof (*(mem)) __atg5_value = (newvalue); \ - if (sizeof (*mem) == 4) \ - __asm __volatile ("0: cs %0,%2,%1\n" \ - " jl 0b" \ - : "+d" (__atg5_oldval), "=Q" (*__atg5_memp) \ - : "d" (__atg5_value), "m" (*__atg5_memp) \ - : "cc", "memory" ); \ - else \ - abort (); \ - __atg5_oldval; }) -#endif diff --git a/sysdeps/sparc/sparc32/atomic-machine.h b/sysdeps/sparc/sparc32/atomic-machine.h new file mode 100644 index 0000000000..0fb1111635 --- /dev/null +++ b/sysdeps/sparc/sparc32/atomic-machine.h @@ -0,0 +1,360 @@ +/* Atomic operations. sparc32 version. + Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Jakub Jelinek , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#ifndef _ATOMIC_MACHINE_H +#define _ATOMIC_MACHINE_H 1 + +#include + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +/* We have no compare and swap, just test and set. + The following implementation contends on 64 global locks + per library and assumes no variable will be accessed using atomic.h + macros from two different libraries. 
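[Editorial aside: the __sparc32_atomic_do_lock macro that follows hashes the target address to pick one of the 64 ldstub byte locks. Pulled out as a plain function (a sketch; the two shifts mix word offsets with page offsets, so neighboring words and page-aligned objects land on different locks):

static inline unsigned int
sketch_lock_index (const void *addr)
{
  unsigned long a = (unsigned long) addr;
  return ((a >> 2) ^ (a >> 12)) & 63;   /* index into the 64-lock array */
}
]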
*/ + +__make_section_unallocated + (".gnu.linkonce.b.__sparc32_atomic_locks, \"aw\", %nobits"); + +volatile unsigned char __sparc32_atomic_locks[64] + __attribute__ ((nocommon, section (".gnu.linkonce.b.__sparc32_atomic_locks" + __sec_comment), + visibility ("hidden"))); + +#define __sparc32_atomic_do_lock(addr) \ + do \ + { \ + unsigned int __old_lock; \ + unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \ + & 63; \ + do \ + __asm __volatile ("ldstub %1, %0" \ + : "=r" (__old_lock), \ + "=m" (__sparc32_atomic_locks[__idx]) \ + : "m" (__sparc32_atomic_locks[__idx]) \ + : "memory"); \ + while (__old_lock); \ + } \ + while (0) + +#define __sparc32_atomic_do_unlock(addr) \ + do \ + { \ + __sparc32_atomic_locks[(((long) addr >> 2) \ + ^ ((long) addr >> 12)) & 63] = 0; \ + __asm __volatile ("" ::: "memory"); \ + } \ + while (0) + +#define __sparc32_atomic_do_lock24(addr) \ + do \ + { \ + unsigned int __old_lock; \ + do \ + __asm __volatile ("ldstub %1, %0" \ + : "=r" (__old_lock), "=m" (*(addr)) \ + : "m" (*(addr)) \ + : "memory"); \ + while (__old_lock); \ + } \ + while (0) + +#define __sparc32_atomic_do_unlock24(addr) \ + do \ + { \ + __asm __volatile ("" ::: "memory"); \ + *(char *) (addr) = 0; \ + } \ + while (0) + + +#ifndef SHARED +# define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \ +({union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) }; \ + union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) }; \ + register uint32_t __acev_tmp __asm ("%g6"); \ + register __typeof (mem) __acev_mem __asm ("%g1") = (mem); \ + register uint32_t __acev_oldval __asm ("%g5"); \ + __acev_tmp = newval_arg.v; \ + __acev_oldval = oldval_arg.v; \ + /* .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, \ + because as will then mark the object file as V8+ arch. */ \ + __asm __volatile (".word 0xcde05005" \ + : "+r" (__acev_tmp), "=m" (*__acev_mem) \ + : "r" (__acev_oldval), "m" (*__acev_mem), \ + "r" (__acev_mem) : "memory"); \ + (__typeof (oldval)) __acev_tmp; }) +#endif + +/* The only basic operation needed is compare and exchange. 
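[Editorial aside: the .word trick above, shown as a complete function. Hand-encoding 0xcde05005 (cas [%g1], %g5, %g6) keeps the assembler from tagging the object file as V8+, at the cost of pinning the operands to fixed registers (sketch only; it mirrors the macro's constraints):

static inline unsigned int
sketch_v9_cas32 (unsigned int *mem, unsigned int newval, unsigned int oldval)
{
  register unsigned int *m __asm__ ("%g1") = mem;
  register unsigned int n __asm__ ("%g6") = newval;
  register unsigned int o __asm__ ("%g5") = oldval;
  __asm__ __volatile__ (".word 0xcde05005"     /* cas [%g1], %g5, %g6 */
                        : "+r" (n), "=m" (*m)
                        : "r" (o), "m" (*m), "r" (m)
                        : "memory");
  return n;                                    /* old value of *mem */
}
]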
*/ +#define __v7_compare_and_exchange_val_acq(mem, newval, oldval) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + __typeof (*mem) __acev_newval = (newval); \ + \ + __sparc32_atomic_do_lock (__acev_memp); \ + __acev_ret = *__acev_memp; \ + if (__acev_ret == (oldval)) \ + *__acev_memp = __acev_newval; \ + __sparc32_atomic_do_unlock (__acev_memp); \ + __acev_ret; }) + +#define __v7_compare_and_exchange_bool_acq(mem, newval, oldval) \ + ({ __typeof (mem) __aceb_memp = (mem); \ + int __aceb_ret; \ + __typeof (*mem) __aceb_newval = (newval); \ + \ + __sparc32_atomic_do_lock (__aceb_memp); \ + __aceb_ret = 0; \ + if (*__aceb_memp == (oldval)) \ + *__aceb_memp = __aceb_newval; \ + else \ + __aceb_ret = 1; \ + __sparc32_atomic_do_unlock (__aceb_memp); \ + __aceb_ret; }) + +#define __v7_exchange_acq(mem, newval) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + __typeof (*mem) __acev_newval = (newval); \ + \ + __sparc32_atomic_do_lock (__acev_memp); \ + __acev_ret = *__acev_memp; \ + *__acev_memp = __acev_newval; \ + __sparc32_atomic_do_unlock (__acev_memp); \ + __acev_ret; }) + +#define __v7_exchange_and_add(mem, value) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + \ + __sparc32_atomic_do_lock (__acev_memp); \ + __acev_ret = *__acev_memp; \ + *__acev_memp = __acev_ret + (value); \ + __sparc32_atomic_do_unlock (__acev_memp); \ + __acev_ret; }) + +/* Special versions, which guarantee that top 8 bits of all values + are cleared and use those bits as the ldstub lock. */ +#define __v7_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + __typeof (*mem) __acev_newval = (newval); \ + \ + __sparc32_atomic_do_lock24 (__acev_memp); \ + __acev_ret = *__acev_memp & 0xffffff; \ + if (__acev_ret == (oldval)) \ + *__acev_memp = __acev_newval; \ + else \ + __sparc32_atomic_do_unlock24 (__acev_memp); \ + __asm __volatile ("" ::: "memory"); \ + __acev_ret; }) + +#define __v7_exchange_24_rel(mem, newval) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + __typeof (*mem) __acev_newval = (newval); \ + \ + __sparc32_atomic_do_lock24 (__acev_memp); \ + __acev_ret = *__acev_memp & 0xffffff; \ + *__acev_memp = __acev_newval; \ + __asm __volatile ("" ::: "memory"); \ + __acev_ret; }) + +#ifdef SHARED + +/* When dynamically linked, we assume pre-v9 libraries are only ever + used on pre-v9 CPU. */ +# define __atomic_is_v9 0 + +# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + __v7_compare_and_exchange_val_acq (mem, newval, oldval) + +# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + __v7_compare_and_exchange_bool_acq (mem, newval, oldval) + +# define atomic_exchange_acq(mem, newval) \ + __v7_exchange_acq (mem, newval) + +# define atomic_exchange_and_add(mem, value) \ + __v7_exchange_and_add (mem, value) + +# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + ({ \ + if (sizeof (*mem) != 4) \ + abort (); \ + __v7_compare_and_exchange_val_24_acq (mem, newval, oldval); }) + +# define atomic_exchange_24_rel(mem, newval) \ + ({ \ + if (sizeof (*mem) != 4) \ + abort (); \ + __v7_exchange_24_rel (mem, newval); }) + +# define atomic_full_barrier() __asm ("" ::: "memory") +# define atomic_read_barrier() atomic_full_barrier () +# define atomic_write_barrier() atomic_full_barrier () + +#else + +/* In libc.a/libpthread.a etc. we don't know if we'll be run on + pre-v9 or v9 CPU. 
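[Editorial aside: the runtime v9 test that follows takes a weak reference to _dl_hwcap, the dynamic linker's copy of the AT_HWCAP auxv entry. A sketch of that idiom; the HWCAP_SPARC_V9 value is assumed from glibc's sparc bits/hwcap.h:

#include <stdint.h>

#ifndef HWCAP_SPARC_V9
# define HWCAP_SPARC_V9 16          /* assumed value; see bits/hwcap.h */
#endif

extern uint64_t _dl_hwcap __attribute__ ((weak));

static inline int
sketch_is_v9 (void)
{
  /* In a static binary the symbol may be absent, so check its address
     before reading the capability bits.  */
  return &_dl_hwcap != 0 && (_dl_hwcap & HWCAP_SPARC_V9) != 0;
}
]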
To be interoperable with dynamically linked + apps on v9 CPUs e.g. with process shared primitives, use cas insn + on v9 CPUs and ldstub on pre-v9. */ + +extern uint64_t _dl_hwcap __attribute__((weak)); +# define __atomic_is_v9 \ + (__builtin_expect (&_dl_hwcap != 0, 1) \ + && __builtin_expect (_dl_hwcap & HWCAP_SPARC_V9, HWCAP_SPARC_V9)) + +# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + ({ \ + __typeof (*mem) __acev_wret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + __acev_wret \ + = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ + else \ + __acev_wret \ + = __v7_compare_and_exchange_val_acq (mem, newval, oldval); \ + __acev_wret; }) + +# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + ({ \ + int __acev_wret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + { \ + __typeof (oldval) __acev_woldval = (oldval); \ + __acev_wret \ + = __v9_compare_and_exchange_val_32_acq (mem, newval, \ + __acev_woldval) \ + != __acev_woldval; \ + } \ + else \ + __acev_wret \ + = __v7_compare_and_exchange_bool_acq (mem, newval, oldval); \ + __acev_wret; }) + +# define atomic_exchange_rel(mem, newval) \ + ({ \ + __typeof (*mem) __acev_wret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + { \ + __typeof (mem) __acev_wmemp = (mem); \ + __typeof (*(mem)) __acev_wval = (newval); \ + do \ + __acev_wret = *__acev_wmemp; \ + while (__builtin_expect \ + (__v9_compare_and_exchange_val_32_acq (__acev_wmemp,\ + __acev_wval, \ + __acev_wret) \ + != __acev_wret, 0)); \ + } \ + else \ + __acev_wret = __v7_exchange_acq (mem, newval); \ + __acev_wret; }) + +# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + ({ \ + __typeof (*mem) __acev_wret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + __acev_wret \ + = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ + else \ + __acev_wret \ + = __v7_compare_and_exchange_val_24_acq (mem, newval, oldval);\ + __acev_wret; }) + +# define atomic_exchange_24_rel(mem, newval) \ + ({ \ + __typeof (*mem) __acev_w24ret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + __acev_w24ret = atomic_exchange_rel (mem, newval); \ + else \ + __acev_w24ret = __v7_exchange_24_rel (mem, newval); \ + __acev_w24ret; }) + +#define atomic_full_barrier() \ + do { \ + if (__atomic_is_v9) \ + /* membar #LoadLoad | #LoadStore | #StoreLoad | #StoreStore */ \ + __asm __volatile (".word 0x8143e00f" : : : "memory"); \ + else \ + __asm __volatile ("" : : : "memory"); \ + } while (0) + +#define atomic_read_barrier() \ + do { \ + if (__atomic_is_v9) \ + /* membar #LoadLoad | #LoadStore */ \ + __asm __volatile (".word 0x8143e005" : : : "memory"); \ + else \ + __asm __volatile ("" : : : "memory"); \ + } while (0) + +#define atomic_write_barrier() \ + do { \ + if (__atomic_is_v9) \ + /* membar #LoadStore | #StoreStore */ \ + __asm __volatile (".word 0x8143e00c" : : : "memory"); \ + else \ + __asm __volatile ("" : : : "memory"); \ + } while (0) + +#endif + +#include + +#endif /* atomic-machine.h */ diff --git a/sysdeps/sparc/sparc32/bits/atomic.h b/sysdeps/sparc/sparc32/bits/atomic.h deleted file mode 100644 index 4242ba831a..0000000000 --- a/sysdeps/sparc/sparc32/bits/atomic.h +++ /dev/null @@ -1,360 +0,0 @@ -/* Atomic operations. sparc32 version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Jakub Jelinek , 2003. 
- - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#ifndef _BITS_ATOMIC_H -#define _BITS_ATOMIC_H 1 - -#include - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -/* We have no compare and swap, just test and set. - The following implementation contends on 64 global locks - per library and assumes no variable will be accessed using atomic.h - macros from two different libraries. */ - -__make_section_unallocated - (".gnu.linkonce.b.__sparc32_atomic_locks, \"aw\", %nobits"); - -volatile unsigned char __sparc32_atomic_locks[64] - __attribute__ ((nocommon, section (".gnu.linkonce.b.__sparc32_atomic_locks" - __sec_comment), - visibility ("hidden"))); - -#define __sparc32_atomic_do_lock(addr) \ - do \ - { \ - unsigned int __old_lock; \ - unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \ - & 63; \ - do \ - __asm __volatile ("ldstub %1, %0" \ - : "=r" (__old_lock), \ - "=m" (__sparc32_atomic_locks[__idx]) \ - : "m" (__sparc32_atomic_locks[__idx]) \ - : "memory"); \ - while (__old_lock); \ - } \ - while (0) - -#define __sparc32_atomic_do_unlock(addr) \ - do \ - { \ - __sparc32_atomic_locks[(((long) addr >> 2) \ - ^ ((long) addr >> 12)) & 63] = 0; \ - __asm __volatile ("" ::: "memory"); \ - } \ - while (0) - -#define __sparc32_atomic_do_lock24(addr) \ - do \ - { \ - unsigned int __old_lock; \ - do \ - __asm __volatile ("ldstub %1, %0" \ - : "=r" (__old_lock), "=m" (*(addr)) \ - : "m" (*(addr)) \ - : "memory"); \ - while (__old_lock); \ - } \ - while (0) - -#define __sparc32_atomic_do_unlock24(addr) \ - do \ - { \ - __asm __volatile ("" ::: "memory"); \ - *(char *) (addr) = 0; \ - } \ - while (0) - - -#ifndef SHARED -# define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \ -({union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) }; \ - union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) }; \ - register uint32_t __acev_tmp __asm ("%g6"); \ - register __typeof (mem) __acev_mem __asm ("%g1") = (mem); \ - register uint32_t __acev_oldval __asm ("%g5"); \ - __acev_tmp = newval_arg.v; \ - __acev_oldval = oldval_arg.v; \ - /* .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, \ - because as will then mark the object file as V8+ arch. 
*/ \ - __asm __volatile (".word 0xcde05005" \ - : "+r" (__acev_tmp), "=m" (*__acev_mem) \ - : "r" (__acev_oldval), "m" (*__acev_mem), \ - "r" (__acev_mem) : "memory"); \ - (__typeof (oldval)) __acev_tmp; }) -#endif - -/* The only basic operation needed is compare and exchange. */ -#define __v7_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - __typeof (*mem) __acev_newval = (newval); \ - \ - __sparc32_atomic_do_lock (__acev_memp); \ - __acev_ret = *__acev_memp; \ - if (__acev_ret == (oldval)) \ - *__acev_memp = __acev_newval; \ - __sparc32_atomic_do_unlock (__acev_memp); \ - __acev_ret; }) - -#define __v7_compare_and_exchange_bool_acq(mem, newval, oldval) \ - ({ __typeof (mem) __aceb_memp = (mem); \ - int __aceb_ret; \ - __typeof (*mem) __aceb_newval = (newval); \ - \ - __sparc32_atomic_do_lock (__aceb_memp); \ - __aceb_ret = 0; \ - if (*__aceb_memp == (oldval)) \ - *__aceb_memp = __aceb_newval; \ - else \ - __aceb_ret = 1; \ - __sparc32_atomic_do_unlock (__aceb_memp); \ - __aceb_ret; }) - -#define __v7_exchange_acq(mem, newval) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - __typeof (*mem) __acev_newval = (newval); \ - \ - __sparc32_atomic_do_lock (__acev_memp); \ - __acev_ret = *__acev_memp; \ - *__acev_memp = __acev_newval; \ - __sparc32_atomic_do_unlock (__acev_memp); \ - __acev_ret; }) - -#define __v7_exchange_and_add(mem, value) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - \ - __sparc32_atomic_do_lock (__acev_memp); \ - __acev_ret = *__acev_memp; \ - *__acev_memp = __acev_ret + (value); \ - __sparc32_atomic_do_unlock (__acev_memp); \ - __acev_ret; }) - -/* Special versions, which guarantee that top 8 bits of all values - are cleared and use those bits as the ldstub lock. */ -#define __v7_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - __typeof (*mem) __acev_newval = (newval); \ - \ - __sparc32_atomic_do_lock24 (__acev_memp); \ - __acev_ret = *__acev_memp & 0xffffff; \ - if (__acev_ret == (oldval)) \ - *__acev_memp = __acev_newval; \ - else \ - __sparc32_atomic_do_unlock24 (__acev_memp); \ - __asm __volatile ("" ::: "memory"); \ - __acev_ret; }) - -#define __v7_exchange_24_rel(mem, newval) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - __typeof (*mem) __acev_newval = (newval); \ - \ - __sparc32_atomic_do_lock24 (__acev_memp); \ - __acev_ret = *__acev_memp & 0xffffff; \ - *__acev_memp = __acev_newval; \ - __asm __volatile ("" ::: "memory"); \ - __acev_ret; }) - -#ifdef SHARED - -/* When dynamically linked, we assume pre-v9 libraries are only ever - used on pre-v9 CPU. 
*/ -# define __atomic_is_v9 0 - -# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - __v7_compare_and_exchange_val_acq (mem, newval, oldval) - -# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ - __v7_compare_and_exchange_bool_acq (mem, newval, oldval) - -# define atomic_exchange_acq(mem, newval) \ - __v7_exchange_acq (mem, newval) - -# define atomic_exchange_and_add(mem, value) \ - __v7_exchange_and_add (mem, value) - -# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - ({ \ - if (sizeof (*mem) != 4) \ - abort (); \ - __v7_compare_and_exchange_val_24_acq (mem, newval, oldval); }) - -# define atomic_exchange_24_rel(mem, newval) \ - ({ \ - if (sizeof (*mem) != 4) \ - abort (); \ - __v7_exchange_24_rel (mem, newval); }) - -# define atomic_full_barrier() __asm ("" ::: "memory") -# define atomic_read_barrier() atomic_full_barrier () -# define atomic_write_barrier() atomic_full_barrier () - -#else - -/* In libc.a/libpthread.a etc. we don't know if we'll be run on - pre-v9 or v9 CPU. To be interoperable with dynamically linked - apps on v9 CPUs e.g. with process shared primitives, use cas insn - on v9 CPUs and ldstub on pre-v9. */ - -extern uint64_t _dl_hwcap __attribute__((weak)); -# define __atomic_is_v9 \ - (__builtin_expect (&_dl_hwcap != 0, 1) \ - && __builtin_expect (_dl_hwcap & HWCAP_SPARC_V9, HWCAP_SPARC_V9)) - -# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ \ - __typeof (*mem) __acev_wret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - __acev_wret \ - = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ - else \ - __acev_wret \ - = __v7_compare_and_exchange_val_acq (mem, newval, oldval); \ - __acev_wret; }) - -# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ - ({ \ - int __acev_wret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - { \ - __typeof (oldval) __acev_woldval = (oldval); \ - __acev_wret \ - = __v9_compare_and_exchange_val_32_acq (mem, newval, \ - __acev_woldval) \ - != __acev_woldval; \ - } \ - else \ - __acev_wret \ - = __v7_compare_and_exchange_bool_acq (mem, newval, oldval); \ - __acev_wret; }) - -# define atomic_exchange_rel(mem, newval) \ - ({ \ - __typeof (*mem) __acev_wret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - { \ - __typeof (mem) __acev_wmemp = (mem); \ - __typeof (*(mem)) __acev_wval = (newval); \ - do \ - __acev_wret = *__acev_wmemp; \ - while (__builtin_expect \ - (__v9_compare_and_exchange_val_32_acq (__acev_wmemp,\ - __acev_wval, \ - __acev_wret) \ - != __acev_wret, 0)); \ - } \ - else \ - __acev_wret = __v7_exchange_acq (mem, newval); \ - __acev_wret; }) - -# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - ({ \ - __typeof (*mem) __acev_wret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - __acev_wret \ - = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ - else \ - __acev_wret \ - = __v7_compare_and_exchange_val_24_acq (mem, newval, oldval);\ - __acev_wret; }) - -# define atomic_exchange_24_rel(mem, newval) \ - ({ \ - __typeof (*mem) __acev_w24ret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - __acev_w24ret = atomic_exchange_rel (mem, newval); \ - else \ - __acev_w24ret = __v7_exchange_24_rel (mem, newval); \ - __acev_w24ret; }) - -#define atomic_full_barrier() \ - do { \ - if (__atomic_is_v9) \ - /* membar #LoadLoad | #LoadStore | #StoreLoad | #StoreStore */ \ - __asm __volatile (".word 
0x8143e00f" : : : "memory"); \ - else \ - __asm __volatile ("" : : : "memory"); \ - } while (0) - -#define atomic_read_barrier() \ - do { \ - if (__atomic_is_v9) \ - /* membar #LoadLoad | #LoadStore */ \ - __asm __volatile (".word 0x8143e005" : : : "memory"); \ - else \ - __asm __volatile ("" : : : "memory"); \ - } while (0) - -#define atomic_write_barrier() \ - do { \ - if (__atomic_is_v9) \ - /* membar #LoadStore | #StoreStore */ \ - __asm __volatile (".word 0x8143e00c" : : : "memory"); \ - else \ - __asm __volatile ("" : : : "memory"); \ - } while (0) - -#endif - -#include - -#endif /* bits/atomic.h */ diff --git a/sysdeps/sparc/sparc32/sparcv9/atomic-machine.h b/sysdeps/sparc/sparc32/sparcv9/atomic-machine.h new file mode 100644 index 0000000000..0ff5dcd69d --- /dev/null +++ b/sysdeps/sparc/sparc32/sparcv9/atomic-machine.h @@ -0,0 +1,105 @@ +/* Atomic operations. sparcv9 version. + Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Jakub Jelinek , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ +({ \ + __typeof (*(mem)) __acev_tmp; \ + __typeof (mem) __acev_mem = (mem); \ + if (__builtin_constant_p (oldval) && (oldval) == 0) \ + __asm __volatile ("cas [%3], %%g0, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "m" (*__acev_mem), "r" (__acev_mem), \ + "0" (newval) : "memory"); \ + else \ + __asm __volatile ("cas [%4], %2, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \ + "0" (newval) : "memory"); \ + __acev_tmp; }) + +/* This can be implemented if needed. 
*/ +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define atomic_exchange_acq(mem, newvalue) \ + ({ __typeof (*(mem)) __oldval; \ + __typeof (mem) __memp = (mem); \ + __typeof (*(mem)) __value = (newvalue); \ + \ + if (sizeof (*(mem)) == 4) \ + __asm ("swap %0, %1" \ + : "=m" (*__memp), "=r" (__oldval) \ + : "m" (*__memp), "1" (__value) : "memory"); \ + else \ + abort (); \ + __oldval; }) + +#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + atomic_compare_and_exchange_val_acq (mem, newval, oldval) + +#define atomic_exchange_24_rel(mem, newval) \ + atomic_exchange_rel (mem, newval) + +#define atomic_full_barrier() \ + __asm __volatile ("membar #LoadLoad | #LoadStore" \ + " | #StoreLoad | #StoreStore" : : : "memory") +#define atomic_read_barrier() \ + __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory") +#define atomic_write_barrier() \ + __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory") + +extern void __cpu_relax (void); +#define atomic_spin_nop() __cpu_relax () diff --git a/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h b/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h deleted file mode 100644 index 0ff5dcd69d..0000000000 --- a/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h +++ /dev/null @@ -1,105 +0,0 @@ -/* Atomic operations. sparcv9 version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Jakub Jelinek , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ - -#include - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ -({ \ - __typeof (*(mem)) __acev_tmp; \ - __typeof (mem) __acev_mem = (mem); \ - if (__builtin_constant_p (oldval) && (oldval) == 0) \ - __asm __volatile ("cas [%3], %%g0, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "m" (*__acev_mem), "r" (__acev_mem), \ - "0" (newval) : "memory"); \ - else \ - __asm __volatile ("cas [%4], %2, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \ - "0" (newval) : "memory"); \ - __acev_tmp; }) - -/* This can be implemented if needed. */ -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define atomic_exchange_acq(mem, newvalue) \ - ({ __typeof (*(mem)) __oldval; \ - __typeof (mem) __memp = (mem); \ - __typeof (*(mem)) __value = (newvalue); \ - \ - if (sizeof (*(mem)) == 4) \ - __asm ("swap %0, %1" \ - : "=m" (*__memp), "=r" (__oldval) \ - : "m" (*__memp), "1" (__value) : "memory"); \ - else \ - abort (); \ - __oldval; }) - -#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - atomic_compare_and_exchange_val_acq (mem, newval, oldval) - -#define atomic_exchange_24_rel(mem, newval) \ - atomic_exchange_rel (mem, newval) - -#define atomic_full_barrier() \ - __asm __volatile ("membar #LoadLoad | #LoadStore" \ - " | #StoreLoad | #StoreStore" : : : "memory") -#define atomic_read_barrier() \ - __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory") -#define atomic_write_barrier() \ - __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory") - -extern void __cpu_relax (void); -#define atomic_spin_nop() __cpu_relax () diff --git a/sysdeps/sparc/sparc64/atomic-machine.h b/sysdeps/sparc/sparc64/atomic-machine.h new file mode 100644 index 0000000000..79f4a1d9be --- /dev/null +++ b/sysdeps/sparc/sparc64/atomic-machine.h @@ -0,0 +1,126 @@ +/* Atomic operations. sparc64 version. + Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Jakub Jelinek , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 1 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ +({ \ + __typeof (*(mem)) __acev_tmp; \ + __typeof (mem) __acev_mem = (mem); \ + if (__builtin_constant_p (oldval) && (oldval) == 0) \ + __asm __volatile ("cas [%3], %%g0, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "m" (*__acev_mem), "r" (__acev_mem), \ + "0" (newval) : "memory"); \ + else \ + __asm __volatile ("cas [%4], %2, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \ + "0" (newval) : "memory"); \ + __acev_tmp; }) + +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ +({ \ + __typeof (*(mem)) __acev_tmp; \ + __typeof (mem) __acev_mem = (mem); \ + if (__builtin_constant_p (oldval) && (oldval) == 0) \ + __asm __volatile ("casx [%3], %%g0, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "m" (*__acev_mem), "r" (__acev_mem), \ + "0" ((long) (newval)) : "memory"); \ + else \ + __asm __volatile ("casx [%4], %2, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "r" ((long) (oldval)), "m" (*__acev_mem), \ + "r" (__acev_mem), "0" ((long) (newval)) : "memory"); \ + __acev_tmp; }) + +#define atomic_exchange_acq(mem, newvalue) \ + ({ __typeof (*(mem)) __oldval, __val; \ + __typeof (mem) __memp = (mem); \ + __typeof (*(mem)) __value = (newvalue); \ + \ + if (sizeof (*(mem)) == 4) \ + __asm ("swap %0, %1" \ + : "=m" (*__memp), "=r" (__oldval) \ + : "m" (*__memp), "1" (__value) : "memory"); \ + else \ + { \ + __val = *__memp; \ + do \ + { \ + __oldval = __val; \ + __val = atomic_compare_and_exchange_val_acq (__memp, __value, \ + __oldval); \ + } \ + while (__builtin_expect (__val != __oldval, 0)); \ + } \ + __oldval; }) + +#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + atomic_compare_and_exchange_val_acq (mem, newval, oldval) + +#define atomic_exchange_24_rel(mem, newval) \ + atomic_exchange_rel (mem, newval) + +#define atomic_full_barrier() \ + __asm __volatile ("membar #LoadLoad | #LoadStore" \ + " | #StoreLoad | #StoreStore" : : : "memory") +#define atomic_read_barrier() \ + __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory") +#define atomic_write_barrier() \ + __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory") + +extern void __cpu_relax (void); +#define atomic_spin_nop() __cpu_relax () diff --git 
a/sysdeps/sparc/sparc64/bits/atomic.h b/sysdeps/sparc/sparc64/bits/atomic.h deleted file mode 100644 index 79f4a1d9be..0000000000 --- a/sysdeps/sparc/sparc64/bits/atomic.h +++ /dev/null @@ -1,126 +0,0 @@ -/* Atomic operations. sparc64 version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Jakub Jelinek , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 1 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ -({ \ - __typeof (*(mem)) __acev_tmp; \ - __typeof (mem) __acev_mem = (mem); \ - if (__builtin_constant_p (oldval) && (oldval) == 0) \ - __asm __volatile ("cas [%3], %%g0, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "m" (*__acev_mem), "r" (__acev_mem), \ - "0" (newval) : "memory"); \ - else \ - __asm __volatile ("cas [%4], %2, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \ - "0" (newval) : "memory"); \ - __acev_tmp; }) - -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ -({ \ - __typeof (*(mem)) __acev_tmp; \ - __typeof (mem) __acev_mem = (mem); \ - if (__builtin_constant_p (oldval) && (oldval) == 0) \ - __asm __volatile ("casx [%3], %%g0, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "m" (*__acev_mem), "r" (__acev_mem), \ - "0" ((long) (newval)) : "memory"); \ - else \ - __asm __volatile ("casx [%4], %2, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "r" ((long) (oldval)), "m" (*__acev_mem), \ - "r" (__acev_mem), "0" ((long) (newval)) : "memory"); \ - __acev_tmp; }) - -#define atomic_exchange_acq(mem, newvalue) \ - ({ __typeof (*(mem)) __oldval, __val; \ - __typeof (mem) __memp = (mem); \ - __typeof (*(mem)) __value = (newvalue); \ - \ - if (sizeof (*(mem)) == 4) \ - __asm ("swap %0, %1" \ - : "=m" (*__memp), "=r" (__oldval) \ - : "m" (*__memp), "1" (__value) : "memory"); \ - else \ - { \ - __val = *__memp; \ - do \ - { \ - __oldval 
= __val; \ - __val = atomic_compare_and_exchange_val_acq (__memp, __value, \ - __oldval); \ - } \ - while (__builtin_expect (__val != __oldval, 0)); \ - } \ - __oldval; }) - -#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - atomic_compare_and_exchange_val_acq (mem, newval, oldval) - -#define atomic_exchange_24_rel(mem, newval) \ - atomic_exchange_rel (mem, newval) - -#define atomic_full_barrier() \ - __asm __volatile ("membar #LoadLoad | #LoadStore" \ - " | #StoreLoad | #StoreStore" : : : "memory") -#define atomic_read_barrier() \ - __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory") -#define atomic_write_barrier() \ - __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory") - -extern void __cpu_relax (void); -#define atomic_spin_nop() __cpu_relax () diff --git a/sysdeps/tile/atomic-machine.h b/sysdeps/tile/atomic-machine.h new file mode 100644 index 0000000000..eabb07e711 --- /dev/null +++ b/sysdeps/tile/atomic-machine.h @@ -0,0 +1,86 @@ +/* Copyright (C) 2011-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Chris Metcalf , 2011. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +/* The sub-architecture headers provide definitions for these macros + that work for "int" and "long" size values only: + + atomic_compare_and_exchange_val_acq() + atomic_exchange_acq() + atomic_exchange_and_add() + atomic_and_val() + atomic_or_val() + atomic_decrement_if_positive() [tilegx only] + + Here we provide generic definitions true for all Tilera chips. */ + +#include +#include + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +/* Barrier macro. */ +#define atomic_full_barrier() __sync_synchronize() + +/* APIs with "release" semantics. */ +#define atomic_compare_and_exchange_val_rel(mem, n, o) \ + ({ \ + atomic_full_barrier (); \ + atomic_compare_and_exchange_val_acq ((mem), (n), (o)); \ + }) +#define atomic_compare_and_exchange_bool_rel(mem, n, o) \ + ({ \ + atomic_full_barrier (); \ + atomic_compare_and_exchange_bool_acq ((mem), (n), (o)); \ + }) +#define atomic_exchange_rel(mem, n) \ + ({ \ + atomic_full_barrier (); \ + atomic_exchange_acq ((mem), (n)); \ + }) + +/* Various macros that should just be synonyms. */ +#define catomic_exchange_and_add atomic_exchange_and_add +#define atomic_and(mem, mask) ((void) atomic_and_val ((mem), (mask))) +#define catomic_and atomic_and +#define atomic_or(mem, mask) ((void) atomic_or_val ((mem), (mask))) +#define catomic_or atomic_or + +/* atomic_bit_test_set in terms of atomic_or_val. 
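atomic_bit_test_set here is the usual fetch-or construction: OR the mask in atomically, then test that bit in the returned old value. In portable C, with the GCC builtin standing in for atomic_or_val (a sketch, not the patch's code):

  /* Sets BIT; returns nonzero iff it was already set.  */
  static int
  bit_test_set (volatile unsigned int *mem, int bit)
  {
    unsigned int mask = 1u << bit;
    return (__atomic_fetch_or (mem, mask, __ATOMIC_ACQUIRE) & mask) != 0;
  }

The _rel variants in this tile header are derived the same way, by issuing a full barrier before the corresponding _acq operation.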
*/ +#define atomic_bit_test_set(mem, bit) \ + ({ __typeof (*(mem)) __att0_mask = ((__typeof (*(mem))) 1 << (bit)); \ + atomic_or_val ((mem), __att0_mask) & __att0_mask; }) + +/* + * This non-existent symbol is called for unsupporrted sizes, + * indicating a bug in the caller. + */ +extern int __atomic_error_bad_argument_size(void) + __attribute__ ((warning ("bad sizeof atomic argument"))); diff --git a/sysdeps/tile/bits/atomic.h b/sysdeps/tile/bits/atomic.h deleted file mode 100644 index eabb07e711..0000000000 --- a/sysdeps/tile/bits/atomic.h +++ /dev/null @@ -1,86 +0,0 @@ -/* Copyright (C) 2011-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Chris Metcalf , 2011. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . */ - -/* The sub-architecture headers provide definitions for these macros - that work for "int" and "long" size values only: - - atomic_compare_and_exchange_val_acq() - atomic_exchange_acq() - atomic_exchange_and_add() - atomic_and_val() - atomic_or_val() - atomic_decrement_if_positive() [tilegx only] - - Here we provide generic definitions true for all Tilera chips. */ - -#include -#include - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -/* Barrier macro. */ -#define atomic_full_barrier() __sync_synchronize() - -/* APIs with "release" semantics. */ -#define atomic_compare_and_exchange_val_rel(mem, n, o) \ - ({ \ - atomic_full_barrier (); \ - atomic_compare_and_exchange_val_acq ((mem), (n), (o)); \ - }) -#define atomic_compare_and_exchange_bool_rel(mem, n, o) \ - ({ \ - atomic_full_barrier (); \ - atomic_compare_and_exchange_bool_acq ((mem), (n), (o)); \ - }) -#define atomic_exchange_rel(mem, n) \ - ({ \ - atomic_full_barrier (); \ - atomic_exchange_acq ((mem), (n)); \ - }) - -/* Various macros that should just be synonyms. */ -#define catomic_exchange_and_add atomic_exchange_and_add -#define atomic_and(mem, mask) ((void) atomic_and_val ((mem), (mask))) -#define catomic_and atomic_and -#define atomic_or(mem, mask) ((void) atomic_or_val ((mem), (mask))) -#define catomic_or atomic_or - -/* atomic_bit_test_set in terms of atomic_or_val. */ -#define atomic_bit_test_set(mem, bit) \ - ({ __typeof (*(mem)) __att0_mask = ((__typeof (*(mem))) 1 << (bit)); \ - atomic_or_val ((mem), __att0_mask) & __att0_mask; }) - -/* - * This non-existent symbol is called for unsupporrted sizes, - * indicating a bug in the caller. 
- */ -extern int __atomic_error_bad_argument_size(void) - __attribute__ ((warning ("bad sizeof atomic argument"))); diff --git a/sysdeps/tile/tilegx/atomic-machine.h b/sysdeps/tile/tilegx/atomic-machine.h new file mode 100644 index 0000000000..1f7805bb88 --- /dev/null +++ b/sysdeps/tile/tilegx/atomic-machine.h @@ -0,0 +1,60 @@ +/* Copyright (C) 2011-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Chris Metcalf , 2011. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#ifndef _ATOMIC_MACHINE_H +#define _ATOMIC_MACHINE_H 1 + +#include + +#ifdef _LP64 +# define __HAVE_64B_ATOMICS 1 +#else +/* tilegx32 does have 64-bit atomics, but assumptions in the semaphore + code mean that unaligned 64-bit atomics will be used if this symbol + is true, and unaligned atomics are not supported on tile. */ +# define __HAVE_64B_ATOMICS 0 +#endif + +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +/* Pick appropriate 8- or 4-byte instruction. */ +#define __atomic_update(mem, v, op) \ + ((__typeof (*(mem))) (__typeof (*(mem) - *(mem))) \ + ((sizeof (*(mem)) == 8) ? \ + __insn_##op ((void *) (mem), (int64_t) (__typeof((v) - (v))) (v)) : \ + (sizeof (*(mem)) == 4) ? \ + __insn_##op##4 ((void *) (mem), (int32_t) (__typeof ((v) - (v))) (v)) : \ + __atomic_error_bad_argument_size())) + +#define atomic_compare_and_exchange_val_acq(mem, n, o) \ + ({ __insn_mtspr (SPR_CMPEXCH_VALUE, (int64_t) (__typeof ((o) - (o))) (o)); \ + __atomic_update (mem, n, cmpexch); }) +#define atomic_exchange_acq(mem, newvalue) \ + __atomic_update (mem, newvalue, exch) +#define atomic_exchange_and_add(mem, value) \ + __atomic_update (mem, value, fetchadd) +#define atomic_and_val(mem, mask) \ + __atomic_update (mem, mask, fetchand) +#define atomic_or_val(mem, mask) \ + __atomic_update (mem, mask, fetchor) +#define atomic_decrement_if_positive(mem) \ + __atomic_update (mem, -1, fetchaddgez) + +#include + +#endif /* atomic-machine.h */ diff --git a/sysdeps/tile/tilegx/bits/atomic.h b/sysdeps/tile/tilegx/bits/atomic.h deleted file mode 100644 index e75efb1c41..0000000000 --- a/sysdeps/tile/tilegx/bits/atomic.h +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright (C) 2011-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Chris Metcalf , 2011. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . 
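The tilegx __atomic_update macro above is a size dispatcher: sizeof (*mem) selects the 8-byte __insn_* intrinsic or its 4-byte *4 twin, and any other size resolves to __atomic_error_bad_argument_size, whose warning attribute flags misuse at compile time and whose absence breaks the link. The same shape with ordinary functions in place of the intrinsics (fetch_add_4/fetch_add_8 are hypothetical stand-ins, and a runtime abort replaces the link-time trick):

  #include <stdint.h>
  #include <stdlib.h>

  extern int32_t fetch_add_4 (void *mem, int32_t v);
  extern int64_t fetch_add_8 (void *mem, int64_t v);

  /* Pick the 4- or 8-byte primitive from the operand's size.  */
  #define FETCH_ADD(mem, v)                                        \
    ((__typeof (*(mem)))                                           \
     (sizeof (*(mem)) == 8 ? fetch_add_8 ((void *) (mem), (v))     \
      : sizeof (*(mem)) == 4 ? fetch_add_4 ((void *) (mem), (v))   \
      : (abort (), 0)))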
*/ - -#ifndef _BITS_ATOMIC_H -#define _BITS_ATOMIC_H 1 - -#include - -#ifdef _LP64 -# define __HAVE_64B_ATOMICS 1 -#else -/* tilegx32 does have 64-bit atomics, but assumptions in the semaphore - code mean that unaligned 64-bit atomics will be used if this symbol - is true, and unaligned atomics are not supported on tile. */ -# define __HAVE_64B_ATOMICS 0 -#endif - -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -/* Pick appropriate 8- or 4-byte instruction. */ -#define __atomic_update(mem, v, op) \ - ((__typeof (*(mem))) (__typeof (*(mem) - *(mem))) \ - ((sizeof (*(mem)) == 8) ? \ - __insn_##op ((void *) (mem), (int64_t) (__typeof((v) - (v))) (v)) : \ - (sizeof (*(mem)) == 4) ? \ - __insn_##op##4 ((void *) (mem), (int32_t) (__typeof ((v) - (v))) (v)) : \ - __atomic_error_bad_argument_size())) - -#define atomic_compare_and_exchange_val_acq(mem, n, o) \ - ({ __insn_mtspr (SPR_CMPEXCH_VALUE, (int64_t) (__typeof ((o) - (o))) (o)); \ - __atomic_update (mem, n, cmpexch); }) -#define atomic_exchange_acq(mem, newvalue) \ - __atomic_update (mem, newvalue, exch) -#define atomic_exchange_and_add(mem, value) \ - __atomic_update (mem, value, fetchadd) -#define atomic_and_val(mem, mask) \ - __atomic_update (mem, mask, fetchand) -#define atomic_or_val(mem, mask) \ - __atomic_update (mem, mask, fetchor) -#define atomic_decrement_if_positive(mem) \ - __atomic_update (mem, -1, fetchaddgez) - -#include - -#endif /* bits/atomic.h */ diff --git a/sysdeps/tile/tilepro/atomic-machine.h b/sysdeps/tile/tilepro/atomic-machine.h new file mode 100644 index 0000000000..a99f170394 --- /dev/null +++ b/sysdeps/tile/tilepro/atomic-machine.h @@ -0,0 +1,88 @@ +/* Copyright (C) 2011-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Chris Metcalf , 2011. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#ifndef _ATOMIC_MACHINE_H +#define _ATOMIC_MACHINE_H 1 + +#include + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +/* 32-bit integer compare-and-exchange. 
*/ +static __inline __attribute__ ((always_inline)) +int __atomic_cmpxchg_32 (volatile int *mem, int newval, int oldval) +{ + int result; + __asm__ __volatile__ ("swint1" + : "=R00" (result), "=m" (*mem) + : "R10" (__NR_FAST_cmpxchg), "R00" (mem), + "R01" (oldval), "R02" (newval), "m" (*mem) + : "r20", "r21", "r22", "r23", "r24", + "r25", "r26", "r27", "r28", "r29", "memory"); + return result; +} + +#define atomic_compare_and_exchange_val_acq(mem, n, o) \ + ({ \ + if (sizeof (*(mem)) != 4) \ + __atomic_error_bad_argument_size (); \ + (__typeof (*(mem))) \ + __atomic_cmpxchg_32 ((int *) (mem), (int) (n), (int) (o)); \ + }) + +/* Atomically compute: + int old = *ptr; + *ptr = (old & mask) + addend; + return old; */ + +static __inline __attribute__ ((always_inline)) +int __atomic_update_32 (volatile int *mem, int mask, int addend) +{ + int result; + __asm__ __volatile__ ("swint1" + : "=R00" (result), "=m" (*mem) + : "R10" (__NR_FAST_atomic_update), "R00" (mem), + "R01" (mask), "R02" (addend), "m" (*mem) + : "r20", "r21", "r22", "r23", "r24", + "r25", "r26", "r27", "r28", "r29", "memory"); + return result; +} + +/* Size-checked verson of __atomic_update_32. */ +#define __atomic_update(mem, mask, addend) \ + ({ \ + if (sizeof (*(mem)) != 4) \ + __atomic_error_bad_argument_size (); \ + (__typeof (*(mem))) \ + __atomic_update_32 ((int *) (mem), (int) (mask), (int) (addend)); \ + }) + +#define atomic_exchange_acq(mem, newvalue) \ + __atomic_update ((mem), 0, (newvalue)) +#define atomic_exchange_and_add(mem, value) \ + __atomic_update ((mem), -1, (value)) +#define atomic_and_val(mem, mask) \ + __atomic_update ((mem), (mask), 0) +#define atomic_or_val(mem, mask) \ + ({ __typeof (mask) __att1_v = (mask); \ + __atomic_update ((mem), ~__att1_v, __att1_v); }) + +#include + +#endif /* atomic-machine.h */ diff --git a/sysdeps/tile/tilepro/bits/atomic.h b/sysdeps/tile/tilepro/bits/atomic.h deleted file mode 100644 index e0ef9fb5e7..0000000000 --- a/sysdeps/tile/tilepro/bits/atomic.h +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright (C) 2011-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Chris Metcalf , 2011. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . */ - -#ifndef _BITS_ATOMIC_H -#define _BITS_ATOMIC_H 1 - -#include - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -/* 32-bit integer compare-and-exchange. 
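Everything in the tilepro header funnels through one kernel-backed primitive, { old = *mem; *mem = (old & mask) + addend; return old; }: exchange is mask 0, exchange-and-add is mask -1, AND is addend 0, and OR uses (old & ~v) + v, which equals old | v because the addition can never carry into a bit the mask cleared. A non-atomic C model of the derivations (the kernel's swint1 fast path supplies the real atomicity):

  /* Model of the primitive; deliberately unsynchronized here.  */
  static int
  atomic_update_model (int *mem, int mask, int addend)
  {
    int old = *mem;
    *mem = (old & mask) + addend;
    return old;
  }

  #define EXCHANGE(mem, v)   atomic_update_model ((mem), 0, (v))
  #define FETCH_ADD(mem, v)  atomic_update_model ((mem), -1, (v))
  #define FETCH_AND(mem, m)  atomic_update_model ((mem), (m), 0)
  /* Evaluate V once, as the patch does with __att1_v.  */
  #define FETCH_OR(mem, v) \
    ({ int __v = (v); atomic_update_model ((mem), ~__v, __v); })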
*/ -static __inline __attribute__ ((always_inline)) -int __atomic_cmpxchg_32 (volatile int *mem, int newval, int oldval) -{ - int result; - __asm__ __volatile__ ("swint1" - : "=R00" (result), "=m" (*mem) - : "R10" (__NR_FAST_cmpxchg), "R00" (mem), - "R01" (oldval), "R02" (newval), "m" (*mem) - : "r20", "r21", "r22", "r23", "r24", - "r25", "r26", "r27", "r28", "r29", "memory"); - return result; -} - -#define atomic_compare_and_exchange_val_acq(mem, n, o) \ - ({ \ - if (sizeof (*(mem)) != 4) \ - __atomic_error_bad_argument_size (); \ - (__typeof (*(mem))) \ - __atomic_cmpxchg_32 ((int *) (mem), (int) (n), (int) (o)); \ - }) - -/* Atomically compute: - int old = *ptr; - *ptr = (old & mask) + addend; - return old; */ - -static __inline __attribute__ ((always_inline)) -int __atomic_update_32 (volatile int *mem, int mask, int addend) -{ - int result; - __asm__ __volatile__ ("swint1" - : "=R00" (result), "=m" (*mem) - : "R10" (__NR_FAST_atomic_update), "R00" (mem), - "R01" (mask), "R02" (addend), "m" (*mem) - : "r20", "r21", "r22", "r23", "r24", - "r25", "r26", "r27", "r28", "r29", "memory"); - return result; -} - -/* Size-checked verson of __atomic_update_32. */ -#define __atomic_update(mem, mask, addend) \ - ({ \ - if (sizeof (*(mem)) != 4) \ - __atomic_error_bad_argument_size (); \ - (__typeof (*(mem))) \ - __atomic_update_32 ((int *) (mem), (int) (mask), (int) (addend)); \ - }) - -#define atomic_exchange_acq(mem, newvalue) \ - __atomic_update ((mem), 0, (newvalue)) -#define atomic_exchange_and_add(mem, value) \ - __atomic_update ((mem), -1, (value)) -#define atomic_and_val(mem, mask) \ - __atomic_update ((mem), (mask), 0) -#define atomic_or_val(mem, mask) \ - ({ __typeof (mask) __att1_v = (mask); \ - __atomic_update ((mem), ~__att1_v, __att1_v); }) - -#include - -#endif /* bits/atomic.h */ diff --git a/sysdeps/unix/sysv/linux/arm/atomic-machine.h b/sysdeps/unix/sysv/linux/arm/atomic-machine.h new file mode 100644 index 0000000000..4f5f3f4514 --- /dev/null +++ b/sysdeps/unix/sysv/linux/arm/atomic-machine.h @@ -0,0 +1,107 @@ +/* Atomic operations. ARM/Linux version. + Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#include + +/* If the compiler doesn't provide a primitive, we'll use this macro + to get assistance from the kernel. */ +#ifdef __thumb2__ +# define __arm_assisted_full_barrier() \ + __asm__ __volatile__ \ + ("movw\tip, #0x0fa0\n\t" \ + "movt\tip, #0xffff\n\t" \ + "blx\tip" \ + : : : "ip", "lr", "cc", "memory"); +#else +# define __arm_assisted_full_barrier() \ + __asm__ __volatile__ \ + ("mov\tip, #0xffff0fff\n\t" \ + "mov\tlr, pc\n\t" \ + "add\tpc, ip, #(0xffff0fa0 - 0xffff0fff)" \ + : : : "ip", "lr", "cc", "memory"); +#endif + +/* Atomic compare and exchange. 
This sequence relies on the kernel to + provide a compare and exchange operation which is atomic on the + current architecture, either via cleverness on pre-ARMv6 or via + ldrex / strex on ARMv6. + + It doesn't matter what register is used for a_oldval2, but we must + specify one to work around GCC PR rtl-optimization/21223. Otherwise + it may cause a_oldval or a_tmp to be moved to a different register. + + We use the union trick rather than simply using __typeof (...) in the + declarations of A_OLDVAL et al because when NEWVAL or OLDVAL is of the + form *PTR and PTR has a 'volatile ... *' type, then __typeof (*PTR) has + a 'volatile ...' type and this triggers -Wvolatile-register-var to + complain about 'register volatile ... asm ("reg")'. */ +#ifdef __thumb2__ +/* Thumb-2 has ldrex/strex. However it does not have barrier instructions, + so we still need to use the kernel helper. */ +# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) };\ + union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) };\ + register uint32_t a_oldval asm ("r0"); \ + register uint32_t a_newval asm ("r1") = newval_arg.v; \ + register __typeof (mem) a_ptr asm ("r2") = (mem); \ + register uint32_t a_tmp asm ("r3"); \ + register uint32_t a_oldval2 asm ("r4") = oldval_arg.v; \ + __asm__ __volatile__ \ + ("0:\tldr\t%[tmp],[%[ptr]]\n\t" \ + "cmp\t%[tmp], %[old2]\n\t" \ + "bne\t1f\n\t" \ + "mov\t%[old], %[old2]\n\t" \ + "movw\t%[tmp], #0x0fc0\n\t" \ + "movt\t%[tmp], #0xffff\n\t" \ + "blx\t%[tmp]\n\t" \ + "bcc\t0b\n\t" \ + "mov\t%[tmp], %[old2]\n\t" \ + "1:" \ + : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp) \ + : [new] "r" (a_newval), [ptr] "r" (a_ptr), \ + [old2] "r" (a_oldval2) \ + : "ip", "lr", "cc", "memory"); \ + (__typeof (oldval)) a_tmp; }) +#else +# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) };\ + union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) };\ + register uint32_t a_oldval asm ("r0"); \ + register uint32_t a_newval asm ("r1") = newval_arg.v; \ + register __typeof (mem) a_ptr asm ("r2") = (mem); \ + register uint32_t a_tmp asm ("r3"); \ + register uint32_t a_oldval2 asm ("r4") = oldval_arg.v; \ + __asm__ __volatile__ \ + ("0:\tldr\t%[tmp],[%[ptr]]\n\t" \ + "cmp\t%[tmp], %[old2]\n\t" \ + "bne\t1f\n\t" \ + "mov\t%[old], %[old2]\n\t" \ + "mov\t%[tmp], #0xffff0fff\n\t" \ + "mov\tlr, pc\n\t" \ + "add\tpc, %[tmp], #(0xffff0fc0 - 0xffff0fff)\n\t" \ + "bcc\t0b\n\t" \ + "mov\t%[tmp], %[old2]\n\t" \ + "1:" \ + : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp) \ + : [new] "r" (a_newval), [ptr] "r" (a_ptr), \ + [old2] "r" (a_oldval2) \ + : "ip", "lr", "cc", "memory"); \ + (__typeof (oldval)) a_tmp; }) +#endif + +#include diff --git a/sysdeps/unix/sysv/linux/arm/bits/atomic.h b/sysdeps/unix/sysv/linux/arm/bits/atomic.h deleted file mode 100644 index 17cff546ad..0000000000 --- a/sysdeps/unix/sysv/linux/arm/bits/atomic.h +++ /dev/null @@ -1,107 +0,0 @@ -/* Atomic operations. ARM/Linux version. - Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. 
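Both variants of __arm_assisted_compare_and_exchange_val_32_acq branch to the kernel's user helper page: __kuser_cmpxchg sits at the fixed address 0xffff0fc0, and the Thumb-2 path just materializes the same constant with movw/movt. The kernel-documented calling convention, shown as a plain C call through a casted constant (a sketch of that ABI, not glibc code):

  /* Linux ARM kuser helper: r0 = oldval, r1 = newval, r2 = ptr;
     returns 0 when *ptr was oldval and has been set to newval,
     nonzero otherwise, so callers typically retry in a loop.  */
  typedef int (*kuser_cmpxchg_t) (int oldval, int newval,
                                  volatile int *ptr);
  #define kuser_cmpxchg ((kuser_cmpxchg_t) 0xffff0fc0)

  static int
  cas_failed (volatile int *mem, int newval, int oldval)
  {
    return kuser_cmpxchg (oldval, newval, mem);
  }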
- - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . */ - -#include - -/* If the compiler doesn't provide a primitive, we'll use this macro - to get assistance from the kernel. */ -#ifdef __thumb2__ -# define __arm_assisted_full_barrier() \ - __asm__ __volatile__ \ - ("movw\tip, #0x0fa0\n\t" \ - "movt\tip, #0xffff\n\t" \ - "blx\tip" \ - : : : "ip", "lr", "cc", "memory"); -#else -# define __arm_assisted_full_barrier() \ - __asm__ __volatile__ \ - ("mov\tip, #0xffff0fff\n\t" \ - "mov\tlr, pc\n\t" \ - "add\tpc, ip, #(0xffff0fa0 - 0xffff0fff)" \ - : : : "ip", "lr", "cc", "memory"); -#endif - -/* Atomic compare and exchange. This sequence relies on the kernel to - provide a compare and exchange operation which is atomic on the - current architecture, either via cleverness on pre-ARMv6 or via - ldrex / strex on ARMv6. - - It doesn't matter what register is used for a_oldval2, but we must - specify one to work around GCC PR rtl-optimization/21223. Otherwise - it may cause a_oldval or a_tmp to be moved to a different register. - - We use the union trick rather than simply using __typeof (...) in the - declarations of A_OLDVAL et al because when NEWVAL or OLDVAL is of the - form *PTR and PTR has a 'volatile ... *' type, then __typeof (*PTR) has - a 'volatile ...' type and this triggers -Wvolatile-register-var to - complain about 'register volatile ... asm ("reg")'. */ -#ifdef __thumb2__ -/* Thumb-2 has ldrex/strex. However it does not have barrier instructions, - so we still need to use the kernel helper. 
*/ -# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - ({ union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) };\ - union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) };\ - register uint32_t a_oldval asm ("r0"); \ - register uint32_t a_newval asm ("r1") = newval_arg.v; \ - register __typeof (mem) a_ptr asm ("r2") = (mem); \ - register uint32_t a_tmp asm ("r3"); \ - register uint32_t a_oldval2 asm ("r4") = oldval_arg.v; \ - __asm__ __volatile__ \ - ("0:\tldr\t%[tmp],[%[ptr]]\n\t" \ - "cmp\t%[tmp], %[old2]\n\t" \ - "bne\t1f\n\t" \ - "mov\t%[old], %[old2]\n\t" \ - "movw\t%[tmp], #0x0fc0\n\t" \ - "movt\t%[tmp], #0xffff\n\t" \ - "blx\t%[tmp]\n\t" \ - "bcc\t0b\n\t" \ - "mov\t%[tmp], %[old2]\n\t" \ - "1:" \ - : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp) \ - : [new] "r" (a_newval), [ptr] "r" (a_ptr), \ - [old2] "r" (a_oldval2) \ - : "ip", "lr", "cc", "memory"); \ - (__typeof (oldval)) a_tmp; }) -#else -# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - ({ union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) };\ - union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) };\ - register uint32_t a_oldval asm ("r0"); \ - register uint32_t a_newval asm ("r1") = newval_arg.v; \ - register __typeof (mem) a_ptr asm ("r2") = (mem); \ - register uint32_t a_tmp asm ("r3"); \ - register uint32_t a_oldval2 asm ("r4") = oldval_arg.v; \ - __asm__ __volatile__ \ - ("0:\tldr\t%[tmp],[%[ptr]]\n\t" \ - "cmp\t%[tmp], %[old2]\n\t" \ - "bne\t1f\n\t" \ - "mov\t%[old], %[old2]\n\t" \ - "mov\t%[tmp], #0xffff0fff\n\t" \ - "mov\tlr, pc\n\t" \ - "add\tpc, %[tmp], #(0xffff0fc0 - 0xffff0fff)\n\t" \ - "bcc\t0b\n\t" \ - "mov\t%[tmp], %[old2]\n\t" \ - "1:" \ - : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp) \ - : [new] "r" (a_newval), [ptr] "r" (a_ptr), \ - [old2] "r" (a_oldval2) \ - : "ip", "lr", "cc", "memory"); \ - (__typeof (oldval)) a_tmp; }) -#endif - -#include diff --git a/sysdeps/unix/sysv/linux/hppa/atomic-machine.h b/sysdeps/unix/sysv/linux/hppa/atomic-machine.h new file mode 100644 index 0000000000..8fbcb493a7 --- /dev/null +++ b/sysdeps/unix/sysv/linux/hppa/atomic-machine.h @@ -0,0 +1,103 @@ +/* Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Carlos O'Donell , 2005. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#include /* Required for type definitions e.g. uint8_t. */ +#include /* Required for ABORT_INSTRUCTIUON. 
*/ + +/* We need EFAULT, ENONSYS */ +#if !defined EFAULT && !defined ENOSYS +#define EFAULT 14 +#define ENOSYS 251 +#endif + +#ifndef _ATOMIC_MACHINE_H +#define _ATOMIC_MACHINE_H 1 + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +/* prev = *addr; + if (prev == old) + *addr = new; + return prev; */ + +/* Use the kernel atomic light weight syscalls on hppa. */ +#define _LWS "0xb0" +#define _LWS_CAS "0" +/* Note r31 is the link register. */ +#define _LWS_CLOBBER "r1", "r23", "r22", "r20", "r31", "memory" +/* String constant for -EAGAIN. */ +#define _ASM_EAGAIN "-11" +/* String constant for -EDEADLOCK. */ +#define _ASM_EDEADLOCK "-45" + +/* The only basic operation needed is compare and exchange. The mem + pointer must be word aligned. */ +#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + ({ \ + register long lws_errno asm("r21"); \ + register unsigned long lws_ret asm("r28"); \ + register unsigned long lws_mem asm("r26") = (unsigned long)(mem); \ + register unsigned long lws_old asm("r25") = (unsigned long)(oldval);\ + register unsigned long lws_new asm("r24") = (unsigned long)(newval);\ + __asm__ __volatile__( \ + "0: \n\t" \ + "ble " _LWS "(%%sr2, %%r0) \n\t" \ + "ldi " _LWS_CAS ", %%r20 \n\t" \ + "ldi " _ASM_EAGAIN ", %%r20 \n\t" \ + "cmpb,=,n %%r20, %%r21, 0b \n\t" \ + "nop \n\t" \ + "ldi " _ASM_EDEADLOCK ", %%r20 \n\t" \ + "cmpb,=,n %%r20, %%r21, 0b \n\t" \ + "nop \n\t" \ + : "=r" (lws_ret), "=r" (lws_errno) \ + : "r" (lws_mem), "r" (lws_old), "r" (lws_new) \ + : _LWS_CLOBBER \ + ); \ + \ + if (lws_errno == -EFAULT || lws_errno == -ENOSYS) \ + ABORT_INSTRUCTION; \ + \ + (__typeof (oldval)) lws_ret; \ + }) + +#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + ({ \ + __typeof__ (*mem) ret; \ + ret = atomic_compare_and_exchange_val_acq(mem, newval, oldval); \ + /* Return 1 if it was already acquired. */ \ + (ret != oldval); \ + }) + +#endif +/* _ATOMIC_MACHINE_H */ diff --git a/sysdeps/unix/sysv/linux/hppa/bits/atomic.h b/sysdeps/unix/sysv/linux/hppa/bits/atomic.h deleted file mode 100644 index 26b66c5134..0000000000 --- a/sysdeps/unix/sysv/linux/hppa/bits/atomic.h +++ /dev/null @@ -1,103 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Carlos O'Donell , 2005. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . */ - -#include /* Required for type definitions e.g. uint8_t. */ -#include /* Required for ABORT_INSTRUCTIUON. 
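On hppa the patch routes CAS through the kernel's light-weight syscall gateway: ble 0xb0(%sr2, %r0) enters the LWS CAS, the status comes back in r21, the asm loops while the kernel reports -EAGAIN or -EDEADLOCK, and ABORT_INSTRUCTION fires on -EFAULT/-ENOSYS. The control flow in C-shaped form (lws_cas is a hypothetical wrapper for the gateway sequence; the errno values are the ones hard-coded above):

  /* Hypothetical wrapper: old value in *prev, errno-style status
     returned.  */
  extern long lws_cas (volatile unsigned long *mem, unsigned long oldval,
                       unsigned long newval, unsigned long *prev);

  static unsigned long
  cas_acq (volatile unsigned long *mem, unsigned long newval,
           unsigned long oldval)
  {
    unsigned long prev;
    long err;

    do
      err = lws_cas (mem, oldval, newval, &prev);
    while (err == -11 /* -EAGAIN */ || err == -45 /* -EDEADLOCK */);

    if (err == -14 /* -EFAULT */ || err == -251 /* -ENOSYS */)
      __builtin_trap ();   /* stands in for ABORT_INSTRUCTION */
    return prev;
  }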
*/ - -/* We need EFAULT, ENONSYS */ -#if !defined EFAULT && !defined ENOSYS -#define EFAULT 14 -#define ENOSYS 251 -#endif - -#ifndef _BITS_ATOMIC_H -#define _BITS_ATOMIC_H 1 - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -/* prev = *addr; - if (prev == old) - *addr = new; - return prev; */ - -/* Use the kernel atomic light weight syscalls on hppa. */ -#define _LWS "0xb0" -#define _LWS_CAS "0" -/* Note r31 is the link register. */ -#define _LWS_CLOBBER "r1", "r23", "r22", "r20", "r31", "memory" -/* String constant for -EAGAIN. */ -#define _ASM_EAGAIN "-11" -/* String constant for -EDEADLOCK. */ -#define _ASM_EDEADLOCK "-45" - -/* The only basic operation needed is compare and exchange. The mem - pointer must be word aligned. */ -#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ \ - register long lws_errno asm("r21"); \ - register unsigned long lws_ret asm("r28"); \ - register unsigned long lws_mem asm("r26") = (unsigned long)(mem); \ - register unsigned long lws_old asm("r25") = (unsigned long)(oldval);\ - register unsigned long lws_new asm("r24") = (unsigned long)(newval);\ - __asm__ __volatile__( \ - "0: \n\t" \ - "ble " _LWS "(%%sr2, %%r0) \n\t" \ - "ldi " _LWS_CAS ", %%r20 \n\t" \ - "ldi " _ASM_EAGAIN ", %%r20 \n\t" \ - "cmpb,=,n %%r20, %%r21, 0b \n\t" \ - "nop \n\t" \ - "ldi " _ASM_EDEADLOCK ", %%r20 \n\t" \ - "cmpb,=,n %%r20, %%r21, 0b \n\t" \ - "nop \n\t" \ - : "=r" (lws_ret), "=r" (lws_errno) \ - : "r" (lws_mem), "r" (lws_old), "r" (lws_new) \ - : _LWS_CLOBBER \ - ); \ - \ - if (lws_errno == -EFAULT || lws_errno == -ENOSYS) \ - ABORT_INSTRUCTION; \ - \ - (__typeof (oldval)) lws_ret; \ - }) - -#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ - ({ \ - __typeof__ (*mem) ret; \ - ret = atomic_compare_and_exchange_val_acq(mem, newval, oldval); \ - /* Return 1 if it was already acquired. */ \ - (ret != oldval); \ - }) - -#endif -/* _BITS_ATOMIC_H */ diff --git a/sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h b/sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h new file mode 100644 index 0000000000..ee6f8e254a --- /dev/null +++ b/sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h @@ -0,0 +1,106 @@ +/* Copyright (C) 2010-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Maxim Kuvyrkov , 2010. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . 
*/
+
+#ifndef _ATOMIC_MACHINE_H
+#define _ATOMIC_MACHINE_H 1
+
+#include
+#include
+#include
+
+/* Coldfire has no atomic compare-and-exchange operation, but the
+   kernel provides userspace atomicity operations.  Use them.  */
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+typedef int_fast32_t atomic_fast32_t;
+typedef uint_fast32_t uatomic_fast32_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+#define __HAVE_64B_ATOMICS 0
+#define USE_ATOMIC_COMPILER_BUILTINS 0
+
+/* The only basic operation needed is compare and exchange.  */
+/* For ColdFire we'll have to trap into the kernel mode anyway,
+   so trap from the library rather than from the kernel wrapper.  */
+#ifdef SHARED
+# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  ({ \
+    /* Use temporary variables to workaround call-clobberness of \
+       the registers.  */ \
+    __typeof (mem) _mem = mem; \
+    __typeof (oldval) _oldval = oldval; \
+    __typeof (newval) _newval = newval; \
+    register uint32_t *_a0 asm ("a0") = (uint32_t *) _mem; \
+    register uint32_t _d0 asm ("d0") = (uint32_t) _oldval; \
+    register uint32_t _d1 asm ("d1") = (uint32_t) _newval; \
+    void *tmp; \
+ \
+    asm ("movel #_GLOBAL_OFFSET_TABLE_@GOTPC, %2\n\t" \
+         "lea (-6, %%pc, %2), %2\n\t" \
+         "movel " STR_M68K_VDSO_SYMBOL (__vdso_atomic_cmpxchg_32) \
+         "@GOT(%2), %2\n\t" \
+         "movel (%2), %2\n\t" \
+         "jsr (%2)\n\t" \
+         : "+d" (_d0), "+m" (*_a0), "=&a" (tmp) \
+         : "a" (_a0), "d" (_d1)); \
+    (__typeof (oldval)) _d0; \
+  })
+#else
+# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  ({ \
+    /* Use temporary variables to workaround call-clobberness of \
+       the registers.  */ \
+    __typeof (mem) _mem = mem; \
+    __typeof (oldval) _oldval = oldval; \
+    __typeof (newval) _newval = newval; \
+    register uint32_t _d0 asm ("d0") = SYS_ify (atomic_cmpxchg_32); \
+    register uint32_t *_a0 asm ("a0") = (uint32_t *) _mem; \
+    register uint32_t _d2 asm ("d2") = (uint32_t) _oldval; \
+    register uint32_t _d1 asm ("d1") = (uint32_t) _newval; \
+ \
+    asm ("trap #0" \
+         : "+d" (_d0), "+m" (*_a0) \
+         : "a" (_a0), "d" (_d2), "d" (_d1)); \
+    (__typeof (oldval)) _d0; \
+  })
+#endif
+
+#ifdef SHARED
+# define atomic_full_barrier() \
+  ({ \
+    void *tmp; \
+ \
+    asm ("movel #_GLOBAL_OFFSET_TABLE_@GOTPC, %0\n\t" \
+         "lea (-6, %pc, %0), %0\n\t" \
+         "movel " STR_M68K_VDSO_SYMBOL (__vdso_atomic_barrier) \
+         "@GOT(%0), %0\n\t" \
+         "movel (%0), %0\n\t" \
+         "jsr (%0)\n\t" \
+         : "=&a" (tmp)); \
+  })
+#else
+# define atomic_full_barrier() \
+  (INTERNAL_SYSCALL (atomic_barrier, , 0), (void) 0)
+#endif
+
+#endif
diff --git a/sysdeps/unix/sysv/linux/m68k/coldfire/bits/atomic.h b/sysdeps/unix/sysv/linux/m68k/coldfire/bits/atomic.h
deleted file mode 100644
index 16002f15db..0000000000
--- a/sysdeps/unix/sysv/linux/m68k/coldfire/bits/atomic.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* Copyright (C) 2010-2015 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Maxim Kuvyrkov , 2010.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
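
Because ColdFire exports only this single kernel-assisted CAS, every other atomic operation is synthesized from it by the generic include/atomic.h machinery through a load/CAS retry loop. A hedged sketch of that derivation, written with C11 atomics standing in for the trap/vDSO call (fetch_add_via_cas is an illustrative name, not a glibc symbol):

    #include <stdatomic.h>

    static inline int
    fetch_add_via_cas (atomic_int *mem, int value)
    {
      int old = atomic_load_explicit (mem, memory_order_relaxed);
      /* Retry until no other thread updates *mem between our load and
         the compare-and-exchange.  */
      while (!atomic_compare_exchange_weak_explicit (mem, &old, old + value,
                                                     memory_order_acq_rel,
                                                     memory_order_relaxed))
        ;
      return old;  /* Previous value, as fetch-and-add requires.  */
    }
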
See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . */ - -#ifndef _BITS_ATOMIC_H -#define _BITS_ATOMIC_H 1 - -#include -#include -#include - -/* Coldfire has no atomic compare-and-exchange operation, but the - kernel provides userspace atomicity operations. Use them. */ - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -/* The only basic operation needed is compare and exchange. */ -/* For ColdFire we'll have to trap into the kernel mode anyway, - so trap from the library rather then from the kernel wrapper. */ -#ifdef SHARED -# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ \ - /* Use temporary variables to workaround call-clobberness of \ - the registers. */ \ - __typeof (mem) _mem = mem; \ - __typeof (oldval) _oldval = oldval; \ - __typeof (newval) _newval = newval; \ - register uint32_t *_a0 asm ("a0") = (uint32_t *) _mem; \ - register uint32_t _d0 asm ("d0") = (uint32_t) _oldval; \ - register uint32_t _d1 asm ("d1") = (uint32_t) _newval; \ - void *tmp; \ - \ - asm ("movel #_GLOBAL_OFFSET_TABLE_@GOTPC, %2\n\t" \ - "lea (-6, %%pc, %2), %2\n\t" \ - "movel " STR_M68K_VDSO_SYMBOL (__vdso_atomic_cmpxchg_32) \ - "@GOT(%2), %2\n\t" \ - "movel (%2), %2\n\t" \ - "jsr (%2)\n\t" \ - : "+d" (_d0), "+m" (*_a0), "=&a" (tmp) \ - : "a" (_a0), "d" (_d1)); \ - (__typeof (oldval)) _d0; \ - }) -#else -# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ \ - /* Use temporary variables to workaround call-clobberness of \ - the registers. */ \ - __typeof (mem) _mem = mem; \ - __typeof (oldval) _oldval = oldval; \ - __typeof (newval) _newval = newval; \ - register uint32_t _d0 asm ("d0") = SYS_ify (atomic_cmpxchg_32); \ - register uint32_t *_a0 asm ("a0") = (uint32_t *) _mem; \ - register uint32_t _d2 asm ("d2") = (uint32_t) _oldval; \ - register uint32_t _d1 asm ("d1") = (uint32_t) _newval; \ - \ - asm ("trap #0" \ - : "+d" (_d0), "+m" (*_a0) \ - : "a" (_a0), "d" (_d2), "d" (_d1)); \ - (__typeof (oldval)) _d0; \ - }) -#endif - -#ifdef SHARED -# define atomic_full_barrier() \ - ({ \ - void *tmp; \ - \ - asm ("movel #_GLOBAL_OFFSET_TABLE_@GOTPC, %0\n\t" \ - "lea (-6, %pc, %0), %0\n\t" \ - "movel " STR_M68K_VDSO_SYMBOL (__vdso_atomic_barrier) \ - "@GOT(%0), %0\n\t" \ - "movel (%0), %0\n\t" \ - "jsr (%0)\n\t" \ - : "=&a" (tmp)); \ - }) -#else -# define atomic_full_barrier() \ - (INTERNAL_SYSCALL (atomic_barrier, , 0), (void) 0) -#endif - -#endif diff --git a/sysdeps/unix/sysv/linux/nios2/atomic-machine.h b/sysdeps/unix/sysv/linux/nios2/atomic-machine.h new file mode 100644 index 0000000000..ee4e4e3b0a --- /dev/null +++ b/sysdeps/unix/sysv/linux/nios2/atomic-machine.h @@ -0,0 +1,92 @@ +/* Low-level functions for atomic operations. Nios II version. + Copyright (C) 2012-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#ifndef _NIOS2_ATOMIC_MACHINE_H +#define _NIOS2_ATOMIC_MACHINE_H 1 + +#include + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ + (abort (), 0) +#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ + (abort (), 0) +#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ \ + register int r2 asm ("r2"); \ + register int* r4 asm ("r4") = (int*)(mem); \ + register int r5 asm ("r5"); \ + register int r6 asm ("r6") = (int)(newval); \ + int retval, orig_oldval = (int)(oldval); \ + long kernel_cmpxchg = 0x1004; \ + while (1) \ + { \ + r5 = *r4; \ + if (r5 != orig_oldval) \ + { \ + retval = r5; \ + break; \ + } \ + asm volatile ("callr %1\n" \ + : "=r" (r2) \ + : "r" (kernel_cmpxchg), "r" (r4), "r" (r5), "r" (r6) \ + : "ra", "memory"); \ + if (!r2) { retval = orig_oldval; break; } \ + } \ + (__typeof (*(mem))) retval; \ + }) + +#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ + ({ \ + register int r2 asm ("r2"); \ + register int *r4 asm ("r4") = (int*)(mem); \ + register int r5 asm ("r5") = (int)(oldval); \ + register int r6 asm ("r6") = (int)(newval); \ + long kernel_cmpxchg = 0x1004; \ + asm volatile ("callr %1\n" \ + : "=r" (r2) \ + : "r" (kernel_cmpxchg), "r" (r4), "r" (r5), "r" (r6) \ + : "ra", "memory"); \ + r2; \ + }) + +#define atomic_full_barrier() ({ asm volatile ("sync"); }) + +#endif /* _NIOS2_ATOMIC_MACHINE_H */ diff --git a/sysdeps/unix/sysv/linux/nios2/bits/atomic.h b/sysdeps/unix/sysv/linux/nios2/bits/atomic.h deleted file mode 100644 index 2329f7463d..0000000000 --- a/sysdeps/unix/sysv/linux/nios2/bits/atomic.h +++ /dev/null @@ -1,92 +0,0 @@ -/* Low-level functions for atomic operations. Nios II version. - Copyright (C) 2012-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
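
Nios II uses the same pattern as ARM above: a kernel-provided cmpxchg routine at a fixed address (0x1004 here), with the pointer, expected value and new value in r4, r5 and r6, and the status returned in r2 (zero on success). A hedged C rendering of the val_32 macro's retry loop; the helper prototype is an assumption inferred from the register setup in the asm, not a documented interface:

    #include <stdint.h>

    typedef int (*nios2_kuser_cmpxchg_t) (volatile int32_t *ptr,
                                          int32_t oldval, int32_t newval);
    #define NIOS2_KUSER_CMPXCHG ((nios2_kuser_cmpxchg_t) 0x1004)

    static int32_t
    nios2_cas_val (volatile int32_t *ptr, int32_t oldval, int32_t newval)
    {
      for (;;)
        {
          int32_t cur = *ptr;
          if (cur != oldval)
            return cur;                /* Mismatch: report what we saw.  */
          if (NIOS2_KUSER_CMPXCHG (ptr, oldval, newval) == 0)
            return oldval;             /* r2 == 0: exchange done.  */
          /* Nonzero status: the helper failed (e.g. preempted between
             load and store); re-read and try again.  */
        }
    }
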
- - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library. If not, see - . */ - -#ifndef _NIOS2_BITS_ATOMIC_H -#define _NIOS2_BITS_ATOMIC_H 1 - -#include - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ - (abort (), 0) -#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ - (abort (), 0) -#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ - (abort (), 0) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - ({ \ - register int r2 asm ("r2"); \ - register int* r4 asm ("r4") = (int*)(mem); \ - register int r5 asm ("r5"); \ - register int r6 asm ("r6") = (int)(newval); \ - int retval, orig_oldval = (int)(oldval); \ - long kernel_cmpxchg = 0x1004; \ - while (1) \ - { \ - r5 = *r4; \ - if (r5 != orig_oldval) \ - { \ - retval = r5; \ - break; \ - } \ - asm volatile ("callr %1\n" \ - : "=r" (r2) \ - : "r" (kernel_cmpxchg), "r" (r4), "r" (r5), "r" (r6) \ - : "ra", "memory"); \ - if (!r2) { retval = orig_oldval; break; } \ - } \ - (__typeof (*(mem))) retval; \ - }) - -#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ - ({ \ - register int r2 asm ("r2"); \ - register int *r4 asm ("r4") = (int*)(mem); \ - register int r5 asm ("r5") = (int)(oldval); \ - register int r6 asm ("r6") = (int)(newval); \ - long kernel_cmpxchg = 0x1004; \ - asm volatile ("callr %1\n" \ - : "=r" (r2) \ - : "r" (kernel_cmpxchg), "r" (r4), "r" (r5), "r" (r6) \ - : "ra", "memory"); \ - r2; \ - }) - -#define atomic_full_barrier() ({ asm volatile ("sync"); }) - -#endif /* _NIOS2_BITS_ATOMIC_H */ diff --git a/sysdeps/unix/sysv/linux/sh/atomic-machine.h b/sysdeps/unix/sysv/linux/sh/atomic-machine.h new file mode 100644 index 0000000000..8a188c5675 --- /dev/null +++ b/sysdeps/unix/sysv/linux/sh/atomic-machine.h @@ -0,0 +1,428 @@ +/* Atomic operations used inside libc. Linux/SH version. + Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +#include + + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + +/* SH kernel has implemented a gUSA ("g" User Space Atomicity) support + for the user space atomicity. The atomicity macros use this scheme. + + Reference: + Niibe Yutaka, "gUSA: Simple and Efficient User Space Atomicity + Emulation with Little Kernel Modification", Linux Conference 2002, + Japan. http://lc.linux.or.jp/lc2002/papers/niibe0919h.pdf (in + Japanese). + + B.N. Bershad, D. Redell, and J. Ellis, "Fast Mutual Exclusion for + Uniprocessors", Proceedings of the Fifth Architectural Support for + Programming Languages and Operating Systems (ASPLOS), pp. 223-233, + October 1992. http://www.cs.washington.edu/homes/bershad/Papers/Rcs.ps + + SuperH ABI: + r15: -(size of atomic instruction sequence) < 0 + r0: end point + r1: saved stack pointer +*/ + +#if __GNUC_PREREQ (4, 7) +# define rNOSP "u" +#else +# define rNOSP "r" +#endif + +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + ({ __typeof (*(mem)) __result; \ + __asm __volatile ("\ + mova 1f,r0\n\ + .align 2\n\ + mov r15,r1\n\ + mov #(0f-1f),r15\n\ + 0: mov.b @%1,%0\n\ + cmp/eq %0,%3\n\ + bf 1f\n\ + mov.b %2,@%1\n\ + 1: mov r1,r15"\ + : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \ + : "r0", "r1", "t", "memory"); \ + __result; }) + +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + ({ __typeof (*(mem)) __result; \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + mov #-8,r15\n\ + 0: mov.w @%1,%0\n\ + cmp/eq %0,%3\n\ + bf 1f\n\ + mov.w %2,@%1\n\ + 1: mov r1,r15"\ + : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \ + : "r0", "r1", "t", "memory"); \ + __result; }) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ __typeof (*(mem)) __result; \ + __asm __volatile ("\ + mova 1f,r0\n\ + .align 2\n\ + mov r15,r1\n\ + mov #(0f-1f),r15\n\ + 0: mov.l @%1,%0\n\ + cmp/eq %0,%3\n\ + bf 1f\n\ + mov.l %2,@%1\n\ + 1: mov r1,r15"\ + : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \ + : "r0", "r1", "t", "memory"); \ + __result; }) + +/* XXX We do not really need 64-bit compare-and-exchange. At least + not in the moment. Using it would mean causing portability + problems since not many other 32-bit architectures have support for + such an operation. So don't define any code for now. 
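
To restate the gUSA trick in prose: while r15 holds the negative length of the critical sequence, the kernel treats the task as being inside an atomic region, and on preemption it rolls the task back to the restart point saved in r0 rather than resuming mid-sequence. Each CAS macro above therefore computes the following function, with the atomicity supplied by the kernel rollback, not by the C code (shown only to make the data flow explicit; plain C like this is of course not atomic by itself):

    static inline int
    gusa_cas_32_shape (volatile int *mem, int newval, int oldval)
    {
      /* mov r15,r1; mov #(0f-1f),r15 opens the gUSA region.  */
      int result = *mem;            /* 0: mov.l @mem,result  */
      if (result == oldval)         /* cmp/eq; bf 1f         */
        *mem = newval;              /* mov.l newval,@mem     */
      /* 1: mov r1,r15 closes the region.  */
      return result;
    }
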
*/ + +# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define atomic_exchange_and_add(mem, value) \ + ({ __typeof (*(mem)) __result, __tmp, __value = (value); \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("\ + mova 1f,r0\n\ + .align 2\n\ + mov r15,r1\n\ + mov #(0f-1f),r15\n\ + 0: mov.b @%2,%0\n\ + mov %1,r2\n\ + add %0,r2\n\ + mov.b r2,@%2\n\ + 1: mov r1,r15"\ + : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ + : "r0", "r1", "r2", "memory"); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("\ + mova 1f,r0\n\ + .align 2\n\ + mov r15,r1\n\ + mov #(0f-1f),r15\n\ + 0: mov.w @%2,%0\n\ + mov %1,r2\n\ + add %0,r2\n\ + mov.w r2,@%2\n\ + 1: mov r1,r15"\ + : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ + : "r0", "r1", "r2", "memory"); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("\ + mova 1f,r0\n\ + .align 2\n\ + mov r15,r1\n\ + mov #(0f-1f),r15\n\ + 0: mov.l @%2,%0\n\ + mov %1,r2\n\ + add %0,r2\n\ + mov.l r2,@%2\n\ + 1: mov r1,r15"\ + : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ + : "r0", "r1", "r2", "memory"); \ + else \ + { \ + __typeof (mem) memp = (mem); \ + do \ + __result = *memp; \ + while (__arch_compare_and_exchange_val_64_acq \ + (memp, __result + __value, __result) == __result); \ + (void) __value; \ + } \ + __result; }) + +#define atomic_add(mem, value) \ + (void) ({ __typeof (*(mem)) __tmp, __value = (value); \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.b @%1,r2\n\ + add %0,r2\n\ + mov.b r2,@%1\n\ + 1: mov r1,r15"\ + : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \ + : "r0", "r1", "r2", "memory"); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.w @%1,r2\n\ + add %0,r2\n\ + mov.w r2,@%1\n\ + 1: mov r1,r15"\ + : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \ + : "r0", "r1", "r2", "memory"); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.l @%1,r2\n\ + add %0,r2\n\ + mov.l r2,@%1\n\ + 1: mov r1,r15"\ + : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \ + : "r0", "r1", "r2", "memory"); \ + else \ + { \ + __typeof (*(mem)) oldval; \ + __typeof (mem) memp = (mem); \ + do \ + oldval = *memp; \ + while (__arch_compare_and_exchange_val_64_acq \ + (memp, oldval + __value, oldval) == oldval); \ + (void) __value; \ + } \ + }) + +#define atomic_add_negative(mem, value) \ + ({ unsigned char __result; \ + __typeof (*(mem)) __tmp, __value = (value); \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.b @%2,r2\n\ + add %1,r2\n\ + mov.b r2,@%2\n\ + 1: mov r1,r15\n\ + shal r2\n\ + movt %0"\ + : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ + : "r0", "r1", "r2", "t", "memory"); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.w @%2,r2\n\ + add %1,r2\n\ + mov.w r2,@%2\n\ + 1: mov r1,r15\n\ + shal r2\n\ + movt %0"\ + : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ + : "r0", "r1", "r2", "t", "memory"); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.l @%2,r2\n\ + add %1,r2\n\ + mov.l r2,@%2\n\ + 1: mov r1,r15\n\ + shal r2\n\ + movt 
%0"\ + : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ + : "r0", "r1", "r2", "t", "memory"); \ + else \ + abort (); \ + __result; }) + +#define atomic_add_zero(mem, value) \ + ({ unsigned char __result; \ + __typeof (*(mem)) __tmp, __value = (value); \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.b @%2,r2\n\ + add %1,r2\n\ + mov.b r2,@%2\n\ + 1: mov r1,r15\n\ + tst r2,r2\n\ + movt %0"\ + : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ + : "r0", "r1", "r2", "t", "memory"); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.w @%2,r2\n\ + add %1,r2\n\ + mov.w r2,@%2\n\ + 1: mov r1,r15\n\ + tst r2,r2\n\ + movt %0"\ + : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ + : "r0", "r1", "r2", "t", "memory"); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.l @%2,r2\n\ + add %1,r2\n\ + mov.l r2,@%2\n\ + 1: mov r1,r15\n\ + tst r2,r2\n\ + movt %0"\ + : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ + : "r0", "r1", "r2", "t", "memory"); \ + else \ + abort (); \ + __result; }) + +#define atomic_increment_and_test(mem) atomic_add_zero((mem), 1) +#define atomic_decrement_and_test(mem) atomic_add_zero((mem), -1) + +#define atomic_bit_set(mem, bit) \ + (void) ({ unsigned int __mask = 1 << (bit); \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.b @%0,r2\n\ + or %1,r2\n\ + mov.b r2,@%0\n\ + 1: mov r1,r15"\ + : : rNOSP (mem), rNOSP (__mask) \ + : "r0", "r1", "r2", "memory"); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.w @%0,r2\n\ + or %1,r2\n\ + mov.w r2,@%0\n\ + 1: mov r1,r15"\ + : : rNOSP (mem), rNOSP (__mask) \ + : "r0", "r1", "r2", "memory"); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("\ + mova 1f,r0\n\ + mov r15,r1\n\ + .align 2\n\ + mov #(0f-1f),r15\n\ + 0: mov.l @%0,r2\n\ + or %1,r2\n\ + mov.l r2,@%0\n\ + 1: mov r1,r15"\ + : : rNOSP (mem), rNOSP (__mask) \ + : "r0", "r1", "r2", "memory"); \ + else \ + abort (); \ + }) + +#define atomic_bit_test_set(mem, bit) \ + ({ unsigned int __mask = 1 << (bit); \ + unsigned int __result = __mask; \ + if (sizeof (*(mem)) == 1) \ + __asm __volatile ("\ + mova 1f,r0\n\ + .align 2\n\ + mov r15,r1\n\ + mov #(0f-1f),r15\n\ + 0: mov.b @%2,r2\n\ + mov r2,r3\n\ + or %1,r2\n\ + mov.b r2,@%2\n\ + 1: mov r1,r15\n\ + and r3,%0"\ + : "=&r" (__result), "=&r" (__mask) \ + : rNOSP (mem), "0" (__result), "1" (__mask) \ + : "r0", "r1", "r2", "r3", "memory"); \ + else if (sizeof (*(mem)) == 2) \ + __asm __volatile ("\ + mova 1f,r0\n\ + .align 2\n\ + mov r15,r1\n\ + mov #(0f-1f),r15\n\ + 0: mov.w @%2,r2\n\ + mov r2,r3\n\ + or %1,r2\n\ + mov.w %1,@%2\n\ + 1: mov r1,r15\n\ + and r3,%0"\ + : "=&r" (__result), "=&r" (__mask) \ + : rNOSP (mem), "0" (__result), "1" (__mask) \ + : "r0", "r1", "r2", "r3", "memory"); \ + else if (sizeof (*(mem)) == 4) \ + __asm __volatile ("\ + mova 1f,r0\n\ + .align 2\n\ + mov r15,r1\n\ + mov #(0f-1f),r15\n\ + 0: mov.l @%2,r2\n\ + mov r2,r3\n\ + or r2,%1\n\ + mov.l %1,@%2\n\ + 1: mov r1,r15\n\ + and r3,%0"\ + : "=&r" (__result), "=&r" (__mask) \ + : rNOSP (mem), "0" (__result), "1" (__mask) \ + : "r0", "r1", "r2", "r3", "memory"); \ + else \ + abort (); \ + 
__result; }) diff --git a/sysdeps/unix/sysv/linux/sh/bits/atomic.h b/sysdeps/unix/sysv/linux/sh/bits/atomic.h deleted file mode 100644 index 8a188c5675..0000000000 --- a/sysdeps/unix/sysv/linux/sh/bits/atomic.h +++ /dev/null @@ -1,428 +0,0 @@ -/* Atomic operations used inside libc. Linux/SH version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include - - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - -/* SH kernel has implemented a gUSA ("g" User Space Atomicity) support - for the user space atomicity. The atomicity macros use this scheme. - - Reference: - Niibe Yutaka, "gUSA: Simple and Efficient User Space Atomicity - Emulation with Little Kernel Modification", Linux Conference 2002, - Japan. http://lc.linux.or.jp/lc2002/papers/niibe0919h.pdf (in - Japanese). - - B.N. Bershad, D. Redell, and J. Ellis, "Fast Mutual Exclusion for - Uniprocessors", Proceedings of the Fifth Architectural Support for - Programming Languages and Operating Systems (ASPLOS), pp. 223-233, - October 1992. 
http://www.cs.washington.edu/homes/bershad/Papers/Rcs.ps - - SuperH ABI: - r15: -(size of atomic instruction sequence) < 0 - r0: end point - r1: saved stack pointer -*/ - -#if __GNUC_PREREQ (4, 7) -# define rNOSP "u" -#else -# define rNOSP "r" -#endif - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - ({ __typeof (*(mem)) __result; \ - __asm __volatile ("\ - mova 1f,r0\n\ - .align 2\n\ - mov r15,r1\n\ - mov #(0f-1f),r15\n\ - 0: mov.b @%1,%0\n\ - cmp/eq %0,%3\n\ - bf 1f\n\ - mov.b %2,@%1\n\ - 1: mov r1,r15"\ - : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \ - : "r0", "r1", "t", "memory"); \ - __result; }) - -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - ({ __typeof (*(mem)) __result; \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - mov #-8,r15\n\ - 0: mov.w @%1,%0\n\ - cmp/eq %0,%3\n\ - bf 1f\n\ - mov.w %2,@%1\n\ - 1: mov r1,r15"\ - : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \ - : "r0", "r1", "t", "memory"); \ - __result; }) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ - ({ __typeof (*(mem)) __result; \ - __asm __volatile ("\ - mova 1f,r0\n\ - .align 2\n\ - mov r15,r1\n\ - mov #(0f-1f),r15\n\ - 0: mov.l @%1,%0\n\ - cmp/eq %0,%3\n\ - bf 1f\n\ - mov.l %2,@%1\n\ - 1: mov r1,r15"\ - : "=&r" (__result) : rNOSP (mem), rNOSP (newval), rNOSP (oldval) \ - : "r0", "r1", "t", "memory"); \ - __result; }) - -/* XXX We do not really need 64-bit compare-and-exchange. At least - not in the moment. Using it would mean causing portability - problems since not many other 32-bit architectures have support for - such an operation. So don't define any code for now. */ - -# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define atomic_exchange_and_add(mem, value) \ - ({ __typeof (*(mem)) __result, __tmp, __value = (value); \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ - mova 1f,r0\n\ - .align 2\n\ - mov r15,r1\n\ - mov #(0f-1f),r15\n\ - 0: mov.b @%2,%0\n\ - mov %1,r2\n\ - add %0,r2\n\ - mov.b r2,@%2\n\ - 1: mov r1,r15"\ - : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ - : "r0", "r1", "r2", "memory"); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ - mova 1f,r0\n\ - .align 2\n\ - mov r15,r1\n\ - mov #(0f-1f),r15\n\ - 0: mov.w @%2,%0\n\ - mov %1,r2\n\ - add %0,r2\n\ - mov.w r2,@%2\n\ - 1: mov r1,r15"\ - : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ - : "r0", "r1", "r2", "memory"); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ - mova 1f,r0\n\ - .align 2\n\ - mov r15,r1\n\ - mov #(0f-1f),r15\n\ - 0: mov.l @%2,%0\n\ - mov %1,r2\n\ - add %0,r2\n\ - mov.l r2,@%2\n\ - 1: mov r1,r15"\ - : "=&r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ - : "r0", "r1", "r2", "memory"); \ - else \ - { \ - __typeof (mem) memp = (mem); \ - do \ - __result = *memp; \ - while (__arch_compare_and_exchange_val_64_acq \ - (memp, __result + __value, __result) == __result); \ - (void) __value; \ - } \ - __result; }) - -#define atomic_add(mem, value) \ - (void) ({ __typeof (*(mem)) __tmp, __value = (value); \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.b @%1,r2\n\ - add %0,r2\n\ - mov.b r2,@%1\n\ - 1: mov r1,r15"\ - : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \ - : "r0", "r1", "r2", "memory"); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ - mova 
1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.w @%1,r2\n\ - add %0,r2\n\ - mov.w r2,@%1\n\ - 1: mov r1,r15"\ - : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \ - : "r0", "r1", "r2", "memory"); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.l @%1,r2\n\ - add %0,r2\n\ - mov.l r2,@%1\n\ - 1: mov r1,r15"\ - : "=&r" (__tmp) : rNOSP (mem), "0" (__value) \ - : "r0", "r1", "r2", "memory"); \ - else \ - { \ - __typeof (*(mem)) oldval; \ - __typeof (mem) memp = (mem); \ - do \ - oldval = *memp; \ - while (__arch_compare_and_exchange_val_64_acq \ - (memp, oldval + __value, oldval) == oldval); \ - (void) __value; \ - } \ - }) - -#define atomic_add_negative(mem, value) \ - ({ unsigned char __result; \ - __typeof (*(mem)) __tmp, __value = (value); \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.b @%2,r2\n\ - add %1,r2\n\ - mov.b r2,@%2\n\ - 1: mov r1,r15\n\ - shal r2\n\ - movt %0"\ - : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ - : "r0", "r1", "r2", "t", "memory"); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.w @%2,r2\n\ - add %1,r2\n\ - mov.w r2,@%2\n\ - 1: mov r1,r15\n\ - shal r2\n\ - movt %0"\ - : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ - : "r0", "r1", "r2", "t", "memory"); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.l @%2,r2\n\ - add %1,r2\n\ - mov.l r2,@%2\n\ - 1: mov r1,r15\n\ - shal r2\n\ - movt %0"\ - : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ - : "r0", "r1", "r2", "t", "memory"); \ - else \ - abort (); \ - __result; }) - -#define atomic_add_zero(mem, value) \ - ({ unsigned char __result; \ - __typeof (*(mem)) __tmp, __value = (value); \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.b @%2,r2\n\ - add %1,r2\n\ - mov.b r2,@%2\n\ - 1: mov r1,r15\n\ - tst r2,r2\n\ - movt %0"\ - : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ - : "r0", "r1", "r2", "t", "memory"); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.w @%2,r2\n\ - add %1,r2\n\ - mov.w r2,@%2\n\ - 1: mov r1,r15\n\ - tst r2,r2\n\ - movt %0"\ - : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ - : "r0", "r1", "r2", "t", "memory"); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.l @%2,r2\n\ - add %1,r2\n\ - mov.l r2,@%2\n\ - 1: mov r1,r15\n\ - tst r2,r2\n\ - movt %0"\ - : "=r" (__result), "=&r" (__tmp) : rNOSP (mem), "1" (__value) \ - : "r0", "r1", "r2", "t", "memory"); \ - else \ - abort (); \ - __result; }) - -#define atomic_increment_and_test(mem) atomic_add_zero((mem), 1) -#define atomic_decrement_and_test(mem) atomic_add_zero((mem), -1) - -#define atomic_bit_set(mem, bit) \ - (void) ({ unsigned int __mask = 1 << (bit); \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.b @%0,r2\n\ - or %1,r2\n\ - mov.b r2,@%0\n\ - 1: mov r1,r15"\ - : : rNOSP (mem), rNOSP (__mask) \ - : "r0", "r1", "r2", "memory"); \ - else if (sizeof (*(mem)) == 2) \ - 
__asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.w @%0,r2\n\ - or %1,r2\n\ - mov.w r2,@%0\n\ - 1: mov r1,r15"\ - : : rNOSP (mem), rNOSP (__mask) \ - : "r0", "r1", "r2", "memory"); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ - mova 1f,r0\n\ - mov r15,r1\n\ - .align 2\n\ - mov #(0f-1f),r15\n\ - 0: mov.l @%0,r2\n\ - or %1,r2\n\ - mov.l r2,@%0\n\ - 1: mov r1,r15"\ - : : rNOSP (mem), rNOSP (__mask) \ - : "r0", "r1", "r2", "memory"); \ - else \ - abort (); \ - }) - -#define atomic_bit_test_set(mem, bit) \ - ({ unsigned int __mask = 1 << (bit); \ - unsigned int __result = __mask; \ - if (sizeof (*(mem)) == 1) \ - __asm __volatile ("\ - mova 1f,r0\n\ - .align 2\n\ - mov r15,r1\n\ - mov #(0f-1f),r15\n\ - 0: mov.b @%2,r2\n\ - mov r2,r3\n\ - or %1,r2\n\ - mov.b r2,@%2\n\ - 1: mov r1,r15\n\ - and r3,%0"\ - : "=&r" (__result), "=&r" (__mask) \ - : rNOSP (mem), "0" (__result), "1" (__mask) \ - : "r0", "r1", "r2", "r3", "memory"); \ - else if (sizeof (*(mem)) == 2) \ - __asm __volatile ("\ - mova 1f,r0\n\ - .align 2\n\ - mov r15,r1\n\ - mov #(0f-1f),r15\n\ - 0: mov.w @%2,r2\n\ - mov r2,r3\n\ - or %1,r2\n\ - mov.w %1,@%2\n\ - 1: mov r1,r15\n\ - and r3,%0"\ - : "=&r" (__result), "=&r" (__mask) \ - : rNOSP (mem), "0" (__result), "1" (__mask) \ - : "r0", "r1", "r2", "r3", "memory"); \ - else if (sizeof (*(mem)) == 4) \ - __asm __volatile ("\ - mova 1f,r0\n\ - .align 2\n\ - mov r15,r1\n\ - mov #(0f-1f),r15\n\ - 0: mov.l @%2,r2\n\ - mov r2,r3\n\ - or r2,%1\n\ - mov.l %1,@%2\n\ - 1: mov r1,r15\n\ - and r3,%0"\ - : "=&r" (__result), "=&r" (__mask) \ - : rNOSP (mem), "0" (__result), "1" (__mask) \ - : "r0", "r1", "r2", "r3", "memory"); \ - else \ - abort (); \ - __result; }) diff --git a/sysdeps/x86_64/atomic-machine.h b/sysdeps/x86_64/atomic-machine.h new file mode 100644 index 0000000000..337b334db1 --- /dev/null +++ b/sysdeps/x86_64/atomic-machine.h @@ -0,0 +1,481 @@ +/* Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include /* For tcbhead_t. 
*/ +#include + + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + + +#ifndef LOCK_PREFIX +# ifdef UP +# define LOCK_PREFIX /* nothing */ +# else +# define LOCK_PREFIX "lock;" +# endif +#endif + +#define __HAVE_64B_ATOMICS 1 +#if __GNUC_PREREQ (4, 7) +#define USE_ATOMIC_COMPILER_BUILTINS 1 +#else +#define USE_ATOMIC_COMPILER_BUILTINS 0 +#endif + +#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + __sync_val_compare_and_swap (mem, oldval, newval) +#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + (! __sync_bool_compare_and_swap (mem, oldval, newval)) + + +#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \ + "je 0f\n\t" \ + "lock\n" \ + "0:\tcmpxchgb %b2, %1" \ + : "=a" (ret), "=m" (*mem) \ + : "q" (newval), "m" (*mem), "0" (oldval), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + ret; }) + +#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \ + "je 0f\n\t" \ + "lock\n" \ + "0:\tcmpxchgw %w2, %1" \ + : "=a" (ret), "=m" (*mem) \ + : "q" (newval), "m" (*mem), "0" (oldval), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + ret; }) + +#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \ + "je 0f\n\t" \ + "lock\n" \ + "0:\tcmpxchgl %2, %1" \ + : "=a" (ret), "=m" (*mem) \ + : "q" (newval), "m" (*mem), "0" (oldval), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + ret; }) + +#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __typeof (*mem) ret; \ + __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \ + "je 0f\n\t" \ + "lock\n" \ + "0:\tcmpxchgq %q2, %1" \ + : "=a" (ret), "=m" (*mem) \ + : "q" ((atomic64_t) cast_to_integer (newval)), \ + "m" (*mem), \ + "0" ((atomic64_t) cast_to_integer (oldval)), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + ret; }) + + +/* Note that we need no lock prefix. 
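
The __arch_c_* ("conditional") variants above read tcbhead_t.multiple_threads through %fs and branch over the lock prefix when the process has never started a second thread, so single-threaded programs run a plain cmpxchg instead of a bus-locked one. A hedged sketch of the idea (single_threaded is a stand-in for that TCB flag, and the multi-threaded path reuses the __sync builtin this file already uses):

    #include <stdbool.h>

    extern bool single_threaded;   /* stand-in for !multiple_threads */

    static inline int
    catomic_cas_val_sketch (int *mem, int newval, int oldval)
    {
      if (single_threaded)
        {
          /* No concurrent writer can exist; note the real code still
             executes cmpxchg here, merely without the lock prefix.  */
          int prev = *mem;
          if (prev == oldval)
            *mem = newval;
          return prev;
        }
      return __sync_val_compare_and_swap (mem, oldval, newval);
    }
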
*/ +#define atomic_exchange_acq(mem, newvalue) \ + ({ __typeof (*mem) result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile ("xchgb %b0, %1" \ + : "=q" (result), "=m" (*mem) \ + : "0" (newvalue), "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile ("xchgw %w0, %1" \ + : "=r" (result), "=m" (*mem) \ + : "0" (newvalue), "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile ("xchgl %0, %1" \ + : "=r" (result), "=m" (*mem) \ + : "0" (newvalue), "m" (*mem)); \ + else \ + __asm __volatile ("xchgq %q0, %1" \ + : "=r" (result), "=m" (*mem) \ + : "0" ((atomic64_t) cast_to_integer (newvalue)), \ + "m" (*mem)); \ + result; }) + + +#define __arch_exchange_and_add_body(lock, mem, value) \ + ({ __typeof (*mem) result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "xaddb %b0, %1" \ + : "=q" (result), "=m" (*mem) \ + : "0" (value), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "xaddw %w0, %1" \ + : "=r" (result), "=m" (*mem) \ + : "0" (value), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "xaddl %0, %1" \ + : "=r" (result), "=m" (*mem) \ + : "0" (value), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + __asm __volatile (lock "xaddq %q0, %1" \ + : "=r" (result), "=m" (*mem) \ + : "0" ((atomic64_t) cast_to_integer (value)), \ + "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + result; }) + +#define atomic_exchange_and_add(mem, value) \ + __sync_fetch_and_add (mem, value) + +#define __arch_exchange_and_add_cprefix \ + "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t" + +#define catomic_exchange_and_add(mem, value) \ + __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value) + + +#define __arch_add_body(lock, pfx, mem, value) \ + do { \ + if (__builtin_constant_p (value) && (value) == 1) \ + pfx##_increment (mem); \ + else if (__builtin_constant_p (value) && (value) == -1) \ + pfx##_decrement (mem); \ + else if (sizeof (*mem) == 1) \ + __asm __volatile (lock "addb %b1, %0" \ + : "=m" (*mem) \ + : "iq" (value), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "addw %w1, %0" \ + : "=m" (*mem) \ + : "ir" (value), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "addl %1, %0" \ + : "=m" (*mem) \ + : "ir" (value), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + __asm __volatile (lock "addq %q1, %0" \ + : "=m" (*mem) \ + : "ir" ((atomic64_t) cast_to_integer (value)), \ + "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + } while (0) + +#define atomic_add(mem, value) \ + __arch_add_body (LOCK_PREFIX, atomic, mem, value) + +#define __arch_add_cprefix \ + "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t" + +#define catomic_add(mem, value) \ + __arch_add_body (__arch_add_cprefix, catomic, mem, value) + + +#define atomic_add_negative(mem, value) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "iq" (value), "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "ir" (value), "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \ + : "=m" (*mem), 
"=qm" (__result) \ + : "ir" (value), "m" (*mem)); \ + else \ + __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "ir" ((atomic64_t) cast_to_integer (value)), \ + "m" (*mem)); \ + __result; }) + + +#define atomic_add_zero(mem, value) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "iq" (value), "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "ir" (value), "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "ir" (value), "m" (*mem)); \ + else \ + __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "ir" ((atomic64_t) cast_to_integer (value)), \ + "m" (*mem)); \ + __result; }) + + +#define __arch_increment_body(lock, mem) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "incb %b0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "incw %w0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "incl %0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + __asm __volatile (lock "incq %q0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + } while (0) + +#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem) + +#define __arch_increment_cprefix \ + "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t" + +#define catomic_increment(mem) \ + __arch_increment_body (__arch_increment_cprefix, mem) + + +#define atomic_increment_and_test(mem) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else \ + __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + __result; }) + + +#define __arch_decrement_body(lock, mem) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "decb %b0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "decw %w0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "decl %0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + __asm __volatile (lock "decq %q0" \ + : "=m" (*mem) \ + : "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + } while (0) + +#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem) + +#define __arch_decrement_cprefix \ + "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t" + +#define catomic_decrement(mem) \ + __arch_decrement_body (__arch_decrement_cprefix, mem) + + +#define atomic_decrement_and_test(mem) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + 
__asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + else \ + __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \ + : "=m" (*mem), "=qm" (__result) \ + : "m" (*mem)); \ + __result; }) + + +#define atomic_bit_set(mem, bit) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "orb %b2, %0" \ + : "=m" (*mem) \ + : "m" (*mem), "iq" (1L << (bit))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "orw %w2, %0" \ + : "=m" (*mem) \ + : "m" (*mem), "ir" (1L << (bit))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "orl %2, %0" \ + : "=m" (*mem) \ + : "m" (*mem), "ir" (1L << (bit))); \ + else if (__builtin_constant_p (bit) && (bit) < 32) \ + __asm __volatile (LOCK_PREFIX "orq %2, %0" \ + : "=m" (*mem) \ + : "m" (*mem), "i" (1L << (bit))); \ + else \ + __asm __volatile (LOCK_PREFIX "orq %q2, %0" \ + : "=m" (*mem) \ + : "m" (*mem), "r" (1UL << (bit))); \ + } while (0) + + +#define atomic_bit_test_set(mem, bit) \ + ({ unsigned char __result; \ + if (sizeof (*mem) == 1) \ + __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \ + : "=q" (__result), "=m" (*mem) \ + : "m" (*mem), "iq" (bit)); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \ + : "=q" (__result), "=m" (*mem) \ + : "m" (*mem), "ir" (bit)); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \ + : "=q" (__result), "=m" (*mem) \ + : "m" (*mem), "ir" (bit)); \ + else \ + __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \ + : "=q" (__result), "=m" (*mem) \ + : "m" (*mem), "ir" (bit)); \ + __result; }) + + +#define atomic_spin_nop() asm ("rep; nop") + + +#define __arch_and_body(lock, mem, mask) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "andb %b1, %0" \ + : "=m" (*mem) \ + : "iq" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "andw %w1, %0" \ + : "=m" (*mem) \ + : "ir" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "andl %1, %0" \ + : "=m" (*mem) \ + : "ir" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + __asm __volatile (lock "andq %q1, %0" \ + : "=m" (*mem) \ + : "ir" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + } while (0) + +#define __arch_cprefix \ + "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t" + +#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask) + +#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask) + + +#define __arch_or_body(lock, mem, mask) \ + do { \ + if (sizeof (*mem) == 1) \ + __asm __volatile (lock "orb %b1, %0" \ + : "=m" (*mem) \ + : "iq" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 2) \ + __asm __volatile (lock "orw %w1, %0" \ + : "=m" (*mem) \ + : "ir" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else if (sizeof (*mem) == 4) \ + __asm __volatile (lock "orl %1, %0" \ + : "=m" (*mem) \ + : "ir" (mask), "m" (*mem), \ + "i" (offsetof (tcbhead_t, multiple_threads))); \ + else \ + __asm 
__volatile (lock "orq %q1, %0" \
+                        : "=m" (*mem) \
+                        : "ir" (mask), "m" (*mem), \
+                          "i" (offsetof (tcbhead_t, multiple_threads))); \
+  } while (0)
+
+#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
+
+#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)
+
+/* We don't use mfence because it is supposedly slower due to having to
+   provide stronger guarantees (e.g., regarding self-modifying code).  */
+#define atomic_full_barrier() \
+  __asm __volatile (LOCK_PREFIX "orl $0, (%%rsp)" ::: "memory")
+#define atomic_read_barrier() __asm ("" ::: "memory")
+#define atomic_write_barrier() __asm ("" ::: "memory")
diff --git a/sysdeps/x86_64/bits/atomic.h b/sysdeps/x86_64/bits/atomic.h
deleted file mode 100644
index 337b334db1..0000000000
--- a/sysdeps/x86_64/bits/atomic.h
+++ /dev/null
@@ -1,481 +0,0 @@
-/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <stdint.h>
-#include <tls.h>  /* For tcbhead_t.  */
-#include <libc-internal.h>
-
-
-typedef int8_t atomic8_t;
-typedef uint8_t uatomic8_t;
-typedef int_fast8_t atomic_fast8_t;
-typedef uint_fast8_t uatomic_fast8_t;
-
-typedef int16_t atomic16_t;
-typedef uint16_t uatomic16_t;
-typedef int_fast16_t atomic_fast16_t;
-typedef uint_fast16_t uatomic_fast16_t;
-
-typedef int32_t atomic32_t;
-typedef uint32_t uatomic32_t;
-typedef int_fast32_t atomic_fast32_t;
-typedef uint_fast32_t uatomic_fast32_t;
-
-typedef int64_t atomic64_t;
-typedef uint64_t uatomic64_t;
-typedef int_fast64_t atomic_fast64_t;
-typedef uint_fast64_t uatomic_fast64_t;
-
-typedef intptr_t atomicptr_t;
-typedef uintptr_t uatomicptr_t;
-typedef intmax_t atomic_max_t;
-typedef uintmax_t uatomic_max_t;
-
-
-#ifndef LOCK_PREFIX
-# ifdef UP
-#  define LOCK_PREFIX  /* nothing */
-# else
-#  define LOCK_PREFIX "lock;"
-# endif
-#endif
-
-#define __HAVE_64B_ATOMICS 1
-#if __GNUC_PREREQ (4, 7)
-#define USE_ATOMIC_COMPILER_BUILTINS 1
-#else
-#define USE_ATOMIC_COMPILER_BUILTINS 0
-#endif
-
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
-  __sync_val_compare_and_swap (mem, oldval, newval)
-#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
-  (! __sync_bool_compare_and_swap (mem, oldval, newval))
-
-
-#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
-  ({ __typeof (*mem) ret; \
-     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
-                       "je 0f\n\t" \
-                       "lock\n" \
-                       "0:\tcmpxchgb %b2, %1" \
-                       : "=a" (ret), "=m" (*mem) \
-                       : "q" (newval), "m" (*mem), "0" (oldval), \
-                         "i" (offsetof (tcbhead_t, multiple_threads))); \
-     ret; })
-
-#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
-  ({ __typeof (*mem) ret; \
-     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
-                       "je 0f\n\t" \
-                       "lock\n" \
-                       "0:\tcmpxchgw %w2, %1" \
-                       : "=a" (ret), "=m" (*mem) \
-                       : "q" (newval), "m" (*mem), "0" (oldval), \
-                         "i" (offsetof (tcbhead_t, multiple_threads))); \
-     ret; })
-
-#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-  ({ __typeof (*mem) ret; \
-     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
-                       "je 0f\n\t" \
-                       "lock\n" \
-                       "0:\tcmpxchgl %2, %1" \
-                       : "=a" (ret), "=m" (*mem) \
-                       : "q" (newval), "m" (*mem), "0" (oldval), \
-                         "i" (offsetof (tcbhead_t, multiple_threads))); \
-     ret; })
-
-#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
-  ({ __typeof (*mem) ret; \
-     __asm __volatile ("cmpl $0, %%fs:%P5\n\t" \
-                       "je 0f\n\t" \
-                       "lock\n" \
-                       "0:\tcmpxchgq %q2, %1" \
-                       : "=a" (ret), "=m" (*mem) \
-                       : "q" ((atomic64_t) cast_to_integer (newval)), \
-                         "m" (*mem), \
-                         "0" ((atomic64_t) cast_to_integer (oldval)), \
-                         "i" (offsetof (tcbhead_t, multiple_threads))); \
-     ret; })
-
-
-/* Note that we need no lock prefix.  */
-#define atomic_exchange_acq(mem, newvalue) \
-  ({ __typeof (*mem) result; \
-     if (sizeof (*mem) == 1) \
-       __asm __volatile ("xchgb %b0, %1" \
-                         : "=q" (result), "=m" (*mem) \
-                         : "0" (newvalue), "m" (*mem)); \
-     else if (sizeof (*mem) == 2) \
-       __asm __volatile ("xchgw %w0, %1" \
-                         : "=r" (result), "=m" (*mem) \
-                         : "0" (newvalue), "m" (*mem)); \
-     else if (sizeof (*mem) == 4) \
-       __asm __volatile ("xchgl %0, %1" \
-                         : "=r" (result), "=m" (*mem) \
-                         : "0" (newvalue), "m" (*mem)); \
-     else \
-       __asm __volatile ("xchgq %q0, %1" \
-                         : "=r" (result), "=m" (*mem) \
-                         : "0" ((atomic64_t) cast_to_integer (newvalue)), \
-                           "m" (*mem)); \
-     result; })
-
-
-#define __arch_exchange_and_add_body(lock, mem, value) \
-  ({ __typeof (*mem) result; \
-     if (sizeof (*mem) == 1) \
-       __asm __volatile (lock "xaddb %b0, %1" \
-                         : "=q" (result), "=m" (*mem) \
-                         : "0" (value), "m" (*mem), \
-                           "i" (offsetof (tcbhead_t, multiple_threads))); \
-     else if (sizeof (*mem) == 2) \
-       __asm __volatile (lock "xaddw %w0, %1" \
-                         : "=r" (result), "=m" (*mem) \
-                         : "0" (value), "m" (*mem), \
-                           "i" (offsetof (tcbhead_t, multiple_threads))); \
-     else if (sizeof (*mem) == 4) \
-       __asm __volatile (lock "xaddl %0, %1" \
-                         : "=r" (result), "=m" (*mem) \
-                         : "0" (value), "m" (*mem), \
-                           "i" (offsetof (tcbhead_t, multiple_threads))); \
-     else \
-       __asm __volatile (lock "xaddq %q0, %1" \
-                         : "=r" (result), "=m" (*mem) \
-                         : "0" ((atomic64_t) cast_to_integer (value)), \
-                           "m" (*mem), \
-                           "i" (offsetof (tcbhead_t, multiple_threads))); \
-     result; })
-
-#define atomic_exchange_and_add(mem, value) \
-  __sync_fetch_and_add (mem, value)
-
-#define __arch_exchange_and_add_cprefix \
-  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_exchange_and_add(mem, value) \
-  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)
-
-
-#define __arch_add_body(lock, pfx, mem, value) \
-  do { \
-    if (__builtin_constant_p (value) && (value) == 1) \
-      pfx##_increment (mem); \
-    else if (__builtin_constant_p (value) && (value) == -1) \
-      pfx##_decrement (mem); \
-    else if (sizeof (*mem) == 1) \
-      __asm __volatile (lock "addb %b1, %0" \
-                        : "=m" (*mem) \
-                        : "iq" (value), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 2) \
-      __asm __volatile (lock "addw %w1, %0" \
-                        : "=m" (*mem) \
-                        : "ir" (value), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 4) \
-      __asm __volatile (lock "addl %1, %0" \
-                        : "=m" (*mem) \
-                        : "ir" (value), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else \
-      __asm __volatile (lock "addq %q1, %0" \
-                        : "=m" (*mem) \
-                        : "ir" ((atomic64_t) cast_to_integer (value)), \
-                          "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-  } while (0)
-
-#define atomic_add(mem, value) \
-  __arch_add_body (LOCK_PREFIX, atomic, mem, value)
-
-#define __arch_add_cprefix \
-  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_add(mem, value) \
-  __arch_add_body (__arch_add_cprefix, catomic, mem, value)
-
-
-#define atomic_add_negative(mem, value) \
-  ({ unsigned char __result; \
-     if (sizeof (*mem) == 1) \
-       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "iq" (value), "m" (*mem)); \
-     else if (sizeof (*mem) == 2) \
-       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "ir" (value), "m" (*mem)); \
-     else if (sizeof (*mem) == 4) \
-       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "ir" (value), "m" (*mem)); \
-     else \
-       __asm __volatile (LOCK_PREFIX "addq %q2, %0; sets %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "ir" ((atomic64_t) cast_to_integer (value)), \
-                           "m" (*mem)); \
-     __result; })
-
-
-#define atomic_add_zero(mem, value) \
-  ({ unsigned char __result; \
-     if (sizeof (*mem) == 1) \
-       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "iq" (value), "m" (*mem)); \
-     else if (sizeof (*mem) == 2) \
-       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "ir" (value), "m" (*mem)); \
-     else if (sizeof (*mem) == 4) \
-       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "ir" (value), "m" (*mem)); \
-     else \
-       __asm __volatile (LOCK_PREFIX "addq %q2, %0; setz %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "ir" ((atomic64_t) cast_to_integer (value)), \
-                           "m" (*mem)); \
-     __result; })
-
-
-#define __arch_increment_body(lock, mem) \
-  do { \
-    if (sizeof (*mem) == 1) \
-      __asm __volatile (lock "incb %b0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 2) \
-      __asm __volatile (lock "incw %w0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 4) \
-      __asm __volatile (lock "incl %0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else \
-      __asm __volatile (lock "incq %q0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-  } while (0)
-
-#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)
-
-#define __arch_increment_cprefix \
-  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_increment(mem) \
-  __arch_increment_body (__arch_increment_cprefix, mem)
-
-
-#define atomic_increment_and_test(mem) \
-  ({ unsigned char __result; \
-     if (sizeof (*mem) == 1) \
-       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "m" (*mem)); \
-     else if (sizeof (*mem) == 2) \
-       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "m" (*mem)); \
-     else if (sizeof (*mem) == 4) \
-       __asm __volatile (LOCK_PREFIX "incl %0; sete %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "m" (*mem)); \
-     else \
-       __asm __volatile (LOCK_PREFIX "incq %q0; sete %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "m" (*mem)); \
-     __result; })
-
-
-#define __arch_decrement_body(lock, mem) \
-  do { \
-    if (sizeof (*mem) == 1) \
-      __asm __volatile (lock "decb %b0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 2) \
-      __asm __volatile (lock "decw %w0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 4) \
-      __asm __volatile (lock "decl %0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else \
-      __asm __volatile (lock "decq %q0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-  } while (0)
-
-#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)
-
-#define __arch_decrement_cprefix \
-  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
-
-#define catomic_decrement(mem) \
-  __arch_decrement_body (__arch_decrement_cprefix, mem)
-
-
-#define atomic_decrement_and_test(mem) \
-  ({ unsigned char __result; \
-     if (sizeof (*mem) == 1) \
-       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "m" (*mem)); \
-     else if (sizeof (*mem) == 2) \
-       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "m" (*mem)); \
-     else if (sizeof (*mem) == 4) \
-       __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "m" (*mem)); \
-     else \
-       __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
-                         : "=m" (*mem), "=qm" (__result) \
-                         : "m" (*mem)); \
-     __result; })
-
-
-#define atomic_bit_set(mem, bit) \
-  do { \
-    if (sizeof (*mem) == 1) \
-      __asm __volatile (LOCK_PREFIX "orb %b2, %0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), "iq" (1L << (bit))); \
-    else if (sizeof (*mem) == 2) \
-      __asm __volatile (LOCK_PREFIX "orw %w2, %0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), "ir" (1L << (bit))); \
-    else if (sizeof (*mem) == 4) \
-      __asm __volatile (LOCK_PREFIX "orl %2, %0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), "ir" (1L << (bit))); \
-    else if (__builtin_constant_p (bit) && (bit) < 32) \
-      __asm __volatile (LOCK_PREFIX "orq %2, %0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), "i" (1L << (bit))); \
-    else \
-      __asm __volatile (LOCK_PREFIX "orq %q2, %0" \
-                        : "=m" (*mem) \
-                        : "m" (*mem), "r" (1UL << (bit))); \
-  } while (0)
-
-
-#define atomic_bit_test_set(mem, bit) \
-  ({ unsigned char __result; \
-     if (sizeof (*mem) == 1) \
-       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0" \
-                         : "=q" (__result), "=m" (*mem) \
-                         : "m" (*mem), "iq" (bit)); \
-     else if (sizeof (*mem) == 2) \
-       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0" \
-                         : "=q" (__result), "=m" (*mem) \
-                         : "m" (*mem), "ir" (bit)); \
-     else if (sizeof (*mem) == 4) \
-       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0" \
-                         : "=q" (__result), "=m" (*mem) \
-                         : "m" (*mem), "ir" (bit)); \
-     else \
-       __asm __volatile (LOCK_PREFIX "btsq %3, %1; setc %0" \
-                         : "=q" (__result), "=m" (*mem) \
-                         : "m" (*mem), "ir" (bit)); \
-     __result; })
-
-
-#define atomic_spin_nop() asm ("rep; nop")
-
-
-#define __arch_and_body(lock, mem, mask) \
-  do { \
-    if (sizeof (*mem) == 1) \
-      __asm __volatile (lock "andb %b1, %0" \
-                        : "=m" (*mem) \
-                        : "iq" (mask), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 2) \
-      __asm __volatile (lock "andw %w1, %0" \
-                        : "=m" (*mem) \
-                        : "ir" (mask), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 4) \
-      __asm __volatile (lock "andl %1, %0" \
-                        : "=m" (*mem) \
-                        : "ir" (mask), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else \
-      __asm __volatile (lock "andq %q1, %0" \
-                        : "=m" (*mem) \
-                        : "ir" (mask), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-  } while (0)
-
-#define __arch_cprefix \
-  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
-
-#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)
-
-#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)
-
-
-#define __arch_or_body(lock, mem, mask) \
-  do { \
-    if (sizeof (*mem) == 1) \
-      __asm __volatile (lock "orb %b1, %0" \
-                        : "=m" (*mem) \
-                        : "iq" (mask), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 2) \
-      __asm __volatile (lock "orw %w1, %0" \
-                        : "=m" (*mem) \
-                        : "ir" (mask), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else if (sizeof (*mem) == 4) \
-      __asm __volatile (lock "orl %1, %0" \
-                        : "=m" (*mem) \
-                        : "ir" (mask), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-    else \
-      __asm __volatile (lock "orq %q1, %0" \
-                        : "=m" (*mem) \
-                        : "ir" (mask), "m" (*mem), \
-                          "i" (offsetof (tcbhead_t, multiple_threads))); \
-  } while (0)
-
-#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
-
-#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)
-
-/* We don't use mfence because it is supposedly slower due to having to
-   provide stronger guarantees (e.g., regarding self-modifying code).  */
-#define atomic_full_barrier() \
-  __asm __volatile (LOCK_PREFIX "orl $0, (%%rsp)" ::: "memory")
-#define atomic_read_barrier() __asm ("" ::: "memory")
-#define atomic_write_barrier() __asm ("" ::: "memory")
--
cgit 1.4.1
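Editorial note, not part of the patch: the catomic_* macros in the file above all share one trick. The "cmpl $0, %%fs:%P<n>\n\tje 0f\n\tlock\n0:\t" prefix tests the multiple_threads field of the thread control block through %fs and, when the process is single-threaded, jumps over the one-byte lock prefix so the read-modify-write executes without bus locking. Below is a minimal standalone sketch of that technique, assuming GCC on x86-64; the function name catomic_increment_int and the plain global multiple_threads are illustrative stand-ins for glibc's %fs-relative TCB flag.

#include <stdio.h>

static int multiple_threads = 1;  /* stand-in for the TCB flag */

static void
catomic_increment_int (int *mem)
{
  /* If the flag is zero, "je 0f" skips the lock prefix and incl runs
     unlocked; otherwise the locked, fully atomic form executes.  The
     label 0: sits between the prefix byte and the instruction.  */
  __asm __volatile ("cmpl $0, %1\n\t"
                    "je 0f\n\t"
                    "lock\n"
                    "0:\tincl %0"
                    : "=m" (*mem)
                    : "m" (multiple_threads), "m" (*mem));
}

int
main (void)
{
  int counter = 41;
  catomic_increment_int (&counter);
  printf ("%d\n", counter);  /* prints 42 */
  return 0;
}

The saving matters because a locked instruction costs tens of cycles even when uncontended, while the single-threaded path pays only an easily predicted compare and branch.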
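Similarly, the atomic_full_barrier definition relies on the x86-64 rule that any locked read-modify-write is a full memory fence, so a dummy "lock orl $0" on the stack slot at (%rsp) orders all earlier loads and stores against all later ones, typically more cheaply than mfence, as the comment in the file notes. A minimal sketch of the same idea as a standalone function, again assuming GCC on x86-64 (the wrapper name full_barrier is ours):

static void
full_barrier (void)
{
  /* Locked no-op RMW on the top of the stack: a full hardware fence.
     The "memory" clobber also prevents compiler reordering.  */
  __asm __volatile ("lock; orl $0, (%%rsp)" ::: "memory");
}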