diff options
author | Wilco Dijkstra <wdijkstr@arm.com> | 2022-09-22 15:32:40 +0100 |
---|---|---|
committer | Wilco Dijkstra <wdijkstr@arm.com> | 2022-09-23 15:59:56 +0100 |
commit | d1babeb32de5dae8893c640bd925357b218d846c (patch) | |
tree | c5ee8e42e03e6a4caf2eb645d5f9b7a8bdbdb6e1 | |
parent | 8114b95cef10a5a1fc3e529ab8b3a75f56fe889a (diff) | |
download | glibc-d1babeb32de5dae8893c640bd925357b218d846c.tar.gz glibc-d1babeb32de5dae8893c640bd925357b218d846c.tar.xz glibc-d1babeb32de5dae8893c640bd925357b218d846c.zip |
Use C11 atomics instead of atomic_increment(_val)
Replace atomic_increment and atomic_increment_val with atomic_fetch_add_relaxed. One case in sem_post.c uses release semantics (see the comment above it). The others are simple counters and do not protect any shared data from concurrent accesses. Passes regression testing on AArch64. Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
-rw-r--r-- | htl/pt-create.c | 2 | ||||
-rw-r--r-- | manual/ipc.texi | 2 | ||||
-rw-r--r-- | manual/llio.texi | 4 | ||||
-rw-r--r-- | nptl/nptl_setxid.c | 2 | ||||
-rw-r--r-- | nptl/pthread_create.c | 2 | ||||
-rw-r--r-- | nptl/sem_post.c | 2 | ||||
-rw-r--r-- | nscd/cache.c | 2 | ||||
-rw-r--r-- | nscd/nscd_helper.c | 2 | ||||
-rw-r--r-- | sysdeps/unix/sysv/linux/check_pf.c | 6 |
9 files changed, 12 insertions, 12 deletions
diff --git a/htl/pt-create.c b/htl/pt-create.c index 36f138b498..5d37edbbff 100644 --- a/htl/pt-create.c +++ b/htl/pt-create.c @@ -228,7 +228,7 @@ __pthread_create_internal (struct __pthread **thread, the number of threads from within the new thread isn't an option since this thread might return and call `pthread_exit' before the new thread runs. */ - atomic_increment (&__pthread_total); + atomic_fetch_add_relaxed (&__pthread_total, 1); /* Store a pointer to this thread in the thread ID lookup table. We could use __thread_setid, however, we only lock for reading as no diff --git a/manual/ipc.texi b/manual/ipc.texi index 46c049c3da..be74664af9 100644 --- a/manual/ipc.texi +++ b/manual/ipc.texi @@ -85,7 +85,7 @@ by @theglibc{}. @deftypefun int sem_wait (sem_t *@var{sem}); @safety{@prelim{}@mtsafe{}@assafe{}@acunsafe{@acucorrupt{}}} -@c atomic_increment (nwaiters) acucorrupt +@c atomic_fetch_add_relaxed (nwaiters) acucorrupt @c @c Given the use atomic operations this function seems @c to be AS-safe. It is AC-unsafe because there is still diff --git a/manual/llio.texi b/manual/llio.texi index 0dfcdad434..79cf4c1670 100644 --- a/manual/llio.texi +++ b/manual/llio.texi @@ -2528,7 +2528,7 @@ aiocb64}, since the LFS transparently replaces the old interface. @c _dl_allocate_tls_init ok @c GET_DTV ok @c mmap ok -@c atomic_increment_val ok +@c atomic_fetch_add_relaxed ok @c munmap ok @c change_stack_perm ok @c mprotect ok @@ -2567,7 +2567,7 @@ aiocb64}, since the LFS transparently replaces the old interface. 
@c do_clone @asulock @ascuheap @aculock @acsmem @c PREPARE_CREATE ok @c lll_lock (pd->lock) @asulock @aculock -@c atomic_increment ok +@c atomic_fetch_add_relaxed ok @c clone ok @c atomic_fetch_add_relaxed ok @c atomic_exchange_acq ok diff --git a/nptl/nptl_setxid.c b/nptl/nptl_setxid.c index e709822b9b..301809d200 100644 --- a/nptl/nptl_setxid.c +++ b/nptl/nptl_setxid.c @@ -163,7 +163,7 @@ setxid_signal_thread (struct xid_command *cmdp, struct pthread *t) /* If this failed, it must have had not started yet or else exited. */ if (!INTERNAL_SYSCALL_ERROR_P (val)) { - atomic_increment (&cmdp->cntr); + atomic_fetch_add_relaxed (&cmdp->cntr, 1); return 1; } else diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c index 59d8df8cd7..0df67484e2 100644 --- a/nptl/pthread_create.c +++ b/nptl/pthread_create.c @@ -759,7 +759,7 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr, we momentarily store a false value; this doesn't matter because there is no kosher thing a signal handler interrupting us right here can do that cares whether the thread count is correct. */ - atomic_increment (&__nptl_nthreads); + atomic_fetch_add_relaxed (&__nptl_nthreads, 1); /* Our local value of stopped_start and thread_ran can be accessed at any time. The PD->stopped_start may only be accessed if we have diff --git a/nptl/sem_post.c b/nptl/sem_post.c index 9e5741753a..7ec21e92eb 100644 --- a/nptl/sem_post.c +++ b/nptl/sem_post.c @@ -91,7 +91,7 @@ __old_sem_post (sem_t *sem) /* We must need to synchronize with consumers of this token, so the atomic increment must have release MO semantics. */ atomic_write_barrier (); - (void) atomic_increment_val (futex); + atomic_fetch_add_release (futex, 1); /* We always have to assume it is a shared semaphore. 
*/ futex_wake (futex, 1, LLL_SHARED); return 0; diff --git a/nscd/cache.c b/nscd/cache.c index b66c35334a..21af9a0f95 100644 --- a/nscd/cache.c +++ b/nscd/cache.c @@ -192,7 +192,7 @@ cache_add (int type, const void *key, size_t len, struct datahead *packet, /* We depend on this value being correct and at least as high as the real number of entries. */ - atomic_increment (&table->head->nentries); + atomic_fetch_add_relaxed (&table->head->nentries, 1); /* It does not matter that we are not loading the just increment value, this is just for statistics. */ diff --git a/nscd/nscd_helper.c b/nscd/nscd_helper.c index 0651817a99..fc41bfdb6e 100644 --- a/nscd/nscd_helper.c +++ b/nscd/nscd_helper.c @@ -425,7 +425,7 @@ __nscd_get_map_ref (request_type type, const char *name, 0)) cur = NO_MAPPING; else - atomic_increment (&cur->counter); + atomic_fetch_add_relaxed (&cur->counter, 1); } } diff --git a/sysdeps/unix/sysv/linux/check_pf.c b/sysdeps/unix/sysv/linux/check_pf.c index 4d486ca9b5..0b77a2d897 100644 --- a/sysdeps/unix/sysv/linux/check_pf.c +++ b/sysdeps/unix/sysv/linux/check_pf.c @@ -72,8 +72,8 @@ static uint32_t nl_timestamp; uint32_t __bump_nl_timestamp (void) { - if (atomic_increment_val (&nl_timestamp) == 0) - atomic_increment (&nl_timestamp); + if (atomic_fetch_add_relaxed (&nl_timestamp, 1) + 1 == 0) + atomic_fetch_add_relaxed (&nl_timestamp, 1); return nl_timestamp; } @@ -309,7 +309,7 @@ __check_pf (bool *seen_ipv4, bool *seen_ipv6, if (cache_valid_p ()) { data = cache; - atomic_increment (&cache->usecnt); + atomic_fetch_add_relaxed (&cache->usecnt, 1); } else { |