author     Ulrich Drepper <drepper@redhat.com>    2004-09-08 06:09:02 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2004-09-08 06:09:02 +0000
commit     7ba0e52c39ae2a57933a856ce363ab0361ad1ed9 (patch)
tree       aaabd4f3d63a9a27d7c8f4fb1105bcc3c5ef26a2
parent     f510d815bec80c004b194aaf4847fa40069cfd18 (diff)
Update.
	* sysdeps/powerpc/bits/atomic.h (atomic_increment): Define.
	(atomic_decrement): Define.

	* sysdeps/powerpc/bits/atomic.h: Implement atomic_increment_val and
	atomic_decrement_val.
	* sysdeps/powerpc/powerpc32/bits/atomic.h: Likewise.
	* sysdeps/powerpc/powerpc64/bits/atomic.h: Likewise.

	* csu/tst-atomic.c (do_test): Add tests of atomic_increment_val
	and atomic_decrement_val.
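
The new *_val macros return the value of *MEM after the operation, whereas
atomic_exchange_and_add returns the value before it.  The generic fallbacks
referenced by the earlier include/atomic.h entry presumably amount to
something like the sketch below (illustrative only; the real header may
differ in detail):

	/* Rough model of the generic fallbacks; not copied from include/atomic.h.  */
	#ifndef atomic_increment_val
	# define atomic_increment_val(mem)  (atomic_exchange_and_add ((mem), 1) + 1)
	#endif
	#ifndef atomic_decrement_val
	# define atomic_decrement_val(mem)  (atomic_exchange_and_add ((mem), -1) - 1)
	#endif

The PowerPC headers changed below instead implement them natively with
lwarx/stwcx. (32-bit) and ldarx/stdcx. (64-bit) reservation loops, so the
new value falls straight out of the loop.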
-rw-r--r--  ChangeLog                                         11
-rw-r--r--  csu/tst-atomic.c                                  14
-rw-r--r--  nptl/allocatestack.c                               3
-rw-r--r--  nptl/sysdeps/pthread/pthread_barrier_wait.c        2
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c    7
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sem_post.c            7
-rw-r--r--  sysdeps/powerpc/bits/atomic.h                     54
-rw-r--r--  sysdeps/powerpc/powerpc32/bits/atomic.h           31
-rw-r--r--  sysdeps/powerpc/powerpc64/bits/atomic.h           26
9 files changed, 129 insertions, 26 deletions
diff --git a/ChangeLog b/ChangeLog
index d9e415ff3a..94a848796f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,16 @@
 2004-09-07  Ulrich Drepper  <drepper@redhat.com>
 
+	* sysdeps/powerpc/bits/atomic.h (atomic_increment): Define.
+	(atomic_decrement): Define.
+
+	* sysdeps/powerpc/bits/atomic.h: Implement atomic_increment_val and
+	atomic_decrement_val.
+	* sysdeps/powerpc/powerpc32/bits/atomic.h: Likewise.
+	* sysdeps/powerpc/powerpc64/bits/atomic.h: Likewise.
+
+	* csu/tst-atomic.c (do_test): Add tests of atomic_increment_val
+	and atomic_decrement_val.
+
 	* include/atomic.h: Define atomic_increment_val, atomic_decrement_val,
 	and atomic_delay if not already defined.
 	* sysdeps/i386/i486/bits/atomic.h: Define atomic_delay.
diff --git a/csu/tst-atomic.c b/csu/tst-atomic.c
index 6104466739..cb6b6ba3d4 100644
--- a/csu/tst-atomic.c
+++ b/csu/tst-atomic.c
@@ -1,5 +1,5 @@
 /* Tests for atomic.h macros.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
 
@@ -130,6 +130,12 @@ do_test (void)
       ret = 1;
     }
 
+  if (atomic_increment_val (&mem) != 1)
+    {
+      puts ("atomic_increment_val test failed");
+      ret = 1;
+    }
+
   mem = 0;
   if (atomic_increment_and_test (&mem)
       || mem != 1)
@@ -162,6 +168,12 @@ do_test (void)
       ret = 1;
     }
 
+  if (atomic_decrement_val (&mem) != 15)
+    {
+      puts ("atomic_decrement_val test failed");
+      ret = 1;
+    }
+
   mem = 0;
   if (atomic_decrement_and_test (&mem)
       || mem != -1)
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 33fbbaa850..59f00d9231 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -445,8 +445,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 
 #if COLORING_INCREMENT != 0
 	  /* Atomically increment NCREATED.  */
-	  unsigned int ncreated = (atomic_exchange_and_add (&nptl_ncreated, 1)
-				   + 1);
+	  unsigned int ncreated = atomic_increment_val (&nptl_ncreated);
 
 	  /* We chose the offset for coloring by incrementing it for
 	     every new thread by a fixed amount.  The offset used
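
The caller-side simplifications in this and the remaining nptl files rest on
a single identity, sketched here (an illustrative comment, not glibc code):

	/* With *mem == n on entry:
	     atomic_exchange_and_add (mem, 1)  => returns n,     *mem becomes n + 1
	     atomic_increment_val (mem)        => returns n + 1, *mem becomes n + 1
	   so every "atomic_exchange_and_add (...) + 1" collapses into one call.
	   The same reasoning turns "== init_count - 1" into "== init_count" in
	   pthread_barrier_wait and "lll_futex_wake (futex, nr + 1)" into
	   "lll_futex_wake (futex, nr)" in the two sem_post variants below.  */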
diff --git a/nptl/sysdeps/pthread/pthread_barrier_wait.c b/nptl/sysdeps/pthread/pthread_barrier_wait.c
index aa5b42d419..c6b563f242 100644
--- a/nptl/sysdeps/pthread/pthread_barrier_wait.c
+++ b/nptl/sysdeps/pthread/pthread_barrier_wait.c
@@ -69,7 +69,7 @@ pthread_barrier_wait (barrier)
   unsigned int init_count = ibarrier->init_count;
 
   /* If this was the last woken thread, unlock.  */
-  if (atomic_exchange_and_add (&ibarrier->left, 1) == init_count - 1)
+  if (atomic_increment_val (&ibarrier->left) == init_count)
     /* We are done.  */
     lll_unlock (ibarrier->lock);
 
diff --git a/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c b/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
index d9ee5d50b7..91b9955181 100644
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
@@ -1,5 +1,5 @@
 /* sem_post -- post to a POSIX semaphore.  Powerpc version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -30,11 +30,10 @@ int
 __new_sem_post (sem_t *sem)
 {
   int *futex = (int *) sem;
-  int err, nr;
 
   __asm __volatile (__lll_rel_instr ::: "memory");
-  nr = atomic_exchange_and_add (futex, 1);
-  err = lll_futex_wake (futex, nr + 1);
+  int nr = atomic_increment_val (futex);
+  int err = lll_futex_wake (futex, nr);
   if (__builtin_expect (err, 0) < 0)
     {
       __set_errno (-err);
diff --git a/nptl/sysdeps/unix/sysv/linux/sem_post.c b/nptl/sysdeps/unix/sysv/linux/sem_post.c
index b4ee4cfc8a..671b43f7f7 100644
--- a/nptl/sysdeps/unix/sysv/linux/sem_post.c
+++ b/nptl/sysdeps/unix/sysv/linux/sem_post.c
@@ -1,5 +1,5 @@
 /* sem_post -- post to a POSIX semaphore.  Generic futex-using version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
 
@@ -30,10 +30,9 @@ int
 __new_sem_post (sem_t *sem)
 {
   int *futex = (int *) sem;
-  int err, nr;
 
-  nr = atomic_exchange_and_add (futex, 1);
-  err = lll_futex_wake (futex, nr + 1);
+  int nr = atomic_increment_val (futex);
+  int err = lll_futex_wake (futex, nr);
   if (__builtin_expect (err, 0) < 0)
     {
       __set_errno (-err);
diff --git a/sysdeps/powerpc/bits/atomic.h b/sysdeps/powerpc/bits/atomic.h
index 4b6a761b72..31f27e9e10 100644
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -147,6 +147,32 @@ typedef uintmax_t uatomic_max_t;
     __val;								      \
   })
 
+#define __arch_atomic_increment_val_32(mem) \
+  ({									      \
+    __typeof (*(mem)) __val;						      \
+    __asm __volatile ("1:	lwarx	%0,0,%2\n"			      \
+		      "		addi	%0,%0,1\n"			      \
+		      "		stwcx.	%0,0,%2\n"			      \
+		      "		bne-	1b"				      \
+		      : "=&b" (__val), "=m" (*mem)			      \
+		      : "b" (mem), "m" (*mem)				      \
+		      : "cr0", "memory");				      \
+    __val;								      \
+  })
+
+#define __arch_atomic_decrement_val_32(mem) \
+  ({									      \
+    __typeof (*(mem)) __val;						      \
+    __asm __volatile ("1:	lwarx	%0,0,%2\n"			      \
+		      "		subi	%0,%0,1\n"			      \
+		      "		stwcx.	%0,0,%2\n"			      \
+		      "		bne-	1b"				      \
+		      : "=&b" (__val), "=m" (*mem)			      \
+		      : "b" (mem), "m" (*mem)				      \
+		      : "cr0", "memory");				      \
+    __val;								      \
+  })
+
 #define __arch_atomic_decrement_if_positive_32(mem) \
   ({ int __val, __tmp;							      \
      __asm __volatile ("1:	lwarx	%0,0,%3\n"			      \
@@ -222,6 +248,34 @@ typedef uintmax_t uatomic_max_t;
     __result;								      \
   })
 
+#define atomic_increment_val(mem) \
+  ({									      \
+    __typeof (*(mem)) __result;						      \
+    if (sizeof (*(mem)) == 4)						      \
+      __result = __arch_atomic_increment_val_32 (mem);			      \
+    else if (sizeof (*(mem)) == 8)					      \
+      __result = __arch_atomic_increment_val_64 (mem);			      \
+    else 								      \
+       abort ();							      \
+    __result;								      \
+  })
+
+#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })
+
+#define atomic_decrement_val(mem) \
+  ({									      \
+    __typeof (*(mem)) __result;						      \
+    if (sizeof (*(mem)) == 4)						      \
+      __result = __arch_atomic_decrement_val_32 (mem);			      \
+    else if (sizeof (*(mem)) == 8)					      \
+      __result = __arch_atomic_decrement_val_64 (mem);			      \
+    else 								      \
+       abort ();							      \
+    __result;								      \
+  })
+
+#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
+
 
 /* Decrement *MEM if it is > 0, and return the old value.  */
 #define atomic_decrement_if_positive(mem) \
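
The new __arch_atomic_*_val_32 macros above follow the usual PowerPC
reservation pattern: lwarx loads the word and sets a reservation, addi/subi
adjusts it in a register, and stwcx. stores the result only if the
reservation still holds, with bne- looping back on failure.  A rough C model
of what that loop computes, written against glibc's existing
atomic_compare_and_exchange_val_acq macro and ignoring memory-ordering
differences (a sketch for illustration, not how this header is implemented):

	/* Hypothetical sketch: a CAS retry loop with the same observable
	   result as the lwarx/stwcx. sequence -- it yields the new value.  */
	#define sketch_atomic_increment_val(mem) \
	  ({ __typeof (*(mem)) __old, __new;				      \
	     do								      \
	       {							      \
		 __old = *(mem);					      \
		 __new = __old + 1;					      \
	       }							      \
	     /* CAS returns the previous value; retry until it matches.  */  \
	     while (atomic_compare_and_exchange_val_acq (mem, __new, __old)  \
		    != __old);						      \
	     __new;							      \
	  })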
diff --git a/sysdeps/powerpc/powerpc32/bits/atomic.h b/sysdeps/powerpc/powerpc32/bits/atomic.h
index 4e2e24335d..0f1a72335f 100644
--- a/sysdeps/powerpc/powerpc32/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc32/bits/atomic.h
@@ -1,5 +1,5 @@
 /* Atomic operations.  PowerPC32 version.
-   Copyright (C) 2003 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -56,17 +56,15 @@
   __tmp != 0;								      \
 })
 
-/* 
- * Powerpc32 processors don't implement the 64-bit (doubleword) forms of
- * load and reserve (ldarx) and store conditional (stdcx.) instructions.  
- * So for powerpc32 we stub out the 64-bit forms.
- */
+/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
+   load and reserve (ldarx) and store conditional (stdcx.) instructions.
+   So for powerpc32 we stub out the 64-bit forms.  */
 # define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
   (abort (), 0)
 
 # define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
   (abort (), (__typeof (*mem)) 0)
-  
+
 # define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
   (abort (), 0)
 
@@ -82,19 +80,24 @@
 # define __arch_atomic_exchange_and_add_64(mem, value) \
     ({ abort (); (*mem) = (value); })
 
+# define __arch_atomic_increment_val_64(mem) \
+    ({ abort (); (*mem)++; })
+
+# define __arch_atomic_decrement_val_64(mem) \
+    ({ abort (); (*mem)--; })
+
 # define __arch_atomic_decrement_if_positive_64(mem) \
     ({ abort (); (*mem)--; })
-    
-/* 
- * Older powerpc32 processors don't support the new "light weight" 
- * sync (lwsync).  So the only safe option is to use normal sync 
- * for all powerpc32 applications. 
+
+/*
+ * Older powerpc32 processors don't support the new "light weight"
+ * sync (lwsync).  So the only safe option is to use normal sync
+ * for all powerpc32 applications.
  */
 # define atomic_read_barrier()	__asm ("sync" ::: "memory")
 
 /*
  * Include the rest of the atomic ops macros which are common to both
- * powerpc32 and powerpc64. 
+ * powerpc32 and powerpc64.
  */
 #include_next <bits/atomic.h>
-
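
One detail of the abort () stubs added above for the 64-bit forms, a reading
of the idiom the commit itself does not spell out: the trailing (*mem)++ or
(*mem)-- after abort () only gives the statement expression the same type as
*mem, so the sizeof-based dispatch in the shared sysdeps/powerpc/bits/atomic.h
still type-checks.  For a 4-byte object the 8-byte branch is unreachable dead
code; an 8-byte object would reach the stub and abort at run time, since
powerpc32 has no ldarx/stdcx.

	/* Illustration only: execution never gets past abort (); the trailing
	   (*mem)++ exists solely to supply the expression's type and value.  */
	# define __arch_atomic_increment_val_64(mem) \
	    ({ abort (); (*mem)++; })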
diff --git a/sysdeps/powerpc/powerpc64/bits/atomic.h b/sysdeps/powerpc/powerpc64/bits/atomic.h
index fa25678410..e46dc1e4d7 100644
--- a/sysdeps/powerpc/powerpc64/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc64/bits/atomic.h
@@ -168,6 +168,32 @@
       __val;								      \
     })
 
+# define __arch_atomic_increment_val_64(mem) \
+    ({									      \
+      __typeof (*(mem)) __val;						      \
+      __asm __volatile ("1:	ldarx	%0,0,%2\n"			      \
+			"	addi	%0,%0,1\n"			      \
+			"	stdcx.	%0,0,%2\n"			      \
+			"	bne-	1b"				      \
+			: "=&b" (__val), "=m" (*mem)			      \
+			: "b" (mem), "m" (*mem)				      \
+			: "cr0", "memory");				      \
+      __val;								      \
+    })
+
+# define __arch_atomic_decrement_val_64(mem) \
+    ({									      \
+      __typeof (*(mem)) __val;						      \
+      __asm __volatile ("1:	ldarx	%0,0,%2\n"			      \
+			"	subi	%0,%0,1\n"			      \
+			"	stdcx.	%0,0,%2\n"			      \
+			"	bne-	1b"				      \
+			: "=&b" (__val), "=m" (*mem)			      \
+			: "b" (mem), "m" (*mem)				      \
+			: "cr0", "memory");				      \
+      __val;								      \
+    })
+
 # define __arch_atomic_decrement_if_positive_64(mem) \
   ({ int __val, __tmp;							      \
      __asm __volatile ("1:	ldarx	%0,0,%3\n"			      \