-rw-r--r--  ChangeLog                     | 12
-rw-r--r--  NEWS                          |  2
-rw-r--r--  nptl/sem_post.c               |  2
-rw-r--r--  nptl/sem_waitcommon.c         | 10
-rw-r--r--  sysdeps/nptl/internaltypes.h  |  2
5 files changed, 20 insertions, 8 deletions
diff --git a/ChangeLog b/ChangeLog
index 051a7c4c56..a59266f771 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2015-01-23  H.J. Lu  <hongjiu.lu@intel.com>
+
+	[BZ #17870]
+	* nptl/sem_post.c (__new_sem_post): Replace unsigned long int
+	with uint64_t.
+	* nptl/sem_waitcommon.c (__sem_wait_cleanup): Replace 1UL with
+	(uint64_t) 1.
+	(__new_sem_wait_slow): Replace unsigned long int with uint64_t.
+	Replace 1UL with (uint64_t) 1.
+	* sysdeps/nptl/internaltypes.h (new_sem): Replace unsigned long
+	int with uint64_t.
+
 2015-01-23  Roland McGrath  <roland@hack.frob.com>
 
 	* inet/if_index.c (if_nameindex): Add missing libc_hidden_weak.
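
Editorial note: the bug fixed here (BZ #17870) is a type-width issue. struct new_sem
packs the semaphore value and the waiter count into one 64-bit word, but the code
manipulated that word as unsigned long int, which is only 32 bits wide on ILP32 ABIs
that still have 64-bit atomics, such as x32. A minimal standalone sketch of the
difference follows; SEM_NWAITERS_SHIFT matches the definition in
sysdeps/nptl/internaltypes.h, everything else is illustrative.

    #include <inttypes.h>
    #include <stdio.h>

    #define SEM_NWAITERS_SHIFT 32  /* waiter count lives above bit 31 */

    int
    main (void)
    {
      /* Correct: the operand is widened to 64 bits before the shift.  */
      uint64_t one_waiter = (uint64_t) 1 << SEM_NWAITERS_SHIFT;

      /* With the old code, 1UL << 32 on an ILP32 target shifts an unsigned
         long int by its full width, which is undefined behavior and cannot
         produce the intended bit in the upper half of the 64-bit word.  */
      printf ("one waiter adds 0x%" PRIx64 " to sem->data\n", one_waiter);
      return 0;
    }
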
diff --git a/NEWS b/NEWS
index fd6da90595..0ce43521ea 100644
--- a/NEWS
+++ b/NEWS
@@ -18,7 +18,7 @@ Version 2.21
   17664, 17665, 17668, 17682, 17702, 17717, 17719, 17722, 17723, 17724,
   17725, 17732, 17733, 17744, 17745, 17746, 17747, 17748, 17775, 17777,
   17780, 17781, 17782, 17791, 17793, 17796, 17797, 17803, 17806, 17834,
-  17844, 17848
+  17844, 17848, 17870
 
 * A new semaphore algorithm has been implemented in generic C code for all
   machines. Previous custom assembly implementations of semaphore were
diff --git a/nptl/sem_post.c b/nptl/sem_post.c
index 9162e4c8a6..6e495ed810 100644
--- a/nptl/sem_post.c
+++ b/nptl/sem_post.c
@@ -65,7 +65,7 @@ __new_sem_post (sem_t *sem)
      added tokens before (the release sequence includes atomic RMW operations
      by other threads).  */
   /* TODO Use atomic_fetch_add to make it scale better than a CAS loop?  */
-  unsigned long int d = atomic_load_relaxed (&isem->data);
+  uint64_t d = atomic_load_relaxed (&isem->data);
   do
     {
       if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)
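
Editorial note: the post path shown above operates on the combined 64-bit word, with
the value in the low 32 bits and the waiter count in the high 32 bits, so the load,
the overflow check, and the CAS all need a genuine 64-bit type. A rough sketch of that
structure using C11 atomics follows; glibc itself uses its internal atomic_* wrappers,
and the value limit and the futex wake-up are simplified stand-ins here.

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define SEM_NWAITERS_SHIFT 32
    #define SEM_VALUE_MASK (~(unsigned int) 0)
    #define SKETCH_SEM_VALUE_MAX 2147483647  /* stand-in for SEM_VALUE_MAX */

    /* Add one token, failing with EOVERFLOW if the value is already at its
       maximum.  The futex wake-up the real __new_sem_post performs when the
       waiter count (d >> SEM_NWAITERS_SHIFT) is nonzero is omitted.  */
    static int
    sketch_sem_post (_Atomic uint64_t *data)
    {
      uint64_t d = atomic_load_explicit (data, memory_order_relaxed);
      do
        {
          if ((d & SEM_VALUE_MASK) == SKETCH_SEM_VALUE_MAX)
            {
              errno = EOVERFLOW;
              return -1;
            }
        }
      /* Release MO so a thread that takes this token also observes the
         writes the poster made before calling post.  */
      while (!atomic_compare_exchange_weak_explicit (data, &d, d + 1,
                                                     memory_order_release,
                                                     memory_order_relaxed));
      return 0;
    }
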
diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c
index 96848d7ac5..c60daa3865 100644
--- a/nptl/sem_waitcommon.c
+++ b/nptl/sem_waitcommon.c
@@ -187,7 +187,7 @@ __sem_wait_cleanup (void *arg)
 
 #if __HAVE_64B_ATOMICS
   /* Stop being registered as a waiter.  See below for MO.  */
-  atomic_fetch_add_relaxed (&sem->data, -(1UL << SEM_NWAITERS_SHIFT));
+  atomic_fetch_add_relaxed (&sem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 #else
   __sem_wait_32_finish (sem);
 #endif
@@ -263,8 +263,8 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 #if __HAVE_64B_ATOMICS
   /* Add a waiter.  Relaxed MO is sufficient because we can rely on the
      ordering provided by the RMW operations we use.  */
-  unsigned long d = atomic_fetch_add_relaxed (&sem->data,
-      1UL << SEM_NWAITERS_SHIFT);
+  uint64_t d = atomic_fetch_add_relaxed (&sem->data,
+      (uint64_t) 1 << SEM_NWAITERS_SHIFT);
 
   pthread_cleanup_push (__sem_wait_cleanup, sem);
 
@@ -304,7 +304,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 	      err = -1;
 	      /* Stop being registered as a waiter.  */
 	      atomic_fetch_add_relaxed (&sem->data,
-		  -(1UL << SEM_NWAITERS_SHIFT));
+		  -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 	      break;
 	    }
 	  /* Relaxed MO is sufficient; see below.  */
@@ -320,7 +320,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 	     up-to-date value; the futex_wait or the CAS perform the real
 	     work.  */
 	  if (atomic_compare_exchange_weak_acquire (&sem->data,
-	      &d, d - 1 - (1UL << SEM_NWAITERS_SHIFT)))
+	      &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
 	    {
 	      err = 0;
 	      break;
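
Editorial note: the slow wait path above keeps both counters in the same word.
Registering as a waiter adds one above SEM_NWAITERS_SHIFT, and grabbing a token removes
one from the value and drops the registration in a single CAS; with a 32-bit unsigned
long int, both the registration constant and the combined subtraction would be computed
in the wrong width. A simplified, non-blocking sketch of that bookkeeping follows; the
real code sleeps on a futex instead of returning when no token is available.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SEM_NWAITERS_SHIFT 32
    #define SEM_VALUE_MASK (~(unsigned int) 0)

    static const uint64_t one_waiter = (uint64_t) 1 << SEM_NWAITERS_SHIFT;

    static bool
    sketch_try_take_token (_Atomic uint64_t *data)
    {
      /* Register as a waiter; relaxed MO is enough, the CAS below orders.  */
      uint64_t d = atomic_fetch_add_explicit (data, one_waiter,
                                              memory_order_relaxed);
      d += one_waiter;  /* best guess of the current word after our add */

      while (true)
        {
          if ((d & SEM_VALUE_MASK) == 0)
            {
              /* No token: deregister and give up (instead of futex-waiting).  */
              atomic_fetch_add_explicit (data, -one_waiter,
                                         memory_order_relaxed);
              return false;
            }
          /* Take one token and stop being a waiter in one step.  */
          if (atomic_compare_exchange_weak_explicit (data, &d,
                                                     d - 1 - one_waiter,
                                                     memory_order_acquire,
                                                     memory_order_relaxed))
            return true;
        }
    }
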
diff --git a/sysdeps/nptl/internaltypes.h b/sysdeps/nptl/internaltypes.h
index 7c0d2402a3..8f5cfa4af6 100644
--- a/sysdeps/nptl/internaltypes.h
+++ b/sysdeps/nptl/internaltypes.h
@@ -155,7 +155,7 @@ struct new_sem
 # endif
 # define SEM_NWAITERS_SHIFT 32
 # define SEM_VALUE_MASK (~(unsigned int)0)
-  unsigned long int data;
+  uint64_t data;
   int private;
   int pad;
 #else
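
Editorial note: the data member changed above is the single word both halves live in.
On __HAVE_64B_ATOMICS targets the current value and the number of blocked waiters can
be read back from it as sketched below; these are hypothetical helpers, not part of
glibc.

    #include <stdint.h>

    #define SEM_NWAITERS_SHIFT 32
    #define SEM_VALUE_MASK (~(unsigned int) 0)

    static inline unsigned int
    sketch_sem_value (uint64_t data)
    {
      return data & SEM_VALUE_MASK;       /* tokens currently available */
    }

    static inline uint64_t
    sketch_sem_nwaiters (uint64_t data)
    {
      return data >> SEM_NWAITERS_SHIFT;  /* threads blocked in sem_wait */
    }
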