path: root/nptl/pthread_spin_lock.c
Diffstat (limited to 'nptl/pthread_spin_lock.c')
-rw-r--r--  nptl/pthread_spin_lock.c | 69
1 file changed, 69 insertions, 0 deletions
diff --git a/nptl/pthread_spin_lock.c b/nptl/pthread_spin_lock.c
new file mode 100644
index 0000000000..91733fb7f1
--- /dev/null
+++ b/nptl/pthread_spin_lock.c
@@ -0,0 +1,69 @@
+/* pthread_spin_lock -- lock a spin lock.  Generic version.
+   Copyright (C) 2012 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <atomic.h>
+#include "pthreadP.h"
+
+/* A machine-specific version can define SPIN_LOCK_READS_BETWEEN_CMPXCHG
+  to the number of plain reads that it's optimal to spin on between uses
+  of atomic_compare_and_exchange_val_acq.  If spinning forever is optimal
+  then use -1.  If no plain reads here would ever be optimal, use 0.  */
+#ifndef SPIN_LOCK_READS_BETWEEN_CMPXCHG
+# warning machine-dependent file should define SPIN_LOCK_READS_BETWEEN_CMPXCHG
+# define SPIN_LOCK_READS_BETWEEN_CMPXCHG 1000
+#endif
+
+int
+pthread_spin_lock (pthread_spinlock_t *lock)
+{
+  /* atomic_exchange usually takes fewer instructions than
+     atomic_compare_and_exchange.  On the other hand,
+     atomic_compare_and_exchange potentially generates less bus traffic
+     when the lock is already held.
+     We assume that the first try will usually succeed, so we use
+     atomic_exchange; for the subsequent tries we fall back to
+     atomic_compare_and_exchange.  */
+  if (atomic_exchange_acq (lock, 1) == 0)
+    return 0;
+
+  do
+    {
+      /* The lock is contended and we need to wait.  Going straight back
+	 to cmpxchg is not a good idea on many targets as that will force
+	 expensive memory synchronizations among processors and penalize other
+	 running threads.
+	 On the other hand, we do want to refresh the local core's view of
+	 memory once in a while, rather than spin indefinitely waiting for some
+	 external event to update local memory as a side effect.  */
+      if (SPIN_LOCK_READS_BETWEEN_CMPXCHG >= 0)
+	{
+	  int wait = SPIN_LOCK_READS_BETWEEN_CMPXCHG;
+
+	  while (*lock != 0 && wait > 0)
+	    --wait;
+	}
+      else
+	{
+	  while (*lock != 0)
+	    ;
+	}
+    }
+  while (atomic_compare_and_exchange_val_acq (lock, 1, 0) != 0);
+
+  return 0;
+}
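
For context (not part of the commit above): a machine-specific pthread_spin_lock.c would typically pin SPIN_LOCK_READS_BETWEEN_CMPXCHG and then pull in this generic file, in the usual glibc sysdeps override style. A minimal sketch of that pattern follows; the path named in the comment and the value 0 are illustrative assumptions, not taken from any real target.

/* Hypothetical sysdeps/<machine>/nptl/pthread_spin_lock.c -- a sketch,
   not from the commit above.  On this imagined target, plain reads
   between cmpxchg attempts are never a win, so spin on cmpxchg alone.  */
#define SPIN_LOCK_READS_BETWEEN_CMPXCHG 0

/* Reuse the generic implementation with the tuned constant.  */
#include <nptl/pthread_spin_lock.c>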
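The locking strategy itself, one optimistic exchange followed by plain-read spinning punctuated by compare-and-exchange, can be seen in isolation with C11 atomics. This is a standalone analogue, not glibc internals: atomic_exchange_acq and atomic_compare_and_exchange_val_acq map roughly onto the memory_order_acquire operations below, the relaxed loads stand in for the plain *lock reads, and only the SPIN_LOCK_READS_BETWEEN_CMPXCHG >= 0 branch is mirrored.

#include <stdatomic.h>

#define READS_BETWEEN_CMPXCHG 1000  /* stand-in for the glibc macro */

static int
spin_lock_sketch (atomic_int *lock)
{
  /* First try: one exchange, assumed to succeed in the common,
     uncontended case.  */
  if (atomic_exchange_explicit (lock, 1, memory_order_acquire) == 0)
    return 0;

  int expected;
  do
    {
      /* Spin on cheap relaxed loads so the cache line can stay shared,
         returning to the read-modify-write only periodically.  */
      int wait = READS_BETWEEN_CMPXCHG;
      while (atomic_load_explicit (lock, memory_order_relaxed) != 0
             && wait > 0)
        --wait;

      /* On failure the CAS writes the observed value back into
         EXPECTED, so reset it before each attempt.  */
      expected = 0;
    }
  while (!atomic_compare_exchange_weak_explicit
         (lock, &expected, 1, memory_order_acquire, memory_order_relaxed));

  return 0;
}

The weak CAS may fail spuriously; here that only means another pass through the read loop, which matches the retry structure of the generic code.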
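Finally, the caller's-eye view, for completeness: standard POSIX spin-lock usage, compiled and linked with -pthread. The counter example is invented.

#include <pthread.h>

static pthread_spinlock_t lock;
static long counter;

static void
counter_init (void)
{
  pthread_spin_init (&lock, PTHREAD_PROCESS_PRIVATE);
}

static void
counter_bump (void)
{
  pthread_spin_lock (&lock);	/* spins; never sleeps in the kernel */
  ++counter;
  pthread_spin_unlock (&lock);
}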