-rw-r--r--  ChangeLog                                       15
-rw-r--r--  elf/dl-close.c                                   4
-rw-r--r--  elf/dl-fptr.c                                    4
-rw-r--r--  elf/dl-open.c                                    4
-rw-r--r--  elf/dl-profile.c                                14
-rw-r--r--  elf/dl-runtime.c                                 8
-rw-r--r--  elf/dl-sym.c                                     4
-rw-r--r--  gmon/mcount.c                                    4
-rw-r--r--  include/atomic.h                               139
-rw-r--r--  malloc/memusage.c                              116
-rw-r--r--  nptl/ChangeLog                                   5
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/rtld-lowlevel.h    18
-rw-r--r--  resolv/res_libc.c                                2
-rw-r--r--  stdlib/cxa_finalize.c                            4
-rw-r--r--  sysdeps/x86_64/bits/atomic.h                   222
15 files changed, 415 insertions(+), 148 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 4355bf59c6..6917e5bd7c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,18 @@
+2006-10-11  Ulrich Drepper  <drepper@redhat.com>
+
+	* include/atomic.h: Define catomic_* operations.
+	* sysdeps/x86_64/bits/atomic.h: Likewise.  Fix a few minor problems.
+	* stdlib/cxa_finalize.c: Use catomic_* operations instead of atomic_*.
+	* malloc/memusage.c: Likewise.
+	* gmon/mcount.c: Likewise.
+	* elf/dl-close.c: Likewise.
+	* elf/dl-open.c: Likewise.
+	* elf/dl-profile.c: Likewise.
+	* elf/dl-sym.c: Likewise.
+	* elf/dl-runtime.c: Likewise.
+	* elf/dl-fptr.c: Likewise.
+	* resolv/res_libc.c: Likewise.
+
 2006-10-10  Ulrich Drepper  <drepper@redhat.com>
 
 	* nis/nis_subr.c (nis_getnames): Add trailing dot to NIS_PATH
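The whole commit follows one idea: glibc's TCB already records whether the process has ever started a second thread, and the new catomic_* macros consult that flag so single-threaded processes skip the expensive bus-locked instructions. A minimal sketch of the concept, assuming a hypothetical global multiple_threads flag and GCC's __sync builtins in place of glibc's %fs-relative TCB field:

static int multiple_threads;	/* Hypothetical stand-in for the TCB field.  */

/* Conditionally atomic increment: a plain read-modify-write is safe
   while only one thread exists; once multiple_threads is set, fall
   back to the real atomic operation.  */
static inline void
catomic_increment_sketch (int *mem)
{
  if (!multiple_threads)
    ++*mem;				/* No lock prefix needed.  */
  else
    __sync_fetch_and_add (mem, 1);	/* Bus-locked add.  */
}

The x86_64 implementation at the end of this commit avoids even the branch by jumping over the one-byte lock prefix; this C version only illustrates the semantics.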
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 2e7c506a3d..84e57e09d0 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -423,11 +423,11 @@ _dl_close (void *_map)
 	      imap->l_scoperec = newp;
 	      __rtld_mrlock_done (imap->l_scoperec_lock);
 
-	      if (atomic_increment_val (&old->nusers) != 1)
+	      if (catomic_increment_val (&old->nusers) != 1)
 		{
 		  old->remove_after_use = true;
 		  old->notify = true;
-		  if (atomic_decrement_val (&old->nusers) != 0)
+		  if (catomic_decrement_val (&old->nusers) != 0)
 		    __rtld_waitzero (old->nusers);
 		}
 
diff --git a/elf/dl-fptr.c b/elf/dl-fptr.c
index 78beecfdcb..e068124d6f 100644
--- a/elf/dl-fptr.c
+++ b/elf/dl-fptr.c
@@ -1,5 +1,5 @@
 /* Manage function descriptors.  Generic version.
-   Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
+   Copyright (C) 1999-2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -40,7 +40,7 @@
 
 #ifndef COMPARE_AND_SWAP
 # define COMPARE_AND_SWAP(ptr, old, new) \
-  (atomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
+  (catomic_compare_and_exchange_bool_acq (ptr, new, old) == 0)
 #endif
 
 ElfW(Addr) _dl_boot_fptr_table [ELF_MACHINE_BOOT_FPTR_TABLE_LEN];
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 5c90e06708..35712b5ac0 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -429,9 +429,9 @@ dl_open_worker (void *a)
 		  imap->l_scoperec = newp;
 		  __rtld_mrlock_done (imap->l_scoperec_lock);
 
-		  atomic_increment (&old->nusers);
+		  catomic_increment (&old->nusers);
 		  old->remove_after_use = true;
-		  if (atomic_decrement_val (&old->nusers) == 0)
+		  if (catomic_decrement_val (&old->nusers) == 0)
 		    /* No user, we can free it here and now.  */
 		    free (old);
 		}
diff --git a/elf/dl-profile.c b/elf/dl-profile.c
index 41214c1b08..47033f32ef 100644
--- a/elf/dl-profile.c
+++ b/elf/dl-profile.c
@@ -1,5 +1,5 @@
 /* Profiling of shared libraries.
-   Copyright (C) 1997-2002, 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 1997-2002, 2003, 2004, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
    Based on the BSD mcount implementation.
@@ -509,24 +509,24 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
 	      size_t newfromidx;
 	      to_index = (data[narcs].self_pc
 			  / (HASHFRACTION * sizeof (*tos)));
-	      newfromidx = atomic_exchange_and_add (&fromidx, 1) + 1;
+	      newfromidx = catomic_exchange_and_add (&fromidx, 1) + 1;
 	      froms[newfromidx].here = &data[narcs];
 	      froms[newfromidx].link = tos[to_index];
 	      tos[to_index] = newfromidx;
-	      atomic_increment (&narcs);
+	      catomic_increment (&narcs);
 	    }
 
 	  /* If we still have no entry stop searching and insert.  */
 	  if (*topcindex == 0)
 	    {
-	      uint_fast32_t newarc = atomic_exchange_and_add (narcsp, 1);
+	      uint_fast32_t newarc = catomic_exchange_and_add (narcsp, 1);
 
 	      /* In rare cases it could happen that all entries in FROMS are
 		 occupied.  So we cannot count this anymore.  */
 	      if (newarc >= fromlimit)
 		goto done;
 
-	      *topcindex = atomic_exchange_and_add (&fromidx, 1) + 1;
+	      *topcindex = catomic_exchange_and_add (&fromidx, 1) + 1;
 	      fromp = &froms[*topcindex];
 
 	      fromp->here = &data[newarc];
@@ -534,7 +534,7 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
 	      data[newarc].self_pc = selfpc;
 	      data[newarc].count = 0;
 	      fromp->link = 0;
-	      atomic_increment (&narcs);
+	      catomic_increment (&narcs);
 
 	      break;
 	    }
@@ -547,7 +547,7 @@ _dl_mcount (ElfW(Addr) frompc, ElfW(Addr) selfpc)
     }
 
   /* Increment the counter.  */
-  atomic_increment (&fromp->here->count);
+  catomic_increment (&fromp->here->count);
 
  done:
   ;
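The profiling code above allocates arc records lock-free: catomic_exchange_and_add hands each caller a unique index into the shared arrays, and the bounds check tolerates overshoot by simply dropping the arc. A condensed sketch of that allocation step, with hypothetical array and limit names:

#include <stdint.h>

#define ARC_LIMIT 4096

struct arc_sketch { uintptr_t self_pc; uint32_t count; };
static struct arc_sketch data[ARC_LIMIT];
static uint32_t narcs;		/* Next free index.  */

/* Claim a fresh arc record, or NULL when the table is full.
   Concurrent callers each receive a distinct index.  */
static struct arc_sketch *
new_arc (uintptr_t self_pc)
{
  uint32_t idx = __sync_fetch_and_add (&narcs, 1);
  if (idx >= ARC_LIMIT)
    return NULL;		/* Overshoot is benign: stop counting.  */
  data[idx].self_pc = self_pc;
  data[idx].count = 0;
  return &data[idx];
}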
diff --git a/elf/dl-runtime.c b/elf/dl-runtime.c
index 83d565ac71..05fd974bf5 100644
--- a/elf/dl-runtime.c
+++ b/elf/dl-runtime.c
@@ -97,7 +97,7 @@ _dl_fixup (
 	{
 	  __rtld_mrlock_lock (l->l_scoperec_lock);
 	  scoperec = l->l_scoperec;
-	  atomic_increment (&scoperec->nusers);
+	  catomic_increment (&scoperec->nusers);
 	  __rtld_mrlock_unlock (l->l_scoperec_lock);
 	}
 
@@ -107,7 +107,7 @@ _dl_fixup (
 				    DL_LOOKUP_ADD_DEPENDENCY, NULL);
 
       if (l->l_type == lt_loaded
-	  && atomic_decrement_val (&scoperec->nusers) == 0
+	  && catomic_decrement_val (&scoperec->nusers) == 0
 	  && __builtin_expect (scoperec->remove_after_use, 0))
 	{
 	  if (scoperec->notify)
@@ -199,7 +199,7 @@ _dl_profile_fixup (
 	    {
 	      __rtld_mrlock_lock (l->l_scoperec_lock);
 	      scoperec = l->l_scoperec;
-	      atomic_increment (&scoperec->nusers);
+	      catomic_increment (&scoperec->nusers);
 	      __rtld_mrlock_unlock (l->l_scoperec_lock);
 	    }
 
@@ -209,7 +209,7 @@ _dl_profile_fixup (
 					DL_LOOKUP_ADD_DEPENDENCY, NULL);
 
 	  if (l->l_type == lt_loaded
-	      && atomic_decrement_val (&scoperec->nusers) == 0
+	      && catomic_decrement_val (&scoperec->nusers) == 0
 	      && __builtin_expect (scoperec->remove_after_use, 0))
 	    {
 	      if (scoperec->notify)
diff --git a/elf/dl-sym.c b/elf/dl-sym.c
index 1c66310d7c..43933466b4 100644
--- a/elf/dl-sym.c
+++ b/elf/dl-sym.c
@@ -124,7 +124,7 @@ do_sym (void *handle, const char *name, void *who,
 	{
 	  __rtld_mrlock_lock (match->l_scoperec_lock);
 	  struct r_scoperec *scoperec = match->l_scoperec;
-	  atomic_increment (&scoperec->nusers);
+	  catomic_increment (&scoperec->nusers);
 	  __rtld_mrlock_unlock (match->l_scoperec_lock);
 
 	  struct call_dl_lookup_args args;
@@ -141,7 +141,7 @@ do_sym (void *handle, const char *name, void *who,
 	  int err = GLRO(dl_catch_error) (&objname, &errstring, &malloced,
 					  call_dl_lookup, &args);
 
-	  if (atomic_decrement_val (&scoperec->nusers) == 0
+	  if (catomic_decrement_val (&scoperec->nusers) == 0
 	      && __builtin_expect (scoperec->remove_after_use, 0))
 	    {
 	      if (scoperec->notify)
diff --git a/gmon/mcount.c b/gmon/mcount.c
index 32a5f1ea0f..5a4a2499d4 100644
--- a/gmon/mcount.c
+++ b/gmon/mcount.c
@@ -69,8 +69,8 @@ _MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
 	 * check that we are profiling
 	 * and that we aren't recursively invoked.
 	 */
-	if (atomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
-						  GMON_PROF_ON))
+	if (catomic_compare_and_exchange_bool_acq (&p->state, GMON_PROF_BUSY,
+						   GMON_PROF_ON))
 	  return;
 
 	/*
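The CAS above doubles as the reentrancy guard for _mcount: only the caller that flips the state from GMON_PROF_ON to GMON_PROF_BUSY proceeds, so a nested or concurrent entry bails out immediately. A sketch of the guard; note GCC's __sync_bool_compare_and_swap returns nonzero on success, the opposite of glibc's bool CAS macros:

enum { PROF_ON, PROF_BUSY };
static int prof_state = PROF_ON;

static void
mcount_sketch (void)
{
  /* Enter only via the ON -> BUSY transition; otherwise profiling
     is off or we are already inside ourselves.  */
  if (!__sync_bool_compare_and_swap (&prof_state, PROF_ON, PROF_BUSY))
    return;

  /* ... record the call-graph arc ...  */

  prof_state = PROF_ON;		/* Reopen the gate.  */
}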
diff --git a/include/atomic.h b/include/atomic.h
index bd2e2f13f7..340c6e6bfb 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -21,6 +21,26 @@
 #ifndef _ATOMIC_H
 #define _ATOMIC_H	1
 
+/* This header defines three types of macros:
+
+   - atomic arithmetic and logic operations on memory.  They all
+     have the prefix "atomic_".
+
+   - conditionally atomic operations of the same kinds.  These
+     always behave identically but can be faster when atomicity
+     is not really needed, i.e., when only one thread has access
+     to the memory location.  The extra check makes the code
+     slightly slower in the multi-threaded case.  The interfaces
+     have the prefix "catomic_".
+
+   - support functions like barriers.  They also have the prefix
+     "atomic_".
+
+   Architectures must provide a few lowlevel macros (the compare
+   and exchange definitions).  All others are optional.  They
+   should only be provided if the architecture has specific
+   support for the operation.  */
+
 #include <stdlib.h>
 
 #include <bits/atomic.h>
@@ -70,12 +90,29 @@
 #endif
 
 
+#if !defined catomic_compare_and_exchange_val_acq \
+    && defined __arch_c_compare_and_exchange_val_32_acq
+# define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq,		      \
+		       mem, newval, oldval)
+#else
+# define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
+#endif
+
+
 #ifndef atomic_compare_and_exchange_val_rel
 # define atomic_compare_and_exchange_val_rel(mem, newval, oldval)	      \
   atomic_compare_and_exchange_val_acq (mem, newval, oldval)
 #endif
 
 
+#ifndef catomic_compare_and_exchange_val_rel
+# define catomic_compare_and_exchange_val_rel(mem, newval, oldval)	      \
+  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
+#endif
+
+
 /* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
    Return zero if *MEM was changed or non-zero if no exchange happened.  */
 #ifndef atomic_compare_and_exchange_bool_acq
@@ -94,12 +131,34 @@
 #endif
 
 
+#ifndef catomic_compare_and_exchange_bool_acq
+# ifdef __arch_c_compare_and_exchange_bool_32_acq
+#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq,		      \
+		        mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  ({ /* Cannot use __oldval here, because macros later in this file might     \
+	call this macro with __oldval argument.	 */			      \
+     __typeof (oldval) __old = (oldval);				      \
+     catomic_compare_and_exchange_val_acq (mem, newval, __old) != __old;      \
+  })
+# endif
+#endif
+
+
 #ifndef atomic_compare_and_exchange_bool_rel
 # define atomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
   atomic_compare_and_exchange_bool_acq (mem, newval, oldval)
 #endif
 
 
+#ifndef catomic_compare_and_exchange_bool_rel
+# define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
+  catomic_compare_and_exchange_bool_acq (mem, newval, oldval)
+#endif
+
+
 /* Store NEWVALUE in *MEM and return the old value.  */
 #ifndef atomic_exchange_acq
 # define atomic_exchange_acq(mem, newvalue) \
@@ -141,6 +200,23 @@
 #endif
 
 
+#ifndef catomic_exchange_and_add
+# define catomic_exchange_and_add(mem, value) \
+  ({ __typeof (*(mem)) __oldv;						      \
+     __typeof (mem) __memp = (mem);					      \
+     __typeof (*(mem)) __value = (value);				      \
+									      \
+     do									      \
+       __oldv = *__memp;						      \
+     while (__builtin_expect (catomic_compare_and_exchange_bool_acq (__memp,  \
+								     __oldv   \
+								    + __value,\
+								     __oldv), \
+			      0));					      \
+									      \
+     __oldv; })
+#endif
+
 
 #ifndef atomic_max
 # define atomic_max(mem, value) \
@@ -159,6 +235,25 @@
   } while (0)
 #endif
 
+
+#ifndef catomic_max
+# define catomic_max(mem, value) \
+  do {									      \
+    __typeof (*(mem)) __oldv;						      \
+    __typeof (mem) __memp = (mem);					      \
+    __typeof (*(mem)) __value = (value);				      \
+    do {								      \
+      __oldv = *__memp;							      \
+      if (__oldv >= __value)						      \
+	break;								      \
+    } while (__builtin_expect (catomic_compare_and_exchange_bool_acq (__memp, \
+								      __value,\
+								      __oldv),\
+			       0));					      \
+  } while (0)
+#endif
+
+
 #ifndef atomic_min
 # define atomic_min(mem, value) \
   do {									      \
@@ -176,21 +271,38 @@
   } while (0)
 #endif
 
+
 #ifndef atomic_add
 # define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
 #endif
 
 
+#ifndef catomic_add
+# define catomic_add(mem, value) \
+  (void) catomic_exchange_and_add ((mem), (value))
+#endif
+
+
 #ifndef atomic_increment
 # define atomic_increment(mem) atomic_add ((mem), 1)
 #endif
 
 
+#ifndef catomic_increment
+# define catomic_increment(mem) catomic_add ((mem), 1)
+#endif
+
+
 #ifndef atomic_increment_val
 # define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
 #endif
 
 
+#ifndef catomic_increment_val
+# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
+#endif
+
+
 /* Add one to *MEM and return true iff it's now zero.  */
 #ifndef atomic_increment_and_test
 # define atomic_increment_and_test(mem) \
@@ -203,11 +315,21 @@
 #endif
 
 
+#ifndef catomic_decrement
+# define catomic_decrement(mem) catomic_add ((mem), -1)
+#endif
+
+
 #ifndef atomic_decrement_val
 # define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
 #endif
 
 
+#ifndef catomic_decrement_val
+# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
+#endif
+
+
 /* Subtract 1 from *MEM and return true iff it's now zero.  */
 #ifndef atomic_decrement_and_test
 # define atomic_decrement_and_test(mem) \
@@ -327,6 +449,23 @@
   } while (0)
 #endif
 
+#ifndef catomic_or
+# define catomic_or(mem, mask) \
+  do {									      \
+    __typeof (*(mem)) __oldval;						      \
+    __typeof (mem) __memp = (mem);					      \
+    __typeof (*(mem)) __mask = (mask);					      \
+									      \
+    do									      \
+      __oldval = (*__memp);						      \
+    while (__builtin_expect (catomic_compare_and_exchange_bool_acq (__memp,   \
+								    __oldval  \
+								    | __mask, \
+								    __oldval),\
+			      0));					      \
+  } while (0)
+#endif
+
 /* Atomically *mem |= mask and return the old value of *mem.  */
 #ifndef atomic_or_val
 # define atomic_or_val(mem, mask) \
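All of the generic catomic_* fallbacks above share one shape: snapshot the old value, compute the new one, and retry a CAS until nothing moved underneath. catomic_max, for example, expands to roughly this loop (a sketch with a GCC builtin in place of catomic_compare_and_exchange_bool_acq):

static inline void
atomic_max_sketch (unsigned long *mem, unsigned long value)
{
  unsigned long old;
  do
    {
      old = *mem;
      if (old >= value)
	break;			/* Already >= VALUE; nothing to store.  */
    }
  while (!__sync_bool_compare_and_swap (mem, old, value));
}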
diff --git a/malloc/memusage.c b/malloc/memusage.c
index 8b37c43a8a..9003d8094a 100644
--- a/malloc/memusage.c
+++ b/malloc/memusage.c
@@ -1,5 +1,5 @@
 /* Profile heap and stack memory usage of running program.
-   Copyright (C) 1998-2002, 2004, 2005 Free Software Foundation, Inc.
+   Copyright (C) 1998-2002, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
 
@@ -128,8 +128,8 @@ update_data (struct header *result, size_t len, size_t old_len)
 
   /* Compute current heap usage and compare it with the maximum value.  */
   memusage_size_t heap
-    = atomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
-  atomic_max (&peak_heap, heap);
+    = catomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
+  catomic_max (&peak_heap, heap);
 
   /* Compute current stack usage and compare it with the maximum
      value.  The base stack pointer might not be set if this is not
@@ -152,15 +152,15 @@ update_data (struct header *result, size_t len, size_t old_len)
     start_sp = sp;
   size_t current_stack = start_sp - sp;
 #endif
-  atomic_max (&peak_stack, current_stack);
+  catomic_max (&peak_stack, current_stack);
 
   /* Add up heap and stack usage and compare it with the maximum value.  */
-  atomic_max (&peak_total, heap + current_stack);
+  catomic_max (&peak_total, heap + current_stack);
 
   /* Store the value only if we are writing to a file.  */
   if (fd != -1)
     {
-      uatomic32_t idx = atomic_exchange_and_add (&buffer_cnt, 1);
+      uatomic32_t idx = catomic_exchange_and_add (&buffer_cnt, 1);
       if (idx >= 2 * buffer_size)
 	{
 	  /* We try to reset the counter to the correct range.  If
@@ -168,7 +168,7 @@ update_data (struct header *result, size_t len, size_t old_len)
 	     counter it does not matter since that thread will take
 	     care of the correction.  */
 	  unsigned int reset = idx - 2 * buffer_size;
-	  atomic_compare_and_exchange_val_acq (&buffer_size, reset, idx);
+	  catomic_compare_and_exchange_val_acq (&buffer_cnt, reset, idx);
 	  idx = reset;
 	}
 
@@ -337,24 +337,24 @@ malloc (size_t len)
     return (*mallocp) (len);
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_malloc]);
+  catomic_increment (&calls[idx_malloc]);
   /* Keep track of total memory consumption for `malloc'.  */
-  atomic_add (&total[idx_malloc], len);
+  catomic_add (&total[idx_malloc], len);
   /* Keep track of total memory requirement.  */
-  atomic_add (&grand_total, len);
+  catomic_add (&grand_total, len);
   /* Remember the size of the request.  */
   if (len < 65536)
-    atomic_increment (&histogram[len / 16]);
+    catomic_increment (&histogram[len / 16]);
   else
-    atomic_increment (&large);
+    catomic_increment (&large);
   /* Total number of calls of any of the functions.  */
-  atomic_increment (&calls_total);
+  catomic_increment (&calls_total);
 
   /* Do the real work.  */
   result = (struct header *) (*mallocp) (len + sizeof (struct header));
   if (result == NULL)
     {
-      atomic_increment (&failed[idx_malloc]);
+      catomic_increment (&failed[idx_malloc]);
       return NULL;
     }
 
@@ -403,36 +403,36 @@ realloc (void *old, size_t len)
     }
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_realloc]);
+  catomic_increment (&calls[idx_realloc]);
   if (len > old_len)
     {
       /* Keep track of total memory consumption for `realloc'.  */
-      atomic_add (&total[idx_realloc], len - old_len);
+      catomic_add (&total[idx_realloc], len - old_len);
       /* Keep track of total memory requirement.  */
-      atomic_add (&grand_total, len - old_len);
+      catomic_add (&grand_total, len - old_len);
     }
   /* Remember the size of the request.  */
   if (len < 65536)
-    atomic_increment (&histogram[len / 16]);
+    catomic_increment (&histogram[len / 16]);
   else
-    atomic_increment (&large);
+    catomic_increment (&large);
   /* Total number of calls of any of the functions.  */
-  atomic_increment (&calls_total);
+  catomic_increment (&calls_total);
 
   /* Do the real work.  */
   result = (struct header *) (*reallocp) (real, len + sizeof (struct header));
   if (result == NULL)
     {
-      atomic_increment (&failed[idx_realloc]);
+      catomic_increment (&failed[idx_realloc]);
       return NULL;
     }
 
   /* Record whether the reduction/increase happened in place.  */
   if (real == result)
-    atomic_increment (&inplace);
+    catomic_increment (&inplace);
   /* Was the buffer increased?  */
   if (old_len > len)
-    atomic_increment (&decreasing);
+    catomic_increment (&decreasing);
 
   /* Update the allocation data and write out the records if necessary.  */
   update_data (result, len, old_len);
@@ -463,16 +463,16 @@ calloc (size_t n, size_t len)
     return (*callocp) (n, len);
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_calloc]);
+  catomic_increment (&calls[idx_calloc]);
   /* Keep track of total memory consumption for `calloc'.  */
-  atomic_add (&total[idx_calloc], size);
+  catomic_add (&total[idx_calloc], size);
   /* Keep track of total memory requirement.  */
-  atomic_add (&grand_total, size);
+  catomic_add (&grand_total, size);
   /* Remember the size of the request.  */
   if (size < 65536)
-    atomic_increment (&histogram[size / 16]);
+    catomic_increment (&histogram[size / 16]);
   else
-    atomic_increment (&large);
+    catomic_increment (&large);
   /* Total number of calls of any of the functions.  */
   ++calls_total;
 
@@ -480,7 +480,7 @@ calloc (size_t n, size_t len)
   result = (struct header *) (*mallocp) (size + sizeof (struct header));
   if (result == NULL)
     {
-      atomic_increment (&failed[idx_calloc]);
+      catomic_increment (&failed[idx_calloc]);
       return NULL;
     }
 
@@ -517,7 +517,7 @@ free (void *ptr)
   /* `free (NULL)' has no effect.  */
   if (ptr == NULL)
     {
-      atomic_increment (&calls[idx_free]);
+      catomic_increment (&calls[idx_free]);
       return;
     }
 
@@ -531,9 +531,9 @@ free (void *ptr)
     }
 
   /* Keep track of number of calls.  */
-  atomic_increment (&calls[idx_free]);
+  catomic_increment (&calls[idx_free]);
   /* Keep track of total memory freed using `free'.  */
-  atomic_add (&total[idx_free], real->length);
+  catomic_add (&total[idx_free], real->length);
 
   /* Update the allocation data and write out the records if necessary.  */
   update_data (NULL, 0, real->length);
@@ -567,22 +567,22 @@ mmap (void *start, size_t len, int prot, int flags, int fd, off_t offset)
 		 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
 
       /* Keep track of number of calls.  */
-      atomic_increment (&calls[idx]);
+      catomic_increment (&calls[idx]);
       /* Keep track of total memory consumption for `malloc'.  */
-      atomic_add (&total[idx], len);
+      catomic_add (&total[idx], len);
       /* Keep track of total memory requirement.  */
-      atomic_add (&grand_total, len);
+      catomic_add (&grand_total, len);
       /* Remember the size of the request.  */
       if (len < 65536)
-	atomic_increment (&histogram[len / 16]);
+	catomic_increment (&histogram[len / 16]);
       else
-	atomic_increment (&large);
+	catomic_increment (&large);
       /* Total number of calls of any of the functions.  */
-      atomic_increment (&calls_total);
+      catomic_increment (&calls_total);
 
       /* Check for failures.  */
       if (result == NULL)
-	atomic_increment (&failed[idx]);
+	catomic_increment (&failed[idx]);
       else if (idx == idx_mmap_w)
 	/* Update the allocation data and write out the records if
 	   necessary.  Note the first parameter is NULL which means
@@ -619,22 +619,22 @@ mmap64 (void *start, size_t len, int prot, int flags, int fd, off64_t offset)
 		 ? idx_mmap_a : prot & PROT_WRITE ? idx_mmap_w : idx_mmap_r);
 
       /* Keep track of number of calls.  */
-      atomic_increment (&calls[idx]);
+      catomic_increment (&calls[idx]);
       /* Keep track of total memory consumption for `malloc'.  */
-      atomic_add (&total[idx], len);
+      catomic_add (&total[idx], len);
       /* Keep track of total memory requirement.  */
-      atomic_add (&grand_total, len);
+      catomic_add (&grand_total, len);
       /* Remember the size of the request.  */
       if (len < 65536)
-	atomic_increment (&histogram[len / 16]);
+	catomic_increment (&histogram[len / 16]);
       else
-	atomic_increment (&large);
+	catomic_increment (&large);
       /* Total number of calls of any of the functions.  */
-      atomic_increment (&calls_total);
+      catomic_increment (&calls_total);
 
       /* Check for failures.  */
       if (result == NULL)
-	atomic_increment (&failed[idx]);
+	catomic_increment (&failed[idx]);
       else if (idx == idx_mmap_w)
 	/* Update the allocation data and write out the records if
 	   necessary.  Note the first parameter is NULL which means
@@ -673,33 +673,33 @@ mremap (void *start, size_t old_len, size_t len, int flags,  ...)
   if (!not_me && trace_mmap)
     {
       /* Keep track of number of calls.  */
-      atomic_increment (&calls[idx_mremap]);
+      catomic_increment (&calls[idx_mremap]);
       if (len > old_len)
 	{
 	  /* Keep track of total memory consumption for `malloc'.  */
-	  atomic_add (&total[idx_mremap], len - old_len);
+	  catomic_add (&total[idx_mremap], len - old_len);
 	  /* Keep track of total memory requirement.  */
-	  atomic_add (&grand_total, len - old_len);
+	  catomic_add (&grand_total, len - old_len);
 	}
       /* Remember the size of the request.  */
       if (len < 65536)
-	atomic_increment (&histogram[len / 16]);
+	catomic_increment (&histogram[len / 16]);
       else
-	atomic_increment (&large);
+	catomic_increment (&large);
       /* Total number of calls of any of the functions.  */
-      atomic_increment (&calls_total);
+      catomic_increment (&calls_total);
 
       /* Check for failures.  */
       if (result == NULL)
-	atomic_increment (&failed[idx_mremap]);
+	catomic_increment (&failed[idx_mremap]);
       else
 	{
 	  /* Record whether the reduction/increase happened in place.  */
 	  if (start == result)
-	    atomic_increment (&inplace_mremap);
+	    catomic_increment (&inplace_mremap);
 	  /* Was the buffer increased?  */
 	  if (old_len > len)
-	    atomic_increment (&decreasing_mremap);
+	    catomic_increment (&decreasing_mremap);
 
 	  /* Update the allocation data and write out the records if
 	     necessary.  Note the first parameter is NULL which means
@@ -733,19 +733,19 @@ munmap (void *start, size_t len)
   if (!not_me && trace_mmap)
     {
       /* Keep track of number of calls.  */
-      atomic_increment (&calls[idx_munmap]);
+      catomic_increment (&calls[idx_munmap]);
 
       if (__builtin_expect (result == 0, 1))
 	{
 	  /* Keep track of total memory freed using `free'.  */
-	  atomic_add (&total[idx_munmap], len);
+	  catomic_add (&total[idx_munmap], len);
 
 	  /* Update the allocation data and write out the records if
 	     necessary.  */
 	  update_data (NULL, 0, len);
 	}
       else
-	atomic_increment (&failed[idx_munmap]);
+	catomic_increment (&failed[idx_munmap]);
     }
 
   return result;
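memusage.c is an ideal consumer of catomic_*: it is LD_PRELOADed into arbitrary programs, many of which never create a second thread, so every counter bump above would otherwise pay for a pointless lock prefix. The trace-buffer index logic also deserves a note: the fetch-and-add may overshoot twice the buffer size, and the CAS lets whichever thread wins pull the counter back for everyone. A sketch with a hypothetical fixed buffer size:

#include <stdint.h>

#define BUF_ENTRIES 1024
static uint32_t buffer_cnt;

/* Claim a slot in the double buffer, correcting overshoot the way
   update_data above does.  */
static uint32_t
claim_buffer_slot (void)
{
  uint32_t idx = __sync_fetch_and_add (&buffer_cnt, 1);
  if (idx >= 2 * BUF_ENTRIES)
    {
      /* Race to reset; any single winning CAS repairs the counter,
	 so losers can just use the corrected index.  */
      uint32_t reset = idx - 2 * BUF_ENTRIES;
      __sync_val_compare_and_swap (&buffer_cnt, idx, reset);
      idx = reset;
    }
  return idx;
}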
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index dacdafee79..a8bd0fd45a 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,8 @@
+2006-10-11  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/unix/sysv/linux/rtld-lowlevel.h: Use catomic_*
+	operations instead of atomic_*.
+
 2006-10-09  Ulrich Drepper  <drepper@redhat.com>
 
	* sysdeps/unix/sysv/linux/rtld-lowlevel.h: New file.
diff --git a/nptl/sysdeps/unix/sysv/linux/rtld-lowlevel.h b/nptl/sysdeps/unix/sysv/linux/rtld-lowlevel.h
index 908731440a..b16fd2165d 100644
--- a/nptl/sysdeps/unix/sysv/linux/rtld-lowlevel.h
+++ b/nptl/sysdeps/unix/sysv/linux/rtld-lowlevel.h
@@ -62,9 +62,9 @@ typedef int __rtld_mrlock_t;
 	      {								      \
 		int newval = ((oldval & __RTLD_MRLOCK_RBITS)		      \
 			      + __RTLD_MRLOCK_INC);			      \
-		int ret = atomic_compare_and_exchange_val_acq (&(lock),	      \
-							       newval,	      \
-							       oldval);	      \
+		int ret = catomic_compare_and_exchange_val_acq (&(lock),      \
+								newval,	      \
+								oldval);      \
 		if (__builtin_expect (ret == oldval, 1))		      \
 		  goto out;						      \
 	      }								      \
@@ -72,7 +72,7 @@ typedef int __rtld_mrlock_t;
 	  }								      \
 	if ((oldval & __RTLD_MRLOCK_RWAIT) == 0)			      \
 	  {								      \
-	    atomic_or (&(lock), __RTLD_MRLOCK_RWAIT);			      \
+	    catomic_or (&(lock), __RTLD_MRLOCK_RWAIT);			      \
 	    oldval |= __RTLD_MRLOCK_RWAIT;				      \
 	  }								      \
 	lll_futex_wait (lock, oldval);					      \
@@ -83,10 +83,10 @@ typedef int __rtld_mrlock_t;
 
 #define __rtld_mrlock_unlock(lock) \
   do {									      \
-    int oldval = atomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_INC);	      \
+    int oldval = catomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_INC);      \
     if (__builtin_expect ((oldval					      \
 			   & (__RTLD_MRLOCK_RBITS | __RTLD_MRLOCK_WWAIT))     \
-			  == __RTLD_MRLOCK_INC | __RTLD_MRLOCK_WWAIT, 0))     \
+			  == (__RTLD_MRLOCK_INC | __RTLD_MRLOCK_WWAIT), 0))   \
       /* We have to wake all threads since there might be some queued	      \
 	 readers already.  */						      \
       lll_futex_wake (&(lock), 0x7fffffff);				      \
@@ -107,7 +107,7 @@ typedef int __rtld_mrlock_t;
 	      {								      \
 		int newval = ((oldval & __RTLD_MRLOCK_RWAIT)		      \
 			      + __RTLD_MRLOCK_WRITER);			      \
-		int ret = atomic_compare_and_exchange_val_acq (&(lock),	      \
+		int ret = catomic_compare_and_exchange_val_acq (&(lock),      \
 							       newval,	      \
 							       oldval);	      \
 		if (__builtin_expect (ret == oldval, 1))		      \
@@ -115,7 +115,7 @@ typedef int __rtld_mrlock_t;
 	      }								      \
 	    atomic_delay ();						      \
 	  }								      \
-	atomic_or (&(lock), __RTLD_MRLOCK_WWAIT);			      \
+	catomic_or (&(lock), __RTLD_MRLOCK_WWAIT);			      \
 	oldval |= __RTLD_MRLOCK_WWAIT;					      \
 	lll_futex_wait (lock, oldval);					      \
       }									      \
@@ -125,7 +125,7 @@ typedef int __rtld_mrlock_t;
 
 #define __rtld_mrlock_done(lock) \
   do {				 \
-    int oldval = atomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_WRITER);    \
+    int oldval = catomic_exchange_and_add (&(lock), -__RTLD_MRLOCK_WRITER);   \
     if (__builtin_expect ((oldval & __RTLD_MRLOCK_RWAIT) != 0, 0))	      \
       lll_futex_wake (&(lock), 0x7fffffff);				      \
   } while (0)
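A genuine bug fix rides along in the unlock path above: `==` binds tighter than `|` in C, so the unparenthesized comparison against `__RTLD_MRLOCK_INC | __RTLD_MRLOCK_WWAIT` parsed as `((oldval & mask) == __RTLD_MRLOCK_INC) | __RTLD_MRLOCK_WWAIT`, a nonzero constant, making the supposedly rare futex wake unconditional. A tiny demonstration of the precedence trap:

#include <assert.h>

int
main (void)
{
  int x = 0;
  /* Without parentheses the comparison happens first.  */
  assert ((x == 4 | 8) == ((x == 4) | 8));	/* Nonzero for any x.  */
  assert ((x == (4 | 8)) != (x == 4 | 8));	/* Parenthesized differs.  */
  return 0;
}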
diff --git a/resolv/res_libc.c b/resolv/res_libc.c
index 834773c32f..8af57f7a4a 100644
--- a/resolv/res_libc.c
+++ b/resolv/res_libc.c
@@ -33,7 +33,7 @@ extern unsigned long long int __res_initstamp attribute_hidden;
 #if __WORDSIZE == 64
 # define atomicinclock(lock) (void) 0
 # define atomicincunlock(lock) (void) 0
-# define atomicinc(var) atomic_increment (&(var))
+# define atomicinc(var) catomic_increment (&(var))
 #else
 __libc_lock_define_initialized (static, lock);
 # define atomicinclock(lock) __libc_lock_lock (lock)
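The #if above picks an increment strategy by word size: on 64-bit targets the timestamp is bumped with one (now conditionally) atomic instruction, while 32-bit targets cannot update a 64-bit counter atomically in one shot and serialize with a lock instead. A sketch of the same dual strategy:

#include <pthread.h>
#include <stdint.h>

static uint64_t initstamp;

#if defined __LP64__
/* One 64-bit atomic add suffices.  */
# define bump_initstamp() ((void) __sync_fetch_and_add (&initstamp, 1))
#else
/* No single-instruction 64-bit RMW here; take a mutex.  */
static pthread_mutex_t stamp_lock = PTHREAD_MUTEX_INITIALIZER;
static void
bump_initstamp (void)
{
  pthread_mutex_lock (&stamp_lock);
  ++initstamp;
  pthread_mutex_unlock (&stamp_lock);
}
#endif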
diff --git a/stdlib/cxa_finalize.c b/stdlib/cxa_finalize.c
index bb49f36ddd..148d57f200 100644
--- a/stdlib/cxa_finalize.c
+++ b/stdlib/cxa_finalize.c
@@ -45,8 +45,8 @@ __cxa_finalize (void *d)
 	      /* We don't want to run this cleanup more than once.  */
 	      && (cxafn = f->func.cxa.fn,
 		  cxaarg = f->func.cxa.arg,
-		  ! atomic_compare_and_exchange_bool_acq (&f->flavor, ef_free,
-							  ef_cxa)))
+		  ! catomic_compare_and_exchange_bool_acq (&f->flavor, ef_free,
+							   ef_cxa)))
 	    {
 	      uint64_t check = __new_exitfn_called;
 
diff --git a/sysdeps/x86_64/bits/atomic.h b/sysdeps/x86_64/bits/atomic.h
index fc211964b2..e1981e94d6 100644
--- a/sysdeps/x86_64/bits/atomic.h
+++ b/sysdeps/x86_64/bits/atomic.h
@@ -18,6 +18,7 @@
    02111-1307 USA.  */
 
 #include <stdint.h>
+#include <tls.h>	/* For tcbhead_t.  */
 
 
 typedef int8_t atomic8_t;
@@ -85,6 +86,51 @@ typedef uintmax_t uatomic_max_t;
      ret; })
 
 
+#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;						      \
+    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
+		      "je 0f\n\t"					      \
+		      "lock\n"						      \
+		       "0:\tcmpxchgb %b2, %1"				      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
+			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
+     ret; })
+
+#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;						      \
+    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
+		      "je 0f\n\t"					      \
+		      "lock\n"						      \
+		       "0:\tcmpxchgw %w2, %1"				      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
+			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
+     ret; })
+
+#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;						      \
+    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
+		      "je 0f\n\t"					      \
+		      "lock\n"						      \
+		       "0:\tcmpxchgl %2, %1"				      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
+			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
+     ret; })
+
+#define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+  ({ __typeof (*mem) ret;						      \
+    __asm __volatile ("cmpl $0, %%fs:%P5\n\t"				      \
+		      "je 0f\n\t"					      \
+		      "lock\n"						      \
+		       "0:\tcmpxchgq %q2, %1"				      \
+		       : "=a" (ret), "=m" (*mem)			      \
+		       : "q" (newval), "m" (*mem), "0" (oldval),	      \
+			 "i" (offsetof (tcbhead_t, multiple_threads)));	      \
+     ret; })
+
+
 /* Note that we need no lock prefix.  */
 #define atomic_exchange_acq(mem, newvalue) \
   ({ __typeof (*mem) result;						      \
@@ -107,49 +153,76 @@ typedef uintmax_t uatomic_max_t;
      result; })
 
 
-#define atomic_exchange_and_add(mem, value) \
+#define __arch_exchange_and_add_body(lock, mem, value)			      \
   ({ __typeof (*mem) result;						      \
      if (sizeof (*mem) == 1)						      \
-       __asm __volatile (LOCK_PREFIX "xaddb %b0, %1"			      \
+       __asm __volatile (lock "xaddb %b0, %1"				      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" (value), "m" (*mem));			      \
+			 : "0" (value), "m" (*mem),			      \
+			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
      else if (sizeof (*mem) == 2)					      \
-       __asm __volatile (LOCK_PREFIX "xaddw %w0, %1"			      \
+       __asm __volatile (lock "xaddw %w0, %1"				      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" (value), "m" (*mem));			      \
+			 : "0" (value), "m" (*mem),			      \
+			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
      else if (sizeof (*mem) == 4)					      \
-       __asm __volatile (LOCK_PREFIX "xaddl %0, %1"			      \
+       __asm __volatile (lock "xaddl %0, %1"				      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" (value), "m" (*mem));			      \
+			 : "0" (value), "m" (*mem),			      \
+			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
      else								      \
-       __asm __volatile (LOCK_PREFIX "xaddq %q0, %1"			      \
+       __asm __volatile (lock "xaddq %q0, %1"				      \
 			 : "=r" (result), "=m" (*mem)			      \
-			 : "0" ((long) (value)), "m" (*mem));		      \
+			 : "0" ((long) (value)), "m" (*mem),		      \
+			   "i" (offsetof (tcbhead_t, multiple_threads)));     \
      result; })
 
+#define atomic_exchange_and_add(mem, value) \
+  __arch_exchange_and_add_body (LOCK_PREFIX, mem, value)
+
+#define __arch_exchange_and_add_cprefix \
+  "cmpl $0, %%fs:%P4\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_exchange_and_add(mem, value) \
+  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, mem, value)
+
+
+#define __arch_add_body(lock, pfx, mem, value)				      \
+  do {									      \
+    if (__builtin_constant_p (value) && (value) == 1)			      \
+      pfx##_increment (mem);						      \
+    else if (__builtin_constant_p (value) && (value) == -1)		      \
+      pfx##_decrement (mem);						      \
+    else if (sizeof (*mem) == 1)					      \
+      __asm __volatile (lock "addb %b1, %0"				      \
+			: "=m" (*mem)					      \
+			: "ir" (value), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 2)					      \
+      __asm __volatile (lock "addw %w1, %0"				      \
+			: "=m" (*mem)					      \
+			: "ir" (value), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else if (sizeof (*mem) == 4)					      \
+      __asm __volatile (lock "addl %1, %0"				      \
+			: "=m" (*mem)					      \
+			: "ir" (value), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
+    else								      \
+      __asm __volatile (lock "addq %q1, %0"				      \
+			: "=m" (*mem)					      \
+			: "ir" ((long) (value)), "m" (*mem),		      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
+  } while (0)
 
 #define atomic_add(mem, value) \
-  (void) ({ if (__builtin_constant_p (value) && (value) == 1)		      \
-	      atomic_increment (mem);					      \
-	    else if (__builtin_constant_p (value) && (value) == 1)	      \
-	      atomic_decrement (mem);					      \
-	    else if (sizeof (*mem) == 1)				      \
-	      __asm __volatile (LOCK_PREFIX "addb %b1, %0"		      \
-				: "=m" (*mem)				      \
-				: "ir" (value), "m" (*mem));		      \
-	    else if (sizeof (*mem) == 2)				      \
-	      __asm __volatile (LOCK_PREFIX "addw %w1, %0"		      \
-				: "=m" (*mem)				      \
-				: "ir" (value), "m" (*mem));		      \
-	    else if (sizeof (*mem) == 4)				      \
-	      __asm __volatile (LOCK_PREFIX "addl %1, %0"		      \
-				: "=m" (*mem)				      \
-				: "ir" (value), "m" (*mem));		      \
-	    else							      \
-	      __asm __volatile (LOCK_PREFIX "addq %q1, %0"		      \
-				: "=m" (*mem)				      \
-				: "ir" ((long) (value)), "m" (*mem));	      \
-	    })
+  __arch_add_body (LOCK_PREFIX, atomic, mem, value)
+
+#define __arch_add_cprefix \
+  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_add(mem, value) \
+  __arch_add_body (__arch_add_cprefix, catomic, mem, value)
 
 
 #define atomic_add_negative(mem, value) \
@@ -194,26 +267,38 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
 
-#define atomic_increment(mem) \
+#define __arch_increment_body(lock, mem) \
   do {									      \
     if (sizeof (*mem) == 1)						      \
-      __asm __volatile (LOCK_PREFIX "incb %b0"				      \
+      __asm __volatile (lock "incb %b0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (LOCK_PREFIX "incw %w0"				      \
+      __asm __volatile (lock "incw %w0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (LOCK_PREFIX "incl %0"				      \
+      __asm __volatile (lock "incl %0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else								      \
-      __asm __volatile (LOCK_PREFIX "incq %q0"				      \
+      __asm __volatile (lock "incq %q0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
   } while (0)
 
+#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, mem)
+
+#define __arch_increment_cprefix \
+  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_increment(mem) \
+  __arch_increment_body (__arch_increment_cprefix, mem)
+
 
 #define atomic_increment_and_test(mem) \
   ({ unsigned char __result;						      \
@@ -236,26 +321,38 @@ typedef uintmax_t uatomic_max_t;
      __result; })
 
 
-#define atomic_decrement(mem) \
+#define __arch_decrement_body(lock, mem) \
   do {									      \
     if (sizeof (*mem) == 1)						      \
-      __asm __volatile (LOCK_PREFIX "decb %b0"				      \
+      __asm __volatile (lock "decb %b0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (LOCK_PREFIX "decw %w0"				      \
+      __asm __volatile (lock "decw %w0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (LOCK_PREFIX "decl %0"				      \
+      __asm __volatile (lock "decl %0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else								      \
-      __asm __volatile (LOCK_PREFIX "decq %q0"				      \
+      __asm __volatile (lock "decq %q0"					      \
 			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
+			: "m" (*mem),					      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
   } while (0)
 
+#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, mem)
+
+#define __arch_decrement_cprefix \
+  "cmpl $0, %%fs:%P2\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_decrement(mem) \
+  __arch_decrement_body (__arch_decrement_cprefix, mem)
+
 
 #define atomic_decrement_and_test(mem) \
   ({ unsigned char __result;						      \
@@ -348,22 +445,33 @@ typedef uintmax_t uatomic_max_t;
   } while (0)
 
 
-#define atomic_or(mem, mask) \
+#define __arch_or_body(lock, mem, mask)					      \
   do {									      \
     if (sizeof (*mem) == 1)						      \
-      __asm __volatile (LOCK_PREFIX "orb %1, %b0"			      \
+      __asm __volatile (lock "orb %1, %b0"				      \
 			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
+			: "ir" (mask), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (LOCK_PREFIX "orw %1, %w0"			      \
+      __asm __volatile (lock "orw %1, %w0"				      \
 			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
+			: "ir" (mask), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (LOCK_PREFIX "orl %1, %0"			      \
+      __asm __volatile (lock "orl %1, %0"				      \
 			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
+			: "ir" (mask), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
     else								      \
-      __asm __volatile (LOCK_PREFIX "orq %1, %q0"			      \
+      __asm __volatile (lock "orq %1, %q0"				      \
 			: "=m" (*mem)					      \
-			: "ir" (mask), "m" (*mem));			      \
+			: "ir" (mask), "m" (*mem),			      \
+			  "i" (offsetof (tcbhead_t, multiple_threads)));      \
   } while (0)
+
+#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
+
+#define __arch_or_cprefix \
+  "cmpl $0, %%fs:%P3\n\tje 0f\n\tlock\n0:\t"
+
+#define catomic_or(mem, mask) __arch_or_body (__arch_or_cprefix, mem, mask)
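The `cmpl $0, %%fs:%Pn; je 0f; lock; 0:` sequence used throughout this file is the whole trick: when the TCB's multiple_threads field is zero, the je skips the one-byte lock prefix and the very same instruction runs unlocked; since labels emit no bytes, falling through makes the prefix bind to the instruction after `0:`. A standalone sketch that swaps the %fs-relative TCB field for an ordinary global flag:

/* Hypothetical stand-in for the TCB's multiple_threads field.  */
static int multiple_threads;

static inline void
cond_lock_incl (int *mem)
{
  __asm__ __volatile__ ("cmpl $0, %1\n\t"
			"je 0f\n\t"	/* Single-threaded: hop the prefix.  */
			"lock\n"	/* Prefix byte for the incl below.  */
			"0:\tincl %0"
			: "=m" (*mem)
			: "m" (multiple_threads), "m" (*mem));
}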