author     Jakub Jelinek <jakub@redhat.com>    2007-08-01 17:16:42 +0000
committer  Jakub Jelinek <jakub@redhat.com>    2007-08-01 17:16:42 +0000
commit     e08057b1ff24258dd7460ad81e84491f7a28b424 (patch)
tree       f63a12d52cbc1796013a84382fe25f57ac675204
parent     4baf42dd00e8cafc79e2a3c94ef8effa6ef0a921 (diff)
Updated to fedora-glibc-20070801T1703  [cvs/fedora-glibc-2_6_90-2]
-rw-r--r--  ChangeLog | 33
-rw-r--r--  fedora/branch.mk | 4
-rw-r--r--  fedora/glibc.spec.in | 537
-rw-r--r--  localedata/ChangeLog | 4
-rw-r--r--  localedata/tst-strptime.c | 2
-rw-r--r--  nptl/ChangeLog | 460
-rw-r--r--  nptl/allocatestack.c | 44
-rw-r--r--  nptl/descr.h | 4
-rw-r--r--  nptl/old_pthread_cond_broadcast.c | 4
-rw-r--r--  nptl/old_pthread_cond_signal.c | 4
-rw-r--r--  nptl/old_pthread_cond_timedwait.c | 4
-rw-r--r--  nptl/old_pthread_cond_wait.c | 4
-rw-r--r--  nptl/pthreadP.h | 2
-rw-r--r--  nptl/pthread_attr_init.c | 4
-rw-r--r--  nptl/pthread_barrier_destroy.c | 6
-rw-r--r--  nptl/pthread_barrier_init.c | 2
-rw-r--r--  nptl/pthread_barrier_wait.c | 12
-rw-r--r--  nptl/pthread_cond_broadcast.c | 6
-rw-r--r--  nptl/pthread_cond_destroy.c | 8
-rw-r--r--  nptl/pthread_cond_init.c | 2
-rw-r--r--  nptl/pthread_cond_signal.c | 4
-rw-r--r--  nptl/pthread_cond_timedwait.c | 10
-rw-r--r--  nptl/pthread_cond_wait.c | 14
-rw-r--r--  nptl/pthread_create.c | 10
-rw-r--r--  nptl/pthread_getattr_np.c | 4
-rw-r--r--  nptl/pthread_getschedparam.c | 4
-rw-r--r--  nptl/pthread_mutex_lock.c | 8
-rw-r--r--  nptl/pthread_mutex_timedlock.c | 19
-rw-r--r--  nptl/pthread_mutex_trylock.c | 8
-rw-r--r--  nptl/pthread_mutex_unlock.c | 10
-rw-r--r--  nptl/pthread_once.c | 8
-rw-r--r--  nptl/pthread_rwlock_rdlock.c | 13
-rw-r--r--  nptl/pthread_rwlock_timedrdlock.c | 12
-rw-r--r--  nptl/pthread_rwlock_timedwrlock.c | 12
-rw-r--r--  nptl/pthread_rwlock_tryrdlock.c | 4
-rw-r--r--  nptl/pthread_rwlock_trywrlock.c | 6
-rw-r--r--  nptl/pthread_rwlock_unlock.c | 14
-rw-r--r--  nptl/pthread_rwlock_wrlock.c | 11
-rw-r--r--  nptl/pthread_setschedparam.c | 4
-rw-r--r--  nptl/pthread_setschedprio.c | 4
-rw-r--r--  nptl/sem_close.c | 4
-rw-r--r--  nptl/sem_open.c | 6
-rw-r--r--  nptl/semaphoreP.h | 2
-rw-r--r--  nptl/sysdeps/pthread/bits/libc-lock.h | 8
-rw-r--r--  nptl/sysdeps/pthread/bits/stdio-lock.h | 4
-rw-r--r--  nptl/sysdeps/pthread/createthread.c | 8
-rw-r--r--  nptl/sysdeps/sparc/tls.h | 4
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h | 142
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/fork.c | 2
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/fork.h | 4
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S | 15
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S | 169
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S | 60
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S | 36
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S | 35
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S | 31
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S | 56
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S | 51
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S | 42
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S | 55
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S | 53
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S | 29
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S | 40
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S | 10
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S | 13
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S | 9
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S | 10
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h | 549
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S | 12
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h | 244
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/lowlevellock.c | 29
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c | 13
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h | 103
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c | 32
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c | 6
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/register-atfork.c | 12
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h | 158
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sem_post.c | 3
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sem_timedwait.c | 3
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sem_wait.c | 3
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h | 18
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h | 133
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c | 45
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c | 55
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c | 78
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c | 25
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c | 34
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c | 2
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/unregister-atfork.c | 4
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S | 15
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S | 161
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h | 555
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S | 75
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S | 35
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S | 31
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S | 27
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S | 31
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S | 39
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S | 11
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S | 29
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S | 30
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S | 29
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S | 25
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S | 29
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S | 10
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S | 10
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S | 7
-rw-r--r--  nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S | 10
-rw-r--r--  nptl/tpp.c | 10
-rw-r--r--  nptl/tst-rwlock14.c | 6
-rw-r--r--  stdio-common/tst-fmemopen2.c | 2
-rw-r--r--  stdlib/tst-strtod2.c | 2
-rw-r--r--  sysdeps/generic/unwind-dw2-fde-glibc.c | 7
-rw-r--r--  sysdeps/generic/unwind-dw2-fde.c | 15
-rw-r--r--  sysdeps/generic/unwind-dw2.c | 22
-rw-r--r--  sysdeps/unix/sysv/linux/getsysstats.c | 109
-rw-r--r--  sysdeps/unix/sysv/linux/i386/posix_fallocate.c | 58
-rw-r--r--  sysdeps/unix/sysv/linux/i386/posix_fallocate64.c | 61
-rw-r--r--  sysdeps/unix/sysv/linux/i386/syscalls.list | 1
-rw-r--r--  sysdeps/unix/sysv/linux/posix_fallocate.c | 2
120 files changed, 2671 insertions, 2567 deletions
diff --git a/ChangeLog b/ChangeLog
index b10c3aa38d..816d9350db 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,38 @@
+2007-08-01  Jakub Jelinek  <jakub@redhat.com>
+
+	* sysdeps/unix/sysv/linux/i386/syscalls.list (fallocate): Add fallocate
+	syscall as __fallocate64.
+	* sysdeps/unix/sysv/linux/i386/posix_fallocate.c: New file.
+	* sysdeps/unix/sysv/linux/i386/posix_fallocate64.c: New file.
+
+2007-07-30  Jakub Jelinek  <jakub@redhat.com>
+
+	* stdlib/tst-strtod2.c (do_test): Use %tu in fmt string for ptrdiff_t
+	value.
+
+	* stdio-common/tst-fmemopen2.c (do_test): Avoid fmt string warning
+	if off_t is different rank from size_t.
+
+	* sysdeps/generic/unwind-dw2.c (extract_cie_info, execute_cfa_program,
+	uw_frame_state_for): Avoid type punning warnings.
+	* sysdeps/generic/unwind-dw2-fde-glibc.c
+	(_Unwind_IteratePhdrCallback): Likewise.
+	* sysdeps/generic/unwind-dw2-fde.c (_Unwind_Find_FDE): Likewise.
+	(binary_search_single_encoding_fdes, binary_search_mixed_encoding_fdes,
+	get_cie_encoding, linear_search_fdes): Don't mix char and unsigned char
+	pointers.
+
+2007-07-31  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/unix/sysv/linux/getsysstats.c (__get_nprocs_conf): Count
+	total processors using sysfs.
+	(__get_nprocs): Use sysfs to determine which processors are online.
+
 2007-07-31  Jakub Jelinek  <jakub@redhat.com>
 
+	* sysdeps/unix/sysv/linux/posix_fallocate.c (posix_fallocate): Fix
+	syscall arguments count.
+
 	* stdio-common/tfformat.c (sprint_doubles): Add 12 new tests.
 
 2007-07-30  Roland McGrath  <roland@redhat.com>
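
The "avoid type punning warnings" entries above refer to GCC aliasing diagnostics that fire when an object's bytes are reinterpreted through an incompatibly typed pointer.  A standards-clean way to do such a reinterpretation is to copy the bytes with memcpy; the short sketch below only illustrates that general technique and is not the code used in the unwind-dw2 files:

  #include <string.h>

  /* Reinterpret the bytes of a float as a 32-bit integer without
     dereferencing a cast pointer (which triggers -Wstrict-aliasing).
     Assumes sizeof (float) == sizeof (unsigned int).  */
  static unsigned int
  float_bits (float f)
  {
    unsigned int u;
    memcpy (&u, &f, sizeof u);	/* well defined; compilers turn this into a move */
    return u;
  }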
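
For the getsysstats.c change: on Linux, /sys/devices/system/cpu contains one cpuN directory per configured processor, and /sys/devices/system/cpu/online lists the online ones as a range list such as "0-3,6".  The sketch below shows the idea of counting such a list; it is only an illustration with made-up helper names, not the glibc implementation:

  #include <stdio.h>

  /* Count CPUs in a sysfs range list such as "0-3,6".  Illustrative only.  */
  static int
  count_range_list (const char *s)
  {
    int count = 0;
    while (*s != '\0' && *s != '\n')
      {
        int first, last;
        if (sscanf (s, "%d-%d", &first, &last) == 2)
          count += last - first + 1;
        else if (sscanf (s, "%d", &first) == 1)
          count += 1;
        while (*s != ',' && *s != '\0' && *s != '\n')
          ++s;
        if (*s == ',')
          ++s;
      }
    return count;
  }

  int
  main (void)
  {
    char buf[256];
    FILE *fp = fopen ("/sys/devices/system/cpu/online", "r");
    if (fp != NULL && fgets (buf, sizeof buf, fp) != NULL)
      printf ("online CPUs: %d\n", count_range_list (buf));
    if (fp != NULL)
      fclose (fp);
    return 0;
  }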
diff --git a/fedora/branch.mk b/fedora/branch.mk
index 6725eedabf..ee9e4499c0 100644
--- a/fedora/branch.mk
+++ b/fedora/branch.mk
@@ -3,5 +3,5 @@ glibc-branch := fedora
 glibc-base := HEAD
 DIST_BRANCH := devel
 COLLECTION := dist-fc7
-fedora-sync-date := 2007-07-31 16:24 UTC
-fedora-sync-tag := fedora-glibc-20070731T1624
+fedora-sync-date := 2007-08-01 17:03 UTC
+fedora-sync-tag := fedora-glibc-20070801T1703
diff --git a/fedora/glibc.spec.in b/fedora/glibc.spec.in
index d228db7dbd..ae713354b2 100644
--- a/fedora/glibc.spec.in
+++ b/fedora/glibc.spec.in
@@ -1,4 +1,4 @@
-%define glibcrelease 1
+%define glibcrelease 2
 %define auxarches i586 i686 athlon sparcv9 alphaev6
 %define xenarches i686 athlon
 %ifarch %{xenarches}
@@ -148,6 +148,7 @@ Obsoletes: %{name}-headers(i386)
 Obsoletes: libc-debug, libc-headers, libc-devel
 Prereq: kernel-headers
 Requires: kernel-headers >= 2.2.1, %{name} = %{version}-%{release}
+BuildRequires: kernel-headers >= 2.6.22
 Autoreq: true
 
 %description headers
@@ -243,532 +244,6 @@ package or when debugging this package.
 %endif
 %endif
 
-# Hack till glibc-kernheaders get updated, argh
-mkdir -p override_headers/linux
-cat > override_headers/linux/version.h <<EOF
-#define UTS_RELEASE "2.6.9"
-#define LINUX_VERSION_CODE 132617
-#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
-EOF
-mkdir -p override_headers/asm
-cat > override_headers/asm/unistd.h <<EOF
-#ifndef _HACK_ASM_UNISTD_H
-#include_next <asm/unistd.h>
-%ifarch alpha
-#ifndef __NR_stat64
-#define __NR_stat64			425
-#define __NR_lstat64			426
-#define __NR_fstat64			427
-#endif
-#ifndef __NR_mq_open
-#define __NR_mq_open			432
-#define __NR_mq_unlink			433
-#define __NR_mq_timedsend		434
-#define __NR_mq_timedreceive		435
-#define __NR_mq_notify			436
-#define __NR_mq_getsetattr		437
-#endif
-#ifndef __NR_waitid
-#define __NR_waitid			438
-#endif
-#ifndef __NR_inotify_init
-#define __NR_inotify_init		444
-#define __NR_inotify_add_watch		445
-#define __NR_inotify_rm_watch		446
-#endif
-%endif
-%ifarch %{ix86}
-#ifndef __NR_mq_open
-#define __NR_mq_open 		277
-#define __NR_mq_unlink		(__NR_mq_open+1)
-#define __NR_mq_timedsend	(__NR_mq_open+2)
-#define __NR_mq_timedreceive	(__NR_mq_open+3)
-#define __NR_mq_notify		(__NR_mq_open+4)
-#define __NR_mq_getsetattr	(__NR_mq_open+5)
-#endif
-#ifndef __NR_waitid
-#define __NR_waitid		284
-#endif
-#ifndef __NR_inotify_init
-#define __NR_inotify_init	291
-#define __NR_inotify_add_watch	292
-#define __NR_inotify_rm_watch	293
-#endif
-#ifndef __NR_openat
-#define __NR_openat		295
-#define __NR_mkdirat		296
-#define __NR_mknodat		297
-#define __NR_fchownat		298
-#define __NR_futimesat		299
-#define __NR_unlinkat		301
-#define __NR_renameat		302
-#define __NR_linkat		303
-#define __NR_symlinkat		304
-#define __NR_readlinkat		305
-#define __NR_fchmodat		306
-#define __NR_faccessat		307
-#endif
-#ifndef __NR_fstatat64
-#define __NR_fstatat64		300
-#endif
-#ifndef __NR_pselect6
-#define __NR_pselect6		308
-#define __NR_ppoll		309
-#endif
-#ifndef __NR_unshare
-#define __NR_unshare		310
-#endif
-#ifndef __NR_set_robust_list
-#define __NR_set_robust_list	311
-#define __NR_get_robust_list	312
-#endif
-#ifndef __NR_splice
-#define __NR_splice		313
-#endif
-#ifndef __NR_sync_file_range
-#define __NR_sync_file_range	314
-#endif
-#ifndef __NR_tee
-#define __NR_tee		315
-#endif
-#ifndef __NR_vmsplice
-#define __NR_vmsplice		316
-#endif
-%endif
-%ifarch ia64
-#ifndef __NR_timer_create
-#define __NR_timer_create	1248
-#define __NR_timer_settime	1249
-#define __NR_timer_gettime	1250
-#define __NR_timer_getoverrun	1251
-#define __NR_timer_delete	1252
-#define __NR_clock_settime	1253
-#define __NR_clock_gettime	1254
-#define __NR_clock_getres	1255
-#define __NR_clock_nanosleep	1256
-#endif
-#ifndef __NR_mq_open
-#define __NR_mq_open		1262
-#define __NR_mq_unlink		1263
-#define __NR_mq_timedsend	1264
-#define __NR_mq_timedreceive	1265
-#define __NR_mq_notify		1266
-#define __NR_mq_getsetattr	1267
-#endif
-#ifndef __NR_waitid
-#define __NR_waitid		1270
-#endif
-#ifndef __NR_inotify_init
-#define __NR_inotify_init	1277
-#define __NR_inotify_add_watch	1278
-#define __NR_inotify_rm_watch	1279
-#endif
-#ifndef __NR_openat
-#define __NR_openat		1281
-#define __NR_mkdirat		1282
-#define __NR_mknodat		1283
-#define __NR_fchownat		1284
-#define __NR_futimesat		1285
-#define __NR_newfstatat		1286
-#define __NR_unlinkat		1287
-#define __NR_renameat		1288
-#define __NR_linkat		1289
-#define __NR_symlinkat		1290
-#define __NR_readlinkat		1291
-#define __NR_fchmodat		1292
-#define __NR_faccessat		1293
-#endif
-#if 0
-#ifndef __NR_pselect6
-#define __NR_pselect6		1294
-#define __NR_ppoll		1295
-#endif
-#endif
-#ifndef __NR_unshare
-#define __NR_unshare		1296
-#endif
-#ifndef __NR_splice
-#define __NR_splice		1297
-#endif
-#ifndef __NR_set_robust_list
-#define __NR_set_robust_list	1298
-#define __NR_get_robust_list	1299
-#endif
-#ifndef __NR_sync_file_range
-#define __NR_sync_file_range	1300
-#endif
-#ifndef __NR_tee
-#define __NR_tee		1301
-#endif
-#ifndef __NR_vmsplice
-#define __NR_vmsplice		1302
-#endif
-%endif
-%ifarch ppc
-#ifndef __NR_utimes
-#define __NR_utimes		251
-#endif
-#ifndef __NR_statfs64
-#define __NR_statfs64		252
-#define __NR_fstatfs64		253
-#endif
-#ifndef __NR_fadvise64_64
-#define __NR_fadvise64_64	254
-#endif
-#ifndef __NR_mq_open
-#define __NR_mq_open		262
-#define __NR_mq_unlink		263
-#define __NR_mq_timedsend	264
-#define __NR_mq_timedreceive	265
-#define __NR_mq_notify		266
-#define __NR_mq_getsetattr	267
-#endif
-#ifndef __NR_waitid
-#define __NR_waitid		272
-#endif
-#ifndef __NR_inotify_init
-#define __NR_inotify_init	275
-#define __NR_inotify_add_watch	276
-#define __NR_inotify_rm_watch	277
-#endif
-#ifndef __NR_pselect6
-#define __NR_pselect6		280
-#define __NR_ppoll		281
-#endif
-#ifndef __NR_unshare
-#define __NR_unshare		282
-#endif
-#ifndef __NR_splice
-#define __NR_splice		283
-#endif
-#ifndef __NR_tee
-#define __NR_tee		284
-#endif
-#ifndef __NR_vmsplice
-#define __NR_vmsplice		285
-#endif
-#ifndef __NR_openat
-#define __NR_openat		286
-#define __NR_mkdirat		287
-#define __NR_mknodat		288
-#define __NR_fchownat		289
-#define __NR_futimesat		290
-#define __NR_fstatat64		291
-#define __NR_unlinkat		292
-#define __NR_renameat		293
-#define __NR_linkat		294
-#define __NR_symlinkat		295
-#define __NR_readlinkat		296
-#define __NR_fchmodat		297
-#define __NR_faccessat		298
-#endif
-%endif
-%ifarch ppc64
-#ifndef __NR_utimes
-#define __NR_utimes		251
-#endif
-#ifndef __NR_mq_open
-#define __NR_mq_open		262
-#define __NR_mq_unlink		263
-#define __NR_mq_timedsend	264
-#define __NR_mq_timedreceive	265
-#define __NR_mq_notify		266
-#define __NR_mq_getsetattr	267
-#endif
-#ifndef __NR_waitid
-#define __NR_waitid		272
-#endif
-#ifndef __NR_inotify_init
-#define __NR_inotify_init	275
-#define __NR_inotify_add_watch	276
-#define __NR_inotify_rm_watch	277
-#endif
-#ifndef __NR_pselect6
-#define __NR_pselect6		280
-#define __NR_ppoll		281
-#endif
-#ifndef __NR_unshare
-#define __NR_unshare		282
-#endif
-#ifndef __NR_splice
-#define __NR_splice		283
-#endif
-#ifndef __NR_tee
-#define __NR_tee		284
-#endif
-#ifndef __NR_vmsplice
-#define __NR_vmsplice		285
-#endif
-#ifndef __NR_openat
-#define __NR_openat		286
-#define __NR_mkdirat		287
-#define __NR_mknodat		288
-#define __NR_fchownat		289
-#define __NR_futimesat		290
-#define __NR_newfstatat		291
-#define __NR_unlinkat		292
-#define __NR_renameat		293
-#define __NR_linkat		294
-#define __NR_symlinkat		295
-#define __NR_readlinkat		296
-#define __NR_fchmodat		297
-#define __NR_faccessat		298
-#endif
-%endif
-%ifarch s390
-#ifndef __NR_timer_create
-#define __NR_timer_create	254
-#define __NR_timer_settime	(__NR_timer_create+1)
-#define __NR_timer_gettime	(__NR_timer_create+2)
-#define __NR_timer_getoverrun	(__NR_timer_create+3)
-#define __NR_timer_delete	(__NR_timer_create+4)
-#define __NR_clock_settime	(__NR_timer_create+5)
-#define __NR_clock_gettime	(__NR_timer_create+6)
-#define __NR_clock_getres	(__NR_timer_create+7)
-#define __NR_clock_nanosleep	(__NR_timer_create+8)
-#endif
-#ifndef __NR_fadvise64_64
-#define __NR_fadvise64_64	264
-#endif
-#ifndef __NR_statfs64
-#define __NR_statfs64		265
-#define __NR_fstatfs64		266
-#endif
-#ifndef __NR_mq_open
-#define __NR_mq_open		271
-#define __NR_mq_unlink		272
-#define __NR_mq_timedsend	273
-#define __NR_mq_timedreceive	274
-#define __NR_mq_notify		275
-#define __NR_mq_getsetattr	276
-#endif
-#ifndef __NR_waitid
-#define __NR_waitid		281
-#endif
-#ifndef __NR_inotify_init
-#define __NR_inotify_init	284
-#define __NR_inotify_add_watch	285
-#define __NR_inotify_rm_watch	286
-#endif
-#ifndef __NR_openat
-#define __NR_openat		288
-#define __NR_mkdirat		289
-#define __NR_mknodat		290
-#define __NR_fchownat		291
-#define __NR_futimesat		292
-#define __NR_fstatat64		293
-#define __NR_unlinkat		294
-#define __NR_renameat		295
-#define __NR_linkat		296
-#define __NR_symlinkat		297
-#define __NR_readlinkat		298
-#define __NR_fchmodat		299
-#define __NR_faccessat		300
-#endif
-#ifndef __NR_pselect6
-#define __NR_pselect6		301
-#define __NR_ppoll		302
-#endif
-#ifndef __NR_unshare
-#define __NR_unshare		303
-#endif
-%endif
-%ifarch s390x
-#ifndef __NR_timer_create
-#define __NR_timer_create	254
-#define __NR_timer_settime	(__NR_timer_create+1)
-#define __NR_timer_gettime	(__NR_timer_create+2)
-#define __NR_timer_getoverrun	(__NR_timer_create+3)
-#define __NR_timer_delete	(__NR_timer_create+4)
-#define __NR_clock_settime	(__NR_timer_create+5)
-#define __NR_clock_gettime	(__NR_timer_create+6)
-#define __NR_clock_getres	(__NR_timer_create+7)
-#define __NR_clock_nanosleep	(__NR_timer_create+8)
-#endif
-#ifndef __NR_mq_open
-#define __NR_mq_open		271
-#define __NR_mq_unlink		272
-#define __NR_mq_timedsend	273
-#define __NR_mq_timedreceive	274
-#define __NR_mq_notify		275
-#define __NR_mq_getsetattr	276
-#endif
-#ifndef __NR_waitid
-#define __NR_waitid		281
-#endif
-#ifndef __NR_inotify_init
-#define __NR_inotify_init	284
-#define __NR_inotify_add_watch	285
-#define __NR_inotify_rm_watch	286
-#endif
-#ifndef __NR_openat
-#define __NR_openat		288
-#define __NR_mkdirat		289
-#define __NR_mknodat		290
-#define __NR_fchownat		291
-#define __NR_futimesat		292
-#define __NR_newfstatat		293
-#define __NR_unlinkat		294
-#define __NR_renameat		295
-#define __NR_linkat		296
-#define __NR_symlinkat		297
-#define __NR_readlinkat		298
-#define __NR_fchmodat		299
-#define __NR_faccessat		300
-#endif
-#ifndef __NR_pselect6
-#define __NR_pselect6		301
-#define __NR_ppoll		302
-#endif
-#ifndef __NR_unshare
-#define __NR_unshare		303
-#endif
-%endif
-%ifarch sparc sparcv9 sparc64
-#ifndef __NR_mq_open
-#define __NR_mq_open		273
-#define __NR_mq_unlink		274
-#define __NR_mq_timedsend	275
-#define __NR_mq_timedreceive	276
-#define __NR_mq_notify		277
-#define __NR_mq_getsetattr	278
-#endif
-#ifndef __NR_waitid
-#define __NR_waitid		279
-#endif
-#ifndef __NR_stat64
-#define __NR_fstat64		63
-#define __NR_lstat64		132
-#define __NR_stat64		139
-#endif
-#ifndef __NR_inotify_init
-#define __NR_inotify_init	151
-#define __NR_inotify_add_watch	152
-#define __NR_inotify_rm_watch	156
-#endif
-#ifndef __NR_openat
-#define __NR_openat		284
-#define __NR_mkdirat		285
-#define __NR_mknodat		286
-#define __NR_fchownat		287
-#define __NR_futimesat		288
-#define __NR_newfstatat		289
-#define __NR_unlinkat		290
-#define __NR_renameat		291
-#define __NR_linkat		292
-#define __NR_symlinkat		293
-#define __NR_readlinkat		294
-#define __NR_fchmodat		295
-#define __NR_faccessat		296
-#endif
-#ifndef __NR_pselect6
-#define __NR_pselect6		297
-#define __NR_ppoll		298
-#endif
-#ifndef __NR_unshare
-#define __NR_unshare		299
-#endif
-%endif
-%ifarch x86_64
-#ifndef __NR_mq_open
-#define __NR_mq_open		240
-#define __NR_mq_unlink		241
-#define __NR_mq_timedsend	242
-#define __NR_mq_timedreceive	243
-#define __NR_mq_notify		244
-#define __NR_mq_getsetattr	245
-#endif
-#ifndef __NR_waitid
-#define __NR_waitid		247
-#endif
-#ifndef __NR_inotify_init
-#define __NR_inotify_init	253
-#define __NR_inotify_add_watch	254
-#define __NR_inotify_rm_watch	255
-#endif
-#ifndef __NR_openat
-#define __NR_openat		257
-#define __NR_mkdirat		258
-#define __NR_mknodat		259
-#define __NR_fchownat		260
-#define __NR_futimesat		261
-#define __NR_newfstatat		262
-#define __NR_unlinkat		263
-#define __NR_renameat		264
-#define __NR_linkat		265
-#define __NR_symlinkat		266
-#define __NR_readlinkat		267
-#define __NR_fchmodat		268
-#define __NR_faccessat		269
-#endif
-#ifndef __NR_pselect6
-#define __NR_pselect6		270
-#define __NR_ppoll		271
-#endif
-#ifndef __NR_unshare
-#define __NR_unshare		272
-#endif
-#ifndef __NR_set_robust_list
-#define __NR_set_robust_list	273
-#define __NR_get_robust_list	274
-#endif
-#ifndef __NR_splice
-#define __NR_splice		275
-#endif
-#ifndef __NR_tee
-#define __NR_tee		276
-#endif
-#ifndef __NR_sync_file_range
-#define __NR_sync_file_range	277
-#endif
-#ifndef __NR_vmsplice
-#define __NR_vmsplice		278
-#endif
-%endif
-#endif
-EOF
-cat > override_headers/asm/errno.h <<EOF
-#ifndef _HACK_ASM_ERRNO_H
-#include_next <asm/errno.h>
-%ifarch alpha
-#ifndef ENOKEY
-#define ENOKEY		132
-#define EKEYEXPIRED	133
-#define EKEYREVOKED	134
-#define EKEYREJECTED	135
-#endif
-#ifndef EOWNERDEAD
-#define EOWNERDEAD	136
-#define ENOTRECOVERABLE	137
-#endif
-%endif
-%ifarch %{ix86} ia64 ppc ppc64 s390 s390x x86_64
-#ifndef ENOKEY
-#define ENOKEY		126
-#define EKEYEXPIRED	127
-#define EKEYREVOKED	128
-#define EKEYREJECTED	129
-#endif
-#ifndef EOWNERDEAD
-#define EOWNERDEAD	130
-#define ENOTRECOVERABLE	131
-#endif
-%endif
-%ifarch sparc sparcv9 sparc64
-#ifndef ENOKEY
-#define ENOKEY		128
-#define EKEYEXPIRED	129
-#define EKEYREVOKED	130
-#define EKEYREJECTED	131
-#endif
-#ifndef EOWNERDEAD
-#define EOWNERDEAD	132
-#define ENOTRECOVERABLE	133
-#endif
-%endif
-#endif
-EOF
-
 # A lot of programs still misuse memcpy when they have to use
 # memmove. The memcpy implementation below is not tolerant at
 # all.
@@ -840,7 +315,7 @@ mkdir $builddir ; cd $builddir
 build_CFLAGS="$BuildFlags -g -O3 $*"
 CC="$GCC" CXX="$GXX" CFLAGS="$build_CFLAGS" ../configure --prefix=%{_prefix} \
 	--enable-add-ons=nptl$AddOns --without-cvs $EnableKernel \
-	--with-headers=`cd ..; pwd`/override_headers:%{_prefix}/include --enable-bind-now \
+	--with-headers=%{_prefix}/include --enable-bind-now \
 	--with-tls --with-__thread --build %{nptl_target_cpu}-redhat-linux \
 	--host %{nptl_target_cpu}-redhat-linux \
 	--disable-profile
@@ -1566,6 +1041,12 @@ rm -f *.filelist*
 %endif
 
 %changelog
+* Wed Aug  1 2007 Jakub Jelinek <jakub@redhat.com> 2.6.90-2
+- make aux-cache purely optional performance optimization in ldconfig,
+  don't issue any errors if it can't be created (#250430)
+- remove override_headers hack, BuildRequire >= 2.6.22 kernel-headers
+  and rely on its content
+
 * Tue Jul 31 2007 Jakub Jelinek <jakub@redhat.com> 2.6.90-1
 - update to trunk
   - private futex optimizations
diff --git a/localedata/ChangeLog b/localedata/ChangeLog
index 7a7a62fa2f..8eae5da34f 100644
--- a/localedata/ChangeLog
+++ b/localedata/ChangeLog
@@ -1,3 +1,7 @@
+2007-07-30  Jakub Jelinek  <jakub@redhat.com>
+
+	* tst-strptime.c (do_test): Use %tu in fmt string for ptrdiff_t value.
+
 2007-07-16  Jakub Jelinek  <jakub@redhat.com>
 
 	* locales/ar_SA (d_t_fmt, d_fmt, t_fmt, t_fmt_ampm): Replace %.1d
diff --git a/localedata/tst-strptime.c b/localedata/tst-strptime.c
index 4ee4fcb78d..b5ab232d7e 100644
--- a/localedata/tst-strptime.c
+++ b/localedata/tst-strptime.c
@@ -15,7 +15,7 @@ do_test (void)
   static const char s[] = "\
 \x54\x68\xb8\x6e\x67\x20\x6d\xad\xea\x69\x20\x6d\xe9\x74";
   char *r = strptime (s, "%b", &tm);
-  printf ("r = %p, r-s = %ju, tm.tm_mon = %d\n", r, r - s, tm.tm_mon);
+  printf ("r = %p, r-s = %tu, tm.tm_mon = %d\n", r, r - s, tm.tm_mon);
   return r == NULL || r - s != 14 || tm.tm_mon != 10;
 }
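
The %ju -> %tu change in this test is a plain C99 format fix: r - s has type ptrdiff_t, and the 't' length modifier is the one that matches it, while %ju would require casting the value to uintmax_t.  A minimal standalone example of the same idiom (not glibc code):

  #include <stdio.h>
  #include <stddef.h>

  int
  main (void)
  {
    const char s[] = "hello world";
    const char *r = s + 6;
    ptrdiff_t off = r - s;
    /* %td prints a ptrdiff_t; %tu prints the corresponding unsigned type,
       which is fine here because the difference is nonnegative.  */
    printf ("offset = %tu\n", off);
    return 0;
  }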
 
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index a4740c958d..4e7c7d79cc 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,463 @@
+2007-07-31  Anton Blanchard  <anton@samba.org>
+
+	* sysdeps/unix/sysv/linux/powerpc/sem_post.c (__new_sem_post):
+	Use __asm __volatile (__lll_acq_instr ::: "memory") instead of
+	atomic_full_barrier.
+
+2007-07-31  Jakub Jelinek  <jakub@redhat.com>
+
+	* allocatestack.c (stack_cache_lock): Change type to int.
+	(get_cached_stack, allocate_stack, __deallocate_stack,
+	__make_stacks_executable, __find_thread_by_id, __nptl_setxid,
+	__pthread_init_static_tls, __wait_lookup_done): Add LLL_PRIVATE
+	as second argument to lll_lock and lll_unlock macros on
+	stack_cache_lock.
+	* pthread_create.c (__find_in_stack_list): Likewise.
+	(start_thread): Similarly with pd->lock.  Use lll_robust_dead
+	macro instead of lll_robust_mutex_dead, pass LLL_SHARED to it
+	as second argument.
+	* descr.h (struct pthread): Change lock and setxid_futex field
+	type to int.
+	* old_pthread_cond_broadcast.c (__pthread_cond_broadcast_2_0): Use
+	LLL_LOCK_INITIALIZER instead of LLL_MUTEX_LOCK_INITIALIZER.
+	* old_pthread_cond_signal.c (__pthread_cond_signal_2_0): Likewise.
+	* old_pthread_cond_timedwait.c (__pthread_cond_timedwait_2_0):
+	Likewise.
+	* old_pthread_cond_wait.c (__pthread_cond_wait_2_0): Likewise.
+	* pthread_cond_init.c (__pthread_cond_init): Likewise.
+	* pthreadP.h (__attr_list_lock): Change type to int.
+	* pthread_attr_init.c (__attr_list_lock): Likewise.
+	* pthread_barrier_destroy.c (pthread_barrier_destroy): Pass
+	ibarrier->private ^ FUTEX_PRIVATE_FLAG as second argument to
+	lll_{,un}lock.
+	* pthread_barrier_wait.c (pthread_barrier_wait): Likewise and
+	also for lll_futex_{wake,wait}.
+	* pthread_barrier_init.c (pthread_barrier_init): Make iattr
+	a pointer to const.
+	* pthread_cond_broadcast.c (__pthread_cond_broadcast): Pass
+	LLL_SHARED as second argument to lll_{,un}lock.
+	* pthread_cond_destroy.c (__pthread_cond_destroy): Likewise.
+	* pthread_cond_signal.c (__pthread_cond_singal): Likewise.
+	* pthread_cond_timedwait.c (__pthread_cond_timedwait): Likewise.
+	* pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait):
+	Likewise.
+	* pthread_getattr_np.c (pthread_getattr_np): Add LLL_PRIVATE
+	as second argument to lll_{,un}lock macros on pd->lock.
+	* pthread_getschedparam.c (__pthread_getschedparam): Likewise.
+	* pthread_setschedparam.c (__pthread_setschedparam): Likewise.
+	* pthread_setschedprio.c (pthread_setschedprio): Likewise.
+	* tpp.c (__pthread_tpp_change_priority, __pthread_current_priority):
+	Likewise.
+	* sysdeps/pthread/createthread.c (do_clone, create_thread):
+	Likewise.
+	* pthread_once.c (once_lock): Change type to int.
+	(__pthread_once): Pass LLL_PRIVATE as second argument to
+	lll_{,un}lock macros on once_lock.
+	* pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Use
+	lll_{,un}lock macros instead of lll_mutex_{,un}lock, pass
+	rwlock->__data.__shared as second argument to them and similarly
+	for lll_futex_w*.
+	* pthread_rwlock_timedrdlock.c (pthread_rwlock_timedrdlock):
+	Likewise.
+	* pthread_rwlock_timedwrlock.c (pthread_rwlock_timedwrlock):
+	Likewise.
+	* pthread_rwlock_tryrdlock.c (__pthread_rwlock_tryrdlock): Likewise.
+	* pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Likewise.
+	* pthread_rwlock_unlock.c (__pthread_rwlock_unlock): Likewise.
+	* pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock): Likewise.
+	* sem_close.c (sem_close): Pass LLL_PRIVATE as second argument
+	to lll_{,un}lock macros on __sem_mappings_lock.
+	* sem_open.c (check_add_mapping): Likewise.
+	(__sem_mappings_lock): Change type to int.
+	* semaphoreP.h (__sem_mappings_lock): Likewise.
+	* pthread_mutex_lock.c (LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK,
+	LLL_ROBUST_MUTEX_LOCK): Use lll_{,try,robust_}lock macros
+	instead of lll_*mutex_*, pass LLL_SHARED as last
+	argument.
+	(__pthread_mutex_lock): Use lll_unlock instead of lll_mutex_unlock,
+	pass LLL_SHARED as last argument.
+	* sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK,
+	LLL_MUTEX_TRYLOCK, LLL_ROBUST_MUTEX_LOCK): Use
+	lll_{cond_,cond_try,robust_cond}lock macros instead of lll_*mutex_*,
+	pass LLL_SHARED as last argument.
+	* pthread_mutex_timedlock.c (pthread_mutex_timedlock): Use
+	lll_{timed,try,robust_timed,un}lock instead of lll_*mutex*, pass
+	LLL_SHARED as last argument.
+	* pthread_mutex_trylock.c (__pthread_mutex_trylock): Similarly.
+	* pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt):
+	Similarly.
+	* sysdeps/pthread/bits/libc-lock.h (__libc_lock_lock,
+	__libc_lock_lock_recursive, __libc_lock_unlock,
+	__libc_lock_unlock_recursive): Pass LLL_PRIVATE as second
+	argument to lll_{,un}lock.
+	* sysdeps/pthread/bits/stdio-lock.h (_IO_lock_lock,
+	_IO_lock_unlock): Likewise.
+	* sysdeps/unix/sysv/linux/fork.c (__libc_fork): Don't use
+	compound literal.
+	* sysdeps/unix/sysv/linux/unregister-atfork.c (__unregister_atfork):
+	Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on
+	__fork_lock.
+	* sysdeps/unix/sysv/linux/register-atfork.c (__register_atfork,
+	free_mem): Likewise.
+	(__fork_lock): Change type to int.
+	* sysdeps/unix/sysv/linux/fork.h (__fork_lock): Likewise.
+	* sysdeps/unix/sysv/linux/sem_post.c (__new_sem_post): Pass
+	isem->private ^ FUTEX_PRIVATE_FLAG as second argument to
+	lll_futex_wake.
+	* sysdeps/unix/sysv/linux/sem_timedwait.c (sem_timedwait): Likewise.
+	* sysdeps/unix/sysv/linux/sem_wait.c (__new_sem_wait): Likewise.
+	* sysdeps/unix/sysv/linux/lowlevellock.c (__lll_lock_wait_private):
+	New function.
+	(__lll_lock_wait, __lll_timedlock_wait): Add private argument and
+	pass it through to lll_futex_*wait, only compile in when
+	IS_IN_libpthread.
+	* sysdeps/unix/sysv/linux/lowlevelrobustlock.c
+	(__lll_robust_lock_wait, __lll_robust_timedlock_wait): Add private
+	argument and pass it through to lll_futex_*wait.
+	* sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Renamed all
+	lll_mutex_* resp. lll_robust_mutex_* macros to lll_* resp.
+	lll_robust_*.  Renamed all __lll_mutex_* resp. __lll_robust_mutex_*
+	inline functions to __lll_* resp. __lll_robust_*.
+	(LLL_MUTEX_LOCK_INITIALIZER): Remove.
+	(lll_mutex_dead): Add private argument.
+	(__lll_lock_wait_private): New prototype.
+	(__lll_lock_wait, __lll_robust_lock_wait, __lll_lock_timedwait,
+	__lll_robust_lock_timedwait): Add private argument to prototypes.
+	(__lll_lock): Add private argument, if it is constant LLL_PRIVATE,
+	call __lll_lock_wait_private, otherwise pass private to
+	__lll_lock_wait.
+	(__lll_robust_lock, __lll_cond_lock, __lll_timedlock,
+	__lll_robust_timedlock): Add private argument, pass it to
+	__lll_*wait functions.
+	(__lll_unlock): Add private argument, if it is constant LLL_PRIVATE,
+	call __lll_unlock_wake_private, otherwise pass private to
+	__lll_unlock_wake.
+	(__lll_robust_unlock): Add private argument, pass it to
+	__lll_robust_unlock_wake.
+	(lll_lock, lll_robust_lock, lll_cond_lock, lll_timedlock,
+	lll_robust_timedlock, lll_unlock, lll_robust_unlock): Add private
+	argument, pass it through to __lll_* inline function.
+	(__lll_mutex_unlock_force, lll_mutex_unlock_force): Remove.
+	(lll_lock_t): Remove.
+	(__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
+	__lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
+	lll_cond_wake, lll_cond_broadcast): Remove.
+	* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
+	* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
+	* sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
+	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
+	* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Allow including
+	the header from assembler.  Renamed all lll_mutex_* resp.
+	lll_robust_mutex_* macros to lll_* resp. lll_robust_*.
+	(LOCK, FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP,
+	FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
+	(LLL_MUTEX_LOCK_INITIALIZER, LLL_MUTEX_LOCK_INITIALIZER_LOCKED,
+	LLL_MUTEX_LOCK_INITIALIZER_WAITERS): Remove.
+	(__lll_mutex_lock_wait, __lll_mutex_timedlock_wait,
+	__lll_mutex_unlock_wake, __lll_lock_wait, __lll_unlock_wake):
+	Remove prototype.
+	(__lll_trylock_asm, __lll_lock_asm_start, __lll_unlock_asm): Define.
+	(lll_robust_trylock, lll_cond_trylock): Use LLL_LOCK_INITIALIZER*
+	rather than LLL_MUTEX_LOCK_INITIALIZER* macros.
+	(lll_trylock): Likewise, use __lll_trylock_asm, pass
+	MULTIPLE_THREADS_OFFSET as another asm operand.
+	(lll_lock): Add private argument, use __lll_lock_asm_start, pass
+	MULTIPLE_THREADS_OFFSET as last asm operand, call
+	__lll_lock_wait_private if private is constant LLL_PRIVATE,
+	otherwise pass private as another argument to __lll_lock_wait.
+	(lll_robust_lock, lll_cond_lock, lll_robust_cond_lock,
+	lll_timedlock, lll_robust_timedlock): Add private argument, pass
+	private as another argument to __lll_*lock_wait call.
+	(lll_unlock): Add private argument, use __lll_unlock_asm, pass
+	MULTIPLE_THREADS_OFFSET as another asm operand, call
+	__lll_unlock_wake_private if private is constant LLL_PRIVATE,
+	otherwise pass private as another argument to __lll_unlock_wake.
+	(lll_robust_unlock): Add private argument, pass private as another
+	argument to __lll_unlock_wake.
+	(lll_robust_dead): Add private argument, use __lll_private_flag
+	macro.
+	(lll_islocked): Use LLL_LOCK_INITIALIZER instead of
+	LLL_MUTEX_LOCK_INITIALIZER.
+	(lll_lock_t): Remove.
+	(LLL_LOCK_INITIALIZER_WAITERS): Define.
+	(__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
+	__lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
+	lll_cond_wake, lll_cond_broadcast): Remove.
+	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
+	* sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Revert
+	2007-05-2{3,9} changes.
+	* sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Include
+	kernel-features.h and lowlevellock.h.
+	(LOAD_PRIVATE_FUTEX_WAIT): Define.
+	(LOAD_FUTEX_WAIT): Rewritten.
+	(LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't
+	define.
+	(__lll_lock_wait_private, __lll_unlock_wake_private): New functions.
+	(__lll_mutex_lock_wait): Rename to ...
+	(__lll_lock_wait): ... this.  Take futex addr from %edx instead of
+	%ecx, %ecx is now private argument.  Don't compile in for libc.so.
+	(__lll_mutex_timedlock_wait): Rename to ...
+	(__lll_timedlock_wait): ... this.  Use __NR_gettimeofday.  %esi
+	contains private argument.  Don't compile in for libc.so.
+	(__lll_mutex_unlock_wake): Rename to ...
+	(__lll_unlock_wake): ... this.  %ecx contains private argument.
+	Don't compile in for libc.so.
+	(__lll_timedwait_tid): Use __NR_gettimeofday.
+	* sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Include
+	kernel-features.h and lowlevellock.h.
+	(LOAD_FUTEX_WAIT): Define.
+	(LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't
+	define.
+	(__lll_robust_mutex_lock_wait): Rename to ...
+	(__lll_robust_lock_wait): ... this.  Futex addr is now in %edx
+	argument, %ecx argument contains private.  Use LOAD_FUTEX_WAIT
+	macro.
+	(__lll_robust_mutex_timedlock_wait): Rename to ...
+	(__lll_robust_timedlock_wait): ... this.  Use __NR_gettimeofday.
+	%esi argument contains private, use LOAD_FUTEX_WAIT macro.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Include
+	lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+	(pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass
+	PRIVATE(%ebx) ^ LLL_SHARED as private argument in %ecx to
+	__lll_lock_wait and __lll_unlock_wake, pass MUTEX(%ebx) address
+	to __lll_lock_wait in %edx.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S:
+	Include lowlevellock.h and pthread-errnos.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
+	FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define.
+	(__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, pass
+	cond_lock address in %edx rather than %ecx to __lll_lock_wait,
+	pass LLL_SHARED in %ecx to both __lll_lock_wait and
+	__lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S:
+	Include lowlevellock.h and pthread-errnos.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP,
+	FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define.
+	(__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, pass
+	cond_lock address in %edx rather than %ecx to __lll_lock_wait,
+	pass LLL_SHARED in %ecx to both __lll_lock_wait and
+	__lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S:
+	Include lowlevellock.h.
+	(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
+	Don't define.
+	(__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, pass
+	cond_lock address in %edx rather than %ecx to __lll_lock_wait,
+	pass LLL_SHARED in %ecx to both __lll_lock_wait and
+	__lll_unlock_wake.  Use __NR_gettimeofday.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+	(__pthread_cond_wait, __condvar_w_cleanup): Rename __lll_mutex_*
+	to __lll_*, pass cond_lock address in %edx rather than %ecx to
+	__lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait
+	and __lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+	(__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, pass
+	MUTEX(%ebx) address in %edx rather than %ecx to
+	__lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait
+	and __lll_unlock_wake.  Move return value from %ecx to %edx
+	register.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
+	Don't define.
+	(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass
+	MUTEX(%ebp) address in %edx rather than %ecx to
+	__lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait
+	and __lll_unlock_wake.  Move return value from %ecx to %edx
+	register.  Use __NR_gettimeofday.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
+	Don't define.
+	(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass
+	MUTEX(%ebp) address in %edx rather than %ecx to
+	__lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait
+	and __lll_unlock_wake.  Move return value from %ecx to %edx
+	register.  Use __NR_gettimeofday.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+	(__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, pass
+	MUTEX(%edi) address in %edx rather than %ecx to
+	__lll_lock_wait, pass PSHARED(%edi) in %ecx to both __lll_lock_wait
+	and __lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+	(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass
+	MUTEX(%ebx) address in %edx rather than %ecx to
+	__lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait
+	and __lll_unlock_wake.  Move return value from %ecx to %edx
+	register.
+	* sysdeps/unix/sysv/linux/i386/pthread_once.S: Include
+	lowlevellock.h.
+	(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't
+	define.
+	* sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Include lowlevellock.h.
+	(LOCK, SYS_futex, FUTEX_WAKE): Don't define.
+	* sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Include
+	lowlevellock.h.
+	(LOCK, SYS_futex, SYS_gettimeofday, FUTEX_WAIT): Don't define.
+	(sem_timedwait): Use __NR_gettimeofday.
+	* sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S: Include
+	lowlevellock.h.
+	(LOCK): Don't define.
+	* sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Include
+	lowlevellock.h.
+	(LOCK, SYS_futex, FUTEX_WAIT): Don't define.
+	* sysdeps/unix/sysv/linux/powerpc/sem_post.c: Wake only when there
+	are waiters.
+	* sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Revert
+	2007-05-2{3,9} changes.
+	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Include
+	kernel-features.h and lowlevellock.h.
+	(LOAD_PRIVATE_FUTEX_WAIT): Define.
+	(LOAD_FUTEX_WAIT): Rewritten.
+	(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define.
+	(__lll_lock_wait_private, __lll_unlock_wake_private): New functions.
+	(__lll_mutex_lock_wait): Rename to ...
+	(__lll_lock_wait): ... this.  %esi is now private argument.
+	Don't compile in for libc.so.
+	(__lll_mutex_timedlock_wait): Rename to ...
+	(__lll_timedlock_wait): ... this.  %esi contains private argument.
+	Don't compile in for libc.so.
+	(__lll_mutex_unlock_wake): Rename to ...
+	(__lll_unlock_wake): ... this.  %esi contains private argument.
+	Don't compile in for libc.so.
+	* sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Include
+	kernel-features.h and lowlevellock.h.
+	(LOAD_FUTEX_WAIT): Define.
+	(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define.
+	(__lll_robust_mutex_lock_wait): Rename to ...
+	(__lll_robust_lock_wait): ... this.  %esi argument contains private.
+	Use LOAD_FUTEX_WAIT macro.
+	(__lll_robust_mutex_timedlock_wait): Rename to ...
+	(__lll_robust_timedlock_wait): ... this. %esi argument contains
+	private, use LOAD_FUTEX_WAIT macro.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Include
+	lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+	(pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass
+	PRIVATE(%rdi) ^ LLL_SHARED as private argument in %esi to
+	__lll_lock_wait and __lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S:
+	Include lowlevellock.h and pthread-errnos.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
+	FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define.
+	(__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*,
+	pass LLL_SHARED in %esi to both __lll_lock_wait and
+	__lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S:
+	Include lowlevellock.h and pthread-errnos.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP,
+	FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define.
+	(__pthread_cond_signal): Rename __lll_mutex_* to __lll_*,
+	pass LLL_SHARED in %esi to both __lll_lock_wait and
+	__lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+	(__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*,
+	pass LLL_SHARED in %esi to both __lll_lock_wait and
+	__lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
+	(__pthread_cond_wait, __condvar_cleanup): Rename __lll_mutex_*
+	to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait
+	and __lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+	Don't define.
+	(__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*,
+	pass PSHARED(%rdi) in %esi to both __lll_lock_wait
+	and __lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+	Don't define.
+	(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*,
+	pass PSHARED(%rdi) in %esi to both __lll_lock_wait
+	and __lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+	Don't define.
+	(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*,
+	pass PSHARED(%rdi) in %esi to both __lll_lock_wait
+	and __lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+	Don't define.
+	(__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*,
+	pass PSHARED(%rdi) in %esi to both __lll_lock_wait
+	and __lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S:
+	Include lowlevellock.h.
+	(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
+	Don't define.
+	(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*,
+	pass PSHARED(%rdi) in %ecx to both __lll_lock_wait
+	and __lll_unlock_wake.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_once.S: Include
+	lowlevellock.h.
+	(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't
+	define.
+	* sysdeps/unix/sysv/linux/x86_64/sem_post.S: Include lowlevellock.h.
+	(LOCK, SYS_futex, FUTEX_WAKE): Don't define.
+	* sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Include
+	lowlevellock.h.
+	(LOCK, SYS_futex, FUTEX_WAIT): Don't define.
+	* sysdeps/unix/sysv/linux/x86_64/sem_trywait.S: Include
+	lowlevellock.h.
+	(LOCK): Don't define.
+	* sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Include
+	lowlevellock.h.
+	(LOCK, SYS_futex, FUTEX_WAIT): Don't define.
+	* sysdeps/unix/sysv/linux/sparc/internaltypes.h: New file.
+	* sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c: New file.
+	* sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c: New file.
+	* sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c: New file.
+	* sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c
+	(__lll_lock_wait_private): New function.
+	(__lll_lock_wait, __lll_timedlock_wait): Add private argument, pass
+	it to lll_futex_*wait.  Don't compile in for libc.so.
+	* sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c:
+	Remove.
+	* sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
+	(struct sparc_pthread_barrier): Remove.
+	(pthread_barrier_wait): Use union sparc_pthread_barrier instead of
+	struct sparc_pthread_barrier.  Pass
+	ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE to lll_{,un}lock
+	and lll_futex_wait macros.
+	* sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c:
+	Remove.
+	* sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c:
+	Include sparc pthread_barrier_wait.c instead of generic one.
+
+2007-07-30  Jakub Jelinek  <jakub@redhat.com>
+
+	* tst-rwlock14.c (do_test): Avoid warnings on 32-bit arches.
+
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
+	(pthread_rwlock_timedrdlock): Copy futex retval to %esi rather than
+	%ecx.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
+	(pthread_rwlock_timedwrlock): Likewise.
+	* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
+	(__pthread_rwlock_unlock): Fix MUTEX != 0 args to __lll_*.
+
+2007-07-31  Jakub Jelinek  <jakub@redhat.com>
+
+	* sysdeps/sparc/tls.h (tcbhead_t): Add private_futex field.
+
 2007-07-26  Jakub Jelinek  <jakub@redhat.com>
 
 	* tst-locale2.c (useless): Add return statement.
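
The recurring theme in the nptl changes above is that every low-level lock operation now carries an extra private/shared argument, so locks known to be process-private can use the kernel's FUTEX_*_PRIVATE operations (FUTEX_PRIVATE_FLAG, new in Linux 2.6.22), which skip the cross-process futex hashing.  The sketch below shows the general shape of a futex-backed lock parameterized by such a flag; it is a simplified illustration with made-up names (futex_op, lock_acquire, lock_release), not the glibc lowlevellock code:

  #define _GNU_SOURCE
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/futex.h>
  #include <stdatomic.h>
  #include <stddef.h>

  static long
  futex_op (int *uaddr, int op, int val)
  {
    return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
  }

  /* 0 = unlocked, 1 = locked.  A real implementation also tracks whether
     any thread is actually waiting, to avoid needless FUTEX_WAKE calls.  */
  static void
  lock_acquire (atomic_int *lock, int is_private)
  {
    int wait_op = is_private ? (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) : FUTEX_WAIT;
    int expected = 0;
    while (!atomic_compare_exchange_strong (lock, &expected, 1))
      {
        /* Sleep in the kernel as long as the lock word still reads 1.  */
        futex_op ((int *) lock, wait_op, 1);
        expected = 0;
      }
  }

  static void
  lock_release (atomic_int *lock, int is_private)
  {
    int wake_op = is_private ? (FUTEX_WAKE | FUTEX_PRIVATE_FLAG) : FUTEX_WAKE;
    atomic_store (lock, 0);
    futex_op ((int *) lock, wake_op, 1);	/* wake at most one waiter */
  }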
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index ddf91e5c10..f30c88f301 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -103,7 +103,7 @@ static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40MiBi by default.  */
 static size_t stack_cache_actsize;
 
 /* Mutex protecting this variable.  */
-static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;
+static int stack_cache_lock = LLL_LOCK_INITIALIZER;
 
 /* List of queued stack frames.  */
 static LIST_HEAD (stack_cache);
@@ -139,7 +139,7 @@ get_cached_stack (size_t *sizep, void **memp)
   struct pthread *result = NULL;
   list_t *entry;
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Search the cache for a matching entry.  We search for the
      smallest stack which has at least the required size.  Note that
@@ -172,7 +172,7 @@ get_cached_stack (size_t *sizep, void **memp)
       || __builtin_expect (result->stackblock_size > 4 * size, 0))
     {
       /* Release the lock.  */
-      lll_unlock (stack_cache_lock);
+      lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
       return NULL;
     }
@@ -187,7 +187,7 @@ get_cached_stack (size_t *sizep, void **memp)
   stack_cache_actsize -= result->stackblock_size;
 
   /* Release the lock early.  */
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   /* Report size and location of the stack to the caller.  */
   *sizep = result->stackblock_size;
@@ -400,12 +400,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 
 
       /* Prepare to modify global data.  */
-      lll_lock (stack_cache_lock);
+      lll_lock (stack_cache_lock, LLL_PRIVATE);
 
       /* And add to the list of stacks in use.  */
       list_add (&pd->list, &__stack_user);
 
-      lll_unlock (stack_cache_lock);
+      lll_unlock (stack_cache_lock, LLL_PRIVATE);
     }
   else
     {
@@ -544,12 +544,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 
 
 	  /* Prepare to modify global data.  */
-	  lll_lock (stack_cache_lock);
+	  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
 	  /* And add to the list of stacks in use.  */
 	  list_add (&pd->list, &stack_used);
 
-	  lll_unlock (stack_cache_lock);
+	  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
 
 	  /* There might have been a race.  Another thread might have
@@ -598,12 +598,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 	    mprot_error:
 	      err = errno;
 
-	      lll_lock (stack_cache_lock);
+	      lll_lock (stack_cache_lock, LLL_PRIVATE);
 
 	      /* Remove the thread from the list.  */
 	      list_del (&pd->list);
 
-	      lll_unlock (stack_cache_lock);
+	      lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
 	      /* Get rid of the TLS block we allocated.  */
 	      _dl_deallocate_tls (TLS_TPADJ (pd), false);
@@ -699,7 +699,7 @@ void
 internal_function
 __deallocate_stack (struct pthread *pd)
 {
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Remove the thread from the list of threads with user defined
      stacks.  */
@@ -715,7 +715,7 @@ __deallocate_stack (struct pthread *pd)
     /* Free the memory associated with the ELF TLS.  */
     _dl_deallocate_tls (TLS_TPADJ (pd), false);
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 }
 
 
@@ -732,7 +732,7 @@ __make_stacks_executable (void **stack_endp)
   const size_t pagemask = ~(__getpagesize () - 1);
 #endif
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   list_t *runp;
   list_for_each (runp, &stack_used)
@@ -761,7 +761,7 @@ __make_stacks_executable (void **stack_endp)
 	  break;
       }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return err;
 }
@@ -837,7 +837,7 @@ __find_thread_by_id (pid_t tid)
 {
   struct pthread *result = NULL;
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Iterate over the list with system-allocated threads first.  */
   list_t *runp;
@@ -869,7 +869,7 @@ __find_thread_by_id (pid_t tid)
     }
 
  out:
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return result;
 }
@@ -920,7 +920,7 @@ attribute_hidden
 __nptl_setxid (struct xid_command *cmdp)
 {
   int result;
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   __xidcmd = cmdp;
   cmdp->cntr = 0;
@@ -966,7 +966,7 @@ __nptl_setxid (struct xid_command *cmdp)
       result = -1;
     }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
   return result;
 }
 
@@ -995,7 +995,7 @@ void
 attribute_hidden
 __pthread_init_static_tls (struct link_map *map)
 {
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   /* Iterate over the list with system-allocated threads first.  */
   list_t *runp;
@@ -1006,7 +1006,7 @@ __pthread_init_static_tls (struct link_map *map)
   list_for_each (runp, &__stack_user)
     init_one_static_tls (list_entry (runp, struct pthread, list), map);
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 }
 
 
@@ -1014,7 +1014,7 @@ void
 attribute_hidden
 __wait_lookup_done (void)
 {
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   struct pthread *self = THREAD_SELF;
 
@@ -1063,5 +1063,5 @@ __wait_lookup_done (void)
       while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
     }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 }
diff --git a/nptl/descr.h b/nptl/descr.h
index 3a3361d9e3..3c00e1418c 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -309,10 +309,10 @@ struct pthread
   int parent_cancelhandling;
 
   /* Lock to synchronize access to the descriptor.  */
-  lll_lock_t lock;
+  int lock;
 
   /* Lock for synchronizing setxid calls.  */
-  lll_lock_t setxid_futex;
+  int setxid_futex;
 
 #if HP_TIMING_AVAIL
   /* Offset of the CPU clock at start thread start time.  */
diff --git a/nptl/old_pthread_cond_broadcast.c b/nptl/old_pthread_cond_broadcast.c
index 3852943fac..5ad6fa2ce4 100644
--- a/nptl/old_pthread_cond_broadcast.c
+++ b/nptl/old_pthread_cond_broadcast.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -33,7 +33,7 @@ __pthread_cond_broadcast_2_0 (cond)
     {
       pthread_cond_t *newcond;
 
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
       newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
       if (newcond == NULL)
 	return ENOMEM;
diff --git a/nptl/old_pthread_cond_signal.c b/nptl/old_pthread_cond_signal.c
index 65beb0b9db..5b67f1153e 100644
--- a/nptl/old_pthread_cond_signal.c
+++ b/nptl/old_pthread_cond_signal.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -33,7 +33,7 @@ __pthread_cond_signal_2_0 (cond)
     {
       pthread_cond_t *newcond;
 
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
       newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
       if (newcond == NULL)
 	return ENOMEM;
diff --git a/nptl/old_pthread_cond_timedwait.c b/nptl/old_pthread_cond_timedwait.c
index 27c10938d3..2f09f3b0ef 100644
--- a/nptl/old_pthread_cond_timedwait.c
+++ b/nptl/old_pthread_cond_timedwait.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -35,7 +35,7 @@ __pthread_cond_timedwait_2_0 (cond, mutex, abstime)
     {
       pthread_cond_t *newcond;
 
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
       newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
       if (newcond == NULL)
 	return ENOMEM;
diff --git a/nptl/old_pthread_cond_wait.c b/nptl/old_pthread_cond_wait.c
index 0a503a1cdc..4f1caadcf5 100644
--- a/nptl/old_pthread_cond_wait.c
+++ b/nptl/old_pthread_cond_wait.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -34,7 +34,7 @@ __pthread_cond_wait_2_0 (cond, mutex)
     {
       pthread_cond_t *newcond;
 
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
       newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
       if (newcond == NULL)
 	return ENOMEM;
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 85fb9b8e48..82c0f1ecf6 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -151,7 +151,7 @@ hidden_proto (__stack_user)
 
 /* Attribute handling.  */
 extern struct pthread_attr *__attr_list attribute_hidden;
-extern lll_lock_t __attr_list_lock attribute_hidden;
+extern int __attr_list_lock attribute_hidden;
 
 /* First available RT signal.  */
 extern int __current_sigrtmin attribute_hidden;
diff --git a/nptl/pthread_attr_init.c b/nptl/pthread_attr_init.c
index c84b33f318..d9a309abca 100644
--- a/nptl/pthread_attr_init.c
+++ b/nptl/pthread_attr_init.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -27,7 +27,7 @@
 
 
 struct pthread_attr *__attr_list;
-lll_lock_t __attr_list_lock = LLL_LOCK_INITIALIZER;
+int __attr_list_lock = LLL_LOCK_INITIALIZER;
 
 
 int
diff --git a/nptl/pthread_barrier_destroy.c b/nptl/pthread_barrier_destroy.c
index 492b29485b..4951621dd8 100644
--- a/nptl/pthread_barrier_destroy.c
+++ b/nptl/pthread_barrier_destroy.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -31,14 +31,14 @@ pthread_barrier_destroy (barrier)
 
   ibarrier = (struct pthread_barrier *) barrier;
 
-  lll_lock (ibarrier->lock);
+  lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1))
     /* The barrier is not used anymore.  */
     result = 0;
   else
     /* Still used, return with an error.  */
-    lll_unlock (ibarrier->lock);
+    lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   return result;
 }
diff --git a/nptl/pthread_barrier_init.c b/nptl/pthread_barrier_init.c
index 8dfc444965..8e6edba380 100644
--- a/nptl/pthread_barrier_init.c
+++ b/nptl/pthread_barrier_init.c
@@ -40,7 +40,7 @@ pthread_barrier_init (barrier, attr, count)
   if (__builtin_expect (count == 0, 0))
     return EINVAL;
 
-  struct pthread_barrierattr *iattr
+  const struct pthread_barrierattr *iattr
     = (attr != NULL
        ? iattr = (struct pthread_barrierattr *) attr
        : &default_attr);
diff --git a/nptl/pthread_barrier_wait.c b/nptl/pthread_barrier_wait.c
index e96a3e5473..9d80cad902 100644
--- a/nptl/pthread_barrier_wait.c
+++ b/nptl/pthread_barrier_wait.c
@@ -32,7 +32,7 @@ pthread_barrier_wait (barrier)
   int result = 0;
 
   /* Make sure we are alone.  */
-  lll_lock (ibarrier->lock);
+  lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   /* One more arrival.  */
   --ibarrier->left;
@@ -46,8 +46,7 @@ pthread_barrier_wait (barrier)
 
       /* Wake up everybody.  */
       lll_futex_wake (&ibarrier->curr_event, INT_MAX,
-		      // XYZ check mutex flag
-		      LLL_SHARED);
+		      ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
       /* This is the thread which finished the serialization.  */
       result = PTHREAD_BARRIER_SERIAL_THREAD;
@@ -59,13 +58,12 @@ pthread_barrier_wait (barrier)
       unsigned int event = ibarrier->curr_event;
 
       /* Before suspending, make the barrier available to others.  */
-      lll_unlock (ibarrier->lock);
+      lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
       /* Wait for the event counter of the barrier to change.  */
       do
 	lll_futex_wait (&ibarrier->curr_event, event,
-			// XYZ check mutex flag
-			LLL_SHARED);
+			ibarrier->private ^ FUTEX_PRIVATE_FLAG);
       while (event == ibarrier->curr_event);
     }
 
@@ -75,7 +73,7 @@ pthread_barrier_wait (barrier)
   /* If this was the last woken thread, unlock.  */
   if (atomic_increment_val (&ibarrier->left) == init_count)
     /* We are done.  */
-    lll_unlock (ibarrier->lock);
+    lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   return result;
 }
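
Throughout the barrier code the hard-wired LLL_SHARED (and the "XYZ check mutex flag" reminders) is replaced by ibarrier->private ^ FUTEX_PRIVATE_FLAG: the barrier records its futex privacy once at init time, and the XOR converts the stored value into the LLL_PRIVATE/LLL_SHARED constant the lll_* entry points expect. The small check below assumes the usual glibc encoding, LLL_PRIVATE == 0 and LLL_SHARED == FUTEX_PRIVATE_FLAG; neither constant is defined in these hunks.

    /* Illustrative only: XOR with FUTEX_PRIVATE_FLAG toggles between the
       two complementary encodings of the "private" information.  */
    #include <assert.h>

    #define FUTEX_PRIVATE_FLAG  128
    #define LLL_PRIVATE         0
    #define LLL_SHARED          FUTEX_PRIVATE_FLAG

    int
    main (void)
    {
      /* A field holding FUTEX_PRIVATE_FLAG maps to LLL_PRIVATE ...  */
      assert ((FUTEX_PRIVATE_FLAG ^ FUTEX_PRIVATE_FLAG) == LLL_PRIVATE);
      /* ... and a field holding 0 maps to LLL_SHARED.  */
      assert ((0 ^ FUTEX_PRIVATE_FLAG) == LLL_SHARED);
      return 0;
    }
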
diff --git a/nptl/pthread_cond_broadcast.c b/nptl/pthread_cond_broadcast.c
index aec33f3bd8..5c0d76effc 100644
--- a/nptl/pthread_cond_broadcast.c
+++ b/nptl/pthread_cond_broadcast.c
@@ -33,7 +33,7 @@ __pthread_cond_broadcast (cond)
      pthread_cond_t *cond;
 {
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   /* Are there any waiters to be woken?  */
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -47,7 +47,7 @@ __pthread_cond_broadcast (cond)
       ++cond->__data.__broadcast_seq;
 
       /* We are done.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
       /* Do not use requeue for pshared condvars.  */
       if (cond->__data.__mutex == (void *) ~0l)
@@ -79,7 +79,7 @@ __pthread_cond_broadcast (cond)
     }
 
   /* We are done.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   return 0;
 }
diff --git a/nptl/pthread_cond_destroy.c b/nptl/pthread_cond_destroy.c
index 8574b6118f..53b5cd272f 100644
--- a/nptl/pthread_cond_destroy.c
+++ b/nptl/pthread_cond_destroy.c
@@ -27,13 +27,13 @@ __pthread_cond_destroy (cond)
      pthread_cond_t *cond;
 {
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
     {
       /* If there are still some waiters which have not been
 	 woken up, this is an application bug.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
       return EBUSY;
     }
 
@@ -66,13 +66,13 @@ __pthread_cond_destroy (cond)
 
       do
 	{
-	  lll_mutex_unlock (cond->__data.__lock);
+	  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
 	  lll_futex_wait (&cond->__data.__nwaiters, nwaiters,
 			  // XYZ check mutex flag
 			  LLL_SHARED);
 
-	  lll_mutex_lock (cond->__data.__lock);
+	  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
 	  nwaiters = cond->__data.__nwaiters;
 	}
diff --git a/nptl/pthread_cond_init.c b/nptl/pthread_cond_init.c
index 7c6d4c18f1..a75b82b9cf 100644
--- a/nptl/pthread_cond_init.c
+++ b/nptl/pthread_cond_init.c
@@ -28,7 +28,7 @@ __pthread_cond_init (cond, cond_attr)
 {
   struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr;
 
-  cond->__data.__lock = LLL_MUTEX_LOCK_INITIALIZER;
+  cond->__data.__lock = LLL_LOCK_INITIALIZER;
   cond->__data.__futex = 0;
   cond->__data.__nwaiters = (icond_attr != NULL
 			     && ((icond_attr->value
diff --git a/nptl/pthread_cond_signal.c b/nptl/pthread_cond_signal.c
index a4faf41854..f2de58fa1d 100644
--- a/nptl/pthread_cond_signal.c
+++ b/nptl/pthread_cond_signal.c
@@ -33,7 +33,7 @@ __pthread_cond_signal (cond)
      pthread_cond_t *cond;
 {
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   /* Are there any waiters to be woken?  */
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -56,7 +56,7 @@ __pthread_cond_signal (cond)
     }
 
   /* We are done.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   return 0;
 }
diff --git a/nptl/pthread_cond_timedwait.c b/nptl/pthread_cond_timedwait.c
index d1c29d2377..a8d95dc224 100644
--- a/nptl/pthread_cond_timedwait.c
+++ b/nptl/pthread_cond_timedwait.c
@@ -54,13 +54,13 @@ __pthread_cond_timedwait (cond, mutex, abstime)
     return EINVAL;
 
   /* Make sure we are along.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   /* Now we can release the mutex.  */
   int err = __pthread_mutex_unlock_usercnt (mutex, 0);
   if (err)
     {
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
       return err;
     }
 
@@ -146,7 +146,7 @@ __pthread_cond_timedwait (cond, mutex, abstime)
       unsigned int futex_val = cond->__data.__futex;
 
       /* Prepare to wait.  Release the condvar futex.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
       /* Enable asynchronous cancellation.  Required by the standard.  */
       cbuffer.oldtype = __pthread_enable_asynccancel ();
@@ -161,7 +161,7 @@ __pthread_cond_timedwait (cond, mutex, abstime)
       __pthread_disable_asynccancel (cbuffer.oldtype);
 
       /* We are going to look at shared data again, so get the lock.  */
-      lll_mutex_lock(cond->__data.__lock);
+      lll_lock(cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
       /* If a broadcast happened, we are done.  */
       if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -203,7 +203,7 @@ __pthread_cond_timedwait (cond, mutex, abstime)
 		    LLL_SHARED);
 
   /* We are done with the condvar.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   /* The cancellation handling is back to normal, remove the handler.  */
   __pthread_cleanup_pop (&buffer, 0);
diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
index e524aa6c94..679655f8fd 100644
--- a/nptl/pthread_cond_wait.c
+++ b/nptl/pthread_cond_wait.c
@@ -45,7 +45,7 @@ __condvar_cleanup (void *arg)
   unsigned int destroying;
 
   /* We are going to modify shared data.  */
-  lll_mutex_lock (cbuffer->cond->__data.__lock);
+  lll_lock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq)
     {
@@ -78,7 +78,7 @@ __condvar_cleanup (void *arg)
     }
 
   /* We are done.  */
-  lll_mutex_unlock (cbuffer->cond->__data.__lock);
+  lll_unlock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   /* Wake everybody to make sure no condvar signal gets lost.  */
   if (! destroying)
@@ -102,13 +102,13 @@ __pthread_cond_wait (cond, mutex)
   int err;
 
   /* Make sure we are along.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   /* Now we can release the mutex.  */
   err = __pthread_mutex_unlock_usercnt (mutex, 0);
   if (__builtin_expect (err, 0))
     {
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
       return err;
     }
 
@@ -144,7 +144,7 @@ __pthread_cond_wait (cond, mutex)
       unsigned int futex_val = cond->__data.__futex;
 
       /* Prepare to wait.  Release the condvar futex.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
       /* Enable asynchronous cancellation.  Required by the standard.  */
       cbuffer.oldtype = __pthread_enable_asynccancel ();
@@ -158,7 +158,7 @@ __pthread_cond_wait (cond, mutex)
       __pthread_disable_asynccancel (cbuffer.oldtype);
 
       /* We are going to look at shared data again, so get the lock.  */
-      lll_mutex_lock (cond->__data.__lock);
+      lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
       /* If a broadcast happened, we are done.  */
       if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -186,7 +186,7 @@ __pthread_cond_wait (cond, mutex)
 		    LLL_SHARED);
 
   /* We are done with the condvar.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
 
   /* The cancellation handling is back to normal, remove the handler.  */
   __pthread_cleanup_pop (&buffer, 0);
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index ca55903c22..3ab2fa498d 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -63,7 +63,7 @@ __find_in_stack_list (pd)
   list_t *entry;
   struct pthread *result = NULL;
 
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);
 
   list_for_each (entry, &stack_used)
     {
@@ -90,7 +90,7 @@ __find_in_stack_list (pd)
 	  }
       }
 
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 
   return result;
 }
@@ -284,9 +284,9 @@ start_thread (void *arg)
 	  int oldtype = CANCEL_ASYNC ();
 
 	  /* Get the lock the parent locked to force synchronization.  */
-	  lll_lock (pd->lock);
+	  lll_lock (pd->lock, LLL_PRIVATE);
 	  /* And give it up right away.  */
-	  lll_unlock (pd->lock);
+	  lll_unlock (pd->lock, LLL_PRIVATE);
 
 	  CANCEL_RESET (oldtype);
 	}
@@ -370,7 +370,7 @@ start_thread (void *arg)
 # endif
 	  this->__list.__next = NULL;
 
-	  lll_robust_mutex_dead (this->__lock);
+	  lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
 	}
       while (robust != (void *) &pd->robust_head);
     }
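
The exit path above walks the thread's robust-mutex list and calls lll_robust_dead on each lock still held, which (per the alpha definition later in this patch) marks the futex word with FUTEX_OWNER_DIED and wakes a waiter. Application code sees the result as EOWNERDEAD on the next lock attempt. A short example using the *_np interfaces current at the time of this patch:

    /* A robust mutex whose owner dies is handed to the next locker with
       EOWNERDEAD; the waking side is exactly the list walk shown above.  */
    #define _GNU_SOURCE
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m;

    static void *
    die_with_lock (void *arg)
    {
      pthread_mutex_lock (&m);
      return arg;                   /* thread exits without unlocking */
    }

    int
    main (void)
    {
      pthread_mutexattr_t a;
      pthread_mutexattr_init (&a);
      pthread_mutexattr_setrobust_np (&a, PTHREAD_MUTEX_ROBUST_NP);
      pthread_mutex_init (&m, &a);

      pthread_t th;
      pthread_create (&th, NULL, die_with_lock, NULL);
      pthread_join (th, NULL);

      if (pthread_mutex_lock (&m) == EOWNERDEAD)
        {
          /* Repair the protected state, then mark the mutex usable.  */
          pthread_mutex_consistent_np (&m);
          puts ("recovered robust mutex from its dead owner");
        }
      pthread_mutex_unlock (&m);
      return 0;
    }
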
diff --git a/nptl/pthread_getattr_np.c b/nptl/pthread_getattr_np.c
index 87cf56482f..9c25caff89 100644
--- a/nptl/pthread_getattr_np.c
+++ b/nptl/pthread_getattr_np.c
@@ -39,7 +39,7 @@ pthread_getattr_np (thread_id, attr)
   struct pthread_attr *iattr = (struct pthread_attr *) attr;
   int ret = 0;
 
-  lll_lock (thread->lock);
+  lll_lock (thread->lock, LLL_PRIVATE);
 
   /* The thread library is responsible for keeping the values in the
      thread desriptor up-to-date in case the user changes them.  */
@@ -173,7 +173,7 @@ pthread_getattr_np (thread_id, attr)
 	}
     }
 
-  lll_unlock (thread->lock);
+  lll_unlock (thread->lock, LLL_PRIVATE);
 
   return ret;
 }
diff --git a/nptl/pthread_getschedparam.c b/nptl/pthread_getschedparam.c
index 5e8713016e..d4e9459bb3 100644
--- a/nptl/pthread_getschedparam.c
+++ b/nptl/pthread_getschedparam.c
@@ -38,7 +38,7 @@ __pthread_getschedparam (threadid, policy, param)
 
   int result = 0;
 
-  lll_lock (pd->lock);
+  lll_lock (pd->lock, LLL_PRIVATE);
 
   /* The library is responsible for maintaining the values at all
      times.  If the user uses a interface other than
@@ -68,7 +68,7 @@ __pthread_getschedparam (threadid, policy, param)
       memcpy (param, &pd->schedparam, sizeof (struct sched_param));
     }
 
-  lll_unlock (pd->lock);
+  lll_unlock (pd->lock, LLL_PRIVATE);
 
   return result;
 }
diff --git a/nptl/pthread_mutex_lock.c b/nptl/pthread_mutex_lock.c
index d0d6805aea..a82922e99a 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -27,9 +27,9 @@
 
 
 #ifndef LLL_MUTEX_LOCK
-# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
-# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
-# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
+# define LLL_MUTEX_LOCK(mutex) lll_lock (mutex, /* XYZ */ LLL_SHARED)
+# define LLL_MUTEX_TRYLOCK(mutex) lll_trylock (mutex)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_lock (mutex, id, /* XYZ */ LLL_SHARED)
 #endif
 
 
@@ -198,7 +198,7 @@ __pthread_mutex_lock (mutex)
 	    {
 	      /* This mutex is now not recoverable.  */
 	      mutex->__data.__count = 0;
-	      lll_mutex_unlock (mutex->__data.__lock);
+	      lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	      return ENOTRECOVERABLE;
 	    }
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 825a9849b8..7a0ed57eaa 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -56,7 +56,8 @@ pthread_mutex_timedlock (mutex, abstime)
 	}
 
       /* We have to get the mutex.  */
-      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+      result = lll_timedlock (mutex->__data.__lock, abstime,
+			      /* XYZ */ LLL_SHARED);
 
       if (result != 0)
 	goto out;
@@ -76,14 +77,15 @@ pthread_mutex_timedlock (mutex, abstime)
     case PTHREAD_MUTEX_TIMED_NP:
     simple:
       /* Normal mutex.  */
-      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+      result = lll_timedlock (mutex->__data.__lock, abstime,
+			      /* XYZ */ LLL_SHARED);
       break;
 
     case PTHREAD_MUTEX_ADAPTIVE_NP:
       if (! __is_smp)
 	goto simple;
 
-      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
+      if (lll_trylock (mutex->__data.__lock) != 0)
 	{
 	  int cnt = 0;
 	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -92,7 +94,8 @@ pthread_mutex_timedlock (mutex, abstime)
 	    {
 	      if (cnt++ >= max_cnt)
 		{
-		  result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+		  result = lll_timedlock (mutex->__data.__lock, abstime,
+					  /* XYZ */ LLL_SHARED);
 		  break;
 		}
 
@@ -100,7 +103,7 @@ pthread_mutex_timedlock (mutex, abstime)
 	      BUSY_WAIT_NOP;
 #endif
 	    }
-	  while (lll_mutex_trylock (mutex->__data.__lock) != 0);
+	  while (lll_trylock (mutex->__data.__lock) != 0);
 
 	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
 	}
@@ -174,15 +177,15 @@ pthread_mutex_timedlock (mutex, abstime)
 		}
 	    }
 
-	  result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
-					       id);
+	  result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
+					 /* XYZ */ LLL_SHARED);
 
 	  if (__builtin_expect (mutex->__data.__owner
 				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
 	    {
 	      /* This mutex is now not recoverable.  */
 	      mutex->__data.__count = 0;
-	      lll_mutex_unlock (mutex->__data.__lock);
+	      lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	      return ENOTRECOVERABLE;
 	    }
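
The adaptive case keeps its shape: spin on lll_trylock for a bounded number of iterations, with the bound derived from the __spins running average, and fall back to the blocking lll_timedlock only when spinning does not pay off. Below is a sketch of the same spin-then-block policy using portable calls; MAX_SPINS and the averaging constant are illustrative, not the glibc values.

    /* Spin briefly for a contended lock, then block; remember how long
       spinning took so the next attempt adapts its budget.  */
    #include <pthread.h>

    #define MAX_SPINS 100

    static int
    adaptive_lock (pthread_mutex_t *m, int *spin_estimate)
    {
      int result = 0;

      if (pthread_mutex_trylock (m) != 0)
        {
          int cnt = 0;
          int max_cnt = *spin_estimate * 2 + 10;
          if (max_cnt > MAX_SPINS)
            max_cnt = MAX_SPINS;

          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = pthread_mutex_lock (m);   /* give up and block */
                  break;
                }
            }
          while (pthread_mutex_trylock (m) != 0);

          /* Exponentially weighted running average of the spin count.  */
          *spin_estimate += (cnt - *spin_estimate) / 8;
        }
      return result;
    }

    int
    main (void)
    {
      static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
      int spins = 0;
      int err = adaptive_lock (&m, &spins);
      if (err == 0)
        pthread_mutex_unlock (&m);
      return err;
    }
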
diff --git a/nptl/pthread_mutex_trylock.c b/nptl/pthread_mutex_trylock.c
index 9a97a6cf81..9478d382c2 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -48,7 +48,7 @@ __pthread_mutex_trylock (mutex)
 	  return 0;
 	}
 
-      if (lll_mutex_trylock (mutex->__data.__lock) == 0)
+      if (lll_trylock (mutex->__data.__lock) == 0)
 	{
 	  /* Record the ownership.  */
 	  mutex->__data.__owner = id;
@@ -62,7 +62,7 @@ __pthread_mutex_trylock (mutex)
     case PTHREAD_MUTEX_TIMED_NP:
     case PTHREAD_MUTEX_ADAPTIVE_NP:
       /* Normal mutex.  */
-      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
+      if (lll_trylock (mutex->__data.__lock) != 0)
 	break;
 
       /* Record the ownership.  */
@@ -140,7 +140,7 @@ __pthread_mutex_trylock (mutex)
 		}
 	    }
 
-	  oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
+	  oldval = lll_robust_trylock (mutex->__data.__lock, id);
 	  if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
 	    {
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
@@ -154,7 +154,7 @@ __pthread_mutex_trylock (mutex)
 	      /* This mutex is now not recoverable.  */
 	      mutex->__data.__count = 0;
 	      if (oldval == id)
-		lll_mutex_unlock (mutex->__data.__lock);
+		lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
 	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
 	      return ENOTRECOVERABLE;
 	    }
diff --git a/nptl/pthread_mutex_unlock.c b/nptl/pthread_mutex_unlock.c
index 642e3a4442..6226089ebe 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -47,7 +47,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
     case PTHREAD_MUTEX_ERRORCHECK_NP:
       /* Error checking mutex.  */
       if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
-	  || ! lll_mutex_islocked (mutex->__data.__lock))
+	  || ! lll_islocked (mutex->__data.__lock))
 	return EPERM;
       /* FALLTHROUGH */
 
@@ -61,7 +61,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
 	--mutex->__data.__nusers;
 
       /* Unlock.  */
-      lll_mutex_unlock (mutex->__data.__lock);
+      lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
       break;
 
     case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
@@ -92,7 +92,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
     case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
       if ((mutex->__data.__lock & FUTEX_TID_MASK)
 	  != THREAD_GETMEM (THREAD_SELF, tid)
-	  || ! lll_mutex_islocked (mutex->__data.__lock))
+	  || ! lll_islocked (mutex->__data.__lock))
 	return EPERM;
 
       /* If the previous owner died and the caller did not succeed in
@@ -115,7 +115,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
 	--mutex->__data.__nusers;
 
       /* Unlock.  */
-      lll_robust_mutex_unlock (mutex->__data.__lock);
+      lll_robust_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
 
       THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;
@@ -161,7 +161,7 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
     case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
       if ((mutex->__data.__lock & FUTEX_TID_MASK)
 	  != THREAD_GETMEM (THREAD_SELF, tid)
-	  || ! lll_mutex_islocked (mutex->__data.__lock))
+	  || ! lll_islocked (mutex->__data.__lock))
 	return EPERM;
 
       /* If the previous owner died and the caller did not succeed in
diff --git a/nptl/pthread_once.c b/nptl/pthread_once.c
index 9b2cef8645..306af0a34e 100644
--- a/nptl/pthread_once.c
+++ b/nptl/pthread_once.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -22,7 +22,7 @@
 
 
 
-static lll_lock_t once_lock = LLL_LOCK_INITIALIZER;
+static int once_lock = LLL_LOCK_INITIALIZER;
 
 
 int
@@ -35,7 +35,7 @@ __pthread_once (once_control, init_routine)
      object.  */
   if (*once_control == PTHREAD_ONCE_INIT)
     {
-      lll_lock (once_lock);
+      lll_lock (once_lock, LLL_PRIVATE);
 
       /* XXX This implementation is not complete.  It doesn't take
 	 cancelation and fork into account.  */
@@ -46,7 +46,7 @@ __pthread_once (once_control, init_routine)
 	  *once_control = !PTHREAD_ONCE_INIT;
 	}
 
-      lll_unlock (once_lock);
+      lll_unlock (once_lock, LLL_PRIVATE);
     }
 
   return 0;
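
pthread_once's guard lock is process-local by construction, so it can take the LLL_PRIVATE path unconditionally; the XXX comment is a reminder that this simple version still ignores cancellation and fork. The calling idiom it implements:

    /* init_once runs exactly once even when several threads race to call
       pthread_once on the same control variable.  */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_once_t once = PTHREAD_ONCE_INIT;
    static int initialized;

    static void
    init_once (void)
    {
      initialized = 1;
      puts ("initialized exactly once");
    }

    static void *
    start (void *arg)
    {
      pthread_once (&once, init_once);
      return arg;
    }

    int
    main (void)
    {
      pthread_t t1, t2;
      pthread_create (&t1, NULL, start, NULL);
      pthread_create (&t2, NULL, start, NULL);
      pthread_join (t1, NULL);
      pthread_join (t2, NULL);
      return initialized ? 0 : 1;
    }
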
diff --git a/nptl/pthread_rwlock_rdlock.c b/nptl/pthread_rwlock_rdlock.c
index b8f9d41d6a..31eb508a0d 100644
--- a/nptl/pthread_rwlock_rdlock.c
+++ b/nptl/pthread_rwlock_rdlock.c
@@ -32,7 +32,7 @@ __pthread_rwlock_rdlock (rwlock)
   int result = 0;
 
   /* Make sure we are along.  */
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   while (1)
     {
@@ -74,21 +74,20 @@ __pthread_rwlock_rdlock (rwlock)
       int waitval = rwlock->__data.__readers_wakeup;
 
       /* Free the lock.  */
-      lll_mutex_unlock (rwlock->__data.__lock);
+      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* Wait for the writer to finish.  */
-      lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval, 
-		      // XYZ check mutex flag
-		      LLL_SHARED);
+      lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
+		      rwlock->__data.__shared);
 
       /* Get the lock.  */
-      lll_mutex_lock (rwlock->__data.__lock);
+      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       --rwlock->__data.__nr_readers_queued;
     }
 
   /* We are done, free the lock.  */
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
diff --git a/nptl/pthread_rwlock_timedrdlock.c b/nptl/pthread_rwlock_timedrdlock.c
index 654d628b2f..fcd10aac90 100644
--- a/nptl/pthread_rwlock_timedrdlock.c
+++ b/nptl/pthread_rwlock_timedrdlock.c
@@ -33,7 +33,7 @@ pthread_rwlock_timedrdlock (rwlock, abstime)
   int result = 0;
 
   /* Make sure we are along.  */
-  lll_mutex_lock(rwlock->__data.__lock);
+  lll_lock(rwlock->__data.__lock, rwlock->__data.__shared);
 
   while (1)
     {
@@ -110,16 +110,14 @@ pthread_rwlock_timedrdlock (rwlock, abstime)
       int waitval = rwlock->__data.__readers_wakeup;
 
       /* Free the lock.  */
-      lll_mutex_unlock (rwlock->__data.__lock);
+      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* Wait for the writer to finish.  */
       err = lll_futex_timed_wait (&rwlock->__data.__readers_wakeup,
-				  waitval, &rt,
-				  // XYZ check mutex flag
-				  LLL_SHARED);
+				  waitval, &rt, rwlock->__data.__shared);
 
       /* Get the lock.  */
-      lll_mutex_lock (rwlock->__data.__lock);
+      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       --rwlock->__data.__nr_readers_queued;
 
@@ -133,7 +131,7 @@ pthread_rwlock_timedrdlock (rwlock, abstime)
     }
 
   /* We are done, free the lock.  */
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
diff --git a/nptl/pthread_rwlock_timedwrlock.c b/nptl/pthread_rwlock_timedwrlock.c
index 354beb0846..e6283f4623 100644
--- a/nptl/pthread_rwlock_timedwrlock.c
+++ b/nptl/pthread_rwlock_timedwrlock.c
@@ -33,7 +33,7 @@ pthread_rwlock_timedwrlock (rwlock, abstime)
   int result = 0;
 
   /* Make sure we are along.  */
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   while (1)
     {
@@ -100,16 +100,14 @@ pthread_rwlock_timedwrlock (rwlock, abstime)
       int waitval = rwlock->__data.__writer_wakeup;
 
       /* Free the lock.  */
-      lll_mutex_unlock (rwlock->__data.__lock);
+      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* Wait for the writer or reader(s) to finish.  */
       err = lll_futex_timed_wait (&rwlock->__data.__writer_wakeup,
-				  waitval, &rt,
-				  // XYZ check mutex flag
-				  LLL_SHARED);
+				  waitval, &rt, rwlock->__data.__shared);
 
       /* Get the lock.  */
-      lll_mutex_lock (rwlock->__data.__lock);
+      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* To start over again, remove the thread from the writer list.  */
       --rwlock->__data.__nr_writers_queued;
@@ -123,7 +121,7 @@ pthread_rwlock_timedwrlock (rwlock, abstime)
     }
 
   /* We are done, free the lock.  */
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
diff --git a/nptl/pthread_rwlock_tryrdlock.c b/nptl/pthread_rwlock_tryrdlock.c
index df8863bcf8..8461e1bdde 100644
--- a/nptl/pthread_rwlock_tryrdlock.c
+++ b/nptl/pthread_rwlock_tryrdlock.c
@@ -28,7 +28,7 @@ __pthread_rwlock_tryrdlock (rwlock)
 {
   int result = EBUSY;
 
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   if (rwlock->__data.__writer == 0
       && (rwlock->__data.__nr_writers_queued == 0
@@ -43,7 +43,7 @@ __pthread_rwlock_tryrdlock (rwlock)
 	result = 0;
     }
 
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
diff --git a/nptl/pthread_rwlock_trywrlock.c b/nptl/pthread_rwlock_trywrlock.c
index b754a19565..5111f9ca9a 100644
--- a/nptl/pthread_rwlock_trywrlock.c
+++ b/nptl/pthread_rwlock_trywrlock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -28,7 +28,7 @@ __pthread_rwlock_trywrlock (rwlock)
 {
   int result = EBUSY;
 
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
     {
@@ -36,7 +36,7 @@ __pthread_rwlock_trywrlock (rwlock)
       result = 0;
     }
 
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
diff --git a/nptl/pthread_rwlock_unlock.c b/nptl/pthread_rwlock_unlock.c
index 87a77a94ab..a7ef71a113 100644
--- a/nptl/pthread_rwlock_unlock.c
+++ b/nptl/pthread_rwlock_unlock.c
@@ -27,7 +27,7 @@
 int
 __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
 {
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
   if (rwlock->__data.__writer)
     rwlock->__data.__writer = 0;
   else
@@ -37,23 +37,21 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
       if (rwlock->__data.__nr_writers_queued)
 	{
 	  ++rwlock->__data.__writer_wakeup;
-	  lll_mutex_unlock (rwlock->__data.__lock);
+	  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 	  lll_futex_wake (&rwlock->__data.__writer_wakeup, 1,
-			  // XYZ check mutex flag
-			  LLL_SHARED);
+			  rwlock->__data.__shared);
 	  return 0;
 	}
       else if (rwlock->__data.__nr_readers_queued)
 	{
 	  ++rwlock->__data.__readers_wakeup;
-	  lll_mutex_unlock (rwlock->__data.__lock);
+	  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 	  lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
-			  // XYZ check mutex flag
-			  LLL_SHARED);
+			  rwlock->__data.__shared);
 	  return 0;
 	}
     }
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
   return 0;
 }
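
Every rwlock hunk passes rwlock->__data.__shared straight through as the new private argument, so that field must already contain LLL_PRIVATE or LLL_SHARED; presumably it is derived once from the pshared attribute when the rwlock is initialized, which is not part of this patch. A hypothetical helper showing that assumed selection, with the constants spelled as in the usual glibc encoding:

    /* Assumption for illustration: how the __shared field consumed above
       would be chosen from the process-shared attribute.  */
    #include <assert.h>
    #include <pthread.h>

    #define FUTEX_PRIVATE_FLAG  128
    #define LLL_PRIVATE         0
    #define LLL_SHARED          FUTEX_PRIVATE_FLAG

    static int
    rwlock_shared_field (int pshared)
    {
      return pshared == PTHREAD_PROCESS_SHARED ? LLL_SHARED : LLL_PRIVATE;
    }

    int
    main (void)
    {
      assert (rwlock_shared_field (PTHREAD_PROCESS_PRIVATE) == LLL_PRIVATE);
      assert (rwlock_shared_field (PTHREAD_PROCESS_SHARED) == LLL_SHARED);
      return 0;
    }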
 
diff --git a/nptl/pthread_rwlock_wrlock.c b/nptl/pthread_rwlock_wrlock.c
index 134b3e95d0..64fe970125 100644
--- a/nptl/pthread_rwlock_wrlock.c
+++ b/nptl/pthread_rwlock_wrlock.c
@@ -32,7 +32,7 @@ __pthread_rwlock_wrlock (rwlock)
   int result = 0;
 
   /* Make sure we are along.  */
-  lll_mutex_lock (rwlock->__data.__lock);
+  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   while (1)
     {
@@ -65,22 +65,21 @@ __pthread_rwlock_wrlock (rwlock)
       int waitval = rwlock->__data.__writer_wakeup;
 
       /* Free the lock.  */
-      lll_mutex_unlock (rwlock->__data.__lock);
+      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* Wait for the writer or reader(s) to finish.  */
       lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval,
-		      // XYZ check mutex flag
-		      LLL_SHARED);
+		      rwlock->__data.__shared);
 
       /* Get the lock.  */
-      lll_mutex_lock (rwlock->__data.__lock);
+      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
 
       /* To start over again, remove the thread from the writer list.  */
       --rwlock->__data.__nr_writers_queued;
     }
 
   /* We are done, free the lock.  */
-  lll_mutex_unlock (rwlock->__data.__lock);
+  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
 
   return result;
 }
diff --git a/nptl/pthread_setschedparam.c b/nptl/pthread_setschedparam.c
index 8129dec82c..1b0062facb 100644
--- a/nptl/pthread_setschedparam.c
+++ b/nptl/pthread_setschedparam.c
@@ -39,7 +39,7 @@ __pthread_setschedparam (threadid, policy, param)
 
   int result = 0;
 
-  lll_lock (pd->lock);
+  lll_lock (pd->lock, LLL_PRIVATE);
 
   struct sched_param p;
   const struct sched_param *orig_param = param;
@@ -67,7 +67,7 @@ __pthread_setschedparam (threadid, policy, param)
       pd->flags |= ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET;
     }
 
-  lll_unlock (pd->lock);
+  lll_unlock (pd->lock, LLL_PRIVATE);
 
   return result;
 }
diff --git a/nptl/pthread_setschedprio.c b/nptl/pthread_setschedprio.c
index 59462ec2a1..c99f7cec07 100644
--- a/nptl/pthread_setschedprio.c
+++ b/nptl/pthread_setschedprio.c
@@ -41,7 +41,7 @@ pthread_setschedprio (threadid, prio)
   struct sched_param param;
   param.sched_priority = prio;
 
-  lll_lock (pd->lock);
+  lll_lock (pd->lock, LLL_PRIVATE);
 
   /* If the thread should have higher priority because of some
      PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority.  */
@@ -60,7 +60,7 @@ pthread_setschedprio (threadid, prio)
       pd->flags |= ATTR_FLAG_SCHED_SET;
     }
 
-  lll_unlock (pd->lock);
+  lll_unlock (pd->lock, LLL_PRIVATE);
 
   return result;
 }
diff --git a/nptl/sem_close.c b/nptl/sem_close.c
index 279522d086..9bde63d983 100644
--- a/nptl/sem_close.c
+++ b/nptl/sem_close.c
@@ -47,7 +47,7 @@ sem_close (sem)
   int result = 0;
 
   /* Get the lock.  */
-  lll_lock (__sem_mappings_lock);
+  lll_lock (__sem_mappings_lock, LLL_PRIVATE);
 
   /* Locate the entry for the mapping the caller provided.  */
   rec = NULL;
@@ -75,7 +75,7 @@ sem_close (sem)
     }
 
   /* Release the lock.  */
-  lll_unlock (__sem_mappings_lock);
+  lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
 
   return result;
 }
diff --git a/nptl/sem_open.c b/nptl/sem_open.c
index 27d308e920..e58dde9472 100644
--- a/nptl/sem_open.c
+++ b/nptl/sem_open.c
@@ -147,7 +147,7 @@ __sem_search (const void *a, const void *b)
 void *__sem_mappings attribute_hidden;
 
 /* Lock to protect the search tree.  */
-lll_lock_t __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;
+int __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;
 
 
 /* Search for existing mapping and if possible add the one provided.  */
@@ -161,7 +161,7 @@ check_add_mapping (const char *name, size_t namelen, int fd, sem_t *existing)
   if (__fxstat64 (_STAT_VER, fd, &st) == 0)
     {
       /* Get the lock.  */
-      lll_lock (__sem_mappings_lock);
+      lll_lock (__sem_mappings_lock, LLL_PRIVATE);
 
       /* Search for an existing mapping given the information we have.  */
       struct inuse_sem *fake;
@@ -210,7 +210,7 @@ check_add_mapping (const char *name, size_t namelen, int fd, sem_t *existing)
 	}
 
       /* Release the lock.  */
-      lll_unlock (__sem_mappings_lock);
+      lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
     }
 
   if (result != existing && existing != SEM_FAILED && existing != MAP_FAILED)
diff --git a/nptl/semaphoreP.h b/nptl/semaphoreP.h
index 9659059900..7d6fd25259 100644
--- a/nptl/semaphoreP.h
+++ b/nptl/semaphoreP.h
@@ -48,7 +48,7 @@ extern pthread_once_t __namedsem_once attribute_hidden;
 extern void *__sem_mappings attribute_hidden;
 
 /* Lock to protect the search tree.  */
-extern lll_lock_t __sem_mappings_lock attribute_hidden;
+extern int __sem_mappings_lock attribute_hidden;
 
 
 /* Initializer for mountpoint.  */
diff --git a/nptl/sysdeps/pthread/bits/libc-lock.h b/nptl/sysdeps/pthread/bits/libc-lock.h
index 0c8c0ada88..45eba06133 100644
--- a/nptl/sysdeps/pthread/bits/libc-lock.h
+++ b/nptl/sysdeps/pthread/bits/libc-lock.h
@@ -228,7 +228,7 @@ typedef pthread_key_t __libc_key_t;
 /* Lock the named lock variable.  */
 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
 # define __libc_lock_lock(NAME) \
-  ({ lll_lock (NAME); 0; })
+  ({ lll_lock (NAME, LLL_PRIVATE); 0; })
 #else
 # define __libc_lock_lock(NAME) \
   __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
@@ -245,7 +245,7 @@ typedef pthread_key_t __libc_key_t;
     void *self = THREAD_SELF;						      \
     if ((NAME).owner != self)						      \
       {									      \
-	lll_lock ((NAME).lock);						      \
+	lll_lock ((NAME).lock, LLL_PRIVATE);				      \
 	(NAME).owner = self;						      \
       }									      \
     ++(NAME).cnt;							      \
@@ -299,7 +299,7 @@ typedef pthread_key_t __libc_key_t;
 /* Unlock the named lock variable.  */
 #if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
 # define __libc_lock_unlock(NAME) \
-  lll_unlock (NAME)
+  lll_unlock (NAME, LLL_PRIVATE)
 #else
 # define __libc_lock_unlock(NAME) \
   __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
@@ -315,7 +315,7 @@ typedef pthread_key_t __libc_key_t;
     if (--(NAME).cnt == 0)						      \
       {									      \
 	(NAME).owner = NULL;						      \
-	lll_unlock ((NAME).lock);					      \
+	lll_unlock ((NAME).lock, LLL_PRIVATE);				      \
       }									      \
   } while (0)
 #else
diff --git a/nptl/sysdeps/pthread/bits/stdio-lock.h b/nptl/sysdeps/pthread/bits/stdio-lock.h
index 5f2382104b..b8efdd8d5f 100644
--- a/nptl/sysdeps/pthread/bits/stdio-lock.h
+++ b/nptl/sysdeps/pthread/bits/stdio-lock.h
@@ -42,7 +42,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
     void *__self = THREAD_SELF;						      \
     if ((_name).owner != __self)					      \
       {									      \
-        lll_lock ((_name).lock);					      \
+	lll_lock ((_name).lock, LLL_PRIVATE);				      \
         (_name).owner = __self;						      \
       }									      \
     ++(_name).cnt;							      \
@@ -72,7 +72,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
     if (--(_name).cnt == 0)						      \
       {									      \
         (_name).owner = NULL;						      \
-        lll_unlock ((_name).lock);					      \
+	lll_unlock ((_name).lock, LLL_PRIVATE);				      \
       }									      \
   } while (0)
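
The libc-lock and stdio-lock recursive locks keep their structure: a plain low-level lock plus an owner word and a nesting count, with the base lock now always taken with LLL_PRIVATE because these locks never leave the process. The same scheme, reduced to a self-contained sketch on top of a pthread mutex instead of the lll_* lock; the owner identity here is just pthread_self cast to a pointer, whereas glibc uses the thread descriptor (THREAD_SELF).

    /* Recursive lock built the way _IO_lock_lock does it: skip the base
       lock when the calling thread already owns it, release only when the
       nesting count drops back to zero.  */
    #include <pthread.h>
    #include <stdio.h>

    struct rec_lock
    {
      pthread_mutex_t base;     /* stands in for the low-level futex lock */
      void *owner;              /* NULL while the lock is free */
      int cnt;
    };

    static void *
    self_id (void)
    {
      /* Any per-thread, never-NULL token works as an identity here.  */
      return (void *) pthread_self ();
    }

    static void
    rec_lock_lock (struct rec_lock *l)
    {
      void *self = self_id ();
      if (l->owner != self)
        {
          pthread_mutex_lock (&l->base);
          l->owner = self;
        }
      ++l->cnt;
    }

    static void
    rec_lock_unlock (struct rec_lock *l)
    {
      if (--l->cnt == 0)
        {
          l->owner = NULL;          /* clear ownership before releasing */
          pthread_mutex_unlock (&l->base);
        }
    }

    int
    main (void)
    {
      struct rec_lock l = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
      rec_lock_lock (&l);
      rec_lock_lock (&l);           /* nested acquisition, same thread */
      rec_lock_unlock (&l);
      rec_lock_unlock (&l);
      puts ("nested lock/unlock ok");
      return 0;
    }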
 
diff --git a/nptl/sysdeps/pthread/createthread.c b/nptl/sysdeps/pthread/createthread.c
index 88658a16e1..66571b2175 100644
--- a/nptl/sysdeps/pthread/createthread.c
+++ b/nptl/sysdeps/pthread/createthread.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -60,7 +60,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
     /* We Make sure the thread does not run far by forcing it to get a
        lock.  We lock it here too so that the new thread cannot continue
        until we tell it to.  */
-    lll_lock (pd->lock);
+    lll_lock (pd->lock, LLL_PRIVATE);
 
   /* One more thread.  We cannot have the thread do this itself, since it
      might exist but not have been scheduled yet by the time we've returned
@@ -223,7 +223,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
 	      __nptl_create_event ();
 
 	      /* And finally restart the new thread.  */
-	      lll_unlock (pd->lock);
+	      lll_unlock (pd->lock, LLL_PRIVATE);
 	    }
 
 	  return res;
@@ -250,7 +250,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
 
   if (res == 0 && stopped)
     /* And finally restart the new thread.  */
-    lll_unlock (pd->lock);
+    lll_unlock (pd->lock, LLL_PRIVATE);
 
   return res;
 }
diff --git a/nptl/sysdeps/sparc/tls.h b/nptl/sysdeps/sparc/tls.h
index 601b53732b..dc1b3868c9 100644
--- a/nptl/sysdeps/sparc/tls.h
+++ b/nptl/sysdeps/sparc/tls.h
@@ -27,6 +27,7 @@
 # include <stdint.h>
 # include <stdlib.h>
 # include <list.h>
+# include <kernel-features.h>
 
 /* Type for the dtv.  */
 typedef union dtv
@@ -55,6 +56,9 @@ typedef struct
 #if __WORDSIZE != 64
   int gscope_flag;
 #endif
+#ifndef __ASSUME_PRIVATE_FUTEX
+  int private_futex;
+#endif
 } tcbhead_t;
 
 #else /* __ASSEMBLER__ */
diff --git a/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
index 5f08673c43..f3f291979a 100644
--- a/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
@@ -70,9 +70,6 @@
 #endif
 
 
-/* Initializer for compatibility lock.	*/
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
 #define lll_futex_wait(futexp, val, private) \
   lll_futex_timed_wait (futexp, val, NULL, private)
 
@@ -96,7 +93,7 @@
     INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret;		      \
   })
 
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv) \
   do									      \
     {									      \
       int *__futexp = &(futexv);					      \
@@ -132,149 +129,130 @@
 
 
 static inline int __attribute__((always_inline))
-__lll_mutex_trylock(int *futex)
+__lll_trylock(int *futex)
 {
   return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
 }
-#define lll_mutex_trylock(lock)	__lll_mutex_trylock (&(lock))
+#define lll_trylock(lock)	__lll_trylock (&(lock))
 
 
 static inline int __attribute__((always_inline))
-__lll_mutex_cond_trylock(int *futex)
+__lll_cond_trylock(int *futex)
 {
   return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
 }
-#define lll_mutex_cond_trylock(lock)	__lll_mutex_cond_trylock (&(lock))
+#define lll_cond_trylock(lock)	__lll_cond_trylock (&(lock))
 
 
 static inline int __attribute__((always_inline))
-__lll_robust_mutex_trylock(int *futex, int id)
+__lll_robust_trylock(int *futex, int id)
 {
   return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
 }
-#define lll_robust_mutex_trylock(lock, id) \
-  __lll_robust_mutex_trylock (&(lock), id)
+#define lll_robust_trylock(lock, id) \
+  __lll_robust_trylock (&(lock), id)
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
 
 static inline void __attribute__((always_inline))
-__lll_mutex_lock(int *futex)
+__lll_lock(int *futex, int private)
 {
   if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    __lll_lock_wait (futex);
+    {
+      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+	__lll_lock_wait_private (futex);
+      else
+	__lll_lock_wait (futex, private);
+    }
 }
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
 
 
 static inline int __attribute__ ((always_inline))
-__lll_robust_mutex_lock (int *futex, int id)
+__lll_robust_lock (int *futex, int id, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_lock_wait (futex);
+    result = __lll_robust_lock_wait (futex, private);
   return result;
 }
-#define lll_robust_mutex_lock(futex, id) \
-  __lll_robust_mutex_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), id, private)
 
 
 static inline void __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
 {
   if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
-    __lll_lock_wait (futex);
+    __lll_lock_wait (futex, private);
 }
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
 
 
-#define lll_robust_mutex_cond_lock(futex, id) \
-  __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+#define lll_robust_cond_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
 
 
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
-	attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
-	attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+				 int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+					int private) attribute_hidden;
 
 static inline int __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    result = __lll_timedlock_wait (futex, abstime);
+    result = __lll_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_mutex_timedlock(futex, abstime) \
-  __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+  __lll_timedlock (&(futex), abstime, private)
 
 
 static inline int __attribute__ ((always_inline))
-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
-			      int id)
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+			int id, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
     result = __lll_robust_timedlock_wait (futex, abstime);
   return result;
 }
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
-  __lll_robust_mutex_timedlock (&(futex), abstime, id)
+#define lll_robust_timedlock(futex, abstime, id, private) \
+  __lll_robust_timedlock (&(futex), abstime, id, private)
 
 
 static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
+__lll_unlock (int *futex, int private)
 {
   int val = atomic_exchange_rel (futex, 0);
   if (__builtin_expect (val > 1, 0))
-    lll_futex_wake (futex, 1, LLL_SHARED);
+    lll_futex_wake (futex, 1, private);
 }
-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
 
 
 static inline void __attribute__ ((always_inline))
-__lll_robust_mutex_unlock (int *futex, int mask)
+__lll_robust_unlock (int *futex, int private)
 {
   int val = atomic_exchange_rel (futex, 0);
-  if (__builtin_expect (val & mask, 0))
-    lll_futex_wake (futex, 1, LLL_SHARED);
+  if (__builtin_expect (val & FUTEX_WAITERS, 0))
+    lll_futex_wake (futex, 1, private);
 }
-#define lll_robust_mutex_unlock(futex) \
-  __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
-
+#define lll_robust_unlock(futex, private) \
+  __lll_robust_unlock(&(futex), private)
 
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
-{
-  (void) atomic_exchange_rel (futex, 0);
-  lll_futex_wake (futex, 1, LLL_SHARED);
-}
-#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
 
-
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
   (futex != 0)
 
-
-/* Our internal lock implementation is identical to the binary-compatible
-   mutex implementation. */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER		(0)
 #define LLL_LOCK_INITIALIZER_LOCKED	(1)
 
-/* The states of a lock are:
-    0  -  untaken
-    1  -  taken by one user
-   >1  -  taken by more users */
-
-#define lll_trylock(lock)	lll_mutex_trylock (lock)
-#define lll_lock(lock)		lll_mutex_lock (lock)
-#define lll_unlock(lock)	lll_mutex_unlock (lock)
-#define lll_islocked(lock)	lll_mutex_islocked (lock)
 
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
@@ -298,26 +276,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;						\
   })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-				 const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 #endif	/* lowlevellock.h */
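
The rewritten alpha header shows the whole generic protocol in one place: 0 means free, 1 means locked with no waiters, 2 means locked with waiters; lll_lock tries a single compare-and-exchange, the wait routines store 2 and sleep in the futex, and lll_unlock wakes somebody only when the exchanged-out value was greater than 1. Here is a self-contained Linux sketch of that protocol, using raw syscall(2) and GCC __atomic builtins rather than the glibc-internal lll_*/atomic_* names, and assuming private futexes are available (Linux 2.6.22 and later):

    /* Minimal futex lock following the 0/1/2 protocol above.  */
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <pthread.h>
    #include <stdio.h>

    static int futex_word;     /* 0 = free, 1 = locked, 2 = locked, waiters */
    static long counter;

    static long
    futex (int *uaddr, int op, int val)
    {
      return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    static void
    lock (int *f)
    {
      int zero = 0;
      /* Fast path: 0 -> 1 with acquire semantics.  */
      if (__atomic_compare_exchange_n (f, &zero, 1, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        return;
      /* Slow path: advertise a waiter by storing 2, then sleep as long as
         the word still reads 2.  */
      while (__atomic_exchange_n (f, 2, __ATOMIC_ACQUIRE) != 0)
        futex (f, FUTEX_WAIT_PRIVATE, 2);
    }

    static void
    unlock (int *f)
    {
      /* Release the lock; wake one waiter only if somebody was queued.  */
      if (__atomic_exchange_n (f, 0, __ATOMIC_RELEASE) > 1)
        futex (f, FUTEX_WAKE_PRIVATE, 1);
    }

    static void *
    worker (void *arg)
    {
      for (int i = 0; i < 100000; ++i)
        {
          lock (&futex_word);
          ++counter;
          unlock (&futex_word);
        }
      return arg;
    }

    int
    main (void)
    {
      pthread_t t1, t2;
      pthread_create (&t1, NULL, worker, NULL);
      pthread_create (&t2, NULL, worker, NULL);
      pthread_join (t1, NULL);
      pthread_join (t2, NULL);
      printf ("counter = %ld (expect 200000)\n", counter);
      return counter == 200000 ? 0 : 1;
    }
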
diff --git a/nptl/sysdeps/unix/sysv/linux/fork.c b/nptl/sysdeps/unix/sysv/linux/fork.c
index f9913c343e..1683de80fd 100644
--- a/nptl/sysdeps/unix/sysv/linux/fork.c
+++ b/nptl/sysdeps/unix/sysv/linux/fork.c
@@ -183,7 +183,7 @@ __libc_fork (void)
 	}
 
       /* Initialize the fork lock.  */
-      __fork_lock = (lll_lock_t) LLL_LOCK_INITIALIZER;
+      __fork_lock = LLL_LOCK_INITIALIZER;
     }
   else
     {
diff --git a/nptl/sysdeps/unix/sysv/linux/fork.h b/nptl/sysdeps/unix/sysv/linux/fork.h
index 6458977b99..032b68f083 100644
--- a/nptl/sysdeps/unix/sysv/linux/fork.h
+++ b/nptl/sysdeps/unix/sysv/linux/fork.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -26,7 +26,7 @@ extern unsigned long int __fork_generation attribute_hidden;
 extern unsigned long int *__fork_generation_pointer attribute_hidden;
 
 /* Lock to protect allocation and deallocation of fork handlers.  */
-extern lll_lock_t __fork_lock attribute_hidden;
+extern int __fork_lock attribute_hidden;
 
 /* Elements of the fork handler lists.  */
 struct fork_handler
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
index 830f628578..ce8ad27aa7 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
@@ -17,19 +17,4 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
-#include <kernel-features.h>
-
-/* All locks in libc are private.  Use the kernel feature if possible.  */
-#define FUTEX_PRIVATE_FLAG	128
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define FUTEX_WAIT		(0 | FUTEX_PRIVATE_FLAG)
-# define FUTEX_WAKE		(1 | FUTEX_PRIVATE_FLAG)
-#else
-# define LOAD_FUTEX_WAIT(reg) \
-	movl	%gs:PRIVATE_FUTEX, reg
-# define LOAD_FUTEX_WAKE(reg) \
-	movl	%gs:PRIVATE_FUTEX, reg ; \
-	orl	$FUTEX_WAKE, reg
-#endif
-
 #include "lowlevellock.S"
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
index cfcc7dafc4..745ab91239 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
@@ -19,42 +19,53 @@
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
 
 	.text
 
-#ifndef LOCK
-# ifdef UP
-#  define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+	movl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAKE(reg) \
+	xorl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%gs:PRIVATE_FUTEX, reg
 # else
-#  define LOCK lock
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
 # endif
-#endif
-
-#define SYS_gettimeofday	__NR_gettimeofday
-#define SYS_futex		240
-#ifndef FUTEX_WAIT
-# define FUTEX_WAIT		0
-# define FUTEX_WAKE		1
-#endif
-
-#ifndef LOAD_FUTEX_WAIT
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+	movl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAKE, reg
 # if FUTEX_WAIT == 0
 #  define LOAD_FUTEX_WAIT(reg) \
-	xorl	reg, reg
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg
 # else
 #  define LOAD_FUTEX_WAIT(reg) \
-	movl	$FUTEX_WAIT, reg
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
 # endif
 # define LOAD_FUTEX_WAKE(reg) \
-	movl	$FUTEX_WAKE, reg
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAKE, reg
 #endif
 
-
-	.globl	__lll_mutex_lock_wait
-	.type	__lll_mutex_lock_wait,@function
-	.hidden	__lll_mutex_lock_wait
+	.globl	__lll_lock_wait_private
+	.type	__lll_lock_wait_private,@function
+	.hidden	__lll_lock_wait_private
 	.align	16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
 	cfi_startproc
 	pushl	%edx
 	cfi_adjust_cfa_offset(4)
@@ -69,7 +80,7 @@ __lll_mutex_lock_wait:
 	movl	$2, %edx
 	movl	%ecx, %ebx
 	xorl	%esi, %esi	/* No timeout.  */
-	LOAD_FUTEX_WAIT (%ecx)
+	LOAD_PRIVATE_FUTEX_WAIT (%ecx)
 
 	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
 	jne 2f
@@ -94,15 +105,60 @@ __lll_mutex_lock_wait:
 	cfi_restore(%edx)
 	ret
 	cfi_endproc
-	.size	__lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+	.size	__lll_lock_wait_private,.-__lll_lock_wait_private
 
 #ifdef NOT_IN_libc
-	.globl	__lll_mutex_timedlock_wait
-	.type	__lll_mutex_timedlock_wait,@function
-	.hidden	__lll_mutex_timedlock_wait
+	.globl	__lll_lock_wait
+	.type	__lll_lock_wait,@function
+	.hidden	__lll_lock_wait
 	.align	16
-__lll_mutex_timedlock_wait:
+__lll_lock_wait:
+	cfi_startproc
+	pushl	%edx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%esi
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%edx, -8)
+	cfi_offset(%ebx, -12)
+	cfi_offset(%esi, -16)
+
+	movl	%edx, %ebx
+	movl	$2, %edx
+	xorl	%esi, %esi	/* No timeout.  */
+	LOAD_FUTEX_WAIT (%ecx)
+
+	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
+	jne 2f
+
+1:	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+2:	movl	%edx, %eax
+	xchgl	%eax, (%ebx)	/* NB:	 lock is implied */
+
+	testl	%eax, %eax
+	jnz	1b
+
+	popl	%esi
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%esi)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	popl	%edx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edx)
+	ret
+	cfi_endproc
+	.size	__lll_lock_wait,.-__lll_lock_wait
+
+	.globl	__lll_timedlock_wait
+	.type	__lll_timedlock_wait,@function
+	.hidden	__lll_timedlock_wait
+	.align	16
+__lll_timedlock_wait:
 	cfi_startproc
 	/* Check for a valid timeout value.  */
 	cmpl	$1000000000, 4(%edx)
@@ -132,7 +188,7 @@ __lll_mutex_timedlock_wait:
 	/* Get current time.  */
 	movl	%esp, %ebx
 	xorl	%ecx, %ecx
-	movl	$SYS_gettimeofday, %eax
+	movl	$__NR_gettimeofday, %eax
 	ENTER_KERNEL
 
 	/* Compute relative timeout.  */
@@ -165,6 +221,7 @@ __lll_mutex_timedlock_wait:
 
 	/* Futex call.  */
 	movl	%esp, %esi
+	movl	16(%esp), %ecx
 	LOAD_FUTEX_WAIT (%ecx)
 	movl	$SYS_futex, %eax
 	ENTER_KERNEL
@@ -215,15 +272,51 @@ __lll_mutex_timedlock_wait:
 5:	movl	$ETIMEDOUT, %eax
 	jmp	6b
 	cfi_endproc
-	.size	__lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+	.size	__lll_timedlock_wait,.-__lll_timedlock_wait
 #endif
 
+	.globl	__lll_unlock_wake_private
+	.type	__lll_unlock_wake_private,@function
+	.hidden	__lll_unlock_wake_private
+	.align	16
+__lll_unlock_wake_private:
+	cfi_startproc
+	pushl	%ebx
+	cfi_adjust_cfa_offset(4)
+	pushl	%ecx
+	cfi_adjust_cfa_offset(4)
+	pushl	%edx
+	cfi_adjust_cfa_offset(4)
+	cfi_offset(%ebx, -8)
+	cfi_offset(%ecx, -12)
+	cfi_offset(%edx, -16)
+
+	movl	%eax, %ebx
+	movl	$0, (%eax)
+	LOAD_PRIVATE_FUTEX_WAKE (%ecx)
+	movl	$1, %edx	/* Wake one thread.  */
+	movl	$SYS_futex, %eax
+	ENTER_KERNEL
+
+	popl	%edx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%edx)
+	popl	%ecx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ecx)
+	popl	%ebx
+	cfi_adjust_cfa_offset(-4)
+	cfi_restore(%ebx)
+	ret
+	cfi_endproc
+	.size	__lll_unlock_wake_private,.-__lll_unlock_wake_private
 
-	.globl	__lll_mutex_unlock_wake
-	.type	__lll_mutex_unlock_wake,@function
-	.hidden	__lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+	.globl	__lll_unlock_wake
+	.type	__lll_unlock_wake,@function
+	.hidden	__lll_unlock_wake
 	.align	16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake:
 	cfi_startproc
 	pushl	%ebx
 	cfi_adjust_cfa_offset(4)
@@ -253,10 +346,8 @@ __lll_mutex_unlock_wake:
 	cfi_restore(%ebx)
 	ret
 	cfi_endproc
-	.size	__lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+	.size	__lll_unlock_wake,.-__lll_unlock_wake
 
-
-#ifdef NOT_IN_libc
 	.globl	__lll_timedwait_tid
 	.type	__lll_timedwait_tid,@function
 	.hidden	__lll_timedwait_tid
@@ -274,7 +365,7 @@ __lll_timedwait_tid:
 	/* Get current time.  */
 2:	movl	%esp, %ebx
 	xorl	%ecx, %ecx
-	movl	$SYS_gettimeofday, %eax
+	movl	$__NR_gettimeofday, %eax
 	ENTER_KERNEL
 
 	/* Compute relative timeout.  */
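
These stubs expect the LLL_PRIVATE/LLL_SHARED value in %ecx by the time LOAD_FUTEX_WAIT or LOAD_FUTEX_WAKE runs, and the macros fold it into the futex operation: XOR with FUTEX_PRIVATE_FLAG, AND with the per-thread %gs:PRIVATE_FUTEX word (which caches whether the kernel accepts private futex operations; the private_futex TCB member added for sparc above serves the same purpose), then OR in the opcode. The same computation written out in C, assuming the usual encoding LLL_PRIVATE == 0 and LLL_SHARED == FUTEX_PRIVATE_FLAG; lowlevelrobustlock.S below repeats the identical macro.

    /* What LOAD_FUTEX_WAIT computes.  tcb_private_futex stands for the
       value cached at %gs:PRIVATE_FUTEX: FUTEX_PRIVATE_FLAG when the
       kernel supports private futexes, 0 otherwise.  */
    #include <assert.h>

    #define FUTEX_WAIT          0
    #define FUTEX_PRIVATE_FLAG  128
    #define LLL_PRIVATE         0
    #define LLL_SHARED          FUTEX_PRIVATE_FLAG

    static int
    load_futex_wait (int lll_private, int tcb_private_futex)
    {
      return ((lll_private ^ FUTEX_PRIVATE_FLAG) & tcb_private_futex)
             | FUTEX_WAIT;
    }

    int
    main (void)
    {
      /* Private locks use the private op only when the kernel has it.  */
      assert (load_futex_wait (LLL_PRIVATE, FUTEX_PRIVATE_FLAG)
              == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
      assert (load_futex_wait (LLL_PRIVATE, 0) == FUTEX_WAIT);

      /* Process-shared locks never use the private op.  */
      assert (load_futex_wait (LLL_SHARED, FUTEX_PRIVATE_FLAG) == FUTEX_WAIT);
      assert (load_futex_wait (LLL_SHARED, 0) == FUTEX_WAIT);
      return 0;
    }
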
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
index 73d8bc4ccc..7c2e1d135c 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -19,31 +19,36 @@
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
+#include <lowlevellock.h>
 #include <lowlevelrobustlock.h>
+#include <kernel-features.h>
 
 	.text
 
-#ifndef LOCK
-# ifdef UP
-#  define LOCK
-# else
-#  define LOCK lock
-# endif
-#endif
-
-#define SYS_gettimeofday	__NR_gettimeofday
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
 #define FUTEX_WAITERS		0x80000000
 #define FUTEX_OWNER_DIED	0x40000000
 
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%gs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
+# endif
+#endif
 
-	.globl	__lll_robust_mutex_lock_wait
-	.type	__lll_robust_mutex_lock_wait,@function
-	.hidden	__lll_robust_mutex_lock_wait
+	.globl	__lll_robust_lock_wait
+	.type	__lll_robust_lock_wait,@function
+	.hidden	__lll_robust_lock_wait
 	.align	16
-__lll_robust_mutex_lock_wait:
+__lll_robust_lock_wait:
 	cfi_startproc
 	pushl	%edx
 	cfi_adjust_cfa_offset(4)
@@ -55,9 +60,9 @@ __lll_robust_mutex_lock_wait:
 	cfi_offset(%ebx, -12)
 	cfi_offset(%esi, -16)
 
-	movl	%ecx, %ebx
+	movl	%edx, %ebx
 	xorl	%esi, %esi	/* No timeout.  */
-	xorl	%ecx, %ecx	/* movl $FUTEX_WAIT, %ecx */
+	LOAD_FUTEX_WAIT (%ecx)
 
 4:	movl	%eax, %edx
 	orl	$FUTEX_WAITERS, %edx
@@ -98,14 +103,14 @@ __lll_robust_mutex_lock_wait:
 	cfi_restore(%edx)
 	ret
 	cfi_endproc
-	.size	__lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
+	.size	__lll_robust_lock_wait,.-__lll_robust_lock_wait
 
 
-	.globl	__lll_robust_mutex_timedlock_wait
-	.type	__lll_robust_mutex_timedlock_wait,@function
-	.hidden	__lll_robust_mutex_timedlock_wait
+	.globl	__lll_robust_timedlock_wait
+	.type	__lll_robust_timedlock_wait,@function
+	.hidden	__lll_robust_timedlock_wait
 	.align	16
-__lll_robust_mutex_timedlock_wait:
+__lll_robust_timedlock_wait:
 	cfi_startproc
 	/* Check for a valid timeout value.  */
 	cmpl	$1000000000, 4(%edx)
@@ -136,7 +141,7 @@ __lll_robust_mutex_timedlock_wait:
 	/* Get current time.  */
 	movl	%esp, %ebx
 	xorl	%ecx, %ecx
-	movl	$SYS_gettimeofday, %eax
+	movl	$__NR_gettimeofday, %eax
 	ENTER_KERNEL
 
 	/* Compute relative timeout.  */
@@ -177,7 +182,8 @@ __lll_robust_mutex_timedlock_wait:
 2:
 	/* Futex call.  */
 	movl	%esp, %esi
-	xorl	%ecx, %ecx	/* movl $FUTEX_WAIT, %ecx */
+	movl	20(%esp), %ecx
+	LOAD_FUTEX_WAIT (%ecx)
 	movl	$SYS_futex, %eax
 	ENTER_KERNEL
 	movl	%eax, %ecx
@@ -224,4 +230,4 @@ __lll_robust_mutex_timedlock_wait:
 8:	movl	$ETIMEDOUT, %eax
 	jmp	6b
 	cfi_endproc
-	.size	__lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
+	.size	__lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
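
The LOAD_FUTEX_WAIT macro added at the top of this file derives the futex
operation word from the private/shared selector handed to the stub in a
register.  Roughly, and assuming the usual encoding in which LLL_PRIVATE
is 0 and LLL_SHARED equals FUTEX_PRIVATE_FLAG (the header comment calls
this encoding "backwards"), the computation matches this C sketch (names
other than the FUTEX_* constants are illustrative):

#define FUTEX_WAIT		0
#define FUTEX_PRIVATE_FLAG	128
#define LLL_PRIVATE		0
#define LLL_SHARED		FUTEX_PRIVATE_FLAG

/* Value LOAD_FUTEX_WAIT leaves in the register.  'private' is LLL_PRIVATE
   or LLL_SHARED; 'tcb_private_futex' stands for the %gs:PRIVATE_FUTEX
   word, which holds FUTEX_PRIVATE_FLAG on kernels supporting private
   futexes and 0 otherwise.  */
static int
load_futex_wait (int private, int tcb_private_futex)
{
  int op = private ^ FUTEX_PRIVATE_FLAG;  /* undo the inverted encoding */
  op &= tcb_private_futex;                /* drop the flag on old kernels */
  return op | FUTEX_WAIT;                 /* FUTEX_WAIT is 0, so this is free */
}

When __ASSUME_PRIVATE_FUTEX is defined the mask against the TCB word is
unnecessary and the macro collapses to a single xorl with
(FUTEX_WAIT | FUTEX_PRIVATE_FLAG).
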
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
index 29857195f0..77d252de8f 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
@@ -18,19 +18,9 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelbarrier.h>
 
-#define SYS_futex	240
-#define FUTEX_WAIT	0
-#define FUTEX_WAKE	1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	pthread_barrier_wait
@@ -152,19 +142,27 @@ pthread_barrier_wait:
 	popl	%ebx
 	ret
 
-1:	leal	MUTEX(%ebx), %ecx
-	call	__lll_mutex_lock_wait
+1:	movl	PRIVATE(%ebx), %ecx
+	leal	MUTEX(%ebx), %edx
+	xorl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
-4:	leal	MUTEX(%ebx), %eax
-	call	__lll_mutex_unlock_wake
+4:	movl	PRIVATE(%ebx), %ecx
+	leal	MUTEX(%ebx), %eax
+	xorl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	5b
 
-6:	leal	MUTEX(%ebx), %eax
-	call	__lll_mutex_unlock_wake
+6:	movl	PRIVATE(%ebx), %ecx
+	leal	MUTEX(%ebx), %eax
+	xorl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	7b
 
-9:	leal	MUTEX(%ebx), %eax
-	call	__lll_mutex_unlock_wake
+9:	movl	PRIVATE(%ebx), %ecx
+	leal	MUTEX(%ebx), %eax
+	xorl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	10b
 	.size	pthread_barrier_wait,.-pthread_barrier_wait
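
The rewritten slow paths also show the new calling convention for the
low-level lock helpers: the lock address is passed in %edx on the
lock-wait paths and in %eax on the unlock-wake paths, while %ecx carries
the private/shared selector (here obtained by XORing the barrier's stored
PRIVATE word with LLL_SHARED).  At the C level the helpers correspond
roughly to the declarations below; this is a sketch based on the names
visible in the patch (the ia64 header later in the diff declares the
lock-side functions this way), and on i386 the arguments are actually
passed in registers via regparm conventions:

/* Hypothetical C-level view of the renamed helpers.  */
extern void __lll_lock_wait_private (int *futex);
extern void __lll_lock_wait (int *futex, int private);
extern int  __lll_robust_lock_wait (int *futex, int private);
extern void __lll_unlock_wake_private (int *futex);
extern void __lll_unlock_wake (int *futex, int private);
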
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
index 56f7be8246..122d83afee 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -19,24 +19,11 @@
 
 #include <sysdep.h>
 #include <shlib-compat.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <kernel-features.h>
 #include <pthread-pi-defines.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_REQUEUE		3
-#define FUTEX_CMP_REQUEUE	4
-
-#define EINVAL			22
-
+#include <pthread-errnos.h>
 
 	.text
 
@@ -141,21 +128,27 @@ __pthread_cond_broadcast:
 	/* Initial locking failed.  */
 1:
 #if cond_lock == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	cond_lock(%ebx), %ecx
+	leal	cond_lock(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
 	/* Unlock in loop requires wakeup.  */
 5:	leal	cond_lock-cond_futex(%ebx), %eax
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	6b
 
 	/* Unlock in loop requires wakeup.  */
 7:	leal	cond_lock-cond_futex(%ebx), %eax
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	8b
 
 9:	/* The futex requeue functionality is not available.  */
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
index d0f931ff15..e3510c8ab1 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -19,23 +19,10 @@
 
 #include <sysdep.h>
 #include <shlib-compat.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_WAKE_OP		5
-
-#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
-
-#define EINVAL			22
+#include <pthread-errnos.h>
 
 
 	.text
@@ -119,17 +106,21 @@ __pthread_cond_signal:
 
 	/* Unlock in loop requires wakeup.  */
 5:	movl	%edi, %eax
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	6b
 
 	/* Initial locking failed.  */
 1:
 #if cond_lock == 0
-	movl	%edi, %ecx
+	movl	%edi, %edx
 #else
-	leal	cond_lock(%edi), %ecx
+	leal	cond_lock(%edi), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
 	.size	__pthread_cond_signal, .-__pthread_cond_signal
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
index 93f4d56b32..79a7497e8c 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
@@ -19,20 +19,10 @@
 
 #include <sysdep.h>
 #include <shlib-compat.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <pthread-errnos.h>
 
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_gettimeofday	__NR_gettimeofday
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-
 
 	.text
 
@@ -127,7 +117,7 @@ __pthread_cond_timedwait:
 	/* Get the current time.  */
 	leal	4(%esp), %ebx
 	xorl	%ecx, %ecx
-	movl	$SYS_gettimeofday, %eax
+	movl	$__NR_gettimeofday, %eax
 	ENTER_KERNEL
 	movl	%edx, %ebx
 
@@ -285,11 +275,13 @@ __pthread_cond_timedwait:
 1:
 .LSbl1:
 #if cond_lock == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	cond_lock(%ebx), %ecx
+	leal	cond_lock(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
 	/* Unlock in loop requires wakeup.  */
@@ -300,17 +292,21 @@ __pthread_cond_timedwait:
 #else
 	leal	cond_lock(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	4b
 
 	/* Locking in loop failed.  */
 5:
 #if cond_lock == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	cond_lock(%ebx), %ecx
+	leal	cond_lock(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
 	jmp	6b
 
 	/* Unlock after loop requires wakeup.  */
@@ -320,7 +316,9 @@ __pthread_cond_timedwait:
 #else
 	leal	cond_lock(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	11b
 
 	/* The initial unlocking of the mutex failed.  */
@@ -340,7 +338,9 @@ __pthread_cond_timedwait:
 #else
 	leal	cond_lock(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 
 	movl	%esi, %eax
 	jmp	18b
@@ -350,7 +350,7 @@ __pthread_cond_timedwait:
 .LSbl4:
 19:	leal	4(%esp), %ebx
 	xorl	%ecx, %ecx
-	movl	$SYS_gettimeofday, %eax
+	movl	$__NR_gettimeofday, %eax
 	ENTER_KERNEL
 	movl	%edx, %ebx
 
@@ -396,11 +396,13 @@ __condvar_tw_cleanup:
 	jz	1f
 
 #if cond_lock == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	cond_lock(%ebx), %ecx
+	leal	cond_lock(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
 
 1:	movl	broadcast_seq(%ebx), %eax
 	cmpl	20(%esp), %eax
@@ -457,7 +459,9 @@ __condvar_tw_cleanup:
 #else
 	leal	cond_lock(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 
 	/* Wake up all waiters to make sure no signal gets lost.  */
 2:	testl	%edi, %edi
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
index c92cfbc718..68741bffe8 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
@@ -19,19 +19,10 @@
 
 #include <sysdep.h>
 #include <shlib-compat.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <tcb-offsets.h>
 
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-
 
 	.text
 
@@ -202,11 +193,13 @@ __pthread_cond_wait:
 1:
 .LSbl1:
 #if cond_lock == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	cond_lock(%ebx), %ecx
+	leal	cond_lock(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
 	/* Unlock in loop requires wakeup.  */
@@ -217,17 +210,21 @@ __pthread_cond_wait:
 #else
 	leal	cond_lock(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	4b
 
 	/* Locking in loop failed.  */
 5:
 #if cond_lock == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	cond_lock(%ebx), %ecx
+	leal	cond_lock(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
 	jmp	6b
 
 	/* Unlock after loop requires wakeup.  */
@@ -237,7 +234,9 @@ __pthread_cond_wait:
 #else
 	leal	cond_lock(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 	jmp	11b
 
 	/* The initial unlocking of the mutex failed.  */
@@ -257,7 +256,9 @@ __pthread_cond_wait:
 #else
 	leal	cond_lock(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 
 	movl	%esi, %eax
 	jmp	14b
@@ -287,11 +288,13 @@ __condvar_w_cleanup:
 	jz	1f
 
 #if cond_lock == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	cond_lock(%ebx), %ecx
+	leal	cond_lock(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_lock_wait
 
 1:	movl	broadcast_seq(%ebx), %eax
 	cmpl	12(%esp), %eax
@@ -348,7 +351,9 @@ __condvar_w_cleanup:
 #else
 	leal	cond_lock(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %ecx
+	call	__lll_unlock_wake
 
 	/* Wake up all waiters to make sure no signal gets lost.  */
 2:	testl	%edi, %edi
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
index c61c697985..d8a62ed2b7 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
@@ -18,21 +18,11 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 
 
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	__pthread_rwlock_rdlock
@@ -108,7 +98,7 @@ __pthread_rwlock_rdlock:
 13:	subl	$1, READERS_QUEUED(%ebx)
 	jmp	2b
 
-5:	xorl	%ecx, %ecx
+5:	xorl	%edx, %edx
 	addl	$1, NR_READERS(%ebx)
 	je	8f
 9:	LOCK
@@ -120,24 +110,25 @@ __pthread_rwlock_rdlock:
 	jne	6f
 7:
 
-	movl	%ecx, %eax
+	movl	%edx, %eax
 	popl	%ebx
 	popl	%esi
 	ret
 
 1:
 #if MUTEX == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	MUTEX(%ebx), %ecx
+	leal	MUTEX(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	movl	PSHARED(%ebx), %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
 14:	cmpl	%gs:TID, %eax
 	jne	3b
 	/* Deadlock detected.  */
-	movl	$EDEADLK, %ecx
+	movl	$EDEADLK, %edx
 	jmp	9b
 
 6:
@@ -146,17 +137,18 @@ __pthread_rwlock_rdlock:
 #else
 	leal	MUTEX(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%ebx), %ecx
+	call	__lll_unlock_wake
 	jmp	7b
 
 	/* Overflow.  */
 8:	subl	$1, NR_READERS(%ebx)
-	movl	$EAGAIN, %ecx
+	movl	$EAGAIN, %edx
 	jmp	9b
 
 	/* Overflow.  */
 4:	subl	$1, READERS_QUEUED(%ebx)
-	movl	$EAGAIN, %ecx
+	movl	$EAGAIN, %edx
 	jmp	9b
 
 10:
@@ -165,16 +157,18 @@ __pthread_rwlock_rdlock:
 #else
 	leal	MUTEX(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%ebx), %ecx
+	call	__lll_unlock_wake
 	jmp	11b
 
 12:
 #if MUTEX == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	MUTEX(%ebx), %ecx
+	leal	MUTEX(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	movl	PSHARED(%ebx), %ecx
+	call	__lll_lock_wait
 	jmp	13b
 	.size	__pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
 
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
index c6a584fed0..71b97c60f5 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
@@ -18,22 +18,11 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 
 
-#define SYS_gettimeofday	__NR_gettimeofday
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	pthread_rwlock_timedrdlock
@@ -88,7 +77,7 @@ pthread_rwlock_timedrdlock:
 	/* Get current time.  */
 11:	movl	%esp, %ebx
 	xorl	%ecx, %ecx
-	movl	$SYS_gettimeofday, %eax
+	movl	$__NR_gettimeofday, %eax
 	ENTER_KERNEL
 
 	/* Compute relative timeout.  */
@@ -124,7 +113,7 @@ pthread_rwlock_timedrdlock:
 	leal	READERS_WAKEUP(%ebp), %ebx
 	movl	$SYS_futex, %eax
 	ENTER_KERNEL
-	movl	%eax, %ecx
+	movl	%eax, %esi
 17:
 
 	/* Reget the lock.  */
@@ -139,14 +128,14 @@ pthread_rwlock_timedrdlock:
 	jnz	12f
 
 13:	subl	$1, READERS_QUEUED(%ebp)
-	cmpl	$-ETIMEDOUT, %ecx
+	cmpl	$-ETIMEDOUT, %esi
 	jne	2b
 
-18:	movl	$ETIMEDOUT, %ecx
+18:	movl	$ETIMEDOUT, %edx
 	jmp	9f
 
 
-5:	xorl	%ecx, %ecx
+5:	xorl	%edx, %edx
 	addl	$1, NR_READERS(%ebp)
 	je	8f
 9:	LOCK
@@ -157,7 +146,7 @@ pthread_rwlock_timedrdlock:
 #endif
 	jne	6f
 
-7:	movl	%ecx, %eax
+7:	movl	%edx, %eax
 
 	addl	$8, %esp
 	popl	%ebp
@@ -168,16 +157,17 @@ pthread_rwlock_timedrdlock:
 
 1:
 #if MUTEX == 0
-	movl	%ebp, %ecx
+	movl	%ebp, %edx
 #else
-	leal	MUTEX(%ebp), %ecx
+	leal	MUTEX(%ebp), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	movl	PSHARED(%ebp), %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
 14:	cmpl	%gs:TID, %eax
 	jne	3b
-	movl	$EDEADLK, %ecx
+	movl	$EDEADLK, %edx
 	jmp	9b
 
 6:
@@ -186,17 +176,18 @@ pthread_rwlock_timedrdlock:
 #else
 	leal	MUTEX(%ebp), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%ebp), %ecx
+	call	__lll_unlock_wake
 	jmp	7b
 
 	/* Overflow.  */
 8:	subl	$1, NR_READERS(%ebp)
-	movl	$EAGAIN, %ecx
+	movl	$EAGAIN, %edx
 	jmp	9b
 
 	/* Overflow.  */
 4:	subl	$1, READERS_QUEUED(%ebp)
-	movl	$EAGAIN, %ecx
+	movl	$EAGAIN, %edx
 	jmp	9b
 
 10:
@@ -205,21 +196,23 @@ pthread_rwlock_timedrdlock:
 #else
 	leal	MUTEX(%ebp), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%ebp), %ecx
+	call	__lll_unlock_wake
 	jmp	11b
 
 12:
 #if MUTEX == 0
-	movl	%ebp, %ecx
+	movl	%ebp, %edx
 #else
-	leal	MUTEX(%ebp), %ecx
+	leal	MUTEX(%ebp), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	movl	PSHARED(%ebp), %ecx
+	call	__lll_lock_wait
 	jmp	13b
 
-16:	movl	$-ETIMEDOUT, %ecx
+16:	movl	$-ETIMEDOUT, %esi
 	jmp	17b
 
-19:	movl	$EINVAL, %ecx
+19:	movl	$EINVAL, %edx
 	jmp	9b
 	.size	pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
index 5e9faf93fb..c002472085 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
@@ -18,22 +18,11 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 
 
-#define SYS_gettimeofday	__NR_gettimeofday
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	pthread_rwlock_timedwrlock
@@ -86,7 +75,7 @@ pthread_rwlock_timedwrlock:
 	/* Get current time.  */
 11:	movl	%esp, %ebx
 	xorl	%ecx, %ecx
-	movl	$SYS_gettimeofday, %eax
+	movl	$__NR_gettimeofday, %eax
 	ENTER_KERNEL
 
 	/* Compute relative timeout.  */
@@ -122,7 +111,7 @@ pthread_rwlock_timedwrlock:
 	leal	WRITERS_WAKEUP(%ebp), %ebx
 	movl	$SYS_futex, %eax
 	ENTER_KERNEL
-	movl	%eax, %ecx
+	movl	%eax, %esi
 17:
 
 	/* Reget the lock.  */
@@ -137,14 +126,14 @@ pthread_rwlock_timedwrlock:
 	jnz	12f
 
 13:	subl	$1, WRITERS_QUEUED(%ebp)
-	cmpl	$-ETIMEDOUT, %ecx
+	cmpl	$-ETIMEDOUT, %esi
 	jne	2b
 
-18:	movl	$ETIMEDOUT, %ecx
+18:	movl	$ETIMEDOUT, %edx
 	jmp	9f
 
 
-5:	xorl	%ecx, %ecx
+5:	xorl	%edx, %edx
 	movl	%gs:TID, %eax
 	movl	%eax, WRITER(%ebp)
 9:	LOCK
@@ -155,7 +144,7 @@ pthread_rwlock_timedwrlock:
 #endif
 	jne	6f
 
-7:	movl	%ecx, %eax
+7:	movl	%edx, %eax
 
 	addl	$8, %esp
 	popl	%ebp
@@ -166,16 +155,17 @@ pthread_rwlock_timedwrlock:
 
 1:
 #if MUTEX == 0
-	movl	%ebp, %ecx
+	movl	%ebp, %edx
 #else
-	leal	MUTEX(%ebp), %ecx
+	leal	MUTEX(%ebp), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	movl	PSHARED(%ebp), %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
 14:	cmpl	%gs:TID, %eax
 	jne	3b
-20:	movl	$EDEADLK, %ecx
+20:	movl	$EDEADLK, %edx
 	jmp	9b
 
 6:
@@ -184,12 +174,13 @@ pthread_rwlock_timedwrlock:
 #else
 	leal	MUTEX(%ebp), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%ebp), %ecx
+	call	__lll_unlock_wake
 	jmp	7b
 
 	/* Overflow.  */
 4:	subl	$1, WRITERS_QUEUED(%ebp)
-	movl	$EAGAIN, %ecx
+	movl	$EAGAIN, %edx
 	jmp	9b
 
 10:
@@ -198,21 +189,23 @@ pthread_rwlock_timedwrlock:
 #else
 	leal	MUTEX(%ebp), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%ebp), %ecx
+	call	__lll_unlock_wake
 	jmp	11b
 
 12:
 #if MUTEX == 0
-	movl	%ebp, %ecx
+	movl	%ebp, %edx
 #else
-	leal	MUTEX(%ebp), %ecx
+	leal	MUTEX(%ebp), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	movl	PSHARED(%ebp), %ecx
+	call	__lll_lock_wait
 	jmp	13b
 
-16:	movl	$-ETIMEDOUT, %ecx
+16:	movl	$-ETIMEDOUT, %esi
 	jmp	17b
 
-19:	movl	$EINVAL, %ecx
+19:	movl	$EINVAL, %edx
 	jmp	9b
 	.size	pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
index 35c40c2c1c..fdad432e30 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
@@ -18,20 +18,10 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 
 
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	__pthread_rwlock_unlock
@@ -115,29 +105,32 @@ __pthread_rwlock_unlock:
 
 1:
 #if MUTEX == 0
-	movl	%edi, %ecx
+	movl	%edi, %edx
 #else
-	leal	MUTEX(%edx), %ecx
+	leal	MUTEX(%edi), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	movl	PSHARED(%edi), %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
 3:
 #if MUTEX == 0
 	movl	%edi, %eax
 #else
-	leal	MUTEX(%edx), %eax
+	leal	MUTEX(%edi), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%edi), %ecx
+	call	__lll_unlock_wake
 	jmp	4b
 
 7:
 #if MUTEX == 0
 	movl	%edi, %eax
 #else
-	leal	MUTEX(%edx), %eax
+	leal	MUTEX(%edi), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%edi), %ecx
+	call	__lll_unlock_wake
 	jmp	8b
 
 	.size	__pthread_rwlock_unlock,.-__pthread_rwlock_unlock
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
index 88044c040b..3f55c82930 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
@@ -18,21 +18,11 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 
 
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	__pthread_rwlock_wrlock
@@ -106,7 +96,7 @@ __pthread_rwlock_wrlock:
 13:	subl	$1, WRITERS_QUEUED(%ebx)
 	jmp	2b
 
-5:	xorl	%ecx, %ecx
+5:	xorl	%edx, %edx
 	movl	%gs:TID, %eax
 	movl	%eax, WRITER(%ebx)
 9:	LOCK
@@ -118,23 +108,24 @@ __pthread_rwlock_wrlock:
 	jne	6f
 7:
 
-	movl	%ecx, %eax
+	movl	%edx, %eax
 	popl	%ebx
 	popl	%esi
 	ret
 
 1:
 #if MUTEX == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	MUTEX(%ebx), %ecx
+	leal	MUTEX(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	movl	PSHARED(%ebx), %ecx
+	call	__lll_lock_wait
 	jmp	2b
 
 14:	cmpl	%gs:TID	, %eax
 	jne	3b
-	movl	$EDEADLK, %ecx
+	movl	$EDEADLK, %edx
 	jmp	9b
 
 6:
@@ -143,11 +134,12 @@ __pthread_rwlock_wrlock:
 #else
 	leal	MUTEX(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%ebx), %ecx
+	call	__lll_unlock_wake
 	jmp	7b
 
 4:	subl	$1, WRITERS_QUEUED(%ebx)
-	movl	$EAGAIN, %ecx
+	movl	$EAGAIN, %edx
 	jmp	9b
 
 10:
@@ -156,16 +148,18 @@ __pthread_rwlock_wrlock:
 #else
 	leal	MUTEX(%ebx), %eax
 #endif
-	call	__lll_mutex_unlock_wake
+	movl	PSHARED(%ebx), %ecx
+	call	__lll_unlock_wake
 	jmp	11b
 
 12:
 #if MUTEX == 0
-	movl	%ebx, %ecx
+	movl	%ebx, %edx
 #else
-	leal	MUTEX(%ebx), %ecx
+	leal	MUTEX(%ebx), %edx
 #endif
-	call	__lll_mutex_lock_wait
+	movl	PSHARED(%ebx), %ecx
+	call	__lll_lock_wait
 	jmp	13b
 	.size	__pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
 
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
index 280dc2fe27..8f656b4e0f 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
@@ -21,15 +21,7 @@
 #include <shlib-compat.h>
 #include <pthread-errnos.h>
 #include <structsem.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex		240
-#define FUTEX_WAKE		1
+#include <lowlevellock.h>
 
 
 	.text
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
index 57b5b58186..13a36d64cc 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
@@ -21,16 +21,7 @@
 #include <shlib-compat.h>
 #include <pthread-errnos.h>
 #include <structsem.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_gettimeofday	__NR_gettimeofday
-#define SYS_futex		240
-#define FUTEX_WAIT		0
+#include <lowlevellock.h>
 
 
 #if VALUE != 0
@@ -82,7 +73,7 @@ sem_timedwait:
 7:	xorl	%ecx, %ecx
 	movl	%esp, %ebx
 	movl	%ecx, %edx
-	movl	$SYS_gettimeofday, %eax
+	movl	$__NR_gettimeofday, %eax
 	ENTER_KERNEL
 
 	/* Compute relative timeout.  */
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
index fbc3b3c932..2d49934f02 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -20,12 +20,7 @@
 #include <sysdep.h>
 #include <shlib-compat.h>
 #include <pthread-errnos.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
+#include <lowlevellock.h>
 
 	.text
 
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
index d0eef75144..93c0a64a9e 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
@@ -21,15 +21,7 @@
 #include <shlib-compat.h>
 #include <pthread-errnos.h>
 #include <structsem.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex		240
-#define FUTEX_WAIT		0
+#include <lowlevellock.h>
 
 
 #if VALUE != 0
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index 56f63a4623..2f663aa68b 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -20,28 +20,41 @@
 #ifndef _LOWLEVELLOCK_H
 #define _LOWLEVELLOCK_H	1
 
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <kernel-features.h>
-#include <tcb-offsets.h>
-
-#ifndef LOCK_INSTR
-# ifdef UP
-#  define LOCK_INSTR	/* nothing */
-# else
-#  define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+#  ifdef UP
+#   define LOCK_INSTR	/* nothing */
+#  else
+#   define LOCK_INSTR "lock;"
+#  endif
+# endif
+#else
+# ifndef LOCK
+#  ifdef UP
+#   define LOCK
+#  else
+#   define LOCK lock
+#  endif
 # endif
 #endif
 
 #define SYS_futex		240
 #define FUTEX_WAIT		0
 #define FUTEX_WAKE		1
+#define FUTEX_CMP_REQUEUE	4
+#define FUTEX_WAKE_OP		5
 #define FUTEX_LOCK_PI		6
 #define FUTEX_UNLOCK_PI		7
 #define FUTEX_TRYLOCK_PI	8
 #define FUTEX_PRIVATE_FLAG	128
 
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
 
 /* Values for 'private' parameter of locking macros.  Yes, the
    definition seems to be backwards.  But it is not.  The bit will be
@@ -76,11 +89,12 @@
 # endif	      
 #endif
 
+#ifndef __ASSEMBLER__
 
 /* Initializer for compatibility lock.  */
-#define LLL_MUTEX_LOCK_INITIALIZER		(0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED	(1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS	(2)
+#define LLL_LOCK_INITIALIZER		(0)
+#define LLL_LOCK_INITIALIZER_LOCKED	(1)
+#define LLL_LOCK_INITIALIZER_WAITERS	(2)
 
 
 #ifdef PIC
@@ -102,7 +116,7 @@
 #endif
 
 /* Delay in spinlock loop.  */
-#define BUSY_WAIT_NOP          asm ("rep; nop")
+#define BUSY_WAIT_NOP	asm ("rep; nop")
 
 
 #define LLL_STUB_UNWIND_INFO_START \
@@ -217,332 +231,309 @@ LLL_STUB_UNWIND_INFO_END
   } while (0)
 
 
-/* Does not preserve %eax and %ecx.  */
-extern int __lll_mutex_lock_wait (int val, int *__futex)
-     __attribute ((regparm (2))) attribute_hidden;
-/* Does not preserve %eax, %ecx, and %edx.  */
-extern int __lll_mutex_timedlock_wait (int val, int *__futex,
-				       const struct timespec *abstime)
-     __attribute ((regparm (3))) attribute_hidden;
-/* Preserves all registers but %eax.  */
-extern int __lll_mutex_unlock_wake (int *__futex)
-     __attribute ((regparm (1))) attribute_hidden;
-
-
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
    after the cmpxchg instruction.  In case the operation succeded this
    value is zero.  In case the operation failed, the cmpxchg instruction
    has loaded the current value of the memory work which is guaranteed
    to be nonzero.  */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \
+			   "je 0f\n\t"					      \
+			   "lock\n"					      \
+			   "0:\tcmpxchgl %2, %1"
+#endif
+
+#define lll_trylock(futex) \
   ({ int ret;								      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
+     __asm __volatile (__lll_trylock_asm				      \
 		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
-			 "0" (LLL_MUTEX_LOCK_INITIALIZER)		      \
+		       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
+			 "0" (LLL_LOCK_INITIALIZER),			      \
+			 "i" (MULTIPLE_THREADS_OFFSET)			      \
 		       : "memory");					      \
      ret; })
 
-
-#define lll_robust_mutex_trylock(futex, id) \
+#define lll_robust_trylock(futex, id) \
   ({ int ret;								      \
      __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
 		       : "=a" (ret), "=m" (futex)			      \
 		       : "r" (id), "m" (futex),				      \
-			 "0" (LLL_MUTEX_LOCK_INITIALIZER)		      \
+			 "0" (LLL_LOCK_INITIALIZER)			      \
 		       : "memory");					      \
      ret; })
 
 
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
   ({ int ret;								      \
      __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
 		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS),	      \
-			  "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER)	      \
+		       : "r" (LLL_LOCK_INITIALIZER_WAITERS),		      \
+			 "m" (futex), "0" (LLL_LOCK_INITIALIZER)	      \
 		       : "memory");					      \
      ret; })
 
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t"			      \
+			      "je 0f\n\t"				      \
+			      "lock\n"					      \
+			      "0:\tcmpxchgl %1, %2\n\t"
+#endif
 
-#define lll_mutex_lock(futex) \
-  (void) ({ int ignore1, ignore2;					      \
-	    __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
-			      "jnz _L_mutex_lock_%=\n\t"		      \
-			      ".subsection 1\n\t"			      \
-			      ".type _L_mutex_lock_%=,@function\n"	      \
-			      "_L_mutex_lock_%=:\n"			      \
-			      "1:\tleal %2, %%ecx\n"			      \
-			      "2:\tcall __lll_mutex_lock_wait\n"	      \
-			      "3:\tjmp 18f\n"				      \
-			      "4:\t.size _L_mutex_lock_%=, 4b-1b\n\t"	      \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_3			      \
-			      "18:"					      \
-			      : "=a" (ignore1), "=c" (ignore2), "=m" (futex)  \
-			      : "0" (0), "1" (1), "m" (futex)		      \
-			      : "memory"); })
-
-
-#define lll_robust_mutex_lock(futex, id) \
-  ({ int result, ignore;						      \
+#define lll_lock(futex, private) \
+  (void)								      \
+    ({ int ignore1, ignore2;						      \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
+	 __asm __volatile (__lll_lock_asm_start				      \
+			   "jnz _L_lock_%=\n\t"				      \
+			   ".subsection 1\n\t"				      \
+			   ".type _L_lock_%=,@function\n"		      \
+			   "_L_lock_%=:\n"				      \
+			   "1:\tleal %2, %%ecx\n"			      \
+			   "2:\tcall __lll_lock_wait_private\n" 	      \
+			   "3:\tjmp 18f\n"				      \
+			   "4:\t.size _L_lock_%=, 4b-1b\n\t"		      \
+			   ".previous\n"				      \
+			   LLL_STUB_UNWIND_INFO_3			      \
+			   "18:"					      \
+			   : "=a" (ignore1), "=c" (ignore2), "=m" (futex)     \
+			   : "0" (0), "1" (1), "m" (futex),		      \
+			     "i" (MULTIPLE_THREADS_OFFSET)		      \
+			   : "memory");					      \
+       else								      \
+	 {								      \
+	   int ignore3;							      \
+	   __asm __volatile (__lll_lock_asm_start			      \
+			     "jnz _L_lock_%=\n\t"			      \
+			     ".subsection 1\n\t"			      \
+			     ".type _L_lock_%=,@function\n"		      \
+			     "_L_lock_%=:\n"				      \
+			     "1:\tleal %2, %%edx\n"			      \
+			     "0:\tmovl %8, %%ecx\n"			      \
+			     "2:\tcall __lll_lock_wait\n"		      \
+			     "3:\tjmp 18f\n"				      \
+			     "4:\t.size _L_lock_%=, 4b-1b\n\t"		      \
+			     ".previous\n"				      \
+			     LLL_STUB_UNWIND_INFO_4			      \
+			     "18:"					      \
+			     : "=a" (ignore1), "=c" (ignore2),		      \
+			       "=m" (futex), "=&d" (ignore3) 		      \
+			     : "1" (1), "m" (futex),			      \
+			       "i" (MULTIPLE_THREADS_OFFSET), "0" (0),	      \
+			       "g" (private)				      \
+			     : "memory");				      \
+	 }								      \
+    })
+
+#define lll_robust_lock(futex, id, private) \
+  ({ int result, ignore1, ignore2;					      \
      __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"			      \
-		       "jnz _L_robust_mutex_lock_%=\n\t"		      \
+		       "jnz _L_robust_lock_%=\n\t"			      \
 		       ".subsection 1\n\t"				      \
-		       ".type _L_robust_mutex_lock_%=,@function\n"	      \
-		       "_L_robust_mutex_lock_%=:\n"			      \
-		       "1:\tleal %2, %%ecx\n"				      \
-		       "2:\tcall __lll_robust_mutex_lock_wait\n"	      \
+		       ".type _L_robust_lock_%=,@function\n"		      \
+		       "_L_robust_lock_%=:\n"				      \
+		       "1:\tleal %2, %%edx\n"				      \
+		       "0:\tmovl %7, %%ecx\n"				      \
+		       "2:\tcall __lll_robust_lock_wait\n"		      \
 		       "3:\tjmp 18f\n"					      \
-		       "4:\t.size _L_robust_mutex_lock_%=, 4b-1b\n\t"	      \
+		       "4:\t.size _L_robust_lock_%=, 4b-1b\n\t"		      \
 		       ".previous\n"					      \
-		       LLL_STUB_UNWIND_INFO_3				      \
+		       LLL_STUB_UNWIND_INFO_4				      \
 		       "18:"						      \
-		       : "=a" (result), "=c" (ignore), "=m" (futex)	      \
-		       : "0" (0), "1" (id), "m" (futex)			      \
+		       : "=a" (result), "=c" (ignore1), "=m" (futex),	      \
+			 "=&d" (ignore2)				      \
+		       : "0" (0), "1" (id), "m" (futex), "g" (private)	      \
 		       : "memory");					      \
      result; })
 
 
-/* Special version of lll_mutex_lock which causes the unlock function to
+/* Special version of lll_lock which causes the unlock function to
    always wakeup waiters.  */
-#define lll_mutex_cond_lock(futex) \
-  (void) ({ int ignore1, ignore2;					      \
-	    __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
-			      "jnz _L_mutex_cond_lock_%=\n\t"		      \
-			      ".subsection 1\n\t"			      \
-			      ".type _L_mutex_cond_lock_%=,@function\n"	      \
-			      "_L_mutex_cond_lock_%=:\n"		      \
-			      "1:\tleal %2, %%ecx\n"			      \
-			      "2:\tcall __lll_mutex_lock_wait\n"	      \
-			      "3:\tjmp 18f\n"				      \
-			      "4:\t.size _L_mutex_cond_lock_%=, 4b-1b\n\t"    \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_3			      \
-			      "18:"					      \
-			      : "=a" (ignore1), "=c" (ignore2), "=m" (futex)  \
-			      : "0" (0), "1" (2), "m" (futex)		      \
-			      : "memory"); })
-
-
-#define lll_robust_mutex_cond_lock(futex, id) \
-  ({ int result, ignore;						      \
+#define lll_cond_lock(futex, private) \
+  (void)								      \
+    ({ int ignore1, ignore2, ignore3;					      \
+       __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
+			 "jnz _L_cond_lock_%=\n\t"			      \
+			 ".subsection 1\n\t"				      \
+			 ".type _L_cond_lock_%=,@function\n"		      \
+			 "_L_cond_lock_%=:\n"				      \
+			 "1:\tleal %2, %%edx\n"				      \
+			 "0:\tmovl %7, %%ecx\n"				      \
+			 "2:\tcall __lll_lock_wait\n"			      \
+			 "3:\tjmp 18f\n"				      \
+			 "4:\t.size _L_cond_lock_%=, 4b-1b\n\t"		      \
+			 ".previous\n"					      \
+			 LLL_STUB_UNWIND_INFO_4				      \
+			 "18:"						      \
+			 : "=a" (ignore1), "=c" (ignore2), "=m" (futex),      \
+			   "=&d" (ignore3)				      \
+			 : "0" (0), "1" (2), "m" (futex), "g" (private)	      \
+			 : "memory");					      \
+    })
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+  ({ int result, ignore1, ignore2;					      \
      __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"			      \
-		       "jnz _L_robust_mutex_cond_lock_%=\n\t"		      \
+		       "jnz _L_robust_cond_lock_%=\n\t"			      \
 		       ".subsection 1\n\t"				      \
-		       ".type _L_robust_mutex_cond_lock_%=,@function\n"	      \
-		       "_L_robust_mutex_cond_lock_%=:\n"		      \
-		       "1:\tleal %2, %%ecx\n"				      \
-		       "2:\tcall __lll_robust_mutex_lock_wait\n"	      \
+		       ".type _L_robust_cond_lock_%=,@function\n"	      \
+		       "_L_robust_cond_lock_%=:\n"			      \
+		       "1:\tleal %2, %%edx\n"				      \
+		       "0:\tmovl %7, %%ecx\n"				      \
+		       "2:\tcall __lll_robust_lock_wait\n"		      \
 		       "3:\tjmp 18f\n"					      \
-		       "4:\t.size _L_robust_mutex_cond_lock_%=, 4b-1b\n\t"    \
+		       "4:\t.size _L_robust_cond_lock_%=, 4b-1b\n\t"	      \
 		       ".previous\n"					      \
-		       LLL_STUB_UNWIND_INFO_3				      \
+		       LLL_STUB_UNWIND_INFO_4				      \
 		       "18:"						      \
-		       : "=a" (result), "=c" (ignore), "=m" (futex)	      \
-		       : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex)	      \
+		       : "=a" (result), "=c" (ignore1), "=m" (futex),	      \
+			 "=&d" (ignore2)				      \
+		       : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex),      \
+			 "g" (private)					      \
 		       : "memory");					      \
      result; })
 
 
-#define lll_mutex_timedlock(futex, timeout) \
-  ({ int result, ignore1, ignore2;					      \
+#define lll_timedlock(futex, timeout, private) \
+  ({ int result, ignore1, ignore2, ignore3;				      \
      __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"			      \
-		       "jnz _L_mutex_timedlock_%=\n\t"			      \
+		       "jnz _L_timedlock_%=\n\t"			      \
 		       ".subsection 1\n\t"				      \
-		       ".type _L_mutex_timedlock_%=,@function\n"	      \
-		       "_L_mutex_timedlock_%=:\n"			      \
+		       ".type _L_timedlock_%=,@function\n"		      \
+		       "_L_timedlock_%=:\n"				      \
 		       "1:\tleal %3, %%ecx\n"				      \
-		       "0:\tmovl %7, %%edx\n"				      \
-		       "2:\tcall __lll_mutex_timedlock_wait\n"		      \
+		       "0:\tmovl %8, %%edx\n"				      \
+		       "2:\tcall __lll_timedlock_wait\n"		      \
 		       "3:\tjmp 18f\n"					      \
-		       "4:\t.size _L_mutex_timedlock_%=, 4b-1b\n\t"	      \
+		       "4:\t.size _L_timedlock_%=, 4b-1b\n\t"		      \
 		       ".previous\n"					      \
 		       LLL_STUB_UNWIND_INFO_4				      \
 		       "18:"						      \
 		       : "=a" (result), "=c" (ignore1), "=&d" (ignore2),      \
-			 "=m" (futex)					      \
-		       : "0" (0), "1" (1), "m" (futex), "m" (timeout)	      \
+			 "=m" (futex), "=S" (ignore3)			      \
+		       : "0" (0), "1" (1), "m" (futex), "m" (timeout),	      \
+			 "4" (private)					      \
 		       : "memory");					      \
      result; })
 
 
-#define lll_robust_mutex_timedlock(futex, timeout, id) \
-  ({ int result, ignore1, ignore2;					      \
+#define lll_robust_timedlock(futex, timeout, id, private) \
+  ({ int result, ignore1, ignore2, ignore3;				      \
      __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"			      \
-		       "jnz _L_robust_mutex_timedlock_%=\n\t"		      \
+		       "jnz _L_robust_timedlock_%=\n\t"			      \
 		       ".subsection 1\n\t"				      \
-		       ".type _L_robust_mutex_timedlock_%=,@function\n"	      \
-		       "_L_robust_mutex_timedlock_%=:\n"		      \
+		       ".type _L_robust_timedlock_%=,@function\n"	      \
+		       "_L_robust_timedlock_%=:\n"			      \
 		       "1:\tleal %3, %%ecx\n"				      \
-		       "0:\tmovl %7, %%edx\n"				      \
-		       "2:\tcall __lll_robust_mutex_timedlock_wait\n"	      \
+		       "0:\tmovl %8, %%edx\n"				      \
+		       "2:\tcall __lll_robust_timedlock_wait\n"		      \
 		       "3:\tjmp 18f\n"					      \
-		       "4:\t.size _L_robust_mutex_timedlock_%=, 4b-1b\n\t"    \
+		       "4:\t.size _L_robust_timedlock_%=, 4b-1b\n\t"	      \
 		       ".previous\n"					      \
 		       LLL_STUB_UNWIND_INFO_4				      \
 		       "18:"						      \
 		       : "=a" (result), "=c" (ignore1), "=&d" (ignore2),      \
-			 "=m" (futex)					      \
-		       : "0" (0), "1" (id), "m" (futex), "m" (timeout)	      \
+			 "=m" (futex), "=S" (ignore3)			      \
+		       : "0" (0), "1" (id), "m" (futex), "m" (timeout),	      \
+			 "4" (private)					      \
 		       : "memory");					      \
      result; })
 
-
-#define lll_mutex_unlock(futex) \
-  (void) ({ int ignore;							      \
-            __asm __volatile (LOCK_INSTR "subl $1, %0\n\t"		      \
-			      "jne _L_mutex_unlock_%=\n\t"		      \
-			      ".subsection 1\n\t"			      \
-			      ".type _L_mutex_unlock_%=,@function\n"	      \
-			      "_L_mutex_unlock_%=:\n"			      \
-			      "1:\tleal %0, %%eax\n"			      \
-			      "2:\tcall __lll_mutex_unlock_wake\n"	      \
-			      "3:\tjmp 18f\n"				      \
-			      "4:\t.size _L_mutex_unlock_%=, 4b-1b\n\t"	      \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_3			      \
-			      "18:"					      \
-			      : "=m" (futex), "=&a" (ignore)		      \
-			      : "m" (futex)				      \
-			      : "memory"); })
-
-
-#define lll_robust_mutex_unlock(futex) \
-  (void) ({ int ignore;							      \
-            __asm __volatile (LOCK_INSTR "andl %2, %0\n\t"		      \
-			      "jne _L_robust_mutex_unlock_%=\n\t"	      \
-			      ".subsection 1\n\t"			      \
-			      ".type _L_robust_mutex_unlock_%=,@function\n"   \
-			      "_L_robust_mutex_unlock_%=:\n\t"		      \
-			      "1:\tleal %0, %%eax\n"			      \
-			      "2:\tcall __lll_mutex_unlock_wake\n"	      \
-			      "3:\tjmp 18f\n"				      \
-			      "4:\t.size _L_robust_mutex_unlock_%=, 4b-1b\n\t"\
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_3			      \
-			      "18:"					      \
-			      : "=m" (futex), "=&a" (ignore)		      \
-			      : "i" (FUTEX_WAITERS), "m" (futex)	      \
-			      : "memory"); })
-
-
-#define lll_robust_mutex_dead(futex) \
-  (void) ({ int __ignore;						      \
-	    register int _nr asm ("edx") = 1;				      \
-	    __asm __volatile (LOCK_INSTR "orl %5, (%2)\n\t"		      \
-			      LLL_EBX_LOAD				      \
-			      LLL_ENTER_KERNEL				      \
-			      LLL_EBX_LOAD				      \
-			      : "=a" (__ignore)				      \
-			      : "0" (SYS_futex), LLL_EBX_REG (&(futex)),      \
-				"c" (FUTEX_WAKE), "d" (_nr),		      \
-				"i" (FUTEX_OWNER_DIED),			      \
-				"i" (offsetof (tcbhead_t, sysinfo))); })
-
-
-#define lll_mutex_islocked(futex) \
-  (futex != 0)
-
-
-/* We have a separate internal lock implementation which is not tied
-   to binary compatibility.  */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
-/* Initializers for lock.  */
-#define LLL_LOCK_INITIALIZER		(0)
-#define LLL_LOCK_INITIALIZER_LOCKED	(1)
-
-
-extern int __lll_lock_wait (int val, int *__futex)
-     __attribute ((regparm (2))) attribute_hidden;
-extern int __lll_unlock_wake (int *__futex)
-     __attribute ((regparm (1))) attribute_hidden;
-
-
-/* The states of a lock are:
-    0  -  untaken
-    1  -  taken by one user
-    2  -  taken by more users */
-
-
 #if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
+# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
 #else
-/* Special versions of the macros for use in libc itself.  They avoid
-   the lock prefix when the thread library is not used.
-
-   XXX In future we might even want to avoid it on UP machines.  */
-# include <tls.h>
-
-# define lll_trylock(futex) \
-  ({ unsigned char ret;							      \
-     __asm __volatile ("cmpl $0, %%gs:%P5\n\t"				      \
-		       "je 0f\n\t"					      \
-		       "lock\n"						      \
-		       "0:\tcmpxchgl %2, %1; setne %0"			      \
-		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
-			 "0" (LLL_MUTEX_LOCK_INITIALIZER),		      \
-		         "i" (offsetof (tcbhead_t, multiple_threads))	      \
-		       : "memory");					      \
-     ret; })
-
-
-# define lll_lock(futex) \
-  (void) ({ int ignore1, ignore2;					      \
-	    __asm __volatile ("cmpl $0, %%gs:%P6\n\t"			      \
-			      "je 0f\n\t"				      \
-			      "lock\n"					      \
-			      "0:\tcmpxchgl %1, %2\n\t"			      \
-			      "jnz _L_lock_%=\n\t"			      \
-			      ".subsection 1\n\t"			      \
-			      ".type _L_lock_%=,@function\n"		      \
-			      "_L_lock_%=:\n"				      \
-			      "1:\tleal %2, %%ecx\n"			      \
-			      "2:\tcall __lll_mutex_lock_wait\n"	      \
-			      "3:\tjmp 18f\n"				      \
-			      "4:\t.size _L_lock_%=, 4b-1b\n\t"		      \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_3			      \
-			      "18:"					      \
-			      : "=a" (ignore1), "=c" (ignore2), "=m" (futex)  \
-			      : "0" (0), "1" (1), "m" (futex),		      \
-		                "i" (offsetof (tcbhead_t, multiple_threads))  \
-			      : "memory"); })
-
-
-# define lll_unlock(futex) \
-  (void) ({ int ignore;							      \
-            __asm __volatile ("cmpl $0, %%gs:%P3\n\t"			      \
-			      "je 0f\n\t"				      \
-			      "lock\n"					      \
-			      "0:\tsubl $1,%0\n\t"			      \
-			      "jne _L_unlock_%=\n\t"			      \
-			      ".subsection 1\n\t"			      \
-			      ".type _L_unlock_%=,@function\n"		      \
-			      "_L_unlock_%=:\n"				      \
-			      "1:\tleal %0, %%eax\n"			      \
-			      "2:\tcall __lll_mutex_unlock_wake\n"	      \
-			      "3:\tjmp 18f\n\t"				      \
-			      "4:\t.size _L_unlock_%=, 4b-1b\n\t"	      \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_3			      \
-			      "18:"					      \
-			      : "=m" (futex), "=&a" (ignore)		      \
-			      : "m" (futex),				      \
-				"i" (offsetof (tcbhead_t, multiple_threads))  \
-			      : "memory"); })
+# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t"			      \
+			  "je 0f\n\t"					      \
+			  "lock\n"					      \
+			  "0:\tsubl $1,%0\n\t"
 #endif
 
+#define lll_unlock(futex, private) \
+  (void)								      \
+    ({ int ignore;							      \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
+	 __asm __volatile (__lll_unlock_asm				      \
+			   "jne _L_unlock_%=\n\t"			      \
+			   ".subsection 1\n\t"				      \
+			   ".type _L_unlock_%=,@function\n"		      \
+			   "_L_unlock_%=:\n"				      \
+			   "1:\tleal %0, %%eax\n"			      \
+			   "2:\tcall __lll_unlock_wake_private\n"	      \
+			   "3:\tjmp 18f\n"				      \
+			   "4:\t.size _L_unlock_%=, 4b-1b\n\t"		      \
+			   ".previous\n"				      \
+			   LLL_STUB_UNWIND_INFO_3			      \
+			   "18:"					      \
+			   : "=m" (futex), "=&a" (ignore)		      \
+			   : "m" (futex), "i" (MULTIPLE_THREADS_OFFSET)	      \
+			   : "memory");					      \
+       else								      \
+	 {								      \
+	   int ignore2;							      \
+	   __asm __volatile (__lll_unlock_asm				      \
+			     "jne _L_unlock_%=\n\t"			      \
+			     ".subsection 1\n\t"			      \
+			     ".type _L_unlock_%=,@function\n"		      \
+			     "_L_unlock_%=:\n"				      \
+			     "1:\tleal %0, %%eax\n"			      \
+			     "0:\tmovl %5, %%ecx\n"			      \
+			     "2:\tcall __lll_unlock_wake\n"		      \
+			     "3:\tjmp 18f\n"				      \
+			     "4:\t.size _L_unlock_%=, 4b-1b\n\t"	      \
+			     ".previous\n"				      \
+			     LLL_STUB_UNWIND_INFO_4			      \
+			     "18:"					      \
+			     : "=m" (futex), "=&a" (ignore), "=&c" (ignore2)  \
+			     : "i" (MULTIPLE_THREADS_OFFSET), "m" (futex),    \
+			       "g" (private)				      \
+			     : "memory");				      \
+	 }								      \
+    })
+
+#define lll_robust_unlock(futex, private) \
+  (void)								      \
+    ({ int ignore, ignore2;						      \
+       __asm __volatile (LOCK_INSTR "andl %3, %0\n\t"			      \
+			 "jne _L_robust_unlock_%=\n\t"			      \
+			 ".subsection 1\n\t"				      \
+			 ".type _L_robust_unlock_%=,@function\n"	      \
+			 "_L_robust_unlock_%=:\n\t"			      \
+			 "1:\tleal %0, %%eax\n"				      \
+			 "0:\tmovl %5, %%ecx\n"				      \
+			 "2:\tcall __lll_unlock_wake\n"			      \
+			 "3:\tjmp 18f\n"				      \
+			 "4:\t.size _L_robust_unlock_%=, 4b-1b\n\t"	      \
+			 ".previous\n"					      \
+			 LLL_STUB_UNWIND_INFO_4				      \
+			 "18:"						      \
+			 : "=m" (futex), "=&a" (ignore), "=&c" (ignore2)      \
+			 : "i" (FUTEX_WAITERS), "m" (futex), "g" (private)    \
+			 : "memory");					      \
+    })
+
+
+#define lll_robust_dead(futex, private) \
+  (void)								      \
+    ({ int __ignore;							      \
+       register int _nr asm ("edx") = 1;				      \
+       __asm __volatile (LOCK_INSTR "orl %5, (%2)\n\t"			      \
+			 LLL_EBX_LOAD					      \
+			 LLL_ENTER_KERNEL				      \
+			 LLL_EBX_LOAD					      \
+			 : "=a" (__ignore)				      \
+			 : "0" (SYS_futex), LLL_EBX_REG (&(futex)),	      \
+			   "c" (__lll_private_flag (FUTEX_WAKE, private)),    \
+			   "d" (_nr), "i" (FUTEX_OWNER_DIED),		      \
+			   "i" (offsetof (tcbhead_t, sysinfo)));	      \
+    })
 
 #define lll_islocked(futex) \
   (futex != LLL_LOCK_INITIALIZER)
 
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
@@ -581,28 +572,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
       }									      \
     __result; })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     __attribute ((regparm (1))) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-				 const struct timespec *abstime)
-     __attribute ((regparm (2))) attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     __attribute ((regparm (1))) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     __attribute ((regparm (1))) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
+#endif  /* !__ASSEMBLER__ */
 
 #endif	/* lowlevellock.h */
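
With the header reorganized this way, every locking macro now takes an
explicit private argument, and lll_lock/lll_unlock use
__builtin_constant_p to emit the short stub calling
__lll_lock_wait_private or __lll_unlock_wake_private when the caller
passes the constant LLL_PRIVATE, falling back to the generic stub that
loads the selector into %ecx otherwise.  An illustrative call site (not
part of the patch) would look like this:

/* Sketch of how internal code uses the new interface.  */
static int once_lock = LLL_LOCK_INITIALIZER;

static void
do_something_locked (void)
{
  lll_lock (once_lock, LLL_PRIVATE);	/* constant selector: private fast path */
  /* ... critical section ... */
  lll_unlock (once_lock, LLL_PRIVATE);
}
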
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S b/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
index 8ff0dad27f..f31d968bc5 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
@@ -20,19 +20,9 @@
 #include <unwindbuf.h>
 #include <sysdep.h>
 #include <kernel-features.h>
+#include <lowlevellock.h>
 
 
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define SYS_futex		240
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_PRIVATE_FLAG	128
-
 	.comm	__fork_generation, 4, 4
 
 	.text
diff --git a/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
index 095f0e8aca..3c28a397ea 100644
--- a/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h
@@ -73,9 +73,6 @@
 /* Delay in spinlock loop.  */
 #define BUSY_WAIT_NOP          asm ("hint @pause")
 
-/* Initializer for compatibility lock.	*/
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
 #define lll_futex_wait(futex, val, private) \
   lll_futex_timed_wait (futex, val, NULL, private)
 
@@ -95,12 +92,13 @@
    _r10 == -1 ? -_retval : _retval;					\
 })
 
-#define lll_robust_mutex_dead(futexv)					\
+#define lll_robust_dead(futexv, private)				\
 do									\
   {									\
     int *__futexp = &(futexv);						\
     atomic_or (__futexp, FUTEX_OWNER_DIED);				\
-    DO_INLINE_SYSCALL(futex, 3, (long) __futexp, FUTEX_WAKE, 1);	\
+    DO_INLINE_SYSCALL(futex, 3, (long) __futexp,			\
+		      __lll_private_flag (FUTEX_WAKE, private), 1);	\
   }									\
 while (0)
 
@@ -123,156 +121,144 @@ while (0)
 })
 
 
-#define __lll_mutex_trylock(futex) \
+#define __lll_trylock(futex) \
   (atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0)
-#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
+#define lll_trylock(futex) __lll_trylock (&(futex))
 
 
-#define __lll_robust_mutex_trylock(futex, id) \
+#define __lll_robust_trylock(futex, id) \
   (atomic_compare_and_exchange_val_acq (futex, id, 0) != 0)
-#define lll_robust_mutex_trylock(futex, id) \
-  __lll_robust_mutex_trylock (&(futex), id)
+#define lll_robust_trylock(futex, id) \
+  __lll_robust_trylock (&(futex), id)
 
 
-#define __lll_mutex_cond_trylock(futex) \
+#define __lll_cond_trylock(futex) \
   (atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0)
-#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
+#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
 
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
 
 
-#define __lll_mutex_lock(futex)						\
-  ((void) ({								\
-    int *__futex = (futex);						\
-    if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0)	\
-      __lll_lock_wait (__futex);					\
+#define __lll_lock(futex, private)					      \
+  ((void) ({								      \
+    int *__futex = (futex);						      \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex,      \
+								1, 0), 0))    \
+      {									      \
+	if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
+	  __lll_lock_wait_private (__futex);				      \
+	else								      \
+	  __lll_lock_wait (__futex, private);				      \
   }))
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
-
-
-#define __lll_robust_mutex_lock(futex, id)				\
-  ({									\
-    int *__futex = (futex);						\
-    int __val = 0;							\
-									\
-    if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0)	\
-      __val = __lll_robust_lock_wait (__futex);				\
-    __val;								\
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
+
+
+#define __lll_robust_lock(futex, id, private)				      \
+  ({									      \
+    int *__futex = (futex);						      \
+    int __val = 0;							      \
+									      \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
+								0), 0))	      \
+      __val = __lll_robust_lock_wait (__futex, private);		      \
+    __val;								      \
   })
-#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), id, private)
 
 
-#define __lll_mutex_cond_lock(futex)					\
-  ((void) ({								\
-    int *__futex = (futex);						\
-    if (atomic_compare_and_exchange_bool_acq (__futex, 2, 0) != 0)	\
-      __lll_lock_wait (__futex);					\
+#define __lll_cond_lock(futex, private)					      \
+  ((void) ({								      \
+    int *__futex = (futex);						      \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, 2,   \
+								0), 0))	      \
+      __lll_lock_wait (__futex, private);				      \
   }))
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
-
-
-#define __lll_robust_mutex_cond_lock(futex, id)				\
-  ({									\
-    int *__futex = (futex);						\
-    int __val = 0;							\
-    int __id = (id) | FUTEX_WAITERS;					\
-									\
-    if (atomic_compare_and_exchange_bool_acq (__futex, __id, 0) != 0)	\
-      __val = __lll_robust_lock_wait (__futex);				\
-    __val;								\
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
+
+
+#define __lll_robust_cond_lock(futex, id, private)			      \
+  ({									      \
+    int *__futex = (futex);						      \
+    int __val = 0;							      \
+    int __id = (id) | FUTEX_WAITERS;					      \
+									      \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex,      \
+								__id, 0), 0)) \
+      __val = __lll_robust_lock_wait (__futex, private);		      \
+    __val;								      \
   })
-#define lll_robust_mutex_cond_lock(futex, id) \
-  __lll_robust_mutex_cond_lock (&(futex), id)
+#define lll_robust_cond_lock(futex, id, private) \
+  __lll_robust_cond_lock (&(futex), id, private)
 
 
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
-     attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
-     attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+				 int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+					int private) attribute_hidden;
 
 
-#define __lll_mutex_timedlock(futex, abstime)				\
-  ({									\
-     int *__futex = (futex);						\
-     int __val = 0;							\
-									\
-     if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0)	\
-       __val = __lll_timedlock_wait (__futex, abstime);			\
-     __val;								\
+#define __lll_timedlock(futex, abstime, private)			      \
+  ({									      \
+     int *__futex = (futex);						      \
+     int __val = 0;							      \
+									      \
+     if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, 1,  \
+								 0), 0))      \
+       __val = __lll_timedlock_wait (__futex, abstime, private);	      \
+     __val;								      \
   })
-#define lll_mutex_timedlock(futex, abstime) \
-  __lll_mutex_timedlock (&(futex), abstime)
-
-
-#define __lll_robust_mutex_timedlock(futex, abstime, id)		\
-  ({									\
-    int *__futex = (futex);						\
-    int __val = 0;							\
-									\
-    if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0)	\
-      __val = __lll_robust_timedlock_wait (__futex, abstime);		\
-    __val;								\
+#define lll_timedlock(futex, abstime, private) \
+  __lll_timedlock (&(futex), abstime, private)
+
+
+#define __lll_robust_timedlock(futex, abstime, id, private)		      \
+  ({									      \
+    int *__futex = (futex);						      \
+    int __val = 0;							      \
+									      \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
+								0), 0))	      \
+      __val = __lll_robust_timedlock_wait (__futex, abstime, private);	      \
+    __val;								      \
   })
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
-  __lll_robust_mutex_timedlock (&(futex), abstime, id)
+#define lll_robust_timedlock(futex, abstime, id, private) \
+  __lll_robust_timedlock (&(futex), abstime, id, private)
 
 
-#define __lll_mutex_unlock(futex)			\
-  ((void) ({						\
-    int *__futex = (futex);				\
-    int __val = atomic_exchange_rel (__futex, 0);	\
-							\
-    if (__builtin_expect (__val > 1, 0))		\
-      lll_futex_wake (__futex, 1, LLL_SHARED);		\
+#define __lll_unlock(futex, private)					      \
+  ((void) ({								      \
+    int *__futex = (futex);						      \
+    int __val = atomic_exchange_rel (__futex, 0);			      \
+									      \
+    if (__builtin_expect (__val > 1, 0))				      \
+      lll_futex_wake (__futex, 1, private);				      \
   }))
-#define lll_mutex_unlock(futex) \
-  __lll_mutex_unlock(&(futex))
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
 
 
-#define __lll_robust_mutex_unlock(futex)		\
-  ((void) ({						\
-    int *__futex = (futex);				\
-    int __val = atomic_exchange_rel (__futex, 0);	\
-							\
-    if (__builtin_expect (__val & FUTEX_WAITERS, 0))	\
-      lll_futex_wake (__futex, 1, LLL_SHARED);		\
+#define __lll_robust_unlock(futex, private)				      \
+  ((void) ({								      \
+    int *__futex = (futex);						      \
+    int __val = atomic_exchange_rel (__futex, 0);			      \
+									      \
+    if (__builtin_expect (__val & FUTEX_WAITERS, 0))			      \
+      lll_futex_wake (__futex, 1, private);				      \
   }))
-#define lll_robust_mutex_unlock(futex) \
-  __lll_robust_mutex_unlock(&(futex))
+#define lll_robust_unlock(futex, private) \
+  __lll_robust_unlock(&(futex), private)
 
 
-#define __lll_mutex_unlock_force(futex)		\
-  ((void) ({					\
-    int *__futex = (futex);			\
-    (void) atomic_exchange_rel (__futex, 0);	\
-    lll_futex_wake (__futex, 1, LLL_SHARED);	\
-  }))
-#define lll_mutex_unlock_force(futex) \
-  __lll_mutex_unlock_force(&(futex))
-
-
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
   (futex != 0)
 
-
-/* We have a separate internal lock implementation which is not tied
-   to binary compatibility.  We can use the lll_mutex_*.  */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER		(0)
 #define LLL_LOCK_INITIALIZER_LOCKED	(1)
 
-#define lll_trylock(futex)	lll_mutex_trylock (futex)
-#define lll_lock(futex)		lll_mutex_lock (futex)
-#define lll_unlock(futex)	lll_mutex_unlock (futex)
-#define lll_islocked(futex)	lll_mutex_islocked (futex)
-
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
@@ -297,26 +283,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;						\
   })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-				 const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 #endif	/* lowlevellock.h */
diff --git a/nptl/sysdeps/unix/sysv/linux/lowlevellock.c b/nptl/sysdeps/unix/sysv/linux/lowlevellock.c
index ab7f605f0c..1187800148 100644
--- a/nptl/sysdeps/unix/sysv/linux/lowlevellock.c
+++ b/nptl/sysdeps/unix/sysv/linux/lowlevellock.c
@@ -25,22 +25,35 @@
 
 
 void
-__lll_lock_wait (int *futex)
+__lll_lock_wait_private (int *futex)
 {
   do
     {
       int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
       if (oldval != 0)
-	lll_futex_wait (futex, 2,
-			// XYZ check mutex flag
-			LLL_SHARED);
+	lll_futex_wait (futex, 2, LLL_PRIVATE);
+    }
+  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
+}
+
+
+/* These functions don't get included in libc.so  */
+#ifdef IS_IN_libpthread
+void
+__lll_lock_wait (int *futex, int private)
+{
+  do
+    {
+      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
+      if (oldval != 0)
+	lll_futex_wait (futex, 2, private);
     }
   while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
 }
 
 
 int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
 {
   /* Reject invalid timeouts.  */
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -70,9 +83,7 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
       /* Wait.  */
       int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
       if (oldval != 0)
-	lll_futex_timed_wait (futex, 2, &rt,
-			      // XYZ check mutex flag
-			      LLL_SHARED);
+	lll_futex_timed_wait (futex, 2, &rt, private);
     }
   while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
 
@@ -80,8 +91,6 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
 }
 
 
-/* This function doesn't get included in libc.so  */
-#ifdef IS_IN_libpthread
 int
 __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
 {
diff --git a/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c b/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c
index 54cee0859b..3830f94daa 100644
--- a/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c
+++ b/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c
@@ -25,7 +25,7 @@
 
 
 int
-__lll_robust_lock_wait (int *futex)
+__lll_robust_lock_wait (int *futex, int private)
 {
   int oldval = *futex;
   int tid = THREAD_GETMEM (THREAD_SELF, tid);
@@ -44,9 +44,7 @@ __lll_robust_lock_wait (int *futex)
 	  && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
 	continue;
 
-      lll_futex_wait (futex, newval,
-		      // XYZ check mutex flag
-		      LLL_SHARED);
+      lll_futex_wait (futex, newval, private);
 
     try:
       ;
@@ -59,7 +57,8 @@ __lll_robust_lock_wait (int *futex)
 
 
 int
-__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime,
+			     int private)
 {
   /* Reject invalid timeouts.  */
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -102,9 +101,7 @@ __lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
 	  && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
 	continue;
 
-      lll_futex_timed_wait (futex, newval, &rt,
-			    // XYZ check mutex flag
-			    LLL_SHARED);
+      lll_futex_timed_wait (futex, newval, &rt, private);
 
     try:
       ;
diff --git a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
index cf91f21483..41804d1372 100644
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -69,9 +69,6 @@
 # endif	      
 #endif
 
-/* Initializer for compatibility lock.	*/
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
 #define lll_futex_wait(futexp, val, private) \
   lll_futex_timed_wait (futexp, val, NULL, private)
 
@@ -97,14 +94,15 @@
     INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;		      \
   })
 
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
   do									      \
     {									      \
       INTERNAL_SYSCALL_DECL (__err);					      \
       int *__futexp = &(futexv);					      \
 									      \
       atomic_or (__futexp, FUTEX_OWNER_DIED);				      \
-      INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0);	      \
+      INTERNAL_SYSCALL (futex, __err, 4, __futexp,			      \
+			__lll_private_flag (FUTEX_WAKE, private), 1, 0);      \
     }									      \
   while (0)
 
@@ -171,119 +169,111 @@
      __val;								      \
   })
 
-#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id)
+#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id)
 
 /* Set *futex to 1 if it is 0, atomically.  Returns the old value */
 #define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
 
-#define lll_mutex_trylock(lock)	__lll_trylock (&(lock))
+#define lll_trylock(lock)	__lll_trylock (&(lock))
 
 /* Set *futex to 2 if it is 0, atomically.  Returns the old value */
 #define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
 
-#define lll_mutex_cond_trylock(lock)	__lll_cond_trylock (&(lock))
+#define lll_cond_trylock(lock)	__lll_cond_trylock (&(lock))
 
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
 
-#define lll_mutex_lock(lock) \
+#define lll_lock(lock, private) \
   (void) ({								      \
     int *__futex = &(lock);						      \
     if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
 			  0) != 0)					      \
-      __lll_lock_wait (__futex);					      \
+      {									      \
+	if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
+	  __lll_lock_wait_private (__futex);				      \
+	else								      \
+	  __lll_lock_wait (__futex, private);				      \
+      }									      \
   })
 
-#define lll_robust_mutex_lock(lock, id) \
+#define lll_robust_lock(lock, id, private) \
   ({									      \
     int *__futex = &(lock);						      \
     int __val = 0;							      \
     if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
 								0), 0))	      \
-      __val = __lll_robust_lock_wait (__futex);				      \
+      __val = __lll_robust_lock_wait (__futex, private);		      \
     __val;								      \
   })
 
-#define lll_mutex_cond_lock(lock) \
+#define lll_cond_lock(lock, private) \
   (void) ({								      \
     int *__futex = &(lock);						      \
     if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\
 			  0) != 0)					      \
-      __lll_lock_wait (__futex);					      \
+      __lll_lock_wait (__futex, private);				      \
   })
 
-#define lll_robust_mutex_cond_lock(lock, id) \
+#define lll_robust_cond_lock(lock, id, private) \
   ({									      \
     int *__futex = &(lock);						      \
     int __val = 0;							      \
     int __id = id | FUTEX_WAITERS;					      \
     if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
 								0), 0))	      \
-      __val = __lll_robust_lock_wait (__futex);				      \
+      __val = __lll_robust_lock_wait (__futex, private);		      \
     __val;								      \
   })
 
 
 extern int __lll_timedlock_wait
-  (int *futex, const struct timespec *) attribute_hidden;
+  (int *futex, const struct timespec *, int private) attribute_hidden;
 extern int __lll_robust_timedlock_wait
-  (int *futex, const struct timespec *) attribute_hidden;
+  (int *futex, const struct timespec *, int private) attribute_hidden;
 
-#define lll_mutex_timedlock(lock, abstime) \
+#define lll_timedlock(lock, abstime, private) \
   ({									      \
     int *__futex = &(lock);						      \
     int __val = 0;							      \
     if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
 			  0) != 0)					      \
-      __val = __lll_timedlock_wait (__futex, abstime);			      \
+      __val = __lll_timedlock_wait (__futex, abstime, private);		      \
     __val;								      \
   })
 
-#define lll_robust_mutex_timedlock(lock, abstime, id) \
+#define lll_robust_timedlock(lock, abstime, id, private) \
   ({									      \
     int *__futex = &(lock);						      \
     int __val = 0;							      \
     if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \
 								0), 0))	      \
-      __val = __lll_robust_timedlock_wait (__futex, abstime);		      \
+      __val = __lll_robust_timedlock_wait (__futex, abstime, private);	      \
     __val;								      \
   })
 
-#define lll_mutex_unlock(lock) \
+#define lll_unlock(lock, private) \
   ((void) ({								      \
     int *__futex = &(lock);						      \
     int __val = atomic_exchange_rel (__futex, 0);			      \
     if (__builtin_expect (__val > 1, 0))				      \
-      lll_futex_wake (__futex, 1, LLL_SHARED);				      \
+      lll_futex_wake (__futex, 1, private);				      \
   }))
 
-#define lll_robust_mutex_unlock(lock) \
+#define lll_robust_unlock(lock, private) \
   ((void) ({								      \
     int *__futex = &(lock);						      \
     int __val = atomic_exchange_rel (__futex, 0);			      \
     if (__builtin_expect (__val & FUTEX_WAITERS, 0))			      \
-      lll_futex_wake (__futex, 1, LLL_SHARED);				      \
-  }))
-
-#define lll_mutex_unlock_force(lock) \
-  ((void) ({								      \
-    int *__futex = &(lock);						      \
-    *__futex = 0;							      \
-    __asm __volatile (__lll_rel_instr ::: "memory");			      \
-    lll_futex_wake (__futex, 1, LLL_SHARED);				      \
+      lll_futex_wake (__futex, 1, private);				      \
   }))
 
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
   (futex != 0)
 
 
-/* Our internal lock implementation is identical to the binary-compatible
-   mutex implementation. */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER		(0)
 #define LLL_LOCK_INITIALIZER_LOCKED	(1)
@@ -293,11 +283,6 @@ typedef int lll_lock_t;
     1  -  taken by one user
    >1  -  taken by more users */
 
-#define lll_trylock(lock)	lll_mutex_trylock (lock)
-#define lll_lock(lock)		lll_mutex_lock (lock)
-#define lll_unlock(lock)	lll_mutex_unlock (lock)
-#define lll_islocked(lock)	lll_mutex_islocked (lock)
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
@@ -320,26 +305,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;								      \
   })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-				 const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 #endif	/* lowlevellock.h */
diff --git a/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c b/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
index 0b18cdbcde..fd92f7b32d 100644
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
@@ -29,11 +29,37 @@
 int
 __new_sem_post (sem_t *sem)
 {
+  struct new_sem *isem = (struct new_sem *) sem;
+
+  __asm __volatile (__lll_rel_instr ::: "memory");
+  atomic_increment (&isem->value);
+  __asm __volatile (__lll_acq_instr ::: "memory");
+  if (isem->nwaiters > 0)
+    {
+      int err = lll_futex_wake (&isem->value, 1,
+				isem->private ^ FUTEX_PRIVATE_FLAG);
+      if (__builtin_expect (err, 0) < 0)
+	{
+	  __set_errno (-err);
+	  return -1;
+	}
+    }
+  return 0;
+}
+versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1);
+
+#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
+
+int
+attribute_compat_text_section
+__old_sem_post (sem_t *sem)
+{
   int *futex = (int *) sem;
 
   __asm __volatile (__lll_rel_instr ::: "memory");
   int nr = atomic_increment_val (futex);
-  int err = lll_futex_wake (futex, nr, LLL_SHARED);
+  /* We always have to assume it is a shared semaphore.  */
+  int err = lll_futex_wake (futex, 1, LLL_SHARED);
   if (__builtin_expect (err, 0) < 0)
     {
       __set_errno (-err);
@@ -41,8 +67,6 @@ __new_sem_post (sem_t *sem)
     }
   return 0;
 }
-versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1);
-#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
-strong_alias (__new_sem_post, __old_sem_post)
+
 compat_symbol (libpthread, __old_sem_post, sem_post, GLIBC_2_0);
 #endif
diff --git a/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c b/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
index a97351f880..81ecd6556b 100644
--- a/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
+++ b/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
@@ -1,8 +1,8 @@
 #include <pthreadP.h>
 
-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock (mutex)
-#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock (mutex)
-#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_cond_lock (mutex, id)
+#define LLL_MUTEX_LOCK(mutex) lll_cond_lock (mutex, /* XYZ */ LLL_SHARED)
+#define LLL_MUTEX_TRYLOCK(mutex) lll_cond_trylock (mutex)
+#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_cond_lock (mutex, id, /* XYZ */ LLL_SHARED)
 #define __pthread_mutex_lock __pthread_mutex_cond_lock
 #define NO_INCR
 
diff --git a/nptl/sysdeps/unix/sysv/linux/register-atfork.c b/nptl/sysdeps/unix/sysv/linux/register-atfork.c
index cb5b2b832f..231fc9b091 100644
--- a/nptl/sysdeps/unix/sysv/linux/register-atfork.c
+++ b/nptl/sysdeps/unix/sysv/linux/register-atfork.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -24,7 +24,7 @@
 
 
 /* Lock to protect allocation and deallocation of fork handlers.  */
-lll_lock_t __fork_lock = LLL_LOCK_INITIALIZER;
+int __fork_lock = LLL_LOCK_INITIALIZER;
 
 
 /* Number of pre-allocated handler entries.  */
@@ -85,7 +85,7 @@ __register_atfork (prepare, parent, child, dso_handle)
      void *dso_handle;
 {
   /* Get the lock to not conflict with other allocations.  */
-  lll_lock (__fork_lock);
+  lll_lock (__fork_lock, LLL_PRIVATE);
 
   struct fork_handler *newp = fork_handler_alloc ();
 
@@ -102,7 +102,7 @@ __register_atfork (prepare, parent, child, dso_handle)
     }
 
   /* Release the lock.  */
-  lll_unlock (__fork_lock);
+  lll_unlock (__fork_lock, LLL_PRIVATE);
 
   return newp == NULL ? ENOMEM : 0;
 }
@@ -112,7 +112,7 @@ libc_hidden_def (__register_atfork)
 libc_freeres_fn (free_mem)
 {
   /* Get the lock to not conflict with running forks.  */
-  lll_lock (__fork_lock);
+  lll_lock (__fork_lock, LLL_PRIVATE);
 
   /* No more fork handlers.  */
   __fork_handlers = NULL;
@@ -123,7 +123,7 @@ libc_freeres_fn (free_mem)
   memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool));
 
   /* Release the lock.  */
-  lll_unlock (__fork_lock);
+  lll_unlock (__fork_lock, LLL_PRIVATE);
 
   /* We can free the memory after releasing the lock.  */
   while (runp != NULL)
diff --git a/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
index 4758b63bd0..ad4d27300f 100644
--- a/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h
@@ -68,9 +68,6 @@
 # endif	      
 #endif
 
-/* Initializer for compatibility lock.	*/
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
 #define lll_futex_wait(futex, val, private) \
   lll_futex_timed_wait (futex, val, NULL, private)
 
@@ -108,13 +105,13 @@
   })
 
 
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
   do									      \
     {									      \
       int *__futexp = &(futexv);					      \
 									      \
       atomic_or (__futexp, FUTEX_OWNER_DIED);				      \
-      lll_futex_wake (__futexp, 1, LLL_SHARED);				      \
+      lll_futex_wake (__futexp, 1, private);				      \
     }									      \
   while (0)
 
@@ -175,7 +172,7 @@
 
 static inline int
 __attribute__ ((always_inline))
-__lll_mutex_trylock (int *futex)
+__lll_trylock (int *futex)
 {
     unsigned int old;
 
@@ -184,12 +181,12 @@ __lll_mutex_trylock (int *futex)
 		       : "0" (0), "d" (1), "m" (*futex) : "cc", "memory" );
     return old != 0;
 }
-#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
+#define lll_trylock(futex) __lll_trylock (&(futex))
 
 
 static inline int
 __attribute__ ((always_inline))
-__lll_mutex_cond_trylock (int *futex)
+__lll_cond_trylock (int *futex)
 {
     unsigned int old;
 
@@ -198,12 +195,12 @@ __lll_mutex_cond_trylock (int *futex)
 		       : "0" (0), "d" (2), "m" (*futex) : "cc", "memory" );
     return old != 0;
 }
-#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
+#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
 
 
 static inline int
 __attribute__ ((always_inline))
-__lll_robust_mutex_trylock (int *futex, int id)
+__lll_robust_trylock (int *futex, int id)
 {
     unsigned int old;
 
@@ -212,141 +209,121 @@ __lll_robust_mutex_trylock (int *futex, int id)
 		       : "0" (0), "d" (id), "m" (*futex) : "cc", "memory" );
     return old != 0;
 }
-#define lll_robust_mutex_trylock(futex, id) \
-  __lll_robust_mutex_trylock (&(futex), id)
+#define lll_robust_trylock(futex, id) \
+  __lll_robust_trylock (&(futex), id)
 
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
 
 static inline void
 __attribute__ ((always_inline))
-__lll_mutex_lock (int *futex)
+__lll_lock (int *futex, int private)
 {
-  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    __lll_lock_wait (futex);
+  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
+    {
+      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+	__lll_lock_wait_private (futex);
+      else
+	__lll_lock_wait (futex, private);
+    }
 }
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
 
 static inline int
 __attribute__ ((always_inline))
-__lll_robust_mutex_lock (int *futex, int id)
+__lll_robust_lock (int *futex, int id, int private)
 {
   int result = 0;
-  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_lock_wait (futex);
+  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, id, 0),
+			0))
+    result = __lll_robust_lock_wait (futex, private);
   return result;
 }
-#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), id, private)
 
 static inline void
 __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
 {
-  if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
-    __lll_lock_wait (futex);
+  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 2, 0), 0))
+    __lll_lock_wait (futex, private);
 }
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
 
-#define lll_robust_mutex_cond_lock(futex, id) \
-  __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+#define lll_robust_cond_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
 
 extern int __lll_timedlock_wait
-  (int *futex, const struct timespec *) attribute_hidden;
+  (int *futex, const struct timespec *, int private) attribute_hidden;
 extern int __lll_robust_timedlock_wait
-  (int *futex, const struct timespec *) attribute_hidden;
+  (int *futex, const struct timespec *, int private) attribute_hidden;
 
 static inline int
 __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
 {
   int result = 0;
-  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    result = __lll_timedlock_wait (futex, abstime);
+  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
+    result = __lll_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_mutex_timedlock(futex, abstime) \
-  __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+  __lll_timedlock (&(futex), abstime, private)
 
 static inline int
 __attribute__ ((always_inline))
-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
-			      int id)
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+			int id, int private)
 {
   int result = 0;
-  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_timedlock_wait (futex, abstime);
+  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, id, 0),
+			0))
+    result = __lll_robust_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
-  __lll_robust_mutex_timedlock (&(futex), abstime, id)
+#define lll_robust_timedlock(futex, abstime, id, private) \
+  __lll_robust_timedlock (&(futex), abstime, id, private)
 
 
 static inline void
 __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
+__lll_unlock (int *futex, int private)
 {
   int oldval;
   int newval = 0;
 
   lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
-  if (oldval > 1)
-    lll_futex_wake (futex, 1, LLL_SHARED);
+  if (__builtin_expect (oldval > 1, 0))
+    lll_futex_wake (futex, 1, private);
 }
-#define lll_mutex_unlock(futex) \
-  __lll_mutex_unlock(&(futex))
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
 
 
 static inline void
 __attribute__ ((always_inline))
-__lll_robust_mutex_unlock (int *futex, int mask)
+__lll_robust_unlock (int *futex, int private)
 {
   int oldval;
   int newval = 0;
 
   lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
-  if (oldval & mask)
-    lll_futex_wake (futex, 1, LLL_SHARED);
-}
-#define lll_robust_mutex_unlock(futex) \
-  __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
-
-
-static inline void
-__attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
-{
-  *futex = 0;
-  lll_futex_wake (futex, 1, LLL_SHARED);
+  if (__builtin_expect (oldval & FUTEX_WAITERS, 0))
+    lll_futex_wake (futex, 1, private);
 }
-#define lll_mutex_unlock_force(futex) \
-  __lll_mutex_unlock_force(&(futex))
+#define lll_robust_unlock(futex, private) \
+  __lll_robust_unlock(&(futex), private)
 
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
   (futex != 0)
 
 
-/* We have a separate internal lock implementation which is not tied
-   to binary compatibility.  We can use the lll_mutex_*.  */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER		(0)
 #define LLL_LOCK_INITIALIZER_LOCKED	(1)
 
-#define lll_trylock(futex)      lll_mutex_trylock (futex)
-#define lll_lock(futex)         lll_mutex_lock (futex)
-#define lll_unlock(futex)       lll_mutex_unlock (futex)
-#define lll_islocked(futex)     lll_mutex_islocked (futex)
-
-/* The states of a lock are:
-    1  -  untaken
-    0  -  taken by one user
-   <0  -  taken by more users */
-
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
@@ -373,25 +350,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;								      \
   })
 
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-				 const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 #endif	/* lowlevellock.h */
diff --git a/nptl/sysdeps/unix/sysv/linux/sem_post.c b/nptl/sysdeps/unix/sysv/linux/sem_post.c
index 7f90325585..25b676fcd2 100644
--- a/nptl/sysdeps/unix/sysv/linux/sem_post.c
+++ b/nptl/sysdeps/unix/sysv/linux/sem_post.c
@@ -36,8 +36,7 @@ __new_sem_post (sem_t *sem)
   if (isem->nwaiters > 0)
     {
       int err = lll_futex_wake (&isem->value, 1,
-				// XYZ check mutex flag
-				LLL_SHARED);
+				isem->private ^ FUTEX_PRIVATE_FLAG);
       if (__builtin_expect (err, 0) < 0)
 	{
 	  __set_errno (-err);
diff --git a/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c b/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
index 8f92d78abe..fdf0d74011 100644
--- a/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
+++ b/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
@@ -85,8 +85,7 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
       int oldtype = __pthread_enable_asynccancel ();
 
       err = lll_futex_timed_wait (&isem->value, 0, &rt,
-				  // XYZ check mutex flag
-				  LLL_SHARED);
+				  isem->private ^ FUTEX_PRIVATE_FLAG);
 
       /* Disable asynchronous cancellation.  */
       __pthread_disable_asynccancel (oldtype);
diff --git a/nptl/sysdeps/unix/sysv/linux/sem_wait.c b/nptl/sysdeps/unix/sysv/linux/sem_wait.c
index 12f3f16c2d..20e2b481df 100644
--- a/nptl/sysdeps/unix/sysv/linux/sem_wait.c
+++ b/nptl/sysdeps/unix/sysv/linux/sem_wait.c
@@ -57,8 +57,7 @@ __new_sem_wait (sem_t *sem)
       int oldtype = __pthread_enable_asynccancel ();
 
       err = lll_futex_wait (&isem->value, 0,
-			    // XYZ check mutex flag
-			    LLL_SHARED);
+			    isem->private ^ FUTEX_PRIVATE_FLAG);
 
       /* Disable asynchronous cancellation.  */
       __pthread_disable_asynccancel (oldtype);
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h b/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h
new file mode 100644
index 0000000000..4d0ea51723
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h
@@ -0,0 +1,18 @@
+#ifndef _INTERNALTYPES_H
+#include "../internaltypes.h"
+
+union sparc_pthread_barrier
+{
+  struct pthread_barrier b;
+  struct sparc_pthread_barrier_s
+    {
+      unsigned int curr_event;
+      int lock;
+      unsigned int left;
+      unsigned int init_count;
+      unsigned char left_lock;
+      unsigned char pshared;
+    } s;
+};
+
+#endif
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
index 922e3c21d3..38692bbd2d 100644
--- a/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
@@ -70,9 +70,6 @@
 #endif
 
 
-/* Initializer for compatibility lock.	*/
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
 #define lll_futex_wait(futexp, val, private) \
   lll_futex_timed_wait (futexp, val, NULL, private)
 
@@ -110,12 +107,12 @@
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
   })
 
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
   do									      \
     {									      \
       int *__futexp = &(futexv);					      \
       atomic_or (__futexp, FUTEX_OWNER_DIED);				      \
-      lll_futex_wake (__futexp, 1, LLL_SHARED);				      \
+      lll_futex_wake (__futexp, 1, private);				      \
     }									      \
   while (0)
 
@@ -139,146 +136,132 @@
 
 static inline int
 __attribute__ ((always_inline))
-__lll_mutex_trylock (int *futex)
+__lll_trylock (int *futex)
 {
   return atomic_compare_and_exchange_val_24_acq (futex, 1, 0) != 0;
 }
-#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
+#define lll_trylock(futex) __lll_trylock (&(futex))
 
 static inline int
 __attribute__ ((always_inline))
-__lll_mutex_cond_trylock (int *futex)
+__lll_cond_trylock (int *futex)
 {
   return atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0;
 }
-#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
+#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
 
 static inline int
 __attribute__ ((always_inline))
-__lll_robust_mutex_trylock (int *futex, int id)
+__lll_robust_trylock (int *futex, int id)
 {
   return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
 }
-#define lll_robust_mutex_trylock(futex, id) \
-  __lll_robust_mutex_trylock (&(futex), id)
+#define lll_robust_trylock(futex, id) \
+  __lll_robust_trylock (&(futex), id)
 
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
 
 static inline void
 __attribute__ ((always_inline))
-__lll_mutex_lock (int *futex)
+__lll_lock (int *futex, int private)
 {
   int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
 
   if (__builtin_expect (val != 0, 0))
-    __lll_lock_wait (futex);
+    {
+      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+	__lll_lock_wait_private (futex);
+      else
+	__lll_lock_wait (futex, private);
+    }
 }
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
 
 static inline int
 __attribute__ ((always_inline))
-__lll_robust_mutex_lock (int *futex, int id)
+__lll_robust_lock (int *futex, int id, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_lock_wait (futex);
+    result = __lll_robust_lock_wait (futex, private);
   return result;
 }
-#define lll_robust_mutex_lock(futex, id) \
-  __lll_robust_mutex_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), id, private)
 
 static inline void
 __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
 {
   int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0);
 
   if (__builtin_expect (val != 0, 0))
-    __lll_lock_wait (futex);
+    __lll_lock_wait (futex, private);
 }
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
 
-#define lll_robust_mutex_cond_lock(futex, id) \
-  __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+#define lll_robust_cond_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
 
 
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
-     attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
-     attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+				 int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+					int private) attribute_hidden;
 
 static inline int
 __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
 {
   int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
   int result = 0;
 
   if (__builtin_expect (val != 0, 0))
-    result = __lll_timedlock_wait (futex, abstime);
+    result = __lll_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_mutex_timedlock(futex, abstime) \
-  __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+  __lll_timedlock (&(futex), abstime, private)
 
 static inline int
 __attribute__ ((always_inline))
-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
-			      int id)
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+			int id, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_timedlock_wait (futex, abstime);
+    result = __lll_robust_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
-  __lll_robust_mutex_timedlock (&(futex), abstime, id)
+#define lll_robust_timedlock(futex, abstime, id, private) \
+  __lll_robust_timedlock (&(futex), abstime, id, private)
 
-#define lll_mutex_unlock(lock) \
+#define lll_unlock(lock, private) \
   ((void) ({								      \
     int *__futex = &(lock);						      \
     int __val = atomic_exchange_24_rel (__futex, 0);			      \
     if (__builtin_expect (__val > 1, 0))				      \
-      lll_futex_wake (__futex, 1, LLL_SHARED);				      \
+      lll_futex_wake (__futex, 1, private);				      \
   }))
 
-#define lll_robust_mutex_unlock(lock) \
+#define lll_robust_unlock(lock, private) \
   ((void) ({								      \
     int *__futex = &(lock);						      \
     int __val = atomic_exchange_rel (__futex, 0);			      \
     if (__builtin_expect (__val & FUTEX_WAITERS, 0))			      \
-      lll_futex_wake (__futex, 1, LLL_SHARED);				      \
-  }))
-
-#define lll_mutex_unlock_force(lock) \
-  ((void) ({								      \
-    int *__futex = &(lock);						      \
-    (void) atomic_exchange_24_rel (__futex, 0);				      \
-    lll_futex_wake (__futex, 1, LLL_SHARED);				      \
+      lll_futex_wake (__futex, 1, private);				      \
   }))
 
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
   (futex != 0)
 
-
-/* We have a separate internal lock implementation which is not tied
-   to binary compatibility.  We can use the lll_mutex_*.  */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER		(0)
 #define LLL_LOCK_INITIALIZER_LOCKED	(1)
 
-#define lll_trylock(futex)	lll_mutex_trylock (futex)
-#define lll_lock(futex)		lll_mutex_lock (futex)
-#define lll_unlock(futex)	lll_mutex_unlock (futex)
-#define lll_islocked(futex)	lll_mutex_islocked (futex)
-
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
@@ -303,26 +286,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;						\
   })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-				 const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 #endif	/* lowlevellock.h */
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
new file mode 100644
index 0000000000..b677fb6cf1
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
@@ -0,0 +1,45 @@
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_barrier_destroy (barrier)
+     pthread_barrier_t *barrier;
+{
+  union sparc_pthread_barrier *ibarrier;
+  int result = EBUSY;
+
+  ibarrier = (union sparc_pthread_barrier *) barrier;
+
+  int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
+
+  lll_lock (ibarrier->b.lock, private);
+
+  if (__builtin_expect (ibarrier->b.left == ibarrier->b.init_count, 1))
+    /* The barrier is not used anymore.  */
+    result = 0;
+  else
+    /* Still used, return with an error.  */
+    lll_unlock (ibarrier->b.lock, private);
+
+  return result;
+}
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
new file mode 100644
index 0000000000..0a1159cf8a
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
@@ -0,0 +1,55 @@
+/* Copyright (C) 2002, 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_barrier_init (barrier, attr, count)
+     pthread_barrier_t *barrier;
+     const pthread_barrierattr_t *attr;
+     unsigned int count;
+{
+  union sparc_pthread_barrier *ibarrier;
+
+  if (__builtin_expect (count == 0, 0))
+    return EINVAL;
+
+  struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr;
+  if (iattr != NULL)
+    {
+      if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
+	  && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
+	/* Invalid attribute.  */
+	return EINVAL;
+    }
+
+  ibarrier = (union sparc_pthread_barrier *) barrier;
+
+  /* Initialize the individual fields.  */
+  ibarrier->b.lock = LLL_LOCK_INITIALIZER;
+  ibarrier->b.left = count;
+  ibarrier->b.init_count = count;
+  ibarrier->b.curr_event = 0;
+  ibarrier->s.left_lock = 0;
+  ibarrier->s.pshared = (iattr && iattr->pshared == PTHREAD_PROCESS_SHARED);
+
+  return 0;
+}
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c
new file mode 100644
index 0000000000..f67785fced
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c
@@ -0,0 +1,78 @@
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthreadP.h>
+
+/* Wait on barrier.  */
+int
+pthread_barrier_wait (barrier)
+     pthread_barrier_t *barrier;
+{
+  union sparc_pthread_barrier *ibarrier
+    = (union sparc_pthread_barrier *) barrier;
+  int result = 0;
+  int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
+
+  /* Make sure we are alone.  */
+  lll_lock (ibarrier->b.lock, private);
+
+  /* One more arrival.  */
+  --ibarrier->b.left;
+
+  /* Are these all?  */
+  if (ibarrier->b.left == 0)
+    {
+      /* Yes. Increment the event counter to avoid invalid wake-ups and
+	 tell the current waiters that it is their turn.  */
+      ++ibarrier->b.curr_event;
+
+      /* Wake up everybody.  */
+      lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private);
+
+      /* This is the thread which finished the serialization.  */
+      result = PTHREAD_BARRIER_SERIAL_THREAD;
+    }
+  else
+    {
+      /* The number of the event we are waiting for.  The barrier's event
+	 number must be bumped before we continue.  */
+      unsigned int event = ibarrier->b.curr_event;
+
+      /* Before suspending, make the barrier available to others.  */
+      lll_unlock (ibarrier->b.lock, private);
+
+      /* Wait for the event counter of the barrier to change.  */
+      do
+	lll_futex_wait (&ibarrier->b.curr_event, event, private);
+      while (event == ibarrier->b.curr_event);
+    }
+
+  /* Make sure the init_count is stored locally or in a register.  */
+  unsigned int init_count = ibarrier->b.init_count;
+
+  /* If this was the last woken thread, unlock.  */
+  if (atomic_increment_val (&ibarrier->b.left) == init_count)
+    /* We are done.  */
+    lll_unlock (ibarrier->b.lock, private);
+
+  return result;
+}
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c b/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c
index cb9578b47b..1ee9b4737b 100644
--- a/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c
@@ -25,20 +25,35 @@
 
 
 void
-__lll_lock_wait (int *futex)
+__lll_lock_wait_private (int *futex)
 {
   do
     {
       int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
       if (oldval != 0)
-	lll_futex_wait (futex, 2);
+	lll_futex_wait (futex, 2, LLL_PRIVATE);
+    }
+  while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
+}
+
+#ifdef IS_IN_libpthread
+/* These functions don't get included in libc.so  */
+
+void
+__lll_lock_wait (int *futex, int private)
+{
+  do
+    {
+      int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
+      if (oldval != 0)
+	lll_futex_wait (futex, 2, private);
     }
   while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
 }
 
 
 int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
 {
   /* Reject invalid timeouts.  */
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -68,7 +83,7 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
       /* Wait.  */
       int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
       if (oldval != 0)
-	lll_futex_timed_wait (futex, 2, &rt);
+	lll_futex_timed_wait (futex, 2, &rt, private);
     }
   while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
 
@@ -76,8 +91,6 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
 }
 
 
-/* This function doesn't get included in libc.so  */
-#ifdef IS_IN_libpthread
 int
 __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
 {
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c b/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
index 868e0d2819..89a23490f0 100644
--- a/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
@@ -22,24 +22,18 @@
 #include <lowlevellock.h>
 #include <pthreadP.h>
 
-struct sparc_pthread_barrier
-{
-  struct pthread_barrier b;
-  unsigned char left_lock;
-  unsigned char pshared;
-};
-
 /* Wait on barrier.  */
 int
 pthread_barrier_wait (barrier)
      pthread_barrier_t *barrier;
 {
-  struct sparc_pthread_barrier *ibarrier
-    = (struct sparc_pthread_barrier *) barrier;
+  union sparc_pthread_barrier *ibarrier
+    = (union sparc_pthread_barrier *) barrier;
   int result = 0;
+  int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
 
   /* Make sure we are alone.  */
-  lll_lock (ibarrier->b.lock);
+  lll_lock (ibarrier->b.lock, private);
 
   /* One more arrival.  */
   --ibarrier->b.left;
@@ -52,9 +46,7 @@ pthread_barrier_wait (barrier)
       ++ibarrier->b.curr_event;
 
       /* Wake up everybody.  */
-      lll_futex_wake (&ibarrier->b.curr_event, INT_MAX,
-		      // XYZ check mutex flag
-		      LLL_SHARED);
+      lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private);
 
       /* This is the thread which finished the serialization.  */
       result = PTHREAD_BARRIER_SERIAL_THREAD;
@@ -66,13 +58,11 @@ pthread_barrier_wait (barrier)
       unsigned int event = ibarrier->b.curr_event;
 
       /* Before suspending, make the barrier available to others.  */
-      lll_unlock (ibarrier->b.lock);
+      lll_unlock (ibarrier->b.lock, private);
 
       /* Wait for the event counter of the barrier to change.  */
       do
-	lll_futex_wait (&ibarrier->b.curr_event, event,
-			// XYZ check mutex flag
-			LLL_SHARED);
+	lll_futex_wait (&ibarrier->b.curr_event, event, private);
       while (event == ibarrier->b.curr_event);
     }
 
@@ -80,11 +70,11 @@ pthread_barrier_wait (barrier)
   unsigned int init_count = ibarrier->b.init_count;
 
   /* If this was the last woken thread, unlock.  */
-  if (__atomic_is_v9 || ibarrier->pshared == 0)
+  if (__atomic_is_v9 || ibarrier->s.pshared == 0)
     {
       if (atomic_increment_val (&ibarrier->b.left) == init_count)
 	/* We are done.  */
-	lll_unlock (ibarrier->b.lock);
+	lll_unlock (ibarrier->b.lock, private);
     }
   else
     {
@@ -92,12 +82,12 @@ pthread_barrier_wait (barrier)
       /* Slightly more complicated.  On pre-v9 CPUs, atomic_increment_val
 	 is only atomic for threads within the same process, not for
 	 multiple processes.  */
-      __sparc32_atomic_do_lock24 (&ibarrier->left_lock);
+      __sparc32_atomic_do_lock24 (&ibarrier->s.left_lock);
       left = ++ibarrier->b.left;
-      __sparc32_atomic_do_unlock24 (&ibarrier->left_lock);
+      __sparc32_atomic_do_unlock24 (&ibarrier->s.left_lock);
       if (left == init_count)
         /* We are done.  */
-        lll_unlock (ibarrier->b.lock);
+	lll_unlock (ibarrier->b.lock, private);
     }
 
   return result;
diff --git a/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c b/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c
index 55d20eb8f8..2d32e58240 100644
--- a/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c
@@ -1 +1 @@
-#include "../../../../../../../pthread_barrier_wait.c"
+#include "../../pthread_barrier_wait.c"
diff --git a/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c b/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c
index bc5b7537a3..56a4f149e1 100644
--- a/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c
+++ b/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c
@@ -54,7 +54,7 @@ __unregister_atfork (dso_handle)
      that there couldn't have been another thread deleting something.
      The __unregister_atfork function is only called from the
      dlclose() code which itself serializes the operations.  */
-  lll_lock (__fork_lock);
+  lll_lock (__fork_lock, LLL_PRIVATE);
 
   /* We have to create a new list with all the entries we don't remove.  */
   struct deleted_handler
@@ -89,7 +89,7 @@ __unregister_atfork (dso_handle)
   while (runp != NULL);
 
   /* Release the lock.  */
-  lll_unlock (__fork_lock);
+  lll_unlock (__fork_lock, LLL_PRIVATE);
 
   /* Walk the list of all entries which have to be deleted.  */
   while (deleted != NULL)
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
index 3265eee0ed..ce8ad27aa7 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S
@@ -17,19 +17,4 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
-#include <kernel-features.h>
-
-/* All locks in libc are private.  Use the kernel feature if possible.  */
-#define FUTEX_PRIVATE_FLAG	128
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define FUTEX_WAIT		(0 | FUTEX_PRIVATE_FLAG)
-# define FUTEX_WAKE		(1 | FUTEX_PRIVATE_FLAG)
-#else
-# define LOAD_FUTEX_WAIT(reg) \
-	movl	%fs:PRIVATE_FUTEX, reg
-# define LOAD_FUTEX_WAKE(reg) \
-	movl	%fs:PRIVATE_FUTEX, reg ; \
-	orl	$FUTEX_WAKE, reg
-#endif
-
 #include "lowlevellock.S"
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
index 502f1d442f..7065cfac32 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
@@ -19,33 +19,46 @@
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
 
 	.text
 
-#ifndef LOCK
-# ifdef UP
-#  define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+	movl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAKE(reg) \
+	xorl	$(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl    %fs:PRIVATE_FUTEX, reg
 # else
-#  define LOCK lock
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+	movl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
 # endif
-#endif
-
-#define SYS_futex		202
-#ifndef FUTEX_WAIT
-# define FUTEX_WAIT		0
-# define FUTEX_WAKE		1
-#endif
-
-#ifndef LOAD_FUTEX_WAIT
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+	movl    %fs:PRIVATE_FUTEX, reg ; \
+	orl     $FUTEX_WAKE, reg
 # if FUTEX_WAIT == 0
 #  define LOAD_FUTEX_WAIT(reg) \
-	xorl	reg, reg
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg
 # else
 #  define LOAD_FUTEX_WAIT(reg) \
-	movl	$FUTEX_WAIT, reg
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
 # endif
 # define LOAD_FUTEX_WAKE(reg) \
-	movl	$FUTEX_WAKE, reg
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAKE, reg
 #endif
 
 
@@ -53,11 +66,11 @@
 #define VSYSCALL_ADDR_vgettimeofday	0xffffffffff600000
 
 
-	.globl	__lll_mutex_lock_wait
-	.type	__lll_mutex_lock_wait,@function
-	.hidden	__lll_mutex_lock_wait
+	.globl	__lll_lock_wait_private
+	.type	__lll_lock_wait_private,@function
+	.hidden	__lll_lock_wait_private
 	.align	16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
 	cfi_startproc
 	pushq	%r10
 	cfi_adjust_cfa_offset(8)
@@ -67,7 +80,7 @@ __lll_mutex_lock_wait:
 	cfi_offset(%rdx, -24)
 	xorq	%r10, %r10	/* No timeout.  */
 	movl	$2, %edx
-	LOAD_FUTEX_WAIT (%esi)
+	LOAD_PRIVATE_FUTEX_WAIT (%esi)
 
 	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
 	jne	2f
@@ -89,15 +102,52 @@ __lll_mutex_lock_wait:
 	cfi_restore(%r10)
 	retq
 	cfi_endproc
-	.size	__lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+	.size	__lll_lock_wait_private,.-__lll_lock_wait_private
 
 #ifdef NOT_IN_libc
-	.globl	__lll_mutex_timedlock_wait
-	.type	__lll_mutex_timedlock_wait,@function
-	.hidden	__lll_mutex_timedlock_wait
+	.globl	__lll_lock_wait
+	.type	__lll_lock_wait,@function
+	.hidden	__lll_lock_wait
 	.align	16
-__lll_mutex_timedlock_wait:
+__lll_lock_wait:
+	cfi_startproc
+	pushq	%r10
+	cfi_adjust_cfa_offset(8)
+	pushq	%rdx
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%r10, -16)
+	cfi_offset(%rdx, -24)
+	xorq	%r10, %r10	/* No timeout.  */
+	movl	$2, %edx
+	LOAD_FUTEX_WAIT (%esi)
+
+	cmpl	%edx, %eax	/* NB:	 %edx == 2 */
+	jne	2f
+
+1:	movl	$SYS_futex, %eax
+	syscall
+
+2:	movl	%edx, %eax
+	xchgl	%eax, (%rdi)	/* NB:	 lock is implied */
+
+	testl	%eax, %eax
+	jnz	1b
+
+	popq	%rdx
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rdx)
+	popq	%r10
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r10)
+	retq
+	cfi_endproc
+	.size	__lll_lock_wait,.-__lll_lock_wait
+
+	.globl	__lll_timedlock_wait
+	.type	__lll_timedlock_wait,@function
+	.hidden	__lll_timedlock_wait
+	.align	16
+__lll_timedlock_wait:
 	cfi_startproc
 	/* Check for a valid timeout value.  */
 	cmpq	$1000000000, 8(%rdx)
@@ -118,10 +168,12 @@ __lll_mutex_timedlock_wait:
 	cfi_offset(%r12, -32)
 	cfi_offset(%r13, -40)
 	cfi_offset(%r14, -48)
+	pushq	%rsi
+	cfi_adjust_cfa_offset(8)
 
 	/* Stack frame for the timespec and timeval structs.  */
-	subq	$16, %rsp
-	cfi_adjust_cfa_offset(16)
+	subq	$24, %rsp
+	cfi_adjust_cfa_offset(24)
 
 	movq	%rdi, %r12
 	movq	%rdx, %r13
@@ -162,6 +214,7 @@ __lll_mutex_timedlock_wait:
 	je	8f
 
 	movq	%rsp, %r10
+	movl	24(%rsp), %esi
 	LOAD_FUTEX_WAIT (%esi)
 	movq	%r12, %rdi
 	movl	$SYS_futex, %eax
@@ -174,8 +227,8 @@ __lll_mutex_timedlock_wait:
 	cmpxchgl %edx, (%r12)
 	jnz	7f
 
-6:	addq	$16, %rsp
-	cfi_adjust_cfa_offset(-16)
+6:	addq	$32, %rsp
+	cfi_adjust_cfa_offset(-32)
 	popq	%r14
 	cfi_adjust_cfa_offset(-8)
 	cfi_restore(%r14)
@@ -196,7 +249,7 @@ __lll_mutex_timedlock_wait:
 3:	movl	$EINVAL, %eax
 	retq
 
-	cfi_adjust_cfa_offset(56)
+	cfi_adjust_cfa_offset(72)
 	cfi_offset(%r8, -16)
 	cfi_offset(%r9, -24)
 	cfi_offset(%r12, -32)
@@ -216,15 +269,15 @@ __lll_mutex_timedlock_wait:
 5:	movl	$ETIMEDOUT, %eax
 	jmp	6b
 	cfi_endproc
-	.size	__lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+	.size	__lll_timedlock_wait,.-__lll_timedlock_wait
 #endif
 
 
-	.globl	__lll_mutex_unlock_wake
-	.type	__lll_mutex_unlock_wake,@function
-	.hidden	__lll_mutex_unlock_wake
+	.globl	__lll_unlock_wake_private
+	.type	__lll_unlock_wake_private,@function
+	.hidden	__lll_unlock_wake_private
 	.align	16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake_private:
 	cfi_startproc
 	pushq	%rsi
 	cfi_adjust_cfa_offset(8)
@@ -234,7 +287,7 @@ __lll_mutex_unlock_wake:
 	cfi_offset(%rdx, -24)
 
 	movl	$0, (%rdi)
-	LOAD_FUTEX_WAKE (%esi)
+	LOAD_PRIVATE_FUTEX_WAKE (%esi)
 	movl	$1, %edx	/* Wake one thread.  */
 	movl	$SYS_futex, %eax
 	syscall
@@ -247,10 +300,38 @@ __lll_mutex_unlock_wake:
 	cfi_restore(%rsi)
 	retq
 	cfi_endproc
-	.size	__lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
-
+	.size	__lll_unlock_wake_private,.-__lll_unlock_wake_private
 
 #ifdef NOT_IN_libc
+	.globl	__lll_unlock_wake
+	.type	__lll_unlock_wake,@function
+	.hidden	__lll_unlock_wake
+	.align	16
+__lll_unlock_wake:
+	cfi_startproc
+	pushq	%rsi
+	cfi_adjust_cfa_offset(8)
+	pushq	%rdx
+	cfi_adjust_cfa_offset(8)
+	cfi_offset(%rsi, -16)
+	cfi_offset(%rdx, -24)
+
+	movl	$0, (%rdi)
+	LOAD_FUTEX_WAKE (%esi)
+	movl	$1, %edx	/* Wake one thread.  */
+	movl	$SYS_futex, %eax
+	syscall
+
+	popq	%rdx
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rdx)
+	popq	%rsi
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%rsi)
+	retq
+	cfi_endproc
+	.size	__lll_unlock_wake,.-__lll_unlock_wake
+
 	.globl	__lll_timedwait_tid
 	.type	__lll_timedwait_tid,@function
 	.hidden	__lll_timedwait_tid
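
The rewritten LOAD_FUTEX_WAIT/WAKE macros above lean on LLL_SHARED being defined as FUTEX_PRIVATE_FLAG: the caller's private argument arrives in %esi as 0 (private) or 128 (shared), so one XOR with the op code plus FUTEX_PRIVATE_FLAG yields the final futex operation. A small C sketch of that arithmetic, assuming FUTEX_WAIT == 0, FUTEX_WAKE == 1 and FUTEX_PRIVATE_FLAG == 128 as in the patch; the consultation of %fs:PRIVATE_FUTEX used on kernels without private-futex support is omitted here:

    #include <assert.h>
    #include <stdio.h>

    #define FUTEX_WAIT          0
    #define FUTEX_WAKE          1
    #define FUTEX_PRIVATE_FLAG  128

    #define LLL_PRIVATE 0
    #define LLL_SHARED  FUTEX_PRIVATE_FLAG

    /* What LOAD_FUTEX_WAIT/WAKE do under __ASSUME_PRIVATE_FUTEX: the register
       starts out holding the lll 'private' argument and is XORed in place.  */
    static int load_futex_wait (int private)
    {
      return private ^ (FUTEX_WAIT | FUTEX_PRIVATE_FLAG);
    }

    static int load_futex_wake (int private)
    {
      return private ^ (FUTEX_WAKE | FUTEX_PRIVATE_FLAG);
    }

    int main (void)
    {
      /* Private callers end up with the PRIVATE flag set ...  */
      assert (load_futex_wait (LLL_PRIVATE) == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
      assert (load_futex_wake (LLL_PRIVATE) == (FUTEX_WAKE | FUTEX_PRIVATE_FLAG));
      /* ... and shared callers with it cleared, which is why the seemingly
         backwards LLL_SHARED == FUTEX_PRIVATE_FLAG encoding pays off.  */
      assert (load_futex_wait (LLL_SHARED) == FUTEX_WAIT);
      assert (load_futex_wake (LLL_SHARED) == FUTEX_WAKE);
      puts ("flag arithmetic checks out");
      return 0;
    }
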
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index 7ec7deff17..192d203926 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -20,17 +20,27 @@
 #ifndef _LOWLEVELLOCK_H
 #define _LOWLEVELLOCK_H	1
 
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <kernel-features.h>
-#include <tcb-offsets.h>
-
-#ifndef LOCK_INSTR
-# ifdef UP
-#  define LOCK_INSTR	/* nothing */
-# else
-#  define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+#  ifdef UP
+#   define LOCK_INSTR	/* nothing */
+#  else
+#   define LOCK_INSTR "lock;"
+#  endif
+# endif
+#else
+# ifndef LOCK
+#  ifdef UP
+#   define LOCK
+#  else
+#   define LOCK lock
+#  endif
 # endif
 #endif
 
@@ -38,11 +48,13 @@
 #define FUTEX_WAIT		0
 #define FUTEX_WAKE		1
 #define FUTEX_CMP_REQUEUE	4
+#define FUTEX_WAKE_OP	   5
 #define FUTEX_LOCK_PI		6
 #define FUTEX_UNLOCK_PI		7
 #define FUTEX_TRYLOCK_PI	8
 #define FUTEX_PRIVATE_FLAG	128
 
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
 
 /* Values for 'private' parameter of locking macros.  Yes, the
    definition seems to be backwards.  But it is not.  The bit will be
@@ -50,6 +62,8 @@
 #define LLL_PRIVATE	0
 #define LLL_SHARED	FUTEX_PRIVATE_FLAG
 
+#ifndef __ASSEMBLER__
+
 #if !defined NOT_IN_libc || defined IS_IN_rtld
 /* In libc.so or ld.so all futexes are private.  */
 # ifdef __ASSUME_PRIVATE_FUTEX
@@ -76,13 +90,13 @@
 # endif	      
 #endif
 
-/* Initializer for compatibility lock.  */
-#define LLL_MUTEX_LOCK_INITIALIZER		(0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED	(1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS	(2)
+/* Initializer for lock.  */
+#define LLL_LOCK_INITIALIZER		(0)
+#define LLL_LOCK_INITIALIZER_LOCKED	(1)
+#define LLL_LOCK_INITIALIZER_WAITERS	(2)
 
 /* Delay in spinlock loop.  */
-#define BUSY_WAIT_NOP          asm ("rep; nop")
+#define BUSY_WAIT_NOP	  asm ("rep; nop")
 
 
 #define LLL_STUB_UNWIND_INFO_START \
@@ -196,7 +210,7 @@ LLL_STUB_UNWIND_INFO_END
 		      : "=a" (__status)					      \
 		      : "0" (SYS_futex), "D" (futex),			      \
 			"S" (__lll_private_flag (FUTEX_WAIT, private)),	      \
-		        "d" (_val), "r" (__to)				      \
+			"d" (_val), "r" (__to)				      \
 		      : "memory", "cc", "r11", "cx");			      \
     __status;								      \
   })
@@ -215,242 +229,308 @@ LLL_STUB_UNWIND_INFO_END
   } while (0)
 
 
-
-/* Does not preserve %eax and %ecx.  */
-extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden;
-/* Does not preserver %eax, %ecx, and %edx.  */
-extern int __lll_mutex_timedlock_wait (int *__futex, int __val,
-				       const struct timespec *__abstime)
-     attribute_hidden;
-/* Preserves all registers but %eax.  */
-extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
-
-
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
    after the cmpxchg instruction.  In case the operation succeded this
    value is zero.  In case the operation failed, the cmpxchg instruction
    has loaded the current value of the memory work which is guaranteed
    to be nonzero.  */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t"      \
+			   "je 0f\n\t"					      \
+			   "lock; cmpxchgl %2, %1\n\t"			      \
+			   "jmp 1f\n\t"					      \
+			   "0:\tcmpxchgl %2, %1\n\t"			      \
+			   "1:"
+#endif
+
+#define lll_trylock(futex) \
   ({ int ret;								      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
+     __asm __volatile (__lll_trylock_asm				      \
 		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
-			 "0" (LLL_MUTEX_LOCK_INITIALIZER)		      \
+		       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
+			 "0" (LLL_LOCK_INITIALIZER)			      \
 		       : "memory");					      \
      ret; })
 
-
-#define lll_robust_mutex_trylock(futex, id)				      \
+#define lll_robust_trylock(futex, id) \
   ({ int ret;								      \
      __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
 		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (id), "m" (futex),				      \
-			 "0" (LLL_MUTEX_LOCK_INITIALIZER)		      \
+		       : "r" (id), "m" (futex),	"0" (LLL_LOCK_INITIALIZER)    \
 		       : "memory");					      \
      ret; })
 
-
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
   ({ int ret;								      \
      __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
 		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS),	      \
-			 "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER)	      \
+		       : "r" (LLL_LOCK_INITIALIZER_WAITERS),		      \
+			 "m" (futex), "0" (LLL_LOCK_INITIALIZER)	      \
 		       : "memory");					      \
      ret; })
 
-
-#define lll_mutex_lock(futex) \
-  (void) ({ int ignore1, ignore2, ignore3;				      \
-	    __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t"		      \
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
+			      "jnz 1f\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t"   \
+			      "je 0f\n\t"				      \
+			      "lock; cmpxchgl %4, %2\n\t"		      \
 			      "jnz 1f\n\t"				      \
-			      ".subsection 1\n\t"			      \
-			      ".type  _L_mutex_lock_%=, @function\n"	      \
-			      "_L_mutex_lock_%=:\n"			      \
-			      "1:\tleaq %2, %%rdi\n"			      \
-			      "2:\tsubq $128, %%rsp\n"			      \
-			      "3:\tcallq __lll_mutex_lock_wait\n"	      \
-			      "4:\taddq $128, %%rsp\n"			      \
-			      "5:\tjmp 24f\n"				      \
-			      "6:\t.size _L_mutex_lock_%=, 6b-1b\n\t"	      \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_5			      \
-			      "24:"					      \
-			      : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
-				"=a" (ignore3)				      \
-			      : "0" (1), "m" (futex), "3" (0)		      \
-			      : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_lock(futex, id) \
+		  	      "jmp 24f\n"				      \
+			      "0:\tcmpxchgl %4, %2\n\t"			      \
+			      "jnz 1f\n\t"
+#endif
+
+#define lll_lock(futex, private) \
+  (void)								      \
+    ({ int ignore1, ignore2, ignore3;					      \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
+	 __asm __volatile (__lll_lock_asm_start				      \
+			   ".subsection 1\n\t"				      \
+			   ".type _L_lock_%=, @function\n"		      \
+			   "_L_lock_%=:\n"				      \
+			   "1:\tleaq %2, %%rdi\n"			      \
+			   "2:\tsubq $128, %%rsp\n"			      \
+			   "3:\tcallq __lll_lock_wait_private\n"	      \
+			   "4:\taddq $128, %%rsp\n"			      \
+			   "5:\tjmp 24f\n"				      \
+			   "6:\t.size _L_lock_%=, 6b-1b\n\t"		      \
+			   ".previous\n"				      \
+			   LLL_STUB_UNWIND_INFO_5			      \
+			   "24:"					      \
+			   : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),   \
+			     "=a" (ignore3)				      \
+			   : "0" (1), "m" (futex), "3" (0)		      \
+			   : "cx", "r11", "cc", "memory");		      \
+       else								      \
+	 __asm __volatile (__lll_lock_asm_start				      \
+			   ".subsection 1\n\t"				      \
+			   ".type _L_lock_%=, @function\n"		      \
+			   "_L_lock_%=:\n"				      \
+			   "1:\tleaq %2, %%rdi\n"			      \
+			   "2:\tsubq $128, %%rsp\n"			      \
+			   "3:\tcallq __lll_lock_wait\n"		      \
+			   "4:\taddq $128, %%rsp\n"			      \
+			   "5:\tjmp 24f\n"				      \
+			   "6:\t.size _L_lock_%=, 6b-1b\n\t"		      \
+			   ".previous\n"				      \
+			   LLL_STUB_UNWIND_INFO_5			      \
+			   "24:"					      \
+			   : "=S" (ignore1), "=D" (ignore2), "=m" (futex),    \
+			     "=a" (ignore3)				      \
+			   : "1" (1), "m" (futex), "3" (0), "0" (private)     \
+			   : "cx", "r11", "cc", "memory");		      \
+    })									      \
+
+#define lll_robust_lock(futex, id, private) \
   ({ int result, ignore1, ignore2;					      \
-    __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t"			      \
+    __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"			      \
 		      "jnz 1f\n\t"					      \
 		      ".subsection 1\n\t"				      \
-		      ".type  _L_robust_mutex_lock_%=, @function\n"	      \
-		      "_L_robust_mutex_lock_%=:\n"			      \
+		      ".type _L_robust_lock_%=, @function\n"		      \
+		      "_L_robust_lock_%=:\n"				      \
 		      "1:\tleaq %2, %%rdi\n"				      \
 		      "2:\tsubq $128, %%rsp\n"				      \
-		      "3:\tcallq __lll_robust_mutex_lock_wait\n"	      \
+		      "3:\tcallq __lll_robust_lock_wait\n"		      \
 		      "4:\taddq $128, %%rsp\n"				      \
 		      "5:\tjmp 24f\n"					      \
-		      "6:\t.size _L_robust_mutex_lock_%=, 6b-1b\n\t"	      \
+		      "6:\t.size _L_robust_lock_%=, 6b-1b\n\t"		      \
 		      ".previous\n"					      \
 		      LLL_STUB_UNWIND_INFO_5				      \
 		      "24:"						      \
-		      : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),	      \
+		      : "=S" (ignore1), "=D" (ignore2), "=m" (futex),	      \
 			"=a" (result)					      \
-		      : "0" (id), "m" (futex), "3" (0)			      \
+		      : "1" (id), "m" (futex), "3" (0), "0" (private)	      \
 		      : "cx", "r11", "cc", "memory");			      \
     result; })
 
-
-#define lll_mutex_cond_lock(futex) \
-  (void) ({ int ignore1, ignore2, ignore3;				      \
-	    __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t"		      \
-			      "jnz 1f\n\t"				      \
-			      ".subsection 1\n\t"			      \
-			      ".type  _L_mutex_cond_lock_%=, @function\n"     \
-			      "_L_mutex_cond_lock_%=:\n"		      \
-			      "1:\tleaq %2, %%rdi\n"			      \
-			      "2:\tsubq $128, %%rsp\n"			      \
-			      "3:\tcallq __lll_mutex_lock_wait\n"	      \
-			      "4:\taddq $128, %%rsp\n"			      \
-			      "5:\tjmp 24f\n"				      \
-			      "6:\t.size _L_mutex_cond_lock_%=, 6b-1b\n\t"    \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_5			      \
-			      "24:"					      \
-			      : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
-				"=a" (ignore3)				      \
-			      : "0" (2), "m" (futex), "3" (0)		      \
-			      : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_cond_lock(futex, id) \
+#define lll_cond_lock(futex, private) \
+  (void)								      \
+    ({ int ignore1, ignore2, ignore3;					      \
+       __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
+			 "jnz 1f\n\t"					      \
+			 ".subsection 1\n\t"				      \
+			 ".type _L_cond_lock_%=, @function\n"		      \
+			 "_L_cond_lock_%=:\n"				      \
+			 "1:\tleaq %2, %%rdi\n"				      \
+			 "2:\tsubq $128, %%rsp\n"			      \
+			 "3:\tcallq __lll_lock_wait\n"			      \
+			 "4:\taddq $128, %%rsp\n"			      \
+			 "5:\tjmp 24f\n"				      \
+			 "6:\t.size _L_cond_lock_%=, 6b-1b\n\t"		      \
+			 ".previous\n"					      \
+			 LLL_STUB_UNWIND_INFO_5				      \
+			 "24:"						      \
+			 : "=S" (ignore1), "=D" (ignore2), "=m" (futex),      \
+			   "=a" (ignore3)				      \
+			 : "1" (2), "m" (futex), "3" (0), "0" (private)	      \
+			 : "cx", "r11", "cc", "memory");		      \
+    })
+
+#define lll_robust_cond_lock(futex, id, private) \
   ({ int result, ignore1, ignore2;					      \
-    __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t"			      \
+    __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"			      \
 		      "jnz 1f\n\t"					      \
 		      ".subsection 1\n\t"				      \
-		      ".type  _L_robust_mutex_cond_lock_%=, @function\n"      \
-		      "_L_robust_mutex_cond_lock_%=:\n"			      \
+		      ".type _L_robust_cond_lock_%=, @function\n"	      \
+		      "_L_robust_cond_lock_%=:\n"			      \
 		      "1:\tleaq %2, %%rdi\n"				      \
 		      "2:\tsubq $128, %%rsp\n"				      \
-		      "3:\tcallq __lll_robust_mutex_lock_wait\n"	      \
+		      "3:\tcallq __lll_robust_lock_wait\n"		      \
 		      "4:\taddq $128, %%rsp\n"				      \
 		      "5:\tjmp 24f\n"					      \
-		      "6:\t.size _L_robust_mutex_cond_lock_%=, 6b-1b\n\t"     \
+		      "6:\t.size _L_robust_cond_lock_%=, 6b-1b\n\t"	      \
 		      ".previous\n"					      \
 		      LLL_STUB_UNWIND_INFO_5				      \
 		      "24:"						      \
-		      : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),	      \
+		      : "=S" (ignore1), "=D" (ignore2), "=m" (futex),	      \
 			"=a" (result)					      \
-		      : "0" (id | FUTEX_WAITERS), "m" (futex), "3" (0)	      \
+		      : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0),	      \
+			"0" (private)					      \
 		      : "cx", "r11", "cc", "memory");			      \
     result; })
 
-
-#define lll_mutex_timedlock(futex, timeout) \
+#define lll_timedlock(futex, timeout, private) \
   ({ int result, ignore1, ignore2, ignore3;				      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t"			      \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t"			      \
 		       "jnz 1f\n\t"					      \
 		       ".subsection 1\n\t"				      \
-		       ".type  _L_mutex_timedlock_%=, @function\n"	      \
-		       "_L_mutex_timedlock_%=:\n"			      \
+		       ".type _L_timedlock_%=, @function\n"		      \
+		       "_L_timedlock_%=:\n"				      \
 		       "1:\tleaq %4, %%rdi\n"				      \
 		       "0:\tmovq %8, %%rdx\n"				      \
 		       "2:\tsubq $128, %%rsp\n"				      \
-		       "3:\tcallq __lll_mutex_timedlock_wait\n"		      \
+		       "3:\tcallq __lll_timedlock_wait\n"		      \
 		       "4:\taddq $128, %%rsp\n"				      \
 		       "5:\tjmp 24f\n"					      \
-		       "6:\t.size _L_mutex_timedlock_%=, 6b-1b\n\t"	      \
+		       "6:\t.size _L_timedlock_%=, 6b-1b\n\t"		      \
 		       ".previous\n"					      \
 		       LLL_STUB_UNWIND_INFO_6				      \
 		       "24:"						      \
-		       : "=a" (result), "=&D" (ignore1), "=S" (ignore2),      \
+		       : "=a" (result), "=D" (ignore1), "=S" (ignore2),	      \
 			 "=&d" (ignore3), "=m" (futex)			      \
-		       : "0" (0), "2" (1), "m" (futex), "m" (timeout)	      \
+		       : "0" (0), "1" (1), "m" (futex), "m" (timeout),	      \
+			 "2" (private)					      \
 		       : "memory", "cx", "cc", "r10", "r11");		      \
      result; })
 
-
-#define lll_robust_mutex_timedlock(futex, timeout, id) \
+#define lll_robust_timedlock(futex, timeout, id, private) \
   ({ int result, ignore1, ignore2, ignore3;				      \
-     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t"			      \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t"			      \
 		       "jnz 1f\n\t"					      \
 		       ".subsection 1\n\t"				      \
-		       ".type  _L_robust_mutex_timedlock_%=, @function\n"     \
-		       "_L_robust_mutex_timedlock_%=:\n"		      \
+		       ".type _L_robust_timedlock_%=, @function\n"	      \
+		       "_L_robust_timedlock_%=:\n"			      \
 		       "1:\tleaq %4, %%rdi\n"				      \
 		       "0:\tmovq %8, %%rdx\n"				      \
 		       "2:\tsubq $128, %%rsp\n"				      \
-		       "3:\tcallq __lll_robust_mutex_timedlock_wait\n"	      \
+		       "3:\tcallq __lll_robust_timedlock_wait\n"	      \
 		       "4:\taddq $128, %%rsp\n"				      \
 		       "5:\tjmp 24f\n"					      \
-		       "6:\t.size _L_robust_mutex_timedlock_%=, 6b-1b\n\t"    \
+		       "6:\t.size _L_robust_timedlock_%=, 6b-1b\n\t"	      \
 		       ".previous\n"					      \
 		       LLL_STUB_UNWIND_INFO_6				      \
 		       "24:"						      \
-		       : "=a" (result), "=&D" (ignore1), "=S" (ignore2),      \
+		       : "=a" (result), "=D" (ignore1), "=S" (ignore2),       \
 			 "=&d" (ignore3), "=m" (futex)			      \
-		       : "0" (0), "2" (id), "m" (futex), "m" (timeout)	      \
+		       : "0" (0), "1" (id), "m" (futex), "m" (timeout),	      \
+			 "2" (private)					      \
 		       : "memory", "cx", "cc", "r10", "r11");		      \
      result; })
 
+#if defined NOT_IN_libc || defined UP
+# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t"		      \
+				"jne 1f\n\t"
+#else
+# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+				"je 0f\n\t"				      \
+				"lock; decl %0\n\t"			      \
+				"jne 1f\n\t"				      \
+				"jmp 24f\n\t"				      \
+				"0:\tdecl %0\n\t"			      \
+				"jne 1f\n\t"
+#endif
 
-#define lll_mutex_unlock(futex) \
-  (void) ({ int ignore;							      \
-            __asm __volatile (LOCK_INSTR "decl %0\n\t"			      \
-			      "jne 1f\n\t"				      \
-			      ".subsection 1\n\t"			      \
-			      ".type  _L_mutex_unlock_%=, @function\n"	      \
-			      "_L_mutex_unlock_%=:\n"			      \
-			      "1:\tleaq %0, %%rdi\n"			      \
-			      "2:\tsubq $128, %%rsp\n"			      \
-			      "3:\tcallq __lll_mutex_unlock_wake\n"	      \
-			      "4:\taddq $128, %%rsp\n"			      \
-			      "5:\tjmp 24f\n"				      \
-			      "6:\t.size _L_mutex_unlock_%=, 6b-1b\n\t"	      \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_5			      \
-			      "24:"					      \
-			      : "=m" (futex), "=&D" (ignore)		      \
-			      : "m" (futex)				      \
-			      : "ax", "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_unlock(futex) \
-  (void) ({ int ignore;							      \
-	    __asm __volatile (LOCK_INSTR "andl %2, %0\n\t"		      \
-			      "jne 1f\n\t"				      \
-			      ".subsection 1\n\t"			      \
-			      ".type  _L_robust_mutex_unlock_%=, @function\n" \
-			      "_L_robust_mutex_unlock_%=:\n"		      \
-			      "1:\tleaq %0, %%rdi\n"			      \
-			      "2:\tsubq $128, %%rsp\n"			      \
-			      "3:\tcallq __lll_mutex_unlock_wake\n"	      \
-			      "4:\taddq $128, %%rsp\n"			      \
-			      "5:\tjmp 24f\n"				      \
-			      "6:\t.size _L_robust_mutex_unlock_%=, 6b-1b\n\t"\
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_5			      \
-			      "24:"					      \
-			      : "=m" (futex), "=&D" (ignore)		      \
-			      : "i" (FUTEX_WAITERS), "m" (futex)	      \
-			      : "ax", "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_dead(futex) \
-  (void) ({ int ignore;		     \
-	    __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t"		      \
-			      "syscall"					      \
-			      : "=m" (futex), "=a" (ignore)		      \
-			      : "D" (&(futex)), "i" (FUTEX_OWNER_DIED),	      \
-				"S" (FUTEX_WAKE), "1" (__NR_futex),	      \
-				"d" (1)					      \
-			      : "cx", "r11", "cc", "memory"); })
-
+#define lll_unlock(futex, private) \
+  (void)								      \
+    ({ int ignore;							      \
+       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
+	 __asm __volatile (__lll_unlock_asm_start			      \
+			   ".subsection 1\n\t"				      \
+			   ".type _L_unlock_%=, @function\n"		      \
+			   "_L_unlock_%=:\n"				      \
+			   "1:\tleaq %0, %%rdi\n"			      \
+			   "2:\tsubq $128, %%rsp\n"			      \
+			   "3:\tcallq __lll_unlock_wake_private\n"	      \
+			   "4:\taddq $128, %%rsp\n"			      \
+			   "5:\tjmp 24f\n"				      \
+			   "6:\t.size _L_unlock_%=, 6b-1b\n\t"		      \
+			   ".previous\n"				      \
+			   LLL_STUB_UNWIND_INFO_5			      \
+			   "24:"					      \
+			   : "=m" (futex), "=&D" (ignore)		      \
+			   : "m" (futex)				      \
+			   : "ax", "cx", "r11", "cc", "memory");	      \
+       else								      \
+	 __asm __volatile (__lll_unlock_asm_start			      \
+			   ".subsection 1\n\t"				      \
+			   ".type _L_unlock_%=, @function\n"		      \
+			   "_L_unlock_%=:\n"				      \
+			   "1:\tleaq %0, %%rdi\n"			      \
+			   "2:\tsubq $128, %%rsp\n"			      \
+			   "3:\tcallq __lll_unlock_wake\n"		      \
+			   "4:\taddq $128, %%rsp\n"			      \
+			   "5:\tjmp 24f\n"				      \
+			   "6:\t.size _L_unlock_%=, 6b-1b\n\t"		      \
+			   ".previous\n"				      \
+			   LLL_STUB_UNWIND_INFO_5			      \
+			   "24:"					      \
+			   : "=m" (futex), "=&D" (ignore)		      \
+			   : "m" (futex), "S" (private)			      \
+			   : "ax", "cx", "r11", "cc", "memory");	      \
+    })
+
+#define lll_robust_unlock(futex, private) \
+  do									      \
+    {									      \
+      int ignore;							      \
+      __asm __volatile (LOCK_INSTR "andl %2, %0\n\t"			      \
+			"jne 1f\n\t"					      \
+			".subsection 1\n\t"				      \
+			".type _L_robust_unlock_%=, @function\n"	      \
+			"_L_robust_unlock_%=:\n"			      \
+			"1:\tleaq %0, %%rdi\n"				      \
+			"2:\tsubq $128, %%rsp\n"			      \
+			"3:\tcallq __lll_unlock_wake\n"			      \
+			"4:\taddq $128, %%rsp\n"			      \
+			"5:\tjmp 24f\n"					      \
+			"6:\t.size _L_robust_unlock_%=, 6b-1b\n\t"	      \
+			".previous\n"					      \
+			LLL_STUB_UNWIND_INFO_5				      \
+			"24:"						      \
+			: "=m" (futex), "=&D" (ignore)			      \
+			: "i" (FUTEX_WAITERS), "m" (futex),		      \
+			  "S" (private)					      \
+			: "ax", "cx", "r11", "cc", "memory");		      \
+    }									      \
+  while (0)
+
+#define lll_robust_dead(futex, private) \
+  do									      \
+    {									      \
+      int ignore;							      \
+      __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t"			      \
+			"syscall"					      \
+			: "=m" (futex), "=a" (ignore)			      \
+			: "D" (&(futex)), "i" (FUTEX_OWNER_DIED),	      \
+			  "S" (__lll_private_flag (FUTEX_WAKE, private)),     \
+			  "1" (__NR_futex), "d" (1)			      \
+			: "cx", "r11", "cc", "memory");			      \
+    }									      \
+  while (0)
 
 /* Returns non-zero if error happened, zero if success.  */
 #define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \
@@ -461,117 +541,13 @@ extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
      __asm __volatile ("syscall"					      \
 		       : "=a" (__res)					      \
 		       : "0" (__NR_futex), "D" ((void *) ftx),		      \
-		         "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake),	      \
-		         "r" (__nr_move), "r" (__mutex), "r" (__val)	      \
+			 "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake),	      \
+			 "r" (__nr_move), "r" (__mutex), "r" (__val)	      \
 		       : "cx", "r11", "cc", "memory");			      \
      __res < 0; })
 
-
-#define lll_mutex_islocked(futex) \
-  (futex != LLL_MUTEX_LOCK_INITIALIZER)
-
-
-/* We have a separate internal lock implementation which is not tied
-   to binary compatibility.  */
-
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
-/* Initializers for lock.  */
-#define LLL_LOCK_INITIALIZER		(0)
-#define LLL_LOCK_INITIALIZER_LOCKED	(1)
-
-
-/* The states of a lock are:
-    0  -  untaken
-    1  -  taken by one user
-    2  -  taken by more users */
-
-
-#if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
-#else
-/* Special versions of the macros for use in libc itself.  They avoid
-   the lock prefix when the thread library is not used.
-
-   The code sequence to avoid unnecessary lock prefixes is what the AMD
-   guys suggested.  If you do not like it, bring it up with AMD.
-
-   XXX In future we might even want to avoid it on UP machines.  */
-
-# define lll_trylock(futex) \
-  ({ unsigned char ret;							      \
-     __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t"	      \
-		       "je 0f\n\t"					      \
-		       "lock; cmpxchgl %2, %1\n\t"			      \
-		       "jmp 1f\n"					      \
-		       "0:\tcmpxchgl %2, %1\n\t"			      \
-		       "1:setne %0"					      \
-		       : "=a" (ret), "=m" (futex)			      \
-		       : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
-			 "0" (LLL_MUTEX_LOCK_INITIALIZER)		      \
-		       : "memory");					      \
-     ret; })
-
-
-# define lll_lock(futex) \
-  (void) ({ int ignore1, ignore2, ignore3;				      \
-	    __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t"   \
-			      "je 0f\n\t"				      \
-			      "lock; cmpxchgl %0, %2\n\t"		      \
-			      "jnz 1f\n\t"				      \
-		  	      "jmp 24f\n"				      \
-			      "0:\tcmpxchgl %0, %2\n\t"			      \
-			      "jnz 1f\n\t"				      \
-			      ".subsection 1\n\t"			      \
-			      ".type  _L_lock_%=, @function\n"		      \
-			      "_L_lock_%=:\n"				      \
-			      "1:\tleaq %2, %%rdi\n"			      \
-			      "2:\tsubq $128, %%rsp\n"			      \
-			      "3:\tcallq __lll_mutex_lock_wait\n"	      \
-			      "4:\taddq $128, %%rsp\n"			      \
-			      "5:\tjmp 24f\n"				      \
-			      "6:\t.size _L_lock_%=, 6b-1b\n\t"		      \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_5			      \
-			      "24:"					      \
-			      : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
-				"=a" (ignore3)				      \
-			      : "0" (1), "m" (futex), "3" (0)		      \
-			      : "cx", "r11", "cc", "memory"); })
-
-
-# define lll_unlock(futex) \
-  (void) ({ int ignore;							      \
-            __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t"   \
-			      "je 0f\n\t"				      \
-			      "lock; decl %0\n\t"			      \
-			      "jne 1f\n\t"				      \
-		  	      "jmp 24f\n"				      \
-			      "0:\tdecl %0\n\t"				      \
-			      "jne 1f\n\t"				      \
-			      ".subsection 1\n\t"			      \
-			      ".type  _L_unlock_%=, @function\n"	      \
-			      "_L_unlock_%=:\n"				      \
-			      "1:\tleaq %0, %%rdi\n"			      \
-			      "2:\tsubq $128, %%rsp\n"			      \
-			      "3:\tcallq __lll_mutex_unlock_wake\n"	      \
-			      "4:\taddq $128, %%rsp\n"			      \
-			      "5:\tjmp 24f\n"				      \
-			      "6:\t.size _L_unlock_%=, 6b-1b\n\t"	      \
-			      ".previous\n"				      \
-			      LLL_STUB_UNWIND_INFO_5			      \
-			      "24:"					      \
-			      : "=m" (futex), "=&D" (ignore)		      \
-			      : "m" (futex)				      \
-			      : "ax", "cx", "r11", "cc", "memory"); })
-#endif
-
-
 #define lll_islocked(futex) \
-  (futex != LLL_MUTEX_LOCK_INITIALIZER)
+  (futex != LLL_LOCK_INITIALIZER)
 
 
 /* The kernel notifies a process with uses CLONE_CLEARTID via futex
@@ -610,25 +586,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
       }									      \
     __result; })
 
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-				 const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
+#endif  /* !__ASSEMBLER__ */
 
 #endif	/* lowlevellock.h */
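
The new lll_lock macro in the header above picks its slow path at compile time: when the private argument is the literal LLL_PRIVATE, the generated stub calls __lll_lock_wait_private and no flag has to be passed, otherwise the value travels to __lll_lock_wait in %esi. A rough C rendering of that dispatch, using GCC's __builtin_constant_p just as the macro does; the toy_* functions are placeholders for the assembly stubs, not the real ones:

    #include <stdatomic.h>
    #include <stdio.h>

    #define LLL_PRIVATE 0
    #define LLL_SHARED  128

    /* Placeholders for the assembly slow paths; the real stubs issue
       FUTEX_WAIT until the lock word can be claimed.  */
    static void toy_lock_wait_private (_Atomic int *futex) { (void) futex; }
    static void toy_lock_wait (_Atomic int *futex, int private)
    { (void) futex; (void) private; }

    #define toy_lll_lock(futex, private)                                      \
      do                                                                      \
        {                                                                     \
          int expected = 0;                                                   \
          /* Fast path: uncontended 0 -> 1 transition, no syscall.  */        \
          if (!atomic_compare_exchange_strong (&(futex), &expected, 1))       \
            {                                                                 \
              /* Resolved at compile time for constant arguments,             \
                 mirroring the __builtin_constant_p test in lll_lock.  */     \
              if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
                toy_lock_wait_private (&(futex));                             \
              else                                                            \
                toy_lock_wait (&(futex), (private));                          \
            }                                                                 \
        }                                                                     \
      while (0)

    static _Atomic int demo_futex;

    int main (void)
    {
      toy_lll_lock (demo_futex, LLL_PRIVATE);  /* compiles to the private path */
      puts ("locked");
      atomic_store (&demo_futex, 0);           /* toy unlock, wakes nobody */
      return 0;
    }
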
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
index 69243950d7..fa7516ef71 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -19,33 +20,40 @@
 
 #include <sysdep.h>
 #include <pthread-errnos.h>
+#include <lowlevellock.h>
 #include <lowlevelrobustlock.h>
+#include <kernel-features.h>
 
 	.text
 
-#ifndef LOCK
-# ifdef UP
-#  define LOCK
+#define FUTEX_WAITERS		0x80000000
+#define FUTEX_OWNER_DIED	0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+	xorl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg
 # else
-#  define LOCK lock
+#  define LOAD_FUTEX_WAIT(reg) \
+	xorl	$FUTEX_PRIVATE_FLAG, reg ; \
+	andl	%fs:PRIVATE_FUTEX, reg ; \
+	orl	$FUTEX_WAIT, reg
 # endif
 #endif
 
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_WAITERS		0x80000000
-#define FUTEX_OWNER_DIED	0x40000000
-
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday	0xffffffffff600000
 
 
-	.globl	__lll_robust_mutex_lock_wait
-	.type	__lll_robust_mutex_lock_wait,@function
-	.hidden	__lll_robust_mutex_lock_wait
+	.globl	__lll_robust_lock_wait
+	.type	__lll_robust_lock_wait,@function
+	.hidden	__lll_robust_lock_wait
 	.align	16
-__lll_robust_mutex_lock_wait:
+__lll_robust_lock_wait:
 	cfi_startproc
 	pushq	%r10
 	cfi_adjust_cfa_offset(8)
@@ -55,11 +63,7 @@ __lll_robust_mutex_lock_wait:
 	cfi_offset(%rdx, -24)
 
 	xorq	%r10, %r10	/* No timeout.  */
-#if FUTEX_WAIT == 0
-	xorl	%esi, %esi
-#else
-	movl	$FUTEX_WAIT, %esi
-#endif
+	LOAD_FUTEX_WAIT (%esi)
 
 4:	movl	%eax, %edx
 	orl	$FUTEX_WAITERS, %edx
@@ -97,14 +101,14 @@ __lll_robust_mutex_lock_wait:
 	cfi_restore(%r10)
 	retq
 	cfi_endproc
-	.size	__lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
+	.size	__lll_robust_lock_wait,.-__lll_robust_lock_wait
 
 
-	.globl	__lll_robust_mutex_timedlock_wait
-	.type	__lll_robust_mutex_timedlock_wait,@function
-	.hidden	__lll_robust_mutex_timedlock_wait
+	.globl	__lll_robust_timedlock_wait
+	.type	__lll_robust_timedlock_wait,@function
+	.hidden	__lll_robust_timedlock_wait
 	.align	16
-__lll_robust_mutex_timedlock_wait:
+__lll_robust_timedlock_wait:
 	cfi_startproc
 	/* Check for a valid timeout value.  */
 	cmpq	$1000000000, 8(%rdx)
@@ -122,10 +126,12 @@ __lll_robust_mutex_timedlock_wait:
 	cfi_offset(%r9, -24)
 	cfi_offset(%r12, -32)
 	cfi_offset(%r13, -40)
+	pushq	%rsi
+	cfi_adjust_cfa_offset(8)
 
 	/* Stack frame for the timespec and timeval structs.  */
-	subq	$24, %rsp
-	cfi_adjust_cfa_offset(24)
+	subq	$32, %rsp
+	cfi_adjust_cfa_offset(32)
 
 	movq	%rdi, %r12
 	movq	%rdx, %r13
@@ -174,11 +180,8 @@ __lll_robust_mutex_timedlock_wait:
 	jnz	5f
 
 2:	movq	%rsp, %r10
-#if FUTEX_WAIT == 0
-	xorl	%esi, %esi
-#else
-	movl	$FUTEX_WAIT, %esi
-#endif
+	movl	32(%rsp), %esi
+	LOAD_FUTEX_WAIT (%esi)
 	movq	%r12, %rdi
 	movl	$SYS_futex, %eax
 	syscall
@@ -195,8 +198,8 @@ __lll_robust_mutex_timedlock_wait:
 	cmpxchgl %edx, (%r12)
 	jnz	7f
 
-6:	addq	$24, %rsp
-	cfi_adjust_cfa_offset(-24)
+6:	addq	$40, %rsp
+	cfi_adjust_cfa_offset(-40)
 	popq	%r13
 	cfi_adjust_cfa_offset(-8)
 	cfi_restore(%r13)
@@ -214,7 +217,7 @@ __lll_robust_mutex_timedlock_wait:
 3:	movl	$EINVAL, %eax
 	retq
 
-	cfi_adjust_cfa_offset(56)
+	cfi_adjust_cfa_offset(72)
 	cfi_offset(%r8, -16)
 	cfi_offset(%r9, -24)
 	cfi_offset(%r12, -32)
@@ -226,4 +229,4 @@ __lll_robust_mutex_timedlock_wait:
 8:	movl	$ETIMEDOUT, %eax
 	jmp	6b
 	cfi_endproc
-	.size	__lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
+	.size	__lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
index 63771b3840..15ad534fa0 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
@@ -18,18 +18,9 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelbarrier.h>
 
-#define SYS_futex	202
-#define FUTEX_WAIT	0
-#define FUTEX_WAKE	1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
 
 	.text
 
@@ -142,21 +133,29 @@ pthread_barrier_wait:
 
 	retq
 
-1:	addq	$MUTEX, %rdi
-	callq	__lll_mutex_lock_wait
+1:	movl	PRIVATE(%rdi), %esi
+	addq	$MUTEX, %rdi
+	xorl	$LLL_SHARED, %esi
+	callq	__lll_lock_wait
 	subq	$MUTEX, %rdi
 	jmp	2b
 
-4:	addq	$MUTEX, %rdi
-	callq	__lll_mutex_unlock_wake
+4:	movl	PRIVATE(%rdi), %esi
+	addq	$MUTEX, %rdi
+	xorl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	jmp	5b
 
-6:	addq	$MUTEX, %rdi
-	callq	__lll_mutex_unlock_wake
+6:	movl	PRIVATE(%rdi), %esi
+	addq	$MUTEX, %rdi
+	xorl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	subq	$MUTEX, %rdi
 	jmp	7b
 
-9:	addq	$MUTEX, %rdi
-	callq	__lll_mutex_unlock_wake
+9:	movl	PRIVATE(%rdi), %esi
+	addq	$MUTEX, %rdi
+	xorl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	jmp	10b
 	.size	pthread_barrier_wait,.-pthread_barrier_wait
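
In the barrier slow paths above, the per-barrier PRIVATE word is loaded and XORed with LLL_SHARED before calling the generic __lll_lock_wait/__lll_unlock_wake stubs. The XOR converts between the two encodings in play: a futex-op oriented value (flag set means private, handy for building op codes directly) and the lll argument convention (LLL_PRIVATE == 0, LLL_SHARED == FUTEX_PRIVATE_FLAG). Which of the two the barrier stores is not visible in this hunk, so the sketch below only demonstrates that a single XOR with LLL_SHARED maps either one onto the other:

    #include <assert.h>

    #define FUTEX_PRIVATE_FLAG 128
    #define LLL_PRIVATE 0
    #define LLL_SHARED  FUTEX_PRIVATE_FLAG

    /* One XOR flips between the two encodings, and applying it twice is
       the identity, so the same instruction works in both directions.  */
    static int flip_private_encoding (int value)
    {
      return value ^ LLL_SHARED;
    }

    int main (void)
    {
      assert (flip_private_encoding (FUTEX_PRIVATE_FLAG) == LLL_PRIVATE);
      assert (flip_private_encoding (0) == LLL_SHARED);
      assert (flip_private_encoding (flip_private_encoding (42)) == 42);
      return 0;
    }
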
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
index 006de2696e..0c619bf271 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+   Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -19,23 +20,11 @@
 
 #include <sysdep.h>
 #include <shlib-compat.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <kernel-features.h>
 #include <pthread-pi-defines.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_REQUEUE		3
-#define FUTEX_CMP_REQUEUE	4
-
-#define EINVAL			22
+#include <pthread-errnos.h>
 
 
 	.text
@@ -115,7 +104,9 @@ __pthread_cond_broadcast:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
 #endif
@@ -123,12 +114,16 @@ __pthread_cond_broadcast:
 
 	/* Unlock in loop requires wakeup.  */
 5:	addq	$cond_lock-cond_futex, %rdi
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	jmp	6b
 
 	/* Unlock in loop requires wakeup.  */
 7:	addq	$cond_lock-cond_futex, %rdi
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	subq	$cond_lock-cond_futex, %rdi
 	jmp	8b
 
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
index 3dbb9e81e3..2fc9d1fad7 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -19,23 +19,10 @@
 
 #include <sysdep.h>
 #include <shlib-compat.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_WAKE_OP		5
-
-#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
-
-#define EINVAL			22
+#include <pthread-errnos.h>
 
 
 	.text
@@ -111,7 +98,9 @@ __pthread_cond_signal:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
 #endif
@@ -120,7 +109,9 @@ __pthread_cond_signal:
 	/* Unlock in loop requires wakeup.  */
 5:
 	movq	%r8, %rdi
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	jmp	6b
 	.size	__pthread_cond_signal, .-__pthread_cond_signal
 versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
index 2afd601b8c..003069fb6b 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -19,19 +19,10 @@
 
 #include <sysdep.h>
 #include <shlib-compat.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <pthread-errnos.h>
 
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday	0xffffffffff600000
 
@@ -301,7 +292,9 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_lock_wait
 	jmp	2b
 
 	/* Unlock in loop requires wakeup.  */
@@ -309,7 +302,9 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	jmp	4b
 
 	/* Locking in loop failed.  */
@@ -317,7 +312,9 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
 #endif
@@ -328,7 +325,9 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	jmp	11b
 
 	/* The initial unlocking of the mutex failed.  */
@@ -345,7 +344,9 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 
 17:	movq	(%rsp), %rax
 	jmp	18b
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
index aaad22e020..34ef2c7b77 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
@@ -19,19 +19,10 @@
 
 #include <sysdep.h>
 #include <shlib-compat.h>
+#include <lowlevellock.h>
 #include <lowlevelcond.h>
 #include <tcb-offsets.h>
 
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-
 
 	.text
 
@@ -58,7 +49,9 @@ __condvar_cleanup:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
 #endif
@@ -105,7 +98,9 @@ __condvar_cleanup:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 
 	/* Wake up all waiters to make sure no signal gets lost.  */
 2:	testq	%r12, %r12
@@ -307,7 +302,9 @@ __pthread_cond_wait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_lock_wait
 	jmp	2b
 
 	/* Unlock in loop requires wakeup.  */
@@ -315,7 +312,9 @@ __pthread_cond_wait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	jmp	4b
 
 	/* Locking in loop failed.  */
@@ -323,7 +322,9 @@ __pthread_cond_wait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_lock_wait
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
 #endif
@@ -334,7 +335,9 @@ __pthread_cond_wait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 	jmp	11b
 
 	/* The initial unlocking of the mutex failed.  */
@@ -351,7 +354,9 @@ __pthread_cond_wait:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	/* XYZ */
+	movl	$LLL_SHARED, %esi
+	callq	__lll_unlock_wake
 
 13:	movq	%r10, %rax
 	jmp	14b
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
index 7740c599d1..c3b2b51bdb 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
@@ -19,17 +19,8 @@
 
 #include <kernel-features.h>
 #include <tcb-offsets.h>
+#include <lowlevellock.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_PRIVATE_FLAG	128
 
 	.comm	__fork_generation, 4, 4
 
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
index e8d257502f..80fedd4ab1 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
@@ -18,23 +18,12 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 #include <kernel-features.h>
 
 
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_PRIVATE_FLAG	128
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	__pthread_rwlock_rdlock
@@ -123,11 +112,11 @@ __pthread_rwlock_rdlock:
 	movq	%rdx, %rax
 	retq
 
-1:
+1:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	callq	__lll_lock_wait
 #if MUTEX != 0
 	subq	$MUTEX, %rdi
 #endif
@@ -139,11 +128,11 @@ __pthread_rwlock_rdlock:
 	movl	$EDEADLK, %edx
 	jmp	9b
 
-6:
+6:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 #if MUTEX != 0
 	subq	$MUTEX, %rdi
 #endif
@@ -159,21 +148,21 @@ __pthread_rwlock_rdlock:
 	movl	$EAGAIN, %edx
 	jmp	9b
 
-10:
+10:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 #if MUTEX != 0
 	subq	$MUTEX, %rdi
 #endif
 	jmp	11b
 
-12:
+12:	movl	PSHARED(%rdi), %esi
 #if MUTEX == 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	callq	__lll_lock_wait
 #if MUTEX != 0
 	subq	$MUTEX, %rdi
 #endif
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
index f703eeb29f..0fa2714426 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
@@ -18,27 +18,15 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 #include <kernel-features.h>
 
 
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_PRIVATE_FLAG	128
-
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday	0xffffffffff600000
 
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	pthread_rwlock_timedrdlock
@@ -172,11 +160,11 @@ pthread_rwlock_timedrdlock:
 	popq	%r12
 	retq
 
-1:
+1:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	callq	__lll_lock_wait
 	jmp	2b
 
 14:	cmpl	%fs:TID, %eax
@@ -184,13 +172,13 @@ pthread_rwlock_timedrdlock:
 	movl	$EDEADLK, %edx
 	jmp	9b
 
-6:
+6:	movl	PSHARED(%r12), %esi
 #if MUTEX == 0
 	movq	%r12, %rdi
 #else
 	leal	MUTEX(%r12), %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 	jmp	7b
 
 	/* Overflow.  */
@@ -203,22 +191,22 @@ pthread_rwlock_timedrdlock:
 	movl	$EAGAIN, %edx
 	jmp	9b
 
-10:
+10:	movl	PSHARED(%r12), %esi
 #if MUTEX == 0
 	movq	%r12, %rdi
 #else
 	leaq	MUTEX(%r12), %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 	jmp	11b
 
-12:
+12:	movl	PSHARED(%r12), %esi
 #if MUTEX == 0
 	movq	%r12, %rdi
 #else
 	leaq	MUTEX(%r12), %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	callq	__lll_lock_wait
 	jmp	13b
 
 16:	movq	$-ETIMEDOUT, %rdx
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
index fc3bf3d69e..1e43933ca9 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
@@ -18,26 +18,15 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 #include <kernel-features.h>
 
 
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_PRIVATE_FLAG	128
-
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday	0xffffffffff600000
 
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	pthread_rwlock_timedwrlock
@@ -168,11 +157,11 @@ pthread_rwlock_timedwrlock:
 	popq	%r12
 	retq
 
-1:
+1:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	callq	__lll_lock_wait
 	jmp	2b
 
 14:	cmpl	%fs:TID, %eax
@@ -180,13 +169,13 @@ pthread_rwlock_timedwrlock:
 20:	movl	$EDEADLK, %edx
 	jmp	9b
 
-6:
+6:	movl	PSHARED(%r12), %esi
 #if MUTEX == 0
 	movq	%r12, %rdi
 #else
 	leal	MUTEX(%r12), %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 	jmp	7b
 
 	/* Overflow.  */
@@ -194,22 +183,22 @@ pthread_rwlock_timedwrlock:
 	movl	$EAGAIN, %edx
 	jmp	9b
 
-10:
+10:	movl	PSHARED(%r12), %esi
 #if MUTEX == 0
 	movq	%r12, %rdi
 #else
 	leaq	MUTEX(%r12), %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 	jmp	11b
 
-12:
+12:	movl	PSHARED(%r12), %esi
 #if MUTEX == 0
 	movq	%r12, %rdi
 #else
 	leaq	MUTEX(%r12), %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	callq	__lll_lock_wait
 	jmp	13b
 
 16:	movq	$-ETIMEDOUT, %rdx
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
index c4597f60f7..cf7f607d9c 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
@@ -18,22 +18,11 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <kernel-features.h>
 
 
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_PRIVATE_FLAG	128
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	__pthread_rwlock_unlock
@@ -107,28 +96,28 @@ __pthread_rwlock_unlock:
 4:	xorl	%eax, %eax
 	retq
 
-1:
+1:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	callq	__lll_lock_wait
 #if MUTEX != 0
 	subq	$MUTEX, %rdi
 #endif
 	jmp	2b
 
-3:
+3:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 	jmp	4b
 
-7:
+7:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 	jmp	8b
 
 	.size	__pthread_rwlock_unlock,.-__pthread_rwlock_unlock
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
index 7f65726849..209c0e9a94 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
@@ -18,23 +18,12 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <lowlevelrwlock.h>
 #include <pthread-errnos.h>
 #include <kernel-features.h>
 
 
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-#define FUTEX_WAKE		1
-#define FUTEX_PRIVATE_FLAG	128
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
 	.text
 
 	.globl	__pthread_rwlock_wrlock
@@ -121,11 +110,11 @@ __pthread_rwlock_wrlock:
 	movq	%rdx, %rax
 	retq
 
-1:
+1:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	callq	__lll_lock_wait
 #if MUTEX != 0
 	subq	$MUTEX, %rdi
 #endif
@@ -136,32 +125,32 @@ __pthread_rwlock_wrlock:
 	movl	$EDEADLK, %edx
 	jmp	9b
 
-6:
+6:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 	jmp	7b
 
 4:	decl	WRITERS_QUEUED(%rdi)
 	movl	$EAGAIN, %edx
 	jmp	9b
 
-10:
+10:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_unlock_wake
+	callq	__lll_unlock_wake
 #if MUTEX != 0
 	subq	$MUTEX, %rdi
 #endif
 	jmp	11b
 
-12:
+12:	movl	PSHARED(%rdi), %esi
 #if MUTEX != 0
 	addq	$MUTEX, %rdi
 #endif
-	callq	__lll_mutex_lock_wait
+	callq	__lll_lock_wait
 #if MUTEX != 0
 	subq	$MUTEX, %rdi
 #endif
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
index 4919c11fd2..adbbcdfa71 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
@@ -18,19 +18,11 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <shlib-compat.h>
 #include <pthread-errnos.h>
 #include <structsem.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex		202
-#define FUTEX_WAKE		1
-
 
 	.text
 
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
index 4068a1b6b8..88e99cf6a1 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
@@ -18,23 +18,15 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <shlib-compat.h>
 #include <pthread-errnos.h>
 #include <structsem.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex		202
-#define FUTEX_WAIT		0
 
 /* For the calculation see asm/vsyscall.h.  */
 #define VSYSCALL_ADDR_vgettimeofday	0xffffffffff600000
 
-
 	.text
 
 	.globl	sem_timedwait
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
index 643090f065..a5719cadda 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
@@ -18,15 +18,10 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <shlib-compat.h>
 #include <pthread-errnos.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
 	.text
 
 	.globl	sem_trywait
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
index e099ede029..5320a91e19 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
@@ -18,19 +18,11 @@
    02111-1307 USA.  */
 
 #include <sysdep.h>
+#include <lowlevellock.h>
 #include <shlib-compat.h>
 #include <pthread-errnos.h>
 #include <structsem.h>
 
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex		202
-#define FUTEX_WAIT		0
-
 
 	.text
 
diff --git a/nptl/tpp.c b/nptl/tpp.c
index 367dd8162a..0325010b44 100644
--- a/nptl/tpp.c
+++ b/nptl/tpp.c
@@ -1,5 +1,5 @@
 /* Thread Priority Protect helpers.
-   Copyright (C) 2006 Free Software Foundation, Inc.
+   Copyright (C) 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
 
@@ -93,7 +93,7 @@ __pthread_tpp_change_priority (int previous_prio, int new_prio)
   if (priomax == newpriomax)
     return 0;
 
-  lll_lock (self->lock);
+  lll_lock (self->lock, LLL_PRIVATE);
 
   tpp->priomax = newpriomax;
 
@@ -129,7 +129,7 @@ __pthread_tpp_change_priority (int previous_prio, int new_prio)
 	}
     }
 
-  lll_unlock (self->lock);
+  lll_unlock (self->lock, LLL_PRIVATE);
 
   return result;
 }
@@ -144,7 +144,7 @@ __pthread_current_priority (void)
 
   int result = 0;
 
-  lll_lock (self->lock);
+  lll_lock (self->lock, LLL_PRIVATE);
 
   if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
     {
@@ -166,7 +166,7 @@ __pthread_current_priority (void)
   if (result != -1)
     result = self->schedparam.sched_priority;
 
-  lll_unlock (self->lock);
+  lll_unlock (self->lock, LLL_PRIVATE);
 
   return result;
 }
diff --git a/nptl/tst-rwlock14.c b/nptl/tst-rwlock14.c
index fc0d3d219f..00e1becbfa 100644
--- a/nptl/tst-rwlock14.c
+++ b/nptl/tst-rwlock14.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2004.
 
@@ -130,8 +130,8 @@ do_test (void)
       result = 1;
     }
 
-  ts.tv_nsec = 0x100001000LL;
-  if (ts.tv_nsec != 0x100001000LL)
+  ts.tv_nsec = (__typeof (ts.tv_nsec)) 0x100001000LL;
+  if ((__typeof (ts.tv_nsec)) 0x100001000LL != 0x100001000LL)
     ts.tv_nsec = 2000000000;
 
   e = pthread_rwlock_timedrdlock (&r, &ts);
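
The tst-rwlock14.c change deals with the width of tv_nsec: the test wants to hand pthread_rwlock_timedrdlock an invalid nanosecond value that needs more than 32 bits, but on targets where tv_nsec is a 32-bit long the plain assignment would silently truncate (and warn). The new code checks whether the constant survives a round trip through the field's type and, if not, falls back to 2000000000, which still exceeds the valid 0..999999999 range. A standalone rendering of the idiom (GNU __typeof, as in the test itself):

    #include <stdio.h>
    #include <time.h>

    int
    main (void)
    {
      struct timespec ts;

      /* Prefer a value wider than 32 bits; if tv_nsec cannot hold it,
         use a 32-bit-safe value that is still invalid (greater than
         999999999 nanoseconds).  */
      ts.tv_nsec = (__typeof (ts.tv_nsec)) 0x100001000LL;
      if ((__typeof (ts.tv_nsec)) 0x100001000LL != 0x100001000LL)
        ts.tv_nsec = 2000000000;

      printf ("invalid tv_nsec used by the test: %lld\n",
              (long long) ts.tv_nsec);
      return 0;
    }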
diff --git a/stdio-common/tst-fmemopen2.c b/stdio-common/tst-fmemopen2.c
index 81beddddef..c2a4baace9 100644
--- a/stdio-common/tst-fmemopen2.c
+++ b/stdio-common/tst-fmemopen2.c
@@ -28,7 +28,7 @@ do_test (void)
   o = ftello (fp);
   if (o != 0)
     {
-      printf ("second ftello returned %ld, expected %zu\n", o, (off_t) 0);
+      printf ("second ftello returned %ld, expected 0\n", o);
       result = 1;
     }
   if (fseeko (fp, 0, SEEK_END) != 0)
diff --git a/stdlib/tst-strtod2.c b/stdlib/tst-strtod2.c
index 30d8d9df65..a7df82ebbd 100644
--- a/stdlib/tst-strtod2.c
+++ b/stdlib/tst-strtod2.c
@@ -32,7 +32,7 @@ do_test (void)
 	}
       if (ep != tests[i].str + tests[i].offset)
 	{
-	  printf ("test %zu strtod parsed %ju characters, expected %zu\n",
+	  printf ("test %zu strtod parsed %tu characters, expected %zu\n",
 		  i, ep - tests[i].str, tests[i].offset);
 	  status = 1;
 	}
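
Both test fixes above are printf format corrections: tst-fmemopen2.c was feeding an off_t to a %zu conversion (it now just prints the literal expected value), and tst-strtod2.c prints the pointer difference ep - tests[i].str, whose type is ptrdiff_t, so the matching length modifier is t rather than j. A short reminder of the pairings, assuming nothing beyond standard C99 printf:

    #include <stddef.h>
    #include <stdio.h>

    int
    main (void)
    {
      const char *str = "1.5abc";
      const char *ep = str + 3;          /* where a parse stopped */
      size_t expected = 3;

      /* %t pairs with ptrdiff_t, %z with size_t; %j would require the
         argument to be converted to intmax_t/uintmax_t first.  */
      printf ("parsed %td characters, expected %zu\n", ep - str, expected);
      return 0;
    }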
diff --git a/sysdeps/generic/unwind-dw2-fde-glibc.c b/sysdeps/generic/unwind-dw2-fde-glibc.c
index 0038a0cbf6..932a972743 100644
--- a/sysdeps/generic/unwind-dw2-fde-glibc.c
+++ b/sysdeps/generic/unwind-dw2-fde-glibc.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2001, 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2001, 2002, 2007 Free Software Foundation, Inc.
    Contributed by Jakub Jelinek <jakub@redhat.com>.
 
    This file is part of the GNU C Library.
@@ -235,10 +235,11 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr)
   if (data->ret != NULL)
     {
       unsigned int encoding = get_fde_encoding (data->ret);
+      _Unwind_Ptr func;
       read_encoded_value_with_base (encoding,
 				    base_from_cb_data (encoding, data),
-				    data->ret->pc_begin,
-				    (_Unwind_Ptr *)&data->func);
+				    data->ret->pc_begin, &func);
+      data->func = (void *) func;
     }
   return 1;
 }
diff --git a/sysdeps/generic/unwind-dw2-fde.c b/sysdeps/generic/unwind-dw2-fde.c
index 13945b9719..8d62e46114 100644
--- a/sysdeps/generic/unwind-dw2-fde.c
+++ b/sysdeps/generic/unwind-dw2-fde.c
@@ -1,5 +1,5 @@
 /* Subroutines needed for unwinding stack frames for exception handling.  */
-/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2006
+/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2006, 2007
    Free Software Foundation, Inc.
    Contributed by Jason Merrill <jason@cygnus.com>.
 
@@ -301,7 +301,8 @@ get_cie_encoding (struct dwarf_cie *cie)
   if (aug[0] != 'z')
     return DW_EH_PE_absptr;
 
-  p = aug + strlen (aug) + 1;		/* Skip the augmentation string.  */
+  /* Skip the augmentation string.  */
+  p = aug + strlen ((const char *) aug) + 1;
   p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
   p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
   p++;					/* Skip return address column.  */
@@ -838,7 +839,7 @@ linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
       else
 	{
 	  _Unwind_Ptr mask;
-	  const char *p;
+	  const unsigned char *p;
 
 	  p = read_encoded_value_with_base (encoding, base,
 					    this_fde->pc_begin, &pc_begin);
@@ -908,7 +909,7 @@ binary_search_single_encoding_fdes (struct object *ob, void *pc)
       size_t i = (lo + hi) / 2;
       fde *f = vec->array[i];
       _Unwind_Ptr pc_begin, pc_range;
-      const char *p;
+      const unsigned char *p;
 
       p = read_encoded_value_with_base (encoding, base, f->pc_begin,
 					&pc_begin);
@@ -936,7 +937,7 @@ binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
       size_t i = (lo + hi) / 2;
       fde *f = vec->array[i];
       _Unwind_Ptr pc_begin, pc_range;
-      const char *p;
+      const unsigned char *p;
       int encoding;
 
       encoding = get_fde_encoding (f);
@@ -1046,6 +1047,7 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
   if (f)
     {
       int encoding;
+      _Unwind_Ptr func;
 
       bases->tbase = ob->tbase;
       bases->dbase = ob->dbase;
@@ -1054,7 +1056,8 @@ _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
       if (ob->s.b.mixed_encoding)
 	encoding = get_fde_encoding (f);
       read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
-				    f->pc_begin, (_Unwind_Ptr *)&bases->func);
+				    f->pc_begin, &func);
+      bases->func = (void *) func;
     }
 
   return f;
diff --git a/sysdeps/generic/unwind-dw2.c b/sysdeps/generic/unwind-dw2.c
index d818e5dfd8..ba5723aa62 100644
--- a/sysdeps/generic/unwind-dw2.c
+++ b/sysdeps/generic/unwind-dw2.c
@@ -1,5 +1,5 @@
 /* DWARF2 exception handling and frame unwind runtime interface routines.
-   Copyright (C) 1997,1998,1999,2000,2001,2002,2003,2005,2006
+   Copyright (C) 1997,1998,1999,2000,2001,2002,2003,2005,2006,2007
    	Free Software Foundation, Inc.
 
    This file is part of the GNU C Library.
@@ -309,8 +309,9 @@ extract_cie_info (struct dwarf_cie *cie, struct _Unwind_Context *context,
       /* "P" indicates a personality routine in the CIE augmentation.  */
       else if (aug[0] == 'P')
 	{
-	  p = read_encoded_value (context, *p, p + 1,
-				  (_Unwind_Ptr *) &fs->personality);
+	  _Unwind_Ptr personality;
+	  p = read_encoded_value (context, *p, p + 1, &personality);
+	  fs->personality = (_Unwind_Personality_Fn) personality;
 	  aug += 1;
 	}
 
@@ -771,8 +772,12 @@ execute_cfa_program (const unsigned char *insn_ptr,
       else switch (insn)
 	{
 	case DW_CFA_set_loc:
-	  insn_ptr = read_encoded_value (context, fs->fde_encoding,
-					 insn_ptr, (_Unwind_Ptr *) &fs->pc);
+	  {
+	    _Unwind_Ptr pc;
+	    insn_ptr = read_encoded_value (context, fs->fde_encoding,
+					   insn_ptr, &pc);
+	    fs->pc = (void *) pc;
+	  }
 	  break;
 
 	case DW_CFA_advance_loc1:
@@ -992,8 +997,11 @@ uw_frame_state_for (struct _Unwind_Context *context, _Unwind_FrameState *fs)
       insn = aug + i;
     }
   if (fs->lsda_encoding != DW_EH_PE_omit)
-    aug = read_encoded_value (context, fs->lsda_encoding, aug,
-			      (_Unwind_Ptr *) &context->lsda);
+    {
+      _Unwind_Ptr lsda;
+      aug = read_encoded_value (context, fs->lsda_encoding, aug, &lsda);
+      context->lsda = (void *) lsda;
+    }
 
   /* Then the insns in the FDE up to our target PC.  */
   if (insn == NULL)
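
The unwinder changes in unwind-dw2-fde-glibc.c, unwind-dw2-fde.c and unwind-dw2.c all replace the same idiom: a pointer-typed field used to be filled by casting its address to _Unwind_Ptr *, which puns a void * (or function pointer) object as an integer and is unsafe where the representations differ, as well as under strict-aliasing rules. The decoded value now lands in a properly typed _Unwind_Ptr temporary and is converted by an ordinary value cast; the related const char * to const unsigned char * changes keep the encoded-value readers working on unsigned bytes. A compact sketch of the pattern with stand-in types:

    #include <stdio.h>

    typedef unsigned long unwind_ptr_sketch;   /* stand-in for _Unwind_Ptr */

    struct cb_data_sketch { void *func; };

    /* Stand-in for read_encoded_value_with_base: decodes one byte.  */
    static const unsigned char *
    read_value_sketch (const unsigned char *p, unwind_ptr_sketch *out)
    {
      *out = *p;
      return p + 1;
    }

    int
    main (void)
    {
      struct cb_data_sketch data;
      const unsigned char encoded[] = { 0x42 };

      /* Old: read_value_sketch (encoded, (unwind_ptr_sketch *) &data.func);
         New: decode into a correctly typed temporary, then convert.  */
      unwind_ptr_sketch func;
      read_value_sketch (encoded, &func);
      data.func = (void *) func;

      printf ("decoded %p\n", data.func);
      return 0;
    }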
diff --git a/sysdeps/unix/sysv/linux/getsysstats.c b/sysdeps/unix/sysv/linux/getsysstats.c
index d655ba3b27..ee7a539a6f 100644
--- a/sysdeps/unix/sysv/linux/getsysstats.c
+++ b/sysdeps/unix/sysv/linux/getsysstats.c
@@ -1,5 +1,5 @@
 /* Determine various system internal values, Linux version.
-   Copyright (C) 1996-2001, 2002, 2003, 2006 Free Software Foundation, Inc.
+   Copyright (C) 1996-2001, 2002, 2003, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@cygnus.com>, 1996.
 
@@ -21,7 +21,9 @@
 #include <alloca.h>
 #include <assert.h>
 #include <ctype.h>
+#include <dirent.h>
 #include <errno.h>
+#include <fcntl.h>
 #include <mntent.h>
 #include <paths.h>
 #include <stdio.h>
@@ -32,6 +34,7 @@
 #include <sys/sysinfo.h>
 
 #include <atomic.h>
+#include <not-cancel.h>
 
 
 /* How we can determine the number of available processors depends on
@@ -64,14 +67,13 @@
   while (0)
 #endif
 
-int
-__get_nprocs ()
+
+static int
+count_processors_in_proc (void)
 {
   char buffer[8192];
   int result = 1;
 
-  /* XXX Here will come a test for the new system call.  */
-
   /* The /proc/stat format is more uniform, use it by default.  */
   FILE *fp = fopen ("/proc/stat", "rc");
   if (fp != NULL)
@@ -100,38 +102,115 @@ __get_nprocs ()
 
   return result;
 }
+
+
+int
+__get_nprocs ()
+{
+  /* XXX Here will come a test for the new system call.  */
+
+  /* Try to use the sysfs filesystem.  It has actual information about
+     online processors.  */
+  DIR *dir = __opendir ("/sys/devices/system/cpu");
+  if (dir != NULL)
+    {
+      int dfd = dirfd (dir);
+      int count = 0;
+      struct dirent64 *d;
+
+      while ((d = __readdir64 (dir)) != NULL)
+	/* NB: the sysfs has d_type support.  */
+	if (d->d_type == DT_DIR && strncmp (d->d_name, "cpu", 3) == 0)
+	  {
+	    char *endp;
+	    unsigned long int nr = strtoul (d->d_name + 3, &endp, 10);
+	    if (nr != ULONG_MAX && endp != d->d_name + 3 && *endp == '\0')
+	      {
+		/* Try reading the online file.  */
+		char oname[_D_ALLOC_NAMLEN (d) + sizeof "/online"];
+		strcpy (stpcpy (oname, d->d_name), "/online");
+
+		/* We unconditionally use openat since the "online"
+		   file became readable only after the openat system
+		   call was introduced.  */
+		char buf[1];
+		int fd = openat_not_cancel_3 (dfd, oname, O_RDONLY);
+
+		/* If we cannot read the online file we have to assume
+		   the CPU is online.  */
+		if (fd < 0)
+		  ++count;
+		else
+		  {
+		    if (read_not_cancel (fd, buf, sizeof (buf)) < 0
+			|| buf[0] == '1')
+		      ++count;
+
+		    close_not_cancel_no_status (fd);
+		  }
+	      }
+	  }
+
+      __closedir (dir);
+
+      return count;
+    }
+
+  return count_processors_in_proc ();
+}
 weak_alias (__get_nprocs, get_nprocs)
 
 
-#ifdef GET_NPROCS_CONF_PARSER
 /* On some architectures it is possible to distinguish between configured
    and active cpus.  */
 int
 __get_nprocs_conf ()
 {
-  char buffer[8192];
-  int result = 1;
-
   /* XXX Here will come a test for the new system call.  */
 
+  /* Try to use the sysfs filesystem.  It has actual information about
+     online processors.  */
+  DIR *dir = __opendir ("/sys/devices/system/cpu");
+  if (dir != NULL)
+    {
+      int count = 0;
+      struct dirent64 *d;
+
+      while ((d = __readdir64 (dir)) != NULL)
+	/* NB: the sysfs has d_type support.  */
+	if (d->d_type == DT_DIR && strncmp (d->d_name, "cpu", 3) == 0)
+	  {
+	    char *endp;
+	    unsigned long int nr = strtoul (d->d_name + 3, &endp, 10);
+	    if (nr != ULONG_MAX && endp != d->d_name + 3 && *endp == '\0')
+	      ++count;
+	  }
+
+      __closedir (dir);
+
+      return count;
+    }
+
+  int result = 1;
+
+#ifdef GET_NPROCS_CONF_PARSER
   /* If we haven't found an appropriate entry return 1.  */
   FILE *fp = fopen ("/proc/cpuinfo", "rc");
   if (fp != NULL)
     {
+      char buffer[8192];
+
       /* No threads use this stream.  */
       __fsetlocking (fp, FSETLOCKING_BYCALLER);
       GET_NPROCS_CONF_PARSER (fp, buffer, result);
       fclose (fp);
     }
+#else
+  result = count_processors_in_proc ();
+#endif
 
   return result;
 }
-#else
-/* As far as I know Linux has no separate numbers for configured and
-   available processors.  So make the `get_nprocs_conf' function an
-   alias.  */
-strong_alias (__get_nprocs, __get_nprocs_conf)
-#endif
 weak_alias (__get_nprocs_conf, get_nprocs_conf)
 
 /* General function to get information about memory status from proc
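
The getsysstats.c rewrite is the largest functional change here: the /proc/stat parser survives as the fallback count_processors_in_proc, but __get_nprocs now prefers /sys/devices/system/cpu, counting each cpuN directory and consulting its online file (if that file cannot be read, the CPU is assumed online), while __get_nprocs_conf counts every cpuN directory regardless of state and falls back to /proc/cpuinfo or /proc/stat when sysfs is unavailable. The library itself uses non-cancelable internal wrappers such as openat_not_cancel_3; the sketch below is a plain user-space rendering with ordinary libc calls and trimmed error handling.

    #include <dirent.h>
    #include <fcntl.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static int
    count_cpus (int online_only)
    {
      DIR *dir = opendir ("/sys/devices/system/cpu");
      if (dir == NULL)
        return -1;                      /* caller would fall back to /proc */

      int dfd = dirfd (dir);
      int count = 0;
      struct dirent *d;

      while ((d = readdir (dir)) != NULL)
        {
          if (strncmp (d->d_name, "cpu", 3) != 0)
            continue;
          char *endp;
          unsigned long nr = strtoul (d->d_name + 3, &endp, 10);
          if (endp == d->d_name + 3 || *endp != '\0' || nr == ULONG_MAX)
            continue;                   /* "cpuidle", "cpufreq", ...  */

          if (!online_only)
            {
              ++count;
              continue;
            }

          char oname[sizeof d->d_name + sizeof "/online"];
          snprintf (oname, sizeof oname, "%s/online", d->d_name);

          char buf[1];
          int fd = openat (dfd, oname, O_RDONLY);
          if (fd < 0)
            ++count;                    /* cannot read online file: assume online */
          else
            {
              if (read (fd, buf, sizeof buf) < 0 || buf[0] == '1')
                ++count;
              close (fd);
            }
        }

      closedir (dir);
      return count;
    }

    int
    main (void)
    {
      printf ("configured: %d, online: %d\n", count_cpus (0), count_cpus (1));
      return 0;
    }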
diff --git a/sysdeps/unix/sysv/linux/i386/posix_fallocate.c b/sysdeps/unix/sysv/linux/i386/posix_fallocate.c
new file mode 100644
index 0000000000..c974d06afc
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/posix_fallocate.c
@@ -0,0 +1,58 @@
+/* Copyright (C) 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <fcntl.h>
+#include <kernel-features.h>
+#include <sysdep.h>
+
+#define posix_fallocate static internal_fallocate
+#include <sysdeps/posix/posix_fallocate.c>
+#undef posix_fallocate
+
+#if !defined __ASSUME_FALLOCATE && defined __NR_fallocate
+int __have_fallocate attribute_hidden;
+#endif
+
+extern int __fallocate64 (int fd, int mode, __off64_t offset, __off64_t len)
+     attribute_hidden;
+
+/* Reserve storage for the data of the file associated with FD.  */
+int
+posix_fallocate (int fd, __off_t offset, __off_t len)
+{
+#ifdef __NR_fallocate
+# ifndef __ASSUME_FALLOCATE
+  if (__builtin_expect (__have_fallocate >= 0, 1))
+# endif
+    {
+      int res = __fallocate64 (fd, 0, offset, len);
+      if (! res)
+	return 0;
+
+# ifndef __ASSUME_FALLOCATE
+      if (__builtin_expect (res == ENOSYS, 0))
+	__have_fallocate = -1;
+      else
+# endif
+	if (res != EOPNOTSUPP)
+	  return res;
+    }
+#endif
+
+  return internal_fallocate (fd, offset, len);
+}
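
The new i386 posix_fallocate first builds the generic emulation under the name internal_fallocate by re-including sysdeps/posix/posix_fallocate.c, then tries the real fallocate syscall through __fallocate64. A kernel without the syscall reports ENOSYS, which is cached in __have_fallocate so later calls skip the attempt; a filesystem without support reports EOPNOTSUPP; both cases drop back to the emulation. Callers are unaffected either way, as in this usage sketch (note that posix_fallocate returns the error number rather than setting errno):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int
    main (void)
    {
      int fd = open ("demo.dat", O_RDWR | O_CREAT, 0644);
      if (fd < 0)
        {
          perror ("open");
          return EXIT_FAILURE;
        }

      /* Reserve 1 MiB; whether the kernel fallocate or the write-based
         emulation satisfies this is invisible to the caller.  */
      int err = posix_fallocate (fd, 0, 1 << 20);
      if (err != 0)
        fprintf (stderr, "posix_fallocate: %s\n", strerror (err));

      close (fd);
      return err == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
    }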
diff --git a/sysdeps/unix/sysv/linux/i386/posix_fallocate64.c b/sysdeps/unix/sysv/linux/i386/posix_fallocate64.c
new file mode 100644
index 0000000000..d5b4d597c1
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/i386/posix_fallocate64.c
@@ -0,0 +1,61 @@
+/* Copyright (C) 2007 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <fcntl.h>
+#include <kernel-features.h>
+#include <sysdep.h>
+
+extern int __posix_fallocate64_l64 (int fd, __off64_t offset, __off64_t len);
+#define __posix_fallocate64_l64 static internal_fallocate64
+#include <sysdeps/posix/posix_fallocate64.c>
+#undef __posix_fallocate64_l64
+
+#if !defined __ASSUME_FALLOCATE && defined __NR_fallocate
+/* Defined in posix_fallocate.c.  */
+extern int __have_fallocate attribute_hidden;
+#endif
+
+extern int __fallocate64 (int fd, int mode, __off64_t offset, __off64_t len)
+     attribute_hidden;
+
+/* Reserve storage for the data of the file associated with FD.  */
+int
+__posix_fallocate64_l64 (int fd, __off64_t offset, __off64_t len)
+{
+#ifdef __NR_fallocate
+# ifndef __ASSUME_FALLOCATE
+  if (__builtin_expect (__have_fallocate >= 0, 1))
+# endif
+    {
+      int res = __fallocate64 (fd, 0, offset, len);
+
+      if (! res)
+	return 0;
+
+# ifndef __ASSUME_FALLOCATE
+      if (__builtin_expect (res == ENOSYS, 0))
+	__have_fallocate = -1;
+      else
+# endif
+	if (res != EOPNOTSUPP)
+	  return res;
+    }
+#endif
+
+  return internal_fallocate64 (fd, offset, len);
+}
diff --git a/sysdeps/unix/sysv/linux/i386/syscalls.list b/sysdeps/unix/sysv/linux/i386/syscalls.list
index 3ff3a73aab..c532a78869 100644
--- a/sysdeps/unix/sysv/linux/i386/syscalls.list
+++ b/sysdeps/unix/sysv/linux/i386/syscalls.list
@@ -6,3 +6,4 @@ vm86		-	vm86		i:ip	__vm86		vm86@@GLIBC_2.3.4
 oldgetrlimit	EXTRA	getrlimit	i:ip	__old_getrlimit	getrlimit@GLIBC_2.0
 oldsetrlimit	EXTRA	setrlimit	i:ip	__old_setrlimit	setrlimit@GLIBC_2.0
 waitpid		-	waitpid		Ci:ipi	__waitpid	waitpid	__libc_waitpid
+fallocate64	EXTRA	fallocate	Vi:iiiiii	__fallocate64
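
The syscalls.list entry declares __fallocate64 with six integer argument slots because on i386 the 64-bit offset and length each occupy a pair of 32-bit words alongside fd and mode; the posix_fallocate.c hunk below makes the same correction, passing 6 rather than 4 arguments to INTERNAL_SYSCALL and splitting each value with __LONG_LONG_PAIR. A small illustration of the split (the pair ordering is architecture-defined, so this only shows the arithmetic, not the final register order):

    #include <inttypes.h>
    #include <stdio.h>

    int
    main (void)
    {
      int64_t offset = ((int64_t) 5 << 32) | 0x1000;   /* beyond 4 GiB */

      /* Each 64-bit argument becomes two 32-bit words on a 32-bit ABI.  */
      uint32_t lo = (uint32_t) offset;
      uint32_t hi = (uint32_t) (offset >> 32);

      printf ("offset %" PRId64 " -> low 0x%" PRIx32 ", high 0x%" PRIx32 "\n",
              offset, lo, hi);
      return 0;
    }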
diff --git a/sysdeps/unix/sysv/linux/posix_fallocate.c b/sysdeps/unix/sysv/linux/posix_fallocate.c
index 9cfade60ca..6944793fa2 100644
--- a/sysdeps/unix/sysv/linux/posix_fallocate.c
+++ b/sysdeps/unix/sysv/linux/posix_fallocate.c
@@ -39,7 +39,7 @@ posix_fallocate (int fd, __off_t offset, __off_t len)
 # endif
     {
       INTERNAL_SYSCALL_DECL (err);
-      int res = INTERNAL_SYSCALL (fallocate, err, 4, fd, 0,
+      int res = INTERNAL_SYSCALL (fallocate, err, 6, fd, 0,
 				  __LONG_LONG_PAIR (offset >> 31, offset),
 				  __LONG_LONG_PAIR (len >> 31, len));