author     Roland McGrath <roland@hack.frob.com>  2014-06-12 09:05:54 -0700
committer  Roland McGrath <roland@hack.frob.com>  2014-06-12 09:05:54 -0700
commit     45262aeedf2f56dcd3b30e37630ea85bb4f55603 (patch)
tree       726ce7b82a5d652713e514fa2811244fe647e5a7 /sysdeps/unix/sysv/linux/sh
parent     35a5e3e338ae17f3d42c60a708763c5d498fb840 (diff)
Move SH code out of nptl/ subdirectory.
Diffstat (limited to 'sysdeps/unix/sysv/linux/sh')
-rw-r--r--  sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S           |  18
-rw-r--r--  sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h             |  80
-rw-r--r--  sysdeps/unix/sysv/linux/sh/lowlevellock.S                | 574
-rw-r--r--  sysdeps/unix/sysv/linux/sh/lowlevellock.h                | 419
-rw-r--r--  sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S          | 278
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S        | 239
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S      | 292
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S         | 201
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S      | 769
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S           | 687
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_once.S                | 257
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S       | 270
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S  | 339
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S  | 323
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S       | 221
-rw-r--r--  sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S       | 252
-rw-r--r--  sysdeps/unix/sysv/linux/sh/sem_post.S                    | 111
-rw-r--r--  sysdeps/unix/sysv/linux/sh/sem_timedwait.S               | 281
-rw-r--r--  sysdeps/unix/sysv/linux/sh/sem_trywait.S                 | 102
-rw-r--r--  sysdeps/unix/sysv/linux/sh/sem_wait.S                    | 229
-rw-r--r--  sysdeps/unix/sysv/linux/sh/sh4/lowlevellock.h            |   4
-rw-r--r--  sysdeps/unix/sysv/linux/sh/smp.h                         |  23
-rw-r--r--  sysdeps/unix/sysv/linux/sh/sysdep-cancel.h               | 169
23 files changed, 6138 insertions, 0 deletions
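
All of the new files sit directly on the Linux futex system call (number 240
on sh, as the new lowlevellock.h below defines).  For orientation, here is a
minimal C sketch of the two futex operations the assembly uses most; the
helper names are illustrative, not part of the patch, and the later sketches
on this page reuse them.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Block while *UADDR still equals EXPECTED; fails immediately with
   EWOULDBLOCK otherwise.  TIMEOUT may be NULL for no timeout.  */
static long
futex_wait (int *uaddr, int expected, const struct timespec *timeout)
{
  return syscall (SYS_futex, uaddr, FUTEX_WAIT, expected, timeout);
}

/* Wake up to NWAKE threads blocked in futex_wait on *UADDR.  */
static long
futex_wake (int *uaddr, int nwake)
{
  return syscall (SYS_futex, uaddr, FUTEX_WAKE, nwake);
}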
diff --git a/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S
new file mode 100644
index 0000000000..ad6188d096
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S
@@ -0,0 +1,18 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "lowlevellock.S"
diff --git a/sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h b/sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h
new file mode 100644
index 0000000000..d580ca3ce5
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h
@@ -0,0 +1,80 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifdef __ASSEMBLER__
+
+#define _IMP1 #1
+#define _IMM1 #-1
+#define _IMM4 #-4
+#define _IMM6 #-6
+#define _IMM8 #-8
+
+#define	INC(mem, reg) \
+	.align	2; \
+	mova	99f, r0; \
+	mov	r15, r1; \
+	mov	_IMM6, r15; \
+98:	mov.l	mem, reg; \
+	add	_IMP1, reg; \
+	mov.l	reg, mem; \
+99:	mov	r1, r15
+
+#define	DEC(mem, reg) \
+	.align	2; \
+	mova	99f, r0; \
+	mov	r15, r1; \
+	mov	_IMM6, r15; \
+98:	mov.l	mem, reg; \
+	add	_IMM1, reg; \
+	mov.l	reg, mem; \
+99:	mov	r1, r15
+
+#define	XADD(reg, mem, old, tmp) \
+	.align	2; \
+	mova	99f, r0; \
+	nop; \
+	mov	r15, r1; \
+	mov	_IMM8, r15; \
+98:	mov.l	mem, old; \
+	mov	reg, tmp; \
+	add	old, tmp; \
+	mov.l	tmp, mem; \
+99:	mov	r1, r15
+
+#define	XCHG(reg, mem, old) \
+	.align	2; \
+	mova	99f, r0; \
+	nop; \
+	mov	r15, r1; \
+	mov	_IMM4, r15; \
+98:	mov.l	mem, old; \
+	mov.l	reg, mem; \
+99:	mov	r1, r15
+
+#define	CMPXCHG(reg, mem, new, old) \
+	.align	2; \
+	mova	99f, r0; \
+	nop; \
+	mov	r15, r1; \
+	mov	_IMM8, r15; \
+98:	mov.l	mem, old; \
+	cmp/eq	old, reg; \
+	bf	99f; \
+	mov.l	new, mem; \
+99:	mov	r1, r15
+
+#endif  /* __ASSEMBLER__ */
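
What makes the sequences above atomic is worth spelling out: classic SH has
no compare-and-swap instruction, so the macros use the Linux/SH gUSA
convention for restartable atomic sequences.  mova loads r0 with the address
of the closing 99 label, r1 saves the real stack pointer, and r15 is set to
minus the length of the critical section; if the kernel interrupts a task
whose r15 lies in that small negative range, it resumes the task at r0 + r15
(label 98), restarting the sequence from scratch.  In C, one CMPXCHG
expansion therefore behaves like the single atomic step below (a sketch; the
function name is illustrative).  Success is reported through the T bit,
which cmp/eq leaves set exactly when the old value matched, hence the bf
(branch-if-false) tests after each CMPXCHG use.

/* What one CMPXCHG (reg, mem, new, old) expansion computes; the gUSA
   restart guarantees the three steps are never observed half-done.  */
static int
cmpxchg_sketch (int *mem, int expected, int newval, int *old)
{
  *old = *mem;            /* 98: mov.l  mem, old  */
  if (*old != expected)   /*     cmp/eq old, reg  */
    return 0;             /*     bf     99f       */
  *mem = newval;          /*     mov.l  new, mem  */
  return 1;               /* 99: T bit still set  */
}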
diff --git a/sysdeps/unix/sysv/linux/sh/lowlevellock.S b/sysdeps/unix/sysv/linux/sh/lowlevellock.S
new file mode 100644
index 0000000000..84b8edb86c
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/lowlevellock.S
@@ -0,0 +1,574 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
+#include "lowlevel-atomic.h"
+
+	.text
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+	mov	#(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg; \
+	extu.b	reg, reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg,tmp,tmp2) \
+	mov	#(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg; \
+	extu.b	reg, reg
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+	mov	#(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), tmp; \
+	extu.b	tmp, tmp; \
+	xor	tmp, reg
+# define LOAD_FUTEX_WAIT_ABS(reg,tmp,tmp2) \
+	mov	#(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG), tmp; \
+	extu.b	tmp, tmp; \
+	mov	#(FUTEX_CLOCK_REALTIME >> 8), tmp2; \
+	swap.b	tmp2, tmp2; \
+	or	tmp2, tmp; \
+	xor	tmp, reg
+# define LOAD_FUTEX_WAKE(reg,tmp,tmp2) \
+	mov	#(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), tmp; \
+	extu.b	tmp, tmp; \
+	xor	tmp, reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+	stc	gbr, tmp	; \
+	mov.w	99f, reg	; \
+	add	reg, tmp	; \
+	bra	98f		; \
+	 mov.l	@tmp, reg	; \
+99:	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:
+# else
+#  define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+	stc	gbr, tmp	; \
+	mov.w	99f, reg	; \
+	add	reg, tmp	; \
+	mov.l	@tmp, reg	; \
+	bra	98f		; \
+	 mov	#FUTEX_WAIT, tmp ; \
+99:	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:	or	tmp, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg,tmp,tmp2) \
+	stc	gbr, tmp	; \
+	mov.w	99f, reg	; \
+	add	reg, tmp	; \
+	mov.l	@tmp, reg	; \
+	bra	98f		; \
+	 mov	#FUTEX_WAKE, tmp ; \
+99:	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:	or	tmp, reg
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+	stc	gbr, tmp	; \
+	mov.w	99f, tmp2	; \
+	add	tmp2, tmp	; \
+	mov.l	@tmp, tmp2	; \
+	bra	98f		; \
+	 mov	#FUTEX_PRIVATE_FLAG, tmp ; \
+99:	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:	extu.b	tmp, tmp	; \
+	xor	tmp, reg	; \
+	and	tmp2, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+	stc	gbr, tmp	; \
+	mov.w	99f, tmp2	; \
+	add	tmp2, tmp	; \
+	mov.l	@tmp, tmp2	; \
+	bra	98f		; \
+	 mov	#FUTEX_PRIVATE_FLAG, tmp ; \
+99:	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:	extu.b	tmp, tmp	; \
+	xor	tmp, reg	; \
+	and	tmp2, reg	; \
+	mov	#FUTEX_WAIT, tmp ; \
+	or	tmp, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg,tmp,tmp2) \
+	stc	gbr, tmp	; \
+	mov.w	99f, tmp2	; \
+	add	tmp2, tmp	; \
+	mov.l	@tmp, tmp2	; \
+	bra	98f		; \
+	 mov	#FUTEX_PRIVATE_FLAG, tmp ; \
+99:	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:	extu.b	tmp, tmp	; \
+	xor	tmp, reg	; \
+	and	tmp2, reg	; \
+	mov	#FUTEX_WAIT_BITSET, tmp ; \
+	mov	#(FUTEX_CLOCK_REALTIME >> 8), tmp2; \
+	swap.b	tmp2, tmp2; \
+	or	tmp2, tmp; \
+	or	tmp, reg
+# define LOAD_FUTEX_WAKE(reg,tmp,tmp2) \
+	stc	gbr, tmp	; \
+	mov.w	99f, tmp2	; \
+	add	tmp2, tmp	; \
+	mov.l	@tmp, tmp2	; \
+	bra	98f		; \
+	 mov	#FUTEX_PRIVATE_FLAG, tmp ; \
+99:	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:	extu.b	tmp, tmp	; \
+	xor	tmp, reg	; \
+	and	tmp2, reg	; \
+	mov	#FUTEX_WAKE, tmp ; \
+	or	tmp, reg
+#endif
+
+	.globl	__lll_lock_wait_private
+	.type	__lll_lock_wait_private,@function
+	.hidden	__lll_lock_wait_private
+	.align	5
+	cfi_startproc
+__lll_lock_wait_private:
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r8, 0)
+	mov	r4, r6
+	mov	r5, r8
+	mov	#0, r7		/* No timeout.  */
+	LOAD_PRIVATE_FUTEX_WAIT (r5, r0, r1)
+
+	mov	#2, r4
+	cmp/eq	r4, r6
+	bf	2f
+
+1:
+	mov	r8, r4
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+2:
+	mov	#2, r6
+	XCHG (r6, @r8, r2)
+	tst	r2, r2
+	bf	1b
+
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	rts
+	 mov	r2, r0
+	cfi_endproc
+	.size	__lll_lock_wait_private,.-__lll_lock_wait_private
+
+#ifdef NOT_IN_libc
+	.globl	__lll_lock_wait
+	.type	__lll_lock_wait,@function
+	.hidden	__lll_lock_wait
+	.align	5
+	cfi_startproc
+__lll_lock_wait:
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r8, 0)
+	mov	r6, r9
+	mov	r4, r6
+	mov	r5, r8
+	mov	#0, r7		/* No timeout.  */
+	mov	r9, r5
+	LOAD_FUTEX_WAIT (r5, r0, r1)
+
+	mov	#2, r4
+	cmp/eq	r4, r6
+	bf	2f
+
+1:
+	mov	r8, r4
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+2:
+	mov	#2, r6
+	XCHG (r6, @r8, r2)
+	tst	r2, r2
+	bf	1b
+
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	ret
+	 mov	r2, r0
+	cfi_endproc
+	.size	__lll_lock_wait,.-__lll_lock_wait
+
+	/*      r5  (r8): futex
+		r7 (r11): flags
+		r6  (r9): timeout
+		r4 (r10): futex value
+	*/
+	.globl	__lll_timedlock_wait
+	.type	__lll_timedlock_wait,@function
+	.hidden	__lll_timedlock_wait
+	.align	5
+	cfi_startproc
+__lll_timedlock_wait:
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r12, 0)
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	mov.l	.Lhave, r1
+#  ifdef PIC
+	mova	.Lgot, r0
+	mov.l	.Lgot, r12
+	add	r0, r12
+	add	r12, r1
+#  endif
+	mov.l	@r1, r0
+	tst	r0, r0
+	bt	.Lreltmo
+# endif
+
+	/* if (timeout->tv_sec < 0) return ETIMEDOUT; */
+	mov.l	@r6, r1
+	cmp/pz	r1
+	bf/s	5f
+	 mov	#ETIMEDOUT, r0
+
+	mov	r4, r2
+	mov	r5, r4
+	mov	r7, r5
+	mov	r6, r7
+	LOAD_FUTEX_WAIT_ABS (r5, r0, r1)
+
+	mov	#2, r6
+	cmp/eq	r6, r2
+	bf/s	2f
+	 mov	r6, r2
+
+1:
+	mov	#2, r6
+	mov	#-1, r1
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x16
+	SYSCALL_INST_PAD
+	mov	r0, r6
+
+2:
+	XCHG	(r2, @r4, r3)	/* NB:   lock is implied */
+
+	tst	r3, r3
+	bt/s	3f
+	 mov	r6, r0
+
+	cmp/eq	#-ETIMEDOUT, r0
+	bt	4f
+	cmp/eq	#-EINVAL, r0
+	bf	1b
+4:
+	neg	r0, r3
+3:
+	mov	r3, r0
+5:
+	rts
+	 mov.l	@r15+, r12
+	/* Omit CFI for restore in delay slot.  */
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.align	2
+# ifdef PIC
+.Lgot:
+	.long	_GLOBAL_OFFSET_TABLE_
+.Lhave:
+	.long	__have_futex_clock_realtime@GOTOFF
+# else
+.Lhave:
+	.long	__have_futex_clock_realtime
+# endif
+
+.Lreltmo:
+	/* Check for a valid timeout value.  */
+	mov.l	@(4,r6), r1
+	mov.l	.L1g, r0
+	cmp/hs	r0, r1
+	bt	3f
+
+	cfi_remember_state
+
+	mov.l	r11, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r11, 0)
+	mov.l	r10, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r10, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r8, 0)
+	mov	r7, r11
+	mov	r4, r10
+	mov	r6, r9
+	mov	r5, r8
+
+	/* Stack frame for the timespec and timeval structs.  */
+	add	#-8, r15
+	cfi_adjust_cfa_offset(8)
+
+	mov	#2, r2
+	XCHG (r2, @r8, r3)
+
+	tst	r3, r3
+	bt	6f
+
+1:
+	/* Get current time.  */
+	mov	r15, r4
+	mov	#0, r5
+	mov	#__NR_gettimeofday, r3
+	trapa	#0x12
+	SYSCALL_INST_PAD
+
+	/* Compute relative timeout.  */
+	mov.l	@(4,r15), r0
+	mov.w	.L1k, r1
+	dmulu.l	r0, r1		/* Microseconds to nanoseconds.  */
+	mov.l	@r9, r2
+	mov.l	@(4,r9), r3
+	mov.l	@r15, r0
+	sts	macl, r1
+	sub	r0, r2
+	clrt
+	subc	r1, r3
+	bf	4f
+	mov.l	.L1g, r1
+	add	r1, r3
+	add	#-1, r2
+4:
+	cmp/pz	r2
+	bf	2f		/* Time is already up.  */
+
+	mov.l	r2, @r15	/* Store relative timeout.  */
+	mov.l	r3, @(4,r15)
+
+	mov	r8, r4
+	mov	r11, r5
+	LOAD_FUTEX_WAIT (r5, r0, r1)
+	mov	r10, r6
+	mov	r15, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	mov	r0, r5
+
+	mov	#2, r2
+	XCHG (r2, @r8, r3)
+
+	tst	r3, r3
+	bt/s	6f
+	 mov	#-ETIMEDOUT, r1
+	cmp/eq	r5, r1
+	bf	1b
+
+2:	mov	#ETIMEDOUT, r3
+
+6:
+	add	#8, r15
+	cfi_adjust_cfa_offset (-8)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	mov.l	@r15+, r11
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r11)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	r3, r0
+
+	cfi_restore_state
+
+3:
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	#EINVAL, r0
+# endif
+	cfi_endproc
+
+.L1k:
+	.word	1000
+	.align	2
+.L1g:
+	.long	1000000000
+
+	.size	__lll_timedlock_wait,.-__lll_timedlock_wait
+#endif
+
+	.globl	__lll_unlock_wake_private
+	.type	__lll_unlock_wake_private,@function
+	.hidden	__lll_unlock_wake_private
+	.align	5
+	cfi_startproc
+__lll_unlock_wake_private:
+	LOAD_PRIVATE_FUTEX_WAKE (r5, r0, r1)
+	mov	#1, r6		/* Wake one thread.  */
+	mov	#0, r7
+	mov.l	r7, @r4		/* Stores 0.  */
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	rts
+	 nop
+	cfi_endproc
+	.size	__lll_unlock_wake_private,.-__lll_unlock_wake_private
+
+#ifdef NOT_IN_libc
+	.globl	__lll_unlock_wake
+	.type	__lll_unlock_wake,@function
+	.hidden	__lll_unlock_wake
+	.align	5
+	cfi_startproc
+__lll_unlock_wake:
+	LOAD_FUTEX_WAKE (r5, r0, r1)
+	mov	#1, r6		/* Wake one thread.  */
+	mov	#0, r7
+	mov.l	r7, @r4		/* Stores 0.  */
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	rts
+	 nop
+	cfi_endproc
+	.size	__lll_unlock_wake,.-__lll_unlock_wake
+
+	.globl	__lll_timedwait_tid
+	.type	__lll_timedwait_tid,@function
+	.hidden	__lll_timedwait_tid
+	.align	5
+	cfi_startproc
+__lll_timedwait_tid:
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r8, 0)
+	mov	r4, r8
+	mov	r5, r9
+
+	/* Stack frame for the timespec and timeval structs.  */
+	add	#-8, r15
+	cfi_adjust_cfa_offset(8)
+
+2:
+	/* Get current time.  */
+	mov	r15, r4
+	mov	#0, r5
+	mov	#__NR_gettimeofday, r3
+	trapa	#0x12
+	SYSCALL_INST_PAD
+
+	/* Compute relative timeout.  */
+	mov.l	@(4,r15), r0
+	mov.w	.L1k2, r1
+	dmulu.l	r0, r1		/* Microseconds to nanoseconds.  */
+	mov.l	@r9, r2
+	mov.l	@(4,r9), r3
+	mov.l	@r15, r0
+	sts	macl, r1
+	sub	r0, r2
+	clrt
+	subc	r1, r3
+	bf	5f
+	mov.l	.L1g2, r1
+	add	r1, r3
+	add	#-1, r2
+5:
+	cmp/pz	r2
+	bf	6f		/* Time is already up.  */
+
+	mov.l	r2, @r15	/* Store relative timeout.  */
+	mov.l	r3, @(4,r15)
+
+	mov.l	@r8, r2
+	tst	r2, r2
+	bt	4f
+
+	mov	r8, r4
+	/* XXX The kernel so far uses a global futex for the wakeup at
+	   all times.  */
+	mov	#0, r5
+	extu.b	r5, r5
+	mov	r2, r6
+	mov	r15, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	mov.l	@r8, r2
+	tst	r2, r2
+	bf	1f
+4:
+	mov	#0, r0
+3:
+	cfi_remember_state
+	add	#8, r15
+	cfi_adjust_cfa_offset (-8)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	rts
+	 mov.l	@r15+, r9
+	/* Omit CFI for restore in delay slot.  */
+	cfi_restore_state
+1:
+	/* Check whether the time expired.  */
+	mov	#-ETIMEDOUT, r1
+	cmp/eq	r0, r1
+	bf	2b
+6:
+	bra	3b
+	 mov	#ETIMEDOUT, r0
+	cfi_endproc
+
+.L1k2:
+	.word	1000
+	.align	2
+.L1g2:
+	.long	1000000000
+	.size	__lll_timedwait_tid,.-__lll_timedwait_tid
+#endif
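
Both __lll_lock_wait variants above implement the usual three-state futex
lock: 0 unlocked, 1 locked with no waiters, 2 locked with possible waiters.
A C rendering of the protocol, reusing futex_wait/futex_wake from the first
sketch; the GCC __atomic builtins stand in for the XCHG and DEC gUSA
sequences.

/* Slow path, entered only after the inline fast path (a CMPXCHG of
   0 -> 1 in lll_lock) failed; VAL is the lock word it observed.  */
static void
lll_lock_wait_sketch (int *futex, int val)
{
  if (val == 2)			/* Already marked as contended.  */
    futex_wait (futex, 2, NULL);
  /* Take the lock and mark it contended in one step, so whoever
     unlocks later knows a FUTEX_WAKE is needed.  */
  while (__atomic_exchange_n (futex, 2, __ATOMIC_ACQUIRE) != 0)
    futex_wait (futex, 2, NULL);
}

/* Counterpart of the inline lll_unlock plus __lll_unlock_wake.  */
static void
lll_unlock_sketch (int *futex)
{
  /* The old value was 1 for an uncontended lock; anything else means
     a waiter may be asleep on the word.  */
  if (__atomic_fetch_sub (futex, 1, __ATOMIC_RELEASE) != 1)
    {
      *futex = 0;
      futex_wake (futex, 1);	/* Wake exactly one waiter.  */
    }
}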
diff --git a/sysdeps/unix/sysv/linux/sh/lowlevellock.h b/sysdeps/unix/sysv/linux/sh/lowlevellock.h
new file mode 100644
index 0000000000..438632d962
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/lowlevellock.h
@@ -0,0 +1,419 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _LOWLEVELLOCK_H
+#define _LOWLEVELLOCK_H	1
+
+#ifndef __ASSEMBLER__
+#include <time.h>
+#include <sys/param.h>
+#include <bits/pthreadtypes.h>
+#include <kernel-features.h>
+#endif
+
+#define SYS_futex		240
+#define FUTEX_WAIT		0
+#define FUTEX_WAKE		1
+#define FUTEX_CMP_REQUEUE	4
+#define FUTEX_WAKE_OP		5
+#define FUTEX_LOCK_PI		6
+#define FUTEX_UNLOCK_PI		7
+#define FUTEX_TRYLOCK_PI	8
+#define FUTEX_WAIT_BITSET	9
+#define FUTEX_WAKE_BITSET	10
+#define FUTEX_PRIVATE_FLAG	128
+#define FUTEX_CLOCK_REALTIME	256
+
+#define FUTEX_BITSET_MATCH_ANY	0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE    0
+#define LLL_SHARED     FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)					      \
+   ? ((private) == 0							      \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))	      \
+      : (fl))								      \
+   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)				      \
+	      & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
+
+#ifndef __ASSEMBLER__
+
+/* Initializer for compatibility lock.  */
+#define LLL_LOCK_INITIALIZER		(0)
+#define LLL_LOCK_INITIALIZER_LOCKED	(1)
+#define LLL_LOCK_INITIALIZER_WAITERS	(2)
+
+extern int __lll_lock_wait_private (int val, int *__futex)
+  attribute_hidden;
+extern int __lll_lock_wait (int val, int *__futex, int private)
+  attribute_hidden;
+extern int __lll_timedlock_wait (int val, int *__futex,
+				 const struct timespec *abstime, int private)
+  attribute_hidden;
+extern int __lll_robust_lock_wait (int val, int *__futex, int private)
+  attribute_hidden;
+extern int __lll_robust_timedlock_wait (int val, int *__futex,
+					const struct timespec *abstime,
+					int private)
+  attribute_hidden;
+extern int __lll_unlock_wake_private (int *__futex) attribute_hidden;
+extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
+
+#define lll_trylock(futex) \
+  ({ unsigned char __result; \
+     __asm __volatile ("\
+	.align 2\n\
+	mova 1f,r0\n\
+	nop\n\
+	mov r15,r1\n\
+	mov #-8,r15\n\
+     0: mov.l @%1,r2\n\
+	cmp/eq r2,%3\n\
+	bf 1f\n\
+	mov.l %2,@%1\n\
+     1: mov r1,r15\n\
+	mov #-1,%0\n\
+	negc %0,%0"\
+	: "=r" (__result) \
+	: "r" (&(futex)), \
+	  "r" (LLL_LOCK_INITIALIZER_LOCKED), \
+	  "r" (LLL_LOCK_INITIALIZER) \
+	: "r0", "r1", "r2", "t", "memory"); \
+     __result; })
+
+#define lll_robust_trylock(futex, id)	\
+  ({ unsigned char __result; \
+     __asm __volatile ("\
+	.align 2\n\
+	mova 1f,r0\n\
+	nop\n\
+	mov r15,r1\n\
+	mov #-8,r15\n\
+     0: mov.l @%1,r2\n\
+	cmp/eq r2,%3\n\
+	bf 1f\n\
+	mov.l %2,@%1\n\
+     1: mov r1,r15\n\
+	mov #-1,%0\n\
+	negc %0,%0"\
+	: "=r" (__result) \
+	: "r" (&(futex)), \
+	  "r" (id), \
+	  "r" (LLL_LOCK_INITIALIZER) \
+	: "r0", "r1", "r2", "t", "memory"); \
+     __result; })
+
+#define lll_cond_trylock(futex) \
+  ({ unsigned char __result; \
+     __asm __volatile ("\
+	.align 2\n\
+	mova 1f,r0\n\
+	nop\n\
+	mov r15,r1\n\
+	mov #-8,r15\n\
+     0: mov.l @%1,r2\n\
+	cmp/eq r2,%3\n\
+	bf 1f\n\
+	mov.l %2,@%1\n\
+     1: mov r1,r15\n\
+	mov #-1,%0\n\
+	negc %0,%0"\
+	: "=r" (__result) \
+	: "r" (&(futex)), \
+	  "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+	  "r" (LLL_LOCK_INITIALIZER) \
+	: "r0", "r1", "r2", "t", "memory"); \
+     __result; })
+
+#define lll_lock(futex, private) \
+  (void) ({ int __result, *__futex = &(futex); \
+	    __asm __volatile ("\
+		.align 2\n\
+		mova 1f,r0\n\
+		nop\n\
+		mov r15,r1\n\
+		mov #-8,r15\n\
+	     0: mov.l @%2,%0\n\
+		tst %0,%0\n\
+		bf 1f\n\
+		mov.l %1,@%2\n\
+	     1: mov r1,r15"\
+		: "=&r" (__result) : "r" (1), "r" (__futex) \
+		: "r0", "r1", "t", "memory"); \
+	    if (__result) \
+	      { \
+		if (__builtin_constant_p (private) \
+		    && (private) == LLL_PRIVATE) \
+		  __lll_lock_wait_private (__result, __futex); \
+	        else \
+		  __lll_lock_wait (__result, __futex, (private));	\
+	      } \
+    })
+
+#define lll_robust_lock(futex, id, private) \
+  ({ int __result, *__futex = &(futex); \
+     __asm __volatile ("\
+	.align 2\n\
+	mova 1f,r0\n\
+	nop\n\
+	mov r15,r1\n\
+	mov #-8,r15\n\
+      0: mov.l @%2,%0\n\
+	tst %0,%0\n\
+	bf 1f\n\
+	mov.l %1,@%2\n\
+      1: mov r1,r15"\
+	: "=&r" (__result) : "r" (id), "r" (__futex) \
+	: "r0", "r1", "t", "memory"); \
+     if (__result) \
+       __result = __lll_robust_lock_wait (__result, __futex, private); \
+     __result; })
+
+/* Special version of lll_mutex_lock which causes the unlock function to
+   always wakeup waiters.  */
+#define lll_cond_lock(futex, private) \
+  (void) ({ int __result, *__futex = &(futex); \
+	    __asm __volatile ("\
+		.align 2\n\
+		mova 1f,r0\n\
+		nop\n\
+		mov r15,r1\n\
+		mov #-8,r15\n\
+	     0: mov.l @%2,%0\n\
+		tst %0,%0\n\
+		bf 1f\n\
+		mov.l %1,@%2\n\
+	     1: mov r1,r15"\
+		: "=&r" (__result) : "r" (2), "r" (__futex) \
+		: "r0", "r1", "t", "memory"); \
+	    if (__result) \
+	      __lll_lock_wait (__result, __futex, private); })
+
+#define lll_robust_cond_lock(futex, id, private) \
+  ({ int __result, *__futex = &(futex); \
+     __asm __volatile ("\
+	.align 2\n\
+	mova 1f,r0\n\
+	nop\n\
+	mov r15,r1\n\
+	mov #-8,r15\n\
+     0: mov.l @%2,%0\n\
+	tst %0,%0\n\
+	bf 1f\n\
+	mov.l %1,@%2\n\
+     1: mov r1,r15"\
+	: "=&r" (__result) : "r" (id | FUTEX_WAITERS), "r" (__futex) \
+	: "r0", "r1", "t", "memory"); \
+      if (__result) \
+	__result = __lll_robust_lock_wait (__result, __futex, private); \
+      __result; })
+
+#define lll_timedlock(futex, timeout, private) \
+  ({ int __result, *__futex = &(futex); \
+     __asm __volatile ("\
+	.align 2\n\
+	mova 1f,r0\n\
+	nop\n\
+	mov r15,r1\n\
+	mov #-8,r15\n\
+     0: mov.l @%2,%0\n\
+	tst %0,%0\n\
+	bf 1f\n\
+	mov.l %1,@%2\n\
+     1: mov r1,r15"\
+	: "=&r" (__result) : "r" (1), "r" (__futex) \
+	: "r0", "r1", "t", "memory"); \
+    if (__result) \
+      __result = __lll_timedlock_wait (__result, __futex, timeout, private); \
+    __result; })
+
+#define lll_robust_timedlock(futex, timeout, id, private) \
+  ({ int __result, *__futex = &(futex); \
+     __asm __volatile ("\
+	.align 2\n\
+	mova 1f,r0\n\
+	nop\n\
+	mov r15,r1\n\
+	mov #-8,r15\n\
+     0: mov.l @%2,%0\n\
+	tst %0,%0\n\
+	bf 1f\n\
+	mov.l %1,@%2\n\
+     1: mov r1,r15"\
+	: "=&r" (__result) : "r" (id), "r" (__futex) \
+	: "r0", "r1", "t", "memory"); \
+    if (__result) \
+      __result = __lll_robust_timedlock_wait (__result, __futex, \
+					      timeout, private); \
+    __result; })
+
+#define lll_unlock(futex, private) \
+  (void) ({ int __result, *__futex = &(futex); \
+	    __asm __volatile ("\
+		.align 2\n\
+		mova 1f,r0\n\
+		mov r15,r1\n\
+		mov #-6,r15\n\
+	     0: mov.l @%1,%0\n\
+		add #-1,%0\n\
+		mov.l %0,@%1\n\
+	     1: mov r1,r15"\
+		: "=&r" (__result) : "r" (__futex) \
+		: "r0", "r1", "memory"); \
+	    if (__result) \
+	      { \
+		if (__builtin_constant_p (private) \
+		    && (private) == LLL_PRIVATE) \
+		  __lll_unlock_wake_private (__futex); \
+	        else \
+		  __lll_unlock_wake (__futex, (private)); \
+	      } \
+    })
+
+#define lll_robust_unlock(futex, private) \
+  (void) ({ int __result, *__futex = &(futex); \
+	    __asm __volatile ("\
+		.align 2\n\
+		mova 1f,r0\n\
+		mov r15,r1\n\
+		mov #-6,r15\n\
+	     0: mov.l @%1,%0\n\
+		and %2,%0\n\
+		mov.l %0,@%1\n\
+	     1: mov r1,r15"\
+		: "=&r" (__result) : "r" (__futex), "r" (FUTEX_WAITERS) \
+		: "r0", "r1", "memory");	\
+	    if (__result) \
+	      __lll_unlock_wake (__futex, private); })
+
+#define lll_robust_dead(futex, private)		       \
+  (void) ({ int __ignore, *__futex = &(futex); \
+	    __asm __volatile ("\
+		.align 2\n\
+		mova 1f,r0\n\
+		mov r15,r1\n\
+		mov #-6,r15\n\
+	     0: mov.l @%1,%0\n\
+		or %2,%0\n\
+		mov.l %0,@%1\n\
+	     1: mov r1,r15"\
+		: "=&r" (__ignore) : "r" (__futex), "r" (FUTEX_OWNER_DIED) \
+		: "r0", "r1", "memory");	\
+	    lll_futex_wake (__futex, 1, private); })
+
+# ifdef NEED_SYSCALL_INST_PAD
+#  define SYSCALL_WITH_INST_PAD "\
+	trapa #0x14; or r0,r0; or r0,r0; or r0,r0; or r0,r0; or r0,r0"
+# else
+#  define SYSCALL_WITH_INST_PAD "\
+	trapa #0x14"
+# endif
+
+#define lll_futex_wait(futex, val, private) \
+  lll_futex_timed_wait (futex, val, NULL, private)
+
+
+#define lll_futex_timed_wait(futex, val, timeout, private) \
+  ({									      \
+    int __status;							      \
+    register unsigned long __r3 asm ("r3") = SYS_futex;			      \
+    register unsigned long __r4 asm ("r4") = (unsigned long) (futex);	      \
+    register unsigned long __r5 asm ("r5")				      \
+      = __lll_private_flag (FUTEX_WAIT, private);			      \
+    register unsigned long __r6 asm ("r6") = (unsigned long) (val);	      \
+    register unsigned long __r7 asm ("r7") = (timeout);			      \
+    __asm __volatile (SYSCALL_WITH_INST_PAD				      \
+		      : "=z" (__status)					      \
+		      : "r" (__r3), "r" (__r4), "r" (__r5),		      \
+			"r" (__r6), "r" (__r7)				      \
+		      : "memory", "t");					      \
+    __status;								      \
+  })
+
+
+#define lll_futex_wake(futex, nr, private) \
+  do {									      \
+    int __ignore;							      \
+    register unsigned long __r3 asm ("r3") = SYS_futex;			      \
+    register unsigned long __r4 asm ("r4") = (unsigned long) (futex);	      \
+    register unsigned long __r5 asm ("r5")				      \
+      = __lll_private_flag (FUTEX_WAKE, private);			      \
+    register unsigned long __r6 asm ("r6") = (unsigned long) (nr);	      \
+    register unsigned long __r7 asm ("r7") = 0;				      \
+    __asm __volatile (SYSCALL_WITH_INST_PAD				      \
+		      : "=z" (__ignore)					      \
+		      : "r" (__r3), "r" (__r4), "r" (__r5),		      \
+			"r" (__r6), "r" (__r7)				      \
+		      : "memory", "t");					      \
+  } while (0)
+
+
+#define lll_islocked(futex) \
+  (futex != LLL_LOCK_INITIALIZER)
+
+/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
+   wakeup when the clone terminates.  The memory location contains the
+   thread ID while the clone is running and is reset to zero
+   afterwards.  */
+
+#define lll_wait_tid(tid) \
+  do {									      \
+    __typeof (tid) *__tid = &(tid);					      \
+    while (*__tid != 0)							      \
+      lll_futex_wait (__tid, *__tid, LLL_SHARED);			      \
+  } while (0)
+
+extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
+     attribute_hidden;
+#define lll_timedwait_tid(tid, abstime) \
+  ({									      \
+    int __result = 0;							      \
+    if (tid != 0)							      \
+      {									      \
+	if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)	      \
+	  __result = EINVAL;						      \
+	else								      \
+	  __result = __lll_timedwait_tid (&tid, abstime);		      \
+      }									      \
+    __result; })
+
+#endif  /* !__ASSEMBLER__ */
+
+#endif  /* lowlevellock.h */
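
The "backwards" comment on LLL_PRIVATE/LLL_SHARED above deserves a worked
example.  Outside libc and ld.so, with __ASSUME_PRIVATE_FUTEX defined,
__lll_private_flag computes (fl | FUTEX_PRIVATE_FLAG) ^ (private), so
passing LLL_PRIVATE (0) keeps the private bit set while passing LLL_SHARED
(128) cancels it: the bit really is reversed on its way to the kernel.

#include <assert.h>

#define FUTEX_WAIT          0
#define FUTEX_PRIVATE_FLAG  128
#define LLL_PRIVATE         0
#define LLL_SHARED          FUTEX_PRIVATE_FLAG

#define lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))

int
main (void)
{
  /* LLL_PRIVATE yields the private futex opcode ...  */
  assert (lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
	  == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
  /* ... and LLL_SHARED the plain, shared one.  */
  assert (lll_private_flag (FUTEX_WAIT, LLL_SHARED) == FUTEX_WAIT);
  return 0;
}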
diff --git a/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S
new file mode 100644
index 0000000000..65b8d0cdcd
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S
@@ -0,0 +1,278 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <kernel-features.h>
+#include "lowlevel-atomic.h"
+
+	.text
+
+#define FUTEX_WAITERS		0x80000000
+#define FUTEX_OWNER_DIED	0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+	mov	#(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), tmp; \
+	extu.b	tmp, tmp; \
+	xor	tmp, reg
+#else
+# if FUTEX_WAIT == 0
+#  define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+	stc	gbr, tmp	; \
+	mov.w	99f, tmp2	; \
+	add	tmp2, tmp 	; \
+	mov.l	@tmp, tmp2	; \
+	bra	98f		; \
+	 mov	#FUTEX_PRIVATE_FLAG, tmp ; \
+99:	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:	extu.b	tmp, tmp	; \
+	xor	tmp, reg	; \
+	and	tmp2, reg
+# else
+#  define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+	stc	gbr, tmp	; \
+	mov.w	99f, tmp2	; \
+	add	tmp2, tmp 	; \
+	mov.l	@tmp, tmp2	; \
+	bra	98f		; \
+	 mov	#FUTEX_PRIVATE_FLAG, tmp ; \
+99:	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:	extu.b	tmp, tmp	; \
+	xor	tmp, reg	; \
+	and	tmp2, reg	; \
+	mov	#FUTEX_WAIT, tmp ; \
+	or	tmp, reg
+# endif
+#endif
+
+	.globl	__lll_robust_lock_wait
+	.type	__lll_robust_lock_wait,@function
+	.hidden	__lll_robust_lock_wait
+	.align	5
+	cfi_startproc
+__lll_robust_lock_wait:
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r8, 0)
+	mov	r5, r8
+	mov	#0, r7		/* No timeout.  */
+	mov	r6, r5
+	LOAD_FUTEX_WAIT (r5, r0, r1)
+
+4:
+	mov	r4, r6
+	mov.l	.L_FUTEX_WAITERS, r0
+	or	r0, r6
+	shlr	r0		/* r0 = FUTEX_OWNER_DIED */
+	tst	r0, r4
+	bf/s	3f
+	 cmp/eq	r4, r6
+	bt	1f
+
+	CMPXCHG (r4, @r8, r6, r2)
+	bf	2f
+
+1:
+	mov	r8, r4
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	mov.l	@r8, r2
+
+2:
+	tst	r2, r2
+	bf/s	4b
+	 mov	r2, r4
+
+	stc	gbr, r1
+	mov.w	.Ltidoff, r2
+	add	r2, r1
+	mov.l	@r1, r6
+	mov	#0, r3
+	CMPXCHG (r3, @r8, r6, r4)
+	bf	4b
+	mov	#0, r4
+
+3:
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	ret
+	 mov	r4, r0
+	cfi_endproc
+	.align	2
+.L_FUTEX_WAITERS:
+	.long	FUTEX_WAITERS
+.Ltidoff:
+	.word	TID - TLS_PRE_TCB_SIZE
+	.size	__lll_robust_lock_wait,.-__lll_robust_lock_wait
+
+
+	.globl	__lll_robust_timedlock_wait
+	.type	__lll_robust_timedlock_wait,@function
+	.hidden	__lll_robust_timedlock_wait
+	.align	5
+	cfi_startproc
+__lll_robust_timedlock_wait:
+	/* Check for a valid timeout value.  */
+	mov.l	@(4,r6), r1
+	mov.l	.L1g, r0
+	cmp/hs	r0, r1
+	bt	3f
+
+	cfi_remember_state
+
+	mov.l	r11, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r11, 0)
+	mov.l	r10, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r10, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset(4)
+	cfi_rel_offset (r8, 0)
+	mov	r7, r11
+	mov	r4, r10
+	mov	r6, r9
+	mov	r5, r8
+
+	/* Stack frame for the timespec and timeval structs.  */
+	add	#-8, r15
+	cfi_adjust_cfa_offset(8)
+
+1:
+	/* Get current time.  */
+	mov	r15, r4
+	mov	#0, r5
+	mov	#__NR_gettimeofday, r3
+	trapa	#0x12
+	SYSCALL_INST_PAD
+
+	/* Compute relative timeout.  */
+	mov.l	@(4,r15), r0
+	mov.w	.L1k, r1
+	dmulu.l	r0, r1		/* Microseconds to nanoseconds.  */
+	mov.l	@r9, r2
+	mov.l	@(4,r9), r3
+	mov.l	@r15, r0
+	sts	macl, r1
+	sub	r0, r2
+	clrt
+	subc	r1, r3
+	bf	4f
+	mov.l	.L1g, r1
+	add	r1, r3
+	add	#-1, r2
+4:
+	cmp/pz	r2
+	bf	8f		/* Time is already up.  */
+
+	mov.l	r2, @r15	/* Store relative timeout.  */
+	mov.l	r3, @(4,r15)
+
+	mov	r10, r6
+	mov.l	.L_FUTEX_WAITERS2, r0
+	or	r0, r6
+	shlr	r0		/* r0 = FUTEX_OWNER_DIED */
+	tst	r0, r4
+	bf/s	6f
+	 cmp/eq	r4, r6
+	bt	2f
+
+	CMPXCHG (r4, @r8, r6, r2)
+	bf/s	5f
+	 mov	#0, r5
+
+2:
+	mov	r8, r4
+	mov	r11, r5
+	LOAD_FUTEX_WAIT (r5, r0, r1)
+	mov	r10, r6
+	mov	r15, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	mov	r0, r5
+
+	mov.l	@r8, r2
+
+5:
+	tst	r2, r2
+	bf/s	7f
+	 mov	r2, r10
+
+	stc	gbr, r1
+	mov.w	.Ltidoff2, r2
+	add	r2, r1
+	mov.l	@r1, r4
+	mov	#0, r3
+	CMPXCHG (r3, @r8, r4, r10)
+	bf	7f
+	mov	#0, r0
+
+6:
+	cfi_remember_state
+	add	#8, r15
+	cfi_adjust_cfa_offset (-8)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	rts
+	 mov.l	@r15+, r11
+	/* Omit CFI for restore in delay slot.  */
+	cfi_restore_state
+
+7:
+	/* Check whether the time expired.  */
+	mov	#-ETIMEDOUT, r1
+	cmp/eq	r5, r1
+	bf	1b
+
+8:
+	bra	6b
+	 mov	#ETIMEDOUT, r0
+
+	cfi_restore_state
+3:
+	rts
+	 mov	#EINVAL, r0
+	cfi_endproc
+	.align	2
+.L_FUTEX_WAITERS2:
+	.long	FUTEX_WAITERS
+.L1g:
+	.long	1000000000
+.Ltidoff2:
+	.word	TID - TLS_PRE_TCB_SIZE
+.L1k:
+	.word	1000
+	.size	__lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
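
For robust mutexes the futex word carries the owning thread's TID plus the
two high bits defined at the top of this file.  Below is a C outline of
__lll_robust_lock_wait; it reuses futex_wait from the first sketch, tid
stands for the caller's thread ID (which the assembly reads through gbr at
offset TID - TLS_PRE_TCB_SIZE), and compare_and_swap wraps a GCC builtin in
place of the CMPXCHG sequence.

#define FUTEX_WAITERS    0x80000000
#define FUTEX_OWNER_DIED 0x40000000

/* Returns nonzero when *MEM was EXPECTED and has been set to NEWVAL.  */
static int
compare_and_swap (int *mem, int expected, int newval)
{
  return __atomic_compare_exchange_n (mem, &expected, newval, 0,
				      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static int
robust_lock_wait_sketch (int *futex, int val, int tid)
{
  for (;;)
    {
      if (val & FUTEX_OWNER_DIED)
	return val;		/* Caller recovers the dead owner's state.  */
      int contended = val | FUTEX_WAITERS;
      /* Publish the waiters bit before sleeping, so the unlocking
	 thread knows it must issue a FUTEX_WAKE.  */
      if (val == contended || compare_and_swap (futex, val, contended))
	futex_wait (futex, contended, NULL);
      val = *futex;
      if (val == 0 && compare_and_swap (futex, 0, tid))
	return 0;		/* Acquired: the word now holds our TID.  */
      if (val == 0)
	val = *futex;		/* Lost the race; reevaluate the word.  */
    }
}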
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S b/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S
new file mode 100644
index 0000000000..946b1d746f
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S
@@ -0,0 +1,239 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelbarrier.h>
+#include "lowlevel-atomic.h"
+
+	.text
+
+	.globl	pthread_barrier_wait
+	.type	pthread_barrier_wait,@function
+	.align	5
+	cfi_startproc
+pthread_barrier_wait:
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	mov	r4, r8
+
+	/* Get the mutex.  */
+	mov	#0, r3
+	mov	#1, r4
+	CMPXCHG (r3, @(MUTEX,r8), r4, r2)
+	bf	1f
+
+	/* One less waiter.  If this was the last one needed, wake
+	   everybody.  */
+2:
+	mov.l	@(LEFT,r8), r0
+	add	#-1, r0
+	mov.l	r0, @(LEFT,r8)
+	tst	r0, r0
+	bt	3f
+
+	/* There are more threads to come.  */
+	mov.l	@(CURR_EVENT,r8), r6
+
+	/* Release the mutex.  */
+	DEC (@(MUTEX,r8), r2)
+	tst	r2, r2
+	bf	6f
+7:
+	/* Wait for the remaining threads.  The call will return immediately
+	   if the CURR_EVENT memory has meanwhile been changed.  */
+	mov	r8, r4
+#if CURR_EVENT != 0
+	add	#CURR_EVENT, r4
+#endif
+#if FUTEX_WAIT == 0
+	mov.l	@(PRIVATE,r8), r5
+#else
+	mov	#FUTEX_WAIT, r5
+	mov.l	@(PRIVATE,r8), r0
+	or	r0, r5
+#endif
+	mov	#0, r7
+8:
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	/* Don't return on spurious wakeups.  The syscall does not change
+	   any register except r0 so there is no need to reload any of
+	   them.  */
+	mov.l	@(CURR_EVENT,r8), r0
+	cmp/eq	r0, r6
+	bt	8b
+
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count, unlock the object.  */
+	mov	#1, r3
+	mov.l	@(INIT_COUNT,r8), r4
+	XADD	(r3, @(LEFT,r8), r2, r5)
+	add	#-1, r4
+	cmp/eq	r2, r4
+	bf	10f
+
+	/* Release the mutex.  We cannot release the lock before
+	   waking the waiting threads since otherwise a new thread might
+	   arrive and get woken up, too.  */
+	DEC (@(MUTEX,r8), r2)
+	tst	r2, r2
+	bf	9f
+
+10:
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	rts
+	 mov	#0, r0		/* != PTHREAD_BARRIER_SERIAL_THREAD */
+	cfi_restore_state
+
+3:
+	/* The necessary number of threads arrived.  */
+	mov.l	@(CURR_EVENT,r8), r1
+	add	#1, r1
+	mov.l	r1, @(CURR_EVENT,r8)
+
+	/* Wake up all waiters.  The count is a signed number in the kernel
+	   so 0x7fffffff is the highest value.  */
+	mov.l	.Lall, r6
+	mov	r8, r4
+#if CURR_EVENT != 0
+	add	#CURR_EVENT, r4
+#endif
+	mov	#0, r7
+	mov	#FUTEX_WAKE, r5
+	mov.l	@(PRIVATE,r8), r0
+	or	r0, r5
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count, unlock the object.  */
+	mov	#1, r3
+	mov.l	@(INIT_COUNT,r8), r4
+	XADD	(r3, @(LEFT,r8), r2, r5)
+	add	#-1, r4
+	cmp/eq	r2, r4
+	bf	5f
+
+	/* Release the mutex.  */
+	DEC (@(MUTEX,r8), r2)
+	tst	r2, r2
+	bf	4f
+5:
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	rts
+	 mov	#-1, r0		/* == PTHREAD_BARRIER_SERIAL_THREAD */
+	cfi_restore_state
+
+1:
+	mov.l	@(PRIVATE,r8), r6
+	mov	#LLL_SHARED, r0
+	extu.b	r0, r0
+	xor	r0, r6
+	mov	r2, r4
+	mov	r8, r5
+	mov.l	.Lwait0, r1
+	bsrf	r1
+	 add	#MUTEX, r5
+.Lwait0b:
+	bra	2b
+	 nop
+
+4:
+	mov.l	@(PRIVATE,r8), r5
+	mov	#LLL_SHARED, r0
+	extu.b	r0, r0
+	xor	r0, r5
+	mov	r8, r4
+	mov.l	.Lwake0, r1
+	bsrf	r1
+	 add	#MUTEX, r4
+.Lwake0b:
+	bra	5b
+	 nop
+
+6:
+	mov	r6, r9
+	mov.l	@(PRIVATE,r8), r5
+	mov	#LLL_SHARED, r0
+	extu.b	r0, r0
+	xor	r0, r5
+	mov	r8, r4
+	mov.l	.Lwake1, r1
+	bsrf	r1
+	 add	#MUTEX, r4
+.Lwake1b:
+	bra	7b
+	 mov	r9, r6
+
+9:
+	mov	r6, r9
+	mov.l	@(PRIVATE,r8), r5
+	mov	#LLL_SHARED, r0
+	extu.b	r0, r0
+	xor	r0, r5
+	mov	r8, r4
+	mov.l	.Lwake2, r1
+	bsrf	r1
+	 add	#MUTEX, r4
+.Lwake2b:
+	bra	10b
+	 mov	r9, r6
+	cfi_endproc
+
+	.align	2
+.Lall:
+	.long	0x7fffffff
+.Lwait0:
+	.long	__lll_lock_wait-.Lwait0b
+.Lwake0:
+	.long	__lll_unlock_wake-.Lwake0b
+.Lwake1:
+	.long	__lll_unlock_wake-.Lwake1b
+.Lwake2:
+	.long	__lll_unlock_wake-.Lwake2b
+	.size	pthread_barrier_wait,.-pthread_barrier_wait
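
Stripped of the gUSA expansions and the PIC call shims, pthread_barrier_wait
above reduces to the outline below.  The struct mirrors the MUTEX, LEFT,
INIT_COUNT and CURR_EVENT offsets from <lowlevelbarrier.h>; the type and
helper names are illustrative, and lll_lock_wait_sketch/lll_unlock_sketch
come from the lowlevellock.S sketch above.

struct barrier_sketch
{
  int mutex;		/* MUTEX: internal lll lock guarding the object.  */
  int left;		/* LEFT: arrivals still missing this round.       */
  int init_count;	/* INIT_COUNT: count from pthread_barrier_init.   */
  int curr_event;	/* CURR_EVENT: round number; the wait futex.      */
};

/* The lll_lock/lll_unlock pair: inline fast path plus slow paths.  */
static void
lock (int *futex)
{
  int zero = 0;
  if (!__atomic_compare_exchange_n (futex, &zero, 1, 0,
				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    lll_lock_wait_sketch (futex, zero);
}

static void
unlock (int *futex)
{
  lll_unlock_sketch (futex);
}

static int
barrier_wait_sketch (struct barrier_sketch *b)
{
  int serial = 0;
  lock (&b->mutex);
  if (--b->left == 0)
    {
      /* Last arrival: open the next round and wake everybody.  The
	 count is signed in the kernel, so 0x7fffffff means "all".  */
      b->curr_event++;
      futex_wake (&b->curr_event, 0x7fffffff);
      serial = -1;	/* PTHREAD_BARRIER_SERIAL_THREAD */
    }
  else
    {
      int event = b->curr_event;
      unlock (&b->mutex);
      do		/* Don't return on spurious wakeups.  */
	futex_wait (&b->curr_event, event, NULL);
      while (b->curr_event == event);
    }
  /* Everybody re-increments LEFT on the way out.  The thread whose
     increment restores the initial count releases the mutex, which the
     last arrival still holds, making the barrier reusable.  */
  if (__atomic_fetch_add (&b->left, 1, __ATOMIC_RELAXED)
      == b->init_count - 1)
    unlock (&b->mutex);
  return serial;
}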
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S
new file mode 100644
index 0000000000..89b32ccf0e
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S
@@ -0,0 +1,292 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <kernel-features.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
+#include "lowlevel-atomic.h"
+
+	.text
+
+	/* int pthread_cond_broadcast (pthread_cond_t *cond) */
+	.globl	__pthread_cond_broadcast
+	.type	__pthread_cond_broadcast, @function
+	.align	5
+	cfi_startproc
+__pthread_cond_broadcast:
+	mov.l   r10, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r10, 0)
+	mov.l   r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	mov	r4, r8
+
+	/* Get internal lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if cond_lock != 0
+	CMPXCHG (r3, @(cond_lock,r8), r4, r2)
+#else
+	CMPXCHG (r3, @r8, r4, r2)
+#endif
+	bf	1f
+2:
+	mov.l	@(total_seq+4,r8),r0
+	mov.l	@(total_seq,r8),r1
+	mov.l	@(wakeup_seq+4,r8), r2
+	cmp/hi	r2, r0
+	bt	3f
+	cmp/hi	r0, r2
+	bt	4f
+	mov.l	@(wakeup_seq,r8), r2
+	cmp/hi	r2, r1
+	bf	4f
+
+3:
+	/* Cause all currently waiting threads to recognize they are
+	   woken up.  */
+	mov.l	r1, @(wakeup_seq,r8)
+	mov.l	r0, @(wakeup_seq+4,r8)
+	mov.l	r1, @(woken_seq,r8)
+	mov.l	r0, @(woken_seq+4,r8)
+	mov.l	@(broadcast_seq,r8), r2
+	add	#1, r2
+	mov.l	r2, @(broadcast_seq,r8)
+	add	r1, r1
+	mov	r1, r10
+	mov.l	r10, @(cond_futex,r8)
+
+	/* Get the address of the mutex used.  */
+	mov.l	@(dep_mutex,r8), r9
+
+	/* Unlock.  */
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bf	7f
+
+8:
+	/* Don't use requeue for pshared condvars.  */
+	mov	#-1, r0
+	cmp/eq	r0, r9
+	mov	r8, r4
+	bt/s	9f
+	 add	#cond_futex, r4
+
+	/* XXX: The kernel only supports FUTEX_CMP_REQUEUE to the same
+	   type of futex (private or shared, respectively).  */
+	mov.l	@(MUTEX_KIND,r9), r0
+	tst	#(PI_BIT|PS_BIT), r0
+	bf	9f
+
+	/* Wake up all threads.  */
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_CMP_REQUEUE, r0
+	or	r0, r5
+#endif
+	mov	#1, r6
+	mov	#-1, r7
+	shlr	r7		/* r7 = 0x7fffffff */
+	mov	r9, r0
+# if MUTEX_FUTEX != 0
+	add	#MUTEX_FUTEX, r0
+# endif
+	mov	r10, r1
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x16
+	SYSCALL_INST_PAD
+
+	/* For any kind of error, which is mainly EAGAIN, we try again
+	   with WAKE.  The general test also covers running on old
+	   kernels.  */
+	mov	r0, r1
+	mov	#-12, r2
+	shad	r2, r1
+	not	r1, r1
+	tst	r1, r1
+	mov	r8, r4
+	bt/s	9f
+	 add	#cond_futex, r4
+
+10:
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	rts
+	 mov	#0, r0
+	cfi_restore_state
+
+4:
+	/* Unlock.  */
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bf	5f
+6:
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	rts
+	 mov	#0, r0
+	cfi_restore_state
+
+1:
+	/* Initial locking failed.  */
+	mov	r8, r5
+#if cond_lock != 0
+	add	#cond_lock, r5
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r6
+	mov	#LLL_SHARED, r6
+99:
+	extu.b	r6, r6
+	mov.l	.Lwait5, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait5b:
+	bra	2b
+	 nop
+
+5:
+	/* Unlock in loop requires wakeup.  */
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lwake5, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lwake5b:
+	bra	6b
+	 nop
+
+7:
+	/* Unlock in loop requires wakeup.  */
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov	#-1, r0
+	cmp/eq	r0, r9
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lwake6, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lwake6b:
+	bra	8b
+	 nop
+
+9:
+	mov	#-1, r0
+	cmp/eq	r0, r9
+	bt/s	99f
+	 mov	#FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAKE, r0
+	or	r0, r5
+#endif
+99:
+	mov	#-1, r6
+	shlr	r6		/* r6 = 0x7fffffff */
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	bra	10b
+	 nop
+	cfi_endproc
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+
+	.align	2
+.Lwait5:
+	.long	__lll_lock_wait-.Lwait5b
+.Lwake5:
+	.long	__lll_unlock_wake-.Lwake5b
+.Lwake6:
+	.long	__lll_unlock_wake-.Lwake6b
+	.size	__pthread_cond_broadcast, .-__pthread_cond_broadcast
+versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast,
+		  GLIBC_2_3_2)
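
Condensed, the broadcast path above works like this: under the internal
lock, if any waiter has not yet been woken (total_seq > wakeup_seq), every
sequence counter jumps to total_seq and broadcast_seq is bumped so blocked
waiters can detect the round change; the sleepers are then moved with
FUTEX_CMP_REQUEUE, which wakes one and requeues the rest directly onto the
mutex futex, with a plain wake-all as the fallback for pshared condvars, PI
mutexes, syscall errors and old kernels.  A sketch with illustrative types
whose fields shadow the assembly's offsets; lock/unlock and requeue_ok are
stand-ins (requeue_ok for the "not pshared, not PI mutex" test).

struct mutex_sketch { int lock; };	/* The mutex's own futex word.  */

struct cond_sketch
{
  int cond_lock;			/* Internal lll lock.            */
  unsigned int cond_futex;		/* Futex the waiters sleep on.   */
  unsigned long long total_seq, wakeup_seq, woken_seq;
  unsigned int broadcast_seq;
  struct mutex_sketch *dep_mutex;	/* (void *) -1 when pshared.     */
};

static int
cond_broadcast_sketch (struct cond_sketch *c)
{
  lock (&c->cond_lock);
  if (c->total_seq > c->wakeup_seq)
    {
      /* Mark every current waiter as both signalled and woken.  */
      c->wakeup_seq = c->woken_seq = c->total_seq;
      c->broadcast_seq++;
      c->cond_futex = 2 * (unsigned int) c->total_seq;
      unlock (&c->cond_lock);

      /* Wake one waiter and requeue the rest onto the mutex, so they
	 do not all stampede for it at once.  */
      if (requeue_ok (c)
	  && syscall (SYS_futex, &c->cond_futex, FUTEX_CMP_REQUEUE, 1,
		      0x7fffffff, &c->dep_mutex->lock, c->cond_futex) >= 0)
	return 0;
      futex_wake (&c->cond_futex, 0x7fffffff);	/* Fallback: wake all.  */
    }
  else
    unlock (&c->cond_lock);
  return 0;
}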
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S
new file mode 100644
index 0000000000..865a9eeb52
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S
@@ -0,0 +1,201 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <kernel-features.h>
+#include <pthread-errnos.h>
+#include "lowlevel-atomic.h"
+
+	.text
+
+	/* int pthread_cond_signal (pthread_cond_t *cond) */
+	.globl	__pthread_cond_signal
+	.type	__pthread_cond_signal, @function
+	.align	5
+	cfi_startproc
+__pthread_cond_signal:
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	mov	r4, r8
+
+	/* Get internal lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if cond_lock != 0
+	CMPXCHG (r3, @(cond_lock,r8), r4, r2)
+#else
+	CMPXCHG (r3, @r8, r4, r2)
+#endif
+	bf	1f
+2:
+	mov.l	@(total_seq+4,r8),r0
+	mov.l	@(total_seq,r8),r1
+	mov.l	@(wakeup_seq+4,r8), r2
+	cmp/hi	r2, r0
+	bt	3f
+	cmp/hi	r0, r2
+	bt	4f
+	mov.l	@(wakeup_seq,r8), r2
+	cmp/hi	r2, r1
+	bf	4f
+
+3:
+	/* Bump the wakeup number.  */
+	mov	#1, r2
+	mov	#0, r3
+	clrt
+	mov.l	@(wakeup_seq,r8),r0
+	mov.l	@(wakeup_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(wakeup_seq,r8)
+	mov.l	r1,@(wakeup_seq+4,r8)
+	mov.l	@(cond_futex,r8),r0
+	add	r2, r0
+	mov.l	r0,@(cond_futex,r8)
+
+	/* Wake up one thread.  */
+	mov	r8, r4
+	add	#cond_futex, r4
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bt/s	99f
+	 mov	#FUTEX_WAKE_OP, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAKE_OP, r0
+	or	r0, r5
+#endif
+99:
+	mov	#1, r6
+	mov	#0, r7
+	mov	r8, r0
+	add	#cond_lock, r0
+	mov.l	.Lfutexop, r1
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	/* For any kind of error, we try again with WAKE.
+	   The general test also covers running on old kernels.  */
+	mov	r0, r1
+	mov	#-12, r2
+	shad	r2, r1
+	not	r1, r1
+	tst	r1, r1
+	bt	7f
+
+6:
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	rts
+	 mov	#0, r0
+	cfi_restore_state
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+	.align	2
+.Lfutexop:
+	.long	FUTEX_OP_CLEAR_WAKE_IF_GT_ONE
+
+7:
+	/* r5 should be either FUTEX_WAKE_OP or
+	   FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG from the previous syscall.  */
+	mov	#(FUTEX_WAKE ^ FUTEX_WAKE_OP), r0
+	xor	r0, r5
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+4:
+	/* Unlock.  */
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bt	6b
+
+5:
+	/* Unlock in loop requires wakeup.  */
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lwake4, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lwake4b:
+	bra	6b
+	 nop
+
+1:
+	/* Initial locking failed.  */
+	mov	r8, r5
+#if cond_lock != 0
+	add	#cond_lock, r5
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r6
+	mov	#LLL_SHARED, r6
+99:
+	extu.b	r6, r6
+	mov.l	.Lwait4, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait4b:
+	bra	2b
+	 nop
+	cfi_endproc
+
+	.align	2
+.Lwait4:
+	.long	__lll_lock_wait-.Lwait4b
+.Lwake4:
+	.long	__lll_unlock_wake-.Lwake4b
+	.size	__pthread_cond_signal, .-__pthread_cond_signal
+versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
+		  GLIBC_2_3_2)
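
The signal path is lighter.  It bumps wakeup_seq and cond_futex under the
internal lock, then lets a single trap do double duty: FUTEX_WAKE_OP wakes
one waiter on cond_futex and atomically clears cond_lock, which is what the
FUTEX_OP_CLEAR_WAKE_IF_GT_ONE operand from lowlevellock.h encodes, so no
separate unlock is needed.  On kernels without WAKE_OP it falls back to a
plain wake plus an explicit unlock.  Same illustrative types as the
broadcast sketch.

static int
cond_signal_sketch (struct cond_sketch *c)
{
  lock (&c->cond_lock);
  if (c->total_seq > c->wakeup_seq)
    {
      c->wakeup_seq++;
      c->cond_futex++;
      /* Wake one waiter and release cond_lock in a single syscall;
	 uaddr2 is the lock word, val3 encodes "set to 0, wake its
	 waiters if the old value was greater than 1".  */
      if (syscall (SYS_futex, &c->cond_futex, FUTEX_WAKE_OP, 1, 0,
		   &c->cond_lock, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE) >= 0)
	return 0;
      futex_wake (&c->cond_futex, 1);	/* Old kernel: plain wake ...  */
    }
  unlock (&c->cond_lock);		/* ... and an explicit unlock.  */
  return 0;
}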
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S
new file mode 100644
index 0000000000..94b99e724b
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S
@@ -0,0 +1,769 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <tcb-offsets.h>
+#include "lowlevel-atomic.h"
+
+	.text
+
+/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
+			       const struct timespec *abstime)  */
+	.globl	__pthread_cond_timedwait
+	.type	__pthread_cond_timedwait, @function
+	.align	5
+	cfi_startproc
+__pthread_cond_timedwait:
+.LSTARTCODE:
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_absptr, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_absptr, .LexceptSTART)
+#endif
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r10, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r10, 0)
+	mov.l	r11, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r11, 0)
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	mov.l	r13, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r13, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	add	#-64, r15
+	cfi_adjust_cfa_offset (64)
+
+	mov	r4, r8
+	mov	r5, r9
+	mov	r6, r13
+#ifdef PIC
+	mova	.Lgot0, r0
+	mov.l	.Lgot0, r12
+	add	r0, r12
+#endif
+
+	mov.l	@(4,r13), r0
+	mov.l	.L1g, r1
+	cmp/hs	r1, r0
+	bf	0f
+	bra	18f
+	 mov	#EINVAL, r0
+0:
+	/* Get internal lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if cond_lock != 0
+	CMPXCHG (r3, @(cond_lock,r8), r4, r2)
+#else
+	CMPXCHG (r3, @r8, r4, r2)
+#endif
+	bt	2f
+	bra	1f
+	 nop
+#ifdef PIC
+	.align	2
+.Lgot0:
+	.long	_GLOBAL_OFFSET_TABLE_
+#endif
+
+2:
+	/* Store the reference to the mutex.  If there is already a
+	   different value in there, this is a bad user bug.  */
+	mov.l	@(dep_mutex,r8),r0
+	cmp/eq	#-1, r0
+	bt	17f
+	mov.l	r9, @(dep_mutex,r8)
+
+17:
+	/* Unlock the mutex.  */
+	mov.l	.Lmunlock1, r1
+	mov	#0, r5
+	bsrf	r1
+	 mov	r9, r4
+.Lmunlock1b:
+
+	tst	r0, r0
+	bt	0f
+	bra	16f
+	 nop
+0:
+	mov	#1, r2
+	mov	#0, r3
+
+	clrt
+	mov.l	@(total_seq,r8),r0
+	mov.l	@(total_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(total_seq,r8)
+	mov.l	r1,@(total_seq+4,r8)
+	mov.l	@(cond_futex,r8), r0
+	add	r2, r0
+	mov.l	r0, @(cond_futex,r8)
+	mov	#(1 << nwaiters_shift), r2
+	mov.l	@(cond_nwaiters,r8), r0
+	add	r2, r0
+	mov.l	r0, @(cond_nwaiters,r8)
+
+	/* Get and store current wakeup_seq value.  */
+	mov.l	@(wakeup_seq,r8), r10
+	mov.l	@(wakeup_seq+4,r8), r11
+	mov.l	@(broadcast_seq,r8), r0
+	mov.l	r0, @(4,r15)
+
+8:
+	/* Get current time.  */
+#ifdef __NR_clock_gettime
+	/* Get the clock number.	 */
+	mov.l	@(cond_nwaiters,r8), r4
+	mov	#((1 << nwaiters_shift) - 1), r0
+	and	r0, r4
+	/* Only clocks 0 and 1 are allowed.  Both are handled in the
+	   kernel.  */
+	mov	r15, r5
+	add	#16, r5
+	mov.w	.L__NR_clock_gettime, r3
+	trapa	#0x12
+	SYSCALL_INST_PAD
+
+	/* Compute relative timeout.  */
+	mov.l	@r13, r2
+	mov.l	@(4,r13), r3
+	mov.l	@(16,r15), r0
+	bra	0f
+	 mov.l	@(20,r15), r1
+.L__NR_clock_gettime:
+	.word	__NR_clock_gettime
+
+0:
+#else
+	mov	r15, r4
+	add	#16, r4
+	mov	#0, r5
+	mov	#__NR_gettimeofday, r3
+	trapa	#0x12
+	SYSCALL_INST_PAD
+
+	/* Compute relative timeout.  */
+	mov.l	@(20,r15), r0
+	mov.w	.L1k, r1
+	dmulu.l	r0, r1		/* Microseconds to nanoseconds.  */
+	mov.l	@r13, r2
+	mov.l	@(4,r13), r3
+	mov.l	@(16,r15), r0
+	sts	macl, r1
+#endif
+	sub	r0, r2
+	clrt
+	subc	r1, r3
+	bf	12f
+	mov.l	.L1g, r1
+	add	r1, r3
+	add	#-1, r2
+12:
+	mov	#-ETIMEDOUT, r1
+	mov.l	r1, @(12,r15)
+	cmp/pz	r2
+	bf	6f		/* Time is already up.  */
+
+	/* Store relative timeout.  */
+	mov.l	r2, @(16,r15)
+	mov.l	r3, @(20,r15)
+	mov.l	@(cond_futex,r8), r1
+	mov.l	r1, @(8,r15)
+
+	/* Unlock.  */
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bt	4f
+	bra	3f
+	 nop
+4:
+.LcleanupSTART:
+	mov.l	.Lenable1, r1
+	bsrf	r1
+	 nop
+.Lenable1b:
+	mov.l	r0, @r15
+
+	mov	r15, r7
+	add	#16, r7
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bt/s	99f
+	 mov	#FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAIT, r0
+	or	r0, r5
+#endif
+99:
+	mov.l	@(8,r15), r6
+	mov	r8, r4
+	add	#cond_futex, r4
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	mov.l	r0, @(12,r15)
+
+	mov.l	.Ldisable1, r1
+	bsrf	r1
+	 mov.l	@r15, r4
+.Ldisable1b:
+.LcleanupEND:
+
+	/* Lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if cond_lock != 0
+	CMPXCHG (r3, @(cond_lock,r8), r4, r2)
+#else
+	CMPXCHG (r3, @r8, r4, r2)
+#endif
+	bf	5f
+6:
+	mov.l	@(broadcast_seq,r8), r0
+	mov.l	@(4,r15), r1
+	cmp/eq	r0, r1
+	bf	23f
+
+	mov.l	@(woken_seq,r8), r0
+	mov.l	@(woken_seq+4,r8), r1
+
+	mov.l	@(wakeup_seq,r8), r2
+	mov.l	@(wakeup_seq+4,r8), r3
+
+	cmp/eq	r3, r11
+	bf	7f
+	cmp/eq	r2, r10
+	bt	15f
+7:
+	cmp/eq	r1, r3
+	bf	9f
+	cmp/eq	r0, r2
+	bf	9f
+15:
+	mov.l	@(12,r15),r0
+	cmp/eq	#-ETIMEDOUT, r0
+	bf	8b
+
+	mov	#1, r2
+	mov	#0, r3
+
+	clrt
+	mov.l	@(wakeup_seq,r8),r0
+	mov.l	@(wakeup_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(wakeup_seq,r8)
+	mov.l	r1,@(wakeup_seq+4,r8)
+	mov.l	@(cond_futex,r8),r0
+	add	r2, r0
+	mov.l	r0,@(cond_futex,r8)
+	mov	#ETIMEDOUT, r0
+	bra	14f
+	 mov.l	r0, @(24,r15)
+
+23:
+	mov	#0, r0
+	bra	24f
+	 mov.l	r0, @(24,r15)
+
+9:
+	mov	#0, r0
+	mov.l	r0, @(24,r15)
+14:
+	mov	#1, r2
+	mov	#0, r3
+
+	clrt
+	mov.l	@(woken_seq,r8),r0
+	mov.l	@(woken_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(woken_seq,r8)
+	mov.l	r1,@(woken_seq+4,r8)
+
+24:
+	mov	#(1 << nwaiters_shift), r2
+	mov.l	@(cond_nwaiters,r8),r0
+	sub	r2, r0
+	mov.l	r0,@(cond_nwaiters,r8)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	mov.l	@(total_seq,r8),r0
+	mov.l	@(total_seq+4,r8),r1
+	and	r1, r0
+	not	r0, r0
+	cmp/eq	#0, r0
+	bf/s	25f
+	 mov	#((1 << nwaiters_shift) - 1), r1
+	not	r1, r1
+	mov.l	@(cond_nwaiters,r8),r0
+	tst	r1, r0
+	bf	25f
+
+	mov	r8, r4
+	add	#cond_nwaiters, r4
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bt/s	99f
+	 mov	#FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAKE, r0
+	or	r0, r5
+#endif
+99:
+	mov	#1, r6
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+25:
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bf	10f
+
+11:
+	mov	r9, r4
+	mov.l	.Lmlocki1, r1
+	bsrf	r1
+	 nop
+.Lmlocki1b:
+
+	/* We return the result of the mutex_lock operation if it failed.  */
+	tst	r0, r0
+	bf	18f
+	mov.l	@(24,r15), r0
+
+18:
+	cfi_remember_state
+	add	#64, r15
+	cfi_adjust_cfa_offset (-64)
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r13
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r13)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	mov.l	@r15+, r11
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r11)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	rts
+	 mov.l	@r15+, r8
+	/* Omit CFI for restore in delay slot.  */
+	cfi_restore_state
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+.L1k:
+	.word	1000
+	.align	2
+.Lmunlock1:
+	.long	__pthread_mutex_unlock_usercnt-.Lmunlock1b
+.Lenable1:
+	.long	__pthread_enable_asynccancel-.Lenable1b
+.Ldisable1:
+	.long	__pthread_disable_asynccancel-.Ldisable1b
+.Lmlocki1:
+	.long	__pthread_mutex_cond_lock-.Lmlocki1b
+.L1g:
+	.long	1000000000
+
+1:
+	/* Initial locking failed.  */
+	mov	r8, r5
+#if cond_lock != 0
+	add	#cond_lock, r5
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r6
+	mov	#LLL_SHARED, r6
+99:
+	extu.b	r6, r6
+	mov.l	.Lwait2, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait2b:
+	bra	2b
+	 nop
+
+3:
+	/* Unlock in loop requires wakeup.  */
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lmwait2, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lmwait2b:
+	bra	4b
+	 nop
+
+5:
+	/* Locking in loop failed.  */
+	mov	r8, r5
+#if cond_lock != 0
+	add	#cond_lock, r5
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r6
+	mov	#LLL_SHARED, r6
+99:
+	extu.b	r6, r6
+	mov.l	.Lwait3, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait3b:
+	bra	6b
+	 nop
+
+10:
+	/* Unlock after loop requires wakeup.  */
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lmwait3, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lmwait3b:
+	bra	11b
+	 nop
+
+16:
+	/* The initial unlocking of the mutex failed.  */
+	mov.l	r0, @(24,r15)
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bf	17f
+
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lmwait4, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lmwait4b:
+17:
+	bra	18b
+	 mov.l	@(24,r15), r0
+
+	.align	2
+.Lwait2:
+	.long	__lll_lock_wait-.Lwait2b
+.Lmwait2:
+	.long	__lll_unlock_wake-.Lmwait2b
+.Lwait3:
+	.long	__lll_lock_wait-.Lwait3b
+.Lmwait3:
+	.long	__lll_unlock_wake-.Lmwait3b
+.Lmwait4:
+	.long	__lll_unlock_wake-.Lmwait4b
+	.size	__pthread_cond_timedwait, .-__pthread_cond_timedwait
+versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
+		  GLIBC_2_3_2)
+
+
+	.type	__condvar_tw_cleanup, @function
+__condvar_tw_cleanup:
+	mov	r4, r11
+
+	/* Get internal lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if cond_lock != 0
+	CMPXCHG (r3, @(cond_lock,r8), r4, r2)
+#else
+	CMPXCHG (r3, @r8, r4, r2)
+#endif
+	bt	1f
+	 nop
+
+	mov	r8, r5
+#if cond_lock != 0
+	add	#cond_lock, r5
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r6
+	mov	#LLL_SHARED, r6
+99:
+	extu.b	r6, r6
+	mov.l	.Lwait5, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait5b:
+
+1:
+	mov.l	@(broadcast_seq,r8), r0
+	mov.l	@(4,r15), r1
+	cmp/eq	r0, r1
+	bf	3f
+
+	mov	#1, r2
+	mov	#0, r3
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case, the thread was woken and
+	   then canceled; in that case we ignore the signal.  */
+	mov.l	@(total_seq+4,r8), r0
+	mov.l	@(wakeup_seq+4,r8), r1
+	cmp/hi	r1, r0
+	bt/s	6f
+	 cmp/hi	r0, r1
+	bt	7f
+	mov.l	@(total_seq,r8), r0
+	mov.l	@(wakeup_seq,r8), r1
+	cmp/hs	r0, r1
+	bt	7f
+
+6:
+	clrt
+	mov.l	@(wakeup_seq,r8),r0
+	mov.l	@(wakeup_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(wakeup_seq,r8)
+	mov.l	r1,@(wakeup_seq+4,r8)
+	mov.l	@(cond_futex,r8),r0
+	add	r2, r0
+	mov.l	r0,@(cond_futex,r8)
+
+7:
+	clrt
+	mov.l	@(woken_seq,r8),r0
+	mov.l	@(woken_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(woken_seq,r8)
+	mov.l	r1,@(woken_seq+4,r8)
+
+3:
+	mov	#(1 << nwaiters_shift), r2
+	mov.l	@(cond_nwaiters,r8),r0
+	sub	r2, r0
+	mov.l	r0,@(cond_nwaiters,r8)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	mov	#0, r10
+	mov.l	@(total_seq,r8),r0
+	mov.l	@(total_seq+4,r8),r1
+	and	r1, r0
+	not	r0, r0
+	cmp/eq	#0, r0
+	bf/s	4f
+	 mov	#((1 << nwaiters_shift) - 1), r1
+	not	r1, r1
+	mov.l	@(cond_nwaiters,r8),r0
+	tst	r1, r0
+	bf	4f
+
+	mov	r8, r4
+	add	#cond_nwaiters, r4
+	mov	#FUTEX_WAKE, r5
+	mov	#1, r6
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	mov	#1, r10
+
+4:
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bt	2f
+
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lmwait5, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lmwait5b:
+
+2:
+	/* Wake up all waiters to make sure no signal gets lost.  */
+	tst	r10, r10
+	bf/s	5f
+	 mov	r8, r4
+	add	#cond_futex, r4
+	mov	#FUTEX_WAKE, r5
+	mov	#-1, r6
+	shlr	r6		/* r6 = 0x7fffffff */
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+5:
+	mov.l	.Lmlocki5, r1
+	bsrf	r1
+	 mov	r9, r4
+.Lmlocki5b:
+
+.LcallUR:
+	mov.l	.Lresume, r1
+#ifdef PIC
+	add	r12, r1
+#endif
+	jsr	@r1
+	 mov	r11, r4
+	sleep
+
+	.align	2
+.Lwait5:
+	.long	__lll_lock_wait-.Lwait5b
+.Lmwait5:
+	.long	__lll_unlock_wake-.Lmwait5b
+.Lmlocki5:
+	.long	__pthread_mutex_cond_lock-.Lmlocki5b
+.Lresume:
+#ifdef PIC
+	.long	_Unwind_Resume@GOTOFF
+#else
+	.long	_Unwind_Resume
+#endif
+.LENDCODE:
+	cfi_endproc
+	.size	__condvar_tw_cleanup, .-__condvar_tw_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			! @LPStart format (omit)
+	.byte	DW_EH_PE_omit			! @TType format (omit)
+	.byte	DW_EH_PE_sdata4			! call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.ualong	.LcleanupSTART-.LSTARTCODE
+	.ualong	.LcleanupEND-.LcleanupSTART
+	.ualong	__condvar_tw_cleanup-.LSTARTCODE
+	.uleb128  0
+	.ualong	.LcallUR-.LSTARTCODE
+	.ualong	.LENDCODE-.LcallUR
+	.ualong	0
+	.uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden DW.ref.__gcc_personality_v0
+	.weak   DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align 4
+	.type   DW.ref.__gcc_personality_v0, @object
+	.size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+	.long   __gcc_personality_v0
+#endif
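
[Editor's note: the timed-wait code above turns the caller's absolute deadline into the relative timeout that FUTEX_WAIT expects: subtract the current time, borrow a second if the nanosecond field underflows, and fail with ETIMEDOUT if the result is negative.  A rough C rendering of the gettimeofday fallback path follows; compute_reltime is an invented name, and when __NR_clock_gettime is available the code instead calls clock_gettime with the clock id kept in the low bits of cond_nwaiters.]

    #include <errno.h>
    #include <time.h>
    #include <sys/time.h>

    static int
    compute_reltime (const struct timespec *abstime, struct timespec *rt)
    {
      struct timeval tv;
      gettimeofday (&tv, NULL);

      rt->tv_sec = abstime->tv_sec - tv.tv_sec;
      rt->tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;  /* us -> ns  */
      if (rt->tv_nsec < 0)
        {
          rt->tv_nsec += 1000000000;	/* Borrow one second (.L1g).  */
          --rt->tv_sec;
        }
      return rt->tv_sec < 0 ? ETIMEDOUT : 0;	/* Deadline already past?  */
    }
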
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S
new file mode 100644
index 0000000000..ad01966fc6
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S
@@ -0,0 +1,687 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <lowlevellock.h>
+#include <lowlevelcond.h>
+#include <tcb-offsets.h>
+#include <kernel-features.h>
+#include "lowlevel-atomic.h"
+
+	.text
+
+/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
+	.globl	__pthread_cond_wait
+	.type	__pthread_cond_wait, @function
+	.align	5
+	cfi_startproc
+__pthread_cond_wait:
+.LSTARTCODE:
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_absptr, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_absptr, .LexceptSTART)
+#endif
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r10, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r10, 0)
+	mov.l	r11, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r11, 0)
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	add	#-48, r15
+	cfi_adjust_cfa_offset (48)
+
+	mov	r4, r8
+	mov	r5, r9
+#ifdef PIC
+	mova	.Lgot0, r0
+	mov.l	.Lgot0, r12
+	add	r0, r12
+#endif
+
+	/* Get internal lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if cond_lock != 0
+	CMPXCHG (r3, @(cond_lock,r8), r4, r2)
+#else
+	CMPXCHG (r3, @r8, r4, r2)
+#endif
+	bt	2f
+	bra	1f
+	 nop
+#ifdef PIC
+	.align	2
+.Lgot0:
+	.long	_GLOBAL_OFFSET_TABLE_
+#endif
+
+2:
+	/* Store the reference to the mutex.  If there is already a
+	   different value in there, this is a bad user bug.  */
+	mov.l	@(dep_mutex,r8),r0
+	cmp/eq	#-1, r0
+	bt	15f
+	mov.l	r9, @(dep_mutex,r8)
+
+15:
+	/* Unlock the mutex.  */
+	mov.l	.Lmunlock0, r1
+	mov	#0, r5
+	bsrf	r1
+	 mov	r9, r4
+.Lmunlock0b:
+
+	tst	r0, r0
+	bt	0f
+	bra	12f
+	 nop
+0:
+	mov	#1, r2
+	mov	#0, r3
+
+	clrt
+	mov.l	@(total_seq,r8),r0
+	mov.l	@(total_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(total_seq,r8)
+	mov.l	r1,@(total_seq+4,r8)
+	mov.l	@(cond_futex,r8),r0
+	add	r2, r0
+	mov.l	r0,@(cond_futex,r8)
+	mov	#(1 << nwaiters_shift), r2
+	mov.l	@(cond_nwaiters,r8), r0
+	add	r2, r0
+	mov.l	r0, @(cond_nwaiters,r8)
+
+	/* Get and store current wakeup_seq value.  */
+	mov.l	@(wakeup_seq,r8), r10
+	mov.l	@(wakeup_seq+4,r8), r11
+	mov.l	@(broadcast_seq,r8), r0
+	mov.l	r0, @(4,r15)
+
+8:
+	mov.l	@(cond_futex,r8),r0
+	mov.l	r0, @(8,r15)
+
+	/* Unlock.  */
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bf	3f
+4:
+.LcleanupSTART:
+	mov.l	.Lenable0, r1
+	bsrf	r1
+	 nop
+.Lenable0b:
+	mov.l	r0, @r15
+
+	mov	#0, r7
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bt/s	99f
+	 mov	#FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff0, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAIT, r0
+	or	r0, r5
+#endif
+99:
+	mov.l	@(8,r15), r6
+	mov	r8, r4
+	add	#cond_futex, r4
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	mov.l	.Ldisable0, r1
+	bsrf	r1
+	 mov.l	@r15, r4
+.Ldisable0b:
+.LcleanupEND:
+
+	/* Lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if cond_lock != 0
+	CMPXCHG (r3, @(cond_lock,r8), r4, r2)
+#else
+	CMPXCHG (r3, @r8, r4, r2)
+#endif
+	bf	5f
+6:
+	mov.l	@(broadcast_seq,r8), r0
+	mov.l	@(4,r15), r1
+	cmp/eq	r0, r1
+	bf	16f
+
+	mov.l	@(woken_seq,r8), r0
+	mov.l	@(woken_seq+4,r8), r1
+
+	mov.l	@(wakeup_seq,r8), r2
+	mov.l	@(wakeup_seq+4,r8), r3
+
+	cmp/eq	r3, r11
+	bf	7f
+	cmp/eq	r2, r10
+	bt	8b
+7:
+	cmp/eq	r1, r3
+	bf	9f
+	cmp/eq	r0, r2
+	bt	8b
+9:
+	mov	#1, r2
+	mov	#0, r3
+
+	clrt
+	mov.l	@(woken_seq,r8),r0
+	mov.l	@(woken_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(woken_seq,r8)
+	mov.l	r1,@(woken_seq+4,r8)
+
+16:
+	mov	#(1 << nwaiters_shift), r2
+	mov.l	@(cond_nwaiters,r8),r0
+	sub	r2, r0
+	mov.l	r0,@(cond_nwaiters,r8)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	mov.l	@(total_seq,r8),r0
+	mov.l	@(total_seq+4,r8),r1
+	and	r1, r0
+	not	r0, r0
+	cmp/eq	#0, r0
+	bf/s	17f
+	 mov	#((1 << nwaiters_shift) - 1), r1
+	not	r1, r1
+	mov.l	@(cond_nwaiters,r8),r0
+	tst	r1, r0
+	bf	17f
+
+	mov	r8, r4
+	add	#cond_nwaiters, r4
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bt/s	99f
+	 mov	#FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff0, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAKE, r0
+	or	r0, r5
+#endif
+99:
+	mov	#1, r6
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+17:
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bf	10f
+
+11:
+	mov.l	.Lmlocki0, r1
+	bsrf	r1
+	 mov	r9, r4
+.Lmlocki0b:
+	/* We return the result of the mutex_lock operation.  */
+
+14:
+	cfi_remember_state
+	add	#48, r15
+	cfi_adjust_cfa_offset (-48)
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	mov.l	@r15+, r11
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r11)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	rts
+	 mov.l	@r15+, r8
+	/* Omit CFI for restore in delay slot.  */
+	cfi_restore_state
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff0:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+	.align	2
+.Lmunlock0:
+	.long	__pthread_mutex_unlock_usercnt-.Lmunlock0b
+.Lenable0:
+	.long	__pthread_enable_asynccancel-.Lenable0b
+.Ldisable0:
+	.long	__pthread_disable_asynccancel-.Ldisable0b
+.Lmlocki0:
+	.long	__pthread_mutex_cond_lock-.Lmlocki0b
+
+1:
+	/* Initial locking failed.  */
+	mov	r8, r5
+#if cond_lock != 0
+	add	#cond_lock, r5
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r6
+	mov	#LLL_SHARED, r6
+99:
+	extu.b	r6, r6
+	mov.l	.Lwait0, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait0b:
+	bra	2b
+	 nop
+3:
+	/* Unlock in loop requires wakeup.  */
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lwake0, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lwake0b:
+	bra	4b
+	 nop
+
+5:
+	/* Locking in loop failed.  */
+	mov	r8, r5
+#if cond_lock != 0
+	add	#cond_lock, r5
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r6
+	mov	#LLL_SHARED, r6
+99:
+	extu.b	r6, r6
+	mov.l	.Lwait1, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait1b:
+	bra	6b
+	 nop
+
+10:
+	/* Unlock after loop requires wakeup.  */
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lwake1, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lwake1b:
+	bra	11b
+	 nop
+
+12:
+	/* The initial unlocking of the mutex failed.  */
+	mov.l	r0, @(12,r15)
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bf	13f
+
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lwake2, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lwake2b:
+
+13:
+	bra	14b
+	 mov.l	@(12,r15), r0
+
+	.align	2
+.Lwait0:
+	.long	__lll_lock_wait-.Lwait0b
+.Lwake0:
+	.long	__lll_unlock_wake-.Lwake0b
+.Lwait1:
+	.long	__lll_lock_wait-.Lwait1b
+.Lwake1:
+	.long	__lll_unlock_wake-.Lwake1b
+.Lwake2:
+	.long	__lll_unlock_wake-.Lwake2b
+	.size	__pthread_cond_wait, .-__pthread_cond_wait
+versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
+		  GLIBC_2_3_2)
+
+
+	.type	__condvar_w_cleanup, @function
+__condvar_w_cleanup:
+	mov	r4, r11
+
+	/* Get internal lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if cond_lock != 0
+	CMPXCHG (r3, @(cond_lock,r8), r4, r2)
+#else
+	CMPXCHG (r3, @r8, r4, r2)
+#endif
+	bt	1f
+	 nop
+
+	mov	r8, r5
+#if cond_lock != 0
+	add	#cond_lock, r5
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r6
+	mov	#LLL_SHARED, r6
+99:
+	extu.b	r6, r6
+	mov.l	.Lwait3, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait3b:
+
+1:
+	mov.l	@(broadcast_seq,r8), r0
+	mov.l	@(4,r15), r1
+	cmp/eq	r0, r1
+	bf	3f
+
+	mov	#1, r2
+	mov	#0, r3
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case, the thread was woken and
+	   then canceled; in that case we ignore the signal.  */
+	mov.l	@(total_seq+4,r8), r0
+	mov.l	@(wakeup_seq+4,r8), r1
+	cmp/hi	r1, r0
+	bt/s	6f
+	 cmp/hi	r0, r1
+	bt	7f
+	mov.l	@(total_seq,r8), r0
+	mov.l	@(wakeup_seq,r8), r1
+	cmp/hs	r0, r1
+	bt	7f
+
+6:
+	clrt
+	mov.l	@(wakeup_seq,r8),r0
+	mov.l	@(wakeup_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(wakeup_seq,r8)
+	mov.l	r1,@(wakeup_seq+4,r8)
+	mov.l	@(cond_futex,r8),r0
+	add	r2, r0
+	mov.l	r0,@(cond_futex,r8)
+
+7:
+	clrt
+	mov.l	@(woken_seq,r8),r0
+	mov.l	@(woken_seq+4,r8),r1
+	addc	r2, r0
+	addc	r3, r1
+	mov.l	r0,@(woken_seq,r8)
+	mov.l	r1,@(woken_seq+4,r8)
+
+3:
+	mov	#(1 << nwaiters_shift), r2
+	mov.l	@(cond_nwaiters,r8),r0
+	sub	r2, r0
+	mov.l	r0,@(cond_nwaiters,r8)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	mov	#0, r10
+	mov.l	@(total_seq,r8),r0
+	mov.l	@(total_seq+4,r8),r1
+	and	r1, r0
+	not	r0, r0
+	cmp/eq	#0, r0
+	bf/s	4f
+	 mov	#((1 << nwaiters_shift) - 1), r1
+	not	r1, r1
+	mov.l	@(cond_nwaiters,r8),r0
+	tst	r1, r0
+	bf	4f
+
+	mov	r8, r4
+	add	#cond_nwaiters, r4
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bt/s	99f
+	 mov	#FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff1, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAKE, r0
+	or	r0, r5
+#endif
+99:
+	mov	#1, r6
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	mov	#1, r10
+
+4:
+#if cond_lock != 0
+	DEC (@(cond_lock,r8), r2)
+#else
+	DEC (@r8, r2)
+#endif
+	tst	r2, r2
+	bt	2f
+
+	mov	r8, r4
+#if cond_lock != 0
+	add	#cond_lock, r4
+#endif
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bf/s	99f
+	 mov	#LLL_PRIVATE, r5
+	mov	#LLL_SHARED, r5
+99:
+	mov.l	.Lwake3, r1
+	bsrf	r1
+	 extu.b	r5, r5
+.Lwake3b:
+
+2:
+	/* Wake up all waiters to make sure no signal gets lost.  */
+	tst	r10, r10
+	bf/s	5f
+	 mov	r8, r4
+	add	#cond_futex, r4
+	mov.l	@(dep_mutex,r8), r0
+	cmp/eq	#-1, r0
+	bt/s	99f
+	 mov	#FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff1, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAKE, r0
+	or	r0, r5
+#endif
+99:
+	mov	#-1, r6
+	shlr	r6		/* r6 = 0x7fffffff */
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+5:
+	mov.l	.Lmlocki3, r1
+	bsrf	r1
+	 mov	r9, r4
+.Lmlocki3b:
+
+.LcallUR:
+	mov.l	.Lresume, r1
+#ifdef PIC
+	add	r12, r1
+#endif
+	jsr	@r1
+	 mov	r11, r4
+	sleep
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff1:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+	.align	2
+.Lwait3:
+	.long	__lll_lock_wait-.Lwait3b
+.Lwake3:
+	.long	__lll_unlock_wake-.Lwake3b
+.Lmlocki3:
+	.long	__pthread_mutex_cond_lock-.Lmlocki3b
+.Lresume:
+#ifdef PIC
+	.long	_Unwind_Resume@GOTOFF
+#else
+	.long	_Unwind_Resume
+#endif
+.LENDCODE:
+	cfi_endproc
+	.size	__condvar_w_cleanup, .-__condvar_w_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			! @LPStart format (omit)
+	.byte	DW_EH_PE_omit			! @TType format (omit)
+	.byte	DW_EH_PE_sdata4			! call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.ualong	.LcleanupSTART-.LSTARTCODE
+	.ualong	.LcleanupEND-.LcleanupSTART
+	.ualong	__condvar_w_cleanup-.LSTARTCODE
+	.uleb128  0
+	.ualong	.LcallUR-.LSTARTCODE
+	.ualong	.LENDCODE-.LcallUR
+	.ualong	0
+	.uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden DW.ref.__gcc_personality_v0
+	.weak   DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align 4
+	.type   DW.ref.__gcc_personality_v0, @object
+	.size   DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+	.long   __gcc_personality_v0
+#endif
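
[Editor's note: for orientation, the wait path above implements the old sequence-counter condvar protocol: total_seq counts every waiter that ever blocked, wakeup_seq counts wakeups granted, woken_seq counts wakeups consumed, and broadcast_seq invalidates waiters from before a broadcast.  A condensed C sketch under those assumptions follows; field names are as in nptl's pthread_cond_t, lll_* are glibc-internal helpers, and cancellation handling and error paths are omitted.]

    lll_lock (cond->__data.__lock, pshared);
    if (cond->__data.__mutex != (void *) ~0l)   /* dep_mutex == -1 marks */
      cond->__data.__mutex = mutex;             /* a pshared condvar.    */
    int err = __pthread_mutex_unlock_usercnt (mutex, 0);
    if (err != 0)
      {
        lll_unlock (cond->__data.__lock, pshared);
        return err;                     /* Label 12 in the assembly.  */
      }

    ++cond->__data.__total_seq;         /* 64-bit: the addc pairs above. */
    ++cond->__data.__futex;
    cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT;

    unsigned long long seq = cond->__data.__wakeup_seq;
    unsigned long long val = seq;
    unsigned int bc = cond->__data.__broadcast_seq;

    do
      {
        unsigned int futex_val = cond->__data.__futex;
        lll_unlock (cond->__data.__lock, pshared);
        lll_futex_wait (&cond->__data.__futex, futex_val, pshared);
        lll_lock (cond->__data.__lock, pshared);

        if (bc != cond->__data.__broadcast_seq)
          goto bc_out;                  /* Woken by a broadcast.  */
        val = cond->__data.__wakeup_seq;
      }
    while (val == seq                   /* No wakeup granted yet,    */
           || cond->__data.__woken_seq == val);  /* or all consumed. */

    ++cond->__data.__woken_seq;         /* Consume one wakeup.  */
  bc_out:
    cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
    /* If pthread_cond_destroy waits for the last waiter to drain, the
       total_seq == -1 / nwaiters == 0 check above wakes it here.  */
    lll_unlock (cond->__data.__lock, pshared);
    return __pthread_mutex_cond_lock (mutex);
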
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_once.S b/sysdeps/unix/sysv/linux/sh/pthread_once.S
new file mode 100644
index 0000000000..b22cf4491e
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_once.S
@@ -0,0 +1,257 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <unwindbuf.h>
+#include <sysdep.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
+#include "lowlevel-atomic.h"
+
+
+	.comm	__fork_generation, 4, 4
+
+	.text
+	.globl	__pthread_once
+	.type	__pthread_once,@function
+	.align	5
+	cfi_startproc
+__pthread_once:
+	mov.l	@r4, r0
+	tst	#2, r0
+	bt	1f
+	rts
+	 mov	#0, r0
+
+1:
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	mov	r5, r8
+	mov	r4, r9
+
+	/* Not yet initialized or initialization in progress.
+	   Get the fork generation counter now.  */
+6:
+	mov.l	@r4, r1
+	mova	.Lgot, r0
+	mov.l	.Lgot, r12
+	add	r0, r12
+
+5:
+	mov	r1, r0
+
+	tst	#2, r0
+	bf	4f
+
+	and	#3, r0
+	mov.l	.Lfgen, r2
+#ifdef PIC
+	add	r12, r2
+#endif
+	mov.l	@r2, r3
+	or	r3, r0
+	or	#1, r0
+	mov	r0, r3
+	mov	r1, r5
+
+	CMPXCHG (r5, @r4, r3, r2)
+	bf	5b
+
+	/* Check whether another thread already runs the initializer.  */
+	mov	r2, r0
+	tst	#1, r0
+	bt	3f	/* No -> do it.  */
+
+	/* Check whether the initializer execution was interrupted
+	   by a fork.  */
+	xor	r3, r0
+	mov	#-4, r1	/* -4 = 0xfffffffc */
+	tst	r1, r0
+	bf	3f	/* Different fork generation -> run initializer.  */
+
+	/* Somebody else got here first.  Wait.  */
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r5
+# if FUTEX_WAIT != 0
+	mov	#FUTEX_WAIT, r0
+	or	r0, r5
+# endif
+#endif
+	mov	r3, r6
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	bra	6b
+	 nop
+
+	.align	2
+.Lgot:
+	.long	_GLOBAL_OFFSET_TABLE_
+#ifdef PIC
+.Lfgen:
+	.long	__fork_generation@GOTOFF
+#else
+.Lfgen:
+	.long	__fork_generation
+#endif
+
+3:
+	/* Call the initializer function after setting up the
+	   cancellation handler.  Note that it is not possible here
+	   to use the unwind-based cleanup handling.  This would require
+	   that the user-provided function and all the code it calls
+	   are compiled with exceptions.  Unfortunately this cannot be
+	   guaranteed.  */
+	add	#-UNWINDBUFSIZE, r15
+	cfi_adjust_cfa_offset (UNWINDBUFSIZE)
+
+	mov.l	.Lsigsetjmp, r1
+	mov	#UWJMPBUF, r4
+	add	r15, r4
+	bsrf	r1
+	 mov	#0, r5
+.Lsigsetjmp0:
+	tst	r0, r0
+	bf	7f
+
+	mov.l	.Lcpush, r1
+	bsrf	r1
+	 mov	r15, r4
+.Lcpush0:
+
+	/* Call the user-provided initialization function.  */
+	jsr	@r8
+	 nop
+
+	/* Pop the cleanup handler.  */
+	mov.l	.Lcpop, r1
+	bsrf	r1
+	 mov	r15, r4
+.Lcpop0:
+
+	add	#UNWINDBUFSIZE, r15
+	cfi_adjust_cfa_offset (-UNWINDBUFSIZE)
+
+	/* Successful run of the initializer.  Signal that we are done.  */
+	INC (@r9, r2)
+	/* Wake up all other threads.  */
+	mov	r9, r4
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_PRIVATE_FLAG|FUTEX_WAKE), r5
+	extu.b	r5, r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAKE, r0
+	or	r0, r5
+#endif
+	mov	#-1, r6
+	shlr	r6		/* r6 = 0x7fffffff */
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+4:
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	#0, r0
+
+7:
+	/* __sigsetjmp returned for the second time.  */
+	cfi_adjust_cfa_offset (UNWINDBUFSIZE+16)
+	cfi_offset (r12, -4)
+	cfi_offset (r9, -8)
+	cfi_offset (r8, -12)
+	cfi_offset (pr, -16)
+	mov	#0, r7
+	mov.l	r7, @r9
+	mov	r9, r4
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#(FUTEX_PRIVATE_FLAG|FUTEX_WAKE), r5
+#else
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r5
+	mov	#FUTEX_WAKE, r0
+	or	r0, r5
+#endif
+	extu.b	r5, r5
+	mov	#-1, r6
+	shlr	r6		/* r6 = 0x7fffffff */
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	mov.l	.Lunext, r1
+	bsrf	r1
+	 mov	r15, r4
+.Lunext0:
+	/* NOTREACHED */
+	sleep
+	cfi_endproc
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+	.align	2
+.Lsigsetjmp:
+	.long	__sigsetjmp@PLT-(.Lsigsetjmp0-.)
+.Lcpush:
+	.long	HIDDEN_JUMPTARGET(__pthread_register_cancel)-.Lcpush0
+.Lcpop:
+	.long	HIDDEN_JUMPTARGET(__pthread_unregister_cancel)-.Lcpop0
+.Lunext:
+	.long	HIDDEN_JUMPTARGET(__pthread_unwind_next)-.Lunext0
+	.size	__pthread_once,.-__pthread_once
+
+hidden_def (__pthread_once)
+strong_alias (__pthread_once, pthread_once)
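
[Editor's note: the once-control word above encodes a small state machine: bit 1 means "done", bit 0 means "initializer running", and the remaining bits hold the fork generation so an initializer interrupted by fork() is retried.  A condensed C sketch follows; pthread_once_sketch is an invented name, the cancellation handler set up via __sigsetjmp above is omitted, and atomic_*/lll_futex_* are glibc-internal helpers.]

    int
    pthread_once_sketch (int *once, void (*init_routine) (void))
    {
      while (1)
        {
          int val = *once;
          if (val & 2)
            return 0;                   /* Already initialized.  */

          /* Claim the slot: "running" plus the current generation.  */
          int newval = __fork_generation | 1;
          if (atomic_compare_and_exchange_bool_acq (once, newval, val))
            continue;                   /* Lost the race; reread.  */

          if ((val & 1) != 0 && ((val ^ newval) & -4) == 0)
            {
              /* Same generation and somebody else is running the
                 initializer: sleep until it signals completion.  */
              lll_futex_wait (once, newval, LLL_PRIVATE);
              continue;
            }

          /* Either nobody was running it, or the previous runner was
             interrupted by fork (different generation): run it now.  */
          init_routine ();
          atomic_increment (once);      /* 01 -> 10: mark done.  */
          lll_futex_wake (once, 0x7fffffff, LLL_PRIVATE);
          return 0;
        }
    }
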
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S
new file mode 100644
index 0000000000..34790fd3b8
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S
@@ -0,0 +1,270 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+#include <kernel-features.h>
+#include "lowlevel-atomic.h"
+
+
+	.text
+
+	.globl	__pthread_rwlock_rdlock
+	.type	__pthread_rwlock_rdlock,@function
+	.align	5
+	cfi_startproc
+__pthread_rwlock_rdlock:
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	mov	r4, r8
+
+	/* Get the lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if MUTEX == 0
+	CMPXCHG (r3, @r8, r4, r2)
+#else
+	CMPXCHG (r3, @(MUTEX,r8), r4, r2)
+#endif
+	bf	1f
+2:
+	mov.l	@(WRITER,r8), r0
+	tst	r0, r0
+	bf	14f
+	mov.l	@(WRITERS_QUEUED,r8), r0
+	tst	r0, r0
+	bt	5f
+	mov	#FLAGS, r0
+	mov.b	@(r0,r8), r0
+	tst	r0, r0
+	bt	5f
+3:
+	mov.l	@(READERS_QUEUED,r8), r0
+	add	#1, r0
+	mov.l	r0, @(READERS_QUEUED,r8)
+	tst	r0, r0
+	bt	4f
+
+	mov.l	@(READERS_WAKEUP,r8), r9
+
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	10f
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	mov	#(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+	xor	r0, r5
+	extu.b	r5, r5
+#else
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+# if FUTEX_WAIT != 0
+	mov	#FUTEX_WAIT, r0
+	or	r0, r5
+# endif
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r0
+	xor	r0, r5
+#endif
+	mov	r8, r4
+	add	#READERS_WAKEUP, r4
+	mov	r9, r6
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	/* Re-acquire the lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if MUTEX == 0
+	CMPXCHG (r3, @r8, r4, r2)
+#else
+	CMPXCHG (r3, @(MUTEX,r8), r4, r2)
+#endif
+	bf	12f
+13:
+	mov.l	@(READERS_QUEUED,r8), r0
+	add	#-1, r0
+	bra	2b
+	 mov.l	r0, @(READERS_QUEUED,r8)
+
+5:
+	mov	#0, r3
+	mov.l	@(NR_READERS,r8), r0
+	add	#1, r0
+	mov.l	r0, @(NR_READERS,r8)
+	tst	r0, r0
+	bt	8f
+
+9:
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	6f
+7:
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	r3, r0
+	cfi_restore_state
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+
+1:
+	mov	r8, r5
+#if MUTEX != 0
+	add	#MUTEX, r5
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r6
+	extu.b	r6, r6
+	mov.l	.Lwait0, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait0b:
+	bra	2b
+	 nop
+14:
+	stc	gbr, r1
+	mov.w	.Ltidoff, r2
+	add	r2, r1
+	mov.l	@r1, r1
+	cmp/eq	r1, r0
+	bf	3b
+	/* Deadlock detected.  */
+	bra	9b
+	 mov	#EDEADLK, r3
+
+.Ltidoff:
+	.word	TID - TLS_PRE_TCB_SIZE
+
+6:
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake0, r1
+	bsrf	r1
+	 nop
+.Lwake0b:
+	bra	7b
+	 mov	#0, r3
+
+8:
+	/* Overflow.  */
+	mov.l	@(NR_READERS,r8), r1
+	add	#-1, r1
+	mov.l	r1, @(NR_READERS,r8)
+	bra	9b
+	 mov	#EAGAIN, r3
+
+4:
+	/* Overflow.  */
+	mov.l	@(READERS_QUEUED,r8), r1
+	add	#-1, r1
+	mov.l	r1, @(READERS_QUEUED,r8)
+	bra	9b
+	 mov	#EAGAIN, r3
+
+10:
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake1, r1
+	bsrf	r1
+	 nop
+.Lwake1b:
+	bra	11b
+	 nop
+
+12:
+	mov	r8, r5
+#if MUTEX != 0
+	add	#MUTEX, r5
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r6
+	extu.b	r6, r6
+	mov.l	.Lwait1, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait1b:
+	bra	13b
+	 nop
+	cfi_endproc
+
+	.align	2
+.Lwait0:
+	.long	__lll_lock_wait-.Lwait0b
+.Lwake0:
+	.long	__lll_unlock_wake-.Lwake0b
+.Lwait1:
+	.long	__lll_lock_wait-.Lwait1b
+.Lwake1:
+	.long	__lll_unlock_wake-.Lwake1b
+	.size	__pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
+
+strong_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)
+hidden_def (__pthread_rwlock_rdlock)
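
[Editor's note: a condensed C sketch of the rdlock logic above.  Field names follow the assembly's offset macros, not the exact pthread_rwlock_t layout; lll_* are glibc-internal helpers; own_tid is the TID the assembly loads from the TCB via .Ltidoff.  A nonzero FLAGS byte means the rwlock prefers writers, so readers queue behind waiting writers.]

    /* Minimal model of the fields used by the rwlock code.  */
    struct model_rwlock
    {
      int mutex;                        /* MUTEX: internal lll lock      */
      unsigned int nr_readers;          /* NR_READERS                    */
      unsigned int readers_wakeup;      /* READERS_WAKEUP futex          */
      unsigned int writers_wakeup;      /* WRITERS_WAKEUP futex          */
      unsigned int readers_queued;      /* READERS_QUEUED                */
      unsigned int writers_queued;      /* WRITERS_QUEUED                */
      int writer;                       /* WRITER: TID of write owner    */
      unsigned char flags;              /* FLAGS: nonzero = prefer writers */
      unsigned char pshared;            /* PSHARED                       */
    };

    static int
    rdlock_sketch (struct model_rwlock *rw, int own_tid)
    {
      int result = 0;
      lll_lock (rw->mutex, rw->pshared);
      for (;;)
        {
          if (rw->writer == 0
              && (rw->writers_queued == 0 || rw->flags == 0))
            {
              if (++rw->nr_readers == 0)        /* Counter overflow.  */
                {
                  --rw->nr_readers;
                  result = EAGAIN;
                }
              break;                    /* Read lock granted.  */
            }

          if (rw->writer == own_tid)
            {
              result = EDEADLK;         /* We hold the write lock.  */
              break;
            }

          if (++rw->readers_queued == 0)        /* Queue overflow.  */
            {
              --rw->readers_queued;
              result = EAGAIN;
              break;
            }
          int val = rw->readers_wakeup;
          lll_unlock (rw->mutex, rw->pshared);
          lll_futex_wait (&rw->readers_wakeup, val, rw->pshared);
          lll_lock (rw->mutex, rw->pshared);
          --rw->readers_queued;
        }
      lll_unlock (rw->mutex, rw->pshared);
      return result;
    }
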
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S
new file mode 100644
index 0000000000..07f7b21198
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S
@@ -0,0 +1,339 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+#include <kernel-features.h>
+#include "lowlevel-atomic.h"
+
+
+	.text
+
+	.globl	pthread_rwlock_timedrdlock
+	.type	pthread_rwlock_timedrdlock,@function
+	.align	5
+	cfi_startproc
+pthread_rwlock_timedrdlock:
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	mov.l	r10, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r10, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	add	#-8, r15
+	cfi_adjust_cfa_offset (8)
+	mov	r4, r8
+	mov	r5, r9
+
+	/* Get the lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if MUTEX == 0
+	CMPXCHG (r3, @r8, r4, r2)
+#else
+	CMPXCHG (r3, @(MUTEX,r8), r4, r2)
+#endif
+	bf	1f
+2:
+	mov.l	@(WRITER,r8), r0
+	tst	r0, r0
+	bf	14f
+	mov.l	@(WRITERS_QUEUED,r8), r0
+	tst	r0, r0
+	bt	5f
+	mov	#FLAGS, r0
+	mov.b	@(r0,r8), r0
+	tst	r0, r0
+	bt	5f
+3:
+	/* Check the value of the timeout parameter.  */
+	mov.l	.L1g0, r1
+	mov.l	@(4,r9), r0
+	cmp/hs	r1, r0
+	bt	19f
+
+	mov.l	@(READERS_QUEUED,r8), r0
+	add	#1, r0
+	mov.l	r0, @(READERS_QUEUED,r8)
+	tst	r0, r0
+	bt	4f
+
+	mov.l	@(READERS_WAKEUP,r8), r10
+
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	10f
+
+11:
+	/* Get current time.  */
+	mov	r15, r4
+	mov	#0, r5
+	mov	#__NR_gettimeofday, r3
+	trapa	#0x12
+	SYSCALL_INST_PAD
+
+	mov.l	@(4,r15), r0
+	mov.w	.L1k0, r1
+	dmulu.l	r0, r1		/* Micro seconds to nano seconds.  */
+	mov.l	@r9, r2
+	mov.l	@(4,r9), r3
+	mov.l	@r15, r0
+	sts	macl, r1
+	sub	r0, r2
+	clrt
+	subc	r1, r3
+	bf	15f
+	mov.l	.L1g0, r1
+	add	r1, r3
+	add	#-1, r2
+15:
+	cmp/pz	r2
+	bf	16f		/* Time is already up.  */
+
+	/* Store relative timeout.  */
+	mov.l	r2, @r15
+	mov.l	r3, @(4,r15)
+
+	/* Futex call.  */
+	mov	r15, r7
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	mov	#(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+	xor	r0, r5
+	extu.b	r5, r5
+#else
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+# if FUTEX_WAIT != 0
+	mov	#FUTEX_WAIT, r0
+	or	r0, r5
+# endif
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r0
+	xor	r0, r5
+#endif
+	mov	r10, r6
+	mov	r8, r4
+	add	#READERS_WAKEUP, r4
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	mov	r0, r3
+
+17:
+	/* Re-acquire the lock.  */
+	mov	#0, r5
+	mov	#1, r4
+#if MUTEX == 0
+	CMPXCHG (r5, @r8, r4, r2)
+#else
+	CMPXCHG (r5, @(MUTEX,r8), r4, r2)
+#endif
+	bf	12f
+
+13:
+	mov.l	@(READERS_QUEUED,r8), r0
+	add	#-1, r0
+	mov.l	r0, @(READERS_QUEUED,r8)
+	mov	#-ETIMEDOUT, r0
+	cmp/eq	r0, r3
+	bf	2b
+
+18:
+	bra	9f
+	 mov	#ETIMEDOUT, r3
+
+5:
+	mov	#0, r3
+	mov.l	@(NR_READERS,r8), r0
+	add	#1, r0
+	mov.l	r0, @(NR_READERS,r8)
+	tst	r0, r0
+	bt	8f
+
+9:
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	6f
+7:
+	cfi_remember_state
+	add	#8,r15
+	cfi_adjust_cfa_offset (-8)
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	r3, r0
+	cfi_restore_state
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+	.align	2
+.L1k0:
+	.long	1000
+.L1g0:
+	.long	1000000000
+
+1:
+	mov	r8, r5
+#if MUTEX != 0
+	add	#MUTEX, r5
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r6
+	extu.b	r6, r6
+	mov.l	.Lwait2, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait2b:
+	bra	2b
+	 nop
+14:
+	stc	gbr, r1
+	mov.w	.Ltidoff, r2
+	add	r2, r1
+	mov.l	@r1, r1
+	cmp/eq	r1, r0
+	bf	3b
+	/* Deadlock detected.  */
+	bra	9b
+	 mov	#EDEADLK, r3
+
+.Ltidoff:
+	.word	TID - TLS_PRE_TCB_SIZE
+
+6:
+	mov	r3, r10
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake2, r1
+	bsrf	r1
+	 nop
+.Lwake2b:
+	bra	7b
+	 mov	r10, r3
+
+8:
+	/* Overflow.  */
+	mov.l	@(NR_READERS,r8), r1
+	add	#-1, r1
+	mov.l	r1, @(NR_READERS,r8)
+	bra	9b
+	 mov	#EAGAIN, r3
+
+4:
+	/* Overflow.  */
+	mov.l	@(READERS_QUEUED,r8), r1
+	add	#-1, r1
+	mov.l	r1, @(READERS_QUEUED,r8)
+	bra	9b
+	 mov	#EAGAIN, r3
+
+10:
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake3, r1
+	bsrf	r1
+	 nop
+.Lwake3b:
+	bra	11b
+	 nop
+
+12:
+	mov	r3, r10
+	mov	r8, r5
+#if MUTEX != 0
+	add	#MUTEX, r5
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r6
+	extu.b	r6, r6
+	mov.l	.Lwait3, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait3b:
+	bra	13b
+	 mov	r10, r3
+
+16:
+	bra	17b
+	 mov	#-ETIMEDOUT, r3
+
+19:
+	bra	9b
+	 mov	#EINVAL, r3
+	cfi_endproc
+
+	.align	2
+.Lwait2:
+	.long	__lll_lock_wait-.Lwait2b
+.Lwake2:
+	.long	__lll_unlock_wake-.Lwake2b
+.Lwait3:
+	.long	__lll_lock_wait-.Lwait3b
+.Lwake3:
+	.long	__lll_unlock_wake-.Lwake3b
+	.size	pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
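
[Editor's note: each futex call in these files builds its operation word from the PSHARED byte with a single XOR instead of a branch.  A sketch of the __ASSUME_PRIVATE_FUTEX variant, under the assumption (implied by the XOR and consistent with the other ports) that pthread_rwlock_init stores 0 in PSHARED for a process-private rwlock and FUTEX_PRIVATE_FLAG for a process-shared one.]

    int op = (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ rw->pshared;
    /* private lock: 0    ^ (WAIT|PRIV) == FUTEX_WAIT | FUTEX_PRIVATE_FLAG
       shared lock:  PRIV ^ (WAIT|PRIV) == FUTEX_WAIT                     */

Without __ASSUME_PRIVATE_FUTEX the constant is replaced by the per-thread PRIVATE_FUTEX word (read via gbr at the .Lpfoff offset), which holds the private-futex flag only when the running kernel supports it.
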
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S
new file mode 100644
index 0000000000..dd25e95b96
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S
@@ -0,0 +1,323 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+#include <kernel-features.h>
+#include "lowlevel-atomic.h"
+
+
+	.text
+
+	.globl	pthread_rwlock_timedwrlock
+	.type	pthread_rwlock_timedwrlock,@function
+	.align	5
+	cfi_startproc
+pthread_rwlock_timedwrlock:
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	mov.l	r10, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r10, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	add	#-8, r15
+	cfi_adjust_cfa_offset (8)
+	mov	r4, r8
+	mov	r5, r9
+
+	/* Get the lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if MUTEX == 0
+	CMPXCHG (r3, @r8, r4, r2)
+#else
+	CMPXCHG (r3, @(MUTEX,r8), r4, r2)
+#endif
+	bf	1f
+2:
+	mov.l	@(WRITER,r8), r0
+	tst	r0, r0
+	bf	14f
+	mov.l	@(NR_READERS,r8), r0
+	tst	r0, r0
+	bt	5f
+3:
+	/* Check the value of the timeout parameter.  */
+	mov.l	.L1g1, r1
+	mov.l	@(4,r9), r0
+	cmp/hs	r1, r0
+	bt	19f
+
+	mov.l	@(WRITERS_QUEUED,r8), r0
+	add	#1, r0
+	mov.l	r0, @(WRITERS_QUEUED,r8)
+	tst	r0, r0
+	bt	4f
+
+	mov.l	@(WRITERS_WAKEUP,r8), r10
+
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	10f
+
+11:
+	/* Get current time.  */
+	mov	r15, r4
+	mov	#0, r5
+	mov	#__NR_gettimeofday, r3
+	trapa	#0x12
+	SYSCALL_INST_PAD
+
+	mov.l	@(4,r15), r0
+	mov.w	.L1k1, r1
+	dmulu.l	r0, r1		/* Micro seconds to nano seconds.  */
+	mov.l	@r9, r2
+	mov.l	@(4,r9), r3
+	mov.l	@r15, r0
+	sts	macl, r1
+	sub	r0, r2
+	clrt
+	subc	r1, r3
+	bf	15f
+	mov.l	.L1g1, r1
+	add	r1, r3
+	add	#-1, r2
+15:
+	cmp/pz	r2
+	bf	16f		/* Time is already up.  */
+
+	/* Store relative timeout.  */
+	mov.l	r2, @r15
+	mov.l	r3, @(4,r15)
+
+	/* Futex call.  */
+	mov	r15, r7
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	mov	#(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+	xor	r0, r5
+	extu.b	r5, r5
+#else
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+# if FUTEX_WAIT != 0
+	mov	#FUTEX_WAIT, r0
+	or	r0, r5
+# endif
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r0
+	xor	r0, r5
+#endif
+	mov	r10, r6
+	mov	r8, r4
+	add	#WRITERS_WAKEUP, r4
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+	mov	r0, r3
+
+17:
+	/* Re-acquire the lock.  */
+	mov	#0, r5
+	mov	#1, r4
+#if MUTEX == 0
+	CMPXCHG (r5, @r8, r4, r2)
+#else
+	CMPXCHG (r5, @(MUTEX,r8), r4, r2)
+#endif
+	bf	12f
+
+13:
+	mov.l	@(WRITERS_QUEUED,r8), r0
+	add	#-1, r0
+	mov.l	r0, @(WRITERS_QUEUED,r8)
+	mov	#-ETIMEDOUT, r0
+	cmp/eq	r0, r3
+	bf	2b
+
+18:
+	bra	9f
+	 mov	#ETIMEDOUT, r3
+
+19:
+	bra	9f
+	 mov	#EINVAL, r3
+
+5:
+	mov	#0, r3
+	stc	gbr, r0
+	mov.w	.Ltidoff, r1
+	mov.l	@(r0,r1), r0
+	mov.l	r0, @(WRITER,r8)
+9:
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	6f
+7:
+	cfi_remember_state
+	add	#8,r15
+	cfi_adjust_cfa_offset (-8)
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	r3, r0
+	cfi_restore_state
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+.L1k1:
+	.word	1000
+	.align	2
+.L1g1:
+	.long	1000000000
+
+1:
+	mov	r8, r5
+#if MUTEX != 0
+	add	#MUTEX, r5
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r6
+	extu.b	r6, r6
+	mov.l	.Lwait6, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait6b:
+	bra	2b
+	 nop
+14:
+	stc	gbr, r1
+	mov.w	.Ltidoff, r2
+	add	r2, r1
+	mov.l	@r1, r1
+	cmp/eq	r1, r0
+	bf	3b
+	bra	9b
+	 mov	#EDEADLK, r3
+6:
+	mov	r3, r10
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake6, r1
+	bsrf	r1
+	 nop
+.Lwake6b:
+	bra	7b
+	 mov	r10, r3
+
+.Ltidoff:
+	.word	TID - TLS_PRE_TCB_SIZE
+
+4:
+	/* Overflow.  */
+	mov.l	@(WRITERS_QUEUED,r8), r1
+	add	#-1, r1
+	mov.l	r1, @(WRITERS_QUEUED,r8)
+	bra	9b
+	 mov	#EAGAIN, r3
+
+10:
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake7, r1
+	bsrf	r1
+	 nop
+.Lwake7b:
+	bra	11b
+	 nop
+
+12:
+	mov	r3, r10
+	mov	r8, r5
+#if MUTEX != 0
+	add	#MUTEX, r5
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r6
+	extu.b	r6, r6
+	mov.l	.Lwait7, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait7b:
+	bra	13b
+	 mov	r10, r3
+
+16:
+	bra	17b
+	 mov	#-ETIMEDOUT, r3
+	cfi_endproc
+
+	.align	2
+.Lwait6:
+	.long	__lll_lock_wait-.Lwait6b
+.Lwake6:
+	.long	__lll_unlock_wake-.Lwake6b
+.Lwait7:
+	.long	__lll_lock_wait-.Lwait7b
+.Lwake7:
+	.long	__lll_unlock_wake-.Lwake7b
+	.size	pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
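
[Editor's note: the timed variants validate the nanosecond field up front (the .L1g1 comparison) and translate the kernel's negated errno after the wait.  A condensed sketch of one round, reusing the model struct from the rdlock sketch and compute_reltime from the pthread_cond_timedwait sketch; futex_timed_wait is an invented name standing for the trapa #0x14 call, which returns a negated errno.]

    if ((unsigned long) abstime->tv_nsec >= 1000000000UL)
      return EINVAL;                    /* Invalid timespec.  */

    struct timespec rt;
    int err;
    if (compute_reltime (abstime, &rt) != 0)
      err = -ETIMEDOUT;                 /* Deadline already past.  */
    else
      err = futex_timed_wait (&rw->writers_wakeup, waitval, &rt, op);

    lll_lock (rw->mutex, rw->pshared);
    --rw->writers_queued;
    if (err == -ETIMEDOUT)
      {
        lll_unlock (rw->mutex, rw->pshared);
        return ETIMEDOUT;               /* Positive code for POSIX.  */
      }
    /* Otherwise loop back to the state check (label 2 above) with
       the internal lock still held.  */
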
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S
new file mode 100644
index 0000000000..db99ee4696
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S
@@ -0,0 +1,221 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <kernel-features.h>
+#include "lowlevel-atomic.h"
+
+
+	.text
+
+	.globl	__pthread_rwlock_unlock
+	.type	__pthread_rwlock_unlock,@function
+	.align	5
+	cfi_startproc
+__pthread_rwlock_unlock:
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	mov	r4, r8
+
+	/* Get the lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if MUTEX == 0
+	CMPXCHG (r3, @r8, r4, r2)
+#else
+	CMPXCHG (r3, @(MUTEX,r8), r4, r2)
+#endif
+	bf	1f
+2:
+	mov.l	@(WRITER,r8), r0
+	tst	r0, r0
+	bf	5f
+	mov.l	@(NR_READERS,r8), r0
+	add	#-1, r0
+	mov.l	r0, @(NR_READERS,r8)
+	tst	r0, r0
+	bf	6f
+5:
+	mov	#0, r0
+	mov.l	r0, @(WRITER,r8)
+	mov	#1, r6
+	mov	r8, r4
+	add	#WRITERS_WAKEUP, r4
+	mov.l	@(WRITERS_QUEUED,r8), r0
+	tst	r0, r0
+	bf	0f
+
+	/* If there are also no readers waiting, there is nothing to do.  */
+	mov.l	@(READERS_QUEUED,r8), r0
+	tst	r0, r0
+	bt	6f
+
+	mov	#-1, r6
+	shlr	r6		/* r6 = 0x7fffffff */
+	mov	r8, r4
+	add	#READERS_WAKEUP, r4
+
+0:
+	mov.l	@r4, r0
+	add	#1, r0
+	mov.l	r0, @r4
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	7f
+
+8:
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	mov	#(FUTEX_PRIVATE_FLAG|FUTEX_WAKE), r0
+	xor	r0, r5
+	extu.b	r5, r5
+#else
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov	#FUTEX_WAKE, r0
+	or	r0, r5
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r0
+	xor	r0, r5
+#endif
+	mov	#SYS_futex, r3
+	mov	#0, r7
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	#0, r0
+	cfi_restore_state
+6:
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	3f
+4:
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	#0, r0
+	cfi_restore_state
+
+1:
+	mov	r8, r5
+#if MUTEX != 0
+	add	#MUTEX, r5
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r6
+	extu.b	r6, r6
+	mov.l	.Lwait8, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait8b:
+	bra	2b
+	 nop
+3:
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake8, r1
+	bsrf	r1
+	 nop
+.Lwake8b:
+	bra	4b
+	 nop
+
+7:
+	mov.l	r4, @-r15
+	cfi_adjust_cfa_offset (4)
+	mov.l	r6, @-r15
+	cfi_adjust_cfa_offset (4)
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake9, r1
+	bsrf	r1
+	 nop
+.Lwake9b:
+
+	mov.l	@r15+, r6
+	cfi_adjust_cfa_offset (-4)
+	bra	8b
+	 mov.l	@r15+, r4
+
+	cfi_endproc
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+	.align	2
+.Lwait8:
+	.long	__lll_lock_wait-.Lwait8b
+.Lwake8:
+	.long	__lll_unlock_wake-.Lwake8b
+.Lwake9:
+	.long	__lll_unlock_wake-.Lwake9b
+	.size	__pthread_rwlock_unlock,.-__pthread_rwlock_unlock
+
+strong_alias (__pthread_rwlock_unlock, pthread_rwlock_unlock)
+hidden_def (__pthread_rwlock_unlock)
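
[Editor's note: the unlock path wakes one waiting writer in preference to readers; only when no writer is queued does it broadcast to all queued readers.  A condensed sketch using the model struct from the rdlock sketch; unlock_sketch is an invented name.]

    static int
    unlock_sketch (struct model_rwlock *rw)
    {
      lll_lock (rw->mutex, rw->pshared);
      if (rw->writer == 0 && --rw->nr_readers != 0)
        {
          lll_unlock (rw->mutex, rw->pshared);  /* Still read-held.  */
          return 0;
        }
      rw->writer = 0;

      unsigned int *futex;
      int nwake;
      if (rw->writers_queued != 0)
        {
          futex = &rw->writers_wakeup;  /* Prefer a single writer...  */
          nwake = 1;
        }
      else if (rw->readers_queued != 0)
        {
          futex = &rw->readers_wakeup;  /* ...else release all readers. */
          nwake = 0x7fffffff;           /* The shlr'd -1 above.  */
        }
      else
        {
          lll_unlock (rw->mutex, rw->pshared);
          return 0;                     /* Nobody is waiting.  */
        }
      ++*futex;                         /* New wakeup generation.  */
      lll_unlock (rw->mutex, rw->pshared);
      lll_futex_wake (futex, nwake, rw->pshared);
      return 0;
    }
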
diff --git a/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S
new file mode 100644
index 0000000000..8802fa9383
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S
@@ -0,0 +1,252 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <lowlevelrwlock.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+#include <kernel-features.h>
+#include "lowlevel-atomic.h"
+
+
+	.text
+
+	.globl	__pthread_rwlock_wrlock
+	.type	__pthread_rwlock_wrlock,@function
+	.align	5
+	cfi_startproc
+__pthread_rwlock_wrlock:
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	mov	r4, r8
+
+	/* Get the lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if MUTEX == 0
+	CMPXCHG (r3, @r8, r4, r2)
+#else
+	CMPXCHG (r3, @(MUTEX,r8), r4, r2)
+#endif
+	bf	1f
+2:
+	mov.l	@(WRITER,r8), r0
+	tst	r0, r0
+	bf	14f
+	mov.l	@(NR_READERS,r8), r0
+	tst	r0, r0
+	bt	5f
+3:
+	mov.l	@(WRITERS_QUEUED,r8), r0
+	add	#1, r0
+	mov.l	r0, @(WRITERS_QUEUED,r8)
+	tst	r0, r0
+	bt	4f
+
+	mov.l	@(WRITERS_WAKEUP,r8), r9
+
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	10f
+11:
+	mov	r8, r4
+	add	#WRITERS_WAKEUP, r4
+#ifdef __ASSUME_PRIVATE_FUTEX
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	mov	#(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+	xor	r0, r5
+	extu.b	r5, r5
+#else
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+# if FUTEX_WAIT != 0
+	mov	#FUTEX_WAIT, r0
+	or	r0, r5
+# endif
+	stc	gbr, r1
+	mov.w	.Lpfoff, r2
+	add	r2, r1
+	mov.l	@r1, r0
+	xor	r0, r5
+#endif
+	mov	r9, r6
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	/* Re-acquire the lock.  */
+	mov	#0, r3
+	mov	#1, r4
+#if MUTEX == 0
+	CMPXCHG (r3, @r8, r4, r2)
+#else
+	CMPXCHG (r3, @(MUTEX,r8), r4, r2)
+#endif
+	bf	12f
+13:
+	mov.l	@(WRITERS_QUEUED,r8), r0
+	add	#-1, r0
+	bra	2b
+	 mov.l	r0, @(WRITERS_QUEUED,r8)
+
+5:
+	mov	#0, r3
+	stc	gbr, r0
+	mov.w	.Ltidoff, r1
+	mov.l	@(r0,r1), r0
+	mov.l	r0, @(WRITER,r8)
+9:
+#if MUTEX == 0
+	DEC (@r8, r2)
+#else
+	DEC (@(MUTEX,r8), r2)
+#endif
+	tst	r2, r2
+	bf	6f
+7:
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	r3, r0
+	cfi_restore_state
+
+1:
+	mov	r8, r5
+#if MUTEX != 0
+	add	#MUTEX, r5
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r6
+	extu.b	r6, r6
+	mov.l	.Lwait4, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait4b:
+	bra	2b
+	 nop
+14:
+	stc	gbr, r1
+	mov.w	.Ltidoff, r2
+	add	r2, r1
+	mov.l	@r1, r1
+	cmp/eq	r1, r0
+	bf	3b
+	bra	9b
+	 mov	#EDEADLK, r3
+6:
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake4, r1
+	bsrf	r1
+	 nop
+.Lwake4b:
+	bra	7b
+	 mov	#0, r3
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+	.word	PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+.Ltidoff:
+	.word	TID - TLS_PRE_TCB_SIZE
+
+4:
+	mov.l	@(WRITERS_QUEUED,r8), r1
+	add	#-1, r1
+	mov.l	r1, @(WRITERS_QUEUED,r8)
+	bra	9b
+	 mov	#EAGAIN, r3
+
+10:
+	mov	r8, r4
+#if MUTEX != 0
+	add	#MUTEX, r4
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r5
+	extu.b	r5, r5
+	mov.l	.Lwake5, r1
+	bsrf	r1
+	 nop
+.Lwake5b:
+	bra	11b
+	 nop
+
+12:
+	mov	r8, r5
+#if MUTEX != 0
+	add	#MUTEX, r5
+#endif
+	mov	#PSHARED, r0
+	mov.b	@(r0,r8), r6
+	extu.b	r6, r6
+	mov.l	.Lwait5, r1
+	bsrf	r1
+	 mov	r2, r4
+.Lwait5b:
+	bra	13b
+	 nop
+
+	cfi_endproc
+
+	.align	2
+.Lwait4:
+	.long	__lll_lock_wait-.Lwait4b
+.Lwake4:
+	.long	__lll_unlock_wake-.Lwake4b
+.Lwait5:
+	.long	__lll_lock_wait-.Lwait5b
+.Lwake5:
+	.long	__lll_unlock_wake-.Lwake5b
+
+strong_alias (__pthread_rwlock_wrlock, pthread_rwlock_wrlock)
+hidden_def (__pthread_rwlock_wrlock)
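
[Editor's note: the write-lock grant path stores the caller's TID, read from the TCB via the .Ltidoff offset, into the WRITER field; that single field both marks the lock write-held and enables the EDEADLK check at label 14.  A condensed sketch of the decision, inside the same internal-lock loop as the rdlock sketch.]

    if (rw->writer == 0 && rw->nr_readers == 0)
      {
        rw->writer = own_tid;   /* Grant; the TID doubles as the owner
                                   record for EDEADLK detection and for
                                   pthread_rwlock_unlock.  */
        result = 0;
      }
    else if (rw->writer == own_tid)
      result = EDEADLK;         /* Relocking our own write lock.  */
    else
      {
        /* Bump WRITERS_QUEUED (with the same overflow check as the
           reader path) and sleep on WRITERS_WAKEUP; omitted here.  */
      }
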
diff --git a/sysdeps/unix/sysv/linux/sh/sem_post.S b/sysdeps/unix/sysv/linux/sh/sem_post.S
new file mode 100644
index 0000000000..ccc62550af
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/sem_post.S
@@ -0,0 +1,111 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <structsem.h>
+#include <lowlevellock.h>
+#include "lowlevel-atomic.h"
+
+
+	.text
+
+	.globl	__new_sem_post
+	.type	__new_sem_post,@function
+	.align	5
+	cfi_startproc
+__new_sem_post:
+	mov.l	@(VALUE,r4), r2
+0:
+	mov.l	.Lmax, r1
+	cmp/eq	r1, r2
+	bt/s	3f
+	 mov	r2, r3
+	mov	r3, r5
+	add	#1, r5
+	CMPXCHG (r3, @(VALUE,r4), r5, r2)
+	bf	0b
+	mov.l	@(NWAITERS,r4), r2
+	tst	r2, r2
+	bt	2f
+	mov	#FUTEX_WAKE, r5
+	mov.l	@(PRIVATE,r4), r1
+	or	r1, r5
+	mov	#1, r6
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	cmp/pz	r0
+	bf	1f
+2:
+	rts
+	 mov	#0, r0
+
+1:
+	bra	4f
+	 mov	#EINVAL, r2
+
+3:
+	mov	#EOVERFLOW, r2
+4:
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	mova	.Lgot3, r0
+	mov.l	.Lgot3, r12
+	add	r0, r12
+
+	mov.l	.Lerrno3, r0
+	stc	gbr, r1
+	mov.l	@(r0, r12), r0
+	bra	.Lexit
+	 add	r1, r0
+	.align	2
+.Lerrno3:
+	.long	errno@GOTTPOFF
+.Lexit:
+	mov.l	r2, @r0
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	#-1, r0
+	cfi_endproc
+
+	.align	2
+.Lmax:
+	.long	SEM_VALUE_MAX
+.Lgot3:
+	.long	_GLOBAL_OFFSET_TABLE_
+	.size	__new_sem_post,.-__new_sem_post
+	versioned_symbol(libpthread, __new_sem_post, sem_post, GLIBC_2_1)
diff --git a/sysdeps/unix/sysv/linux/sh/sem_timedwait.S b/sysdeps/unix/sysv/linux/sh/sem_timedwait.S
new file mode 100644
index 0000000000..4803d033d6
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/sem_timedwait.S
@@ -0,0 +1,281 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+#include <structsem.h>
+#include <lowlevellock.h>
+#include "lowlevel-atomic.h"
+
+
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
+
+	.text
+
+	.globl	sem_timedwait
+	.type	sem_timedwait,@function
+	.align	5
+	cfi_startproc
+sem_timedwait:
+.LSTARTCODE:
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_absptr, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_absptr, .LexceptSTART)
+#endif
+	mov.l	@r4, r0
+2:
+	tst	r0, r0
+	bt	1f
+	mov	r0, r3
+	mov	r0, r6
+	add	#-1, r3
+	CMPXCHG (r6, @r4, r3, r2)
+	bf/s	2b
+	 mov	r2, r0
+	rts
+	 mov	#0, r0
+
+1:
+	/* Check whether the timeout value is valid.  */
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	mov.l	r9, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r9, 0)
+	mov.l	r10, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r10, 0)
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	add	#-8, r15
+	cfi_adjust_cfa_offset (8)
+
+	mov	r4, r8
+	mov	r5, r9
+
+	/* Check for invalid nanosecond field.  */
+	mov.l	@(4,r9), r0
+	mov.l	.L1g, r1
+	cmp/hs	r1, r0
+	bt/s	.Lerrno_exit
+	 mov	#EINVAL, r10
+	INC (@(NWAITERS,r8),r2)
+
+7:
+	/* Compute relative timeout.  */
+	mov	r15, r4
+	mov	#0, r5
+	mov	#__NR_gettimeofday, r3
+	trapa	#0x12
+	SYSCALL_INST_PAD
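+	/* reltimeout = abstime - now; if the nanosecond subtraction
+	   borrows, add 1000000000 and borrow one second.  */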
+
+	mov.l	@(4,r15), r0
+	mov.w	.L1k, r1
+	dmulu.l	r0, r1		/* Microseconds to nanoseconds.  */
+	mov.l	@r9, r2
+	mov.l	@(4,r9), r3
+	mov.l	@r15, r0
+	sts	macl, r1
+	sub	r0, r2
+	clrt
+	subc	r1, r3
+	bf	5f
+	mov.l	.L1g, r1
+	add	r1, r3
+	add	#-1, r2
+5:
+	cmp/pz	r2
+	bf/s	6f		/* Time is already up.  */
+	 mov	#ETIMEDOUT, r0
+
+	/* Store relative timeout.  */
+	mov.l	r2, @r15
+	mov.l	r3, @(4,r15)
+
+.LcleanupSTART:
+	mov.l	.Lenable0, r1
+	bsrf	r1
+	 nop
+.Lenable0b:
+	mov	r0, r10
+
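+	/* futex(&sem->value, FUTEX_WAIT | private-flag, 0, &reltimeout),
+	   with asynchronous cancellation enabled around the trap.  */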
+	mov	r8, r4
+#if FUTEX_WAIT == 0
+	mov.l	@(PRIVATE,r8), r5
+#else
+	mov.l	@(PRIVATE,r8), r5
+	mov	#FUTEX_WAIT, r0
+	or	r0, r5
+#endif
+	mov	#0, r6
+	mov	r15, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	mov.l	.Ldisable0, r1
+	mov	r10, r4
+	bsrf	r1
+	 mov	r0, r10
+.Ldisable0b:
+	mov	r10, r0
+.LcleanupEND:
+
+	tst	r0, r0
+	bt	9f
+	cmp/eq	#-EWOULDBLOCK, r0
+	bf	3f
+9:
+	mov.l	@r8, r0
+8:
+	tst	r0, r0
+	bt	7b
+
+	mov	r0, r3
+	mov	r0, r4
+	add	#-1, r3
+	CMPXCHG (r4, @r8, r3, r2)
+	bf/s	8b
+	 mov	r2, r0
+
+	DEC (@(NWAITERS,r8), r2)
+	mov	#0, r0
+
+10:
+	cfi_remember_state
+	add	#8, r15
+	cfi_adjust_cfa_offset (-8)
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	mov.l	@r15+, r9
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r9)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	rts
+	 nop
+	cfi_restore_state
+
+3:
+	neg	r0, r0
+6:
+	mov	r0, r10
+	DEC (@(NWAITERS,r8), r2)
+.Lerrno_exit:
+	mova	.Lgot2, r0
+	mov.l	.Lgot2, r12
+	add	r0, r12
+
+	mov.l	.Lerrno2, r0
+	stc	gbr, r1
+	mov.l	@(r0, r12), r0
+	bra	.Lexit
+	 add	r1, r0
+	.align	2
+.Lerrno2:
+	.long	errno@GOTTPOFF
+.Lexit:
+	mov.l	r10, @r0
+	bra	10b
+	 mov	#-1, r0
+
+.L1k:
+	.word	1000
+	.align	2
+.L1g:
+	.long	1000000000
+.Lgot2:
+	.long	_GLOBAL_OFFSET_TABLE_
+.Lenable0:
+	.long	__pthread_enable_asynccancel-.Lenable0b
+.Ldisable0:
+	.long	__pthread_disable_asynccancel-.Ldisable0b
+	.size	sem_timedwait,.-sem_timedwait
+
+	.type	sem_wait_cleanup,@function
+sem_wait_cleanup:
+	DEC (@(NWAITERS,r8), r2)
+.LcallUR:
+	mov.l	.Lresume, r1
+#ifdef PIC
+	add	r12, r1
+#endif
+	jsr	@r1
+	 nop
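+	/* Not reached: _Unwind_Resume does not return.  */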
+	sleep
+
+	.align	2
+.Lresume:
+#ifdef PIC
+	.long	_Unwind_Resume@GOTOFF
+#else
+	.long	_Unwind_Resume
+#endif
+.LENDCODE:
+	cfi_endproc
+	.size	sem_wait_cleanup,.-sem_wait_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			! @LPStart format (omit)
+	.byte	DW_EH_PE_omit			! @TType format (omit)
+	.byte	DW_EH_PE_uleb128		! call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 sem_wait_cleanup-.LSTARTCODE
+	.uleb128 0
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128 0
+.Lcstend:
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	4
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+	.long	__gcc_personality_v0
+#endif
diff --git a/sysdeps/unix/sysv/linux/sh/sem_trywait.S b/sysdeps/unix/sysv/linux/sh/sem_trywait.S
new file mode 100644
index 0000000000..8ff8792ff4
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/sem_trywait.S
@@ -0,0 +1,102 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include "lowlevel-atomic.h"
+
+
+	.text
+
+	.globl	__new_sem_trywait
+	.type	__new_sem_trywait,@function
+	.align	5
+	cfi_startproc
+__new_sem_trywait:
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+	mov	r4, r8
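+	/* Try to take the semaphore with a CMPXCHG decrement; a value of
+	   zero means it is unavailable and we fail with EAGAIN.  */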
+	mov.l	@r8, r0
+2:
+	tst	r0, r0
+	bt	1f
+
+	mov	r0, r3
+	mov	r0, r4
+	add	#-1, r3
+	CMPXCHG (r4, @r8, r3, r2)
+	bf/s	2b
+	 mov	r2, r0
+
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	#0, r0
+	cfi_restore_state
+
+1:
+	mov	#EAGAIN, r8
+	mova	.Lgot1, r0
+	mov.l	.Lgot1, r12
+	add	r0, r12
+
+	mov.l	.Lerrno1, r0
+	stc	gbr, r1
+	mov.l	@(r0, r12), r0
+	bra	.Lexit
+	 add	r1, r0
+	.align	2
+.Lerrno1:
+	.long	errno@GOTTPOFF
+.Lexit:
+	mov.l	r8, @r0
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r8
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r8)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	rts
+	 mov	#-1, r0
+
+	cfi_endproc
+
+	.align	2
+.Lgot1:
+	.long	_GLOBAL_OFFSET_TABLE_
+	.size	__new_sem_trywait,.-__new_sem_trywait
+	versioned_symbol(libpthread, __new_sem_trywait, sem_trywait, GLIBC_2_1)
diff --git a/sysdeps/unix/sysv/linux/sh/sem_wait.S b/sysdeps/unix/sysv/linux/sh/sem_wait.S
new file mode 100644
index 0000000000..04a6a405d4
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/sem_wait.S
@@ -0,0 +1,229 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <shlib-compat.h>
+#include <pthread-errnos.h>
+#include <tcb-offsets.h>
+#include <structsem.h>
+#include <lowlevellock.h>
+#include "lowlevel-atomic.h"
+
+
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
+
+	.text
+
+	.globl	__new_sem_wait
+	.type	__new_sem_wait,@function
+	.align	5
+	cfi_startproc
+__new_sem_wait:
+.LSTARTCODE:
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_absptr, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_absptr, .LexceptSTART)
+#endif
+	mov.l	r8, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r8, 0)
+	mov.l	r10, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r10, 0)
+	mov.l	r12, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (r12, 0)
+	sts.l	pr, @-r15
+	cfi_adjust_cfa_offset (4)
+	cfi_rel_offset (pr, 0)
+
+	mov	r4, r8
+	mov.l	@r8, r0
+2:
+	tst	r0, r0
+	bt	1f
+	mov	r0, r3
+	mov	r0, r4
+	add	#-1, r3
+	CMPXCHG (r4, @r8, r3, r2)
+	bf/s	2b
+	 mov	r2, r0
+7:
+	mov	#0, r0
+9:
+	cfi_remember_state
+	lds.l	@r15+, pr
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (pr)
+	mov.l	@r15+, r12
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r12)
+	mov.l	@r15+, r10
+	cfi_adjust_cfa_offset (-4)
+	cfi_restore (r10)
+	rts
+	 mov.l	@r15+, r8
+	/* Omit CFI for restore in delay slot.  */
+	cfi_restore_state
+
+.Lafter_ret:
+1:
+	INC (@(NWAITERS,r8),r2)
+
+.LcleanupSTART:
+6:
+	mov.l	.Lenable0, r1
+	bsrf	r1
+	 nop
+.Lenable0b:
+	mov	r0, r10
+
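+	/* futex(&sem->value, FUTEX_WAIT | private-flag, 0, NULL): block
+	   until a post arrives, with asynchronous cancellation enabled.  */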
+	mov	r8, r4
+#if FUTEX_WAIT == 0
+	mov.l	@(PRIVATE,r8), r5
+#else
+	mov.l	@(PRIVATE,r8), r5
+	mov	#FUTEX_WAIT, r0
+	or	r0, r5
+#endif
+	mov	#0, r6
+	mov	#0, r7
+	mov	#SYS_futex, r3
+	extu.b	r3, r3
+	trapa	#0x14
+	SYSCALL_INST_PAD
+
+	mov.l	.Ldisable0, r1
+	mov	r10, r4
+	bsrf	r1
+	 mov	r0, r10
+.Ldisable0b:
+	mov	r10, r0
+.LcleanupEND:
+
+	tst	r0, r0
+	bt	3f
+	cmp/eq	#-EWOULDBLOCK, r0
+	bf	4f
+
+3:
+	mov.l	@r8, r0
+5:
+	tst	r0, r0
+	bt	6b
+
+	mov	r0, r3
+	mov	r0, r4
+	add	#-1, r3
+	CMPXCHG (r4, @r8, r3, r2)
+	bf/s	5b
+	 mov	r2, r0
+
+	DEC (@(NWAITERS,r8), r2)
+	bra	7b
+	 nop
+
+4:
+	neg	r0, r0
+	mov	r0, r4
+	DEC (@(NWAITERS,r8), r2)
+	mov	r4, r8
+	mova	.Lgot0, r0
+	mov.l	.Lgot0, r12
+	add	r0, r12
+
+	mov.l	.Lerrno0, r0
+	stc	gbr, r1
+	mov.l	@(r0, r12), r0
+	bra	.Lexit
+	 add	r1, r0
+	.align	2
+.Lerrno0:
+	.long	errno@GOTTPOFF
+.Lexit:
+	mov.l	r8, @r0
+	bra	9b
+	 mov	#-1, r0
+
+	.align	2
+.Lgot0:
+	.long	_GLOBAL_OFFSET_TABLE_
+.Lenable0:
+	.long	__pthread_enable_asynccancel-.Lenable0b
+.Ldisable0:
+	.long	__pthread_disable_asynccancel-.Ldisable0b
+	.size	__new_sem_wait,.-__new_sem_wait
+	versioned_symbol(libpthread, __new_sem_wait, sem_wait, GLIBC_2_1)
+
+
+	.type	sem_wait_cleanup,@function
+sem_wait_cleanup:
+	DEC (@(NWAITERS,r8), r2)
+.LcallUR:
+	mov.l	.Lresume, r1
+#ifdef PIC
+	add	r12, r1
+#endif
+	jsr	@r1
+	 nop
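+	/* Not reached: _Unwind_Resume does not return.  */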
+	sleep
+
+	.align	2
+.Lresume:
+#ifdef PIC
+	.long	_Unwind_Resume@GOTOFF
+#else
+	.long	_Unwind_Resume
+#endif
+.LENDCODE:
+	cfi_endproc
+	.size	sem_wait_cleanup,.-sem_wait_cleanup
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			! @LPStart format (omit)
+	.byte	DW_EH_PE_omit			! @TType format (omit)
+	.byte	DW_EH_PE_uleb128		! call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 sem_wait_cleanup-.LSTARTCODE
+	.uleb128 0
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128 0
+.Lcstend:
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	4
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+	.long	__gcc_personality_v0
+#endif
diff --git a/sysdeps/unix/sysv/linux/sh/sh4/lowlevellock.h b/sysdeps/unix/sysv/linux/sh/sh4/lowlevellock.h
new file mode 100644
index 0000000000..90be7bd8d0
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/sh4/lowlevellock.h
@@ -0,0 +1,4 @@
+/* Four instruction cycles that do not access the cache or TLB are
+   needed after a trapa instruction to work around an SH-4 silicon bug.  */
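+/* Defining NEED_SYSCALL_INST_PAD makes SYSCALL_INST_PAD (see sysdep.h)
+   expand to no-op padding after each trapa (e.g. a few "or r0,r0"
+   instructions) instead of to nothing.  */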
+#define NEED_SYSCALL_INST_PAD
+#include_next <lowlevellock.h>
diff --git a/sysdeps/unix/sysv/linux/sh/smp.h b/sysdeps/unix/sysv/linux/sh/smp.h
new file mode 100644
index 0000000000..c4c0a75105
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/smp.h
@@ -0,0 +1,23 @@
+/* Determine whether the host has multiple processors.  SH version.
+   Copyright (C) 2002-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+static inline int
+is_smp_system (void)
+{
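+  /* All Linux/SH configurations are treated as uniprocessor.  */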
+  return 0;
+}
diff --git a/sysdeps/unix/sysv/linux/sh/sysdep-cancel.h b/sysdeps/unix/sysv/linux/sh/sysdep-cancel.h
new file mode 100644
index 0000000000..4278f25d7b
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/sh/sysdep-cancel.h
@@ -0,0 +1,169 @@
+/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <tls.h>
+#ifndef __ASSEMBLER__
+# include <nptl/pthreadP.h>
+#endif
+
+#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
+
+# define _IMM12 #-12
+# define _IMM16 #-16
+# define _IMP16 #16
+# undef PSEUDO
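+/* Wrap a system call so that asynchronous cancellation is enabled around
+   the actual trap when the process is multi-threaded.  Return values in
+   [-4096, -1] indicate errors: r1 = ~(r0 >> 12) is zero exactly for that
+   range, so TST sets T and we branch to the error handler.  */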
+# define PSEUDO(name, syscall_name, args) \
+  .text; \
+  ENTRY (name); \
+  .Lpseudo_start: \
+    SINGLE_THREAD_P; \
+    bf .Lpseudo_cancel; \
+    .type __##syscall_name##_nocancel,@function; \
+    .globl __##syscall_name##_nocancel; \
+    __##syscall_name##_nocancel: \
+    DO_CALL (syscall_name, args); \
+    mov r0,r1; \
+    mov _IMM12,r2; \
+    shad r2,r1; \
+    not r1,r1; \
+    tst r1,r1; \
+    bt .Lsyscall_error; \
+    bra .Lpseudo_end; \
+     nop; \
+    .size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
+ .Lpseudo_cancel: \
+    sts.l pr,@-r15; \
+    cfi_adjust_cfa_offset (4); \
+    cfi_rel_offset (pr, 0); \
+    add _IMM16,r15; \
+    cfi_adjust_cfa_offset (16); \
+    SAVE_ARGS_##args; \
+    CENABLE; \
+    LOAD_ARGS_##args; \
+    add _IMP16,r15; \
+    cfi_adjust_cfa_offset (-16); \
+    lds.l @r15+,pr; \
+    cfi_adjust_cfa_offset (-4); \
+    cfi_restore (pr); \
+    DO_CALL(syscall_name, args); \
+    SYSCALL_INST_PAD; \
+    sts.l pr,@-r15; \
+    cfi_adjust_cfa_offset (4); \
+    cfi_rel_offset (pr, 0); \
+    mov.l r0,@-r15; \
+    cfi_adjust_cfa_offset (4); \
+    cfi_rel_offset (r0, 0); \
+    CDISABLE; \
+    mov.l @r15+,r0; \
+    cfi_adjust_cfa_offset (-4); \
+    cfi_restore (r0); \
+    lds.l @r15+,pr; \
+    cfi_adjust_cfa_offset (-4); \
+    cfi_restore (pr); \
+    mov r0,r1; \
+    mov _IMM12,r2; \
+    shad r2,r1; \
+    not r1,r1; \
+    tst r1,r1; \
+    bf .Lpseudo_end; \
+ .Lsyscall_error: \
+    SYSCALL_ERROR_HANDLER; \
+ .Lpseudo_end:
+
+# undef PSEUDO_END
+# define PSEUDO_END(sym) \
+  END (sym)
+
+# define SAVE_ARGS_0	/* Nothing.  */
+# define SAVE_ARGS_1	SAVE_ARGS_0; mov.l r4,@(0,r15); cfi_offset (r4,-4)
+# define SAVE_ARGS_2	SAVE_ARGS_1; mov.l r5,@(4,r15); cfi_offset (r5,-8)
+# define SAVE_ARGS_3	SAVE_ARGS_2; mov.l r6,@(8,r15); cfi_offset (r6,-12)
+# define SAVE_ARGS_4	SAVE_ARGS_3; mov.l r7,@(12,r15); cfi_offset (r7,-16)
+# define SAVE_ARGS_5	SAVE_ARGS_4
+# define SAVE_ARGS_6	SAVE_ARGS_5
+
+# define LOAD_ARGS_0	/* Nothing.  */
+# define LOAD_ARGS_1	LOAD_ARGS_0; mov.l @(0,r15),r4; cfi_restore (r4)
+# define LOAD_ARGS_2	LOAD_ARGS_1; mov.l @(4,r15),r5; cfi_restore (r5)
+# define LOAD_ARGS_3	LOAD_ARGS_2; mov.l @(8,r15),r6; cfi_restore (r6)
+# define LOAD_ARGS_4	LOAD_ARGS_3; mov.l @(12,r15),r7; cfi_restore (r7)
+# define LOAD_ARGS_5	LOAD_ARGS_4
+# define LOAD_ARGS_6	LOAD_ARGS_5
+
+# ifdef IS_IN_libpthread
+#  define __local_enable_asynccancel	__pthread_enable_asynccancel
+#  define __local_disable_asynccancel	__pthread_disable_asynccancel
+# elif !defined NOT_IN_libc
+#  define __local_enable_asynccancel	__libc_enable_asynccancel
+#  define __local_disable_asynccancel	__libc_disable_asynccancel
+# elif defined IS_IN_librt
+#  define __local_enable_asynccancel	__librt_enable_asynccancel
+#  define __local_disable_asynccancel	__librt_disable_asynccancel
+# else
+#  error Unsupported library
+# endif
+
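+/* Call the per-library asynccancel helpers via a PC-relative bsrf.
+   CENABLE leaves the previous cancellation state in r2; CDISABLE passes
+   it back in r4.  */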
+# define CENABLE \
+	mov.l 1f,r0; \
+	bsrf r0; \
+	 nop; \
+     0: bra 2f; \
+	 mov r0,r2; \
+	.align 2; \
+     1: .long __local_enable_asynccancel - 0b; \
+     2:
+
+# define CDISABLE \
+	mov.l 1f,r0; \
+	bsrf r0; \
+	 mov r2,r4; \
+     0: bra 2f; \
+	 nop; \
+	.align 2; \
+     1: .long __local_disable_asynccancel - 0b; \
+     2:
+
+# ifndef __ASSEMBLER__
+#  define SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+				   header.multiple_threads) == 0, 1)
+# else
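+/* Load the multiple_threads flag from the TCB (at gbr - TLS_PRE_TCB_SIZE)
+   and test it; T ends up set when the process is single-threaded.  */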
+#  define SINGLE_THREAD_P \
+	stc gbr,r0; \
+	mov.w 0f,r1; \
+	sub r1,r0; \
+	mov.l @(MULTIPLE_THREADS_OFFSET,r0),r0; \
+	bra 1f; \
+	 tst r0,r0; \
+     0: .word TLS_PRE_TCB_SIZE; \
+     1:
+
+# endif
+
+#elif !defined __ASSEMBLER__
+
+# define SINGLE_THREAD_P (1)
+# define NO_CANCELLATION 1
+
+#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+				   header.multiple_threads) == 0, 1)
+#endif