author     Jakub Jelinek <jakub@redhat.com>    2006-09-15 14:40:38 +0000
committer  Jakub Jelinek <jakub@redhat.com>    2006-09-15 14:40:38 +0000
commit     3d6b6fbc1f374f55fdb9e277d7e851fe112d68ff
tree       30e69d0b0d6aa51c68853ea3eda28baa2783f915
parent     894c14a4968bfcbdbe4f01e7cae6114f33747bae
power-cpu add-on, 2.05 ISA optimized libs  (cvs/fedora-glibc-2_4_90-32)
-rw-r--r--  fedora/Makefile | 2
-rw-r--r--  fedora/glibc.spec.in | 75
-rw-r--r--  fedora/makepatch.awk | 1
-rw-r--r--  fedora/power6emul.c | 242
-rw-r--r--  powerpc-cpu/ChangeLog | 66
-rw-r--r--  powerpc-cpu/README | 98
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/970/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/970/fpu/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power4/fpu/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memcmp.S | 985
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memcpy.S | 425
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memset.S | 228
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power4/strncmp.S | 176
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S | 37
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S | 30
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S | 37
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S | 30
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S | 37
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S | 30
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S | 37
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S | 30
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power5/fpu/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power6/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/power6/fpu/Implies | 2
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrint.S | 43
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrintf.S | 39
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/970/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power4/memcmp.S | 981
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power4/memcpy.S | 417
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power4/strncmp.S | 180
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S | 38
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S | 31
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S | 38
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S | 31
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S | 38
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S | 31
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S | 38
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S | 31
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power5/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power6/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power6/fpu/Implies | 1
-rw-r--r--  powerpc-cpu/sysdeps/powerpc/powerpc64/power6/memcpy.S | 1002
-rw-r--r--  powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/970/fpu/Implies | 3
-rw-r--r--  powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power4/fpu/Implies | 3
-rw-r--r--  powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power5+/fpu/Implies | 3
-rw-r--r--  powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power5/fpu/Implies | 3
-rw-r--r--  powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power6/fpu/Implies | 3
-rw-r--r--  powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc64/power5+/fpu/Implies | 3
-rw-r--r--  powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc64/power6/fpu/Implies | 3
53 files changed, 5537 insertions(+), 3 deletions(-)
diff --git a/fedora/Makefile b/fedora/Makefile
index e2d037341b..73a6fd2287 100644
--- a/fedora/Makefile
+++ b/fedora/Makefile
@@ -114,7 +114,7 @@ $(tar-name)-$(branch-name).patch: makepatch.awk glibc.spec \
 	mv -f patch.tmp $@
 
 # makepatch.awk omits these files from the patch; we put them in a tar file.
-outside-patch = fedora c_stubs rtkaio \
+outside-patch = fedora c_stubs rtkaio powerpc-cpu \
 		localedata/charmaps/GB18030 iconvdata/gb18030.c
 
 $(tar-name)-$(branch-name)-$(snapshot-name).tar.bz2: Makefile branch.mk \
diff --git a/fedora/glibc.spec.in b/fedora/glibc.spec.in
index 9b338cb3dc..ab0007ca20 100644
--- a/fedora/glibc.spec.in
+++ b/fedora/glibc.spec.in
@@ -8,6 +8,11 @@
 %define buildxen 0
 %define xenpackage 0
 %endif
+%ifarch ppc ppc64
+%define buildpower6 1
+%else
+%define buildpower6 0
+%endif
 %define rtkaioarches %{ix86} x86_64 ia64 ppc ppc64 s390 s390x
 %define debuginfocommonarches %{ix86} alpha alphaev6 sparc sparcv9
 %define _unpackaged_files_terminate_build 0
@@ -821,7 +826,7 @@ GXX="g++ -m64"
 BuildFlags="$BuildFlags -DNDEBUG=1"
 EnableKernel="--enable-kernel=%{enablekernel}"
 echo "$GCC" > Gcc
-AddOns=`echo */configure | sed -e 's!/configure!!g;s!\(linuxthreads\|nptl\|rtkaio\)\( \|$\)!!g;s! \+$!!;s! !,!g;s!^!,!;/^,\*$/d'`
+AddOns=`echo */configure | sed -e 's!/configure!!g;s!\(linuxthreads\|nptl\|rtkaio\|powerpc-cpu\)\( \|$\)!!g;s! \+$!!;s! !,!g;s!^!,!;/^,\*$/d'`
 %ifarch %{rtkaioarches}
 AddOns=,rtkaio$AddOns
 %endif
@@ -850,6 +855,27 @@ build_nptl linuxnptl
 build_nptl linuxnptl-nosegneg -mno-tls-direct-seg-refs
 %endif
 
+%if %{buildpower6}
+(
+platform=`LD_SHOW_AUXV=1 /bin/true | sed -n 's/^AT_PLATFORM:[[:blank:]]*//p'`
+if [ "$platform" != power6 ]; then
+  mkdir -p power6emul/{lib,lib64}
+  $GCC -shared -O2 -fpic -o power6emul/%{_lib}/power6emul.so fedora/power6emul.c -Wl,-z,initfirst
+%ifarch ppc
+  echo '' | gcc -shared -nostdlib -O2 -fpic -m64 -o power6emul/lib64/power6emul.so -xc -
+%endif
+%ifarch ppc64
+  echo '' | gcc -shared -nostdlib -O2 -fpic -m32 -o power6emul/lib/power6emul.so -xc -
+%endif
+  export LD_PRELOAD=`pwd`/power6emul/\$LIB/power6emul.so
+fi
+AddOns=",powerpc-cpu$AddOns --with-cpu=power6"
+GCC="$GCC -mcpu=power6"
+GXX="$GXX -mcpu=power6"
+build_nptl linuxnptl-power6
+)
+%endif
+
 cd build-%{nptl_target_cpu}-linuxnptl
 $GCC -static -L. -Os ../fedora/glibc_post_upgrade.c -o glibc_post_upgrade.%{_target_cpu} \
     -DNO_SIZE_OPTIMIZATION \
@@ -907,6 +933,29 @@ ln -sf `basename $RPM_BUILD_ROOT/%{_lib}/rtkaio/$SubDir/librtkaio-*.so` $RPM_BUI
 cd ..
 %endif
 
+%if %{buildpower6}
+cd build-%{nptl_target_cpu}-linuxnptl-power6
+mkdir -p $RPM_BUILD_ROOT/%{_lib}/power6/
+cp -a libc.so $RPM_BUILD_ROOT/%{_lib}/power6/`basename $RPM_BUILD_ROOT/%{_lib}/libc-*.so`
+ln -sf `basename $RPM_BUILD_ROOT/%{_lib}/libc-*.so` $RPM_BUILD_ROOT/%{_lib}/power6/`basename $RPM_BUILD_ROOT/%{_lib}/libc.so.*`
+cp -a math/libm.so $RPM_BUILD_ROOT/%{_lib}/power6/`basename $RPM_BUILD_ROOT/%{_lib}/libm-*.so`
+ln -sf `basename $RPM_BUILD_ROOT/%{_lib}/libm-*.so` $RPM_BUILD_ROOT/%{_lib}/power6/`basename $RPM_BUILD_ROOT/%{_lib}/libm.so.*`
+cp -a nptl/libpthread.so $RPM_BUILD_ROOT/%{_lib}/power6/libpthread-%{version}.so
+pushd $RPM_BUILD_ROOT/%{_lib}/power6
+ln -sf libpthread-*.so `basename $RPM_BUILD_ROOT/%{_lib}/libpthread.so.*`
+popd
+cp -a rt/librt.so $RPM_BUILD_ROOT/%{_lib}/power6/`basename $RPM_BUILD_ROOT/%{_lib}/librt-*.so`
+ln -sf `basename $RPM_BUILD_ROOT/%{_lib}/librt-*.so` $RPM_BUILD_ROOT/%{_lib}/power6/`basename $RPM_BUILD_ROOT/%{_lib}/librt.so.*`
+cp -a nptl_db/libthread_db.so $RPM_BUILD_ROOT/%{_lib}/power6/`basename $RPM_BUILD_ROOT/%{_lib}/libthread_db-*.so`
+ln -sf `basename $RPM_BUILD_ROOT/%{_lib}/libthread_db-*.so` $RPM_BUILD_ROOT/%{_lib}/power6/`basename $RPM_BUILD_ROOT/%{_lib}/libthread_db.so.*`
+%ifarch %{rtkaioarches}
+mkdir -p $RPM_BUILD_ROOT/%{_lib}/rtkaio/power6
+cp -a rtkaio/librtkaio.so $RPM_BUILD_ROOT/%{_lib}/rtkaio/power6/`basename $RPM_BUILD_ROOT/%{_lib}/librt-*.so | sed s/librt-/librtkaio-/`
+ln -sf `basename $RPM_BUILD_ROOT/%{_lib}/rtkaio/power6/librtkaio-*.so` $RPM_BUILD_ROOT/%{_lib}/rtkaio/power6/`basename $RPM_BUILD_ROOT/%{_lib}/librt.so.*`
+%endif
+cd ..
+%endif
+
 # compatibility hack: this locale has vanished from glibc, but some other
 # programs are still using it. Normally we would handle it in the %pre
 # section but with glibc that is simply not an option
@@ -1156,6 +1205,19 @@ cd build-%{nptl_target_cpu}-linuxnptl-nosegneg
 ) | tee check.log || :
 cd ..
 %endif
+%if %{buildpower6}
+echo ====================TESTING -mcpu=power6=============
+cd build-%{nptl_target_cpu}-linuxnptl-power6
+( if [ -d ../power6emul ]; then
+    export LD_PRELOAD=`cd ../power6emul; pwd`/\$LIB/power6emul.so
+  fi
+  make -j$numprocs -k check PARALLELMFLAGS=-s 2>&1
+  sleep 10s
+  teepid="`ps -eo ppid,pid,command | awk '($1 == '${parent}' && $3 ~ /^tee/) { print $2 }'`"
+  [ -n "$teepid" ] && kill $teepid
+) | tee check.log || :
+cd ..
+%endif
 echo ====================TESTING DETAILS=================
 for i in `sed -n 's|^.*\*\*\* \[\([^]]*\.out\)\].*$|\1|p' build-*-linux*/check.log`; do
   echo =====$i=====
@@ -1373,6 +1435,12 @@ rm -f *.filelist*
 %dir /%{_lib}/rtkaio/%{nosegneg_subdir}
 %endif
 %endif
+%if %{buildpower6}
+%dir /%{_lib}/power6
+%ifarch %{rtkaioarches}
+%dir /%{_lib}/rtkaio/power6
+%endif
+%endif
 %ifarch s390x
 %dir /lib
 /lib/ld64.so.1
@@ -1397,6 +1465,7 @@ rm -f *.filelist*
 %if %{xenpackage}
 %files -f nosegneg.filelist xen
 %defattr(-,root,root)
+%dir /%{_lib}/%{nosegneg_subdir_base}
 %dir /%{_lib}/%{nosegneg_subdir}
 %endif
 
@@ -1456,7 +1525,9 @@ rm -f *.filelist*
 
 %changelog
 * Fri Sep 15 2006 Jakub Jelinek <jakub@redhat.com> 2.4.90-32
-- use just AT_PLATFORM and altivec AT_HWCAP bit for library selection
+- on ppc* use just AT_PLATFORM and altivec AT_HWCAP bit for library selection
+- fix lrintl and lroundl on ppc{,64}
+- use hidden visibility on fstatat{,64} and mknodat in libc_nonshared.a
 
 * Sun Sep 10 2006 Jakub Jelinek <jakub@redhat.com> 2.4.90-31
 - fix pthread_cond_{,timed}wait cancellation (BZ#3123)
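
Note: the %build hunk above detects the runtime platform with `LD_SHOW_AUXV=1 /bin/true` and only builds and preloads power6emul.so when the build machine is not itself a power6.  For illustration only (not part of the patch), the same AT_PLATFORM lookup could be done from C with getauxval(); that interface only appeared in glibc 2.16, which is why the spec shells out to LD_SHOW_AUXV instead:

#include <stdio.h>
#include <sys/auxv.h>   /* getauxval, glibc 2.16 and later */

int
main (void)
{
  /* AT_PLATFORM carries a pointer to a platform string such as "power6".  */
  const char *platform = (const char *) getauxval (AT_PLATFORM);
  printf ("AT_PLATFORM: %s\n", platform ? platform : "(unknown)");
  return 0;
}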
diff --git a/fedora/makepatch.awk b/fedora/makepatch.awk
index fba827de4b..fadf1c7e56 100644
--- a/fedora/makepatch.awk
+++ b/fedora/makepatch.awk
@@ -32,6 +32,7 @@
 	if ($2 ~ /.cvsignore$/ ||
 	    $2 ~ /^c_stubs/ ||
 	    $2 ~ /^rtkaio/ ||
+	    $2 ~ /^powerpc-cpu/ ||
 	    $2 ~ /^fedora/ ||
 	    $2 ~ /^localedata\/charmaps\/GB18030/ ||
 	    $2 ~ /^iconvdata\/gb18030\.c/) {
diff --git a/fedora/power6emul.c b/fedora/power6emul.c
new file mode 100644
index 0000000000..f1d0d20e0f
--- /dev/null
+++ b/fedora/power6emul.c
@@ -0,0 +1,242 @@
+/* Emulate power6 mf[tf]gpr and fri[zpmn] instructions.
+   Copyright (C) 2006 Red Hat, Inc.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+   This library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   It is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <signal.h>
+#include <stdio.h>
+
+extern double frip (double), friz (double), frin (double), frim (double);
+asm (".globl frip, friz, frin, frim\n.hidden frip, friz, frin, frim\n\t"
+#ifdef __powerpc64__
+	".section \".toc\",\"aw\"\n"
+"8:"	".tc FD_43300000_0[TC],0x4330000000000000\n"
+"9:"	".tc FD_3fe00000_0[TC],0x3fe0000000000000\n\t"
+	".previous\n\t"
+#else
+	".rodata\n\t"
+	".align 2\n"
+"8:"	".long 0x59800000\n"
+"9:"	".long 0x3f000000\n\t"
+	".previous\n\t"
+#endif
+	"# frip == ceil\n"
+"frip:"	"mffs    11\n\t"
+#ifdef __powerpc64__
+	"lfd     13,8b@toc(2)\n\t"
+#else
+	"mflr    11\n\t"
+	"bcl     20,31,1f\n"
+"1:"	"mflr    9\n\t"
+	"addis   9,9,8b-1b@ha\n\t"
+	"lfs     13,8b-1b@l(9)\n\t"
+	"mtlr    11\n\t"
+#endif
+	"fabs    0,1\n\t"
+	"fsub    12,13,13\n\t"
+	"fcmpu   7,0,13\n\t"
+	"fcmpu   6,1,12\n\t"
+	"bnllr-  7\n\t"
+	"mtfsfi  7,2\n\t"
+	"ble-    6,2f\n\t"
+	"fadd    1,1,13\n\t"
+	"fsub    1,1,13\n\t"
+	"fabs    1,1\n\t"
+	"mtfsf   0x01,11\n\t"
+	"blr\n"
+"2:"	"bge-    6,3f\n\t"
+	"fsub    1,1,13\n\t"
+	"fadd    1,1,13\n\t"
+	"fnabs   1,1\n"
+"3:"	"mtfsf   0x01,11\n\t"
+	"blr\n\t"
+	"# friz == trunc\n"
+"friz:"	"mffs    11\n\t"
+#ifdef __powerpc64__
+	"lfd     13,8b@toc(2)\n\t"
+#else
+	"mflr    11\n\t"
+	"bcl     20,31,1f\n"
+"1:"	"mflr    9\n\t"
+	"addis   9,9,8b-1b@ha\n\t"
+	"lfs     13,8b-1b@l(9)\n\t"
+	"mtlr    11\n\t"
+#endif
+	"fabs    0,1\n\t"
+	"fsub    12,13,13\n\t"
+	"fcmpu   7,0,13\n\t"
+	"fcmpu   6,1,12\n\t"
+	"bnllr-  7\n\t"
+	"mtfsfi  7,1\n\t"
+	"ble-    6,2f\n\t"
+	"fadd    1,1,13\n\t"
+	"fsub    1,1,13\n\t"
+	"fabs    1,1\n\t"
+	"mtfsf   0x01,11\n\t"
+	"blr\n"
+"2:"	"bge-    6,3f\n\t"
+	"fsub    1,1,13\n\t"
+	"fadd    1,1,13\n\t"
+	"fnabs   1,1\n"
+"3:"	"mtfsf   0x01,11\n\t"
+	"blr\n\t"
+	"# frin == round\n"
+"frin:"	"mffs    11\n\t"
+#ifdef __powerpc64__
+	"lfd     13,8b@toc(2)\n\t"
+#else
+	"mflr    11\n\t"
+	"bcl     20,31,1f\n"
+"1:"	"mflr    9\n\t"
+	"addis   9,9,8b-1b@ha\n\t"
+	"addi    9,9,8b-1b@l\n\t"
+	"mtlr    11\n\t"
+	"lfs     13,0(9)\n\t"
+#endif
+	"fabs    0,1\n\t"
+	"fsub    12,13,13\n\t"
+	"fcmpu   7,0,13\n\t"
+	"fcmpu   6,1,12\n\t"
+	"bnllr-  7\n\t"
+	"mtfsfi  7,1\n\t"
+#ifdef __powerpc64__
+	"lfd     10,9b@toc(2)\n\t"
+#else
+	"lfs     10,9b-8b(9)\n\t"
+#endif
+	"ble-    6,2f\n\t"
+	"fadd    1,1,10\n\t"
+	"fadd    1,1,13\n\t"
+	"fsub    1,1,13\n\t"
+	"fabs    1,1\n\t"
+	"mtfsf   0x01,11\n\t"
+	"blr\n"
+"2:"	"fsub    9,1,10\n\t"
+	"bge-    6,3f\n\t"
+	"fsub    1,9,13\n\t"
+	"fadd    1,1,13\n\t"
+	"fnabs   1,1\n"
+"3:"	"mtfsf   0x01,11\n\t"
+	"blr\n\t"
+	"# frim == floor\n"
+"frim:"	"mffs    11\n\t"
+#ifdef __powerpc64__
+	"lfd     13,8b@toc(2)\n\t"
+#else
+	"mflr    11\n\t"
+	"bcl     20,31,1f\n"
+"1:"	"mflr    9\n\t"
+	"addis   9,9,8b-1b@ha\n\t"
+	"lfs     13,8b-1b@l(9)\n\t"
+	"mtlr    11\n\t"
+#endif
+	"fabs    0,1\n\t"
+	"fsub    12,13,13\n\t"
+	"fcmpu   7,0,13\n\t"
+	"fcmpu   6,1,12\n\t"
+	"bnllr-  7\n\t"
+	"mtfsfi  7,3\n\t"
+	"ble-    6,2f\n\t"
+	"fadd    1,1,13\n\t"
+	"fsub    1,1,13\n\t"
+	"fabs    1,1\n\t"
+	"mtfsf   0x01,11\n\t"
+	"blr\n"
+"2:"	"bge-    6,3f\n\t"
+	"fsub    1,1,13\n\t"
+	"fadd    1,1,13\n\t"
+	"fnabs   1,1\n"
+"3:"	"mtfsf   0x01,11\n\t"
+	"blr\n");
+
+static void
+catch_sigill (int signal, struct sigcontext *ctx)
+{
+  unsigned int insn = *(unsigned int *) (ctx->regs->nip);
+#ifdef __powerpc64__
+  if ((insn & 0xfc1f07ff) == 0x7c0005be) /* mftgpr */
+    {
+      unsigned long *regs = (unsigned long *) ctx->regs;
+      unsigned fpr = (insn >> 11) & 0x1f;
+      unsigned gpr = (insn >> 21) & 0x1f;
+      regs[gpr] = regs[fpr + 0x30];
+      ctx->regs->nip += 4;
+      return;
+    }
+  if ((insn & 0xfc1f07ff) == 0x7c0004be) /* mffgpr */
+    {
+      unsigned long *regs = (unsigned long *) ctx->regs;
+      unsigned fpr = (insn >> 21) & 0x1f;
+      unsigned gpr = (insn >> 11) & 0x1f;
+      regs[fpr + 0x30] = regs[gpr];
+      ctx->regs->nip += 4;
+      return;
+    }
+#endif
+  if ((insn & 0xfc1f073f) == 0xfc000310) /* fri[pznm] */
+    {
+#ifdef __powerpc64__
+      double *regs = (double *) (((char *) ctx->regs) + 0x30 * 8);
+      unsigned int *fpscr = (unsigned int *) (((char *) ctx->regs) + 0x50 * 8 + 4);
+#else
+      double *regs = (double *) (((char *) ctx->regs) + 0x30 * 4);
+      unsigned int *fpscr = (unsigned int *) (((char *) ctx->regs) + 0x30 * 4 + 0x20 * 8 + 4);
+#endif
+      unsigned dest = (insn >> 21) & 0x1f;
+      unsigned src = (insn >> 11) & 0x1f;
+      switch (insn & 0xc0)
+	{
+	case 0:
+	  regs[dest] = frin (regs[src]);
+	  break;
+	case 0x40:
+	  regs[dest] = friz (regs[src]);
+	  break;
+	case 0x80:
+	  regs[dest] = frip (regs[src]);
+	  break;
+	case 0xc0:
+	  regs[dest] = frim (regs[src]);
+	  break;
+	}
+      /* Update raised exceptions.  */
+      union { unsigned int i[2]; double d; } u;
+      asm volatile ("mffs %0" : "=f" (u.d));
+      u.i[1] &= 0xfffe0000; /* Is this correct?  */
+      *fpscr |= u.i[1];
+      ctx->regs->nip += 4;
+      return;
+    }
+
+  struct sigaction sa;
+  sa.sa_handler = SIG_DFL;
+  sigemptyset (&sa.sa_mask);
+  sa.sa_flags = 0;
+  sigaction (signal, &sa, NULL);
+  raise (signal);
+}
+
+static void
+__attribute__ ((constructor))
+install_handler (void)
+{
+  struct sigaction sa;
+  sa.sa_handler = (void *) catch_sigill;
+  sigemptyset (&sa.sa_mask);
+  sa.sa_flags = SA_RESTART;
+  sigaction (SIGILL, &sa, NULL);
+}
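
Note: power6emul.c is built as a shared object and pushed into LD_PRELOAD by the spec file above so that the -mcpu=power6 glibc build can run its test suite on pre-POWER6 hardware: the SIGILL handler decodes mftgpr/mffgpr and fri[zpmn] and emulates them in software.  A minimal smoke test of the emulation might look like the sketch below (an assumption for illustration, not part of the patch; compile with -mcpu=power6 so the assembler accepts the frin mnemonic):

#include <stdio.h>

int
main (void)
{
  double in = 2.5, out;
  /* frin (floating round to integer nearest) traps with SIGILL on
     pre-POWER6 CPUs and is emulated by the preloaded handler above.  */
  asm volatile ("frin %0,%1" : "=f" (out) : "f" (in));
  printf ("frin (%g) = %g\n", in, out);
  return 0;
}

Run it as LD_PRELOAD=./power6emul.so ./a.out on older hardware; on a real power6 the instruction executes natively and the preload is unnecessary.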
diff --git a/powerpc-cpu/ChangeLog b/powerpc-cpu/ChangeLog
new file mode 100644
index 0000000000..a60cf322fd
--- /dev/null
+++ b/powerpc-cpu/ChangeLog
@@ -0,0 +1,66 @@
+2006-07-06  Steven Munroe  <sjmunroe@us.ibm.com>
+
+	* sysdeps/powerpc/powerpc64/power6/memcpy.S: New file.
+
+2006-06-19  Steven Munroe  <sjmunroe@us.ibm.com>
+
+	* sysdeps/powerpc/powerpc32/power6/Implies: New file.
+	* sysdeps/powerpc/powerpc32/power6/fpu/Implies: New file.
+	* sysdeps/powerpc/powerpc64/power6/Implies: New file.
+	* sysdeps/powerpc/powerpc64/power6/fpu/Implies: New file.
+	* sysdeps/unix/sysv/linux/powerpc/powerpc32/power6/fpu/Implies:
+	New file.
+	* sysdeps/unix/sysv/linux/powerpc/powerpc64/power6/fpu/Implies:
+	New file.
+
+2006-06-15  Steven Munroe  <sjmunroe@us.ibm.com>
+
+	* sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S: New file.
+	* sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S: New file.
+	* sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S: New file.
+	* sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S: New file.
+	* sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S: New file.
+	* sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S: New file.
+	* sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S: New file.
+	* sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S: New file.
+	* sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S: New file.
+	* sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S: New file.
+	* sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S: New file.
+	* sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S: New file.
+	* sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S: New file.
+	* sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S: New file.
+	* sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S: New file.
+	* sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S: New file.
+	* sysdeps/unix/sysv/linux/powerpc/powerpc64/power5+/fpu/Implies:
+	New file.
+
+2006-03-20  Steven Munroe  <sjmunroe@us.ibm.com>
+
+	* Makefile: New file.
+	* README: New file.
+	* configure: New file.
+	* sysdeps/powerpc/powerpc32/970/Implies: New file.
+	* sysdeps/powerpc/powerpc32/970/fpu/Implies: New file.
+	* sysdeps/powerpc/powerpc32/power4/fpu/Implies: New file.
+	* sysdeps/powerpc/powerpc32/power4/memcmp.S: New file.
+	* sysdeps/powerpc/powerpc32/power4/memcpy.S: New file.
+	* sysdeps/powerpc/powerpc32/power4/memset.S: New file.
+	* sysdeps/powerpc/powerpc32/power4/strncmp.S: New file.
+	* sysdeps/powerpc/powerpc32/power5/Implies: New file.
+	* sysdeps/powerpc/powerpc32/power5/fpu/Implies: New file.
+	* sysdeps/powerpc/powerpc32/power5+/Implies: New file.
+	* sysdeps/powerpc/powerpc32/power5+/fpu/Implies: New file.
+	* sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrint.S: New file.
+	* sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrintf.S: New file.
+	* sysdeps/powerpc/powerpc64/970/Implies: New file.
+	* sysdeps/powerpc/powerpc64/power4/memcmp.S: New file.
+	* sysdeps/powerpc/powerpc64/power4/memcpy.S: New file.
+	* sysdeps/powerpc/powerpc64/power4/strncmp.S: New file.
+	* sysdeps/powerpc/powerpc64/power5/Implies: New file.
+	* sysdeps/unix/sysv/linux/powerpc/powerpc32/970/fpu/Implies: New file.
+	* sysdeps/unix/sysv/linux/powerpc/powerpc32/power4/fpu/Implies:
+	New file.
+	* sysdeps/unix/sysv/linux/powerpc/powerpc32/power5/fpu/Implies:
+	New file.
+	* sysdeps/unix/sysv/linux/powerpc/powerpc32/power5+/fpu/Implies:
+	New file.
diff --git a/powerpc-cpu/README b/powerpc-cpu/README
new file mode 100644
index 0000000000..a83a4de24e
--- /dev/null
+++ b/powerpc-cpu/README
@@ -0,0 +1,98 @@
+Powerpc-cpu add-on V0.1
+
+What is it:
+
+The powerpc-cpu directory is an add-on for the GNU C Library (glibc).
+It provides additional platform/cpu specific optimizations when the
+--with-cpu= configure option is specified.  Specifying
+--with-cpu=<cpu_type> at glibc configure time inserts the -mcpu=<cpu_type>
+option for all 'C' compiles in the glibc make.  It also inserts <cpu_type>
+specific directories into the source search path for glibc.  Source from
+these <cpu_type> specific directories overrides header or code source
+from glibc.
+
+How do I use it:
+
+To build glibc with this add-on you need to configure glibc with both the
+--enable-add-ons=powerpc-cpu,.. and --with-cpu=<cpu_type> options.  The
+add-on source can be a direct subdirectory of glibc (e.g.
+./libc/powerpc-cpu), which allows the short name, or a separate directory
+outside glibc, which requires a fully qualified path (e.g.
+--enable-add-ons=$HOME/powerpc-cpu,..).
+
+If you specify multiple add-ons, powerpc-cpu should be listed first, to
+ensure that its optimizations can override the corresponding source files
+from mainline glibc.  For example: "--enable-add-ons=powerpc-cpu,nptl".
+
+How do I extend it:
+
+The optimized source code is found in the sysdeps/powerpc/powerpc32 and
+sysdeps/powerpc/powerpc64 subdirectories.  These directories support the 32-
+and 64-bit ELF ABIs of the powerpc platform.  The next directory level is
+"<cpu_type>" where the names match supported gcc -mcpu= options.  When
+--with-cpu=<cpu_type> is specified, the <cpu_type> must match one of the
+directories at this level.
+
+The mechanism is generalized and can be extended to any "cpu-type" that is
+accepted by gcc's -mcpu= option.  To support another "cpu_type" simply add a
+directory of the form:
+
+./powerpc-cpu/sysdeps/powerpc/powerpc32/<cpu_type>
+
+and for 64-bit implementations also:
+
+./powerpc-cpu/sysdeps/powerpc/powerpc64/<cpu_type>
+
+See the GCC online documentation <http://gcc.gnu.org/onlinedocs> 
+3.17.24 "IBM RS/6000 and PowerPC Options" for the complete list of -mcpu=
+options.
+
+Currently supported cpu_types are:
+
+power4
+power5
+power5+
+power6
+970
+
+The --with-cpu=<cpu_type> option requires that the corresponding directory
+./powerpc-cpu/sysdeps/powerpc/powerpc[32|64]/<cpu_type> exists.  This
+directory can be empty, in which case you still get the benefit of
+-mcpu=<cpu_type>, which implies -mtune=<cpu_type>.  To override a source
+implementation from glibc mainline, simply provide an alternative
+implementation with the same name in the appropriate ./<cpu_type>
+subdirectory.
+
+So far 970, power4, power5, power5+, and power6 are enabled with specific
+assembler implementations and have corresponding directories for both
+powerpc32 and powerpc64.  For 64-bit <cpu_types>, implementations of the
+32-bit ABI can share code exploiting 64-bit instructions from the generic
+cpu_type powerpc64 (directory sysdeps/powerpc/powerpc32/powerpc64).
+Specifically, an "Implies" file can be included in any
+sysdeps/powerpc/powerpc32/<cpu_type> directory where <cpu_type> is a 64-bit
+processor.  This is useful when the implementation wants to exploit 64-bit
+instructions in 32-bit mode.
+
+Special note: While this add-on is currently focused on powerpc, the
+mechanism is general enough to be used by any platform which also supports
+gcc's -mcpu= option.  Simply add the appropriate ./sysdeps/<target>/<cpu_type>
+directories.
+
+Special note: Currently the "970" implementation simply implies the power4
+implementation.  The internal micro-architecture of the 970 chip is based on
+the power4 design, and scheduling for the integer and floating point
+units is the same for power4 and 970.  Any 970-unique code would be specific
+to Altivec/VMX exploitation, for which we don't have any examples yet.
+
+Special note: The directory search order has changed for glibc-2.4. So if your
+optimization needs to override source files in mainline
+./sysdeps/powerpc/powerpc[32|64]/fpu, additional tricks are needed.
+
+Normally ./sysdeps/powerpc/powerpc[32|64]/fpu from mainline will be searched
+before ./powerpc-cpu/sysdeps/powerpc/powerpc[32|64]/<cpu_type>/fpu.  However,
+./powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc[32|64]/<cpu_type>/fpu
+will be searched before either.  So, as a workaround, add an "Implies" file
+containing "powerpc/powerpc[32|64]/<cpu_type>/fpu" in
+./powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc[32|64]/<cpu_type>/fpu.
+You will need to repeat this for each <cpu_type> that needs to
+override mainline.
+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/970/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc32/970/Implies
new file mode 100644
index 0000000000..4e3a983426
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/970/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power4
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/970/fpu/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc32/970/fpu/Implies
new file mode 100644
index 0000000000..128f8aadcb
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/970/fpu/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/powerpc64/fpu
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/fpu/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/fpu/Implies
new file mode 100644
index 0000000000..128f8aadcb
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/fpu/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/powerpc64/fpu
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memcmp.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memcmp.S
new file mode 100644
index 0000000000..4715302739
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memcmp.S
@@ -0,0 +1,985 @@
+/* Optimized memcmp implementation for PowerPC32.
+   Copyright (C) 2003, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* int [r3] memcmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5])  */
+
+EALIGN (BP_SYM(memcmp), 4, 0)
+	CALL_MCOUNT
+
+#define rTMP	r0
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rN	r5	/* max string length */
+#define rWORD1	r6	/* current word in s1 */
+#define rWORD2	r7	/* current word in s2 */
+#define rWORD3	r8	/* next word in s1 */
+#define rWORD4	r9	/* next word in s2 */
+#define rWORD5	r10	/* next word in s1 */
+#define rWORD6	r11	/* next word in s2 */
+#define rBITDIF	r12	/* bits that differ in s1 & s2 words */
+#define rWORD7	r30	/* next word in s1 */
+#define rWORD8	r31	/* next word in s2 */
+
+	xor	rTMP, rSTR2, rSTR1
+	cmplwi	cr6, rN, 0
+	cmplwi	cr1, rN, 12
+	clrlwi.	rTMP, rTMP, 30
+	clrlwi	rBITDIF, rSTR1, 30
+	cmplwi	cr5, rBITDIF, 0
+	beq-	cr6, L(zeroLength)
+	dcbt	0,rSTR1
+	dcbt	0,rSTR2
+/* If less than 8 bytes or not aligned, use the unaligned
+   byte loop.  */
+	blt	cr1, L(bytealigned)
+        stwu    1,-64(1)
+	cfi_adjust_cfa_offset(64)
+        stw     r31,48(1)	
+	cfi_offset(31,(48-64))
+        stw     r30,44(1)	
+	cfi_offset(30,(44-64))
+	bne	L(unaligned)
+/* At this point we know both strings have the same alignment and the
+   compare length is at least 8 bytes.  rBITDIF contains the low order
+   2 bits of rSTR1 and cr5 contains the result of the logical compare
+   of rBITDIF to 0.  If rBITDIF == 0 then we are already word 
+   aligned and can perform the word aligned loop.
+  
+   Otherwise we know the two strings have the same alignment (but not
+   yet word aligned).  So we force the string addresses to the next lower
+   word boundary and special case this first word using shift left to
+   eliminate bits preceding the first byte.  Since we want to join the
+   normal (word aligned) compare loop, starting at the second word,
+   we need to adjust the length (rN) and special case the loop
+   versioning for the first word.  This ensures that the loop count is
+   correct and the first word (shifted) is in the expected register pair. */
+	.align 4
+L(samealignment):
+	clrrwi	rSTR1, rSTR1, 2
+	clrrwi	rSTR2, rSTR2, 2
+	beq	cr5, L(Waligned)
+	add	rN, rN, rBITDIF
+	slwi	r11, rBITDIF, 3
+	srwi	rTMP, rN, 4	 /* Divide by 16 */
+	andi.	rBITDIF, rN, 12  /* Get the word remainder */
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+	cmplwi	cr1, rBITDIF, 8
+	cmplwi	cr7, rN, 16
+	clrlwi	rN, rN, 30
+	beq	L(dPs4)
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	bgt	cr1, L(dPs3)
+	beq	cr1, L(dPs2)
+
+/* Remainder is 4 */
+	.align 3
+L(dsP1):
+	slw	rWORD5, rWORD1, r11
+	slw	rWORD6, rWORD2, r11
+	cmplw	cr5, rWORD5, rWORD6
+	blt	cr7, L(dP1x)
+/* Do something useful in this cycle since we have to branch anyway.  */
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+	cmplw	cr0, rWORD1, rWORD2
+	b	L(dP1e)
+/* Remainder is 8 */
+	.align 4
+L(dPs2):
+	slw	rWORD5, rWORD1, r11
+	slw	rWORD6, rWORD2, r11
+	cmplw	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP2x)
+/* Do something useful in this cycle since we have to branch anyway.  */
+	lwz	rWORD7, 4(rSTR1)
+	lwz	rWORD8, 4(rSTR2)
+	cmplw	cr5, rWORD7, rWORD8
+	b	L(dP2e)
+/* Remainder is 12 */
+	.align 4
+L(dPs3):
+	slw	rWORD3, rWORD1, r11
+	slw	rWORD4, rWORD2, r11
+	cmplw	cr1, rWORD3, rWORD4
+	b	L(dP3e)
+/* Count is a multiple of 16, remainder is 0 */
+	.align 4
+L(dPs4):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	slw	rWORD1, rWORD1, r11
+	slw	rWORD2, rWORD2, r11
+	cmplw	cr0, rWORD1, rWORD2
+	b	L(dP4e)
+
+/* At this point we know both strings are word aligned and the
+   compare length is at least 8 bytes.  */
+	.align 4
+L(Waligned):
+	andi.	rBITDIF, rN, 12  /* Get the word remainder */
+	srwi	rTMP, rN, 4	 /* Divide by 16 */
+	cmplwi	cr1, rBITDIF, 8
+	cmplwi	cr7, rN, 16
+	clrlwi	rN, rN, 30
+	beq	L(dP4)
+	bgt	cr1, L(dP3)
+	beq	cr1, L(dP2)
+		
+/* Remainder is 4 */
+	.align 4
+L(dP1):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+/* Normally we'd use rWORD7/rWORD8 here, but since we might exit early
+   (8-15 byte compare), we want to use only volatile registers.  This
+   means we can avoid restoring non-volatile registers since we did not
+   change any on the early exit path.  The key here is the non-early
+   exit path only cares about the condition code (cr5), not about which 
+   register pair was used.  */
+	lwz	rWORD5, 0(rSTR1)
+	lwz	rWORD6, 0(rSTR2)
+	cmplw	cr5, rWORD5, rWORD6
+	blt	cr7, L(dP1x)
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+	cmplw	cr0, rWORD1, rWORD2
+L(dP1e):
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+	cmplw	cr1, rWORD3, rWORD4
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+	bne	cr0, L(dLcr0)
+	
+	lwzu	rWORD7, 16(rSTR1)
+	lwzu	rWORD8, 16(rSTR2)
+	bne	cr1, L(dLcr1)
+	cmplw	cr5, rWORD7, rWORD8
+	bdnz	L(dLoop)
+	bne	cr6, L(dLcr6)
+        lwz     r30,44(1)
+        lwz     r31,48(1)
+	.align 3
+L(dP1x):
+	slwi.	r12, rN, 3
+	bne	cr5, L(dLcr5)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+        lwz     1,0(1)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+		
+/* Remainder is 8 */
+	.align 4
+L(dP2):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	lwz	rWORD5, 0(rSTR1)
+	lwz	rWORD6, 0(rSTR2)
+	cmplw	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP2x)
+	lwz	rWORD7, 4(rSTR1)
+	lwz	rWORD8, 4(rSTR2)
+	cmplw	cr5, rWORD7, rWORD8
+L(dP2e):
+	lwz	rWORD1, 8(rSTR1)
+	lwz	rWORD2, 8(rSTR2)
+	cmplw	cr0, rWORD1, rWORD2
+	lwz	rWORD3, 12(rSTR1)
+	lwz	rWORD4, 12(rSTR2)
+	cmplw	cr1, rWORD3, rWORD4
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+	bne	cr6, L(dLcr6)
+	bne	cr5, L(dLcr5)
+	b	L(dLoop2)
+/* Again we are on an early exit path (16-23 byte compare); we want to
+   use only volatile registers and avoid restoring non-volatile
+   registers.  */
+	.align 4
+L(dP2x):
+	lwz	rWORD3, 4(rSTR1)
+	lwz	rWORD4, 4(rSTR2)
+	cmplw	cr5, rWORD3, rWORD4
+	slwi.	r12, rN, 3
+	bne	cr6, L(dLcr6)
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+	bne	cr5, L(dLcr5)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+        lwz     1,0(1)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+		
+/* Remainder is 12 */
+	.align 4
+L(dP3):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	lwz	rWORD3, 0(rSTR1)
+	lwz	rWORD4, 0(rSTR2)
+	cmplw	cr1, rWORD3, rWORD4
+L(dP3e):
+	lwz	rWORD5, 4(rSTR1)
+	lwz	rWORD6, 4(rSTR2)
+	cmplw	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP3x)
+	lwz	rWORD7, 8(rSTR1)
+	lwz	rWORD8, 8(rSTR2)
+	cmplw	cr5, rWORD7, rWORD8
+	lwz	rWORD1, 12(rSTR1)
+	lwz	rWORD2, 12(rSTR2)
+	cmplw	cr0, rWORD1, rWORD2
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+	bne	cr1, L(dLcr1)
+	bne	cr6, L(dLcr6)
+	b	L(dLoop1)
+/* Again we are on an early exit path (24-31 byte compare); we want to
+   use only volatile registers and avoid restoring non-volatile
+   registers.  */
+	.align 4
+L(dP3x):
+	lwz	rWORD1, 8(rSTR1)
+	lwz	rWORD2, 8(rSTR2)
+	cmplw	cr5, rWORD1, rWORD2
+	slwi.	r12, rN, 3
+	bne	cr1, L(dLcr1)
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+	bne	cr6, L(dLcr6)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	bne	cr5, L(dLcr5)
+        lwz     1,0(1)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+	
+/* Count is a multiple of 16, remainder is 0 */
+	.align 4
+L(dP4):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+	cmplw	cr0, rWORD1, rWORD2
+L(dP4e):
+	lwz	rWORD3, 4(rSTR1)
+	lwz	rWORD4, 4(rSTR2)
+	cmplw	cr1, rWORD3, rWORD4
+	lwz	rWORD5, 8(rSTR1)
+	lwz	rWORD6, 8(rSTR2)
+	cmplw	cr6, rWORD5, rWORD6
+	lwzu	rWORD7, 12(rSTR1)
+	lwzu	rWORD8, 12(rSTR2)
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr0, L(dLcr0)
+	bne	cr1, L(dLcr1)
+	bdz-	L(d24)		/* Adjust CTR as we start with +4 */
+/* This is the primary loop */
+	.align 4
+L(dLoop):
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(dLcr6)
+L(dLoop1):
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+L(dLoop2):
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr0, L(dLcr0)
+L(dLoop3):
+	lwzu	rWORD7, 16(rSTR1)
+	lwzu	rWORD8, 16(rSTR2)
+	bne-	cr1, L(dLcr1)
+	cmplw	cr0, rWORD1, rWORD2
+	bdnz+	L(dLoop)	
+	
+L(dL4):
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(dLcr6)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+	cmplw	cr5, rWORD7, rWORD8
+L(d44):
+	bne	cr0, L(dLcr0)
+L(d34):
+	bne	cr1, L(dLcr1)
+L(d24):
+	bne	cr6, L(dLcr6)
+L(d14):
+	slwi.	r12, rN, 3
+	bne	cr5, L(dLcr5) 
+L(d04):
+        lwz     r30,44(1)
+        lwz     r31,48(1)
+        lwz     1,0(1)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	beq	L(zeroLength)
+/* At this point we have a remainder of 1 to 3 bytes to compare.  Since
+   we are aligned it is safe to load the whole word, and use
+   shift right to eliminate bits beyond the compare length. */ 
+L(d00):
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2) 
+	srw	rWORD1, rWORD1, rN
+	srw	rWORD2, rWORD2, rN
+        cmplw   rWORD1,rWORD2
+        li      rRTN,0
+        beqlr
+        li      rRTN,1
+        bgtlr
+        li      rRTN,-1
+        blr
+
+	.align 4
+L(dLcr0):
+        lwz     r30,44(1)
+        lwz     r31,48(1)
+	li	rRTN, 1
+        lwz     1,0(1)
+	bgtlr	cr0
+	li	rRTN, -1
+	blr
+	.align 4
+L(dLcr1):
+        lwz     r30,44(1)
+        lwz     r31,48(1)
+	li	rRTN, 1
+        lwz     1,0(1)
+	bgtlr	cr1
+	li	rRTN, -1
+	blr
+	.align 4
+L(dLcr6):
+        lwz     r30,44(1)
+        lwz     r31,48(1)
+	li	rRTN, 1
+        lwz     1,0(1)
+	bgtlr	cr6
+	li	rRTN, -1
+	blr
+	.align 4
+L(dLcr5):
+        lwz     r30,44(1)
+        lwz     r31,48(1)
+L(dLcr5x):
+	li	rRTN, 1
+        lwz     1,0(1)
+	bgtlr	cr5
+	li	rRTN, -1
+	blr
+	
+	.align 4
+L(bytealigned):
+	cfi_adjust_cfa_offset(-64)
+	mtctr   rN	/* Power4 wants mtctr 1st in dispatch group */
+
+/* We need to prime this loop.  This loop is swing modulo scheduled
+   to avoid pipe delays.  The dependent instruction latencies (load to 
+   compare to conditional branch) are 2 to 3 cycles.  In this loop each
+   dispatch group ends in a branch and takes 1 cycle.  Effectively
+   the first iteration of the loop only serves to load operands, and
+   branches based on compares are delayed until the next iteration.
+
+   So we must precondition some registers and condition codes so that
+   we don't exit the loop early on the first iteration.  */
+   
+	lbz	rWORD1, 0(rSTR1)
+	lbz	rWORD2, 0(rSTR2)
+	bdz-	L(b11)
+	cmplw	cr0, rWORD1, rWORD2
+	lbz	rWORD3, 1(rSTR1)
+	lbz	rWORD4, 1(rSTR2)
+	bdz-	L(b12)
+	cmplw	cr1, rWORD3, rWORD4
+	lbzu	rWORD5, 2(rSTR1)
+	lbzu	rWORD6, 2(rSTR2)
+	bdz-	L(b13)
+	.align 4
+L(bLoop):
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	bne-	cr0, L(bLcr0)
+
+	cmplw	cr6, rWORD5, rWORD6
+	bdz-	L(b3i)
+	
+	lbzu	rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	bne-	cr1, L(bLcr1)
+
+	cmplw	cr0, rWORD1, rWORD2
+	bdz-	L(b2i)
+
+	lbzu	rWORD5, 1(rSTR1)
+	lbzu	rWORD6, 1(rSTR2)
+	bne-	cr6, L(bLcr6)
+
+	cmplw	cr1, rWORD3, rWORD4
+	bdnz+	L(bLoop)
+	
+/* We speculatively load bytes before we have tested the previous
+   bytes.  But we must avoid overrunning the length (in the ctr) to
+   prevent these speculative loads from causing a segfault.  In that
+   case the loop will exit early (before all pending bytes are
+   tested), and we must complete the pending operations
+   before returning.  */
+L(b1i):
+	bne-	cr0, L(bLcr0)
+	bne-	cr1, L(bLcr1)
+	b	L(bx56)
+	.align 4
+L(b2i):
+	bne-	cr6, L(bLcr6)
+	bne-	cr0, L(bLcr0)
+	b	L(bx34)
+	.align 4
+L(b3i):
+	bne-	cr1, L(bLcr1)
+	bne-	cr6, L(bLcr6)
+	b	L(bx12)
+	.align 4
+L(bLcr0):
+	li	rRTN, 1
+	bgtlr	cr0
+	li	rRTN, -1
+	blr
+L(bLcr1):
+	li	rRTN, 1
+	bgtlr	cr1
+	li	rRTN, -1
+	blr
+L(bLcr6):
+	li	rRTN, 1
+	bgtlr	cr6
+	li	rRTN, -1
+	blr
+
+L(b13):
+	bne-	cr0, L(bx12)
+	bne-	cr1, L(bx34)
+L(bx56):
+	sub	rRTN, rWORD5, rWORD6
+	blr
+	nop
+L(b12):
+	bne-	cr0, L(bx12)
+L(bx34):	
+	sub	rRTN, rWORD3, rWORD4
+	blr
+
+L(b11):
+L(bx12):
+	sub	rRTN, rWORD1, rWORD2
+	blr
+
+	.align 4 
+L(zeroLengthReturn):
+
+L(zeroLength):
+	li	rRTN, 0
+	blr
+
+	cfi_adjust_cfa_offset(64)
+	.align 4
+/* At this point we know the strings have different alignment and the
+   compare length is at least 8 bytes.  rBITDIF contains the low order
+   2 bits of rSTR1 and cr5 contains the result of the logical compare
+   of rBITDIF to 0.  If rBITDIF == 0 then rSTR1 is word aligned and we can
+   perform the Wunaligned loop.
+
+   Otherwise we know that rSTR1 is not already word aligned.
+   So we can force the string addresses to the next lower word
+   boundary and special case this first word using shift left to
+   eliminate bits preceding the first byte.  Since we want to join the
+   normal (Wunaligned) compare loop, starting at the second word,
+   we need to adjust the length (rN) and special case the loop
+   versioning for the first W.  This ensures that the loop count is
+   correct and the first W (shifted) is in the expected register pair.  */
+#define rSHL		r29	/* Unaligned shift left count.  */
+#define rSHR		r28	/* Unaligned shift right count.  */
+#define rB		r27	/* Left rotation temp for rWORD2.  */
+#define rD		r26	/* Left rotation temp for rWORD4.  */
+#define rF		r25	/* Left rotation temp for rWORD6.  */
+#define rH		r24	/* Left rotation temp for rWORD8.  */
+#define rA		r0	/* Right rotation temp for rWORD2.  */
+#define rC		r12	/* Right rotation temp for rWORD4.  */
+#define rE		r0	/* Right rotation temp for rWORD6.  */
+#define rG		r12	/* Right rotation temp for rWORD8.  */
+L(unaligned):
+	stw     r29,40(r1)	
+	cfi_offset(r29,(40-64))	
+	clrlwi	rSHL, rSTR2, 30
+        stw     r28,36(r1)	
+	cfi_offset(r28,(36-64))
+	beq	cr5, L(Wunaligned)
+        stw     r27,32(r1)	
+	cfi_offset(r27,(32-64))
+/* Adjust the logical start of rSTR2 to compensate for the extra bits
+   in the 1st rSTR1 W.  */
+	sub	r27, rSTR2, rBITDIF
+/* But do not attempt to address the W before that W that contains
+   the actual start of rSTR2.  */
+	clrrwi	rSTR2, rSTR2, 2
+        stw     r26,28(r1)	
+	cfi_offset(r26,(28-64))
+/* Compute the left/right shift counts for the unaligned rSTR2,
+   compensating for the logical (W aligned) start of rSTR1.  */ 
+	clrlwi	rSHL, r27, 30
+	clrrwi	rSTR1, rSTR1, 2	
+        stw     r25,24(r1)	
+	cfi_offset(r25,(24-64))
+	slwi	rSHL, rSHL, 3
+	cmplw	cr5, r27, rSTR2
+	add	rN, rN, rBITDIF
+	slwi	r11, rBITDIF, 3
+        stw     r24,20(r1)	
+	cfi_offset(r24,(20-64))
+	subfic	rSHR, rSHL, 32
+	srwi	rTMP, rN, 4      /* Divide by 16 */
+	andi.	rBITDIF, rN, 12  /* Get the W remainder */
+/* We normally need to load 2 Ws to start the unaligned rSTR2, but in
+   this special case those bits may be discarded anyway.  Also we
+   must avoid loading a W where none of the bits are part of rSTR2 as
+   this may cross a page boundary and cause a page fault.  */
+	li	rWORD8, 0
+	blt	cr5, L(dus0)
+	lwz	rWORD8, 0(rSTR2)
+	la	rSTR2, 4(rSTR2)
+	slw	rWORD8, rWORD8, rSHL
+
+L(dus0):
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+	cmplwi	cr1, rBITDIF, 8
+	cmplwi	cr7, rN, 16
+	srw	rG, rWORD2, rSHR
+	clrlwi	rN, rN, 30
+	beq	L(duPs4)
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	or	rWORD8, rG, rWORD8
+	bgt	cr1, L(duPs3)
+	beq	cr1, L(duPs2)
+
+/* Remainder is 4 */
+	.align 4
+L(dusP1):
+	slw	rB, rWORD2, rSHL
+	slw	rWORD7, rWORD1, r11
+	slw	rWORD8, rWORD8, r11
+	bge	cr7, L(duP1e)
+/* At this point we exit early with the first word compare
+   complete and remainder of 0 to 3 bytes.  See L(du14) for details on
+   how we handle the remaining bytes.  */
+	cmplw	cr5, rWORD7, rWORD8
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	lwz	rWORD2, 4(rSTR2)
+	srw	rA, rWORD2, rSHR
+	b	L(dutrim)
+/* Remainder is 8 */
+	.align 4
+L(duPs2):
+	slw	rH, rWORD2, rSHL
+	slw	rWORD5, rWORD1, r11
+	slw	rWORD6, rWORD8, r11
+	b	L(duP2e)
+/* Remainder is 12 */
+	.align 4
+L(duPs3):
+	slw	rF, rWORD2, rSHL
+	slw	rWORD3, rWORD1, r11
+	slw	rWORD4, rWORD8, r11
+	b	L(duP3e)
+/* Count is a multiple of 16, remainder is 0 */
+	.align 4
+L(duPs4):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	or	rWORD8, rG, rWORD8
+	slw	rD, rWORD2, rSHL
+	slw	rWORD1, rWORD1, r11
+	slw	rWORD2, rWORD8, r11
+	b	L(duP4e)
+
+/* At this point we know rSTR1 is word aligned and the
+   compare length is at least 8 bytes.  */
+	.align 4
+L(Wunaligned):
+        stw     r27,32(r1)	
+	cfi_offset(r27,(32-64))
+	clrrwi	rSTR2, rSTR2, 2
+        stw     r26,28(r1)	
+	cfi_offset(r26,(28-64))
+	srwi	rTMP, rN, 4	 /* Divide by 16 */
+        stw     r25,24(r1)	
+	cfi_offset(r25,(24-64))
+	andi.	rBITDIF, rN, 12  /* Get the W remainder */
+        stw     r24,20(r1)	
+	cfi_offset(r24,(20-64))
+	slwi	rSHL, rSHL, 3
+	lwz	rWORD6, 0(rSTR2)
+	lwzu	rWORD8, 4(rSTR2)
+	cmplwi	cr1, rBITDIF, 8
+	cmplwi	cr7, rN, 16
+	clrlwi	rN, rN, 30
+	subfic	rSHR, rSHL, 32
+	slw	rH, rWORD6, rSHL
+	beq	L(duP4)
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	bgt	cr1, L(duP3)
+	beq	cr1, L(duP2)
+		
+/* Remainder is 4 */
+	.align 4
+L(duP1):
+	srw	rG, rWORD8, rSHR
+	lwz	rWORD7, 0(rSTR1)
+	slw	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	blt	cr7, L(duP1x)
+L(duP1e):
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+	cmplw	cr5, rWORD7, rWORD8
+	srw	rA, rWORD2, rSHR
+	slw	rD, rWORD2, rSHL
+	or	rWORD2, rA, rB
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+	cmplw	cr0, rWORD1, rWORD2
+	srw	rC, rWORD4, rSHR
+	slw	rF, rWORD4, rSHL
+	bne	cr5, L(duLcr5)
+	or	rWORD4, rC, rD
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+	cmplw	cr1, rWORD3, rWORD4
+	srw	rE, rWORD6, rSHR
+	slw	rH, rWORD6, rSHL
+	bne	cr0, L(duLcr0)
+	or	rWORD6, rE, rF
+	cmplw	cr6, rWORD5, rWORD6
+	b	L(duLoop3)	
+	.align 4
+/* At this point we exit early with the first word compare
+   complete and remainder of 0 to 3 bytes.  See L(du14) for details on
+   how we handle the remaining bytes.  */
+L(duP1x):
+	cmplw	cr5, rWORD7, rWORD8
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	ld	rWORD2, 8(rSTR2)
+	srw	rA, rWORD2, rSHR
+	b	L(dutrim)
+/* Remainder is 8 */
+	.align 4
+L(duP2):
+	srw	rE, rWORD8, rSHR
+	lwz	rWORD5, 0(rSTR1)
+	or	rWORD6, rE, rH
+	slw	rH, rWORD8, rSHL
+L(duP2e):
+	lwz	rWORD7, 4(rSTR1)
+	lwz	rWORD8, 4(rSTR2)
+	cmplw	cr6, rWORD5, rWORD6
+	srw	rG, rWORD8, rSHR
+	slw	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	blt	cr7, L(duP2x)
+	lwz	rWORD1, 8(rSTR1)
+	lwz	rWORD2, 8(rSTR2)
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	srw	rA, rWORD2, rSHR
+	slw	rD, rWORD2, rSHL
+	or	rWORD2, rA, rB
+	lwz	rWORD3, 12(rSTR1)
+	lwz	rWORD4, 12(rSTR2)
+	cmplw	cr0, rWORD1, rWORD2
+	bne	cr5, L(duLcr5)
+	srw	rC, rWORD4, rSHR
+	slw	rF, rWORD4, rSHL
+	or	rWORD4, rC, rD
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+	cmplw	cr1, rWORD3, rWORD4
+	b	L(duLoop2)
+	.align 4
+L(duP2x):
+	cmplw	cr5, rWORD7, rWORD8
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+	bne	cr6, L(duLcr6)
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	lwz	rWORD2, 4(rSTR2)
+	srw	rA, rWORD2, rSHR
+	b	L(dutrim)
+		
+/* Remainder is 12 */
+	.align 4
+L(duP3):
+	srw	rC, rWORD8, rSHR
+	lwz	rWORD3, 0(rSTR1)
+	slw	rF, rWORD8, rSHL
+	or	rWORD4, rC, rH
+L(duP3e):
+	lwz	rWORD5, 4(rSTR1)
+	lwz	rWORD6, 4(rSTR2)
+	cmplw	cr1, rWORD3, rWORD4
+	srw	rE, rWORD6, rSHR
+	slw	rH, rWORD6, rSHL
+	or	rWORD6, rE, rF
+	lwz	rWORD7, 8(rSTR1)
+	lwz	rWORD8, 8(rSTR2)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr1, L(duLcr1)
+	srw	rG, rWORD8, rSHR
+	slw	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	blt	cr7, L(duP3x)
+	lwz	rWORD1, 12(rSTR1)
+	lwz	rWORD2, 12(rSTR2)
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	srw	rA, rWORD2, rSHR
+	slw	rD, rWORD2, rSHL
+	or	rWORD2, rA, rB
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+	cmplw	cr0, rWORD1, rWORD2
+	b	L(duLoop1)
+	.align 4
+L(duP3x):
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+	bne	cr1, L(duLcr1)
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	lwz	rWORD2, 4(rSTR2)
+	srw	rA, rWORD2, rSHR
+	b	L(dutrim)
+	
+/* Count is a multiple of 16, remainder is 0 */
+	.align 4
+L(duP4):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	srw	rA, rWORD8, rSHR
+	lwz	rWORD1, 0(rSTR1)
+	slw	rD, rWORD8, rSHL
+	or	rWORD2, rA, rH
+L(duP4e):
+	lwz	rWORD3, 4(rSTR1)
+	lwz	rWORD4, 4(rSTR2)
+	cmplw	cr0, rWORD1, rWORD2
+	srw	rC, rWORD4, rSHR
+	slw	rF, rWORD4, rSHL
+	or	rWORD4, rC, rD
+	lwz	rWORD5, 8(rSTR1)
+	lwz	rWORD6, 8(rSTR2)
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr0, L(duLcr0)
+	srw	rE, rWORD6, rSHR
+	slw	rH, rWORD6, rSHL
+	or	rWORD6, rE, rF
+	lwzu	rWORD7, 12(rSTR1)
+	lwzu	rWORD8, 12(rSTR2)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr1, L(duLcr1)
+	srw	rG, rWORD8, rSHR
+	slw	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	cmplw	cr5, rWORD7, rWORD8
+	bdz-	L(du24)		/* Adjust CTR as we start with +4 */
+/* This is the primary loop */
+	.align 4
+L(duLoop):
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(duLcr6)
+	srw	rA, rWORD2, rSHR
+	slw	rD, rWORD2, rSHL
+	or	rWORD2, rA, rB
+L(duLoop1):
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(duLcr5)
+	srw	rC, rWORD4, rSHR
+	slw	rF, rWORD4, rSHL
+	or	rWORD4, rC, rD
+L(duLoop2):
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr0, L(duLcr0)
+	srw	rE, rWORD6, rSHR
+	slw	rH, rWORD6, rSHL
+	or	rWORD6, rE, rF
+L(duLoop3):
+	lwzu	rWORD7, 16(rSTR1)
+	lwzu	rWORD8, 16(rSTR2)
+	cmplw	cr0, rWORD1, rWORD2
+	bne-	cr1, L(duLcr1)
+	srw	rG, rWORD8, rSHR
+	slw	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	bdnz+	L(duLoop)	
+	
+L(duL4):
+	bne	cr1, L(duLcr1)
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(duLcr6)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(duLcr5)
+	cmplw	cr5, rWORD7, rWORD8
+L(du44):
+	bne	cr0, L(duLcr0)
+L(du34):
+	bne	cr1, L(duLcr1)
+L(du24):
+	bne	cr6, L(duLcr6)
+L(du14):
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+/* At this point we have a remainder of 1 to 3 bytes to compare.  We use
+   shift right to eliminate bits beyond the compare length. 
+
+   However, it may not be safe to load rWORD2, which may be beyond the
+   string length.  So we compare the bit length of the remainder to
+   the right shift count (rSHR).  If the bit count is less than or equal,
+   we do not need to load rWORD2 (all significant bits are already in
+   rB).  */
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	lwz	rWORD2, 4(rSTR2)
+	srw	rA, rWORD2, rSHR
+	.align 4
+L(dutrim):
+	lwz	rWORD1, 4(rSTR1)
+        lwz     r31,48(1)
+	subfic	rN, rN, 32	/* Shift count is 32 - (rN * 8).  */ 
+	or	rWORD2, rA, rB
+        lwz     r30,44(1)
+        lwz     r29,40(r1)
+	srw	rWORD1, rWORD1, rN
+	srw	rWORD2, rWORD2, rN
+        lwz     r28,36(r1)	
+        lwz     r27,32(r1)
+        cmplw   rWORD1,rWORD2
+        li      rRTN,0
+        beq     L(dureturn26)
+        li      rRTN,1
+        bgt     L(dureturn26)
+        li      rRTN,-1
+	b    L(dureturn26)
+	.align 4
+L(duLcr0):
+        lwz     r31,48(1)
+        lwz     r30,44(1)
+	li	rRTN, 1
+	bgt	cr0, L(dureturn29)	
+	lwz     r29,40(r1)
+        lwz     r28,36(r1)	
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align 4
+L(duLcr1):
+        lwz     r31,48(1)
+        lwz     r30,44(1)
+	li	rRTN, 1
+	bgt	cr1, L(dureturn29)	
+        lwz     r29,40(r1)
+        lwz     r28,36(r1)	
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align 4
+L(duLcr6):
+        lwz     r31,48(1)
+        lwz     r30,44(1)
+	li	rRTN, 1
+	bgt	cr6, L(dureturn29)	
+        lwz     r29,40(r1)
+        lwz     r28,36(r1)	
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align 4
+L(duLcr5):
+        lwz     r31,48(1)
+        lwz     r30,44(1)
+	li	rRTN, 1
+	bgt	cr5, L(dureturn29)	
+        lwz     r29,40(r1)
+        lwz     r28,36(r1)	
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	3
+L(duZeroReturn):
+	li	rRTN,0
+	.align	4
+L(dureturn):
+        lwz     r31,48(1)
+        lwz     r30,44(1)
+L(dureturn29):	
+        lwz     r29,40(r1)
+        lwz     r28,36(r1)	
+L(dureturn27):	
+        lwz     r27,32(r1)
+L(dureturn26):	
+        lwz     r26,28(r1)
+L(dureturn25):	
+        lwz     r25,24(r1)
+        lwz     r24,20(r1)
+        lwz     1,0(1)
+	blr
+END (BP_SYM (memcmp))
+
+libc_hidden_builtin_def (memcmp)
+weak_alias (memcmp, bcmp)
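
Note: the 1-3 byte tail handling above (L(d00) and L(dutrim)) relies on both operands being word aligned at that point, so it is safe to load a whole 32-bit word and shift out the bytes that lie beyond the compare length.  A rough big-endian C analogue of that trick, for illustration only (tail_memcmp is a hypothetical helper, not part of the patch):

#include <stdint.h>

/* Compare the first tail_bytes (1-3) bytes of two word-aligned buffers.
   On big-endian PowerPC the leading bytes occupy the high-order bits,
   so shifting right by 32 - 8*tail_bytes discards the excess bytes,
   just as the subfic/srw sequence above does.  */
int
tail_memcmp (const uint32_t *w1, const uint32_t *w2, unsigned int tail_bytes)
{
  unsigned int shift = 32 - tail_bytes * 8;
  uint32_t a = *w1 >> shift;
  uint32_t b = *w2 >> shift;
  if (a == b)
    return 0;
  return a > b ? 1 : -1;
}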
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memcpy.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memcpy.S
new file mode 100644
index 0000000000..c48db2f3df
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memcpy.S
@@ -0,0 +1,425 @@
+/* Optimized memcpy implementation for PowerPC32 on PowerPC64.
+   Copyright (C) 2003, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+   Returns 'dst'.
+
+   Memcpy handles short copies (< 32 bytes) using binary move blocks
+   (no loops) of lwz/stw.  The tail (remaining 1-3 bytes) is handled
+   with the appropriate combination of byte and halfword load/stores. 
+   There is minimal effort to optimize the alignment of short moves.  
+
+   Longer moves (>= 32-bytes) justify the effort to get at least the
+   destination word (4-byte) aligned.  Further optimization is
+   possible when both source and destination are word aligned.
+   Each case has an optimized unrolled loop.   */
+
+EALIGN (BP_SYM (memcpy), 5, 0)
+	CALL_MCOUNT
+
+    stwu  1,-32(1)
+    cfi_adjust_cfa_offset(32)
+    stw   30,20(1)
+    cfi_offset(30,(20-32))
+    mr    30,3
+    cmplwi cr1,5,31     
+    stw   31,24(1)
+    cfi_offset(31,(24-32))
+    neg   0,3
+    andi. 11,3,3	/* check alignment of dst.  */
+    clrlwi 0,0,30	/* Number of bytes until the 1st word of dst.  */
+    clrlwi 10,4,30	/* check alignment of src.  */
+    cmplwi cr6,5,8
+    ble-  cr1,.L2	/* If move < 32 bytes use short move code.  */
+    cmplw cr6,10,11  
+    mr    12,4
+    srwi  9,5,2		/* Number of full words remaining.  */
+    mtcrf 0x01,0
+    mr    31,5
+    beq   .L0
+  
+    subf  31,0,5
+  /* Move 0-3 bytes as needed to get the destination word aligned.  */
+1:  bf    31,2f
+    lbz   6,0(12)
+    addi  12,12,1
+    stb   6,0(3)
+    addi  3,3,1
+2:  bf    30,0f
+    lhz   6,0(12)
+    addi  12,12,2
+    sth   6,0(3)
+    addi  3,3,2
+0:
+    clrlwi 10,12,30	/* check alignment of src again.  */     
+    srwi  9,31,2	/* Number of full words remaining.  */
+    
+  /* Copy words from source to destination, assuming the destination is 
+     aligned on a word boundary.
+
+     At this point we know there are at least 25 bytes left (32-7) to copy.
+     The next step is to determine if the source is also word aligned. 
+     If not branch to the unaligned move code at .L6. which uses
+     a load, shift, store strategy.
+     
+     Otherwise source and destination are word aligned, and we can use
+     the optimized word copy loop.  */
+.L0:
+    clrlwi	11,31,30  /* calculate the number of tail bytes */
+    mtcrf 0x01,9
+    bne-  cr6,.L6   /* If source is not word aligned.  */
+
+  /* Move words where destination and source are word aligned.
+     Use an unrolled loop to copy 4 words (16-bytes) per iteration.
+     If the copy is not an exact multiple of 16 bytes, 1-3 
+     words are copied as needed to set up the main loop.  After
+     the main loop exits there may be a tail of 1-3 bytes. These bytes are 
+     copied a halfword/byte at a time as needed to preserve alignment.  */
+
+    srwi  8,31,4    /* calculate the 16 byte loop count */
+    cmplwi	cr1,9,4
+    cmplwi	cr6,11,0
+    mr    11,12
+    
+    bf    30,1f
+    lwz   6,0(12)
+    lwz   7,4(12)
+    addi  11,12,8
+    mtctr 8
+    stw   6,0(3)
+    stw   7,4(3)
+    addi  10,3,8
+    bf    31,4f
+    lwz   0,8(12)
+    stw   0,8(3)    
+    blt   cr1,3f
+    addi  11,12,12
+    addi  10,3,12
+    b     4f
+    .align  4
+1:
+    mr    10,3
+    mtctr 8
+    bf    31,4f
+    lwz   6,0(12)
+    addi  11,12,4
+    stw   6,0(3)
+    addi  10,3,4
+    
+    .align  4
+4:
+    lwz   6,0(11)
+    lwz   7,4(11)
+    lwz   8,8(11)
+    lwz   0,12(11)
+    stw   6,0(10)
+    stw   7,4(10)
+    stw   8,8(10)
+    stw   0,12(10)
+    addi  11,11,16
+    addi  10,10,16
+    bdnz  4b
+3:  
+    clrrwi 0,31,2
+    mtcrf 0x01,31
+    beq   cr6,0f
+.L9:
+    add   3,3,0
+    add   12,12,0
+    
+/*  At this point we have a tail of 0-3 bytes and we know that the
+    destination is word aligned.  */
+2:  bf    30,1f
+    lhz   6,0(12)
+    addi  12,12,2
+    sth   6,0(3)
+    addi  3,3,2
+1:  bf    31,0f
+    lbz   6,0(12)
+    stb   6,0(3)
+0:
+  /* Return original dst pointer.  */
+    mr  3,30
+    lwz 30,20(1)
+    lwz 31,24(1)
+    addi 1,1,32
+    blr
+       
+/* Copy up to 31 bytes.  This is divided into two cases 0-8 bytes and 
+   9-31 bytes.  Each case is handled without loops, using binary 
+   (1,2,4,8) tests.  
+   
+   In the short (0-8 byte) case no attempt is made to force alignment
+   of either source or destination.  The hardware will handle the 
+   unaligned load/stores with small delays for crossing 32-, 64-, and
+   4096-byte boundaries. Since these short moves are unlikely to be
+   unaligned or cross these boundaries, the overhead to force 
+   alignment is not justified.
+   
+   The longer (9-31 byte) move is more likely to cross 32- or 64-byte
+   boundaries.  Since only loads are sensitive to the 32-/64-byte
+   boundaries it is more important to align the source than the 
+   destination.  If the source is not already word aligned, we first
+   move 1-3 bytes as needed.  While the destination and stores may 
+   still be unaligned, this is only an issue for page (4096 byte
+   boundary) crossing, which should be rare for these short moves.  
+   The hardware handles this case automatically with a small delay.  */ 
+   
+    .align  4
+.L2:
+    mtcrf 0x01,5
+    neg   8,4
+    clrrwi 11,4,2
+    andi. 0,8,3
+    ble   cr6,.LE8	/* Handle moves of 0-8 bytes.  */
+/* At least 9 bytes left.  Get the source word aligned.  */
+    cmplwi	cr1,5,16
+    mr    10,5
+    mr    12,4
+    cmplwi	cr6,0,2
+    beq   .L3	/* If the source is already word aligned skip this.  */
+/* Copy 1-3 bytes to get source address word aligned.  */
+    lwz   6,0(11)
+    subf  10,0,5
+    add   12,4,0
+    blt   cr6,5f
+    srwi  7,6,16
+    bgt	  cr6,3f
+    sth   6,0(3)
+    b     7f
+    .align  4
+3:
+    stb   7,0(3)
+    sth   6,1(3)
+    b     7f
+    .align  4
+5:
+    stb   6,0(3)
+7:
+    cmplwi	cr1,10,16
+    add   3,3,0
+    mtcrf 0x01,10
+    .align  4
+.L3:
+/* At least 6 bytes left and the source is word aligned.  */
+    blt   cr1,8f
+16: /* Move 16 bytes.  */
+    lwz   6,0(12)
+    lwz   7,4(12)
+    stw   6,0(3)
+    lwz   6,8(12)
+    stw   7,4(3)
+    lwz   7,12(12)
+    addi  12,12,16
+    stw   6,8(3)
+    stw   7,12(3)
+    addi  3,3,16
+8:  /* Move 8 bytes.  */
+    bf    28,4f
+    lwz   6,0(12)
+    lwz   7,4(12)
+    addi  12,12,8
+    stw   6,0(3)
+    stw   7,4(3)
+    addi  3,3,8
+4:  /* Move 4 bytes.  */
+    bf    29,2f
+    lwz   6,0(12)
+    addi  12,12,4
+    stw   6,0(3)
+    addi  3,3,4    
+2:  /* Move 2-3 bytes.  */
+    bf    30,1f
+    lhz   6,0(12)
+    sth   6,0(3) 
+    bf    31,0f
+    lbz   7,2(12)
+    stb   7,2(3)
+    mr    3,30
+    lwz   30,20(1)
+    addi  1,1,32
+    blr
+1:  /* Move 1 byte.  */
+    bf    31,0f
+    lbz   6,0(12)
+    stb   6,0(3)
+0:
+  /* Return original dst pointer.  */
+    mr   3,30
+    lwz  30,20(1)
+    addi 1,1,32
+    blr
+
+/* Special case to copy 0-8 bytes.  */
+    .align  4
+.LE8:
+    mr    12,4
+    bne   cr6,4f
+    lwz   6,0(4)
+    lwz   7,4(4)
+    stw   6,0(3)
+    stw   7,4(3)
+  /* Return original dst pointer.  */
+    mr    3,30
+    lwz   30,20(1)
+    addi  1,1,32
+    blr
+    .align  4
+4:  bf    29,2b
+    lwz   6,0(4)
+    stw   6,0(3)
+6:
+    bf    30,5f
+    lhz   7,4(4)
+    sth   7,4(3) 
+    bf    31,0f
+    lbz   8,6(4)
+    stb   8,6(3)
+    mr    3,30
+    lwz   30,20(1)
+    addi  1,1,32
+    blr
+    .align  4
+5:  
+    bf    31,0f
+    lbz   6,4(4)
+    stb   6,4(3)
+    .align  4
+0:
+  /* Return original dst pointer.  */
+    mr   3,30
+    lwz  30,20(1)
+    addi 1,1,32
+    blr
+
+    .align  4
+.L6:
+
+  /* Copy words where the destination is aligned but the source is
+     not.  Use aligned word loads from the source, shifted to realign
+     the data, to allow aligned destination stores.  
+     Use an unrolled loop to copy 4 words (16-bytes) per iteration.
+     A single word is retained for storing at loop exit to avoid walking
+     off the end of a page within the loop.
+     If the copy is not an exact multiple of 16 bytes, 1-3 
+     words are copied as needed to set up the main loop.  After
+     the main loop exits there may be a tail of 1-3 bytes. These bytes are 
+     copied a halfword/byte at a time as needed to preserve alignment.  */
+    
+
+    cmplwi  cr6,11,0  /* are there tail bytes left ? */
+    subf    5,10,12   /* back up src pointer to prev word alignment */
+    slwi    10,10,3   /* calculate number of bits to shift 1st word left */
+    addi    11,9,-1   /* we move one word after the loop */
+    srwi    8,11,2    /* calculate the 16 byte loop count */
+    lwz     6,0(5)    /* load 1st src word into R6 */
+    mr      4,3
+    lwz     7,4(5)    /* load 2nd src word into R7 */
+    mtcrf   0x01,11
+    subfic  9,10,32   /* number of bits to shift 2nd word right */
+    mtctr   8
+    bf      30,1f
+
+    /* there are at least two words to copy, so copy them */
+    slw   0,6,10  /* shift 1st src word to left align it in R0 */
+    srw   8,7,9   /* shift 2nd src word to right align it in R8 */
+    or    0,0,8   /* or them to get word to store */
+    lwz   6,8(5)  /* load the 3rd src word */
+    stw   0,0(4)  /* store the 1st dst word */
+    slw   0,7,10  /* now left align 2nd src word into R0 */
+    srw   8,6,9   /* shift 3rd src word to right align it in R8 */
+    or    0,0,8   /* or them to get word to store */
+    lwz   7,12(5)
+    stw   0,4(4)  /* store the 2nd dst word */
+    addi  4,4,8
+    addi  5,5,16
+    bf    31,4f
+    /* there is a third word to copy, so copy it */
+    slw   0,6,10  /* shift 3rd src word to left align it in R0 */
+    srw   8,7,9   /* shift 4th src word to right align it in R8 */
+    or    0,0,8   /* or them to get word to store */
+    stw   0,0(4)  /* store 3rd dst word */
+    mr    6,7
+    lwz   7,0(5)
+    addi  5,5,4
+    addi  4,4,4
+    b     4f
+    .align 4
+1:
+    slw     0,6,10  /* shift 1st src word to left align it in R0 */
+    srw     8,7,9   /* shift 2nd src word to right align it in R8 */
+    addi  5,5,8
+    or    0,0,8   /* or them to get word to store */
+    bf    31,4f
+    mr    6,7
+    lwz   7,0(5)
+    addi  5,5,4
+    stw   0,0(4)  /* store the 1st dst word */
+    addi  4,4,4
+
+    .align  4
+4:
+    /* copy 16 bytes at a time */
+    slw   0,6,10 
+    srw   8,7,9 
+    or    0,0,8
+    lwz   6,0(5)
+    stw   0,0(4)
+    slw   0,7,10
+    srw   8,6,9
+    or    0,0,8
+    lwz   7,4(5)
+    stw   0,4(4)
+    slw   0,6,10 
+    srw   8,7,9 
+    or    0,0,8
+    lwz   6,8(5)
+    stw   0,8(4)
+    slw   0,7,10
+    srw   8,6,9 
+    or    0,0,8
+    lwz   7,12(5)
+    stw   0,12(4)
+    addi  5,5,16
+    addi  4,4,16
+    bdnz+ 4b
+8:
+    /* calculate and store the final word */
+    slw   0,6,10 
+    srw   8,7,9 
+    or    0,0,8
+    stw   0,0(4)
+3:
+    clrrwi 0,31,2
+    mtcrf 0x01,31
+    bne   cr6,.L9	/* If the tail is 0 bytes we are done!  */
+
+  /* Return original dst pointer.  */
+    mr   3,30
+    lwz  30,20(1)
+    lwz  31,24(1)
+    addi 1,1,32
+    blr
+END (BP_SYM (memcpy))
+
+libc_hidden_builtin_def (memcpy)
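
The two copy strategies the comments in this file describe — an unrolled word loop when source and destination share word alignment, and a load/shift/or merge of aligned words (the code at .L6) when they do not — can be pictured in portable C.  The sketch below is only an illustration under big-endian, 32-bit-word assumptions; memcpy_word_sketch is an invented name, the loops are not unrolled four words at a time as the assembly's are, and the look-ahead load is simply stopped a word early rather than retaining a word across the loop exit as the assembly does.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch of the strategy used by the power4 32-bit memcpy above;
   big-endian words assumed, loops not unrolled.  Not glibc code.  */
void *
memcpy_word_sketch (void *dst, const void *src, size_t n)
{
  unsigned char *d = dst;
  const unsigned char *s = src;

  if (n < 32)                       /* short move: no alignment effort */
    {
      while (n--)
        *d++ = *s++;
      return dst;
    }

  while ((uintptr_t) d & 3)         /* 0-3 bytes to word-align the dst */
    {
      *d++ = *s++;
      n--;
    }

  if (((uintptr_t) s & 3) == 0)
    {
      /* Source also word aligned: plain word copy (lwz/stw pairs).  */
      for (; n >= 4; n -= 4, d += 4, s += 4)
        memcpy (d, s, 4);
    }
  else
    {
      /* Source unaligned: merge two shifted aligned loads, as the
         slw/srw/or sequence at .L6 does.  */
      unsigned int shl = ((uintptr_t) s & 3) * 8;  /* bits discarded from word 1 */
      unsigned int shr = 32 - shl;
      const uint32_t *ws = (const uint32_t *) (((uintptr_t) s & ~(uintptr_t) 3) + 4);
      uint32_t w0 = 0;
      /* The assembly loads the whole aligned word holding the first
         source byte; here only the in-range bytes of it are fetched.  */
      memcpy ((unsigned char *) &w0 + shl / 8, s, 4 - shl / 8);
      /* Stop a word early so the look-ahead load never runs past the
         end of the source buffer.  */
      for (; n >= 8; n -= 4, d += 4, s += 4)
        {
          uint32_t w1 = *ws++;
          uint32_t merged = (w0 << shl) | (w1 >> shr);  /* big-endian */
          memcpy (d, &merged, 4);
          w0 = w1;
        }
    }

  while (n--)                       /* remaining tail bytes */
    *d++ = *s++;
  return dst;
}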
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memset.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memset.S
new file mode 100644
index 0000000000..b07ed3c2d3
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/memset.S
@@ -0,0 +1,228 @@
+/* Optimized memset implementation for PowerPC32.
+   Copyright (C) 1997,99, 2000,02,03, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
+   Returns 's'.
+
+   The memset is done in three sizes: byte (8 bits), word (32 bits),
+   cache line (1024 bits). There is a special case for setting cache lines
+   to 0, to take advantage of the dcbz instruction.  */
+
+EALIGN (BP_SYM (memset), 5, 0)
+	CALL_MCOUNT
+
+#define rTMP	r0
+#define rRTN	r3	/* Initial value of 1st argument.  */
+#define rMEMP0	r3	/* Original value of 1st arg.  */
+#define rCHR	r4	/* Char to set in each byte.  */
+#define rLEN	r5	/* Length of region to set.  */
+#define rMEMP	r6	/* Address at which we are storing.  */
+#define rALIGN	r7	/* Number of bytes we are setting now (when aligning). */
+#define rMEMP2	r8
+
+#define rNEG64	r8	/* Constant -64 for clearing with dcbz.  */
+#define rCLS	r8	/* Cache line size (known to be 128).  */
+#define rCLM	r9	/* Cache line size mask to check for cache alignment.  */
+L(_memset):
+/* Take care of case for size <= 4.  */
+	cmplwi	cr1, rLEN, 4
+	andi.	rALIGN, rMEMP0, 3
+	mr	rMEMP, rMEMP0
+	ble-	cr1, L(small)
+
+/* Align to word boundary.  */
+	cmplwi	cr5, rLEN, 31
+	rlwimi	rCHR, rCHR, 8, 16, 23 /* Replicate byte to halfword.  */
+	beq+	L(aligned)
+	mtcrf	0x01, rMEMP0
+	subfic	rALIGN, rALIGN, 4
+	add	rMEMP, rMEMP, rALIGN
+	sub	rLEN, rLEN, rALIGN
+	bf+	31, L(g0)
+	stb	rCHR, 0(rMEMP0)
+	bt	30, L(aligned)
+L(g0):
+	sth	rCHR, -2(rMEMP)
+
+/* Handle the case of size < 31.  */
+L(aligned):
+	mtcrf	0x01, rLEN
+	rlwimi	rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word.  */
+	ble	cr5, L(medium)
+/* Align to 32-byte boundary.  */
+	andi.	rALIGN, rMEMP, 0x1C
+	subfic	rALIGN, rALIGN, 0x20
+	beq	L(caligned)
+	mtcrf	0x01, rALIGN
+	add	rMEMP, rMEMP, rALIGN
+	sub	rLEN, rLEN, rALIGN
+	cmplwi	cr1, rALIGN, 0x10
+	mr	rMEMP2, rMEMP
+	bf	28, L(a1)
+        stw     rCHR, -4(rMEMP2)
+	stwu	rCHR, -8(rMEMP2)
+L(a1):	blt	cr1, L(a2)
+        stw     rCHR, -4(rMEMP2)
+	stw	rCHR, -8(rMEMP2)
+	stw	rCHR, -12(rMEMP2)
+	stwu	rCHR, -16(rMEMP2)
+L(a2):  bf      29, L(caligned)
+        stw     rCHR, -4(rMEMP2)
+
+/* Now aligned to a 32 byte boundary.  */
+L(caligned):
+	cmplwi	cr1, rCHR, 0
+	clrrwi.	rALIGN, rLEN, 5
+	mtcrf	0x01, rLEN
+	beq	cr1, L(zloopstart) /* Special case for clearing memory using dcbz.  */
+L(nondcbz):
+	srwi	rTMP, rALIGN, 5
+	mtctr	rTMP
+	beq	L(medium)	/* We may not actually get to do a full line.  */
+	clrlwi.	rLEN, rLEN, 27
+	add	rMEMP, rMEMP, rALIGN
+	li	rNEG64, -0x40
+	bdz	L(cloopdone)
+
+        .align 4
+L(c3): 	dcbtst	rNEG64, rMEMP
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+        stw     rCHR, -12(rMEMP)
+	stw	rCHR, -16(rMEMP)
+        stw     rCHR, -20(rMEMP)
+	stw	rCHR, -24(rMEMP)
+        stw     rCHR, -28(rMEMP)
+	stwu	rCHR, -32(rMEMP)
+	bdnz	L(c3)
+L(cloopdone):
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+        stw     rCHR, -12(rMEMP)
+	stw	rCHR, -16(rMEMP)
+	cmplwi	cr1, rLEN, 16
+        stw     rCHR, -20(rMEMP)
+	stw	rCHR, -24(rMEMP)
+        stw     rCHR, -28(rMEMP)
+	stwu	rCHR, -32(rMEMP)
+	beqlr
+	add	rMEMP, rMEMP, rALIGN
+	b	L(medium_tail2)
+
+	.align 5
+/* Clear lines of memory in 128-byte chunks.  */
+L(zloopstart):
+/* If the remaining length is less than 32 bytes, don't bother getting
+	 the cache line size.  */
+	beq	L(medium)
+	li      rCLS,128  /* cache line size is 128 */
+	dcbt	0,rMEMP
+L(getCacheAligned):
+	cmplwi	cr1,rLEN,32
+	andi.	rTMP,rMEMP,127
+	blt	cr1,L(handletail32)
+	beq	L(cacheAligned)
+	addi	rMEMP,rMEMP,32
+	addi	rLEN,rLEN,-32
+	stw	rCHR,-32(rMEMP)
+        stw     rCHR,-28(rMEMP)
+	stw	rCHR,-24(rMEMP)
+	stw     rCHR,-20(rMEMP)
+	stw	rCHR,-16(rMEMP)
+        stw     rCHR,-12(rMEMP)
+	stw	rCHR,-8(rMEMP)
+        stw     rCHR,-4(rMEMP)
+	b	L(getCacheAligned)
+
+/* Now we are aligned to the cache line and can use dcbz.  */
+        .align 4
+L(cacheAligned):
+	cmplw	cr1,rLEN,rCLS
+	blt	cr1,L(handletail32)
+	dcbz	0,rMEMP
+	subf	rLEN,rCLS,rLEN
+	add	rMEMP,rMEMP,rCLS
+	b	L(cacheAligned)
+
+/* We are here because the cache line size was set and the remainder 
+  (rLEN) is less than the actual cache line size.
+   So set up the preconditions for L(nondcbz) and go there.  */
+L(handletail32):
+	clrrwi.	rALIGN, rLEN, 5
+	b		L(nondcbz)
+
+	.align 5
+L(small):
+/* Memset of 4 bytes or less.  */
+	cmplwi	cr5, rLEN, 1
+	cmplwi	cr1, rLEN, 3
+	bltlr	cr5
+	stb	rCHR, 0(rMEMP)
+	beqlr	cr5
+	stb	rCHR, 1(rMEMP)
+	bltlr	cr1
+	stb	rCHR, 2(rMEMP)
+	beqlr	cr1
+	stb	rCHR, 3(rMEMP)
+	blr
+
+/* Memset of 0-31 bytes.  */
+	.align 5
+L(medium):
+	cmplwi	cr1, rLEN, 16
+L(medium_tail2):
+	add	rMEMP, rMEMP, rLEN
+L(medium_tail):
+	bt-	31, L(medium_31t)
+	bt-	30, L(medium_30t)
+L(medium_30f):
+	bt-	29, L(medium_29t)
+L(medium_29f):
+	bge-	cr1, L(medium_27t)
+	bflr-	28
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	blr
+
+L(medium_31t):
+	stbu	rCHR, -1(rMEMP)
+	bf-	30, L(medium_30f)
+L(medium_30t):
+	sthu	rCHR, -2(rMEMP)
+	bf-	29, L(medium_29f)
+L(medium_29t):
+	stwu	rCHR, -4(rMEMP)
+	blt-	cr1, L(medium_27f)
+L(medium_27t):
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+        stw     rCHR, -12(rMEMP)
+	stwu	rCHR, -16(rMEMP)
+L(medium_27f):
+	bflr-	28
+L(medium_28t):
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	blr
+END (BP_SYM (memset))
+libc_hidden_builtin_def (memset)
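
The structure the memset header comment describes — replicate the fill byte into a word, align the destination, fill in 32-byte blocks, and switch to dcbz once the fill value is zero and a 128-byte cache-line boundary is reached — looks roughly like this in C.  It is only a sketch: memset_sketch and zero_cache_line are invented names, zero_cache_line merely stands in for the dcbz instruction, and the real code aligns in finer byte/halfword/word steps than shown here.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define CACHE_LINE 128        /* POWER4/5 line size, as assumed above */

/* Stand-in for "dcbz 0,rMEMP": zero one aligned cache line.  */
static void
zero_cache_line (unsigned char *p)
{
  for (int i = 0; i < CACHE_LINE; i++)
    p[i] = 0;
}

/* Sketch of the power4 memset structure above; not glibc code.  */
void *
memset_sketch (void *s, int c, size_t n)
{
  unsigned char *p = s;
  uint32_t w = (unsigned char) c;
  w |= w << 8;                      /* replicate byte to halfword */
  w |= w << 16;                     /* and halfword to word */

  /* Byte stores until p is 32-byte aligned (or n runs out).  */
  while (n > 0 && ((uintptr_t) p & 31) != 0)
    {
      *p++ = (unsigned char) c;
      n--;
    }

  if (c == 0)
    {
      /* 32-byte blocks of word stores up to a cache-line boundary,
         then whole-line clears (dcbz in the assembly).  */
      while (n >= 32 && ((uintptr_t) p & (CACHE_LINE - 1)) != 0)
        {
          for (int i = 0; i < 32; i += 4)
            memcpy (p + i, &w, 4);
          p += 32;
          n -= 32;
        }
      while (n >= CACHE_LINE)
        {
          zero_cache_line (p);
          p += CACHE_LINE;
          n -= CACHE_LINE;
        }
    }

  while (n >= 4)                    /* word stores for the remainder */
    {
      memcpy (p, &w, 4);
      p += 4;
      n -= 4;
    }
  while (n--)                       /* final 0-3 bytes */
    *p++ = (unsigned char) c;
  return s;
}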
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/strncmp.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/strncmp.S
new file mode 100644
index 0000000000..fc0835ebe0
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power4/strncmp.S
@@ -0,0 +1,176 @@
+/* Optimized strncmp implementation for PowerPC32.
+   Copyright (C) 2003, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* See strlen.s for comments on how the end-of-string testing works.  */
+
+/* int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5])  */
+
+EALIGN (BP_SYM(strncmp), 4, 0)
+
+#define rTMP	r0
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rN	r5	/* max string length */
+/* Note:  The Bounded pointer support in this code is broken.  This code
+   was inherited from PPC32 and that support was never completed.
+   Current PPC gcc does not support -fbounds-check or -fbounded-pointers.  */
+#define rWORD1	r6	/* current word in s1 */
+#define rWORD2	r7	/* current word in s2 */
+#define rWORD3  r10
+#define rWORD4  r11
+#define rFEFE	r8	/* constant 0xfefefeff (-0x01010101) */
+#define r7F7F	r9	/* constant 0x7f7f7f7f */
+#define rNEG	r10	/* ~(word in s1 | 0x7f7f7f7f) */
+#define rBITDIF	r11	/* bits that differ in s1 & s2 words */
+
+	dcbt	0,rSTR1
+	or	rTMP, rSTR2, rSTR1
+	lis	r7F7F, 0x7f7f
+	dcbt	0,rSTR2
+	clrlwi.	rTMP, rTMP, 30
+	cmplwi	cr1, rN, 0
+	lis	rFEFE, -0x101
+	bne	L(unaligned)
+/* We are word aligned so set up for two loops: first a word
+   loop, then fall into the byte loop if any residual.  */
+	srwi.	rTMP, rN, 2
+	clrlwi	rN, rN, 30
+	addi	rFEFE, rFEFE, -0x101
+	addi	r7F7F, r7F7F, 0x7f7f
+	cmplwi	cr1, rN, 0	
+	beq	L(unaligned)
+
+	mtctr	rTMP	/* Power4 wants mtctr 1st in dispatch group.  */
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+	b	L(g1)
+
+L(g0):	
+	lwzu	rWORD1, 4(rSTR1)
+	bne-	cr1, L(different)
+	lwzu	rWORD2, 4(rSTR2)
+L(g1):	add	rTMP, rFEFE, rWORD1
+	nor	rNEG, r7F7F, rWORD1
+	bdz	L(tail)
+	and.	rTMP, rTMP, rNEG
+	cmpw	cr1, rWORD1, rWORD2
+	beq+	L(g0)
+	
+/* OK. We've hit the end of the string. We need to be careful that
+   we don't compare two strings as different because of gunk beyond
+   the end of the strings...  */
+	
+L(endstring):
+	and	rTMP, r7F7F, rWORD1
+	beq	cr1, L(equal)
+	add	rTMP, rTMP, r7F7F
+	xor.	rBITDIF, rWORD1, rWORD2
+
+	andc	rNEG, rNEG, rTMP
+	blt-	L(highbit)
+	cntlzw	rBITDIF, rBITDIF
+	cntlzw	rNEG, rNEG
+	addi	rNEG, rNEG, 7
+	cmpw	cr1, rNEG, rBITDIF
+	sub	rRTN, rWORD1, rWORD2
+	blt-	cr1, L(equal)
+	srawi	rRTN, rRTN, 31
+	ori	rRTN, rRTN, 1
+	blr
+L(equal):
+	li	rRTN, 0
+	blr
+
+L(different):
+	lwzu	rWORD1, -4(rSTR1)
+	xor.	rBITDIF, rWORD1, rWORD2
+	sub	rRTN, rWORD1, rWORD2
+	blt-	L(highbit)
+	srawi	rRTN, rRTN, 31
+	ori	rRTN, rRTN, 1
+	blr
+L(highbit):
+	srwi	rWORD2, rWORD2, 24
+	srwi	rWORD1, rWORD1, 24
+	sub	rRTN, rWORD1, rWORD2
+	blr
+
+
+/* Oh well.  In this case, we just do a byte-by-byte comparison.  */
+	.align 4
+L(tail):
+	and.	rTMP, rTMP, rNEG
+	cmpw	cr1, rWORD1, rWORD2
+	bne-	L(endstring)
+	addi	rSTR1, rSTR1, 4
+	bne-	cr1, L(different)
+	addi	rSTR2, rSTR2, 4
+	cmplwi	cr1, rN, 0
+L(unaligned):
+	mtctr   rN	/* Power4 wants mtctr 1st in dispatch group */
+	ble	cr1, L(ux)
+L(uz):
+	lbz	rWORD1, 0(rSTR1)
+	lbz	rWORD2, 0(rSTR2)
+	.align 4
+L(u1):
+	cmpwi	cr1, rWORD1, 0
+	bdz	L(u4)
+	cmpw	rWORD1, rWORD2
+	beq-	cr1, L(u4)
+	lbzu    rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	bne-	L(u4)
+	cmpwi	cr1, rWORD3, 0
+	bdz	L(u3)
+	cmpw	rWORD3, rWORD4
+	beq-    cr1, L(u3)
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	bne-    L(u3)
+	cmpwi	cr1, rWORD1, 0
+	bdz	L(u4)
+	cmpw	rWORD1, rWORD2
+	beq-	cr1, L(u4)
+	lbzu	rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	bne-	L(u4)
+	cmpwi	cr1, rWORD3, 0
+	bdz	L(u3)
+	cmpw	rWORD3, rWORD4
+	beq-    cr1, L(u3)
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	beq+    L(u1)
+
+L(u3):  sub     rRTN, rWORD3, rWORD4
+        blr
+L(u4):	sub	rRTN, rWORD1, rWORD2
+	blr
+L(ux):
+	li	rRTN, 0
+	blr
+END (BP_SYM (strncmp))
+libc_hidden_builtin_def (strncmp)
+
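The end-of-string test that drives the word loop above uses the two constants loaded at entry, rFEFE = 0xfefefeff (that is, -0x01010101) and r7F7F = 0x7f7f7f7f: a word contains a zero byte exactly when (w + 0xfefefeff) & ~(w | 0x7f7f7f7f) is non-zero.  A minimal C restatement of the add/nor/and. sequence at L(g1), with an invented helper name:

#include <stdint.h>

/* Returns non-zero iff some byte of w is 0x00; mirrors the zero-byte
   test in the word loop above (see L(g1)).  */
static int
word_has_zero_byte (uint32_t w)
{
  uint32_t tmp = w + 0xfefefeffu;      /* add  rTMP, rFEFE, rWORD1 */
  uint32_t neg = ~(w | 0x7f7f7f7fu);   /* nor  rNEG, r7F7F, rWORD1 */
  return (tmp & neg) != 0;             /* and. rTMP, rTMP, rNEG    */
}

The word loop compares the strings four bytes per iteration; when this test fires it resolves the result from the differing or terminating word, and any residual (a length that is not a multiple of four, or unaligned inputs) falls back to the byte loop.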
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/Implies
new file mode 100644
index 0000000000..4e3a983426
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power4
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/Implies
new file mode 100644
index 0000000000..128f8aadcb
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/powerpc64/fpu
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S
new file mode 100644
index 0000000000..99cd6cc969
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S
@@ -0,0 +1,37 @@
+/* ceil function.  PowerPC32/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__ceil, 4, 0)
+	frip	fp1, fp1
+	blr
+	END (__ceil)
+
+weak_alias (__ceil, ceil)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__ceil, ceill)
+strong_alias (__ceil, __ceill)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __ceil, ceill, GLIBC_2_0)
+#endif
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S
new file mode 100644
index 0000000000..0a844b6f47
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S
@@ -0,0 +1,30 @@
+/* ceilf function.  PowerPC32/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__ceilf, 4, 0)
+	frip	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__ceilf)
+
+weak_alias (__ceilf, ceilf)
+
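The float variant above (and the floorf, roundf, and truncf files that follow) uses a double-precision rounding instruction followed by frsp to round the result back to single precision so the float-format state the comment mentions is reflected in the result.  In rough, portable C terms (ceilf_sketch is an invented name, not glibc's entry point):

#include <math.h>

/* Rough C picture of the frip/frsp pair in __ceilf above.  */
float
ceilf_sketch (float x)
{
  return (float) ceil ((double) x);   /* frip, then frsp */
}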
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S
new file mode 100644
index 0000000000..3b1d26f9fc
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S
@@ -0,0 +1,37 @@
+/* floor function.  PowerPC32/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__floor, 4, 0)
+	frim	fp1, fp1
+	blr
+	END (__floor)
+
+weak_alias (__floor, floor)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__floor, floorl)
+strong_alias (__floor, __floorl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __floor, floorl, GLIBC_2_0)
+#endif
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S
new file mode 100644
index 0000000000..640140c334
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S
@@ -0,0 +1,30 @@
+/* floorf function.  PowerPC32/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__floorf, 4, 0)
+	frim	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__floorf)
+
+weak_alias (__floorf, floorf)
+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S
new file mode 100644
index 0000000000..a122a86834
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S
@@ -0,0 +1,37 @@
+/* round function.  PowerPC32/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__round, 4, 0)
+	frin	fp1, fp1
+	blr
+	END (__round)
+
+weak_alias (__round, round)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__round, roundl)
+strong_alias (__round, __roundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __round, roundl, GLIBC_2_0)
+#endif
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S
new file mode 100644
index 0000000000..4193c69152
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S
@@ -0,0 +1,30 @@
+/* roundf function.  PowerPC32/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__roundf, 4, 0)
+	frin	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__roundf)
+
+weak_alias (__roundf, roundf)
+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S
new file mode 100644
index 0000000000..588420ac1e
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S
@@ -0,0 +1,37 @@
+/* trunc function.  PowerPC32/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__trunc, 4, 0)
+	friz	fp1, fp1
+	blr
+	END (__trunc)
+
+weak_alias (__trunc, trunc)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__trunc, truncl)
+strong_alias (__trunc, __truncl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __trunc, truncl, GLIBC_2_0)
+#endif
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S
new file mode 100644
index 0000000000..c575e2bbc2
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S
@@ -0,0 +1,30 @@
+/* truncf function.  PowerPC32/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__truncf, 4, 0)
+	friz	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__truncf)
+
+weak_alias (__truncf, truncf)
+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5/Implies
new file mode 100644
index 0000000000..4e3a983426
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power4
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power5/fpu/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5/fpu/Implies
new file mode 100644
index 0000000000..128f8aadcb
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power5/fpu/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/powerpc64/fpu
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power6/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc32/power6/Implies
new file mode 100644
index 0000000000..59fcd3c09d
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power6/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power5+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/power6/fpu/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc32/power6/fpu/Implies
new file mode 100644
index 0000000000..e44a900535
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/power6/fpu/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/power5+/fpu
+powerpc/powerpc32/powerpc64/fpu
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrint.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrint.S
new file mode 100644
index 0000000000..fde4b3aa81
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrint.S
@@ -0,0 +1,43 @@
+/* Round double to long int.  PowerPC32 on PowerPC64 version.
+   Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+
+/* long long int[r3, r4] __llrint (double x[fp1])  */
+ENTRY (__llrint)	
+	CALL_MCOUNT
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	fctid	fp13,fp1
+	stfd	fp13,8(r1)
+	nop	/* Ensure the following load is in a different dispatch group */
+	nop	/* to avoid pipe stall on POWER4&5.  */
+	nop
+	lwz	r3,8(r1)
+	lwz	r4,12(r1)
+	addi	r1,r1,16	
+	blr
+	END (__llrint)
+
+weak_alias (__llrint, llrint)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__llrint, __llrintl)
+weak_alias (__llrint, llrintl)
+#endif
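
On this 32-bit configuration the 64-bit result of fctid has to come back in a register pair, so the converted value is spilled to the stack and reloaded as two words into r3 and r4.  A hedged C picture of the same conversion, assuming the big-endian convention used above in which r3 carries the high word and r4 the low word (llrint_sketch is an invented name):

#include <math.h>
#include <stdint.h>

/* Sketch of __llrint above: fctid converts the double using the
   current rounding mode; the two 32-bit halves of the result are
   what end up in r3 (high word) and r4 (low word).  */
long long
llrint_sketch (double x)
{
  long long v = (long long) rint (x);             /* fctid */
  uint32_t hi = (uint32_t) ((uint64_t) v >> 32);  /* lwz r3, 8(r1)  */
  uint32_t lo = (uint32_t) v;                     /* lwz r4, 12(r1) */
  return (long long) (((uint64_t) hi << 32) | lo);
}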
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrintf.S b/powerpc-cpu/sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrintf.S
new file mode 100644
index 0000000000..21353ffb5b
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc32/powerpc64/fpu/s_llrintf.S
@@ -0,0 +1,39 @@
+/* Round float to long int.  PowerPC32 on PowerPC64 version.
+   Copyright (C) 2004, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+
+/* long long int[r3, r4] __llrintf (float x[fp1])  */
+ENTRY (__llrintf)	
+	CALL_MCOUNT
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	fctid	fp13,fp1
+	stfd	fp13,8(r1)
+	nop	/* Ensure the following load is in a different dispatch group */
+	nop	/* to avoid pipe stall on POWER4&5.  */
+	nop
+	lwz	r3,8(r1)
+	lwz	r4,12(r1)
+	addi	r1,r1,16	
+	blr
+	END (__llrintf)
+
+weak_alias (__llrintf, llrintf)
+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/970/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc64/970/Implies
new file mode 100644
index 0000000000..ac431fa96e
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/970/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/power4
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power4/memcmp.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power4/memcmp.S
new file mode 100644
index 0000000000..8f74ca7044
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power4/memcmp.S
@@ -0,0 +1,981 @@
+/* Optimized memcmp implementation for PowerPC64.
+   Copyright (C) 2003, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* int [r3] memcmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5])  */
+
+EALIGN (BP_SYM(memcmp), 4, 0)
+	CALL_MCOUNT 3
+
+#define rTMP	r0
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rN	r5	/* max string length */
+/* Note:  The Bounded pointer support in this code is broken.  This code
+   was inherited from PPC32 and and that support was never completed.  
+   Current PPC gcc does not support -fbounds-check or -fbounded-pointers.  */
+#define rWORD1	r6	/* current word in s1 */
+#define rWORD2	r7	/* current word in s2 */
+#define rWORD3	r8	/* next word in s1 */
+#define rWORD4	r9	/* next word in s2 */
+#define rWORD5	r10	/* next word in s1 */
+#define rWORD6	r11	/* next word in s2 */
+#define rBITDIF	r12	/* bits that differ in s1 & s2 words */
+#define rWORD7	r30	/* next word in s1 */
+#define rWORD8	r31	/* next word in s2 */
+
+	xor	rTMP, rSTR2, rSTR1
+	cmpldi	cr6, rN, 0
+	cmpldi	cr1, rN, 12
+	clrldi.	rTMP, rTMP, 61
+	clrldi	rBITDIF, rSTR1, 61
+	cmpldi	cr5, rBITDIF, 0
+	beq-	cr6, L(zeroLength)
+	dcbt	0,rSTR1
+	dcbt	0,rSTR2
+/* If less than 8 bytes or not aligned, use the unaligned
+   byte loop.  */
+	blt	cr1, L(bytealigned)
+	std	rWORD8,-8(r1)	
+	cfi_offset(rWORD8,-8)
+	std	rWORD7,-16(r1)	
+	cfi_offset(rWORD7,-16)
+	bne	L(unaligned)
+/* At this point we know both strings have the same alignment and the
+   compare length is at least 8 bytes.  rBITDIF contains the low order
+   3 bits of rSTR1 and cr5 contains the result of the logical compare
+   of rBITDIF to 0.  If rBITDIF == 0 then we are already double word 
+   aligned and can perform the DWaligned loop.
+  
+   Otherwise we know the two strings have the same alignment (but not
+   yet DW).  So we can force the string addresses to the next lower DW
+   boundary and special case this first DW word using shift left to
+   eliminate bits preceding the first byte.  Since we want to join the
+   normal (DWaligned) compare loop, starting at the second double word,
+   we need to adjust the length (rN) and special case the loop
+   versioning for the first DW. This ensures that the loop count is
+   correct and the first DW (shifted) is in the expected register pair.  */
+	.align 4
+L(samealignment):
+	clrrdi	rSTR1, rSTR1, 3
+	clrrdi	rSTR2, rSTR2, 3
+	beq	cr5, L(DWaligned)
+	add	rN, rN, rBITDIF
+	sldi	r11, rBITDIF, 3
+	srdi	rTMP, rN, 5	/* Divide by 32 */
+	andi.	rBITDIF, rN, 24	/* Get the DW remainder */
+	ld	rWORD1, 0(rSTR1)
+	ld	rWORD2, 0(rSTR2)
+	cmpldi	cr1, rBITDIF, 16
+	cmpldi	cr7, rN, 32
+	clrldi	rN, rN, 61
+	beq	L(dPs4)
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	bgt	cr1, L(dPs3)
+	beq	cr1, L(dPs2)
+
+/* Remainder is 8 */
+	.align 3
+L(dsP1):
+	sld	rWORD5, rWORD1, r11
+	sld	rWORD6, rWORD2, r11
+	cmpld	cr5, rWORD5, rWORD6
+	blt	cr7, L(dP1x)
+/* Do something useful in this cycle since we have to branch anyway.  */
+	ld	rWORD1, 8(rSTR1)
+	ld	rWORD2, 8(rSTR2)
+	cmpld	cr0, rWORD1, rWORD2
+	b	L(dP1e)
+/* Remainder is 16 */
+	.align 4
+L(dPs2):
+	sld	rWORD5, rWORD1, r11
+	sld	rWORD6, rWORD2, r11
+	cmpld	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP2x)
+/* Do something useful in this cycle since we have to branch anyway.  */
+	ld	rWORD7, 8(rSTR1)
+	ld	rWORD8, 8(rSTR2)
+	cmpld	cr5, rWORD7, rWORD8
+	b	L(dP2e)
+/* Remainder is 24 */
+	.align 4
+L(dPs3):
+	sld	rWORD3, rWORD1, r11
+	sld	rWORD4, rWORD2, r11
+	cmpld	cr1, rWORD3, rWORD4
+	b	L(dP3e)
+/* Count is a multiple of 32, remainder is 0 */
+	.align 4
+L(dPs4):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	sld	rWORD1, rWORD1, r11
+	sld	rWORD2, rWORD2, r11
+	cmpld	cr0, rWORD1, rWORD2
+	b	L(dP4e)
+
+/* At this point we know both strings are double word aligned and the
+   compare length is at least 8 bytes.  */
+	.align 4
+L(DWaligned):
+	andi.	rBITDIF, rN, 24	/* Get the DW remainder */
+	srdi	rTMP, rN, 5	/* Divide by 32 */
+	cmpldi	cr1, rBITDIF, 16
+	cmpldi	cr7, rN, 32
+	clrldi	rN, rN, 61
+	beq	L(dP4)
+	bgt	cr1, L(dP3)
+	beq	cr1, L(dP2)
+		
+/* Remainder is 8 */
+	.align 4
+L(dP1):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+/* Normally we'd use rWORD7/rWORD8 here, but since we might exit early
+   (8-15 byte compare), we want to use only volatile registers.  This
+   means we can avoid restoring non-volatile registers since we did not
+   change any on the early exit path.  The key here is the non-early
+   exit path only cares about the condition code (cr5), not about which 
+   register pair was used.  */
+	ld	rWORD5, 0(rSTR1)
+	ld	rWORD6, 0(rSTR2)
+	cmpld	cr5, rWORD5, rWORD6
+	blt	cr7, L(dP1x)
+	ld	rWORD1, 8(rSTR1)
+	ld	rWORD2, 8(rSTR2)
+	cmpld	cr0, rWORD1, rWORD2
+L(dP1e):
+	ld	rWORD3, 16(rSTR1)
+	ld	rWORD4, 16(rSTR2)
+	cmpld	cr1, rWORD3, rWORD4
+	ld	rWORD5, 24(rSTR1)
+	ld	rWORD6, 24(rSTR2)
+	cmpld	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+	bne	cr0, L(dLcr0)
+	
+	ldu	rWORD7, 32(rSTR1)
+	ldu	rWORD8, 32(rSTR2)
+	bne	cr1, L(dLcr1)
+	cmpld	cr5, rWORD7, rWORD8
+	bdnz	L(dLoop)
+	bne	cr6, L(dLcr6)
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+	.align 3
+L(dP1x):
+	sldi.	r12, rN, 3
+	bne	cr5, L(dLcr5)
+	subfic	rN, r12, 64	/* Shift count is 64 - (rN * 8).  */
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+		
+/* Remainder is 16 */
+	.align 4
+L(dP2):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	ld	rWORD5, 0(rSTR1)
+	ld	rWORD6, 0(rSTR2)
+	cmpld	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP2x)
+	ld	rWORD7, 8(rSTR1)
+	ld	rWORD8, 8(rSTR2)
+	cmpld	cr5, rWORD7, rWORD8
+L(dP2e):
+	ld	rWORD1, 16(rSTR1)
+	ld	rWORD2, 16(rSTR2)
+	cmpld	cr0, rWORD1, rWORD2
+	ld	rWORD3, 24(rSTR1)
+	ld	rWORD4, 24(rSTR2)
+	cmpld	cr1, rWORD3, rWORD4
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+	bne	cr6, L(dLcr6)
+	bne	cr5, L(dLcr5)
+	b	L(dLoop2)
+/* Again we are on an early exit path (16-23 byte compare); we want to
+   use only volatile registers and avoid restoring non-volatile
+   registers.  */
+	.align 4
+L(dP2x):
+	ld	rWORD3, 8(rSTR1)
+	ld	rWORD4, 8(rSTR2)
+	cmpld	cr5, rWORD3, rWORD4
+	sldi.	r12, rN, 3
+	bne	cr6, L(dLcr6)
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+	bne	cr5, L(dLcr5)
+	subfic	rN, r12, 64	/* Shift count is 64 - (rN * 8).  */
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+		
+/* Remainder is 24 */
+	.align 4
+L(dP3):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	ld	rWORD3, 0(rSTR1)
+	ld	rWORD4, 0(rSTR2)
+	cmpld	cr1, rWORD3, rWORD4
+L(dP3e):
+	ld	rWORD5, 8(rSTR1)
+	ld	rWORD6, 8(rSTR2)
+	cmpld	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP3x)
+	ld	rWORD7, 16(rSTR1)
+	ld	rWORD8, 16(rSTR2)
+	cmpld	cr5, rWORD7, rWORD8
+	ld	rWORD1, 24(rSTR1)
+	ld	rWORD2, 24(rSTR2)
+	cmpld	cr0, rWORD1, rWORD2
+	addi	rSTR1, rSTR1, 16
+	addi	rSTR2, rSTR2, 16
+	bne	cr1, L(dLcr1)
+	bne	cr6, L(dLcr6)
+	b	L(dLoop1)
+/* Again we are on an early exit path (24-31 byte compare); we want to
+   use only volatile registers and avoid restoring non-volatile
+   registers.  */
+	.align 4
+L(dP3x):
+	ld	rWORD1, 16(rSTR1)
+	ld	rWORD2, 16(rSTR2)
+	cmpld	cr5, rWORD1, rWORD2
+	sldi.	r12, rN, 3
+	bne	cr1, L(dLcr1)
+	addi	rSTR1, rSTR1, 16
+	addi	rSTR2, rSTR2, 16
+	bne	cr6, L(dLcr6)
+	subfic	rN, r12, 64	/* Shift count is 64 - (rN * 8).  */
+	bne	cr5, L(dLcr5)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+	
+/* Count is a multiple of 32, remainder is 0 */
+	.align 4
+L(dP4):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	ld	rWORD1, 0(rSTR1)
+	ld	rWORD2, 0(rSTR2)
+	cmpld	cr0, rWORD1, rWORD2
+L(dP4e):
+	ld	rWORD3, 8(rSTR1)
+	ld	rWORD4, 8(rSTR2)
+	cmpld	cr1, rWORD3, rWORD4
+	ld	rWORD5, 16(rSTR1)
+	ld	rWORD6, 16(rSTR2)
+	cmpld	cr6, rWORD5, rWORD6
+	ldu	rWORD7, 24(rSTR1)
+	ldu	rWORD8, 24(rSTR2)
+	cmpld	cr5, rWORD7, rWORD8
+	bne	cr0, L(dLcr0)
+	bne	cr1, L(dLcr1)
+	bdz-	L(d24)		/* Adjust CTR as we start with +4 */
+/* This is the primary loop */
+	.align 4
+L(dLoop):
+	ld	rWORD1, 8(rSTR1)
+	ld	rWORD2, 8(rSTR2)
+	cmpld	cr1, rWORD3, rWORD4
+	bne	cr6, L(dLcr6)
+L(dLoop1):
+	ld	rWORD3, 16(rSTR1)
+	ld	rWORD4, 16(rSTR2)
+	cmpld	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+L(dLoop2):
+	ld	rWORD5, 24(rSTR1)
+	ld	rWORD6, 24(rSTR2)
+	cmpld	cr5, rWORD7, rWORD8
+	bne	cr0, L(dLcr0)
+L(dLoop3):
+	ldu	rWORD7, 32(rSTR1)
+	ldu	rWORD8, 32(rSTR2)
+	bne-	cr1, L(dLcr1)
+	cmpld	cr0, rWORD1, rWORD2
+	bdnz+	L(dLoop)	
+	
+L(dL4):
+	cmpld	cr1, rWORD3, rWORD4
+	bne	cr6, L(dLcr6)
+	cmpld	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+	cmpld	cr5, rWORD7, rWORD8
+L(d44):
+	bne	cr0, L(dLcr0)
+L(d34):
+	bne	cr1, L(dLcr1)
+L(d24):
+	bne	cr6, L(dLcr6)
+L(d14):
+	sldi.	r12, rN, 3
+	bne	cr5, L(dLcr5) 
+L(d04):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+	subfic	rN, r12, 64	/* Shift count is 64 - (rN * 8).  */
+	beq	L(zeroLength)
+/* At this point we have a remainder of 1 to 7 bytes to compare.  Since
+   we are aligned it is safe to load the whole double word, and use
+   shift right double to eliminate bits beyond the compare length.  */
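+/* For example, with 3 tail bytes r12 is 24 above, so the shift count
+   becomes 64 - 24 = 40 and the two srd instructions below keep only
+   the first 3 bytes of each doubleword for the final cmpld.  */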
+L(d00):
+	ld	rWORD1, 8(rSTR1)
+	ld	rWORD2, 8(rSTR2) 
+	srd	rWORD1, rWORD1, rN
+	srd	rWORD2, rWORD2, rN
+	cmpld	cr5, rWORD1, rWORD2
+ 	bne	cr5, L(dLcr5x)
+	li	rRTN, 0
+	blr
+	.align 4
+L(dLcr0):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+	li	rRTN, 1
+	bgtlr	cr0
+	li	rRTN, -1
+	blr
+	.align 4
+L(dLcr1):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+	li	rRTN, 1
+	bgtlr	cr1
+	li	rRTN, -1
+	blr
+	.align 4
+L(dLcr6):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+	li	rRTN, 1
+	bgtlr	cr6
+	li	rRTN, -1
+	blr
+	.align 4
+L(dLcr5):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+L(dLcr5x):
+	li	rRTN, 1
+	bgtlr	cr5
+	li	rRTN, -1
+	blr
+	
+	.align 4
+L(bytealigned):
+	mtctr   rN	/* Power4 wants mtctr 1st in dispatch group */
+	beq-	cr6, L(zeroLength)
+
+/* We need to prime this loop.  This loop is swing modulo scheduled
+   to avoid pipe delays.  The dependent instruction latency (load to
+   compare to conditional branch) is 2 to 3 cycles.  In this loop each
+   dispatch group ends in a branch and takes 1 cycle.  Effectively
+   the first iteration of the loop only serves to load operands and 
+   branches based on compares are delayed until the next loop iteration.
+
+   So we must precondition some registers and condition codes so that
+   we don't exit the loop early on the first iteration.  */
+   
+	lbz	rWORD1, 0(rSTR1)
+	lbz	rWORD2, 0(rSTR2)
+	bdz-	L(b11)
+	cmpld	cr0, rWORD1, rWORD2
+	lbz	rWORD3, 1(rSTR1)
+	lbz	rWORD4, 1(rSTR2)
+	bdz-	L(b12)
+	cmpld	cr1, rWORD3, rWORD4
+	lbzu	rWORD5, 2(rSTR1)
+	lbzu	rWORD6, 2(rSTR2)
+	bdz-	L(b13)
+	.align 4
+L(bLoop):
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	bne-	cr0, L(bLcr0)
+
+	cmpld	cr6, rWORD5, rWORD6
+	bdz-	L(b3i)
+	
+	lbzu	rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	bne-	cr1, L(bLcr1)
+
+	cmpld	cr0, rWORD1, rWORD2
+	bdz-	L(b2i)
+
+	lbzu	rWORD5, 1(rSTR1)
+	lbzu	rWORD6, 1(rSTR2)
+	bne-	cr6, L(bLcr6)
+
+	cmpld	cr1, rWORD3, rWORD4
+	bdnz+	L(bLoop)
+	
+/* We speculatively load bytes before we have tested the previous
+   bytes.  But we must avoid overrunning the length (in the ctr) to
+   prevent these speculative loads from causing a segfault.  In that
+   case the loop will exit early (before all pending bytes are
+   tested), so we must complete the pending operations before
+   returning.  */
+L(b1i):
+	bne-	cr0, L(bLcr0)
+	bne-	cr1, L(bLcr1)
+	b	L(bx56)
+	.align 4
+L(b2i):
+	bne-	cr6, L(bLcr6)
+	bne-	cr0, L(bLcr0)
+	b	L(bx34)
+	.align 4
+L(b3i):
+	bne-	cr1, L(bLcr1)
+	bne-	cr6, L(bLcr6)
+	b	L(bx12)
+	.align 4
+L(bLcr0):
+	li	rRTN, 1
+	bgtlr	cr0
+	li	rRTN, -1
+	blr
+L(bLcr1):
+	li	rRTN, 1
+	bgtlr	cr1
+	li	rRTN, -1
+	blr
+L(bLcr6):
+	li	rRTN, 1
+	bgtlr	cr6
+	li	rRTN, -1
+	blr
+
+L(b13):
+	bne-	cr0, L(bx12)
+	bne-	cr1, L(bx34)
+L(bx56):
+	sub	rRTN, rWORD5, rWORD6
+	blr
+	nop
+L(b12):
+	bne-	cr0, L(bx12)
+L(bx34):	
+	sub	rRTN, rWORD3, rWORD4
+	blr
+L(b11):
+L(bx12):
+	sub	rRTN, rWORD1, rWORD2
+	blr
+	.align 4 
+L(zeroLengthReturn):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+L(zeroLength):
+	li	rRTN, 0
+	blr
+
+	.align 4
+/* At this point we know the strings have different alignment and the
+   compare length is at least 8 bytes.  rBITDIF contains the low order
+   3 bits of rSTR1 and cr5 contains the result of the logical compare
+   of rBITDIF to 0.  If rBITDIF == 0 then rSTR1 is double word
+   aligned and we can perform the DWunaligned loop.
+  
+   Otherwise we know that rSTR1 is not yet DW aligned.
+   So we can force the string addresses to the next lower DW
+   boundary and special case this first DW word using shift left to
+   eliminate bits preceding the first byte.  Since we want to join the
+   normal (DWaligned) compare loop, starting at the second double word,
+   we need to adjust the length (rN) and special case the loop
+   versioning for the first DW. This ensures that the loop count is
+   correct and the first DW (shifted) is in the expected register pair.  */
+#define rSHL	r29	/* Unaligned shift left count.  */
+#define rSHR	r28	/* Unaligned shift right count.  */
+#define rB		r27	/* Left rotation temp for rWORD2.  */
+#define rD		r26	/* Left rotation temp for rWORD4.  */
+#define rF		r25	/* Left rotation temp for rWORD6.  */
+#define rH		r24	/* Left rotation temp for rWORD8.  */
+#define rA		r0	/* Right rotation temp for rWORD2.  */
+#define rC		r12	/* Right rotation temp for rWORD4.  */
+#define rE		r0	/* Right rotation temp for rWORD6.  */
+#define rG		r12	/* Right rotation temp for rWORD8.  */
+L(unaligned):
+	std	r29,-24(r1)	
+	cfi_offset(r29,-24)
+	clrldi	rSHL, rSTR2, 61
+	beq-	cr6, L(duzeroLength)
+	std	r28,-32(r1)	
+	cfi_offset(r28,-32)
+	beq	cr5, L(DWunaligned)
+	std	r27,-40(r1)	
+	cfi_offset(r27,-40)
+/* Adjust the logical start of rSTR2 to compensate for the extra bits
+   in the 1st rSTR1 DW.  */
+	sub	r27, rSTR2, rBITDIF
+/* But do not attempt to address the DW before the DW that contains
+   the actual start of rSTR2.  */
+	clrrdi	rSTR2, rSTR2, 3
+	std	r26,-48(r1)	
+	cfi_offset(r26,-48)
+/* Compute the left/right shift counts for the unaligned rSTR2,
+   compensating for the logical (DW aligned) start of rSTR1.  */ 
+	clrldi	rSHL, r27, 61
+	clrrdi	rSTR1, rSTR1, 3	
+	std	r25,-56(r1)	
+	cfi_offset(r25,-56)
+	sldi	rSHL, rSHL, 3
+	cmpld	cr5, r27, rSTR2
+	add	rN, rN, rBITDIF
+	sldi	r11, rBITDIF, 3
+	std	r24,-64(r1)	
+	cfi_offset(r24,-64)
+	subfic	rSHR, rSHL, 64
+	srdi	rTMP, rN, 5	/* Divide by 32 */
+	andi.	rBITDIF, rN, 24	/* Get the DW remainder */
+/* We normally need to load 2 DWs to start the unaligned rSTR2, but in
+   this special case those bits may be discarded anyway.  Also we
+   must avoid loading a DW where none of the bits are part of rSTR2 as
+   this may cross a page boundary and cause a page fault.  */
+	li	rWORD8, 0
+	blt	cr5, L(dus0)
+	ld	rWORD8, 0(rSTR2)
+	la	rSTR2, 8(rSTR2)
+	sld	rWORD8, rWORD8, rSHL
+
+L(dus0):
+	ld	rWORD1, 0(rSTR1)
+	ld	rWORD2, 0(rSTR2)
+	cmpldi	cr1, rBITDIF, 16
+	cmpldi	cr7, rN, 32
+	srd	rG, rWORD2, rSHR
+	clrldi	rN, rN, 61
+	beq	L(duPs4)
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	or	rWORD8, rG, rWORD8
+	bgt	cr1, L(duPs3)
+	beq	cr1, L(duPs2)
+
+/* Remainder is 8 */
+	.align 4
+L(dusP1):
+	sld	rB, rWORD2, rSHL
+	sld	rWORD7, rWORD1, r11
+	sld	rWORD8, rWORD8, r11
+	bge	cr7, L(duP1e)
+/* At this point we exit early with the first double word compare
+   complete and remainder of 0 to 7 bytes.  See L(du14) for details on
+   how we handle the remaining bytes.  */
+	cmpld	cr5, rWORD7, rWORD8
+	sldi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmpld	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	ld	rWORD2, 8(rSTR2)
+	srd	rA, rWORD2, rSHR
+	b	L(dutrim)
+/* Remainder is 16 */
+	.align 4
+L(duPs2):
+	sld	rH, rWORD2, rSHL
+	sld	rWORD5, rWORD1, r11
+	sld	rWORD6, rWORD8, r11
+	b	L(duP2e)
+/* Remainder is 24 */
+	.align 4
+L(duPs3):
+	sld	rF, rWORD2, rSHL
+	sld	rWORD3, rWORD1, r11
+	sld	rWORD4, rWORD8, r11
+	b	L(duP3e)
+/* Count is a multiple of 32, remainder is 0 */
+	.align 4
+L(duPs4):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	or	rWORD8, rG, rWORD8
+	sld	rD, rWORD2, rSHL
+	sld	rWORD1, rWORD1, r11
+	sld	rWORD2, rWORD8, r11
+	b	L(duP4e)
+
+/* At this point we know rSTR1 is double word aligned and the
+   compare length is at least 8 bytes.  */
+	.align 4
+L(DWunaligned):
+	std	r27,-40(r1)	
+	cfi_offset(r27,-40)
+	clrrdi	rSTR2, rSTR2, 3
+	std	r26,-48(r1)	
+	cfi_offset(r26,-48)
+	srdi	rTMP, rN, 5	/* Divide by 32 */
+	std	r25,-56(r1)	
+	cfi_offset(r25,-56)
+	andi.	rBITDIF, rN, 24	/* Get the DW remainder */
+	std	r24,-64(r1)	
+	cfi_offset(r24,-64)
+	sldi	rSHL, rSHL, 3
+	ld	rWORD6, 0(rSTR2)
+	ldu	rWORD8, 8(rSTR2)
+	cmpldi	cr1, rBITDIF, 16
+	cmpldi	cr7, rN, 32
+	clrldi	rN, rN, 61
+	subfic	rSHR, rSHL, 64
+	sld	rH, rWORD6, rSHL
+	beq	L(duP4)
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	bgt	cr1, L(duP3)
+	beq	cr1, L(duP2)
+		
+/* Remainder is 8 */
+	.align 4
+L(duP1):
+	srd	rG, rWORD8, rSHR
+	ld	rWORD7, 0(rSTR1)
+	sld	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	blt	cr7, L(duP1x)
+L(duP1e):
+	ld	rWORD1, 8(rSTR1)
+	ld	rWORD2, 8(rSTR2)
+	cmpld	cr5, rWORD7, rWORD8
+	srd	rA, rWORD2, rSHR
+	sld	rD, rWORD2, rSHL
+	or	rWORD2, rA, rB
+	ld	rWORD3, 16(rSTR1)
+	ld	rWORD4, 16(rSTR2)
+	cmpld	cr0, rWORD1, rWORD2
+	srd	rC, rWORD4, rSHR
+	sld	rF, rWORD4, rSHL
+	bne	cr5, L(duLcr5)
+	or	rWORD4, rC, rD
+	ld	rWORD5, 24(rSTR1)
+	ld	rWORD6, 24(rSTR2)
+	cmpld	cr1, rWORD3, rWORD4
+	srd	rE, rWORD6, rSHR
+	sld	rH, rWORD6, rSHL
+	bne	cr0, L(duLcr0)
+	or	rWORD6, rE, rF
+	cmpld	cr6, rWORD5, rWORD6
+	b	L(duLoop3)	
+	.align 4
+/* At this point we exit early with the first double word compare
+   complete and remainder of 0 to 7 bytes.  See L(du14) for details on
+   how we handle the remaining bytes.  */
+L(duP1x):
+	cmpld	cr5, rWORD7, rWORD8
+	sldi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmpld	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	ld	rWORD2, 8(rSTR2)
+	srd	rA, rWORD2, rSHR
+	b	L(dutrim)
+/* Remainder is 16 */
+	.align 4
+L(duP2):
+	srd	rE, rWORD8, rSHR
+	ld	rWORD5, 0(rSTR1)
+	or	rWORD6, rE, rH
+	sld	rH, rWORD8, rSHL
+L(duP2e):
+	ld	rWORD7, 8(rSTR1)
+	ld	rWORD8, 8(rSTR2)
+	cmpld	cr6, rWORD5, rWORD6
+	srd	rG, rWORD8, rSHR
+	sld	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	blt	cr7, L(duP2x)
+	ld	rWORD1, 16(rSTR1)
+	ld	rWORD2, 16(rSTR2)
+	cmpld	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	srd	rA, rWORD2, rSHR
+	sld	rD, rWORD2, rSHL
+	or	rWORD2, rA, rB
+	ld	rWORD3, 24(rSTR1)
+	ld	rWORD4, 24(rSTR2)
+	cmpld	cr0, rWORD1, rWORD2
+	bne	cr5, L(duLcr5)
+	srd	rC, rWORD4, rSHR
+	sld	rF, rWORD4, rSHL
+	or	rWORD4, rC, rD
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+	cmpld	cr1, rWORD3, rWORD4
+	b	L(duLoop2)
+	.align 4
+L(duP2x):
+	cmpld	cr5, rWORD7, rWORD8
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+	bne	cr6, L(duLcr6)
+	sldi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmpld	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	ld	rWORD2, 8(rSTR2)
+	srd	rA, rWORD2, rSHR
+	b	L(dutrim)
+		
+/* Remainder is 24 */
+	.align 4
+L(duP3):
+	srd	rC, rWORD8, rSHR
+	ld	rWORD3, 0(rSTR1)
+	sld	rF, rWORD8, rSHL
+	or	rWORD4, rC, rH
+L(duP3e):
+	ld	rWORD5, 8(rSTR1)
+	ld	rWORD6, 8(rSTR2)
+	cmpld	cr1, rWORD3, rWORD4
+	srd	rE, rWORD6, rSHR
+	sld	rH, rWORD6, rSHL
+	or	rWORD6, rE, rF
+	ld	rWORD7, 16(rSTR1)
+	ld	rWORD8, 16(rSTR2)
+	cmpld	cr6, rWORD5, rWORD6
+	bne	cr1, L(duLcr1)
+	srd	rG, rWORD8, rSHR
+	sld	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	blt	cr7, L(duP3x)
+	ld	rWORD1, 24(rSTR1)
+	ld	rWORD2, 24(rSTR2)
+	cmpld	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	srd	rA, rWORD2, rSHR
+	sld	rD, rWORD2, rSHL
+	or	rWORD2, rA, rB
+	addi	rSTR1, rSTR1, 16
+	addi	rSTR2, rSTR2, 16
+	cmpld	cr0, rWORD1, rWORD2
+	b	L(duLoop1)
+	.align 4
+L(duP3x):
+	addi	rSTR1, rSTR1, 16
+	addi	rSTR2, rSTR2, 16
+	bne	cr1, L(duLcr1)
+	cmpld	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	sldi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmpld	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	ld	rWORD2, 8(rSTR2)
+	srd	rA, rWORD2, rSHR
+	b	L(dutrim)
+	
+/* Count is a multiple of 32, remainder is 0 */
+	.align 4
+L(duP4):
+	mtctr   rTMP	/* Power4 wants mtctr 1st in dispatch group */
+	srd	rA, rWORD8, rSHR
+	ld	rWORD1, 0(rSTR1)
+	sld	rD, rWORD8, rSHL
+	or	rWORD2, rA, rH
+L(duP4e):
+	ld	rWORD3, 8(rSTR1)
+	ld	rWORD4, 8(rSTR2)
+	cmpld	cr0, rWORD1, rWORD2
+	srd	rC, rWORD4, rSHR
+	sld	rF, rWORD4, rSHL
+	or	rWORD4, rC, rD
+	ld	rWORD5, 16(rSTR1)
+	ld	rWORD6, 16(rSTR2)
+	cmpld	cr1, rWORD3, rWORD4
+	bne	cr0, L(duLcr0)
+	srd	rE, rWORD6, rSHR
+	sld	rH, rWORD6, rSHL
+	or	rWORD6, rE, rF
+	ldu	rWORD7, 24(rSTR1)
+	ldu	rWORD8, 24(rSTR2)
+	cmpld	cr6, rWORD5, rWORD6
+	bne	cr1, L(duLcr1)
+	srd	rG, rWORD8, rSHR
+	sld	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	cmpld	cr5, rWORD7, rWORD8
+	bdz-	L(du24)		/* Adjust CTR as we start with +4 */
+/* This is the primary loop */
+	.align 4
+L(duLoop):
+	ld	rWORD1, 8(rSTR1)
+	ld	rWORD2, 8(rSTR2)
+	cmpld	cr1, rWORD3, rWORD4
+	bne	cr6, L(duLcr6)
+	srd	rA, rWORD2, rSHR
+	sld	rD, rWORD2, rSHL
+	or	rWORD2, rA, rB
+L(duLoop1):
+	ld	rWORD3, 16(rSTR1)
+	ld	rWORD4, 16(rSTR2)
+	cmpld	cr6, rWORD5, rWORD6
+	bne	cr5, L(duLcr5)
+	srd	rC, rWORD4, rSHR
+	sld	rF, rWORD4, rSHL
+	or	rWORD4, rC, rD
+L(duLoop2):
+	ld	rWORD5, 24(rSTR1)
+	ld	rWORD6, 24(rSTR2)
+	cmpld	cr5, rWORD7, rWORD8
+	bne	cr0, L(duLcr0)
+	srd	rE, rWORD6, rSHR
+	sld	rH, rWORD6, rSHL
+	or	rWORD6, rE, rF
+L(duLoop3):
+	ldu	rWORD7, 32(rSTR1)
+	ldu	rWORD8, 32(rSTR2)
+	cmpld	cr0, rWORD1, rWORD2
+	bne-	cr1, L(duLcr1)
+	srd	rG, rWORD8, rSHR
+	sld	rB, rWORD8, rSHL
+	or	rWORD8, rG, rH
+	bdnz+	L(duLoop)	
+	
+L(duL4):
+	bne	cr1, L(duLcr1)
+	cmpld	cr1, rWORD3, rWORD4
+	bne	cr6, L(duLcr6)
+	cmpld	cr6, rWORD5, rWORD6
+	bne	cr5, L(duLcr5)
+	cmpld	cr5, rWORD7, rWORD8
+L(du44):
+	bne	cr0, L(duLcr0)
+L(du34):
+	bne	cr1, L(duLcr1)
+L(du24):
+	bne	cr6, L(duLcr6)
+L(du14):
+	sldi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+/* At this point we have a remainder of 1 to 7 bytes to compare.  We use
+   shift right double to eliminate bits beyond the compare length.
+   This allows the use of double word subtract to compute the final
+   result.
+
+   However it may not be safe to load rWORD2 which may be beyond the 
+   string length. So we compare the bit length of the remainder to
+   the right shift count (rSHR). If the bit count is less than or equal
+   we do not need to load rWORD2 (all significant bits are already in
+   rB).  */
+	cmpld	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	rA, 0
+	ble	cr7, L(dutrim)
+	ld	rWORD2, 8(rSTR2)
+	srd	rA, rWORD2, rSHR
+	.align 4
+L(dutrim):
+	ld	rWORD1, 8(rSTR1)
+	ld	rWORD8,-8(r1)
+	subfic	rN, rN, 64	/* Shift count is 64 - (rN * 8).  */ 
+	or	rWORD2, rA, rB
+	ld	rWORD7,-16(r1)	
+	ld	r29,-24(r1)
+	srd	rWORD1, rWORD1, rN
+	srd	rWORD2, rWORD2, rN
+	ld	r28,-32(r1)	
+	ld	r27,-40(r1)
+	li	rRTN, 0
+	cmpld	cr0, rWORD1, rWORD2	
+	ld	r26,-48(r1)
+	ld	r25,-56(r1)
+ 	beq	cr0, L(dureturn24)
+	li	rRTN, 1
+	ld	r24,-64(r1)
+	bgtlr	cr0
+	li	rRTN, -1
+	blr
+	.align 4
+L(duLcr0):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+	li	rRTN, 1
+	bgt	cr0, L(dureturn29)	
+	ld	r29,-24(r1)
+	ld	r28,-32(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align 4
+L(duLcr1):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+	li	rRTN, 1
+	bgt	cr1, L(dureturn29)	
+	ld	r29,-24(r1)
+	ld	r28,-32(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align 4
+L(duLcr6):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+	li	rRTN, 1
+	bgt	cr6, L(dureturn29)	
+	ld	r29,-24(r1)
+	ld	r28,-32(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align 4
+L(duLcr5):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+	li	rRTN, 1
+	bgt	cr5, L(dureturn29)	
+	ld	r29,-24(r1)
+	ld	r28,-32(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	3
+L(duZeroReturn):
+	li	rRTN,0
+	.align	4
+L(dureturn):
+	ld	rWORD8,-8(r1)
+	ld	rWORD7,-16(r1)
+L(dureturn29):	
+	ld	r29,-24(r1)
+	ld	r28,-32(r1)
+L(dureturn27):	
+	ld	r27,-40(r1)
+L(dureturn26):	
+	ld	r26,-48(r1)
+L(dureturn25):	
+	ld	r25,-56(r1)
+L(dureturn24):
+	ld	r24,-64(r1)
+	blr
+L(duzeroLength):
+	li	rRTN,0
+	blr
+
+END (BP_SYM (memcmp))
+libc_hidden_builtin_def (memcmp)
+weak_alias (memcmp, bcmp)
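The L(dutrim) path above handles the final 1-7 bytes by shifting both doublewords right so that only the bytes inside the compare length take part in the comparison.  A minimal C sketch of that idea, assuming big-endian doublewords with the next string bytes in their high-order positions, as the shift/merge code above leaves them (hypothetical helper, not part of the patch):

#include <stddef.h>
#include <stdint.h>

/* Compare the final 'rem' (1-7) bytes held in the high-order bytes of
   two big-endian doublewords, as L(dutrim) does with srd.  */
static int
tail_compare (uint64_t w1, uint64_t w2, size_t rem)
{
  unsigned int shift = 64 - 8 * rem;	/* discard bytes past the length */
  w1 >>= shift;
  w2 >>= shift;
  if (w1 == w2)
    return 0;
  return w1 > w2 ? 1 : -1;
}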
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power4/memcpy.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power4/memcpy.S
new file mode 100644
index 0000000000..9910ebda82
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power4/memcpy.S
@@ -0,0 +1,417 @@
+/* Optimized memcpy implementation for PowerPC64.
+   Copyright (C) 2003, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+   Returns 'dst'.
+
+   Memcpy handles short copies (< 32 bytes) using binary move blocks
+   (no loops) of lwz/stw.  The tail (remaining 1-3 bytes) is handled
+   with the appropriate combination of byte and halfword load/stores. 
+   There is minimal effort to optimize the alignment of short moves.  
+   The 64-bit implementations of POWER3 and POWER4 do a reasonable job
+   of handling unaligned load/stores that do not cross 32-byte boundaries.
+
+   Longer moves (>= 32-bytes) justify the effort to get at least the
+   destination doubleword (8-byte) aligned.  Further optimization is
+   possible when both source and destination are doubleword aligned.
+   Each case has an optimized unrolled loop.  */
+
+EALIGN (BP_SYM (memcpy), 5, 0)
+	CALL_MCOUNT 3
+
+    cmpldi cr1,5,31
+    neg   0,3
+    std   3,-16(1)
+    std   31,-8(1)
+    cfi_offset(31,-8)
+    andi. 11,3,7	/* check alignment of dst.  */
+    clrldi 0,0,61	/* Number of bytes until the 1st doubleword of dst.  */
+    clrldi 10,4,61	/* check alignment of src.  */
+    cmpldi cr6,5,8
+    ble-  cr1,.L2	/* If move < 32 bytes use short move code.  */
+    cmpld cr6,10,11     
+    mr    12,4
+    srdi  9,5,3		/* Number of full double words remaining.  */
+    mtcrf 0x01,0
+    mr    31,5
+    beq   .L0
+  
+    subf  31,0,5
+  /* Move 0-7 bytes as needed to get the destination doubleword aligned.  */
+1:  bf    31,2f
+    lbz   6,0(12)
+    addi  12,12,1
+    stb   6,0(3)
+    addi  3,3,1
+2:  bf    30,4f
+    lhz   6,0(12)
+    addi  12,12,2
+    sth   6,0(3)
+    addi  3,3,2
+4:  bf    29,0f
+    lwz   6,0(12)
+    addi  12,12,4
+    stw   6,0(3)
+    addi  3,3,4
+0:
+    clrldi 10,12,61	/* check alignment of src again.  */
+    srdi  9,31,3	/* Number of full double words remaining.  */
+    
+  /* Copy doublewords from source to destination, assuming the
+     destination is aligned on a doubleword boundary.
+
+     At this point we know there are at least 25 bytes left (32-7) to copy.
+     The next step is to determine if the source is also doubleword aligned. 
+     If not, branch to the unaligned move code at .L6, which uses
+     a load, shift, store strategy.
+     
+     Otherwise source and destination are doubleword aligned, and we can
+     use the optimized doubleword copy loop.  */
+.L0:
+    clrldi  11,31,61
+    mtcrf   0x01,9
+    cmpldi  cr1,11,0
+    bne-    cr6,.L6   /* If source is not DW aligned.  */
+
+  /* Move doublewords where destination and source are DW aligned.
+     Use an unrolled loop to copy 4 doublewords (32 bytes) per iteration.
+     If the copy is not an exact multiple of 32 bytes, 1-3
+     doublewords are copied as needed to set up the main loop.  After
+     the main loop exits there may be a tail of 1-7 bytes.  These bytes are
+     copied a word/halfword/byte at a time as needed to preserve alignment.  */
+
+    srdi  8,31,5
+    cmpldi	cr1,9,4
+    cmpldi	cr6,11,0
+    mr    11,12
+    
+    bf    30,1f
+    ld    6,0(12)
+    ld    7,8(12)
+    addi  11,12,16
+    mtctr 8
+    std   6,0(3)
+    std   7,8(3)
+    addi  10,3,16
+    bf    31,4f
+    ld    0,16(12)
+    std   0,16(3)    
+    blt   cr1,3f
+    addi  11,12,24
+    addi  10,3,24
+    b     4f
+    .align  4
+1:
+    mr    10,3
+    mtctr 8
+    bf    31,4f
+    ld    6,0(12)
+    addi  11,12,8
+    std   6,0(3)
+    addi  10,3,8
+    
+    .align  4
+4:
+    ld    6,0(11)
+    ld    7,8(11)
+    ld    8,16(11)
+    ld    0,24(11)
+    addi  11,11,32
+2:
+    std   6,0(10)
+    std   7,8(10)
+    std   8,16(10)
+    std   0,24(10)
+    addi  10,10,32
+    bdnz  4b
+3:  
+
+    rldicr 0,31,0,60
+    mtcrf 0x01,31
+    beq   cr6,0f
+.L9:
+    add   3,3,0
+    add   12,12,0
+    
+/*  At this point we have a tail of 0-7 bytes and we know that the
+    destination is doubleword aligned.  */
+4:  bf    29,2f
+    lwz   6,0(12)
+    addi  12,12,4
+    stw   6,0(3)
+    addi  3,3,4
+2:  bf    30,1f
+    lhz   6,0(12)
+    addi  12,12,2
+    sth   6,0(3)
+    addi  3,3,2
+1:  bf    31,0f
+    lbz   6,0(12)
+    stb   6,0(3)
+0:
+  /* Return original dst pointer.  */
+    ld 31,-8(1)
+    ld 3,-16(1)
+    blr
+       
+/* Copy up to 31 bytes.  This is divided into two cases 0-8 bytes and 9-31
+   bytes.  Each case is handled without loops, using binary (1,2,4,8) 
+   tests.  
+   
+   In the short (0-8 byte) case no attempt is made to force alignment
+   of either source or destination.  The hardware will handle the 
+   unaligned load/stores with small delays for crossing 32-, 64-, and
+   4096-byte boundaries.  Since these short moves are unlikely to be
+   unaligned or cross these boundaries, the overhead to force 
+   alignment is not justified.
+   
+   The longer (9-31 byte) move is more likely to cross 32- or 64-byte
+   boundaries.  Since only loads are sensitive to the 32-/64-byte
+   boundaries it is more important to align the source than the
+   destination.  If the source is not already word aligned, we first
+   move 1-3 bytes as needed.  Since we are only word aligned we don't 
+   use doubleword load/stores, to ensure that all loads are aligned.
+   While the destination and stores may still be unaligned, this
+   is only an issue for page (4096 byte boundary) crossing, which
+   should be rare for these short moves.  The hardware handles this
+   case automatically with a small delay.  */ 
+   
+    .align  4
+.L2:
+    mtcrf 0x01,5
+    neg   8,4
+    clrrdi	11,4,2
+    andi. 0,8,3
+    ble   cr6,.LE8	/* Handle moves of 0-8 bytes.  */
+/* At least 9 bytes left.  Get the source word aligned.  */
+    cmpldi	cr1,5,16
+    mr    10,5
+    mr    12,4
+    cmpldi	cr6,0,2
+    beq   .L3	/* If the source is already word aligned skip this.  */
+/* Copy 1-3 bytes to get source address word aligned.  */
+    lwz   6,0(11)
+    subf  10,0,5
+    add   12,4,0
+    blt   cr6,5f
+    srdi  7,6,16
+    bgt	  cr6,3f
+    sth   6,0(3)
+    b     7f
+    .align  4
+3:
+    stb   7,0(3)
+    sth   6,1(3)
+    b     7f
+    .align  4
+5:
+    stb   6,0(3)
+7:
+    cmpldi	cr1,10,16
+    add   3,3,0
+    mtcrf 0x01,10
+    .align  4
+.L3:
+/* At least 6 bytes left and the source is word aligned.  */
+    blt   cr1,8f
+16: /* Move 16 bytes.  */
+    lwz   6,0(12)
+    lwz   7,4(12)
+    stw   6,0(3)
+    lwz   6,8(12)
+    stw   7,4(3)
+    lwz   7,12(12)
+    addi  12,12,16
+    stw   6,8(3)
+    stw   7,12(3)
+    addi  3,3,16
+8:  /* Move 8 bytes.  */
+    bf    28,4f
+    lwz   6,0(12)
+    lwz   7,4(12)
+    addi  12,12,8
+    stw   6,0(3)
+    stw   7,4(3)
+    addi  3,3,8
+4:  /* Move 4 bytes.  */
+    bf    29,2f
+    lwz   6,0(12)
+    addi  12,12,4
+    stw   6,0(3)
+    addi  3,3,4    
+2:  /* Move 2-3 bytes.  */
+    bf    30,1f
+    lhz   6,0(12)
+    sth   6,0(3) 
+    bf    31,0f
+    lbz   7,2(12)
+    stb   7,2(3)
+    ld 3,-16(1)
+    blr
+1:  /* Move 1 byte.  */
+    bf    31,0f
+    lbz   6,0(12)
+    stb   6,0(3)
+0:
+  /* Return original dst pointer.  */
+    ld    3,-16(1)
+    blr
+
+/* Special case to copy 0-8 bytes.  */
+    .align  4
+.LE8:
+    mr    12,4
+    bne   cr6,4f
+/* Would have liked to use ld/std here but the 630 processors are
+   slow for load/store doubles that are not at least word aligned.
+   Unaligned load/store word executes with only a 1-cycle penalty.  */
+    lwz   6,0(4)
+    lwz   7,4(4)
+    stw   6,0(3)
+    stw   7,4(3)
+  /* Return original dst pointer.  */
+    ld    3,-16(1)
+    blr
+    .align  4
+4:  bf    29,2b
+    lwz   6,0(4)
+    stw   6,0(3)
+6:
+    bf    30,5f
+    lhz   7,4(4)
+    sth   7,4(3) 
+    bf    31,0f
+    lbz   8,6(4)
+    stb   8,6(3)
+    ld 3,-16(1)
+    blr
+    .align  4
+5:  
+    bf    31,0f
+    lbz   6,4(4)
+    stb   6,4(3)
+    .align  4
+0:
+  /* Return original dst pointer.  */
+    ld    3,-16(1)
+    blr
+
+    .align  4
+.L6:
+
+  /* Copy doublewords where the destination is aligned but the source is
+     not.  Use aligned doubleword loads from the source, shifted to realign
+     the data, to allow aligned destination stores.  */
+    addi    11,9,-1  /* loop DW count is one less than total */
+    subf    5,10,12
+    sldi    10,10,3
+    mr      4,3
+    srdi    8,11,2   /* calculate the 32 byte loop count */
+    ld      6,0(5)
+    mtcrf   0x01,11
+    cmpldi  cr6,9,4
+    mtctr   8
+    ld      7,8(5)
+    subfic  9,10,64
+    bf      30,1f
+
+    /* there are at least two DWs to copy */
+    sld     0,6,10
+    srd     8,7,9
+    or      0,0,8
+    ld      6,16(5)
+    std     0,0(4)
+    sld     0,7,10
+    srd     8,6,9
+    or      0,0,8
+    ld      7,24(5)
+    std     0,8(4)
+    addi    4,4,16
+    addi    5,5,32
+    blt     cr6,8f  /* if total DWs = 3, then bypass loop */
+    bf      31,4f
+    /* there is a third DW to copy */
+    sld     0,6,10
+    srd     8,7,9
+    or      0,0,8
+    std     0,0(4)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    addi    4,4,8
+    beq     cr6,8f  /* if total DWs = 4, then bypass loop */
+    b       4f
+    .align 4
+1:
+    sld     0,6,10
+    srd     8,7,9
+    addi    5,5,16
+    or      0,0,8
+    bf      31,4f
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    std     0,0(4)
+    addi    4,4,8
+    .align 4
+/* copy 32 bytes at a time */
+4:  sld   0,6,10
+    srd   8,7,9
+    or    0,0,8
+    ld    6,0(5)
+    std   0,0(4)
+    sld   0,7,10
+    srd   8,6,9
+    or    0,0,8
+    ld    7,8(5)
+    std   0,8(4)
+    sld   0,6,10
+    srd   8,7,9
+    or    0,0,8
+    ld    6,16(5)
+    std   0,16(4)
+    sld   0,7,10
+    srd   8,6,9
+    or    0,0,8
+    ld    7,24(5)
+    std   0,24(4)
+    addi  5,5,32
+    addi  4,4,32
+    bdnz+ 4b
+    .align 4
+8:
+    /* calculate and store the final DW */
+    sld   0,6,10
+    srd   8,7,9
+    or    0,0,8  
+    std   0,0(4)
+3:
+    rldicr 0,31,0,60
+    mtcrf 0x01,31
+    bne   cr1,.L9	/* If the tail is 0 bytes we are done!  */
+  /* Return original dst pointer.  */
+    ld 31,-8(1)
+    ld 3,-16(1)
+    blr
+END_GEN_TB (BP_SYM (memcpy),TB_TOCLESS)
+libc_hidden_builtin_def (memcpy)
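The .L6 path above copies with a mismatched source alignment by doing only aligned doubleword loads and merging each adjacent pair with sld/srd before the aligned store.  A minimal C sketch of that load/shift/store strategy, assuming big-endian byte order and a source misalignment of 1-7 bytes (hypothetical helper; it ignores the tail handling the real code performs):

#include <stddef.h>
#include <stdint.h>

/* dst is 8-byte aligned, src is not: load aligned doublewords from the
   block containing src and merge neighbouring pairs with shifts so that
   every store is an aligned doubleword.  Assumes 0 < (src & 7) < 8,
   so neither shift count is 0 or 64.  */
static void
copy_shifted (uint64_t *dst, const unsigned char *src, size_t ndw)
{
  size_t off = (uintptr_t) src & 7;		/* source misalignment */
  const uint64_t *s = (const uint64_t *) (src - off);
  unsigned int lsh = 8 * off, rsh = 64 - lsh;
  uint64_t prev = *s++;				/* pre-load 1st doubleword */

  for (size_t i = 0; i < ndw; i++)
    {
      uint64_t next = *s++;
      dst[i] = (prev << lsh) | (next >> rsh);	/* the sld/srd/or of the loop */
      prev = next;
    }
}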
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power4/strncmp.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power4/strncmp.S
new file mode 100644
index 0000000000..7a1665d2bc
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power4/strncmp.S
@@ -0,0 +1,180 @@
+/* Optimized strncmp implementation for PowerPC64.
+   Copyright (C) 2003, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* See strlen.s for comments on how the end-of-string testing works.  */
+
+/* int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5])  */
+
+EALIGN (BP_SYM(strncmp), 4, 0)
+	CALL_MCOUNT 3
+
+#define rTMP	r0
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rN	r5	/* max string length */
+/* Note:  The Bounded pointer support in this code is broken.  This code
+   was inherited from PPC32 and that support was never completed.
+   Current PPC gcc does not support -fbounds-check or -fbounded-pointers.  */
+#define rWORD1	r6	/* current word in s1 */
+#define rWORD2	r7	/* current word in s2 */
+#define rWORD3  r10
+#define rWORD4  r11
+#define rFEFE	r8	/* constant 0xfefefefefefefeff (-0x0101010101010101) */
+#define r7F7F	r9	/* constant 0x7f7f7f7f7f7f7f7f */
+#define rNEG	r10	/* ~(word in s1 | 0x7f7f7f7f7f7f7f7f) */
+#define rBITDIF	r11	/* bits that differ in s1 & s2 words */
+
+	dcbt	0,rSTR1
+	or	rTMP, rSTR2, rSTR1
+	lis	r7F7F, 0x7f7f
+	dcbt	0,rSTR2
+	clrldi.	rTMP, rTMP, 61
+	cmpldi	cr1, rN, 0
+	lis	rFEFE, -0x101
+	bne	L(unaligned)
+/* We are doubleword aligned so set up for two loops.  First a doubleword
+   loop, then fall into the byte loop if any residual.  */
+	srdi.	rTMP, rN, 3
+	clrldi	rN, rN, 61
+	addi	rFEFE, rFEFE, -0x101
+	addi	r7F7F, r7F7F, 0x7f7f
+	cmpldi	cr1, rN, 0	
+	beq	L(unaligned)
+
+	mtctr	rTMP	/* Power4 wants mtctr 1st in dispatch group.  */
+	ld	rWORD1, 0(rSTR1)
+	ld	rWORD2, 0(rSTR2)
+	sldi	rTMP, rFEFE, 32
+	insrdi	r7F7F, r7F7F, 32, 0
+	add	rFEFE, rFEFE, rTMP
+	b	L(g1)
+
+L(g0):	
+	ldu	rWORD1, 8(rSTR1)
+	bne-	cr1, L(different)
+	ldu	rWORD2, 8(rSTR2)
+L(g1):	add	rTMP, rFEFE, rWORD1
+	nor	rNEG, r7F7F, rWORD1
+	bdz	L(tail)
+	and.	rTMP, rTMP, rNEG
+	cmpd	cr1, rWORD1, rWORD2
+	beq+	L(g0)
+	
+/* OK. We've hit the end of the string. We need to be careful that
+   we don't compare two strings as different because of gunk beyond
+   the end of the strings...  */
+	
+L(endstring):
+	and	rTMP, r7F7F, rWORD1
+	beq	cr1, L(equal)
+	add	rTMP, rTMP, r7F7F
+	xor.	rBITDIF, rWORD1, rWORD2
+
+	andc	rNEG, rNEG, rTMP
+	blt-	L(highbit)
+	cntlzd	rBITDIF, rBITDIF
+	cntlzd	rNEG, rNEG
+	addi	rNEG, rNEG, 7
+	cmpd	cr1, rNEG, rBITDIF
+	sub	rRTN, rWORD1, rWORD2
+	blt-	cr1, L(equal)
+	sradi	rRTN, rRTN, 63
+	ori	rRTN, rRTN, 1
+	blr
+L(equal):
+	li	rRTN, 0
+	blr
+
+L(different):
+	ldu	rWORD1, -8(rSTR1)
+	xor.	rBITDIF, rWORD1, rWORD2
+	sub	rRTN, rWORD1, rWORD2
+	blt-	L(highbit)
+	sradi	rRTN, rRTN, 63
+	ori	rRTN, rRTN, 1
+	blr
+L(highbit):
+	srdi	rWORD2, rWORD2, 56
+	srdi	rWORD1, rWORD1, 56
+	sub	rRTN, rWORD1, rWORD2
+	blr
+
+
+/* Oh well.  In this case, we just do a byte-by-byte comparison.  */
+	.align 4
+L(tail):
+	and.	rTMP, rTMP, rNEG
+	cmpd	cr1, rWORD1, rWORD2
+	bne-	L(endstring)
+	addi	rSTR1, rSTR1, 8
+	bne-	cr1, L(different)
+	addi	rSTR2, rSTR2, 8
+	cmpldi	cr1, rN, 0
+L(unaligned):
+	mtctr   rN	/* Power4 wants mtctr 1st in dispatch group */
+	ble	cr1, L(ux)
+L(uz):
+	lbz	rWORD1, 0(rSTR1)
+	lbz	rWORD2, 0(rSTR2)
+	.align 4
+L(u1):
+	cmpdi	cr1, rWORD1, 0
+	bdz	L(u4)
+	cmpd	rWORD1, rWORD2
+	beq-	cr1, L(u4)
+	lbzu    rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	bne-	L(u4)
+	cmpdi	cr1, rWORD3, 0
+	bdz	L(u3)
+	cmpd	rWORD3, rWORD4
+	beq-    cr1, L(u3)
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	bne-    L(u3)
+	cmpdi	cr1, rWORD1, 0
+	bdz	L(u4)
+	cmpd	rWORD1, rWORD2
+	beq-	cr1, L(u4)
+	lbzu	rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	bne-	L(u4)
+	cmpdi	cr1, rWORD3, 0
+	bdz	L(u3)
+	cmpd	rWORD3, rWORD4
+	beq-    cr1, L(u3)
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	beq+    L(u1)
+
+L(u3):  sub     rRTN, rWORD3, rWORD4
+        blr
+L(u4):	sub	rRTN, rWORD1, rWORD2
+	blr
+L(ux):
+	li	rRTN, 0
+	blr
+END (BP_SYM (strncmp))
+libc_hidden_builtin_def (strncmp)
+
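The rFEFE/r7F7F constants above implement the classic word-at-a-time NUL test: a doubleword w contains a zero byte exactly when (w - 0x0101...01) & ~w & 0x8080...80 is non-zero.  In the assembly the subtraction is formed by adding rFEFE (the two's-complement negative of 0x0101...01) and the mask comes from nor-ing the word with r7F7F.  A minimal C sketch of the test (illustrative only, not part of the patch):

#include <stdint.h>

static int
has_zero_byte (uint64_t w)
{
  uint64_t ones  = 0x0101010101010101ULL;	/* rFEFE is -ones */
  uint64_t highs = 0x8080808080808080ULL;	/* ~r7F7F */
  return ((w - ones) & ~w & highs) != 0;	/* non-zero iff some byte is 0 */
}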
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/Implies
new file mode 100644
index 0000000000..ac431fa96e
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/power4
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S
new file mode 100644
index 0000000000..1e4851db9e
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceil.S
@@ -0,0 +1,38 @@
+/* ceil function.  PowerPC64/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__ceil, 4, 0)
+	CALL_MCOUNT 0
+	frip	fp1, fp1
+	blr
+	END (__ceil)
+
+weak_alias (__ceil, ceil)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__ceil, ceill)
+strong_alias (__ceil, __ceill)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __ceil, ceill, GLIBC_2_0)
+#endif
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S
new file mode 100644
index 0000000000..38c51d5ff1
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_ceilf.S
@@ -0,0 +1,31 @@
+/* ceilf function.  PowerPC64/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__ceilf, 4, 0)
+	CALL_MCOUNT 0
+	frip	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__ceilf)
+
+weak_alias (__ceilf, ceilf)
+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S
new file mode 100644
index 0000000000..86f07c8b59
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_floor.S
@@ -0,0 +1,38 @@
+/* floor function.  PowerPC64/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__floor, 4, 0)
+	CALL_MCOUNT 0
+	frim	fp1, fp1
+	blr
+	END (__floor)
+
+weak_alias (__floor, floor)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__floor, floorl)
+strong_alias (__floor, __floorl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __floor, floorl, GLIBC_2_0)
+#endif
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S
new file mode 100644
index 0000000000..e7bdccf513
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_floorf.S
@@ -0,0 +1,31 @@
+/* floorf function.  PowerPC64/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__floorf, 4, 0)
+	CALL_MCOUNT 0
+	frim	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__floorf)
+
+weak_alias (__floorf, floorf)
+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S
new file mode 100644
index 0000000000..4e25d701a4
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_round.S
@@ -0,0 +1,38 @@
+/* round function.  PowerPC64/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__round, 4, 0)
+	CALL_MCOUNT 0
+	frin	fp1, fp1
+	blr
+	END (__round)
+
+weak_alias (__round, round)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__round, roundl)
+strong_alias (__round, __roundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __round, roundl, GLIBC_2_0)
+#endif
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S
new file mode 100644
index 0000000000..30b83be9e7
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_roundf.S
@@ -0,0 +1,31 @@
+/* roundf function.  PowerPC64/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__roundf, 4, 0)
+	CALL_MCOUNT 0
+	frin	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__roundf)
+
+weak_alias (__roundf, roundf)
+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S
new file mode 100644
index 0000000000..01eccbdf07
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_trunc.S
@@ -0,0 +1,38 @@
+/* trunc function.  PowerPC64/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__trunc, 4, 0)
+	CALL_MCOUNT 0
+	friz	fp1, fp1
+	blr
+	END (__trunc)
+
+weak_alias (__trunc, trunc)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__trunc, truncl)
+strong_alias (__trunc, __truncl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __trunc, truncl, GLIBC_2_0)
+#endif
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S
new file mode 100644
index 0000000000..61d9cb372d
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5+/fpu/s_truncf.S
@@ -0,0 +1,31 @@
+/* truncf function.  PowerPC64/power5+ version.
+   Copyright (C) 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__truncf, 4, 0)
+	CALL_MCOUNT 0
+	friz	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__truncf)
+
+weak_alias (__truncf, truncf)
+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power5/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5/Implies
new file mode 100644
index 0000000000..ac431fa96e
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power5/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/power4
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power6/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc64/power6/Implies
new file mode 100644
index 0000000000..0f35f37c77
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power6/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/power5+
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power6/fpu/Implies b/powerpc-cpu/sysdeps/powerpc/powerpc64/power6/fpu/Implies
new file mode 100644
index 0000000000..f09854edb6
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power6/fpu/Implies
@@ -0,0 +1 @@
+powerpc/powerpc64/power5+/fpu
diff --git a/powerpc-cpu/sysdeps/powerpc/powerpc64/power6/memcpy.S b/powerpc-cpu/sysdeps/powerpc/powerpc64/power6/memcpy.S
new file mode 100644
index 0000000000..98798e5d6b
--- /dev/null
+++ b/powerpc-cpu/sysdeps/powerpc/powerpc64/power6/memcpy.S
@@ -0,0 +1,1002 @@
+/* Optimized memcpy implementation for PowerPC64.
+   Copyright (C) 2003, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+   02110-1301 USA.  */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+   Returns 'dst'.
+
+   Memcpy handles short copies (< 32 bytes) using binary move blocks
+   (no loops) of lwz/stw.  The tail (remaining 1-3 bytes) is handled
+   with the appropriate combination of byte and halfword load/stores. 
+   There is minimal effort to optimize the alignment of short moves.  
+   The 64-bit implementations of POWER3 and POWER4 do a reasonable job
+   of handling unaligned load/stores that do not cross 32-byte boundaries.
+
+   Longer moves (>= 32-bytes) justify the effort to get at least the
+   destination doubleword (8-byte) aligned.  Further optimization is
+   possible when both source and destination are doubleword aligned.
+   Each case has an optimized unrolled loop.  */
+
+EALIGN (BP_SYM (memcpy), 5, 0)
+	CALL_MCOUNT 3
+
+    cmpldi cr1,5,31
+    neg   0,3
+    std   3,-16(1)
+    std   31,-8(1)
+    cfi_offset(31,-8)
+    andi. 11,3,7	/* check alignment of dst.  */
+    clrldi 0,0,61	/* Number of bytes until the 1st doubleword of dst.  */
+    clrldi 10,4,61	/* check alignment of src.  */
+    cmpldi cr6,5,8
+    ble-  cr1,.L2	/* If move < 32 bytes use short move code.  */
+    cmpld cr6,10,11     
+    mr    12,4
+    srdi  9,5,3		/* Number of full double words remaining.  */
+    mtcrf 0x01,0
+    mr    31,5
+    beq   .L0
+  
+    subf  31,0,5
+  /* Move 0-7 bytes as needed to get the destination doubleword aligned.  */
+1:  bf    31,2f
+    lbz   6,0(12)
+    addi  12,12,1
+    stb   6,0(3)
+    addi  3,3,1
+2:  bf    30,4f
+    lhz   6,0(12)
+    addi  12,12,2
+    sth   6,0(3)
+    addi  3,3,2
+4:  bf    29,0f
+    lwz   6,0(12)
+    addi  12,12,4
+    stw   6,0(3)
+    addi  3,3,4
+0:
+    clrldi 10,12,61	/* check alignment of src again.  */
+    srdi  9,31,3	/* Number of full double words remaining.  */
+    
+  /* Copy doublewords from source to destination, assuming the
+     destination is aligned on a doubleword boundary.
+
+     At this point we know there are at least 25 bytes left (32-7) to copy.
+     The next step is to determine if the source is also doubleword aligned. 
+     If not, branch to the unaligned move code at .L6, which uses
+     a load, shift, store strategy.
+     
+     Otherwise source and destination are doubleword aligned, and we can
+     use the optimized doubleword copy loop.  */
+.L0:
+    clrldi  11,31,61
+    mtcrf   0x01,9
+    cmpldi  cr1,11,0
+    bne-    cr6,.L6   /* If source is not DW aligned.  */
+
+  /* Move doublewords where destination and source are DW aligned.
+     Use an unrolled loop to copy 4 doublewords (32 bytes) per iteration.
+     If the copy is not an exact multiple of 32 bytes, 1-3
+     doublewords are copied as needed to set up the main loop.  After
+     the main loop exits there may be a tail of 1-7 bytes.  These bytes
+     are copied a word/halfword/byte at a time as needed to preserve
+     alignment.
+     
+     For POWER6 the L1 is store-through and the L2 is store-in.  The
+     L2 is clocked at half CPU clock so we can store 16 bytes every
+     other cycle.  POWER6 also has a load/store bypass so we can do
+     load, load, store, store every 2 cycles.
+     
+     For POWER6 unaligned loads will take a 20+ cycle hiccup for any
+     L1 cache miss that crosses a 32- or 128-byte boundary.  Store
+     is more forgiving and does not take a hiccup until page or
+     segment boundaries.  So we require doubleword alignment for 
+     the source but may take a risk and only require word alignment
+     for the destination.  */
+
+    srdi  8,31,5
+    cmpldi	cr1,9,4
+    cmpldi	cr6,11,0
+    mr    11,12
+    
+    bf    30,1f
+    ld    6,0(12)
+    ld    7,8(12)
+    addi  11,12,16
+    mtctr 8
+    std   6,0(3)
+    std   7,8(3)
+    addi  10,3,16
+    bf    31,4f
+    ld    0,16(12)
+    std   0,16(3)    
+    blt   cr1,3f
+    addi  11,12,24
+    addi  10,3,24
+    b     4f
+    .align  4
+1:
+    mr    10,3
+    mtctr 8
+    bf    31,4f
+    ld    6,0(12)
+    addi  11,12,8
+    std   6,0(3)
+    addi  10,3,8
+    
+    .align  4
+4:
+    ld    6,0(11)
+    ld    7,8(11)
+    std   6,0(10)
+    std   7,8(10)
+    ld    8,16(11)
+    ld    0,24(11)
+    std   8,16(10)
+    std   0,24(10)
+    bdz   3f
+
+    ld    6,0+32(11)
+    ld    7,8+32(11)
+    std   6,0+32(10)
+    std   7,8+32(10)
+    ld    8,16+32(11)
+    ld    0,24+32(11)
+    std   8,16+32(10)
+    std   0,24+32(10)
+    bdz   3f
+
+    ld    6,0+64(11)
+    ld    7,8+64(11)
+    std   6,0+64(10)
+    std   7,8+64(10)
+    ld    8,16+64(11)
+    ld    0,24+64(11)
+    std   8,16+64(10)
+    std   0,24+64(10)
+    bdz   3f
+
+    ld    6,0+96(11)
+    ld    7,8+96(11)
+    std   6,0+96(10)
+    std   7,8+96(10)
+    ld    8,16+96(11)
+    ld    0,24+96(11)
+    addi  11,11,128
+    std   8,16+96(10)
+    std   0,24+96(10)
+    addi  10,10,128
+    bdnz  4b
+3:
+
+    rldicr 0,31,0,60
+    mtcrf 0x01,31
+    beq   cr6,0f
+.L9:
+    add   3,3,0
+    add   12,12,0
+    
+/*  At this point we have a tail of 0-7 bytes and we know that the
+    destination is doubleword aligned.  */
+4:  bf    29,2f
+    lwz   6,0(12)
+    addi  12,12,4
+    stw   6,0(3)
+    addi  3,3,4
+2:  bf    30,1f
+    lhz   6,0(12)
+    addi  12,12,2
+    sth   6,0(3)
+    addi  3,3,2
+1:  bf    31,0f
+    lbz   6,0(12)
+    stb   6,0(3)
+0:
+  /* Return original dst pointer.  */
+    ld 31,-8(1)
+    ld 3,-16(1)
+    blr
+
+/* Copy up to 31 bytes.  This is divided into two cases 0-8 bytes and 9-31
+   bytes.  Each case is handled without loops, using binary (1,2,4,8)
+   tests.
+
+   In the short (0-8 byte) case no attempt is made to force alignment
+   of either source or destination.  The hardware will handle the
+   unaligned load/stores with small delays for crossing 32-, 128-,
+   and 4096-byte boundaries.  Since these short moves are unlikely to be
+   unaligned or cross these boundaries, the overhead to force
+   alignment is not justified.
+
+   The longer (9-31 byte) move is more likely to cross 32- or 128-byte
+   boundaries.  Since only loads are sensitive to the 32-/128-byte
+   boundaries it is more important to align the source than the
+   destination.  If the source is not already word aligned, we first
+   move 1-3 bytes as needed.  Since we are only word aligned we don't
+   use doubleword load/stores, to ensure that all loads are aligned.
+   While the destination and stores may still be unaligned, this
+   is only an issue for page (4096 byte boundary) crossing, which
+   should be rare for these short moves.  The hardware handles this
+   case automatically with a small (~20 cycle) delay.  */
+    .align  4
+.L2:
+    mtcrf 0x01,5
+    neg   8,4
+    clrrdi	11,4,2
+    andi. 0,8,3
+    ble   cr6,.LE8	/* Handle moves of 0-8 bytes.  */
+/* At least 9 bytes left.  Get the source word aligned.  */
+    cmpldi	cr1,5,16
+    mr    10,5
+    mr    12,4
+    cmpldi	cr6,0,2
+    beq   L(dus_tail)	/* If the source is already word aligned skip this.  */
+/* Copy 1-3 bytes to get source address word aligned.  */
+    lwz   6,0(11)
+    subf  10,0,5
+    add   12,4,0
+    blt   cr6,5f
+    srdi  7,6,16
+    bgt	  cr6,3f
+    sth   6,0(3)
+    b     7f
+    .align  4
+3:
+    stb   7,0(3)
+    sth   6,1(3)
+    b     7f
+    .align  4
+5:
+    stb   6,0(3)
+7:
+    cmpldi	cr1,10,16
+    add   3,3,0
+    mtcrf 0x01,10
+    .align  4
+L(dus_tail):
+/* At least 6 bytes left and the source is word aligned.  This allows
+   some speculative loads up front.  */
+/* We need to special case the fall-through because the biggest delays
+   are due to address computation not being ready in time for the 
+   AGEN.  */
+    lwz   6,0(12)
+    lwz   7,4(12)
+    blt   cr1,L(dus_tail8)
+    cmpldi	cr0,10,24
+L(dus_tail16): /* Move 16 bytes.  */
+    stw   6,0(3)
+    stw   7,4(3)
+    lwz   6,8(12)
+    lwz   7,12(12)
+    stw   6,8(3)
+    stw   7,12(3)
+/* Move 8 bytes more.  */
+    bf    28,L(dus_tail16p8)
+    cmpldi	cr1,10,28
+    lwz   6,16(12)
+    lwz   7,20(12)
+    stw   6,16(3)
+    stw   7,20(3)
+/* Move 4 bytes more.  */
+    bf    29,L(dus_tail16p4)
+    lwz   6,24(12)
+    stw   6,24(3)
+    addi  12,12,28
+    addi  3,3,28
+    bgt   cr1,L(dus_tail2)
+ /* exactly 28 bytes.  Return original dst pointer and exit.  */
+    ld    3,-16(1)
+    blr
+    .align  4
+L(dus_tail16p8):  /* less than 8 bytes left.  */
+    beq   cr1,L(dus_tailX) /* exactly 16 bytes, early exit.  */
+    cmpldi	cr1,10,20
+    bf    29,L(dus_tail16p2)
+/* Move 4 bytes more.  */
+    lwz   6,16(12)
+    stw   6,16(3)
+    addi  12,12,20
+    addi  3,3,20
+    bgt   cr1,L(dus_tail2)
+ /* exactly 20 bytes.  Return original dst pointer and exit.  */
+    ld    3,-16(1)
+    blr
+    .align  4
+L(dus_tail16p4):  /* less than 4 bytes left.  */
+    addi  12,12,24
+    addi  3,3,24
+    bgt   cr0,L(dus_tail2)
+ /* exactly 24 bytes.  Return original dst pointer and exit.  */
+    ld    3,-16(1)
+    blr
+    .align  4
+L(dus_tail16p2):  /* 16 bytes moved, less than 4 bytes left.  */
+    addi  12,12,16
+    addi  3,3,16
+    b     L(dus_tail2)
+
+    .align  4
+L(dus_tail8):  /* Move 8 bytes.  */
+/*  r6, r7 already loaded speculatively.  */
+    cmpldi	cr1,10,8
+    cmpldi	cr0,10,12
+    bf    28,L(dus_tail4)
+    stw   6,0(3)
+    stw   7,4(3)
+/* Move 4 bytes more.  */
+    bf    29,L(dus_tail8p4)
+    lwz   6,8(12)
+    stw   6,8(3)
+    addi  12,12,12
+    addi  3,3,12
+    bgt   cr0,L(dus_tail2)
+ /* exactly 12 bytes.  Return original dst pointer and exit.  */
+    ld    3,-16(1)
+    blr
+    .align  4
+L(dus_tail8p4):  /* less than 4 bytes left.  */
+    addi  12,12,8
+    addi  3,3,8
+    bgt   cr1,L(dus_tail2)
+ /* exactly 8 bytes.  Return original dst pointer and exit.  */
+    ld    3,-16(1)
+    blr
+
+    .align  4
+L(dus_tail4):  /* Move 4 bytes.  */
+/*  r6 already loaded speculatively.  If we are here we know there are
+    more than 4 bytes left.  So there is no need to test.  */
+    addi  12,12,4
+    stw   6,0(3)
+    addi  3,3,4
+L(dus_tail2):  /* Move 2-3 bytes.  */
+    bf    30,L(dus_tail1)
+    lhz   6,0(12)
+    sth   6,0(3) 
+    bf    31,L(dus_tailX)
+    lbz   7,2(12)
+    stb   7,2(3)
+    ld 3,-16(1)
+    blr
+L(dus_tail1):  /* Move 1 byte.  */
+    bf    31,L(dus_tailX)
+    lbz   6,0(12)
+    stb   6,0(3)
+L(dus_tailX):
+  /* Return original dst pointer.  */
+    ld    3,-16(1)
+    blr
+
+/* Special case to copy 0-8 bytes.  */
+    .align  4
+.LE8:
+    mr    12,4
+    bne   cr6,L(dus_4)
+/* Exactly 8 bytes.  We may cross a 32-/128-byte boundary and take a ~20
+   cycle delay.  This case should be rare and any attempt to avoid this
+   would take most of 20 cycles any way.  */
+    ld   6,0(4)
+    std   6,0(3)
+  /* Return original dst pointer.  */
+    ld    3,-16(1)
+    blr
+    .align  4
+L(dus_4):
+    bf    29,L(dus_tail2)
+    lwz   6,0(4)
+    stw   6,0(3)
+    bf    30,L(dus_5)
+    lhz   7,4(4)
+    sth   7,4(3) 
+    bf    31,L(dus_0)
+    lbz   8,6(4)
+    stb   8,6(3)
+    ld 3,-16(1)
+    blr
+    .align  4
+L(dus_5):
+    bf    31,L(dus_0)
+    lbz   6,4(4)
+    stb   6,4(3)
+L(dus_0):
+  /* Return original dst pointer.  */
+    ld    3,-16(1)
+    blr
+
+    .align  4
+.L6:
+
+  /* Copy doublewords where the destination is aligned but the source is
+     not.  Use aligned doubleword loads from the source, shifted to realign
+     the data, to allow aligned destination stores.  */
+    addi    11,9,-1  /* loop DW count is one less than total */
+    subf    5,10,12  /* Move source addr to previous full double word.  */
+    cmpldi  cr5, 10, 2
+    cmpldi  cr0, 10, 4
+    mr      4,3
+    srdi    8,11,2   /* calculate the 32 byte loop count */
+    ld      6,0(5)   /* pre load 1st full doubleword.  */
+    mtcrf   0x01,11
+    cmpldi  cr6,9,4
+    mtctr   8
+    ld      7,8(5)   /* pre load 2nd full doubleword.  */
+    bge     cr0, L(du4_do)
+    blt     cr5, L(du1_do)
+    beq     cr5, L(du2_do)
+    b       L(du3_do) 
+       
+    .align 4
+L(du1_do):
+    bf      30,L(du1_1dw)
+
+    /* there are at least two DWs to copy */
+    sldi     0,6, 8
+    srdi     8,7, 64-8
+    or      0,0,8
+    ld      6,16(5)
+    std     0,0(4)
+    sldi     0,7, 8
+    srdi     8,6, 64-8
+    or      0,0,8
+    ld      7,24(5)
+    std     0,8(4)
+    addi    4,4,16
+    addi    5,5,32
+    blt     cr6,L(du1_fini)  /* if total DWs = 3, then bypass loop */
+    bf      31,L(du1_loop)
+    /* there is a third DW to copy */
+    sldi     0,6, 8
+    srdi     8,7, 64-8
+    or      0,0,8
+    std     0,0(4)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    addi    4,4,8
+    beq     cr6,L(du1_fini)  /* if total DWs = 4, then bypass loop */
+    b       L(du1_loop)
+    .align 4
+L(du1_1dw):
+    sldi     0,6, 8
+    srdi     8,7, 64-8
+    addi    5,5,16
+    or      0,0,8
+    bf      31,L(du1_loop)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    std     0,0(4)
+    addi    4,4,8
+    .align 4
+/* copy 32 bytes at a time */
+L(du1_loop):
+    sldi   0,6, 8
+    srdi   8,7, 64-8
+    or    0,0,8
+    ld    6,0(5)
+    std   0,0(4)
+    sldi   0,7, 8
+    srdi   8,6, 64-8
+    or    0,0,8
+    ld    7,8(5)
+    std   0,8(4)
+    sldi   0,6, 8
+    srdi   8,7, 64-8
+    or    0,0,8
+    ld    6,16(5)
+    std   0,16(4)
+    sldi   0,7, 8
+    srdi   8,6, 64-8
+    or    0,0,8
+    ld    7,24(5)
+    std   0,24(4)
+    addi  5,5,32
+    addi  4,4,32
+    bdnz+ L(du1_loop)
+    .align 4
+L(du1_fini):
+    /* calculate and store the final DW */
+    sldi   0,6, 8
+    srdi   8,7, 64-8
+    or    0,0,8  
+    std   0,0(4)
+    b     L(du_done)
+
+    .align 4
+L(du2_do):
+    bf      30,L(du2_1dw)
+
+    /* there are at least two DWs to copy */
+    sldi     0,6, 16
+    srdi     8,7, 64-16
+    or      0,0,8
+    ld      6,16(5)
+    std     0,0(4)
+    sldi     0,7, 16
+    srdi     8,6, 64-16
+    or      0,0,8
+    ld      7,24(5)
+    std     0,8(4)
+    addi    4,4,16
+    addi    5,5,32
+    blt     cr6,L(du2_fini)  /* if total DWs = 3, then bypass loop */
+    bf      31,L(du2_loop)
+    /* there is a third DW to copy */
+    sldi     0,6, 16
+    srdi     8,7, 64-16
+    or      0,0,8
+    std     0,0(4)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    addi    4,4,8
+    beq     cr6,L(du2_fini)  /* if total DWs = 4, then bypass loop */
+    b       L(du2_loop)
+    .align 4
+L(du2_1dw):
+    sldi     0,6, 16
+    srdi     8,7, 64-16
+    addi    5,5,16
+    or      0,0,8
+    bf      31,L(du2_loop)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    std     0,0(4)
+    addi    4,4,8
+    .align 4
+/* copy 32 bytes at a time */
+L(du2_loop):
+    sldi   0,6, 16
+    srdi   8,7, 64-16
+    or    0,0,8
+    ld    6,0(5)
+    std   0,0(4)
+    sldi   0,7, 16
+    srdi   8,6, 64-16
+    or    0,0,8
+    ld    7,8(5)
+    std   0,8(4)
+    sldi   0,6, 16
+    srdi   8,7, 64-16
+    or    0,0,8
+    ld    6,16(5)
+    std   0,16(4)
+    sldi   0,7, 16
+    srdi   8,6, 64-16
+    or    0,0,8
+    ld    7,24(5)
+    std   0,24(4)
+    addi  5,5,32
+    addi  4,4,32
+    bdnz+ L(du2_loop)
+    .align 4
+L(du2_fini):
+    /* calculate and store the final DW */
+    sldi   0,6, 16
+    srdi   8,7, 64-16
+    or    0,0,8  
+    std   0,0(4)
+    b     L(du_done)
+
+    .align 4
+L(du3_do):
+    bf      30,L(du3_1dw)
+
+    /* there are at least two DWs to copy */
+    sldi     0,6, 24
+    srdi     8,7, 64-24
+    or      0,0,8
+    ld      6,16(5)
+    std     0,0(4)
+    sldi     0,7, 24
+    srdi     8,6, 64-24
+    or      0,0,8
+    ld      7,24(5)
+    std     0,8(4)
+    addi    4,4,16
+    addi    5,5,32
+    blt     cr6,L(du3_fini)  /* if total DWs = 3, then bypass loop */
+    bf      31,L(du3_loop)
+    /* there is a third DW to copy */
+    sldi     0,6, 24
+    srdi     8,7, 64-24
+    or      0,0,8
+    std     0,0(4)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    addi    4,4,8
+    beq     cr6,L(du3_fini)  /* if total DWs = 4, then bypass loop */
+    b       L(du3_loop)
+    .align 4
+L(du3_1dw):
+    sldi     0,6, 24
+    srdi     8,7, 64-24
+    addi    5,5,16
+    or      0,0,8
+    bf      31,L(du3_loop)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    std     0,0(4)
+    addi    4,4,8
+    .align 4
+/* copy 32 bytes at a time */
+L(du3_loop):
+    sldi   0,6, 24
+    srdi   8,7, 64-24
+    or    0,0,8
+    ld    6,0(5)
+    std   0,0(4)
+    sldi   0,7, 24
+    srdi   8,6, 64-24
+    or    0,0,8
+    ld    7,8(5)
+    std   0,8(4)
+    sldi   0,6, 24
+    srdi   8,7, 64-24
+    or    0,0,8
+    ld    6,16(5)
+    std   0,16(4)
+    sldi   0,7, 24
+    srdi   8,6, 64-24
+    or    0,0,8
+    ld    7,24(5)
+    std   0,24(4)
+    addi  5,5,32
+    addi  4,4,32
+    bdnz+ L(du3_loop)
+    .align 4
+L(du3_fini):
+    /* calculate and store the final DW */
+    sldi   0,6, 24
+    srdi   8,7, 64-24
+    or    0,0,8  
+    std   0,0(4)
+    b     L(du_done)
+
+    .align 4
+L(du4_do):
+    cmpldi  cr5, 10, 6
+    beq     cr0, L(du4_dox)
+    blt     cr5, L(du5_do)
+    beq     cr5, L(du6_do)
+    b       L(du7_do)
+L(du4_dox):
+    bf      30,L(du4_1dw)
+
+    /* there are at least two DWs to copy */
+    sldi     0,6, 32
+    srdi     8,7, 64-32
+    or      0,0,8
+    ld      6,16(5)
+    std     0,0(4)
+    sldi     0,7, 32
+    srdi     8,6, 64-32
+    or      0,0,8
+    ld      7,24(5)
+    std     0,8(4)
+    addi    4,4,16
+    addi    5,5,32
+    blt     cr6,L(du4_fini)  /* if total DWs = 3, then bypass loop */
+    bf      31,L(du4_loop)
+    /* there is a third DW to copy */
+    sldi     0,6, 32
+    srdi     8,7, 64-32
+    or      0,0,8
+    std     0,0(4)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    addi    4,4,8
+    beq     cr6,L(du4_fini)  /* if total DWs = 4, then bypass loop */
+    b       L(du4_loop)
+    .align 4
+L(du4_1dw):
+    sldi     0,6, 32
+    srdi     8,7, 64-32
+    addi    5,5,16
+    or      0,0,8
+    bf      31,L(du4_loop)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    std     0,0(4)
+    addi    4,4,8
+    .align 4
+/* copy 32 bytes at a time */
+L(du4_loop):
+    sldi   0,6, 32
+    srdi   8,7, 64-32
+    or    0,0,8
+    ld    6,0(5)
+    std   0,0(4)
+    sldi   0,7, 32
+    srdi   8,6, 64-32
+    or    0,0,8
+    ld    7,8(5)
+    std   0,8(4)
+    sldi   0,6, 32
+    srdi   8,7, 64-32
+    or    0,0,8
+    ld    6,16(5)
+    std   0,16(4)
+    sldi   0,7, 32
+    srdi   8,6, 64-32
+    or    0,0,8
+    ld    7,24(5)
+    std   0,24(4)
+    addi  5,5,32
+    addi  4,4,32
+    bdnz+ L(du4_loop)
+    .align 4
+L(du4_fini):
+    /* calculate and store the final DW */
+    sldi   0,6, 32
+    srdi   8,7, 64-32
+    or    0,0,8  
+    std   0,0(4)
+    b     L(du_done)
+
+    .align 4
+L(du5_do):
+    bf      30,L(du5_1dw)
+
+    /* there are at least two DWs to copy */
+    sldi     0,6, 40
+    srdi     8,7, 64-40
+    or      0,0,8
+    ld      6,16(5)
+    std     0,0(4)
+    sldi     0,7, 40
+    srdi     8,6, 64-40
+    or      0,0,8
+    ld      7,24(5)
+    std     0,8(4)
+    addi    4,4,16
+    addi    5,5,32
+    blt     cr6,L(du5_fini)  /* if total DWs = 3, then bypass loop */
+    bf      31,L(du5_loop)
+    /* there is a third DW to copy */
+    sldi     0,6, 40
+    srdi     8,7, 64-40
+    or      0,0,8
+    std     0,0(4)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    addi    4,4,8
+    beq     cr6,L(du5_fini)  /* if total DWs = 4, then bypass loop */
+    b       L(du5_loop)
+    .align 4
+L(du5_1dw):
+    sldi     0,6, 40
+    srdi     8,7, 64-40
+    addi    5,5,16
+    or      0,0,8
+    bf      31,L(du5_loop)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    std     0,0(4)
+    addi    4,4,8
+    .align 4
+/* copy 32 bytes at a time */
+L(du5_loop):
+    sldi   0,6, 40
+    srdi   8,7, 64-40
+    or    0,0,8
+    ld    6,0(5)
+    std   0,0(4)
+    sldi   0,7, 40
+    srdi   8,6, 64-40
+    or    0,0,8
+    ld    7,8(5)
+    std   0,8(4)
+    sldi   0,6, 40
+    srdi   8,7, 64-40
+    or    0,0,8
+    ld    6,16(5)
+    std   0,16(4)
+    sldi   0,7, 40
+    srdi   8,6, 64-40
+    or    0,0,8
+    ld    7,24(5)
+    std   0,24(4)
+    addi  5,5,32
+    addi  4,4,32
+    bdnz+ L(du5_loop)
+    .align 4
+L(du5_fini):
+    /* calculate and store the final DW */
+    sldi   0,6, 40
+    srdi   8,7, 64-40
+    or    0,0,8  
+    std   0,0(4)
+    b     L(du_done)
+
+    .align 4
+L(du6_do):
+    bf      30,L(du6_1dw)
+
+    /* there are at least two DWs to copy */
+    sldi     0,6, 48
+    srdi     8,7, 64-48
+    or      0,0,8
+    ld      6,16(5)
+    std     0,0(4)
+    sldi     0,7, 48
+    srdi     8,6, 64-48
+    or      0,0,8
+    ld      7,24(5)
+    std     0,8(4)
+    addi    4,4,16
+    addi    5,5,32
+    blt     cr6,L(du6_fini)  /* if total DWs = 3, then bypass loop */
+    bf      31,L(du6_loop)
+    /* there is a third DW to copy */
+    sldi     0,6, 48
+    srdi     8,7, 64-48
+    or      0,0,8
+    std     0,0(4)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    addi    4,4,8
+    beq     cr6,L(du6_fini)  /* if total DWs = 4, then bypass loop */
+    b       L(du6_loop)
+    .align 4
+L(du6_1dw):
+    sldi     0,6, 48
+    srdi     8,7, 64-48
+    addi    5,5,16
+    or      0,0,8
+    bf      31,L(du6_loop)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    std     0,0(4)
+    addi    4,4,8
+    .align 4
+/* copy 32 bytes at a time */
+L(du6_loop):
+    sldi   0,6, 48
+    srdi   8,7, 64-48
+    or    0,0,8
+    ld    6,0(5)
+    std   0,0(4)
+    sldi   0,7, 48
+    srdi   8,6, 64-48
+    or    0,0,8
+    ld    7,8(5)
+    std   0,8(4)
+    sldi   0,6, 48
+    srdi   8,7, 64-48
+    or    0,0,8
+    ld    6,16(5)
+    std   0,16(4)
+    sldi   0,7, 48
+    srdi   8,6, 64-48
+    or    0,0,8
+    ld    7,24(5)
+    std   0,24(4)
+    addi  5,5,32
+    addi  4,4,32
+    bdnz+ L(du6_loop)
+    .align 4
+L(du6_fini):
+    /* Calculate and store the final DW.  */
+    sldi   0,6, 48
+    srdi   8,7, 64-48
+    or    0,0,8
+    std   0,0(4)
+    b     L(du_done)
+
+    .align 4
+L(du7_do):
+    bf      30,L(du7_1dw)
+
+    /* There are at least two DWs to copy.  */
+    sldi     0,6, 56
+    srdi     8,7, 64-56
+    or      0,0,8
+    ld      6,16(5)
+    std     0,0(4)
+    sldi     0,7, 56
+    srdi     8,6, 64-56
+    or      0,0,8
+    ld      7,24(5)
+    std     0,8(4)
+    addi    4,4,16
+    addi    5,5,32
+    blt     cr6,L(du7_fini)  /* If total DWs = 3, then bypass loop.  */
+    bf      31,L(du7_loop)
+    /* There is a third DW to copy.  */
+    sldi     0,6, 56
+    srdi     8,7, 64-56
+    or      0,0,8
+    std     0,0(4)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    addi    4,4,8
+    beq     cr6,L(du7_fini)  /* If total DWs = 4, then bypass loop.  */
+    b       L(du7_loop)
+    .align 4
+L(du7_1dw):
+    sldi     0,6, 56
+    srdi     8,7, 64-56
+    addi    5,5,16
+    or      0,0,8
+    bf      31,L(du7_loop)
+    mr      6,7
+    ld      7,0(5)
+    addi    5,5,8
+    std     0,0(4)
+    addi    4,4,8
+    .align 4
+/* Copy 32 bytes at a time.  */
+L(du7_loop):
+    sldi   0,6, 56
+    srdi   8,7, 64-56
+    or    0,0,8
+    ld    6,0(5)
+    std   0,0(4)
+    sldi   0,7, 56
+    srdi   8,6, 64-56
+    or    0,0,8
+    ld    7,8(5)
+    std   0,8(4)
+    sldi   0,6, 56
+    srdi   8,7, 64-56
+    or    0,0,8
+    ld    6,16(5)
+    std   0,16(4)
+    sldi   0,7, 56
+    srdi   8,6, 64-56
+    or    0,0,8
+    ld    7,24(5)
+    std   0,24(4)
+    addi  5,5,32
+    addi  4,4,32
+    bdnz+ L(du7_loop)
+    .align 4
+L(du7_fini):
+    /* Calculate and store the final DW.  */
+    sldi   0,6, 56
+    srdi   8,7, 64-56
+    or    0,0,8
+    std   0,0(4)
+    b     L(du_done)
+
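The L(du4_do) through L(du7_do) sequences above are all the same technique with a different shift amount: when the source lies off bytes past a doubleword boundary, each aligned destination doubleword is assembled from two consecutive source doublewords with sldi (shift left by 8*off), srdi (shift right by 64-8*off) and or. A minimal C sketch of that merge follows; it is illustrative only, assumes the big-endian layout of these processors, and its function and parameter names (copy_shifted_dw, ndw, off) are hypothetical rather than part of the patch.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch, not part of the patch: copy ndw doublewords to a
   doubleword-aligned dst from a source that starts `off' bytes (1-7) past
   the aligned doubleword src[0], big-endian layout assumed.  Mirrors the
   sldi/srdi/or/std sequences above; src[0..ndw] must be readable.  */
static void
copy_shifted_dw (uint64_t *dst, const uint64_t *src, size_t ndw,
                 unsigned int off)
{
  unsigned int lsh = 8 * off;   /* the sldi amount */
  unsigned int rsh = 64 - lsh;  /* the srdi amount */
  uint64_t prev = src[0];       /* plays the role of register 6 */
  uint64_t cur = src[1];        /* plays the role of register 7 */
  size_t i;

  for (i = 0; i + 1 < ndw; i++)
    {
      dst[i] = (prev << lsh) | (cur >> rsh);  /* sldi, srdi, or, std */
      prev = cur;
      cur = src[i + 2];                       /* the next ld */
    }
  /* The final doubleword, as in L(duN_fini): no further load is needed.  */
  dst[i] = (prev << lsh) | (cur >> rsh);
}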
+    .align 4
+L(du_done):
+    rldicr 0,31,0,60
+    mtcrf 0x01,31
+    bne   cr1,.L9	/* If the tail is not 0 bytes, handle it at .L9; otherwise we are done.  */
+  /* Return original dst pointer.  */
+    ld 31,-8(1)
+    ld 3,-16(1)
+    blr
+END_GEN_TB (BP_SYM (memcpy),TB_TOCLESS)
+libc_hidden_builtin_def (memcpy)
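Each L(duN_loop) body is that merge unrolled four times, moving 32 bytes per iteration while only registers 6 and 7 roll through the source stream; the iteration count sits in the CTR and bdnz+ closes the loop. A hedged C rendering of the unrolling, again illustrative only, with hypothetical names and the same big-endian assumption:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch, not part of the patch: the 32-byte-per-iteration
   unrolling of the L(duN_loop) bodies.  `a' and `b' are the doublewords
   already live in registers 6 and 7 on loop entry, iters corresponds to
   the CTR count, and lsh/rsh are 8*off and 64-8*off as before.  */
static void
copy_shifted_dw_x4 (uint64_t *dst, const uint64_t *src, size_t iters,
                    uint64_t a, uint64_t b,
                    unsigned int lsh, unsigned int rsh)
{
  while (iters--)
    {
      dst[0] = (a << lsh) | (b >> rsh);  a = src[0];   /* std 0,0(4)   */
      dst[1] = (b << lsh) | (a >> rsh);  b = src[1];   /* std 0,8(4)   */
      dst[2] = (a << lsh) | (b >> rsh);  a = src[2];   /* std 0,16(4)  */
      dst[3] = (b << lsh) | (a >> rsh);  b = src[3];   /* std 0,24(4)  */
      src += 4;                                        /* addi 5,5,32  */
      dst += 4;                                        /* addi 4,4,32  */
    }
}

After the loops, L(du_done) moves the low bits of the residual length into the condition register (mtcrf 0x01,31) so the remaining 0-7 byte tail can be tested bit by bit, and the no-tail path simply reloads the callee-saved register 31 and the original destination pointer (the memcpy return value), presumably saved in the function prologue not shown here, before returning.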
diff --git a/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/970/fpu/Implies b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/970/fpu/Implies
new file mode 100644
index 0000000000..52993ae71b
--- /dev/null
+++ b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/970/fpu/Implies
@@ -0,0 +1,3 @@
+# Make sure this comes before the powerpc/powerpc32/fpu that's
+# listed in unix/sysv/linux/powerpc/powerpc32/fpu/Implies.
+powerpc/powerpc32/970/fpu
diff --git a/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power4/fpu/Implies b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power4/fpu/Implies
new file mode 100644
index 0000000000..3c0690e2fe
--- /dev/null
+++ b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power4/fpu/Implies
@@ -0,0 +1,3 @@
+# Make sure this comes before the powerpc/powerpc32/fpu that's
+# listed in unix/sysv/linux/powerpc/powerpc32/fpu/Implies.
+powerpc/powerpc32/power4/fpu
diff --git a/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power5+/fpu/Implies b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power5+/fpu/Implies
new file mode 100644
index 0000000000..37d43bb6fc
--- /dev/null
+++ b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power5+/fpu/Implies
@@ -0,0 +1,3 @@
+# Make sure this comes before the powerpc/powerpc32/fpu that's
+# listed in unix/sysv/linux/powerpc/powerpc32/fpu/Implies.
+powerpc/powerpc32/power5+/fpu
diff --git a/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power5/fpu/Implies b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power5/fpu/Implies
new file mode 100644
index 0000000000..d379a2dd12
--- /dev/null
+++ b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power5/fpu/Implies
@@ -0,0 +1,3 @@
+# Make sure this comes before the powerpc/powerpc32/fpu that's
+# listed in unix/sysv/linux/powerpc/powerpc32/fpu/Implies.
+powerpc/powerpc32/power5/fpu
diff --git a/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power6/fpu/Implies b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power6/fpu/Implies
new file mode 100644
index 0000000000..9fb457db93
--- /dev/null
+++ b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc32/power6/fpu/Implies
@@ -0,0 +1,3 @@
+# Make sure this comes before the powerpc/powerpc32/fpu that's
+# listed in unix/sysv/linux/powerpc/powerpc32/fpu/Implies.
+powerpc/powerpc32/power6/fpu
diff --git a/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc64/power5+/fpu/Implies b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc64/power5+/fpu/Implies
new file mode 100644
index 0000000000..cf5913dec3
--- /dev/null
+++ b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc64/power5+/fpu/Implies
@@ -0,0 +1,3 @@
+# Make sure this comes before the powerpc/powerpc64/fpu that's
+# listed in unix/sysv/linux/powerpc/powerpc64/fpu/Implies.
+powerpc/powerpc64/power5+/fpu
diff --git a/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc64/power6/fpu/Implies b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc64/power6/fpu/Implies
new file mode 100644
index 0000000000..9451147267
--- /dev/null
+++ b/powerpc-cpu/sysdeps/unix/sysv/linux/powerpc/powerpc64/power6/fpu/Implies
@@ -0,0 +1,3 @@
+# Make sure this comes before the powerpc/powerpc64/fpu that's
+# listed in unix/sysv/linux/powerpc/powerpc64/fpu/Implies.
+powerpc/powerpc64/power6/fpu