Diffstat (limited to 'REORG.TODO/sysdeps/powerpc/powerpc32')
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/405/memcmp.S128
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/405/memcpy.S130
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/405/memset.S152
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/405/strcmp.S134
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/405/strcpy.S107
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/405/strlen.S75
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/405/strncmp.S128
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/440/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/464/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/476/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/476/memset.S152
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/970/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/Makefile49
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/Versions40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/__longjmp-common.S82
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/__longjmp.S39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/a2/memcpy.S527
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/add_n.S68
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/addmul_1.S48
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/atomic-machine.h126
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/backtrace.c131
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/bits/wordsize.h11
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/bsd-_setjmp.S56
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/bsd-setjmp.S39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/bzero.S27
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/cell/memcpy.S242
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/compat-ppc-mcount.S11
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/configure29
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/configure.ac16
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/crti.S89
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/crtn.S53
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/dl-dtprocnum.h3
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/dl-irel.h52
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/dl-machine.c608
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/dl-machine.h455
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/dl-start.S103
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/dl-trampoline.S189
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/Makefile9
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feclearexcept.c50
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feholdexcept.c55
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feupdateenv.c46
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fclrexcpt.c53
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fe_note_change.c39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fedisblxcpt.c54
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feenablxcpt.c54
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetenv.c49
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetexcept.c36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetmode.c37
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetround.c31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feholdexcpt.c59
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fenv_const.c45
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fenv_libc.h99
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetenv.c50
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetexcept.c37
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetmode.c43
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetround.c37
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fetestexceptflag.c25
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feupdateenv.c48
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_from_prctl.c42
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_from_spe.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_to_prctl.c42
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_to_spe.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fgetexcptflg.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/flt-rounds.c39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fraiseexcept-soft.c25
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fraiseexcpt.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fsetexcptflg.c55
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/ftestexcept.c31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/get-rounding-mode.h4
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/s_fabsf.S27
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/spe-raise.c53
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/Makefile3
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S178
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/__longjmp.S40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/configure56
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/configure.ac34
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fix-int-fp-convert-zero.h28
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fprrest.S94
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fprsave.S111
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_ceil.S83
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S75
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysign.S59
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysignf.S1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysignl.S66
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fabs.S5
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fabsl.S52
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_floor.S83
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_floorf.S75
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fma.S5
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_isnan.S57
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llrint.c63
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llrintf.c46
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llround.c90
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llroundf.c72
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lrint.S46
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lround.S129
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lroundf.S1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_nearbyint.S87
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_nearbyintf.S78
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_rint.S76
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_rintf.S65
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_round.S104
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_roundf.S95
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_trunc.S90
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_truncf.S82
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S183
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/fpu/setjmp.S47
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/gprrest0.S69
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/gprrest1.S63
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/gprsave0.S87
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/gprsave1.S63
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/libgcc-compat.S137
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/lshift.S125
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/mcount.c17
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/memset.S307
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/mul_1.S45
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/Makefile6
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/Makefile43
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot-power7.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot-ppc32.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf-power7.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf-ppc32.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil-power5+.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil-ppc32.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil.c40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf-power5+.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf-ppc32.S27
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign-power6.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign-ppc32.S34
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign.c51
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysignf.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite-power7.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite-ppc32.c33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite.c57
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finitef-ppc32.c31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finitef.c34
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor-power5+.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor-ppc32.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor.c40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf-power5+.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf-ppc32.S27
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf-power7.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf-ppc32.c33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf.c50
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinff-ppc32.c31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinff.c35
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power5.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power6.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power7.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-ppc32.S32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan.c56
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf-power5.S28
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf-power6.S28
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf.c39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint-power6.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint-ppc32.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint.c40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf-power6.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf-ppc32.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf.c31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-power5+.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-power6.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-ppc32.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround.c43
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llroundf.c34
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb-power7.c31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb-ppc32.c28
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf-power7.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf-ppc32.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl-power7.c21
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl-ppc32.c21
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint-power6x.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint-ppc32.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint.c40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrintf.c31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-power5+.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-power6x.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-ppc32.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround.c43
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lroundf.c34
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf-power5+.c31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf-ppc32.c29
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf.c44
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff-power5+.c27
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff-ppc32.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff.c30
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round-power5+.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round-ppc32.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round.c40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf-power5+.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf-ppc32.S27
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc-power5+.S33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc-ppc32.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc.c40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf-power5+.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf-ppc32.S27
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat-power5.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat-ppc32.S31
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat.c40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat-power5.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat-ppc32.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S46
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S106
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llroundf.S1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt_compat.S108
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf_compat.S100
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/hp-timing.h54
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcmp.S1375
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcopy.h116
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcpy.S481
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/memset.S226
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/Makefile30
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-power6.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-power7.S26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-ppc32.S35
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero.c37
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/ifunc-impl-list.c224
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/init-arch.h53
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr-power7.S40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr-ppc32.c34
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp-power7.S41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp-ppc32.S45
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp.c36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-a2.S38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-cell.S38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-power6.S38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-power7.S38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-ppc32.S41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy.c48
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove-power7.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove-ppc.c44
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove.c36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy-power7.S35
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy-ppc32.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy.c44
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr-power7.S40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr-ppc32.c25
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr.c37
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-power6.S38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-power7.S38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-ppc32.S41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset.c39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr-power7.S40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr-ppc32.c32
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr.c38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-memcmp.S19
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-memset.S18
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-strchr.S18
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-strnlen.c18
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp-power7.S39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp_l-power7.S41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp_l.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr-power7.S39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr-ppc32.S41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr.c39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul-power7.S39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul-ppc32.c28
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul.c37
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen-power7.S36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen-ppc32.S41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen.c33
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase-power7.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase_l-power7.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase_l.c42
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp-power7.S38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp-ppc32.S40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp.c39
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen-power7.S40
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen-ppc32.c28
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen.c36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-power6.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-power7.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-ppc32.c43
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr.c41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-power6.c22
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-power7.c22
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-ppc32.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy.c36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-power6.c20
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-power7.c20
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-ppc32.c26
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr.c36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wordcopy-power7.c23
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wordcopy-ppc32.c27
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power4/strncmp.S196
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/Implies4
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S29
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S29
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_llround.S59
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_llroundf.S1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_lround.S57
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S29
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S36
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S29
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5+/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/s_isnan.S61
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/s_isnanf.S45
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt_compat.S106
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf_compat.S98
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power5/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_copysign.S58
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_copysignf.S1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_isnan.S61
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_isnanf.S44
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llrint.S46
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llrintf.S38
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llround.S59
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llroundf.S1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/memcpy.S907
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/memset.S539
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6x/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/s_lrint.S41
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/s_lround.S51
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power6x/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/Makefile4
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_finite.S93
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_finitef.S1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isinf.S85
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isinff.S1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isnan.S90
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isnanf.S1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/memchr.S193
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/memcmp.S1375
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/memcpy.S538
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/mempcpy.S482
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/memrchr.S196
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/memset.S431
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/rawmemchr.S110
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/strcasecmp.S129
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/strcasecmp_l.S5
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/strchr.S225
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/strchrnul.S127
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/strlen.S102
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/strncmp.S199
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power7/strnlen.S176
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power8/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power8/fpu/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power8/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power9/Implies2
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power9/fpu/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/power9/multiarch/Implies1
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/ppc-mcount.S104
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/register-dump.h120
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/rshift.S55
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/rtld-memset.c4
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/setjmp-common.S78
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/setjmp.S46
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/stackguard-macros.h14
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/start.S95
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/stpcpy.S119
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/strchr.S146
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/strcmp.S150
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/strcpy.S117
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/strlen.S190
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/strncmp.S181
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/sub_n.S68
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/submul_1.S51
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/symbol-hacks.h21
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/sysdep.h174
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/tls-macros.h49
-rw-r--r--REORG.TODO/sysdeps/powerpc/powerpc32/tst-audit.h25
391 files changed, 26598 insertions, 0 deletions
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/405/memcmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/405/memcmp.S
new file mode 100644
index 0000000000..d1865140eb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/405/memcmp.S
@@ -0,0 +1,128 @@
+/* Optimized memcmp implementation for PowerPC476.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* memcmp
+
+       r3:source1 address, return equality
+       r4:source2 address
+       r5:byte count
+
+       Compare 2 words at a time from src1 and src2.  If they are unequal,
+       jump to the end and return src1 > src2 or src1 < src2.
+       When the word count is exhausted, compare the remaining bytes, then
+       jump to the end and return src1 > src2, src1 < src2 or src1 = src2.
+       While the words are equal and bytes remain, repeat. */
+
+EALIGN (memcmp, 5, 0)
+       srwi.   r6,r5,5
+       beq     L(preword2_count_loop)
+       mtctr   r6
+       clrlwi  r5,r5,27
+
+L(word8_compare_loop):
+       lwz     r10,0(r3)
+       lwz     r6,4(r3)
+       lwz     r8,0(r4)
+       lwz     r9,4(r4)
+       cmplw   cr5,r8,r10
+       cmplw   cr1,r9,r6
+       bne     cr5,L(st2)
+       bne     cr1,L(st1)
+       lwz     r10,8(r3)
+       lwz     r6,12(r3)
+       lwz     r8,8(r4)
+       lwz     r9,12(r4)
+       cmplw   cr5,r8,r10
+       cmplw   cr1,r9,r6
+       bne     cr5,L(st2)
+       bne     cr1,L(st1)
+       lwz     r10,16(r3)
+       lwz     r6,20(r3)
+       lwz     r8,16(r4)
+       lwz     r9,20(r4)
+       cmplw   cr5,r8,r10
+       cmplw   cr1,r9,r6
+       bne     cr5,L(st2)
+       bne     cr1,L(st1)
+       lwz     r10,24(r3)
+       lwz     r6,28(r3)
+       addi    r3,r3,0x20
+       lwz     r8,24(r4)
+       lwz     r9,28(r4)
+       addi    r4,r4,0x20
+       cmplw   cr5,r8,r10
+       cmplw   cr1,r9,r6
+       bne     cr5,L(st2)
+       bne     cr1,L(st1)
+       bdnz    L(word8_compare_loop)
+
+L(preword2_count_loop):
+       srwi.   r6,r5,3
+       beq     L(prebyte_count_loop)
+       mtctr   r6
+       clrlwi  r5,r5,29
+
+L(word2_count_loop):
+       lwz     r10,0(r3)
+       lwz     r6,4(r3)
+       addi    r3,r3,0x08
+       lwz     r8,0(r4)
+       lwz     r9,4(r4)
+       addi    r4,r4,0x08
+       cmplw   cr5,r8,r10
+       cmplw   cr1,r9,r6
+       bne     cr5,L(st2)
+       bne     cr1,L(st1)
+       bdnz    L(word2_count_loop)
+
+L(prebyte_count_loop):
+       addi    r5,r5,1
+       mtctr   r5
+       bdz     L(end_memcmp)
+
+L(byte_count_loop):
+       lbz     r6,0(r3)
+       addi    r3,r3,0x01
+       lbz     r8,0(r4)
+       addi    r4,r4,0x01
+       cmplw   cr5,r8,r6
+       bne     cr5,L(st2)
+       bdnz    L(byte_count_loop)
+
+L(end_memcmp):
+       addi    r3,r0,0
+       blr
+
+L(l_r):
+       addi    r3,r0,1
+       blr
+
+L(st1):
+       blt     cr1,L(l_r)
+       addi    r3,r0,-1
+       blr
+
+L(st2):
+       blt     cr5,L(l_r)
+       addi    r3,r0,-1
+       blr
+END (memcmp)
+libc_hidden_builtin_def (memcmp)
+weak_alias (memcmp,bcmp)
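
For readers not fluent in PowerPC assembly, the word-at-a-time strategy described in the header comment of memcmp.S above corresponds roughly to the following C sketch. It is illustrative only and not part of the patch: memcmp_sketch is a hypothetical name, and it assumes word-aligned, big-endian buffers, which is what makes the assembly's unsigned word compares (cmplw) order the same way as a byte-wise comparison.

#include <stddef.h>
#include <stdint.h>

static int
memcmp_sketch (const void *s1, const void *s2, size_t n)
{
  const uint32_t *w1 = s1;
  const uint32_t *w2 = s2;

  /* Word loop: the assembly unrolls this to 8 words (32 bytes) per
     iteration and then 2 words (8 bytes) per iteration.  */
  for (; n >= 4; n -= 4, w1++, w2++)
    if (*w1 != *w2)
      return *w1 < *w2 ? -1 : 1;

  /* Byte loop for the remaining 0-3 bytes.  */
  const unsigned char *b1 = (const unsigned char *) w1;
  const unsigned char *b2 = (const unsigned char *) w2;
  for (; n > 0; n--, b1++, b2++)
    if (*b1 != *b2)
      return *b1 < *b2 ? -1 : 1;

  return 0;
}
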
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/405/memcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/405/memcpy.S
new file mode 100644
index 0000000000..9878dbceac
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/405/memcpy.S
@@ -0,0 +1,130 @@
+/* Optimized memcpy implementation for PowerPC476.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* memcpy
+
+       r0:saved destination address (return value)
+       r3:destination address
+       r4:source address
+       r5:byte count
+
+       Save the destination address in r0 so it can be returned.
+       If the destination and source are unaligned and the copy count is
+       greater than 256, copy 0-3 bytes to make the destination aligned.
+       If there are 32 or more bytes to copy, use a 32-byte copy loop.
+       Finally, copy the remaining 0-31 bytes. */
+
+EALIGN (memcpy, 5, 0)
+/* Check if bytes to copy are greater than 256 and if
+       source and destination are unaligned */
+       cmpwi   r5,0x0100
+       addi    r0,r3,0
+       ble     L(string_count_loop)
+       neg     r6,r3
+       clrlwi. r6,r6,30
+       beq     L(string_count_loop)
+       neg     r6,r4
+       clrlwi. r6,r6,30
+       beq     L(string_count_loop)
+       mtctr   r6
+       subf    r5,r6,r5
+
+L(unaligned_bytecopy_loop): /* Align destination by copying 0-3 bytes */
+       lbz     r8,0x0(r4)
+       addi    r4,r4,1
+       stb     r8,0x0(r3)
+       addi    r3,r3,1
+       bdnz    L(unaligned_bytecopy_loop)
+       srwi.   r7,r5,5
+       beq     L(preword2_count_loop)
+       mtctr   r7
+
+L(word8_count_loop_no_dcbt): /* Copy 32 bytes at a time */
+       lwz     r6,0(r4)
+       lwz     r7,4(r4)
+       lwz     r8,8(r4)
+       lwz     r9,12(r4)
+       subi    r5,r5,0x20
+       stw     r6,0(r3)
+       stw     r7,4(r3)
+       stw     r8,8(r3)
+       stw     r9,12(r3)
+       lwz     r6,16(r4)
+       lwz     r7,20(r4)
+       lwz     r8,24(r4)
+       lwz     r9,28(r4)
+       addi    r4,r4,0x20
+       stw     r6,16(r3)
+       stw     r7,20(r3)
+       stw     r8,24(r3)
+       stw     r9,28(r3)
+       addi    r3,r3,0x20
+       bdnz    L(word8_count_loop_no_dcbt)
+
+L(preword2_count_loop): /* Copy remaining 0-31 bytes */
+       clrlwi. r12,r5,27
+       beq     L(end_memcpy)
+       mtxer   r12
+       lswx    r5,0,r4
+       stswx   r5,0,r3
+       mr       r3,r0
+       blr
+
+L(string_count_loop): /* Copy the trailing 0-15 bytes */
+       clrlwi. r12,r5,28
+       add     r3,r3,r5
+       add     r4,r4,r5
+       beq     L(pre_string_copy)
+       mtxer   r12
+       subf    r4,r12,r4
+       subf    r3,r12,r3
+       lswx    r6,0,r4
+       stswx   r6,0,r3
+
+L(pre_string_copy): /* Check how many 16 byte chunks to copy */
+       srwi.   r7,r5,4
+       beq     L(end_memcpy)
+       mtctr   r7
+
+L(word4_count_loop_no_dcbt): /* Copy 16 bytes at a time, unrolled twice */
+       lwz     r6,-4(r4)
+       lwz     r7,-8(r4)
+       lwz     r8,-12(r4)
+       lwzu    r9,-16(r4)
+       stw     r6,-4(r3)
+       stw     r7,-8(r3)
+       stw     r8,-12(r3)
+       stwu    r9,-16(r3)
+       bdz     L(end_memcpy)
+       lwz     r6,-4(r4)
+       lwz     r7,-8(r4)
+       lwz     r8,-12(r4)
+       lwzu    r9,-16(r4)
+       stw     r6,-4(r3)
+       stw     r7,-8(r3)
+       stw     r8,-12(r3)
+       stwu    r9,-16(r3)
+       bdnz    L(word4_count_loop_no_dcbt)
+
+L(end_memcpy):
+       mr       r3,r0
+       blr
+END (memcpy)
+libc_hidden_builtin_def (memcpy)
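
The align-then-block-copy structure described in memcpy.S's header comment, again as a hypothetical, illustrative C sketch rather than anything in the patch (the library memcpy call below merely stands in for the unrolled lwz/stw pairs, and the stswx/backward-copy small-size path is folded into the plain byte loops):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *
memcpy_sketch (void *dst, const void *src, size_t n)
{
  unsigned char *d = dst;
  const unsigned char *s = src;

  /* For copies larger than 256 bytes, move 0-3 bytes first so that
     the destination becomes word aligned.  */
  if (n > 256)
    for (; ((uintptr_t) d & 3) != 0 && n > 0; n--)
      *d++ = *s++;

  /* Main loop: 32 bytes per iteration (eight word loads/stores).  */
  for (; n >= 32; n -= 32, d += 32, s += 32)
    memcpy (d, s, 32);

  /* Trailing 0-31 bytes.  */
  for (; n > 0; n--)
    *d++ = *s++;

  return dst;
}
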
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/405/memset.S b/REORG.TODO/sysdeps/powerpc/powerpc32/405/memset.S
new file mode 100644
index 0000000000..18aea515ba
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/405/memset.S
@@ -0,0 +1,152 @@
+/* Optimized memset for PowerPC405,440,464 (32-byte cacheline).
+   Copyright (C) 2012-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* memset
+
+       r3:destination address and return value
+       r4:fill byte
+       r5:byte count
+       r11:fill byte replicated into all 32 bits of the register
+       r12:saved destination address for the return value
+
+       Save the destination address in r12.
+       If the destination is unaligned and the count is greater than 255 bytes,
+       set 0-3 bytes to make the destination aligned.
+       If the count is greater than 255 bytes and the fill value is zero,
+       use dcbz to clear whole cache lines where possible; otherwise do the
+       following: if there are 16 or more bytes to set, use a four-word
+       (16-byte) store loop, and finally set the remaining 0-15 bytes with a
+       string store (stswx). */
+
+EALIGN (memset, 5, 0)
+       rlwinm  r11,r4,0,24,31
+       rlwimi  r11,r4,8,16,23
+       rlwimi  r11,r11,16,0,15
+       addi    r12,r3,0
+       cmpwi   r5,0x00FF
+       ble     L(preword8_count_loop)
+       cmpwi   r4,0x00
+       beq     L(use_dcbz)
+       neg     r6,r3
+       clrlwi. r6,r6,30
+       beq     L(preword8_count_loop)
+       addi    r8,0,1
+       mtctr   r6
+       subi    r3,r3,1
+
+L(unaligned_bytecopy_loop):
+       stbu    r11,0x1(r3)
+       subf.   r5,r8,r5
+       beq     L(end_memset)
+       bdnz    L(unaligned_bytecopy_loop)
+       addi    r3,r3,1
+
+L(preword8_count_loop):
+       srwi.   r6,r5,4
+       beq     L(preword2_count_loop)
+       mtctr   r6
+       addi    r3,r3,-4
+       mr      r8,r11
+       mr      r9,r11
+       mr      r10,r11
+
+L(word8_count_loop_no_dcbt):
+       stwu    r8,4(r3)
+       stwu    r9,4(r3)
+       subi    r5,r5,0x10
+       stwu    r10,4(r3)
+       stwu    r11,4(r3)
+       bdnz    L(word8_count_loop_no_dcbt)
+       addi    r3,r3,4
+
+L(preword2_count_loop):
+       clrlwi. r7,r5,28
+       beq     L(end_memset)
+       mr      r8,r11
+       mr      r9,r11
+       mr      r10,r11
+       mtxer   r7
+       stswx   r8,0,r3
+
+L(end_memset):
+       addi    r3,r12,0
+       blr
+
+L(use_dcbz):
+       neg     r6,r3
+       clrlwi. r7,r6,28
+       beq     L(skip_string_loop)
+       mr      r8,r11
+       mr      r9,r11
+       mr      r10,r11
+       subf    r5,r7,r5
+       mtxer   r7
+       stswx   r8,0,r3
+       add     r3,r3,r7
+
+L(skip_string_loop):
+       clrlwi  r8,r6,27
+       srwi.   r8,r8,4
+       beq     L(dcbz_pre_loop)
+       mtctr   r8
+
+L(word_loop):
+       stw     r11,0(r3)
+       subi    r5,r5,0x10
+       stw     r11,4(r3)
+       stw     r11,8(r3)
+       stw     r11,12(r3)
+       addi    r3,r3,0x10
+       bdnz    L(word_loop)
+
+L(dcbz_pre_loop):
+       srwi    r6,r5,5
+       mtctr   r6
+       addi    r7,0,0
+
+L(dcbz_loop):
+       dcbz    r3,r7
+       addi    r3,r3,0x20
+       subi    r5,r5,0x20
+       bdnz    L(dcbz_loop)
+       srwi.   r6,r5,4
+       beq     L(postword2_count_loop)
+       mtctr   r6
+
+L(postword8_count_loop):
+       stw     r11,0(r3)
+       subi    r5,r5,0x10
+       stw     r11,4(r3)
+       stw     r11,8(r3)
+       stw     r11,12(r3)
+       addi    r3,r3,0x10
+       bdnz    L(postword8_count_loop)
+
+L(postword2_count_loop):
+       clrlwi. r7,r5,28
+       beq     L(end_memset)
+       mr      r8,r11
+       mr      r9,r11
+       mr      r10,r11
+       mtxer   r7
+       stswx   r8,0,r3
+       b       L(end_memset)
+END (memset)
+libc_hidden_builtin_def (memset)
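
The replicate-and-store scheme from the header comment, in a hypothetical C sketch (illustrative only; the dcbz cache-line-clearing path and the stswx tail have no portable C equivalent and are represented by the ordinary loops):

#include <stddef.h>
#include <stdint.h>

static void *
memset_sketch (void *dst, int c, size_t n)
{
  unsigned char *p = dst;
  uint32_t word = (unsigned char) c;
  word |= word << 8;            /* replicate the fill byte ...        */
  word |= word << 16;           /* ... into all four byte lanes (r11) */

  /* For large fills, store 0-3 bytes to word-align the destination;
     small fills go straight to the word loop, as in the assembly.  */
  if (n > 255)
    for (; ((uintptr_t) p & 3) != 0 && n > 0; n--)
      *p++ = (unsigned char) c;

  /* Four word stores (16 bytes) per iteration.  */
  for (; n >= 16; n -= 16, p += 16)
    {
      uint32_t *w = (uint32_t *) p;
      w[0] = w[1] = w[2] = w[3] = word;
    }

  /* Trailing 0-15 bytes.  */
  for (; n > 0; n--)
    *p++ = (unsigned char) c;

  return dst;
}
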
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/405/strcmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/405/strcmp.S
new file mode 100644
index 0000000000..42e04e9552
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/405/strcmp.S
@@ -0,0 +1,134 @@
+/* Optimized strcmp implementation for PowerPC476.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* strcmp
+
+       Register Use
+       r0:temp return equality
+       r3:source1 address, return equality
+       r4:source2 address
+
+       Implementation description
+       Compare 2 words at a time from src1 and src2.  If they are unequal,
+       jump to the end and return src1 > src2 or src1 < src2.
+       If a null byte is found, compare the bytes up to and including the
+       null, then jump to the end and return src1 > src2, src1 < src2 or
+       src1 = src2.  If src1 = src2 and no null has been seen, repeat. */
+
+EALIGN (strcmp,5,0)
+       neg     r7,r3
+       clrlwi  r7,r7,20
+       neg     r8,r4
+       clrlwi  r8,r8,20
+       srwi.   r7,r7,5
+       beq     L(byte_loop)
+       srwi.   r8,r8,5
+       beq     L(byte_loop)
+       cmplw   r7,r8
+       mtctr   r7
+       ble     L(big_loop)
+       mtctr   r8
+
+L(big_loop):
+       lwz     r5,0(r3)
+       lwz     r6,4(r3)
+       lwz     r8,0(r4)
+       lwz     r9,4(r4)
+       dlmzb.  r12,r5,r6
+       bne     L(end_check)
+       cmplw   r5,r8
+       bne     L(st1)
+       cmplw   r6,r9
+       bne     L(st1)
+       lwz     r5,8(r3)
+       lwz     r6,12(r3)
+       lwz     r8,8(r4)
+       lwz     r9,12(r4)
+       dlmzb.  r12,r5,r6
+       bne     L(end_check)
+       cmplw   r5,r8
+       bne     L(st1)
+       cmplw   r6,r9
+       bne     L(st1)
+       lwz     r5,16(r3)
+       lwz     r6,20(r3)
+       lwz     r8,16(r4)
+       lwz     r9,20(r4)
+       dlmzb.  r12,r5,r6
+       bne     L(end_check)
+       cmplw   r5,r8
+       bne     L(st1)
+       cmplw   r6,r9
+       bne     L(st1)
+       lwz     r5,24(r3)
+       lwz     r6,28(r3)
+       addi    r3,r3,0x20
+       lwz     r8,24(r4)
+       lwz     r9,28(r4)
+       addi    r4,r4,0x20
+       dlmzb.  r12,r5,r6
+       bne     L(end_check)
+       cmplw   r5,r8
+       bne     L(st1)
+       cmplw   r6,r9
+       bne     L(st1)
+       bdnz    L(big_loop)
+       b       L(byte_loop)
+
+L(end_check):
+       subfic  r12,r12,4
+       blt     L(end_check2)
+       rlwinm  r12,r12,3,0,31
+       srw     r5,r5,r12
+       srw     r8,r8,r12
+       cmplw   r5,r8
+       bne     L(st1)
+       b       L(end_strcmp)
+
+L(end_check2):
+       addi    r12,r12,4
+       cmplw   r5,r8
+       rlwinm  r12,r12,3,0,31
+       bne     L(st1)
+       srw     r6,r6,r12
+       srw     r9,r9,r12
+       cmplw   r6,r9
+       bne     L(st1)
+
+L(end_strcmp):
+       addi    r3,r0,0
+       blr
+
+L(st1):
+       mfcr    r3
+       blr
+
+L(byte_loop):
+       lbz     r5,0(r3)
+       addi    r3,r3,1
+       lbz     r6,0(r4)
+       addi    r4,r4,1
+       cmplw   r5,r6
+       bne     L(st1)
+       cmpwi   r5,0
+       beq     L(end_strcmp)
+       b       L(byte_loop)
+END (strcmp)
+libc_hidden_builtin_def (strcmp)
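
The string routines in this directory all lean on the 405/440-family dlmzb. instruction to locate a terminating zero byte eight bytes at a time. As a reading aid only, and to the best of my understanding of the instruction (the condition-register result that the branches above test is omitted), its count result can be modelled in C roughly like this:

#include <stdint.h>

/* Rough model of "dlmzb rA,rS,rB": scan the 8 bytes held in two
   big-endian words, most significant byte of rS first, and return the
   1-based position of the leftmost zero byte, or 8 if there is none.
   The assembly uses this count both to detect the terminator and to
   know how many bytes of the final word pair are significant.  */
static unsigned int
dlmzb_model (uint32_t rs, uint32_t rb)
{
  unsigned char bytes[8] = {
    rs >> 24, rs >> 16, rs >> 8, rs,
    rb >> 24, rb >> 16, rb >> 8, rb,
  };

  for (unsigned int i = 0; i < 8; i++)
    if (bytes[i] == 0)
      return i + 1;
  return 8;
}

In strcmp.S above, the word pairs are compared with cmplw, and once dlmzb reports a zero byte the end_check code shifts both words right so that only the bytes up to the terminator take part in the final compare.
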
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/405/strcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/405/strcpy.S
new file mode 100644
index 0000000000..2a554dc32e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/405/strcpy.S
@@ -0,0 +1,107 @@
+/* Optimized strcpy implementation for PowerPC476.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* strcpy
+
+       Register Use
+       r3:destination address and return value
+       r4:source address
+       r10:temp destination address
+
+       Implementation description
+       Loop by checking 2 words at a time, with dlmzb. Check if there is a null
+       in the 2 words. If there is a null jump to end checking to determine
+       where in the last 8 bytes it is. Copy the appropriate bytes of the last
+       8 according to the null position. */
+
+EALIGN (strcpy, 5, 0)
+       neg     r7,r4
+       subi    r4,r4,1
+       clrlwi. r8,r7,29
+       subi    r10,r3,1
+       beq     L(pre_word8_loop)
+       mtctr   r8
+
+L(loop):
+       lbzu    r5,0x01(r4)
+       cmpi    cr5,r5,0x0
+       stbu    r5,0x01(r10)
+       beq     cr5,L(end_strcpy)
+       bdnz    L(loop)
+
+L(pre_word8_loop):
+       subi    r4,r4,3
+       subi    r10,r10,3
+
+L(word8_loop):
+       lwzu    r5,0x04(r4)
+       lwzu    r6,0x04(r4)
+       dlmzb.  r11,r5,r6
+       bne     L(byte_copy)
+       stwu    r5,0x04(r10)
+       stwu    r6,0x04(r10)
+       lwzu    r5,0x04(r4)
+       lwzu    r6,0x04(r4)
+       dlmzb.  r11,r5,r6
+       bne     L(byte_copy)
+       stwu    r5,0x04(r10)
+       stwu    r6,0x04(r10)
+       lwzu    r5,0x04(r4)
+       lwzu    r6,0x04(r4)
+       dlmzb.  r11,r5,r6
+       bne     L(byte_copy)
+       stwu    r5,0x04(r10)
+       stwu    r6,0x04(r10)
+       lwzu    r5,0x04(r4)
+       lwzu    r6,0x04(r4)
+       dlmzb.  r11,r5,r6
+       bne     L(byte_copy)
+       stwu    r5,0x04(r10)
+       stwu    r6,0x04(r10)
+       b       L(word8_loop)
+
+L(last_bytes_copy):
+       stwu    r5,0x04(r10)
+       subi    r11,r11,4
+       mtctr   r11
+       addi    r10,r10,3
+       subi    r4,r4,1
+
+L(last_bytes_copy_loop):
+       lbzu    r5,0x01(r4)
+       stbu    r5,0x01(r10)
+       bdnz    L(last_bytes_copy_loop)
+       blr
+
+L(byte_copy):
+       blt     L(last_bytes_copy)
+       mtctr   r11
+       addi    r10,r10,3
+       subi    r4,r4,5
+
+L(last_bytes_copy_loop2):
+       lbzu    r5,0x01(r4)
+       stbu    r5,0x01(r10)
+       bdnz    L(last_bytes_copy_loop2)
+
+L(end_strcpy):
+       blr
+END (strcpy)
+libc_hidden_builtin_def (strcpy)
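
Using the dlmzb_model helper sketched after strcmp.S above (a hypothetical name, not anything in the patch), the copy loop described in strcpy.S's header comment looks roughly like this in C. Like the assembly, it examines 8 source bytes at a time and may read a few bytes past the terminator, which is acceptable for this hardware-specific code but not for portable C:

#include <stdint.h>
#include <string.h>

extern unsigned int dlmzb_model (uint32_t rs, uint32_t rb);

static char *
strcpy_sketch (char *dst, const char *src)
{
  char *d = dst;

  /* While the next 8 bytes contain no terminator, store both words
     (two stwu stores in the assembly) and keep going.  */
  while (memchr (src, '\0', 8) == NULL)
    {
      memcpy (d, src, 8);
      d += 8;
      src += 8;
    }

  /* The terminator is in the next 8 bytes; the dlmzb count says how
     many bytes, including the NUL itself, are left to copy.  Assumes
     big-endian words, matching the lwzu loads.  */
  uint32_t w0, w1;
  memcpy (&w0, src, 4);
  memcpy (&w1, src + 4, 4);
  memcpy (d, src, dlmzb_model (w0, w1));
  return dst;
}
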
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/405/strlen.S b/REORG.TODO/sysdeps/powerpc/powerpc32/405/strlen.S
new file mode 100644
index 0000000000..25e54dafc7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/405/strlen.S
@@ -0,0 +1,75 @@
+/* Optimized strlen implementation for PowerPC476.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* strlen
+
+       Register Use
+       r3:source address and return length of string
+       r4:byte counter
+
+       Implementation description
+       Load 2 words at a time and accumulate the byte count; when a null byte
+       is found, subtract one from the count and return it.  The subtraction
+       is needed because the null terminator itself is not counted. */
+
+EALIGN (strlen,5,0)
+       neg     r7,r3
+       clrlwi. r8,r7,29
+       addi    r4,0,0
+       beq     L(byte_count_loop)
+       mtctr   r8
+
+L(loop):
+       lbz     r5,0(r3)
+       cmpi    cr5,r5,0x0
+       addi    r3,r3,0x1
+       addi    r4,r4,0x1
+       beq     cr5,L(end_strlen)
+       bdnz    L(loop)
+
+L(byte_count_loop):
+       lwz     r5,0(r3)
+       lwz     r6,4(r3)
+       dlmzb.  r12,r5,r6
+       add     r4,r4,r12
+       bne     L(end_strlen)
+       lwz     r5,8(r3)
+       lwz     r6,12(r3)
+       dlmzb.  r12,r5,r6
+       add     r4,r4,r12
+       bne     L(end_strlen)
+       lwz     r5,16(r3)
+       lwz     r6,20(r3)
+       dlmzb.  r12,r5,r6
+       add     r4,r4,r12
+       bne     L(end_strlen)
+       lwz     r5,24(r3)
+       lwz     r6,28(r3)
+       addi    r3,r3,0x20
+       dlmzb.  r12,r5,r6
+       add     r4,r4,r12
+       bne     L(end_strlen)
+       b       L(byte_count_loop)
+
+L(end_strlen):
+       addi    r3,r4,-1
+       blr
+END (strlen)
+libc_hidden_builtin_def (strlen)
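
The same dlmzb_model helper (hypothetical, from the sketch after strcmp.S) makes the counting scheme in strlen.S's header comment concrete. As with the other sketches this is illustrative C only and, like the assembly, it reads past the terminator:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

extern unsigned int dlmzb_model (uint32_t rs, uint32_t rb);

static size_t
strlen_sketch (const char *s)
{
  size_t len = 0;

  for (;;)
    {
      uint32_t w0, w1;
      memcpy (&w0, s, 4);                /* big-endian word loads (lwz) */
      memcpy (&w1, s + 4, 4);

      len += dlmzb_model (w0, w1);       /* adds 1-8 bytes              */
      if (memchr (s, '\0', 8) != NULL)   /* the CR bit tested by bne    */
        return len - 1;                  /* don't count the NUL         */
      s += 8;
    }
}
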
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/405/strncmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/405/strncmp.S
new file mode 100644
index 0000000000..9c5a8feb9d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/405/strncmp.S
@@ -0,0 +1,128 @@
+/* Optimized strncmp implementation for PowerPC476.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* strncmp
+
+       Register Use
+       r0:temp return equality
+       r3:source1 address, return equality
+       r4:source2 address
+       r5:byte count
+
+       Implementation description
+       Bound the word loop so that neither source is read across a page
+       boundary.
+       Compare 2 words at a time from src1 and src2.  If they are unequal,
+       jump to the end and return src1 > src2 or src1 < src2.
+       If a null byte is found, compare the bytes up to and including the
+       null, then jump to the end and return src1 > src2, src1 < src2 or
+       src1 = src2.
+       If the count is exhausted, compare the remaining bytes, then jump to
+       the end and return src1 > src2, src1 < src2 or src1 = src2.
+       If src1 = src2 with no null and count remaining, repeat. */
+
+EALIGN (strncmp,5,0)
+       neg     r7,r3
+       clrlwi  r7,r7,20
+       neg     r8,r4
+       clrlwi  r8,r8,20
+       srwi.   r7,r7,3
+       beq     L(prebyte_count_loop)
+       srwi.   r8,r8,3
+       beq     L(prebyte_count_loop)
+       cmplw   r7,r8
+       mtctr   r7
+       ble     L(preword2_count_loop)
+       mtctr   r8
+
+L(preword2_count_loop):
+       srwi.   r6,r5,3
+       beq     L(prebyte_count_loop)
+       mfctr   r7
+       cmplw   r6,r7
+       bgt     L(set_count_loop)
+       mtctr   r6
+       clrlwi  r5,r5,29
+
+L(word2_count_loop):
+       lwz     r10,0(r3)
+       lwz     r6,4(r3)
+       addi    r3,r3,0x08
+       lwz     r8,0(r4)
+       lwz     r9,4(r4)
+       addi    r4,r4,0x08
+       dlmzb.  r12,r10,r6
+       bne     L(end_check)
+       cmplw   r10,r8
+       bne     L(st1)
+       cmplw   r6,r9
+       bne     L(st1)
+       bdnz    L(word2_count_loop)
+
+L(prebyte_count_loop):
+       addi    r5,r5,1
+       mtctr   r5
+       bdz     L(end_strncmp)
+
+L(byte_count_loop):
+       lbz     r6,0(r3)
+       addi    r3,r3,1
+       lbz     r7,0(r4)
+       addi    r4,r4,1
+       cmplw   r6,r7
+       bne     L(st1)
+       cmpwi   r6,0
+       beq     L(end_strncmp)
+       bdnz    L(byte_count_loop)
+       b       L(end_strncmp)
+
+L(set_count_loop):
+       slwi    r7,r7,3
+       subf    r5,r7,r5
+       b       L(word2_count_loop)
+
+L(end_check):
+       subfic  r12,r12,4
+       blt     L(end_check2)
+       rlwinm  r12,r12,3,0,31
+       srw     r10,r10,r12
+       srw     r8,r8,r12
+       cmplw   r10,r8
+       bne     L(st1)
+       b       L(end_strncmp)
+
+L(end_check2):
+       addi    r12,r12,4
+       cmplw   r10,r8
+       rlwinm  r12,r12,3,0,31
+       bne     L(st1)
+       srw     r6,r6,r12
+       srw     r9,r9,r12
+       cmplw   r6,r9
+       bne     L(st1)
+
+L(end_strncmp):
+       addi    r3,r0,0
+       blr
+
+L(st1):
+       mfcr    r3
+       blr
+END (strncmp)
+libc_hidden_builtin_def (strncmp)
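
Finally, a hypothetical C sketch of the count-bounded variant described in strncmp.S's header comment. memcmp and memchr stand in for the cmplw word compares and the dlmzb terminator check, the page-boundary guard is glossed over, and as before the 8-byte reads may run past the terminator, so this is illustrative only:

#include <stddef.h>
#include <string.h>

static int
strncmp_sketch (const char *s1, const char *s2, size_t n)
{
  /* Word-pair loop: run while at least 8 bytes of the count remain
     and the next 8 bytes of src1 contain no terminator.  */
  while (n >= 8 && memchr (s1, '\0', 8) == NULL)
    {
      if (memcmp (s1, s2, 8) != 0)
        break;                  /* a word compare found a difference */
      s1 += 8;
      s2 += 8;
      n -= 8;
    }

  /* Byte loop for the difference, the terminator, or the last n % 8.  */
  for (; n > 0; n--, s1++, s2++)
    {
      unsigned char c1 = (unsigned char) *s1;
      unsigned char c2 = (unsigned char) *s2;
      if (c1 != c2)
        return c1 < c2 ? -1 : 1;
      if (c1 == '\0')
        return 0;
    }

  return 0;
}
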
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/440/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/440/Implies
new file mode 100644
index 0000000000..70c0d2eda3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/440/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/405/fpu
+powerpc/powerpc32/405
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/464/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/464/Implies
new file mode 100644
index 0000000000..c3e52c5504
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/464/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/440/fpu
+powerpc/powerpc32/440
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/476/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/476/Implies
new file mode 100644
index 0000000000..2829f9ccaf
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/476/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/464/fpu
+powerpc/powerpc32/464
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/476/memset.S b/REORG.TODO/sysdeps/powerpc/powerpc32/476/memset.S
new file mode 100644
index 0000000000..17c91238e1
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/476/memset.S
@@ -0,0 +1,152 @@
+/* Optimized memset for PowerPC476 (128-byte cacheline).
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* memset
+
+       r3:destination address and return address
+       r4:source integer to copy
+       r5:byte count
+       r11:source integer replicated into all 32 bits of reg
+       r12:temp return address
+
+       Save return address in r12
+       If the destination is unaligned and the count is greater than 255 bytes,
+       set 0-3 bytes to make the destination aligned.
+       If the count is greater than 255 bytes and we are setting memory to zero,
+       use dcbz to set memory when we can;
+       otherwise do the following:
+       If 16 or more bytes remain, we use a 16-byte (four-word) store loop.
+       Finally we set 0-15 extra bytes with a string store. */
+
+EALIGN (memset, 5, 0)
+       rlwinm  r11,r4,0,24,31
+       rlwimi  r11,r4,8,16,23
+       rlwimi  r11,r11,16,0,15
+       addi    r12,r3,0
+       cmpwi   r5,0x00FF
+       ble     L(preword8_count_loop)
+       cmpwi   r4,0x00
+       beq     L(use_dcbz)
+       neg     r6,r3
+       clrlwi. r6,r6,30
+       beq     L(preword8_count_loop)
+       addi    r8,0,1
+       mtctr   r6
+       subi    r3,r3,1
+
+L(unaligned_bytecopy_loop):
+       stbu    r11,0x1(r3)
+       subf.   r5,r8,r5
+       beq     L(end_memset)
+       bdnz    L(unaligned_bytecopy_loop)
+       addi    r3,r3,1
+
+L(preword8_count_loop):
+       srwi.   r6,r5,4
+       beq     L(preword2_count_loop)
+       mtctr   r6
+       addi    r3,r3,-4
+       mr      r8,r11
+       mr      r9,r11
+       mr      r10,r11
+
+L(word8_count_loop_no_dcbt):
+       stwu    r8,4(r3)
+       stwu    r9,4(r3)
+       subi    r5,r5,0x10
+       stwu    r10,4(r3)
+       stwu    r11,4(r3)
+       bdnz    L(word8_count_loop_no_dcbt)
+       addi    r3,r3,4
+
+L(preword2_count_loop):
+       clrlwi. r7,r5,28
+       beq     L(end_memset)
+       mr      r8,r11
+       mr      r9,r11
+       mr      r10,r11
+       mtxer   r7
+       stswx   r8,0,r3
+
+L(end_memset):
+       addi    r3,r12,0
+       blr
+
+L(use_dcbz):
+       neg     r6,r3
+       clrlwi. r7,r6,28
+       beq     L(skip_string_loop)
+       mr      r8,r11
+       mr      r9,r11
+       mr      r10,r11
+       subf    r5,r7,r5
+       mtxer   r7
+       stswx   r8,0,r3
+       add     r3,r3,r7
+
+L(skip_string_loop):
+       clrlwi  r8,r6,25
+       srwi.   r8,r8,4
+       beq     L(dcbz_pre_loop)
+       mtctr   r8
+
+L(word_loop):
+       stw     r11,0(r3)
+       subi    r5,r5,0x10
+       stw     r11,4(r3)
+       stw     r11,8(r3)
+       stw     r11,12(r3)
+       addi    r3,r3,0x10
+       bdnz    L(word_loop)
+
+L(dcbz_pre_loop):
+       srwi    r6,r5,7
+       mtctr   r6
+       addi    r7,0,0
+
+L(dcbz_loop):
+       dcbz    r3,r7
+       addi    r3,r3,0x80
+       subi    r5,r5,0x80
+       bdnz    L(dcbz_loop)
+       srwi.   r6,r5,4
+       beq     L(postword2_count_loop)
+       mtctr   r6
+
+L(postword8_count_loop):
+       stw     r11,0(r3)
+       subi    r5,r5,0x10
+       stw     r11,4(r3)
+       stw     r11,8(r3)
+       stw     r11,12(r3)
+       addi    r3,r3,0x10
+       bdnz    L(postword8_count_loop)
+
+L(postword2_count_loop):
+       clrlwi. r7,r5,28
+       beq     L(end_memset)
+       mr      r8,r11
+       mr      r9,r11
+       mr      r10,r11
+       mtxer   r7
+       stswx   r8,0,r3
+       b       L(end_memset)
+END (memset)
+libc_hidden_builtin_def (memset)
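
A rough C rendering of the strategy described in the header comment above: align, clear whole 128-byte lines when storing zero (the assembly uses dcbz for this), then finish with word and byte stores. This is an illustrative sketch only; plain stores stand in for dcbz and the stswx string-store tail, which have no portable C equivalents.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define LINE 128   /* PowerPC 476 data cache line size.  */

    void *
    memset_sketch (void *dst, int c, size_t n)
    {
      unsigned char *p = dst;
      uint32_t word = 0x01010101u * (unsigned char) c;   /* splat the byte */

      if (n > 255 && c == 0)
        {
          /* Large zeroing request: reach a cache-line boundary, then clear
             whole lines (the assembly issues dcbz per line).  */
          while (n > 0 && ((uintptr_t) p & (LINE - 1)) != 0)
            { *p++ = 0; n--; }
          while (n >= LINE)
            { memset (p, 0, LINE); p += LINE; n -= LINE; }
        }
      else if (n > 255)
        {
          /* Large non-zero fill: only word-align the destination first.  */
          while (n > 0 && ((uintptr_t) p & 3) != 0)
            { *p++ = (unsigned char) c; n--; }
        }

      /* Word stores for the bulk, byte stores for the last few bytes.  */
      while (n >= 4)
        { memcpy (p, &word, 4); p += 4; n -= 4; }
      while (n-- > 0)
        *p++ = (unsigned char) c;
      return dst;
    }
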
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/970/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/970/Implies
new file mode 100644
index 0000000000..17139bf21c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/970/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/power4/fpu
+powerpc/powerpc32/power4
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/Implies
new file mode 100644
index 0000000000..39a34c5f57
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/Implies
@@ -0,0 +1 @@
+wordsize-32
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/Makefile b/REORG.TODO/sysdeps/powerpc/powerpc32/Makefile
new file mode 100644
index 0000000000..cf620c8269
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/Makefile
@@ -0,0 +1,49 @@
+# Powerpc32 specific build options.
+
+# Some Powerpc32 variants assume soft-fp is the default even though there is
+# an fp variant, so provide -mhard-float if --with-fp is explicitly passed.
+
+ifeq ($(with-fp),yes)
++cflags += -mhard-float
+ASFLAGS += -mhard-float
+sysdep-LDFLAGS += -mhard-float
+endif
+
+ifeq ($(subdir),gmon)
+sysdep_routines += ppc-mcount compat-ppc-mcount
+static-only-routines += ppc-mcount
+shared-only-routines += compat-ppc-mcount
+endif
+
+ifeq ($(subdir),misc)
+sysdep_routines += gprsave0 gprrest0 gprsave1 gprrest1
+endif
+
+# On PPC, -fpic works until the GOT contains 32768 bytes, and possibly
+# more depending on how clever the linker is.  Each GOT entry takes 4 bytes,
+# so that's at least 8192 entries.  Since libc only uses about 2000 entries,
+# we want to use -fpic, because this generates fewer relocs.
+ifeq (yes,$(build-shared))
+pic-ccflag = -fpic
+endif
+
+ifeq ($(subdir),csu)
+# There is no benefit to using sdata for these objects, and the user
+# of the library should be able to control what goes into sdata.
+CFLAGS-init.o = -G0
+CFLAGS-gmon-start.o = -G0
+
+ifeq (yes,$(build-shared))
+# Compatibility
+ifeq (yes,$(have-protected))
+CPPFLAGS-libgcc-compat.S = -DHAVE_DOT_HIDDEN
+endif
+sysdep_routines += libgcc-compat
+shared-only-routines += libgcc-compat
+endif
+endif
+
+ifeq ($(subdir),elf)
+# extra shared linker files to link only into dl-allobjs.so
+sysdep-rtld-routines += dl-start
+endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/Versions b/REORG.TODO/sysdeps/powerpc/powerpc32/Versions
new file mode 100644
index 0000000000..b0782fecd4
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/Versions
@@ -0,0 +1,40 @@
+libc {
+  GLIBC_2.0 {
+    # Functions from libgcc.
+    __divdi3; __moddi3; __udivdi3; __umoddi3;
+    __cmpdi2; __ucmpdi2;
+    __ashldi3; __ashrdi3; __lshrdi3;
+    __fixdfdi; __fixunsdfdi;
+    __fixsfdi; __fixunssfdi;
+    __floatdidf; __floatdisf;
+  }
+  GLIBC_2.16 {
+    __mcount_internal;
+  }
+  GLIBC_PRIVATE {
+    __mcount_internal;
+  }
+}
+
+libm {
+  GLIBC_2.2 {
+    # Special functions to save and restore registers used by the
+    # runtime libraries.
+    _restgpr0_13; _restgpr0_14; _restgpr0_15; _restgpr0_16; _restgpr0_17;
+    _restgpr0_18; _restgpr0_19; _restgpr0_20; _restgpr0_21; _restgpr0_22;
+    _restgpr0_22; _restgpr0_23; _restgpr0_24; _restgpr0_25; _restgpr0_26;
+    _restgpr0_27; _restgpr0_28; _restgpr0_29; _restgpr0_30; _restgpr0_31;
+    _savegpr0_13; _savegpr0_14; _savegpr0_15; _savegpr0_16; _savegpr0_17;
+    _savegpr0_18; _savegpr0_19; _savegpr0_20; _savegpr0_21; _savegpr0_22;
+    _savegpr0_22; _savegpr0_23; _savegpr0_24; _savegpr0_25; _savegpr0_26;
+    _savegpr0_27; _savegpr0_28; _savegpr0_29; _savegpr0_30; _savegpr0_31;
+    _restgpr1_13; _restgpr1_14; _restgpr1_15; _restgpr1_16; _restgpr1_17;
+    _restgpr1_18; _restgpr1_19; _restgpr1_20; _restgpr1_21; _restgpr1_22;
+    _restgpr1_22; _restgpr1_23; _restgpr1_24; _restgpr1_25; _restgpr1_26;
+    _restgpr1_27; _restgpr1_28; _restgpr1_29; _restgpr1_30; _restgpr1_31;
+    _savegpr1_13; _savegpr1_14; _savegpr1_15; _savegpr1_16; _savegpr1_17;
+    _savegpr1_18; _savegpr1_19; _savegpr1_20; _savegpr1_21; _savegpr1_22;
+    _savegpr1_22; _savegpr1_23; _savegpr1_24; _savegpr1_25; _savegpr1_26;
+    _savegpr1_27; _savegpr1_28; _savegpr1_29; _savegpr1_30; _savegpr1_31;
+  }
+}
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/__longjmp-common.S b/REORG.TODO/sysdeps/powerpc/powerpc32/__longjmp-common.S
new file mode 100644
index 0000000000..4b60a2f9a8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/__longjmp-common.S
@@ -0,0 +1,82 @@
+/* longjmp for PowerPC.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <stap-probe.h>
+#define _ASM
+#ifdef __NO_VMX__
+# include <novmxsetjmp.h>
+#else
+# include <jmpbuf-offsets.h>
+#endif
+
+#if defined __SPE__ || (defined __NO_FPRS__ && !defined _SOFT_FLOAT)
+# define LOAD_GP(N)	evldd r##N,((JB_FPRS+((N)-14)*2)*4)(r3)
+#else
+# define LOAD_GP(N)	lwz r##N,((JB_GPRS+(N)-14)*4)(r3)
+#endif
+
+ENTRY (__longjmp_symbol)
+
+#if defined PTR_DEMANGLE || defined CHECK_SP
+	lwz r24,(JB_GPR1*4)(r3)
+# ifdef CHECK_SP
+#  ifdef PTR_DEMANGLE
+	PTR_DEMANGLE3 (r24, r24, r25)
+#  endif
+	CHECK_SP (r24)
+	mr r1,r24
+# endif
+#else
+	lwz r1,(JB_GPR1*4)(r3)
+#endif
+	lwz r0,(JB_LR*4)(r3)
+	LOAD_GP (14)
+	LOAD_GP (15)
+	LOAD_GP (16)
+	LOAD_GP (17)
+	LOAD_GP (18)
+	LOAD_GP (19)
+	LOAD_GP (20)
+#ifdef PTR_DEMANGLE
+# ifndef CHECK_SP
+	PTR_DEMANGLE3 (r1, r24, r25)
+# endif
+	PTR_DEMANGLE2 (r0, r25)
+#endif
+	/* longjmp/longjmp_target probe expects longjmp first argument (4@3),
+	   second argument (-4@4), and target address (4@0), respectively.  */
+	LIBC_PROBE (longjmp, 3, 4@3, -4@4, 4@0)
+	mtlr r0
+	LOAD_GP (21)
+	LOAD_GP (22)
+	lwz r5,(JB_CR*4)(r3)
+	LOAD_GP (23)
+	LOAD_GP (24)
+	LOAD_GP (25)
+	mtcrf 0xFF,r5
+	LOAD_GP (26)
+	LOAD_GP (27)
+	LOAD_GP (28)
+	LOAD_GP (29)
+	LOAD_GP (30)
+	LOAD_GP (31)
+	LIBC_PROBE (longjmp_target, 3, 4@3, -4@4, 4@0)
+	mr r3,r4
+	blr
+END (__longjmp_symbol)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/__longjmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/__longjmp.S
new file mode 100644
index 0000000000..42127c6ff2
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/__longjmp.S
@@ -0,0 +1,39 @@
+/* AltiVec/VMX (new) version of __longjmp for PowerPC.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <libc-symbols.h>
+#include <shlib-compat.h>
+
+#if !IS_IN (libc)
+/* Build a non-versioned object for rtld-*.  */
+# define __longjmp_symbol __longjmp
+# include "__longjmp-common.S"
+
+#else /* IS_IN (libc) */
+strong_alias (__vmx__longjmp, __longjmp);
+# define __longjmp_symbol  __vmx__longjmp
+# include "__longjmp-common.S"
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+#  define __NO_VMX__
+#  undef JB_SIZE
+#  undef __longjmp_symbol
+#  define __longjmp_symbol  __novmx__longjmp
+#  include "__longjmp-common.S"
+# endif
+#endif /* IS_IN (libc) */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/a2/memcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/a2/memcpy.S
new file mode 100644
index 0000000000..c795ff48fe
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/a2/memcpy.S
@@ -0,0 +1,527 @@
+/* Optimized memcpy implementation for PowerPC A2.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Michael Brutman <brutman@us.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#define PREFETCH_AHEAD 4        /* no cache lines SRC prefetching ahead  */
+#define ZERO_AHEAD 2            /* no cache lines DST zeroing ahead  */
+
+	.machine  a2
+EALIGN (memcpy, 5, 0)
+	CALL_MCOUNT
+
+	dcbt    0,r4            /* Prefetch ONE SRC cacheline  */
+	cmplwi  cr1,r5,16       /* is size < 16 ?  */
+	mr      r6,r3           /* Copy dest reg to r6; */
+	blt+    cr1,L(shortcopy)
+
+
+	/* Big copy (16 bytes or more)
+
+	   Figure out how far to the nearest quadword boundary, or if we are
+	   on one already.
+
+	   r3 - return value (always)
+	   r4 - current source addr
+	   r5 - copy length
+	   r6 - current dest addr
+	*/
+
+	neg     r8,r3           /* LS 4 bits = # bytes to 16-byte dest bdry  */
+	clrlwi  r8,r8,32-4      /* align to 16byte boundary  */
+	sub     r7,r4,r3        /* compute offset to src from dest */
+	cmplwi  cr0,r8,0        /* Were we aligned on a 16 byte bdy? */
+	beq+    L(dst_aligned)
+
+
+
+	/* Destination is not aligned on quadword boundary.  Get us to one.
+
+	   r3 - return value (always)
+	   r4 - current source addr
+	   r5 - copy length
+	   r6 - current dest addr
+	   r7 - offset to src from dest
+	   r8 - number of bytes to quadword boundary
+	*/
+
+	mtcrf   0x01,r8         /* put #bytes to boundary into cr7  */
+	subf    r5,r8,r5        /* adjust remaining len */
+
+	bf      cr7*4+3,1f
+	lbzx    r0,r7,r6        /* copy 1 byte addr */
+	stb     r0,0(r6)
+	addi    r6,r6,1
+1:
+	bf      cr7*4+2,2f
+	lhzx    r0,r7,r6        /* copy 2 byte addr */
+	sth     r0,0(r6)
+	addi    r6,r6,2
+2:
+	bf      cr7*4+1,4f
+	lwzx    r0,r7,r6        /* copy 4 byte addr */
+	stw     r0,0(r6)
+	addi    r6,r6,4
+4:
+	bf      cr7*4+0,8f
+	lfdx    r0,r7,r6        /* copy 8 byte addr */
+	stfd    r0,0(r6)
+	addi    r6,r6,8
+8:
+	add     r4,r7,r6        /* update src addr */
+
+
+
+	/* Dest is quadword aligned now.
+
+	   Lots of decisions to make.  If we are copying less than a cache
+	   line we won't be here long.  If we are not on a cache line
+	   boundary we need to get there.  And then we need to figure out
+	   how many cache lines ahead to pre-touch.
+
+	   r3 - return value (always)
+	   r4 - current source addr
+	   r5 - copy length
+	   r6 - current dest addr
+	*/
+
+
+	.align  4
+L(dst_aligned):
+
+
+#ifdef SHARED
+	mflr    r0
+/* Establishes GOT addressability so we can load __cache_line_size
+   from static. This value was set from the aux vector during startup.  */
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis   r9,r9,__cache_line_size-got_label@ha
+	lwz     r9,__cache_line_size-got_label@l(r9)
+	mtlr    r0
+#else
+/* Load __cache_line_size from static. This value was set from the
+   aux vector during startup.  */
+	lis     r9,__cache_line_size@ha
+	lwz     r9,__cache_line_size@l(r9)
+#endif
+
+	cmplwi  cr5, r9, 0
+	bne+    cr5,L(cachelineset)
+
+/* __cache_line_size not set: generic byte copy without much optimization */
+	andi.	r0,r5,1		/* If length is odd copy one byte.  */
+	beq	L(cachelinenotset_align)
+	lbz	r7,0(r4)	/* Read one byte from source.  */
+	addi	r5,r5,-1	/* Update length.  */
+	addi	r4,r4,1		/* Update source pointer address.  */
+	stb	r7,0(r6)	/* Store one byte on dest.  */
+	addi	r6,r6,1		/* Update dest pointer address.  */
+L(cachelinenotset_align):
+	cmpwi   cr7,r5,0	/* If length is 0 return.  */
+	beqlr	cr7
+	ori	r2,r2,0		/* Force a new dispatch group.  */
+L(cachelinenotset_loop):
+	addic.	r5,r5,-2	/* Update length.  */
+	lbz	r7,0(r4)	/* Load 2 bytes from source.  */
+	lbz	r8,1(r4)
+	addi	r4,r4,2		/* Update source pointer address.  */
+	stb	r7,0(r6)	/* Store 2 bytes on dest.  */
+	stb	r8,1(r6)
+	addi	r6,r6,2		/* Update dest pointer address.  */
+	bne	L(cachelinenotset_loop)
+	blr
+
+
+L(cachelineset):
+
+	addi   r10,r9,-1
+
+	cmpw   cr5,r5,r10       /* Less than a cacheline to go? */
+
+	neg     r7,r6           /* How far to next cacheline bdy? */
+
+	addi    r6,r6,-8        /* prepare for stdu  */
+	cmpwi   cr0,r9,128
+	addi    r4,r4,-8        /* prepare for ldu  */
+
+
+	ble+    cr5,L(lessthancacheline)
+
+	beq-    cr0,L(big_lines) /* 128 byte line code */
+
+
+
+
+	/* More than a cacheline left to go, and using 64 byte cachelines */
+
+	clrlwi  r7,r7,32-6      /* How far to next cacheline bdy? */
+
+	cmplwi  cr6,r7,0        /* Are we on a cacheline bdy already? */
+
+	/* Reduce total len by what it takes to get to the next cache line */
+	subf    r5,r7,r5
+	srwi    r7,r7,4         /* How many qws to get to the line bdy? */
+
+	/* How many full cache lines to copy after getting to a line bdy? */
+	srwi    r10,r5,6
+
+	cmplwi  r10,0           /* If no full cache lines to copy ... */
+	li      r11,0           /* number cachelines to copy with prefetch  */
+	beq     L(nocacheprefetch)
+
+
+	/* We are here because we have at least one full cache line to copy,
+	   and therefore some pre-touching to do. */
+
+	cmplwi  r10,PREFETCH_AHEAD
+	li      r12,64+8        /* prefetch distance  */
+	ble     L(lessthanmaxprefetch)
+
+	/* We can only do so much pre-fetching.  R11 will have the count of
+	   lines left to prefetch after the initial batch of prefetches
+	   are executed. */
+
+	subi    r11,r10,PREFETCH_AHEAD
+	li      r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+	mtctr   r10
+
+	/* At this point r10/ctr hold the number of lines to prefetch in this
+	   initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+	dcbt    r12,r4
+	addi    r12,r12,64
+	bdnz    L(prefetchSRC)
+
+
+	/* Prefetching is done, or was not needed.
+
+	   cr6 - are we on a cacheline boundary already?
+	   r7  - number of quadwords to the next cacheline boundary
+	*/
+
+L(nocacheprefetch):
+	mtctr   r7
+
+	cmplwi  cr1,r5,64   /* Less than a cache line to copy? */
+
+	/* How many bytes are left after we copy whatever full
+	   cache lines we can get? */
+	clrlwi  r5,r5,32-6
+
+	beq     cr6,L(cachelinealigned)
+
+
+	/* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+	lfd     fp9,0x08(r4)
+	lfdu    fp10,0x10(r4)
+	stfd    fp9,0x08(r6)
+	stfdu   fp10,0x10(r6)
+	bdnz    L(aligntocacheline)
+
+
+	.align 4
+L(cachelinealigned):            /* copy while cache lines  */
+
+	blt-    cr1,L(lessthancacheline) /* size <64  */
+
+L(outerloop):
+	cmpwi   r11,0
+	mtctr   r11
+	beq-    L(endloop)
+
+	li      r11,64*ZERO_AHEAD +8    /* DCBZ dist  */
+
+	.align  4
+	/* Copy whole cachelines, optimized by prefetching SRC cacheline  */
+L(loop):                        /* Copy aligned body  */
+	dcbt    r12,r4          /* PREFETCH SOURCE some cache lines ahead  */
+	lfd     fp9,  0x08(r4)
+	dcbz    r11,r6
+	lfd     fp10, 0x10(r4)
+	lfd     fp11, 0x18(r4)
+	lfd     fp12, 0x20(r4)
+	stfd    fp9,  0x08(r6)
+	stfd    fp10, 0x10(r6)
+	stfd    fp11, 0x18(r6)
+	stfd    fp12, 0x20(r6)
+	lfd     fp9,  0x28(r4)
+	lfd     fp10, 0x30(r4)
+	lfd     fp11, 0x38(r4)
+	lfdu    fp12, 0x40(r4)
+	stfd    fp9,  0x28(r6)
+	stfd    fp10, 0x30(r6)
+	stfd    fp11, 0x38(r6)
+	stfdu   fp12, 0x40(r6)
+
+	bdnz    L(loop)
+
+
+L(endloop):
+	cmpwi   r10,0
+	beq-    L(endloop2)
+	mtctr   r10
+
+L(loop2):                       /* Copy aligned body  */
+	lfd     fp9,  0x08(r4)
+	lfd     fp10, 0x10(r4)
+	lfd     fp11, 0x18(r4)
+	lfd     fp12, 0x20(r4)
+	stfd    fp9,  0x08(r6)
+	stfd    fp10, 0x10(r6)
+	stfd    fp11, 0x18(r6)
+	stfd    fp12, 0x20(r6)
+	lfd     fp9,  0x28(r4)
+	lfd     fp10, 0x30(r4)
+	lfd     fp11, 0x38(r4)
+	lfdu    fp12, 0x40(r4)
+	stfd    fp9,  0x28(r6)
+	stfd    fp10, 0x30(r6)
+	stfd    fp11, 0x38(r6)
+	stfdu   fp12, 0x40(r6)
+
+	bdnz    L(loop2)
+L(endloop2):
+
+
+	.align  4
+L(lessthancacheline):           /* Was there less than cache to do ?  */
+	cmplwi  cr0,r5,16
+	srwi    r7,r5,4         /* divide size by 16  */
+	blt-    L(do_lt16)
+	mtctr   r7
+
+L(copy_remaining):
+	lfd     fp9,  0x08(r4)
+	lfdu    fp10, 0x10(r4)
+	stfd    fp9,  0x08(r6)
+	stfdu   fp10, 0x10(r6)
+	bdnz    L(copy_remaining)
+
+L(do_lt16):                     /* less than 16 ?  */
+	cmplwi  cr0,r5,0        /* copy remaining bytes (0-15)  */
+	beqlr+                  /* no rest to copy  */
+	addi    r4,r4,8
+	addi    r6,r6,8
+
+L(shortcopy):                   /* SIMPLE COPY to handle size =< 15 bytes  */
+	mtcrf   0x01,r5
+	sub     r7,r4,r6
+	bf-     cr7*4+0,8f
+	lfdx    fp9,r7,r6       /* copy 8 byte  */
+	stfd    fp9,0(r6)
+	addi    r6,r6,8
+8:
+	bf      cr7*4+1,4f
+	lwzx    r0,r7,r6        /* copy 4 byte  */
+	stw     r0,0(r6)
+	addi    r6,r6,4
+4:
+	bf      cr7*4+2,2f
+	lhzx    r0,r7,r6        /* copy 2 byte  */
+	sth     r0,0(r6)
+	addi    r6,r6,2
+2:
+	bf      cr7*4+3,1f
+	lbzx    r0,r7,r6        /* copy 1 byte  */
+	stb     r0,0(r6)
+1:
+	blr
+
+
+
+
+
+	/* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+	clrlwi  r7,r7,32-7      /* How far to next cacheline bdy? */
+
+	cmplwi  cr6,r7,0        /* Are we on a cacheline bdy already? */
+
+	/* Reduce total len by what it takes to get to the next cache line */
+	subf    r5,r7,r5
+	srwi    r7,r7,4         /* How many qw to get to the line bdy? */
+
+	/* How many full cache lines to copy after getting to a line bdy? */
+	srwi    r10,r5,7
+
+	cmplwi  r10,0           /* If no full cache lines to copy ... */
+	li      r11,0           /* number cachelines to copy with prefetch  */
+	beq     L(nocacheprefetch_128)
+
+
+	/* We are here because we have at least one full cache line to copy,
+	   and therefore some pre-touching to do. */
+
+	cmplwi  r10,PREFETCH_AHEAD
+	li      r12,128+8       /* prefetch distance  */
+	ble     L(lessthanmaxprefetch_128)
+
+	/* We can only do so much pre-fetching.  R11 will have the count of
+	   lines left to prefetch after the initial batch of prefetches
+	   are executed. */
+
+	subi    r11,r10,PREFETCH_AHEAD
+	li      r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+	mtctr   r10
+
+	/* At this point r10/ctr hold the number of lines to prefetch in this
+	   initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+	dcbt    r12,r4
+	addi    r12,r12,128
+	bdnz    L(prefetchSRC_128)
+
+
+	/* Prefetching is done, or was not needed.
+
+	   cr6 - are we on a cacheline boundary already?
+	   r7  - number of quadwords to the next cacheline boundary
+	*/
+
+L(nocacheprefetch_128):
+	mtctr   r7
+
+	cmplwi  cr1,r5,128  /* Less than a cache line to copy? */
+
+	/* How many bytes are left after we copy whatever full
+	   cache lines we can get? */
+	clrlwi  r5,r5,32-7
+
+	beq     cr6,L(cachelinealigned_128)
+
+
+	/* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+	lfd     fp9,0x08(r4)
+	lfdu    fp10,0x10(r4)
+	stfd    fp9,0x08(r6)
+	stfdu   fp10,0x10(r6)
+	bdnz    L(aligntocacheline_128)
+
+
+L(cachelinealigned_128):        /* copy while cache lines  */
+
+	blt-    cr1,L(lessthancacheline) /* size <128  */
+
+L(outerloop_128):
+	cmpwi   r11,0
+	mtctr   r11
+	beq-    L(endloop_128)
+
+	li      r11,128*ZERO_AHEAD +8    /* DCBZ dist  */
+
+	.align  4
+	/* Copy whole cachelines, optimized by prefetching SRC cacheline  */
+L(loop_128):                    /* Copy aligned body  */
+	dcbt    r12,r4          /* PREFETCH SOURCE some cache lines ahead  */
+	lfd     fp9,  0x08(r4)
+	dcbz    r11,r6
+	lfd     fp10, 0x10(r4)
+	lfd     fp11, 0x18(r4)
+	lfd     fp12, 0x20(r4)
+	stfd    fp9,  0x08(r6)
+	stfd    fp10, 0x10(r6)
+	stfd    fp11, 0x18(r6)
+	stfd    fp12, 0x20(r6)
+	lfd     fp9,  0x28(r4)
+	lfd     fp10, 0x30(r4)
+	lfd     fp11, 0x38(r4)
+	lfd     fp12, 0x40(r4)
+	stfd    fp9,  0x28(r6)
+	stfd    fp10, 0x30(r6)
+	stfd    fp11, 0x38(r6)
+	stfd    fp12, 0x40(r6)
+	lfd     fp9,  0x48(r4)
+	lfd     fp10, 0x50(r4)
+	lfd     fp11, 0x58(r4)
+	lfd     fp12, 0x60(r4)
+	stfd    fp9,  0x48(r6)
+	stfd    fp10, 0x50(r6)
+	stfd    fp11, 0x58(r6)
+	stfd    fp12, 0x60(r6)
+	lfd     fp9,  0x68(r4)
+	lfd     fp10, 0x70(r4)
+	lfd     fp11, 0x78(r4)
+	lfdu    fp12, 0x80(r4)
+	stfd    fp9,  0x68(r6)
+	stfd    fp10, 0x70(r6)
+	stfd    fp11, 0x78(r6)
+	stfdu   fp12, 0x80(r6)
+
+	bdnz    L(loop_128)
+
+
+L(endloop_128):
+	cmpwi   r10,0
+	beq-    L(endloop2_128)
+	mtctr   r10
+
+L(loop2_128):                   /* Copy aligned body  */
+	lfd     fp9,  0x08(r4)
+	lfd     fp10, 0x10(r4)
+	lfd     fp11, 0x18(r4)
+	lfd     fp12, 0x20(r4)
+	stfd    fp9,  0x08(r6)
+	stfd    fp10, 0x10(r6)
+	stfd    fp11, 0x18(r6)
+	stfd    fp12, 0x20(r6)
+	lfd     fp9,  0x28(r4)
+	lfd     fp10, 0x30(r4)
+	lfd     fp11, 0x38(r4)
+	lfd     fp12, 0x40(r4)
+	stfd    fp9,  0x28(r6)
+	stfd    fp10, 0x30(r6)
+	stfd    fp11, 0x38(r6)
+	stfd    fp12, 0x40(r6)
+	lfd     fp9,  0x48(r4)
+	lfd     fp10, 0x50(r4)
+	lfd     fp11, 0x58(r4)
+	lfd     fp12, 0x60(r4)
+	stfd    fp9,  0x48(r6)
+	stfd    fp10, 0x50(r6)
+	stfd    fp11, 0x58(r6)
+	stfd    fp12, 0x60(r6)
+	lfd     fp9,  0x68(r4)
+	lfd     fp10, 0x70(r4)
+	lfd     fp11, 0x78(r4)
+	lfdu    fp12, 0x80(r4)
+	stfd    fp9,  0x68(r6)
+	stfd    fp10, 0x70(r6)
+	stfd    fp11, 0x78(r6)
+	stfdu   fp12, 0x80(r6)
+	bdnz    L(loop2_128)
+L(endloop2_128):
+
+	b       L(lessthancacheline)
+
+
+END (memcpy)
+libc_hidden_builtin_def (memcpy)
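
Stripped of the register scheduling, the A2 copy above has a head/body/tail shape: bring the destination up to a cache-line boundary, stream whole lines while prefetching the source ahead (dcbt) and pre-establishing destination lines (dcbz), then mop up the remainder. Below is a hedged C sketch of that shape only; dcbz has no portable equivalent, and the GCC __builtin_prefetch stands in for dcbt.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* 'line' plays the role of __cache_line_size (64 or 128 above),
       assumed nonzero and a power of two.  */
    void *
    memcpy_lines_sketch (void *dst, const void *src, size_t n, size_t line)
    {
      unsigned char *d = dst;
      const unsigned char *s = src;

      /* Head: copy up to the next destination cache-line boundary.  */
      size_t head = (line - ((uintptr_t) d & (line - 1))) & (line - 1);
      if (head > n)
        head = n;
      memcpy (d, s, head);
      d += head; s += head; n -= head;

      /* Body: whole cache lines.  The assembly prefetches the source a few
         lines ahead and uses dcbz on the destination line so the stores do
         not first fetch its old contents from memory.  */
      while (n >= line)
        {
          __builtin_prefetch (s + 4 * line);   /* ~PREFETCH_AHEAD lines ahead */
          memcpy (d, s, line);
          d += line; s += line; n -= line;
        }

      /* Tail: whatever is left after the last full line.  */
      memcpy (d, s, n);
      return dst;
    }
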
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/add_n.S b/REORG.TODO/sysdeps/powerpc/powerpc32/add_n.S
new file mode 100644
index 0000000000..0687be6236
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/add_n.S
@@ -0,0 +1,68 @@
+/* Add two limb vectors of equal, non-zero length for PowerPC.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* mp_limb_t mpn_add_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr,
+                        mp_size_t size)
+   Calculate s1+s2 and put result in res_ptr; return carry, 0 or 1.  */
+
+/* Note on optimisation: This code is optimal for the 601.  Almost every other
+   possible 2-unrolled inner loop will not be.  Also, watch out for the
+   alignment...  */
+
+EALIGN (__mpn_add_n, 3, 0)
+
+/* Set up for loop below.  */
+	mtcrf 0x01,r6
+	srwi. r7,r6,1
+	li    r10,0
+	mtctr r7
+	bt    31,L(2)
+
+/* Clear the carry.  */
+	addic r0,r0,0
+/* Adjust pointers for loop.  */
+	addi  r3,r3,-4
+	addi  r4,r4,-4
+	addi  r5,r5,-4
+	b     L(0)
+
+L(2):	lwz  r7,0(r5)
+	lwz  r6,0(r4)
+	addc r6,r6,r7
+	stw  r6,0(r3)
+        beq  L(1)
+
+/* The loop.  */
+
+/* Align start of loop to an odd word boundary to guarantee that the
+   last two words can be fetched in one access (for 601).  */
+L(0):	lwz  r9,4(r4)
+	lwz  r8,4(r5)
+	lwzu r6,8(r4)
+	lwzu r7,8(r5)
+	adde r8,r9,r8
+	stw  r8,4(r3)
+	adde r6,r6,r7
+	stwu r6,8(r3)
+	bdnz L(0)
+/* Return the carry.  */
+L(1):	addze r3,r10
+	blr
+END (__mpn_add_n)
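
For reference, the carry-propagating addition that __mpn_add_n performs with addc/adde corresponds to the following portable C (mp_limb_t is 32 bits on powerpc32; this is an illustrative reference, not the glibc implementation):

    #include <stdint.h>

    typedef uint32_t limb_t;   /* stands in for the 32-bit mp_limb_t */

    /* res[i] = s1[i] + s2[i] with carry propagation; returns the final carry.  */
    limb_t
    mpn_add_n_ref (limb_t *res, const limb_t *s1, const limb_t *s2, long size)
    {
      limb_t carry = 0;
      for (long i = 0; i < size; i++)
        {
          limb_t a = s1[i];
          limb_t sum = a + s2[i];        /* addc: may wrap */
          limb_t c1 = sum < a;           /* carry out of the first add */
          sum += carry;                  /* adde: add the incoming carry */
          carry = c1 | (sum < carry);    /* carry out of either step */
          res[i] = sum;
        }
      return carry;
    }
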
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/addmul_1.S b/REORG.TODO/sysdeps/powerpc/powerpc32/addmul_1.S
new file mode 100644
index 0000000000..f742be21b8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/addmul_1.S
@@ -0,0 +1,48 @@
+/* Multiply a limb vector by a single limb, for PowerPC.
+   Copyright (C) 1993-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* mp_limb_t mpn_addmul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
+                           mp_size_t s1_size, mp_limb_t s2_limb)
+   Calculate res+s1*s2 and put result back in res; return carry.  */
+ENTRY (__mpn_addmul_1)
+	mtctr	r5
+
+	lwz	r0,0(r4)
+	mullw	r7,r0,r6
+	mulhwu	r10,r0,r6
+	lwz     r9,0(r3)
+	addc	r8,r7,r9
+	addi	r3,r3,-4		/* adjust res_ptr */
+	bdz	L(1)
+
+L(0):	lwzu	r0,4(r4)
+	stwu	r8,4(r3)
+	mullw	r8,r0,r6
+	adde	r7,r8,r10
+	mulhwu	r10,r0,r6
+	lwz     r9,4(r3)
+	addze   r10,r10
+	addc    r8,r7,r9
+	bdnz	L(0)
+
+L(1):	stw	r8,4(r3)
+	addze	r3,r10
+	blr
+END (__mpn_addmul_1)
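
The mullw/mulhwu pair above forms a full 32x32->64-bit product; in C the same update can be written with a 64-bit intermediate (a reference sketch, not the glibc implementation):

    #include <stdint.h>

    /* res[i] += s1[i] * s2_limb with carry propagation; returns the carry limb.  */
    uint32_t
    mpn_addmul_1_ref (uint32_t *res, const uint32_t *s1, long size,
                      uint32_t s2_limb)
    {
      uint32_t carry = 0;
      for (long i = 0; i < size; i++)
        {
          /* mullw gives the low 32 bits, mulhwu the high 32 bits of this.  */
          uint64_t t = (uint64_t) s1[i] * s2_limb + res[i] + carry;
          res[i] = (uint32_t) t;
          carry = (uint32_t) (t >> 32);
        }
      return carry;
    }
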
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/atomic-machine.h b/REORG.TODO/sysdeps/powerpc/powerpc32/atomic-machine.h
new file mode 100644
index 0000000000..96c7d81359
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/atomic-machine.h
@@ -0,0 +1,126 @@
+/* Atomic operations.  PowerPC32 version.
+   Copyright (C) 2003-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/*  POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
+    This is a hint to the hardware to expect additional updates adjacent
+    to the lock word or not.  If we are acquiring a Mutex, the hint
+    should be true.  Otherwise we are releasing a Mutex or doing a simple
+    atomic operation.  In that case we don't expect additional updates
+    adjacent to the lock word after the Store Conditional and the hint
+    should be false.  */
+
+#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
+# define MUTEX_HINT_ACQ	",1"
+# define MUTEX_HINT_REL	",0"
+#else
+# define MUTEX_HINT_ACQ
+# define MUTEX_HINT_REL
+#endif
+
+#define __HAVE_64B_ATOMICS 0
+#define USE_ATOMIC_COMPILER_BUILTINS 0
+#define ATOMIC_EXCHANGE_USES_CAS 1
+
+/*
+ * The 32-bit exchange_bool is different on powerpc64 because the subf
+ * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
+ * (a load word and zero (high 32) form).  So powerpc64 has a slightly
+ * different version in sysdeps/powerpc/powerpc64/atomic-machine.h.
+ */
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval)         \
+({									      \
+  unsigned int __tmp;							      \
+  __asm __volatile (							      \
+		    "1:	lwarx	%0,0,%1" MUTEX_HINT_ACQ "\n"		      \
+		    "	subf.	%0,%2,%0\n"				      \
+		    "	bne	2f\n"					      \
+		    "	stwcx.	%3,0,%1\n"				      \
+		    "	bne-	1b\n"					      \
+		    "2:	" __ARCH_ACQ_INSTR				      \
+		    : "=&r" (__tmp)					      \
+		    : "b" (mem), "r" (oldval), "r" (newval)		      \
+		    : "cr0", "memory");					      \
+  __tmp != 0;								      \
+})
+
+/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
+   load and reserve (ldarx) and store conditional (stdcx.) instructions.
+   So for powerpc32 we stub out the 64-bit forms.  */
+#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
+  (abort (), 0)
+
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+  (abort (), (__typeof (*mem)) 0)
+
+#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
+  (abort (), (__typeof (*mem)) 0)
+
+#define __arch_atomic_exchange_64_acq(mem, value) \
+    ({ abort (); (*mem) = (value); })
+
+#define __arch_atomic_exchange_64_rel(mem, value) \
+    ({ abort (); (*mem) = (value); })
+
+#define __arch_atomic_exchange_and_add_64(mem, value) \
+    ({ abort (); (*mem) = (value); })
+
+#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
+    ({ abort (); (*mem) = (value); })
+
+#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
+    ({ abort (); (*mem) = (value); })
+
+#define __arch_atomic_increment_val_64(mem) \
+    ({ abort (); (*mem)++; })
+
+#define __arch_atomic_decrement_val_64(mem) \
+    ({ abort (); (*mem)--; })
+
+#define __arch_atomic_decrement_if_positive_64(mem) \
+    ({ abort (); (*mem)--; })
+
+#ifdef _ARCH_PWR4
+/*
+ * Newer powerpc64 processors support the new "light weight" sync (lwsync)
+ * So if the build is using -mcpu=[power4,power5,power5+,970] we can
+ * safely use lwsync.
+ */
+# define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
+/*
+ * "light weight" sync can also be used for the release barrier.
+ */
+# ifndef UP
+#  define __ARCH_REL_INSTR	"lwsync"
+# endif
+# define atomic_write_barrier()	__asm ("lwsync" ::: "memory")
+#else
+/*
+ * Older powerpc32 processors don't support the new "light weight"
+ * sync (lwsync).  So the only safe option is to use normal sync
+ * for all powerpc32 applications.
+ */
+# define atomic_read_barrier()	__asm ("sync" ::: "memory")
+# define atomic_write_barrier()	__asm ("sync" ::: "memory")
+#endif
+
+/*
+ * Include the rest of the atomic ops macros which are common to both
+ * powerpc32 and powerpc64.
+ */
+#include_next <atomic-machine.h>
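
The lwarx/stwcx. loop in __arch_compare_and_exchange_bool_32_acq returns 0 when the store succeeded (the old value matched) and nonzero otherwise. The header deliberately sets USE_ATOMIC_COMPILER_BUILTINS to 0, so glibc does not emit the builtin here, but the observable semantics match the GCC builtin below; this is a sketch of those semantics only.

    /* Same convention as the bool_32_acq macro: 0 on success, nonzero if
       *mem did not equal oldval.  */
    static inline int
    cas_bool_acq_sketch (unsigned int *mem, unsigned int newval,
                         unsigned int oldval)
    {
      unsigned int expected = oldval;
      return !__atomic_compare_exchange_n (mem, &expected, newval,
                                           0 /* strong */,
                                           __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
    }
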
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/backtrace.c b/REORG.TODO/sysdeps/powerpc/powerpc32/backtrace.c
new file mode 100644
index 0000000000..394062136c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/backtrace.c
@@ -0,0 +1,131 @@
+/* Return backtrace of current program state.
+   Copyright (C) 1998-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <execinfo.h>
+#include <stddef.h>
+#include <string.h>
+#include <signal.h>
+#include <libc-vdso.h>
+
+/* This is the stack layout we see with every stack frame.
+   Note that every routine is required by the ABI to lay out the stack
+   like this.
+
+            +----------------+        +-----------------+
+    %r1  -> | %r1 last frame--------> | %r1 last frame--->...  --> NULL
+            |                |        |                 |
+            | (unused)       |        | return address  |
+            +----------------+        +-----------------+
+*/
+struct layout
+{
+  struct layout *next;
+  void *return_address;
+};
+
+#define SIGNAL_FRAMESIZE 64
+
+/* Since the signal handler is just like any other function it needs to
+   save/restore its LR and it will save it into the caller's stack frame.
+   Since a signal handler doesn't have a caller, the kernel creates a
+   dummy frame to make it look like it has a caller.  */
+struct signal_frame_32 {
+  char               dummy[SIGNAL_FRAMESIZE];
+  struct sigcontext  sctx;
+  mcontext_t         mctx;
+  /* We don't care about the rest, since IP value is at 'mctx' field.  */
+};
+
+static inline int
+is_sigtramp_address (void *nip)
+{
+#ifdef SHARED
+  if (nip == VDSO_SYMBOL (sigtramp32))
+    return 1;
+#endif
+  return 0;
+}
+
+struct rt_signal_frame_32 {
+  char               dummy[SIGNAL_FRAMESIZE + 16];
+  siginfo_t          info;
+  struct ucontext    uc;
+  /* We don't care about the rest, since IP value is at 'uc' field.  */
+};
+
+static inline int
+is_sigtramp_address_rt (void * nip)
+{
+#ifdef SHARED
+  if (nip == VDSO_SYMBOL (sigtramp_rt32))
+    return 1;
+#endif
+  return 0;
+}
+
+int
+__backtrace (void **array, int size)
+{
+  struct layout *current;
+  int count;
+
+  /* Force gcc to spill LR.  */
+  asm volatile ("" : "=l"(current));
+
+  /* Get the address on top-of-stack.  */
+  asm volatile ("lwz %0,0(1)" : "=r"(current));
+
+  for (				count = 0;
+       current != NULL && 	count < size;
+       current = current->next, count++)
+    {
+      gregset_t *gregset = NULL;
+
+      array[count] = current->return_address;
+
+      /* Check if the symbol is the signal trampoline and get the interrupted
+       * symbol address from the trampoline saved area.  */
+      if (is_sigtramp_address (current->return_address))
+	{
+	  struct signal_frame_32 *sigframe =
+	    (struct signal_frame_32*) current;
+          gregset = &sigframe->mctx.gregs;
+        }
+      else if (is_sigtramp_address_rt (current->return_address))
+	{
+	  struct rt_signal_frame_32 *sigframe =
+            (struct rt_signal_frame_32*) current;
+          gregset = &sigframe->uc.uc_mcontext.uc_regs->gregs;
+        }
+      if (gregset)
+	{
+	  array[++count] = (void*)((*gregset)[PT_NIP]);
+	  current = (void*)((*gregset)[PT_R1]);
+	}
+    }
+
+  /* It's possible the second-last stack frame can't return
+     (that is, it's __libc_start_main), in which case
+     the CRT startup code will have set its LR to 'NULL'.  */
+  if (count > 0 && array[count-1] == NULL)
+    count--;
+
+  return count;
+}
+weak_alias (__backtrace, backtrace)
+libc_hidden_def (__backtrace)
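
A typical caller of the frame walker above, via the public entry points declared in <execinfo.h> (hypothetical usage, assuming a fixed buffer of 32 frames):

    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void
    print_trace (void)
    {
      void *addrs[32];
      int depth = backtrace (addrs, 32);
      char **names = backtrace_symbols (addrs, depth);

      for (int i = 0; i < depth; i++)
        printf ("#%d %p %s\n", i, addrs[i], names != NULL ? names[i] : "?");
      free (names);
    }
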
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/bits/wordsize.h b/REORG.TODO/sysdeps/powerpc/powerpc32/bits/wordsize.h
new file mode 100644
index 0000000000..04ca9debf0
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/bits/wordsize.h
@@ -0,0 +1,11 @@
+/* Determine the wordsize from the preprocessor defines.  */
+
+#if defined __powerpc64__
+# define __WORDSIZE	64
+# define __WORDSIZE_TIME64_COMPAT32	1
+#else
+# define __WORDSIZE	32
+# define __WORDSIZE_TIME64_COMPAT32	0
+# define __WORDSIZE32_SIZE_ULONG	0
+# define __WORDSIZE32_PTRDIFF_LONG	0
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/bsd-_setjmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/bsd-_setjmp.S
new file mode 100644
index 0000000000..169766c304
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/bsd-_setjmp.S
@@ -0,0 +1,56 @@
+/* BSD `_setjmp' entry point to `sigsetjmp (..., 0)'.  PowerPC32/64 version.
+   Copyright (C) 1994-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+#include <shlib-compat.h>
+#include <libc-symbols.h>
+#include <sysdep.h>
+
+#if !IS_IN (libc)
+/* Build a non-versioned object for rtld-*.  */
+ENTRY (_setjmp)
+	li r4,0			/* Set second argument to 0.  */
+	b __sigsetjmp@local
+END (_setjmp)
+libc_hidden_def (_setjmp)
+#else
+/* Build a versioned object for libc.  */
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+compat_symbol (libc, __novmx_setjmp, _setjmp, GLIBC_2_0);
+
+ENTRY (__novmx_setjmp)
+	li r4,0			/* Set second argument to 0.  */
+	b __novmx__sigsetjmp@local
+END (__novmx_setjmp)
+libc_hidden_def (__novmx_setjmp)
+# endif /* defined SHARED  && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4) */
+
+versioned_symbol (libc, __vmx_setjmp, _setjmp, GLIBC_2_3_4)
+/* The __GI__setjmp prototype is needed for nptl, i.e. _setjmp is defined
+   as a libc_hidden_proto and is used in sysdeps/generic/libc-start.c
+   if HAVE_CLEANUP_JMP_BUF is defined.  */
+ENTRY (__GI__setjmp)
+	li r4,0			/* Set second argument to 0.  */
+	b __vmx__sigsetjmp@local
+END (__GI__setjmp)
+
+ENTRY (__vmx_setjmp)
+	li r4,0			/* Set second argument to 0.  */
+	b __vmx__sigsetjmp@local
+END (__vmx_setjmp)
+libc_hidden_def (__vmx_setjmp)
+#endif /* IS_IN (libc) */
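
The stubs above implement _setjmp by zeroing the second argument and branching to __sigsetjmp, i.e. _setjmp (env) behaves like sigsetjmp (env, 0) and does not save the signal mask. A small usage sketch of that equivalence:

    #include <setjmp.h>
    #include <stdio.h>

    static sigjmp_buf env;

    int
    main (void)
    {
      /* _setjmp (env) would behave like this sigsetjmp call with a zero
         savemask argument: the signal mask is not saved or restored.  */
      if (sigsetjmp (env, 0) == 0)
        {
          puts ("first return");
          siglongjmp (env, 1);
        }
      puts ("returned via siglongjmp");
      return 0;
    }
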
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/bsd-setjmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/bsd-setjmp.S
new file mode 100644
index 0000000000..212d6ce8b1
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/bsd-setjmp.S
@@ -0,0 +1,39 @@
+/* BSD `setjmp' entry point to `sigsetjmp (..., 1)'.  PowerPC32/64 version.
+   Copyright (C) 1994-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+#include <shlib-compat.h>
+#include <libc-symbols.h>
+#include <sysdep.h>
+
+#if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+
+ENTRY (__novmxsetjmp)
+	li r4,1			/* Set second argument to 1.  */
+	b __novmx__sigsetjmp@local
+END (__novmxsetjmp)
+strong_alias (__novmxsetjmp, __novmx__setjmp)
+compat_symbol (libc, __novmxsetjmp, setjmp, GLIBC_2_0)
+
+#endif  /* defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4) ) */
+
+ENTRY (__vmxsetjmp)
+	li r4,1			/* Set second argument to 1.  */
+	b __vmx__sigsetjmp@local
+END (__vmxsetjmp)
+strong_alias (__vmxsetjmp, __vmx__setjmp)
+strong_alias (__vmx__setjmp, __setjmp)
+versioned_symbol (libc, __vmxsetjmp, setjmp, GLIBC_2_3_4)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/bzero.S b/REORG.TODO/sysdeps/powerpc/powerpc32/bzero.S
new file mode 100644
index 0000000000..2638b12db0
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/bzero.S
@@ -0,0 +1,27 @@
+/* Optimized bzero `implementation' for PowerPC.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+ENTRY (__bzero)
+
+	mr	r5,r4
+	li	r4,0
+	b	memset@local
+END (__bzero)
+weak_alias (__bzero, bzero)
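
The three instructions above just shuffle the arguments into the memset calling convention; the C equivalent is simply:

    #include <string.h>

    /* bzero (s, n) == memset (s, 0, n); the assembly tail-calls memset@local.  */
    void
    bzero_ref (void *s, size_t n)
    {
      memset (s, 0, n);
    }
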
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/cell/memcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/cell/memcpy.S
new file mode 100644
index 0000000000..a7f761408a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/cell/memcpy.S
@@ -0,0 +1,242 @@
+/* Optimized memcpy implementation for CELL BE PowerPC.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#define PREFETCH_AHEAD 6	/* no cache lines SRC prefetching ahead  */
+#define ZERO_AHEAD 4		/* no cache lines DST zeroing ahead  */
+
+/* memcpy routine optimized for CELL-BE-PPC	v2.0
+ *
+ * The CELL PPC core has 1 integer unit and 1 load/store unit
+ * CELL:
+ * 1st level data cache = 32K
+ * 2nd level data cache = 512K
+ * 3rd level data cache = 0K
+ * With 3.2 GHz clockrate the latency to 2nd level cache is >36 clocks,
+ * latency to memory is >400 clocks
+ * To improve copy performance we need to prefetch source data
+ * far ahead to hide this latency
+ * For best performance, instruction forms ending in "." like "andi."
+ * should be avoided, as they are implemented in microcode on CELL.
+ * The below code is loop unrolled for the CELL cache line of 128 bytes
+ */
+
+.align  7
+
+EALIGN (memcpy, 5, 0)
+	CALL_MCOUNT
+
+	dcbt	0,r4		/* Prefetch ONE SRC cacheline  */
+	cmplwi	cr1,r5,16	/* is size < 16 ?  */
+	mr	r6,r3
+	blt+	cr1,.Lshortcopy
+
+.Lbigcopy:
+	neg	r8,r3		/* LS 4 bits = # bytes to 16-byte dest bdry  */
+	clrlwi  r8,r8,32-4	/* align to 16byte boundary  */
+	sub     r7,r4,r3
+	cmplwi	cr0,r8,0
+	beq+	.Ldst_aligned
+
+.Ldst_unaligned:
+	mtcrf	0x01,r8		/* put #bytes to boundary into cr7  */
+	subf	r5,r8,r5
+
+	bf	cr7*4+3,1f
+	lbzx	r0,r7,r6	/* copy 1 byte  */
+	stb	r0,0(r6)
+	addi	r6,r6,1
+1:	bf	cr7*4+2,2f
+	lhzx	r0,r7,r6	/* copy 2 byte  */
+	sth	r0,0(r6)
+	addi	r6,r6,2
+2:	bf	cr7*4+1,4f
+	lwzx	r0,r7,r6	/* copy 4 byte  */
+	stw	r0,0(r6)
+	addi	r6,r6,4
+4:	bf	cr7*4+0,8f
+	lfdx	fp9,r7,r6	/* copy 8 byte  */
+	stfd	fp9,0(r6)
+	addi	r6,r6,8
+8:
+	add	r4,r7,r6
+
+.Ldst_aligned:
+
+	cmpwi	cr5,r5,128-1
+
+	neg	r7,r6
+	addi	r6,r6,-8	/* prepare for stfdu  */
+	addi	r4,r4,-8	/* prepare for lfdu  */
+
+	clrlwi  r7,r7,32-7	/* align to cacheline boundary  */
+	ble+	cr5,.Llessthancacheline
+
+	cmplwi	cr6,r7,0
+	subf	r5,r7,r5
+	srwi	r7,r7,4		/* divide size by 16  */
+	srwi	r10,r5,7	/* number of cache lines to copy  */
+
+	cmplwi	r10,0
+	li	r11,0		/* number cachelines to copy with prefetch  */
+	beq	.Lnocacheprefetch
+
+	cmplwi	r10,PREFETCH_AHEAD
+	li	r12,128+8	/* prefetch distance  */
+	ble	.Llessthanmaxprefetch
+
+	subi	r11,r10,PREFETCH_AHEAD
+	li	r10,PREFETCH_AHEAD
+
+.Llessthanmaxprefetch:
+	mtctr	r10
+
+.LprefetchSRC:
+	dcbt    r12,r4
+	addi    r12,r12,128
+	bdnz    .LprefetchSRC
+
+.Lnocacheprefetch:
+	mtctr	r7
+	cmplwi	cr1,r5,128
+	clrlwi  r5,r5,32-7
+	beq	cr6,.Lcachelinealigned
+
+.Laligntocacheline:
+	lfd	fp9,0x08(r4)
+	lfdu	fp10,0x10(r4)
+	stfd	fp9,0x08(r6)
+	stfdu	fp10,0x10(r6)
+	bdnz	.Laligntocacheline
+
+
+.Lcachelinealigned:		/* copy while cache lines  */
+
+	blt-	cr1,.Llessthancacheline	/* size <128  */
+
+.Louterloop:
+	cmpwi   r11,0
+	mtctr	r11
+	beq-	.Lendloop
+
+	li	r11,128*ZERO_AHEAD +8	/* DCBZ dist  */
+
+.align	4
+	/* Copy whole cachelines, optimized by prefetching SRC cacheline  */
+.Lloop:				/* Copy aligned body  */
+	dcbt	r12,r4		/* PREFETCH SOURCE some cache lines ahead  */
+	lfd	fp9, 0x08(r4)
+	dcbz	r11,r6
+	lfd	fp10, 0x10(r4)	/* 4 register stride copy is optimal  */
+	lfd	fp11, 0x18(r4)	/* to hide 1st level cache latency.  */
+	lfd	fp12, 0x20(r4)
+	stfd	fp9, 0x08(r6)
+	stfd	fp10, 0x10(r6)
+	stfd	fp11, 0x18(r6)
+	stfd	fp12, 0x20(r6)
+	lfd	fp9, 0x28(r4)
+	lfd	fp10, 0x30(r4)
+	lfd	fp11, 0x38(r4)
+	lfd	fp12, 0x40(r4)
+	stfd	fp9, 0x28(r6)
+	stfd	fp10, 0x30(r6)
+	stfd	fp11, 0x38(r6)
+	stfd	fp12, 0x40(r6)
+	lfd	fp9, 0x48(r4)
+	lfd	fp10, 0x50(r4)
+	lfd	fp11, 0x58(r4)
+	lfd	fp12, 0x60(r4)
+	stfd	fp9, 0x48(r6)
+	stfd	fp10, 0x50(r6)
+	stfd	fp11, 0x58(r6)
+	stfd	fp12, 0x60(r6)
+	lfd	fp9, 0x68(r4)
+	lfd	fp10, 0x70(r4)
+	lfd	fp11, 0x78(r4)
+	lfdu	fp12, 0x80(r4)
+	stfd	fp9, 0x68(r6)
+	stfd	fp10, 0x70(r6)
+	stfd	fp11, 0x78(r6)
+	stfdu	fp12, 0x80(r6)
+
+	bdnz	.Lloop
+
+.Lendloop:
+	cmpwi	r10,0
+	slwi	r10,r10,2	/* adjust from 128 to 32 byte stride  */
+	beq-	.Lendloop2
+	mtctr	r10
+
+.Lloop2:			/* Copy aligned body  */
+	lfd	fp9, 0x08(r4)
+	lfd	fp10, 0x10(r4)
+	lfd	fp11, 0x18(r4)
+	lfdu	fp12, 0x20(r4)
+	stfd	fp9, 0x08(r6)
+	stfd	fp10, 0x10(r6)
+	stfd	fp11, 0x18(r6)
+	stfdu	fp12, 0x20(r6)
+
+	bdnz	.Lloop2
+.Lendloop2:
+
+.Llessthancacheline:		/* less than cache to do ?  */
+	cmplwi	cr0,r5,16
+	srwi	r7,r5,4		/* divide size by 16  */
+	blt-	.Ldo_lt16
+	mtctr	r7
+
+.Lcopy_remaining:
+	lfd	fp9,0x08(r4)
+	lfdu	fp10,0x10(r4)
+	stfd	fp9,0x08(r6)
+	stfdu	fp10,0x10(r6)
+	bdnz	.Lcopy_remaining
+
+.Ldo_lt16:			/* less than 16 ?  */
+	cmplwi	cr0,r5,0	/* copy remaining bytes (0-15)  */
+	beqlr+			/* no rest to copy  */
+	addi	r4,r4,8
+	addi	r6,r6,8
+
+.Lshortcopy:			/* SIMPLE COPY to handle size =< 15 bytes  */
+	mtcrf	0x01,r5
+	sub	r7,r4,r6
+	bf-	cr7*4+0,8f
+	lfdx	fp9,r7,r6	/* copy 8 byte  */
+	stfd	fp9,0(r6)
+	addi	r6,r6,8
+8:
+	bf	cr7*4+1,4f
+	lwzx	r0,r7,r6	/* copy 4 byte  */
+	stw	r0,0(r6)
+	addi	r6,r6,4
+4:
+	bf	cr7*4+2,2f
+	lhzx	r0,r7,r6	/* copy 2 byte  */
+	sth	r0,0(r6)
+	addi	r6,r6,2
+2:
+	bf	cr7*4+3,1f
+	lbzx	r0,r7,r6	/* copy 1 byte  */
+	stb	r0,0(r6)
+1:	blr
+
+END (memcpy)
+libc_hidden_builtin_def (memcpy)
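
The prefetch bookkeeping above (r10 = initial batch of dcbt hints, r11 = lines still copied with one dcbt each) keeps the source prefetch window roughly PREFETCH_AHEAD cache lines in front of the copy, so the final PREFETCH_AHEAD lines are copied without issuing further prefetches. Below is a hedged C sketch of that scheduling for whole 128-byte lines; it is illustrative only, with __builtin_prefetch standing in for dcbt.

    #include <stddef.h>
    #include <string.h>

    enum { LINE = 128, PREFETCH_AHEAD_LINES = 6 };

    /* Copy 'lines' whole cache lines, keeping the source prefetched ahead.  */
    static void
    copy_lines_prefetched (unsigned char *dst, const unsigned char *src,
                           size_t lines)
    {
      size_t batch = lines < PREFETCH_AHEAD_LINES ? lines : PREFETCH_AHEAD_LINES;
      size_t with_prefetch = lines - batch;

      /* Initial batch: touch the next few source lines before copying starts.  */
      for (size_t i = 0; i < batch; i++)
        __builtin_prefetch (src + (i + 1) * LINE);

      for (size_t i = 0; i < lines; i++)
        {
          /* While data remains beyond the window, keep the window full; the
             last 'batch' lines are copied without further prefetching.  */
          if (i < with_prefetch)
            __builtin_prefetch (src + (i + 1 + batch) * LINE);
          memcpy (dst + i * LINE, src + i * LINE, LINE);
        }
    }
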
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/compat-ppc-mcount.S b/REORG.TODO/sysdeps/powerpc/powerpc32/compat-ppc-mcount.S
new file mode 100644
index 0000000000..2a9cb24072
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/compat-ppc-mcount.S
@@ -0,0 +1,11 @@
+#include <shlib-compat.h>
+
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_15)
+
+	compat_text_section
+# define _mcount __compat_mcount
+# include "ppc-mcount.S"
+# undef _mcount
+
+compat_symbol (libc, __compat_mcount, _mcount, GLIBC_2_0)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/configure b/REORG.TODO/sysdeps/powerpc/powerpc32/configure
new file mode 100644
index 0000000000..29cfd53e8b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/configure
@@ -0,0 +1,29 @@
+# This file is generated from configure.ac by Autoconf.  DO NOT EDIT!
+ # Local configure fragment for sysdeps/powerpc/powerpc32.
+
+# See whether GCC uses -msecure-plt.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for -msecure-plt by default" >&5
+$as_echo_n "checking for -msecure-plt by default... " >&6; }
+if ${libc_cv_ppc_secure_plt+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  echo 'int foo (void) { extern int bar; return bar; }' > conftest.c
+libc_cv_ppc_secure_plt=no
+if { ac_try='${CC-cc} -S $CFLAGS conftest.c -fpic -o conftest.s 1>&5'
+  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+  (eval $ac_try) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+  if grep '_GLOBAL_OFFSET_TABLE_-.*@ha' conftest.s > /dev/null 2>&1; then
+    libc_cv_ppc_secure_plt=yes
+  fi
+fi
+rm -rf conftest*
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_ppc_secure_plt" >&5
+$as_echo "$libc_cv_ppc_secure_plt" >&6; }
+if test $libc_cv_ppc_secure_plt = yes; then
+  $as_echo "#define HAVE_PPC_SECURE_PLT 1" >>confdefs.h
+
+fi
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/configure.ac b/REORG.TODO/sysdeps/powerpc/powerpc32/configure.ac
new file mode 100644
index 0000000000..5d3a9b509d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/configure.ac
@@ -0,0 +1,16 @@
+GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
+# Local configure fragment for sysdeps/powerpc/powerpc32.
+
+# See whether GCC uses -msecure-plt.
+AC_CACHE_CHECK(for -msecure-plt by default, libc_cv_ppc_secure_plt, [dnl
+echo 'int foo (void) { extern int bar; return bar; }' > conftest.c
+libc_cv_ppc_secure_plt=no
+if AC_TRY_COMMAND(${CC-cc} -S $CFLAGS conftest.c -fpic -o conftest.s 1>&AS_MESSAGE_LOG_FD); then
+  if grep '_GLOBAL_OFFSET_TABLE_-.*@ha' conftest.s > /dev/null 2>&1; then
+    libc_cv_ppc_secure_plt=yes
+  fi
+fi
+rm -rf conftest*])
+if test $libc_cv_ppc_secure_plt = yes; then
+  AC_DEFINE(HAVE_PPC_SECURE_PLT)
+fi
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/crti.S b/REORG.TODO/sysdeps/powerpc/powerpc32/crti.S
new file mode 100644
index 0000000000..50b02630c3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/crti.S
@@ -0,0 +1,89 @@
+/* Special .init and .fini section support for PowerPC.
+   Copyright (C) 2012-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   In addition to the permissions in the GNU Lesser General Public
+   License, the Free Software Foundation gives you unlimited
+   permission to link the compiled version of this file with other
+   programs, and to distribute those programs without any restriction
+   coming from the use of this file. (The GNU Lesser General Public
+   License restrictions do apply in other respects; for example, they
+   cover modification of the file, and distribution when not linked
+   into another program.)
+
+   Note that people who make modified versions of this file are not
+   obligated to grant this special exception for their modified
+   versions; it is their choice whether to do so. The GNU Lesser
+   General Public License gives permission to release a modified
+   version without this exception; this exception also makes it
+   possible to release a modified version which carries forward this
+   exception.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* crti.S puts a function prologue at the beginning of the .init and
+   .fini sections and defines global symbols for those addresses, so
+   they can be called as functions.  The symbols _init and _fini are
+   magic and cause the linker to emit DT_INIT and DT_FINI.  */
+
+#include <libc-symbols.h>
+#include <sysdep.h>
+
+#ifndef PREINIT_FUNCTION
+# define PREINIT_FUNCTION __gmon_start__
+#endif
+
+#ifndef PREINIT_FUNCTION_WEAK
+# define PREINIT_FUNCTION_WEAK 1
+#endif
+
+#if PREINIT_FUNCTION_WEAK
+	weak_extern (PREINIT_FUNCTION)
+#else
+	.hidden PREINIT_FUNCTION
+#endif
+
+	.section .init,"ax",@progbits
+	.align	2
+	.globl	_init
+	.type	_init, @function
+_init:
+	stwu r1, -16(r1)
+	mflr r0
+	stw r0, 20(r1)
+	stw r30, 8(r1)
+	SETUP_GOT_ACCESS (r30, .Lgot_label_i)
+	addis r30, r30, _GLOBAL_OFFSET_TABLE_-.Lgot_label_i@ha
+	addi r30, r30, _GLOBAL_OFFSET_TABLE_-.Lgot_label_i@l
+#if PREINIT_FUNCTION_WEAK
+	lwz r0, PREINIT_FUNCTION@got(r30)
+	cmpwi cr7, r0, 0
+	beq+ cr7, 1f
+	bl PREINIT_FUNCTION@plt
+1:
+#else
+	bl PREINIT_FUNCTION@local
+#endif
+
+	.section .fini,"ax",@progbits
+	.align	2
+	.globl	_fini
+	.type	_fini, @function
+_fini:
+	stwu r1, -16(r1)
+	mflr r0
+	stw r0, 20(r1)
+	stw r30, 8(r1)
+	SETUP_GOT_ACCESS (r30, .Lgot_label_f)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/crtn.S b/REORG.TODO/sysdeps/powerpc/powerpc32/crtn.S
new file mode 100644
index 0000000000..67be2950fe
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/crtn.S
@@ -0,0 +1,53 @@
+/* Special .init and .fini section support for PowerPC.
+   Copyright (C) 2012-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   In addition to the permissions in the GNU Lesser General Public
+   License, the Free Software Foundation gives you unlimited
+   permission to link the compiled version of this file with other
+   programs, and to distribute those programs without any restriction
+   coming from the use of this file. (The GNU Lesser General Public
+   License restrictions do apply in other respects; for example, they
+   cover modification of the file, and distribution when not linked
+   into another program.)
+
+   Note that people who make modified versions of this file are not
+   obligated to grant this special exception for their modified
+   versions; it is their choice whether to do so. The GNU Lesser
+   General Public License gives permission to release a modified
+   version without this exception; this exception also makes it
+   possible to release a modified version which carries forward this
+   exception.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* crtn.S puts function epilogues in the .init and .fini sections
+   corresponding to the prologues in crti.S. */
+
+#include <sysdep.h>
+
+	.section .init,"ax",@progbits
+	lwz r0, 20(r1)
+	mtlr r0
+	lwz r30, 8(r1)
+	addi r1, r1, 16
+	blr
+
+	.section .fini,"ax",@progbits
+	lwz r0, 20(r1)
+	mtlr r0
+	lwz r30, 8(r1)
+	addi r1, r1, 16
+	blr
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/dl-dtprocnum.h b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-dtprocnum.h
new file mode 100644
index 0000000000..7fe2be7939
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-dtprocnum.h
@@ -0,0 +1,3 @@
+/* Number of extra dynamic section entries for this architecture.  By
+   default there are none.  */
+#define DT_THISPROCNUM	DT_PPC_NUM
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/dl-irel.h b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-irel.h
new file mode 100644
index 0000000000..e5d6540ce4
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-irel.h
@@ -0,0 +1,52 @@
+/* Machine-dependent ELF indirect relocation inline functions.
+   PowerPC version.
+   Copyright (C) 2009-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _DL_IREL_H
+#define _DL_IREL_H
+
+#include <stdio.h>
+#include <unistd.h>
+#include <ldsodefs.h>
+
+#define ELF_MACHINE_IRELA	1
+
+static inline Elf32_Addr
+__attribute ((always_inline))
+elf_ifunc_invoke (Elf32_Addr addr)
+{
+  return ((Elf32_Addr (*) (unsigned long int)) (addr)) (GLRO(dl_hwcap));
+}
+
+static inline void
+__attribute ((always_inline))
+elf_irela (const Elf32_Rela *reloc)
+{
+  unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
+
+  if (__glibc_likely (r_type == R_PPC_IRELATIVE))
+    {
+      Elf32_Addr *const reloc_addr = (void *) reloc->r_offset;
+      Elf32_Addr value = elf_ifunc_invoke(reloc->r_addend);
+      *reloc_addr = value;
+    }
+  else
+    __libc_fatal ("unexpected reloc type in static binary");
+}
+
+#endif /* dl-irel.h */
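
The dl-irel.h code above applies R_PPC_IRELATIVE relocations by calling the IFUNC resolver whose address sits in r_addend, passing it the hwcap word, and writing the returned address into the relocated slot. The standalone sketch below mirrors that flow at user level; every name in it (resolve_add, add_impl, the fake hwcap value) is hypothetical and only illustrates the pattern, not glibc's internal API.

/* Standalone sketch of the IFUNC pattern applied by elf_irela above.
   Hypothetical names throughout; this is not glibc's internal API.  */
#include <stdio.h>

typedef int (*add_fn) (int, int);

static int add_generic (int a, int b) { return a + b; }
static int add_fancy (int a, int b) { return a + b; }  /* stand-in for an optimized variant */

/* Plays the role of the resolver that elf_ifunc_invoke calls: it receives
   a capability word and returns the address of the chosen implementation.  */
static add_fn
resolve_add (unsigned long int hwcap)
{
  return (hwcap & 1) ? add_fancy : add_generic;
}

/* Plays the role of the relocated word that elf_irela writes.  */
static add_fn add_impl;

int
main (void)
{
  unsigned long int fake_hwcap = 1;      /* would be GLRO(dl_hwcap) in ld.so */
  add_impl = resolve_add (fake_hwcap);   /* *reloc_addr = resolver (hwcap)   */
  printf ("%d\n", add_impl (2, 3));
  return 0;
}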
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/dl-machine.c b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-machine.c
new file mode 100644
index 0000000000..2d6a576552
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-machine.c
@@ -0,0 +1,608 @@
+/* Machine-dependent ELF dynamic relocation functions.  PowerPC version.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <unistd.h>
+#include <string.h>
+#include <sys/param.h>
+#include <link.h>
+#include <ldsodefs.h>
+#include <elf/dynamic-link.h>
+#include <dl-machine.h>
+#include <_itoa.h>
+
+/* The value __cache_line_size is defined in dl-sysdep.c and is initialised
+   by _dl_sysdep_start via DL_PLATFORM_INIT.  */
+extern int __cache_line_size attribute_hidden;
+
+
+/* Stuff for the PLT.  */
+#define PLT_INITIAL_ENTRY_WORDS 18
+#define PLT_LONGBRANCH_ENTRY_WORDS 0
+#define PLT_TRAMPOLINE_ENTRY_WORDS 6
+#define PLT_DOUBLE_SIZE (1<<13)
+#define PLT_ENTRY_START_WORDS(entry_number) \
+  (PLT_INITIAL_ENTRY_WORDS + (entry_number)*2				\
+   + ((entry_number) > PLT_DOUBLE_SIZE					\
+      ? ((entry_number) - PLT_DOUBLE_SIZE)*2				\
+      : 0))
+#define PLT_DATA_START_WORDS(num_entries) PLT_ENTRY_START_WORDS(num_entries)
+
+/* Macros to build PowerPC opcode words.  */
+#define OPCODE_ADDI(rd,ra,simm) \
+  (0x38000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADDIS(rd,ra,simm) \
+  (0x3c000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
+#define OPCODE_ADD(rd,ra,rb) \
+  (0x7c000214 | (rd) << 21 | (ra) << 16 | (rb) << 11)
+#define OPCODE_B(target) (0x48000000 | ((target) & 0x03fffffc))
+#define OPCODE_BA(target) (0x48000002 | ((target) & 0x03fffffc))
+#define OPCODE_BCTR() 0x4e800420
+#define OPCODE_LWZ(rd,d,ra) \
+  (0x80000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_LWZU(rd,d,ra) \
+  (0x84000000 | (rd) << 21 | (ra) << 16 | ((d) & 0xffff))
+#define OPCODE_MTCTR(rd) (0x7C0903A6 | (rd) << 21)
+#define OPCODE_RLWINM(ra,rs,sh,mb,me) \
+  (0x54000000 | (rs) << 21 | (ra) << 16 | (sh) << 11 | (mb) << 6 | (me) << 1)
+
+#define OPCODE_LI(rd,simm)    OPCODE_ADDI(rd,0,simm)
+#define OPCODE_ADDIS_HI(rd,ra,value) \
+  OPCODE_ADDIS(rd,ra,((value) + 0x8000) >> 16)
+#define OPCODE_LIS_HI(rd,value) OPCODE_ADDIS_HI(rd,0,value)
+#define OPCODE_SLWI(ra,rs,sh) OPCODE_RLWINM(ra,rs,sh,0,31-sh)
+
+
+#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
+#define PPC_SYNC asm volatile ("sync" : : : "memory")
+#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
+#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
+#define PPC_DIE asm volatile ("tweq 0,0")
+
+/* Use this when you've modified some code, but it won't be in the
+   instruction fetch queue (or when it doesn't matter if it is). */
+#define MODIFIED_CODE_NOQUEUE(where) \
+     do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
+/* Use this when it might be in the instruction queue. */
+#define MODIFIED_CODE(where) \
+     do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)
+
+
+/* The idea here is that to conform to the ABI, we are supposed to try
+   to load dynamic objects between 0x10000 (we actually use 0x40000 as
+   the lower bound, to increase the chance of a memory reference from
+   a null pointer giving a segfault) and the program's load address;
+   this may allow us to use a branch instruction in the PLT rather
+   than a computed jump.  The address is only used as a preference for
+   mmap, so if we get it wrong the worst that happens is that it gets
+   mapped somewhere else.  */
+
+ElfW(Addr)
+__elf_preferred_address (struct link_map *loader, size_t maplength,
+			 ElfW(Addr) mapstartpref)
+{
+  ElfW(Addr) low, high;
+  struct link_map *l;
+  Lmid_t nsid;
+
+  /* If the object has a preference, load it there!  */
+  if (mapstartpref != 0)
+    return mapstartpref;
+
+  /* Otherwise, quickly look for a suitable gap between 0x3FFFF and
+     0x70000000.  0x3FFFF is so that references off NULL pointers will
+     cause a segfault, 0x70000000 is just paranoia (it should always
+     be superseded by the program's load address).  */
+  low =  0x0003FFFF;
+  high = 0x70000000;
+  for (nsid = 0; nsid < DL_NNS; ++nsid)
+    for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
+      {
+	ElfW(Addr) mapstart, mapend;
+	mapstart = l->l_map_start & ~(GLRO(dl_pagesize) - 1);
+	mapend = l->l_map_end | (GLRO(dl_pagesize) - 1);
+	assert (mapend > mapstart);
+
+	/* Prefer gaps below the main executable, note that l ==
+	   _dl_loaded does not work for static binaries loading
+	   e.g. libnss_*.so.  */
+	if ((mapend >= high || l->l_type == lt_executable)
+	    && high >= mapstart)
+	  high = mapstart;
+	else if (mapend >= low && low >= mapstart)
+	  low = mapend;
+	else if (high >= mapend && mapstart >= low)
+	  {
+	    if (high - mapend >= mapstart - low)
+	      low = mapend;
+	    else
+	      high = mapstart;
+	  }
+      }
+
+  high -= 0x10000; /* Allow some room between objects.  */
+  maplength = (maplength | (GLRO(dl_pagesize) - 1)) + 1;
+  if (high <= low || high - low < maplength )
+    return 0;
+  return high - maplength;  /* Both high and maplength are page-aligned.  */
+}
+
+/* Set up the loaded object described by L so its unrelocated PLT
+   entries will jump to the on-demand fixup code in dl-runtime.c.
+   Also install a small trampoline to be used by entries that have
+   been relocated to an address too far away for a single branch.  */
+
+/* There are many kinds of PLT entries:
+
+   (1)	A direct jump to the actual routine, either a relative or
+	absolute branch.  These are set up in __elf_machine_fixup_plt.
+
+   (2)	Short lazy entries.  These cover the first 8192 slots in
+        the PLT, and look like (where 'index' goes from 0 to 8191):
+
+	li %r11, index*4
+	b  &plt[PLT_TRAMPOLINE_ENTRY_WORDS+1]
+
+   (3)	Short indirect jumps.  These replace (2) when a direct jump
+	wouldn't reach.  They look the same except that the branch
+	is 'b &plt[PLT_LONGBRANCH_ENTRY_WORDS]'.
+
+   (4)  Long lazy entries.  These cover the slots when a short entry
+	won't fit ('index*4' overflows its field), and look like:
+
+	lis %r11, %hi(index*4 + &plt[PLT_DATA_START_WORDS])
+	lwzu %r12, %r11, %lo(index*4 + &plt[PLT_DATA_START_WORDS])
+	b  &plt[PLT_TRAMPOLINE_ENTRY_WORDS]
+	bctr
+
+   (5)	Long indirect jumps.  These replace (4) when a direct jump
+	wouldn't reach.  They look like:
+
+	lis %r11, %hi(index*4 + &plt[PLT_DATA_START_WORDS])
+	lwz %r12, %r11, %lo(index*4 + &plt[PLT_DATA_START_WORDS])
+	mtctr %r12
+	bctr
+
+   (6) Long direct jumps.  These are used when thread-safety is not
+       required.  They look like:
+
+       lis %r12, %hi(finaladdr)
+       addi %r12, %r12, %lo(finaladdr)
+       mtctr %r12
+       bctr
+
+
+   The lazy entries, (2) and (4), are set up here in
+   __elf_machine_runtime_setup.  (1), (3), and (5) are set up in
+   __elf_machine_fixup_plt.  (1), (3), and (6) can also be constructed
+   in __process_machine_rela.
+
+   The reason for the somewhat strange construction of the long
+   entries, (4) and (5), is that we need to ensure thread-safety.  For
+   (1) and (3), this is obvious because only one instruction is
+   changed and the PPC architecture guarantees that aligned stores are
+   atomic.  For (5), this is more tricky.  When changing (4) to (5),
+   the `b' instruction is first changed to `mtctr'; this is safe
+   and is why the `lwzu' instruction is not just a simple `addi'.
+   Once this is done, and is visible to all processors, the `lwzu' can
+   safely be changed to a `lwz'.  */
+int
+__elf_machine_runtime_setup (struct link_map *map, int lazy, int profile)
+{
+  if (map->l_info[DT_JMPREL])
+    {
+      Elf32_Word i;
+      Elf32_Word *plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
+      Elf32_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+				    / sizeof (Elf32_Rela));
+      Elf32_Word rel_offset_words = PLT_DATA_START_WORDS (num_plt_entries);
+      Elf32_Word data_words = (Elf32_Word) (plt + rel_offset_words);
+      Elf32_Word size_modified;
+
+      extern void _dl_runtime_resolve (void);
+      extern void _dl_prof_resolve (void);
+
+      /* Convert the index in r11 into an actual address, and get the
+	 word at that address.  */
+      plt[PLT_LONGBRANCH_ENTRY_WORDS] = OPCODE_ADDIS_HI (11, 11, data_words);
+      plt[PLT_LONGBRANCH_ENTRY_WORDS + 1] = OPCODE_LWZ (11, data_words, 11);
+
+      /* Call the procedure at that address.  */
+      plt[PLT_LONGBRANCH_ENTRY_WORDS + 2] = OPCODE_MTCTR (11);
+      plt[PLT_LONGBRANCH_ENTRY_WORDS + 3] = OPCODE_BCTR ();
+
+      if (lazy)
+	{
+	  Elf32_Word *tramp = plt + PLT_TRAMPOLINE_ENTRY_WORDS;
+	  Elf32_Word dlrr;
+	  Elf32_Word offset;
+
+#ifndef PROF
+	  dlrr = (Elf32_Word) (profile
+			       ? _dl_prof_resolve
+			       : _dl_runtime_resolve);
+	  if (profile && GLRO(dl_profile) != NULL
+	      && _dl_name_match_p (GLRO(dl_profile), map))
+	    /* This is the object we are looking for.  Say that we really
+	       want profiling and the timers are started.  */
+	    GL(dl_profile_map) = map;
+#else
+	  dlrr = (Elf32_Word) _dl_runtime_resolve;
+#endif
+
+	  /* For the long entries, subtract off data_words.  */
+	  tramp[0] = OPCODE_ADDIS_HI (11, 11, -data_words);
+	  tramp[1] = OPCODE_ADDI (11, 11, -data_words);
+
+	  /* Multiply index of entry by 3 (in r11).  */
+	  tramp[2] = OPCODE_SLWI (12, 11, 1);
+	  tramp[3] = OPCODE_ADD (11, 12, 11);
+	  if (dlrr <= 0x01fffffc || dlrr >= 0xfe000000)
+	    {
+	      /* Load address of link map in r12.  */
+	      tramp[4] = OPCODE_LI (12, (Elf32_Word) map);
+	      tramp[5] = OPCODE_ADDIS_HI (12, 12, (Elf32_Word) map);
+
+	      /* Call _dl_runtime_resolve.  */
+	      tramp[6] = OPCODE_BA (dlrr);
+	    }
+	  else
+	    {
+	      /* Get address of _dl_runtime_resolve in CTR.  */
+	      tramp[4] = OPCODE_LI (12, dlrr);
+	      tramp[5] = OPCODE_ADDIS_HI (12, 12, dlrr);
+	      tramp[6] = OPCODE_MTCTR (12);
+
+	      /* Load address of link map in r12.  */
+	      tramp[7] = OPCODE_LI (12, (Elf32_Word) map);
+	      tramp[8] = OPCODE_ADDIS_HI (12, 12, (Elf32_Word) map);
+
+	      /* Call _dl_runtime_resolve.  */
+	      tramp[9] = OPCODE_BCTR ();
+	    }
+
+	  /* Set up the lazy PLT entries.  */
+	  offset = PLT_INITIAL_ENTRY_WORDS;
+	  i = 0;
+	  while (i < num_plt_entries && i < PLT_DOUBLE_SIZE)
+	    {
+	      plt[offset  ] = OPCODE_LI (11, i * 4);
+	      plt[offset+1] = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS + 2
+					 - (offset+1))
+					* 4);
+	      i++;
+	      offset += 2;
+	    }
+	  while (i < num_plt_entries)
+	    {
+	      plt[offset  ] = OPCODE_LIS_HI (11, i * 4 + data_words);
+	      plt[offset+1] = OPCODE_LWZU (12, i * 4 + data_words, 11);
+	      plt[offset+2] = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS
+					 - (offset+2))
+					* 4);
+	      plt[offset+3] = OPCODE_BCTR ();
+	      i++;
+	      offset += 4;
+	    }
+	}
+
+      /* Now, we've modified code.  We need to write the changes from
+	 the data cache to a second-level unified cache, then make
+	 sure that stale data in the instruction cache is removed.
+	 (In a multiprocessor system, the effect is more complex.)
+	 Most of the PLT shouldn't be in the instruction cache, but
+	 there may be a little overlap at the start and the end.
+
+	 Assumes that dcbst and icbi apply to lines of 16 bytes or
+	 more.  Current known line sizes are 16, 32, and 128 bytes.
+	 The following gets the __cache_line_size, when available.  */
+
+      /* Default minimum 4 words per cache line.  */
+      int line_size_words = 4;
+
+      if (lazy && __cache_line_size != 0)
+	/* Convert bytes to words.  */
+	line_size_words = __cache_line_size / 4;
+
+      size_modified = lazy ? rel_offset_words : 6;
+      for (i = 0; i < size_modified; i += line_size_words)
+        PPC_DCBST (plt + i);
+      PPC_DCBST (plt + size_modified - 1);
+      PPC_SYNC;
+
+      for (i = 0; i < size_modified; i += line_size_words)
+        PPC_ICBI (plt + i);
+      PPC_ICBI (plt + size_modified - 1);
+      PPC_ISYNC;
+    }
+
+  return lazy;
+}
+
+Elf32_Addr
+__elf_machine_fixup_plt (struct link_map *map,
+			 Elf32_Addr *reloc_addr, Elf32_Addr finaladdr)
+{
+  Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
+  if (delta << 6 >> 6 == delta)
+    *reloc_addr = OPCODE_B (delta);
+  else if (finaladdr <= 0x01fffffc || finaladdr >= 0xfe000000)
+    *reloc_addr = OPCODE_BA (finaladdr);
+  else
+    {
+      Elf32_Word *plt, *data_words;
+      Elf32_Word index, offset, num_plt_entries;
+
+      num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+			 / sizeof(Elf32_Rela));
+      plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
+      offset = reloc_addr - plt;
+      index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
+      data_words = plt + PLT_DATA_START_WORDS (num_plt_entries);
+
+      reloc_addr += 1;
+
+      if (index < PLT_DOUBLE_SIZE)
+	{
+	  data_words[index] = finaladdr;
+	  PPC_SYNC;
+	  *reloc_addr = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS - (offset+1))
+				  * 4);
+	}
+      else
+	{
+	  index -= (index - PLT_DOUBLE_SIZE)/2;
+
+	  data_words[index] = finaladdr;
+	  PPC_SYNC;
+
+	  reloc_addr[1] = OPCODE_MTCTR (12);
+	  MODIFIED_CODE_NOQUEUE (reloc_addr + 1);
+	  PPC_SYNC;
+
+	  reloc_addr[0] = OPCODE_LWZ (12,
+				      (Elf32_Word) (data_words + index), 11);
+	}
+    }
+  MODIFIED_CODE (reloc_addr);
+  return finaladdr;
+}
+
+void
+_dl_reloc_overflow (struct link_map *map,
+		    const char *name,
+		    Elf32_Addr *const reloc_addr,
+		    const Elf32_Sym *refsym)
+{
+  char buffer[128];
+  char *t;
+  t = stpcpy (buffer, name);
+  t = stpcpy (t, " relocation at 0x00000000");
+  _itoa_word ((unsigned) reloc_addr, t, 16, 0);
+  if (refsym)
+    {
+      const char *strtab;
+
+      strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
+      t = stpcpy (t, " for symbol `");
+      t = stpcpy (t, strtab + refsym->st_name);
+      t = stpcpy (t, "'");
+    }
+  t = stpcpy (t, " out of range");
+  _dl_signal_error (0, map->l_name, NULL, buffer);
+}
+
+void
+__process_machine_rela (struct link_map *map,
+			const Elf32_Rela *reloc,
+			struct link_map *sym_map,
+			const Elf32_Sym *sym,
+			const Elf32_Sym *refsym,
+			Elf32_Addr *const reloc_addr,
+			Elf32_Addr const finaladdr,
+			int rinfo)
+{
+  union unaligned
+    {
+      uint16_t u2;
+      uint32_t u4;
+    } __attribute__((__packed__));
+
+  switch (rinfo)
+    {
+    case R_PPC_NONE:
+      return;
+
+    case R_PPC_ADDR32:
+    case R_PPC_GLOB_DAT:
+    case R_PPC_RELATIVE:
+      *reloc_addr = finaladdr;
+      return;
+
+    case R_PPC_IRELATIVE:
+      *reloc_addr = ((Elf32_Addr (*) (void)) finaladdr) ();
+      return;
+
+    case R_PPC_UADDR32:
+      ((union unaligned *) reloc_addr)->u4 = finaladdr;
+      break;
+
+    case R_PPC_ADDR24:
+      if (__glibc_unlikely (finaladdr > 0x01fffffc && finaladdr < 0xfe000000))
+	_dl_reloc_overflow (map,  "R_PPC_ADDR24", reloc_addr, refsym);
+      *reloc_addr = (*reloc_addr & 0xfc000003) | (finaladdr & 0x3fffffc);
+      break;
+
+    case R_PPC_ADDR16:
+      if (__glibc_unlikely (finaladdr > 0x7fff && finaladdr < 0xffff8000))
+	_dl_reloc_overflow (map,  "R_PPC_ADDR16", reloc_addr, refsym);
+      *(Elf32_Half*) reloc_addr = finaladdr;
+      break;
+
+    case R_PPC_UADDR16:
+      if (__glibc_unlikely (finaladdr > 0x7fff && finaladdr < 0xffff8000))
+	_dl_reloc_overflow (map,  "R_PPC_UADDR16", reloc_addr, refsym);
+      ((union unaligned *) reloc_addr)->u2 = finaladdr;
+      break;
+
+    case R_PPC_ADDR16_LO:
+      *(Elf32_Half*) reloc_addr = finaladdr;
+      break;
+
+    case R_PPC_ADDR16_HI:
+      *(Elf32_Half*) reloc_addr = finaladdr >> 16;
+      break;
+
+    case R_PPC_ADDR16_HA:
+      *(Elf32_Half*) reloc_addr = (finaladdr + 0x8000) >> 16;
+      break;
+
+    case R_PPC_ADDR14:
+    case R_PPC_ADDR14_BRTAKEN:
+    case R_PPC_ADDR14_BRNTAKEN:
+      if (__glibc_unlikely (finaladdr > 0x7fff && finaladdr < 0xffff8000))
+	_dl_reloc_overflow (map,  "R_PPC_ADDR14", reloc_addr, refsym);
+      *reloc_addr = (*reloc_addr & 0xffff0003) | (finaladdr & 0xfffc);
+      if (rinfo != R_PPC_ADDR14)
+	*reloc_addr = ((*reloc_addr & 0xffdfffff)
+		       | ((rinfo == R_PPC_ADDR14_BRTAKEN)
+			  ^ (finaladdr >> 31)) << 21);
+      break;
+
+    case R_PPC_REL24:
+      {
+	Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
+	if (delta << 6 >> 6 != delta)
+	  _dl_reloc_overflow (map,  "R_PPC_REL24", reloc_addr, refsym);
+	*reloc_addr = (*reloc_addr & 0xfc000003) | (delta & 0x3fffffc);
+      }
+      break;
+
+    case R_PPC_COPY:
+      if (sym == NULL)
+	/* This can happen in trace mode when an object could not be
+	   found.  */
+	return;
+      if (sym->st_size > refsym->st_size
+	  || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
+	{
+	  const char *strtab;
+
+	  strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
+	  _dl_error_printf ("\
+%s: Symbol `%s' has different size in shared object, consider re-linking\n",
+			    RTLD_PROGNAME, strtab + refsym->st_name);
+	}
+      memcpy (reloc_addr, (char *) finaladdr, MIN (sym->st_size,
+						   refsym->st_size));
+      return;
+
+    case R_PPC_REL32:
+      *reloc_addr = finaladdr - (Elf32_Word) reloc_addr;
+      return;
+
+    case R_PPC_JMP_SLOT:
+      /* It used to be that elf_machine_fixup_plt was used here,
+	 but that doesn't work when ld.so relocates itself
+	 for the second time.  On the bright side, there's
+         no need to worry about thread-safety here.  */
+      {
+	Elf32_Sword delta = finaladdr - (Elf32_Word) reloc_addr;
+	if (delta << 6 >> 6 == delta)
+	  *reloc_addr = OPCODE_B (delta);
+	else if (finaladdr <= 0x01fffffc || finaladdr >= 0xfe000000)
+	  *reloc_addr = OPCODE_BA (finaladdr);
+	else
+	  {
+	    Elf32_Word *plt, *data_words;
+	    Elf32_Word index, offset, num_plt_entries;
+
+	    plt = (Elf32_Word *) D_PTR (map, l_info[DT_PLTGOT]);
+	    offset = reloc_addr - plt;
+
+	    if (offset < PLT_DOUBLE_SIZE*2 + PLT_INITIAL_ENTRY_WORDS)
+	      {
+		index = (offset - PLT_INITIAL_ENTRY_WORDS)/2;
+		num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+				   / sizeof(Elf32_Rela));
+		data_words = plt + PLT_DATA_START_WORDS (num_plt_entries);
+		data_words[index] = finaladdr;
+		reloc_addr[0] = OPCODE_LI (11, index * 4);
+		reloc_addr[1] = OPCODE_B ((PLT_LONGBRANCH_ENTRY_WORDS
+					   - (offset+1))
+					  * 4);
+		MODIFIED_CODE_NOQUEUE (reloc_addr + 1);
+	      }
+	    else
+	      {
+		reloc_addr[0] = OPCODE_LIS_HI (12, finaladdr);
+		reloc_addr[1] = OPCODE_ADDI (12, 12, finaladdr);
+		reloc_addr[2] = OPCODE_MTCTR (12);
+		reloc_addr[3] = OPCODE_BCTR ();
+		MODIFIED_CODE_NOQUEUE (reloc_addr + 3);
+	      }
+	  }
+      }
+      break;
+
+#define DO_TLS_RELOC(suffix)						      \
+    case R_PPC_DTPREL##suffix:						      \
+      /* During relocation all TLS symbols are defined and used.	      \
+	 Therefore the offset is already correct.  */			      \
+      if (sym_map != NULL)						      \
+	do_reloc##suffix ("R_PPC_DTPREL"#suffix,			      \
+			  TLS_DTPREL_VALUE (sym, reloc));		      \
+      break;								      \
+    case R_PPC_TPREL##suffix:						      \
+      if (sym_map != NULL)						      \
+	{								      \
+	  CHECK_STATIC_TLS (map, sym_map);				      \
+	  do_reloc##suffix ("R_PPC_TPREL"#suffix,			      \
+			    TLS_TPREL_VALUE (sym_map, sym, reloc));	      \
+	}								      \
+      break;
+
+    inline void do_reloc16 (const char *r_name, Elf32_Addr value)
+      {
+	if (__glibc_unlikely (value > 0x7fff && value < 0xffff8000))
+	  _dl_reloc_overflow (map, r_name, reloc_addr, refsym);
+	*(Elf32_Half *) reloc_addr = value;
+      }
+    inline void do_reloc16_LO (const char *r_name, Elf32_Addr value)
+      {
+	*(Elf32_Half *) reloc_addr = value;
+      }
+    inline void do_reloc16_HI (const char *r_name, Elf32_Addr value)
+      {
+	*(Elf32_Half *) reloc_addr = value >> 16;
+      }
+    inline void do_reloc16_HA (const char *r_name, Elf32_Addr value)
+      {
+	*(Elf32_Half *) reloc_addr = (value + 0x8000) >> 16;
+      }
+    DO_TLS_RELOC (16)
+    DO_TLS_RELOC (16_LO)
+    DO_TLS_RELOC (16_HI)
+    DO_TLS_RELOC (16_HA)
+
+    default:
+      _dl_reloc_bad_type (map, rinfo, 0);
+      return;
+    }
+
+  MODIFIED_CODE_NOQUEUE (reloc_addr);
+}
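
The opcode macros and layout constants in dl-machine.c above fully determine the words that __elf_machine_runtime_setup writes into a lazy PLT slot. The short program below is a standalone sketch, not part of the glibc build: it reuses the same macro definitions to print the two instructions of a "short lazy" entry for an arbitrary slot index, so the encoding can be checked against a disassembler.

/* Standalone sketch (not part of the glibc build) of how the macros above
   encode a "short lazy" PLT entry: li r11,index*4 followed by a relative
   branch into the lazy-resolution trampoline.  */
#include <stdio.h>

#define PLT_INITIAL_ENTRY_WORDS 18
#define PLT_TRAMPOLINE_ENTRY_WORDS 6

#define OPCODE_ADDI(rd,ra,simm) \
  (0x38000000 | (rd) << 21 | (ra) << 16 | ((simm) & 0xffff))
#define OPCODE_LI(rd,simm) OPCODE_ADDI (rd, 0, simm)
#define OPCODE_B(target) (0x48000000 | ((target) & 0x03fffffc))

int
main (void)
{
  unsigned int index = 3;   /* PLT slot number, chosen arbitrarily */
  unsigned int offset = PLT_INITIAL_ENTRY_WORDS + index * 2;

  unsigned int w0 = OPCODE_LI (11, index * 4);
  /* The branch displacement is in bytes, relative to the branch itself,
     exactly as computed in __elf_machine_runtime_setup above.  */
  unsigned int w1 = OPCODE_B ((PLT_TRAMPOLINE_ENTRY_WORDS + 2 - (offset + 1)) * 4);

  printf ("plt[%u] = 0x%08x   li r11,%u\n", offset, w0, index * 4);
  printf ("plt[%u] = 0x%08x   b <trampoline>\n", offset + 1, w1);
  return 0;
}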
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/dl-machine.h b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-machine.h
new file mode 100644
index 0000000000..28eb50f92d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-machine.h
@@ -0,0 +1,455 @@
+/* Machine-dependent ELF dynamic relocation inline functions.  PowerPC version.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef dl_machine_h
+#define dl_machine_h
+
+#define ELF_MACHINE_NAME "powerpc"
+
+#include <assert.h>
+#include <dl-tls.h>
+#include <dl-irel.h>
+#include <hwcapinfo.h>
+
+/* Translate a processor specific dynamic tag to the index
+   in l_info array.  */
+#define DT_PPC(x) (DT_PPC_##x - DT_LOPROC + DT_NUM)
+
+/* Return nonzero iff ELF header is compatible with the running host.  */
+static inline int
+elf_machine_matches_host (const Elf32_Ehdr *ehdr)
+{
+  return ehdr->e_machine == EM_PPC;
+}
+
+/* Return the value of the GOT pointer.  */
+static inline Elf32_Addr * __attribute__ ((const))
+ppc_got (void)
+{
+  Elf32_Addr *got;
+
+  asm ("bcl 20,31,1f\n"
+       "1:	mflr %0\n"
+       "	addis %0,%0,_GLOBAL_OFFSET_TABLE_-1b@ha\n"
+       "	addi %0,%0,_GLOBAL_OFFSET_TABLE_-1b@l\n"
+       : "=b" (got) : : "lr");
+
+  return got;
+}
+
+/* Return the link-time address of _DYNAMIC, stored as
+   the first value in the GOT. */
+static inline Elf32_Addr __attribute__ ((const))
+elf_machine_dynamic (void)
+{
+  return *ppc_got ();
+}
+
+/* Return the run-time load address of the shared object.  */
+static inline Elf32_Addr __attribute__ ((const))
+elf_machine_load_address (void)
+{
+  Elf32_Addr *branchaddr;
+  Elf32_Addr runtime_dynamic;
+
+  /* This is much harder than you'd expect.  Possibly I'm missing something.
+     The 'obvious' way:
+
+       Apparently, "bcl 20,31,$+4" is what should be used to load LR
+       with the address of the next instruction.
+       I think this is so that machines that do bl/blr pairing don't
+       get confused.
+
+     asm ("bcl 20,31,0f ;"
+	  "0: mflr 0 ;"
+	  "lis %0,0b@ha;"
+	  "addi %0,%0,0b@l;"
+	  "subf %0,%0,0"
+	  : "=b" (addr) : : "r0", "lr");
+
+     doesn't work, because the linker doesn't have to (and in fact doesn't)
+     update the @ha and @l references; the loader (which runs after this
+     code) will do that.
+
+     Instead, we use the following trick:
+
+     The linker puts the _link-time_ address of _DYNAMIC at the first
+     word in the GOT. We could branch to that address, if we wanted,
+     by using an @local reloc; the linker works this out, so it's safe
+     to use now. We can't, of course, actually branch there, because
+     we'd cause an illegal instruction exception; so we need to compute
+     the address ourselves. That gives us the following code: */
+
+  /* Get address of the 'b _DYNAMIC@local'...  */
+  asm ("bcl 20,31,0f;"
+       "b _DYNAMIC@local;"
+       "0:"
+       : "=l" (branchaddr));
+
+  /* So now work out the difference between where the branch actually points,
+     and the offset of that location in memory from the start of the file.  */
+  runtime_dynamic = ((Elf32_Addr) branchaddr
+		     + ((Elf32_Sword) (*branchaddr << 6 & 0xffffff00) >> 6));
+
+  return runtime_dynamic - elf_machine_dynamic ();
+}
+
+#define ELF_MACHINE_BEFORE_RTLD_RELOC(dynamic_info) /* nothing */
+
+/* The PLT uses Elf32_Rela relocs.  */
+#define elf_machine_relplt elf_machine_rela
+
+/* Mask identifying addresses reserved for the user program,
+   where the dynamic linker should not map anything.  */
+#define ELF_MACHINE_USER_ADDRESS_MASK	0xf0000000UL
+
+/* The actual _start code is in dl-start.S.  Use a really
+   ugly bit of assembler to let dl-start.o see _dl_start.  */
+#define RTLD_START asm (".globl _dl_start");
+
+/* Decide where a relocatable object should be loaded.  */
+extern ElfW(Addr)
+__elf_preferred_address(struct link_map *loader, size_t maplength,
+			ElfW(Addr) mapstartpref);
+#define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) \
+  __elf_preferred_address (loader, maplength, mapstartpref)
+
+/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
+   PLT entries should not be allowed to define the value.
+   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to one
+   of the main executable's symbols, as for a COPY reloc.  */
+/* We never want to use a PLT entry as the destination of a
+   reloc, when what is being relocated is a branch. This is
+   partly for efficiency, but mostly so we avoid loops.  */
+#define elf_machine_type_class(type)			\
+  ((((type) == R_PPC_JMP_SLOT				\
+    || (type) == R_PPC_REL24				\
+    || ((type) >= R_PPC_DTPMOD32 /* contiguous TLS */	\
+	&& (type) <= R_PPC_DTPREL32)			\
+    || (type) == R_PPC_ADDR24) * ELF_RTYPE_CLASS_PLT)	\
+   | (((type) == R_PPC_COPY) * ELF_RTYPE_CLASS_COPY))
+
+/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
+#define ELF_MACHINE_JMP_SLOT	R_PPC_JMP_SLOT
+
+/* The PowerPC never uses REL relocations.  */
+#define ELF_MACHINE_NO_REL 1
+#define ELF_MACHINE_NO_RELA 0
+
+/* We define an initialization function to initialize HWCAP/HWCAP2 and
+   platform data so it can be copied into the TCB later.  This is called
+   very early in _dl_sysdep_start for dynamically linked binaries.  */
+#ifdef SHARED
+# define DL_PLATFORM_INIT dl_platform_init ()
+
+static inline void __attribute__ ((unused))
+dl_platform_init (void)
+{
+  __tcb_parse_hwcap_and_convert_at_platform ();
+}
+#endif
+
+/* Set up the loaded object described by MAP so its unrelocated PLT
+   entries will jump to the on-demand fixup code in dl-runtime.c.
+   Also install a small trampoline to be used by entries that have
+   been relocated to an address too far away for a single branch.  */
+extern int __elf_machine_runtime_setup (struct link_map *map,
+					int lazy, int profile);
+
+static inline int
+elf_machine_runtime_setup (struct link_map *map,
+			   int lazy, int profile)
+{
+  if (map->l_info[DT_JMPREL] == 0)
+    return lazy;
+
+  if (map->l_info[DT_PPC(GOT)] == 0)
+    /* Handle old style PLT.  */
+    return __elf_machine_runtime_setup (map, lazy, profile);
+
+  /* New style non-exec PLT consisting of an array of addresses.  */
+  map->l_info[DT_PPC(GOT)]->d_un.d_ptr += map->l_addr;
+  if (lazy)
+    {
+      Elf32_Addr *plt, *got, glink;
+      Elf32_Word num_plt_entries;
+      void (*dlrr) (void);
+      extern void _dl_runtime_resolve (void);
+      extern void _dl_prof_resolve (void);
+
+      if (__glibc_likely (!profile))
+	dlrr = _dl_runtime_resolve;
+      else
+	{
+	  if (GLRO(dl_profile) != NULL
+	      && _dl_name_match_p (GLRO(dl_profile), map))
+	    GL(dl_profile_map) = map;
+	  dlrr = _dl_prof_resolve;
+	}
+      got = (Elf32_Addr *) map->l_info[DT_PPC(GOT)]->d_un.d_ptr;
+      glink = got[1];
+      got[1] = (Elf32_Addr) dlrr;
+      got[2] = (Elf32_Addr) map;
+
+      /* Relocate everything in .plt by the load address offset.  */
+      plt = (Elf32_Addr *) D_PTR (map, l_info[DT_PLTGOT]);
+      num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+			 / sizeof (Elf32_Rela));
+
+      /* If a library is prelinked but we have to relocate anyway,
+	 we have to be able to undo the prelinking of .plt section.
+	 The prelinker saved the address of the start of the .glink
+	 section for us at got[1].  */
+      if (glink)
+	{
+	  glink += map->l_addr;
+	  while (num_plt_entries-- != 0)
+	    *plt++ = glink, glink += 4;
+	}
+      else
+	while (num_plt_entries-- != 0)
+	  *plt++ += map->l_addr;
+    }
+  return lazy;
+}
+
+/* Change the PLT entry whose reloc is 'reloc' to call the actual routine.  */
+extern Elf32_Addr __elf_machine_fixup_plt (struct link_map *map,
+					   Elf32_Addr *reloc_addr,
+					   Elf32_Addr finaladdr);
+
+static inline Elf32_Addr
+elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+		       const Elf32_Rela *reloc,
+		       Elf32_Addr *reloc_addr, Elf64_Addr finaladdr)
+{
+  if (map->l_info[DT_PPC(GOT)] == 0)
+    /* Handle old style PLT.  */
+    return __elf_machine_fixup_plt (map, reloc_addr, finaladdr);
+
+  *reloc_addr = finaladdr;
+  return finaladdr;
+}
+
+/* Return the final value of a plt relocation.  */
+static inline Elf32_Addr
+elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
+		       Elf32_Addr value)
+{
+  return value + reloc->r_addend;
+}
+
+
+/* Names of the architecture-specific auditing callback functions.  */
+#define ARCH_LA_PLTENTER ppc32_gnu_pltenter
+#define ARCH_LA_PLTEXIT ppc32_gnu_pltexit
+
+#endif /* dl_machine_h */
+
+#ifdef RESOLVE_MAP
+
+/* Do the actual processing of a reloc, once its target address
+   has been determined.  */
+extern void __process_machine_rela (struct link_map *map,
+				    const Elf32_Rela *reloc,
+				    struct link_map *sym_map,
+				    const Elf32_Sym *sym,
+				    const Elf32_Sym *refsym,
+				    Elf32_Addr *const reloc_addr,
+				    Elf32_Addr finaladdr,
+				    int rinfo) attribute_hidden;
+
+/* Call _dl_signal_error when a resolved value overflows a relocated area.  */
+extern void _dl_reloc_overflow (struct link_map *map,
+				const char *name,
+				Elf32_Addr *const reloc_addr,
+				const Elf32_Sym *refsym) attribute_hidden;
+
+/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
+   LOADADDR is the load address of the object; INFO is an array indexed
+   by DT_* of the .dynamic section info.  */
+
+auto inline void __attribute__ ((always_inline))
+elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
+		  const Elf32_Sym *sym, const struct r_found_version *version,
+		  void *const reloc_addr_arg, int skip_ifunc)
+{
+  Elf32_Addr *const reloc_addr = reloc_addr_arg;
+  const Elf32_Sym *const refsym = sym;
+  Elf32_Addr value;
+  const int r_type = ELF32_R_TYPE (reloc->r_info);
+  struct link_map *sym_map = NULL;
+
+#ifndef RESOLVE_CONFLICT_FIND_MAP
+  if (r_type == R_PPC_RELATIVE)
+    {
+      *reloc_addr = map->l_addr + reloc->r_addend;
+      return;
+    }
+
+  if (__glibc_unlikely (r_type == R_PPC_NONE))
+    return;
+
+  /* binutils on ppc32 includes st_value in r_addend for relocations
+     against local symbols.  */
+  if (__builtin_expect (ELF32_ST_BIND (sym->st_info) == STB_LOCAL, 0)
+      && sym->st_shndx != SHN_UNDEF)
+    value = map->l_addr;
+  else
+    {
+      sym_map = RESOLVE_MAP (&sym, version, r_type);
+      value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;
+    }
+  value += reloc->r_addend;
+#else
+  value = reloc->r_addend;
+#endif
+
+  if (sym != NULL
+      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
+      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
+      && __builtin_expect (!skip_ifunc, 1))
+    value = elf_ifunc_invoke (value);
+
+  /* A small amount of code is duplicated here for speed.  In libc,
+     more than 90% of the relocs are R_PPC_RELATIVE; in the X11 shared
+     libraries, 60% are R_PPC_RELATIVE, 24% are R_PPC_GLOB_DAT or
+     R_PPC_ADDR32, and 16% are R_PPC_JMP_SLOT (which this routine
+     wouldn't usually handle).  As a bonus, doing this here allows
+     the switch statement in __process_machine_rela to work.  */
+  switch (r_type)
+    {
+    case R_PPC_GLOB_DAT:
+    case R_PPC_ADDR32:
+      *reloc_addr = value;
+      break;
+
+#ifndef RESOLVE_CONFLICT_FIND_MAP
+# ifdef RTLD_BOOTSTRAP
+#  define NOT_BOOTSTRAP 0
+# else
+#  define NOT_BOOTSTRAP 1
+# endif
+
+    case R_PPC_DTPMOD32:
+      if (map->l_info[DT_PPC(OPT)]
+	  && (map->l_info[DT_PPC(OPT)]->d_un.d_val & PPC_OPT_TLS))
+	{
+	  if (!NOT_BOOTSTRAP)
+	    {
+	      reloc_addr[0] = 0;
+	      reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+			       + TLS_DTV_OFFSET);
+	      break;
+	    }
+	  else if (sym_map != NULL)
+	    {
+# ifndef SHARED
+	      CHECK_STATIC_TLS (map, sym_map);
+# else
+	      if (TRY_STATIC_TLS (map, sym_map))
+# endif
+		{
+		  reloc_addr[0] = 0;
+		  /* Set up for local dynamic.  */
+		  reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
+				   + TLS_DTV_OFFSET);
+		  break;
+		}
+	    }
+	}
+      if (!NOT_BOOTSTRAP)
+	/* During startup the dynamic linker is always index 1.  */
+	*reloc_addr = 1;
+      else if (sym_map != NULL)
+	/* Get the information from the link map returned by the
+	   RESOLVE_MAP function.  */
+	*reloc_addr = sym_map->l_tls_modid;
+      break;
+    case R_PPC_DTPREL32:
+      if (map->l_info[DT_PPC(OPT)]
+	  && (map->l_info[DT_PPC(OPT)]->d_un.d_val & PPC_OPT_TLS))
+	{
+	  if (!NOT_BOOTSTRAP)
+	    {
+	      *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
+	      break;
+	    }
+	  else if (sym_map != NULL)
+	    {
+	      /* This reloc is always preceded by R_PPC_DTPMOD32.  */
+# ifndef SHARED
+	      assert (HAVE_STATIC_TLS (map, sym_map));
+# else
+	      if (HAVE_STATIC_TLS (map, sym_map))
+# endif
+		{
+		  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
+		  break;
+		}
+	    }
+	}
+      /* During relocation all TLS symbols are defined and used.
+	 Therefore the offset is already correct.  */
+      if (NOT_BOOTSTRAP && sym_map != NULL)
+	*reloc_addr = TLS_DTPREL_VALUE (sym, reloc);
+      break;
+    case R_PPC_TPREL32:
+      if (!NOT_BOOTSTRAP || sym_map != NULL)
+	{
+	  if (NOT_BOOTSTRAP)
+	    CHECK_STATIC_TLS (map, sym_map);
+	  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
+	}
+      break;
+#endif
+
+    case R_PPC_JMP_SLOT:
+#ifdef RESOLVE_CONFLICT_FIND_MAP
+      RESOLVE_CONFLICT_FIND_MAP (map, reloc_addr);
+#endif
+      if (map->l_info[DT_PPC(GOT)] != 0)
+	{
+	  *reloc_addr = value;
+	  break;
+	}
+      /* FALLTHROUGH */
+
+    default:
+      __process_machine_rela (map, reloc, sym_map, sym, refsym,
+			      reloc_addr, value, r_type);
+    }
+}
+
+auto inline void __attribute__ ((always_inline))
+elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
+			   void *const reloc_addr_arg)
+{
+  Elf32_Addr *const reloc_addr = reloc_addr_arg;
+  *reloc_addr = l_addr + reloc->r_addend;
+}
+
+auto inline void __attribute__ ((always_inline))
+elf_machine_lazy_rel (struct link_map *map,
+		      Elf32_Addr l_addr, const Elf32_Rela *reloc,
+		      int skip_ifunc)
+{
+  /* elf_machine_runtime_setup handles this. */
+}
+
+#endif /* RESOLVE_MAP */
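
elf_machine_load_address above recovers its answer by decoding the `b _DYNAMIC@local' instruction: shifting the word left by 6 drops the opcode, the 0xffffff00 mask clears the AA/LK bits, and the arithmetic shift right by 6 sign-extends the 26-bit displacement. Below is a small self-contained sketch of just that decoding step, with made-up test values; like the original, it assumes right shift of a negative value is arithmetic.

/* Sketch of the branch-decoding trick used in elf_machine_load_address:
   extract the signed 26-bit displacement (LI field, low two bits zero)
   from a PowerPC `b' instruction word.  */
#include <stdio.h>
#include <stdint.h>

static int32_t
b_displacement (uint32_t insn)
{
  /* Shift left 6 to drop the opcode, mask off the AA/LK bits, then
     arithmetic-shift right 6 to sign-extend, as the code above does.  */
  return (int32_t) ((insn << 6) & 0xffffff00) >> 6;
}

int
main (void)
{
  /* 0x4bffffbc is `b .-68'; 0x48000010 is `b .+16'.  */
  printf ("%d\n", b_displacement (0x4bffffbc));   /* -68 */
  printf ("%d\n", b_displacement (0x48000010));   /*  16 */
  return 0;
}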
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/dl-start.S b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-start.S
new file mode 100644
index 0000000000..ab429567aa
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-start.S
@@ -0,0 +1,103 @@
+/* Machine-dependent ELF startup code.  PowerPC version.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* Initial entry point code for the dynamic linker.
+   The C function `_dl_start' is the real entry point;
+   its return value is the user program's entry point.	*/
+ENTRY(_start)
+/* We start with the following on the stack, from top:
+   argc (4 bytes);
+   arguments for program (terminated by NULL);
+   environment variables (terminated by NULL);
+   arguments for the program loader. */
+
+/* Call _dl_start with one parameter pointing at argc */
+	mr	r3,r1
+/* (we have to frob the stack pointer a bit to allow room for
+   _dl_start to save the link register).  */
+	li	r4,0
+	addi	r1,r1,-16
+	stw	r4,0(r1)
+	bl	_dl_start@local
+
+	/* FALLTHRU */
+_dl_start_user:
+/* Now, we do our main work of calling initialisation procedures.
+   The ELF ABI doesn't say anything about parameters for these,
+   so we just pass argc, argv, and the environment.
+   Changing these is strongly discouraged (not least because argc is
+   passed by value!).  */
+
+/*  Put our GOT pointer in r31, */
+	SETUP_GOT_ACCESS(r31,got_label)
+	addis	r31,r31,_GLOBAL_OFFSET_TABLE_-got_label@ha
+	addi	r31,r31,_GLOBAL_OFFSET_TABLE_-got_label@l
+/*  the address of _start in r30, */
+	mr	r30,r3
+/*  &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28.  */
+	lwz	r28,_rtld_local@got(r31)
+	lwz	r29,_dl_argc@got(r31)
+	lwz	r27,__GI__dl_argv@got(r31)
+
+/* Call _dl_init (_dl_loaded, _dl_argc, _dl_argv, _dl_argv+_dl_argc+1). */
+	lwz	r3,0(r28)
+	lwz	r4,0(r29)
+	lwz	r5,0(r27)
+	slwi	r6,r4,2
+	add	r6,r5,r6
+	addi	r6,r6,4
+	bl	_dl_init@local
+
+/* Now, to conform to the ELF ABI, we have to: */
+/* Pass argc (actually _dl_argc) in r3; */
+	lwz	r3,0(r29)
+/* pass argv (actually _dl_argv) in r4; */
+	lwz	r4,0(r27)
+/* pass envp (actually _dl_argv+_dl_argc+1) in r5; */
+	slwi	r5,r3,2
+	add	r6,r4,r5
+	addi	r5,r6,4
+/* pass the auxiliary vector in r6. This is passed to us just after _envp.  */
+2:	lwzu	r0,4(r6)
+	cmpwi	r0,0
+	bne	2b
+	addi	r6,r6,4
+/* Pass a termination function pointer (in this case _dl_fini) in r7.  */
+	lwz	r7,_dl_fini@got(r31)
+/* Now, call the start function in r30... */
+	mtctr	r30
+/* Pass the stack pointer in r1 (so far so good), pointing to a NULL value.
+   (This lets our startup code distinguish between a program linked statically,
+   which linux will call with argc on top of the stack which will hopefully
+   never be zero, and a dynamically linked program which will always have
+   a NULL on the top of the stack).
+   Take the opportunity to clear LR, so anyone who accidentally returns
+   from _start gets SEGV.  Also clear the next few words of the stack.  */
+
+_dl_main_dispatch:
+	li	r31,0
+	stw	r31,0(r1)
+	mtlr	r31
+	stw	r31,4(r1)
+	stw	r31,8(r1)
+	stw	r31,12(r1)
+/* Go do it!  */
+	bctr
+END(_start)
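
_dl_start_user above computes envp as _dl_argv + _dl_argc + 1 and then scans past the NULL that terminates the environment block to reach the auxiliary vector. The sketch below walks the same layout in C over a fabricated initial stack; it is purely illustrative of the layout the assembly assumes and plays no part in the real startup path.

/* Illustrative sketch of the start-up stack walk done in _dl_start_user
   above: argc, then argv[] terminated by NULL, then envp[] terminated by
   NULL, then the auxiliary vector.  Fabricated data, no real auxv.  */
#include <stdio.h>
#include <elf.h>   /* Elf32_auxv_t */

static void
walk_initial_stack (long *sp)
{
  long argc = sp[0];
  char **argv = (char **) (sp + 1);
  char **envp = argv + argc + 1;        /* skip argv[] and its terminating NULL */

  char **p = envp;
  while (*p != NULL)                    /* skip the environment block */
    ++p;
  Elf32_auxv_t *auxv = (Elf32_auxv_t *) (p + 1);

  printf ("argc=%ld argv=%p envp=%p auxv=%p\n",
          argc, (void *) argv, (void *) envp, (void *) auxv);
}

int
main (void)
{
  static char prog[] = "prog", env0[] = "A=1";
  /* Fake initial stack: argc, argv[0], NULL, envp[0], NULL, AT_NULL pair.  */
  long fake[] = { 1, (long) prog, 0, (long) env0, 0, 0, 0 };
  walk_initial_stack (fake);
  return 0;
}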
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/dl-trampoline.S b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-trampoline.S
new file mode 100644
index 0000000000..16b12db0e4
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/dl-trampoline.S
@@ -0,0 +1,189 @@
+/* PLT trampolines.  PPC32 version.
+   Copyright (C) 2005-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.section ".text"
+	.align 2
+	.globl _dl_runtime_resolve
+	.type _dl_runtime_resolve,@function
+_dl_runtime_resolve:
+	cfi_startproc
+ # We need to save the registers used to pass parameters, and register 0,
+ # which is used by _mcount; the registers are saved in a stack frame.
+	stwu r1,-64(r1)
+	cfi_adjust_cfa_offset (64)
+	stw r0,12(r1)
+	stw r3,16(r1)
+	stw r4,20(r1)
+ # The code that calls this has put parameters for `fixup' in r12 and r11.
+	mr r3,r12
+	stw r5,24(r1)
+	mr r4,r11
+	stw r6,28(r1)
+	mflr r0
+ # We also need to save some of the condition register fields
+	stw r7,32(r1)
+ # Don't clobber the caller's LRSAVE, it is needed by _mcount.
+	stw r0,48(r1)
+ 	cfi_offset (lr, -16)
+	stw r8,36(r1)
+	mfcr r0
+	stw r9,40(r1)
+	stw r10,44(r1)
+	stw r0,8(r1)
+	bl _dl_fixup@local
+ # 'fixup' returns the address we want to branch to.
+	mtctr r3
+ # Put the registers back...
+	lwz r0,48(r1)
+	lwz r10,44(r1)
+	lwz r9,40(r1)
+	mtlr r0
+	lwz r8,36(r1)
+	lwz r0,8(r1)
+	lwz r7,32(r1)
+	lwz r6,28(r1)
+	mtcrf 0xFF,r0
+	lwz r5,24(r1)
+	lwz r4,20(r1)
+	lwz r3,16(r1)
+	lwz r0,12(r1)
+ # ...unwind the stack frame, and jump to the PLT entry we updated.
+	addi r1,r1,64
+	bctr
+	cfi_endproc
+	.size	 _dl_runtime_resolve,.-_dl_runtime_resolve
+
+#ifndef PROF
+	.align 2
+	.globl _dl_prof_resolve
+	.type _dl_prof_resolve,@function
+_dl_prof_resolve:
+	cfi_startproc
+ # We need to save the registers used to pass parameters, and register 0,
+ # which is used by _mcount; the registers are saved in a stack frame.
+	stwu r1,-320(r1)
+	cfi_adjust_cfa_offset (320)
+	/* Stack layout:
+
+	  +312   stackframe
+	  +308   lr
+	  +304   r1
+	  +288   v12
+	  +272   v11
+	  +256   v10
+	  +240   v9
+	  +224   v8
+	  +208   v7
+	  +192   v6
+	  +176   v5
+	  +160   v4
+	  +144   v3
+	  +128   v2
+	  +112   v1
+	  +104   fp8
+	  +96    fp7
+	  +88    fp6
+	  +80    fp5
+	  +72    fp4
+	  +64    fp3
+	  +56    fp2
+	  +48    fp1
+	  +44    r10
+	  +40    r9
+	  +36    r8
+	  +32    r7
+	  +28    r6
+	  +24    r5
+	  +20    r4
+	  +16    r3
+	  +12    r0
+	  +8     cr
+	   r1    link
+	*/
+        stw r0,12(r1)
+	stw r3,16(r1)
+	stw r4,20(r1)
+ # The code that calls this has put parameters for `fixup' in r12 and r11.
+	mr r3,r12
+	stw r5,24(r1)
+	mr r4,r11
+	stw r6,28(r1)
+	mflr r5
+ # We also need to save some of the condition register fields.
+	stw r7,32(r1)
+ # Don't clobber the caller's LRSAVE, it is needed by _mcount.
+	stw r5,308(r1)
+	cfi_offset (lr, -12)
+	stw r8,36(r1)
+	mfcr r0
+	stw r9,40(r1)
+	stw r10,44(r1)
+	stw r0,8(r1)
+#ifndef __NO_FPRS__
+ # Save the floating point registers
+	stfd fp1,48(r1)
+	stfd fp2,56(r1)
+	stfd fp3,64(r1)
+	stfd fp4,72(r1)
+	stfd fp5,80(r1)
+	stfd fp6,88(r1)
+	stfd fp7,96(r1)
+	stfd fp8,104(r1)
+#endif
+ # XXX TODO: store vmx registers
+ # Load the extra parameters.
+	addi r6,r1,16
+	addi r7,r1,312
+	li r0,-1
+	stw r0,0(r7)
+	bl _dl_profile_fixup@local
+ # 'fixup' returns the address we want to branch to.
+	mtctr r3
+ # Put the registers back...
+	lwz r0,308(r1)
+	lwz r10,44(r1)
+	lwz r9,40(r1)
+	mtlr r0
+	lwz r8,36(r1)
+	lwz r0,8(r1)
+	lwz r7,32(r1)
+	lwz r6,28(r1)
+	mtcrf 0xFF,r0
+	lwz r5,24(r1)
+	lwz r4,20(r1)
+	lwz r3,16(r1)
+        lwz r0,12(r1)
+#ifndef __NO_FPRS__
+ # Load the floating point registers.
+	lfd fp1,48(r1)
+	lfd fp2,56(r1)
+	lfd fp3,64(r1)
+	lfd fp4,72(r1)
+	lfd fp5,80(r1)
+	lfd fp6,88(r1)
+	lfd fp7,96(r1)
+	lfd fp8,104(r1)
+#endif
+ # ...unwind the stack frame, and jump to the PLT entry we updated.
+	addi r1,r1,320
+	bctr
+	cfi_endproc
+	.size	 _dl_prof_resolve,.-_dl_prof_resolve
+#endif
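
Both resolvers above follow the same shape: spill the argument-passing registers, call the generic fixup routine with the link map and relocation offset that the PLT stub left in r12 and r11, restore, and branch to the returned address. The outline below restates that control flow in C with a stub in place of _dl_fixup; the names are illustrative, and the real code has to stay in assembly because the register spill/reload and the final bctr cannot be expressed in C.

/* Self-contained outline of the lazy-binding flow implemented by
   _dl_runtime_resolve above.  fixup_stub stands in for _dl_fixup; all
   names are illustrative.  */
#include <stdio.h>

typedef void (*target_fn) (void);

static void
real_target (void)
{
  puts ("resolved and called");
}

/* Stand-in for _dl_fixup: look up the symbol for this PLT slot, patch the
   slot so later calls skip the resolver, and return the target address.  */
static target_fn
fixup_stub (void *link_map, unsigned int reloc_offset)
{
  (void) link_map;
  (void) reloc_offset;
  return real_target;
}

int
main (void)
{
  /* The PLT stub would have left these in r12 and r11 before branching
     to the resolver.  */
  void *map = NULL;
  unsigned int reloc_offset = 0;

  /* Resolver body: save registers (elided here), call fixup, then jump
     to the result; the assembly above does the jump with mtctr/bctr.  */
  target_fn target = fixup_stub (map, reloc_offset);
  target ();
  return 0;
}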
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/Makefile b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/Makefile
new file mode 100644
index 0000000000..adf556870a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/Makefile
@@ -0,0 +1,9 @@
+ifeq ($(subdir),math)
+libm-routines += fexcepts_to_spe fexcepts_from_spe
+libm-routines += fexcepts_to_prctl fexcepts_from_prctl
+libm-routines += fe_note_change
+endif
+
+ifeq ($(subdir),soft-fp)
+sysdep_routines += fraiseexcept-soft
+endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feclearexcept.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feclearexcept.c
new file mode 100644
index 0000000000..09132451a0
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feclearexcept.c
@@ -0,0 +1,50 @@
+/* Clear floating-point exceptions for atomic compound assignment.
+   e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <stdlib.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+void
+__atomic_feclearexcept (void)
+{
+  unsigned int fpescr, old_fpescr;
+
+  /* Get the current state.  */
+  old_fpescr = fpescr = fegetenv_register ();
+
+  /* Clear the relevant bits.  */
+  fpescr &= ~SPEFSCR_ALL_EXCEPT;
+
+  /* Put the new state in effect.  */
+  fesetenv_register (fpescr);
+
+  /* Let the kernel know if the "invalid" or "underflow" bit was
+     cleared.  */
+  if (old_fpescr & (SPEFSCR_FINVS | SPEFSCR_FUNFS))
+    {
+      int pflags __attribute__ ((__unused__)), r;
+      INTERNAL_SYSCALL_DECL (err);
+
+      r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &pflags);
+      if (INTERNAL_SYSCALL_ERROR_P (r, err))
+	abort ();
+    }
+}
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feholdexcept.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feholdexcept.c
new file mode 100644
index 0000000000..3d6e10f1b6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feholdexcept.c
@@ -0,0 +1,55 @@
+/* Store current floating-point environment and clear exceptions for
+   atomic compound assignment.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <stdlib.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+void
+__atomic_feholdexcept (fenv_t *envp)
+{
+  fenv_union_t u;
+  INTERNAL_SYSCALL_DECL (err);
+  int r;
+
+  /* Get the current state.  */
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &u.l[0]);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    abort ();
+
+  u.l[1] = fegetenv_register ();
+  *envp = u.fenv;
+
+  /* Clear everything except for the rounding mode and trapping to the
+     kernel.  */
+  u.l[0] &= ~(PR_FP_EXC_DIV
+	      | PR_FP_EXC_OVF
+	      | PR_FP_EXC_UND
+	      | PR_FP_EXC_RES
+	      | PR_FP_EXC_INV);
+  u.l[1] &= SPEFSCR_FRMC | (SPEFSCR_ALL_EXCEPT_ENABLE & ~SPEFSCR_FINXE);
+
+  /* Put the new state in effect.  */
+  fesetenv_register (u.l[1]);
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
+			u.l[0] | PR_FP_EXC_SW_ENABLE);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    abort ();
+}
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feupdateenv.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feupdateenv.c
new file mode 100644
index 0000000000..a4615a1b01
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/atomic-feupdateenv.c
@@ -0,0 +1,46 @@
+/* Install given floating-point environment and raise exceptions for
+   atomic compound assignment.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <stdlib.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+void
+__atomic_feupdateenv (const fenv_t *envp)
+{
+  int exc;
+  fenv_union_t u;
+  INTERNAL_SYSCALL_DECL (err);
+  int r;
+
+  /* Save the currently set exceptions.  */
+  exc = fegetenv_register () & SPEFSCR_ALL_EXCEPT;
+
+  u.fenv = *envp;
+
+  fesetenv_register (u.l[1]);
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
+			u.l[0] | PR_FP_EXC_SW_ENABLE);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    abort ();
+
+  /* Raise (if appropriate) saved exceptions. */
+  __feraiseexcept_soft (exc);
+}
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fclrexcpt.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fclrexcpt.c
new file mode 100644
index 0000000000..cbf8d9df6c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fclrexcpt.c
@@ -0,0 +1,53 @@
+/* Clear given exceptions in current floating-point environment.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+#undef feclearexcept
+int
+__feclearexcept (int excepts)
+{
+  unsigned int fpescr;
+  int excepts_spe = __fexcepts_to_spe (excepts);
+
+  /* Get the current state.  */
+  fpescr = fegetenv_register ();
+
+  /* Clear the relevant bits.  */
+  fpescr &= ~excepts_spe;
+
+  /* Put the new state in effect.  */
+  fesetenv_register (fpescr);
+
+  /* Let the kernel know if the "invalid" or "underflow" bit was
+     cleared.  */
+  if (excepts & (FE_INVALID | FE_UNDERFLOW))
+    __fe_note_change ();
+
+  /* Success.  */
+  return 0;
+}
+
+#include <shlib-compat.h>
+#if SHLIB_COMPAT (libm, GLIBC_2_1, GLIBC_2_2)
+strong_alias (__feclearexcept, __old_feclearexcept)
+compat_symbol (libm, __old_feclearexcept, feclearexcept, GLIBC_2_1);
+#endif
+
+libm_hidden_ver (__feclearexcept, feclearexcept)
+versioned_symbol (libm, __feclearexcept, feclearexcept, GLIBC_2_2);
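The implementation above simply masks the requested status bits out of the SPEFSCR and notifies the kernel when the "invalid" or "underflow" state changes.  From application code the function is reached through the standard <fenv.h> interface; a minimal, architecture-independent usage sketch (program structure and output text are illustrative only; link with -lm on glibc):

#include <fenv.h>
#include <stdio.h>

int
main (void)
{
  /* Set two flags, then clear only one of them.  */
  feraiseexcept (FE_OVERFLOW | FE_INEXACT);
  printf ("overflow set: %d\n", fetestexcept (FE_OVERFLOW) != 0);

  feclearexcept (FE_OVERFLOW);
  printf ("overflow set: %d, inexact set: %d\n",
          fetestexcept (FE_OVERFLOW) != 0,
          fetestexcept (FE_INEXACT) != 0);
  return 0;
}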
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fe_note_change.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fe_note_change.c
new file mode 100644
index 0000000000..3dd3161f7d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fe_note_change.c
@@ -0,0 +1,39 @@
+/* Note a change to floating-point exceptions.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+/* Inform the kernel of a change to floating-point exceptions.  */
+
+void
+__fe_note_change (void)
+{
+  int pflags, r;
+  INTERNAL_SYSCALL_DECL (err);
+
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &pflags);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return;
+  if ((pflags & PR_FP_EXC_SW_ENABLE) == 0)
+    INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
+		      pflags | PR_FP_EXC_SW_ENABLE);
+}
+
+libm_hidden_def (__fe_note_change)
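__fe_note_change is a thin wrapper around the PR_GET_FPEXC/PR_SET_FPEXC pair.  Outside glibc the same handshake can be expressed with the public prctl(2) call; a hedged sketch that is only meaningful on PowerPC Linux (on other architectures these prctl options fail with EINVAL), with error handling kept minimal:

#include <stdio.h>
#include <sys/prctl.h>

int
main (void)
{
  int mode;

  /* Read the current floating-point exception mode.  */
  if (prctl (PR_GET_FPEXC, &mode) != 0)
    {
      perror ("PR_GET_FPEXC");
      return 1;
    }

  /* Ask the kernel to let user space take over exception reporting,
     as __fe_note_change does when the flag is not yet set.  */
  if ((mode & PR_FP_EXC_SW_ENABLE) == 0
      && prctl (PR_SET_FPEXC, mode | PR_FP_EXC_SW_ENABLE) != 0)
    perror ("PR_SET_FPEXC");

  printf ("FP exception mode: %#x\n", mode);
  return 0;
}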
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fedisblxcpt.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fedisblxcpt.c
new file mode 100644
index 0000000000..94ce45463c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fedisblxcpt.c
@@ -0,0 +1,54 @@
+/* Disable floating-point exceptions.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+int
+fedisableexcept (int excepts)
+{
+  int result = 0, pflags, r;
+  INTERNAL_SYSCALL_DECL (err);
+
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &pflags);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  /* Save old enable bits.  */
+  result = __fexcepts_from_prctl (pflags);
+
+  pflags &= ~__fexcepts_to_prctl (excepts);
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
+			pflags | PR_FP_EXC_SW_ENABLE);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  /* If disabling signals for "inexact", also disable trapping to the
+     kernel.  */
+  if ((excepts & FE_INEXACT) != 0)
+    {
+      unsigned long fpescr;
+
+      fpescr = fegetenv_register ();
+      fpescr &= ~SPEFSCR_FINXE;
+      fesetenv_register (fpescr);
+    }
+
+  return result;
+}
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feenablxcpt.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feenablxcpt.c
new file mode 100644
index 0000000000..32116d1608
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feenablxcpt.c
@@ -0,0 +1,54 @@
+/* Enable floating-point exceptions.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+int
+feenableexcept (int excepts)
+{
+  unsigned int result = 0, pflags, r;
+  INTERNAL_SYSCALL_DECL (err);
+
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &pflags);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  /* Save old enable bits.  */
+  result = __fexcepts_from_prctl (pflags);
+
+  pflags |= __fexcepts_to_prctl (excepts);
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
+			pflags | PR_FP_EXC_SW_ENABLE);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  /* If enabling signals for "inexact", also enable trapping to the
+     kernel.  */
+  if ((excepts & FE_INEXACT) != 0)
+    {
+      unsigned long fpescr;
+
+      fpescr = fegetenv_register ();
+      fpescr |= SPEFSCR_FINXE;
+      fesetenv_register (fpescr);
+    }
+
+  return result;
+}
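feenableexcept and fedisableexcept are GNU extensions; the e500 versions translate the FE_* mask into prctl enable bits and, for FE_INEXACT, additionally toggle SPEFSCR_FINXE.  A small usage sketch against the public API (the handler name is illustrative; whether a SIGFPE is actually delivered for an enabled exception depends on the architecture and kernel; link with -lm):

#define _GNU_SOURCE
#include <fenv.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
fpe_handler (int sig)
{
  (void) sig;
  static const char msg[] = "caught SIGFPE\n";
  write (STDERR_FILENO, msg, sizeof msg - 1);
  _exit (1);
}

int
main (void)
{
  signal (SIGFPE, fpe_handler);

  /* Enable trapping on division by zero; the previous mask is returned.  */
  int old = feenableexcept (FE_DIVBYZERO);
  if (old == -1)
    {
      fputs ("feenableexcept failed\n", stderr);
      return 1;
    }

  volatile double zero = 0.0;
  volatile double r = 1.0 / zero;   /* Should trap and enter fpe_handler.  */
  printf ("no trap delivered, r = %g\n", r);

  fedisableexcept (FE_DIVBYZERO);
  return 0;
}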
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetenv.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetenv.c
new file mode 100644
index 0000000000..01b8fa4c9a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetenv.c
@@ -0,0 +1,49 @@
+/* Store current floating-point environment.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+int
+__fegetenv (fenv_t *envp)
+{
+  fenv_union_t u;
+  INTERNAL_SYSCALL_DECL (err);
+  int r;
+
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &u.l[0]);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  u.l[1] = fegetenv_register ();
+  *envp = u.fenv;
+
+  /* Success.  */
+  return 0;
+}
+
+#include <shlib-compat.h>
+#if SHLIB_COMPAT (libm, GLIBC_2_1, GLIBC_2_2)
+strong_alias (__fegetenv, __old_fegetenv)
+compat_symbol (libm, __old_fegetenv, fegetenv, GLIBC_2_1);
+#endif
+libm_hidden_def (__fegetenv)
+libm_hidden_ver (__fegetenv, fegetenv)
+
+versioned_symbol (libm, __fegetenv, fegetenv, GLIBC_2_2);
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetexcept.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetexcept.c
new file mode 100644
index 0000000000..74fdb5a1c9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetexcept.c
@@ -0,0 +1,36 @@
+/* Get floating-point exceptions.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+int
+fegetexcept (void)
+{
+  int result = 0, pflags, r;
+  INTERNAL_SYSCALL_DECL (err);
+
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &pflags);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  result = __fexcepts_from_prctl (pflags);
+
+  return result;
+}
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetmode.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetmode.c
new file mode 100644
index 0000000000..d262714266
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetmode.c
@@ -0,0 +1,37 @@
+/* Store current floating-point control modes.  e500 version.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+int
+fegetmode (femode_t *modep)
+{
+  fenv_union_t u;
+  INTERNAL_SYSCALL_DECL (err);
+  int r;
+
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &u.l[0]);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  u.l[1] = fegetenv_register ();
+  *modep = u.fenv;
+  return 0;
+}
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetround.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetround.c
new file mode 100644
index 0000000000..afcc5d18cc
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fegetround.c
@@ -0,0 +1,31 @@
+/* Return current rounding direction.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+int
+__fegetround (void)
+{
+  unsigned long fpescr;
+
+  fpescr = fegetenv_register ();
+  return fpescr & 3;
+}
+libm_hidden_def (__fegetround)
+weak_alias (__fegetround, fegetround)
+libm_hidden_weak (fegetround)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feholdexcpt.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feholdexcpt.c
new file mode 100644
index 0000000000..cba1239561
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feholdexcpt.c
@@ -0,0 +1,59 @@
+/* Store current floating-point environment and clear exceptions.
+   e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+int
+__feholdexcept (fenv_t *envp)
+{
+  fenv_union_t u;
+  INTERNAL_SYSCALL_DECL (err);
+  int r;
+
+  /* Get the current state.  */
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_GET_FPEXC, &u.l[0]);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  u.l[1] = fegetenv_register ();
+  *envp = u.fenv;
+
+  /* Clear everything except for the rounding mode and trapping to the
+     kernel.  */
+  u.l[0] &= ~(PR_FP_EXC_DIV
+	      | PR_FP_EXC_OVF
+	      | PR_FP_EXC_UND
+	      | PR_FP_EXC_RES
+	      | PR_FP_EXC_INV);
+  u.l[1] &= SPEFSCR_FRMC | (SPEFSCR_ALL_EXCEPT_ENABLE & ~SPEFSCR_FINXE);
+
+  /* Put the new state in effect.  */
+  fesetenv_register (u.l[1]);
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
+			u.l[0] | PR_FP_EXC_SW_ENABLE);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  return 0;
+}
+libm_hidden_def (__feholdexcept)
+weak_alias (__feholdexcept, feholdexcept)
+libm_hidden_weak (feholdexcept)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fenv_const.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fenv_const.c
new file mode 100644
index 0000000000..9fc3f53bc7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fenv_const.c
@@ -0,0 +1,45 @@
+/* Constant floating-point environments for e500.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* The use of "unsigned long long" as the type to define the
+   bit-pattern explicitly, rather than the type "double" used in
+   <bits/fenv.h>, means that we cannot include <fenv_libc.h> here to
+   get the enum constants for the SPEFSCR bits to enable
+   exceptions.  */
+
+#include <sys/prctl.h>
+
+/* If the default argument is used we use this value.  */
+const unsigned long long __fe_dfl_env __attribute__ ((aligned (8))) =
+  0x3cULL;
+
+/* The same representation is used for femode_t.  */
+extern const unsigned long long __fe_dfl_mode
+  __attribute__ ((aligned (8), alias ("__fe_dfl_env")));
+
+/* Floating-point environment where none of the exceptions are masked.  */
+const unsigned long long __fe_enabled_env __attribute__ ((aligned (8))) =
+  (((unsigned long long) (PR_FP_EXC_DIV
+			  | PR_FP_EXC_OVF
+			  | PR_FP_EXC_UND
+			  | PR_FP_EXC_RES
+			  | PR_FP_EXC_INV)) << 32) | 0x7cULL;
+
+/* Non-IEEE mode.  */
+const unsigned long long __fe_nonieee_env __attribute__ ((aligned (8))) =
+  0x0ULL;
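The literals 0x3c and 0x7c in these environments are combinations of the SPEFSCR trap-enable bits defined in fenv_libc.h later in this patch, which the comment above explains cannot be included here.  A standalone consistency check, with the enum values copied locally so it builds anywhere:

#include <assert.h>
#include <stdio.h>

/* Values copied from the SPEFSCR enum in fenv_libc.h below.  */
enum
{
  SPEFSCR_FINXE = 0x00000040,
  SPEFSCR_FINVE = 0x00000020,
  SPEFSCR_FDBZE = 0x00000010,
  SPEFSCR_FUNFE = 0x00000008,
  SPEFSCR_FOVFE = 0x00000004,
  SPEFSCR_ALL_EXCEPT_ENABLE = 0x0000007c
};

int
main (void)
{
  /* 0x3c is the OR of the FINVE/FDBZE/FUNFE/FOVFE enable bits.  */
  assert ((SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE)
          == 0x3c);
  /* 0x7c adds FINXE and equals the combined enable mask.  */
  assert ((0x3c | SPEFSCR_FINXE) == 0x7c
          && SPEFSCR_ALL_EXCEPT_ENABLE == 0x7c);
  puts ("constants agree with fenv_libc.h");
  return 0;
}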
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fenv_libc.h b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fenv_libc.h
new file mode 100644
index 0000000000..13437f8052
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fenv_libc.h
@@ -0,0 +1,99 @@
+/* Internal libc stuff for floating point environment routines.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _FENV_LIBC_H
+#define _FENV_LIBC_H	1
+
+#include <fenv.h>
+
+int __feraiseexcept_spe (int);
+libm_hidden_proto (__feraiseexcept_spe)
+
+int __feraiseexcept_soft (int);
+libc_hidden_proto (__feraiseexcept_soft)
+
+int __fexcepts_to_spe (int);
+libm_hidden_proto (__fexcepts_to_spe)
+
+int __fexcepts_from_spe (int);
+libm_hidden_proto (__fexcepts_from_spe)
+
+int __fexcepts_to_prctl (int);
+libm_hidden_proto (__fexcepts_to_prctl)
+
+int __fexcepts_from_prctl (int);
+libm_hidden_proto (__fexcepts_from_prctl)
+
+void __fe_note_change (void);
+libm_hidden_proto (__fe_note_change)
+
+/* Equivalent to fegetenv, but returns an unsigned int instead of
+   taking a pointer.  */
+#define fegetenv_register() \
+  ({ unsigned int fscr; asm volatile ("mfspefscr %0" : "=r" (fscr)); fscr; })
+
+/* Equivalent to fesetenv, but takes an unsigned int instead of a
+   pointer.  */
+#define fesetenv_register(fscr) \
+  ({ asm volatile ("mtspefscr %0" : : "r" (fscr)); })
+
+typedef union
+{
+  fenv_t fenv;
+  unsigned int l[2];
+} fenv_union_t;
+
+/* Definitions of all the SPEFSCR bit numbers.  */
+enum {
+  SPEFSCR_SOVH          = 0x80000000,
+  SPEFSCR_OVH           = 0x40000000,
+  SPEFSCR_FGH           = 0x20000000,
+  SPEFSCR_FXH           = 0x10000000,
+  SPEFSCR_FINVH         = 0x08000000,
+  SPEFSCR_FDBZH         = 0x04000000,
+  SPEFSCR_FUNFH         = 0x02000000,
+  SPEFSCR_FOVFH         = 0x01000000,
+  /* 2 unused bits.  */
+  SPEFSCR_FINXS         = 0x00200000,
+  SPEFSCR_FINVS         = 0x00100000,
+  SPEFSCR_FDBZS         = 0x00080000,
+  SPEFSCR_FUNFS         = 0x00040000,
+  SPEFSCR_FOVFS         = 0x00020000,
+  /* Combination of the exception bits.  */
+  SPEFSCR_ALL_EXCEPT    = 0x003e0000,
+  SPEFSCR_MODE          = 0x00010000,
+  SPEFSCR_SOV           = 0x00008000,
+  SPEFSCR_OV            = 0x00004000,
+  SPEFSCR_FG            = 0x00002000,
+  SPEFSCR_FX            = 0x00001000,
+  SPEFSCR_FINV          = 0x00000800,
+  SPEFSCR_FDBZ          = 0x00000400,
+  SPEFSCR_FUNF          = 0x00000200,
+  SPEFSCR_FOVF          = 0x00000100,
+  /* 1 unused bit.  */
+  SPEFSCR_FINXE         = 0x00000040,
+  SPEFSCR_FINVE         = 0x00000020,
+  SPEFSCR_FDBZE         = 0x00000010,
+  SPEFSCR_FUNFE         = 0x00000008,
+  SPEFSCR_FOVFE         = 0x00000004,
+  /* Combination of the exception trap enable bits.  */
+  SPEFSCR_ALL_EXCEPT_ENABLE = 0x0000007c,
+  SPEFSCR_FRMC          = 0x00000003
+};
+
+#endif /* fenv_libc.h */
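Throughout these files a 64-bit fenv_t is viewed as two 32-bit words through fenv_union_t: the prctl exception mode is kept in u.l[0] and the SPEFSCR image in u.l[1].  A host-side sketch of that packing; my_fenv_t is a hypothetical stand-in (the fenv_const.c comment earlier notes the public fenv_t is declared as a double in <bits/fenv.h>), and the sketch assumes a host with 64-bit double and 32-bit unsigned int:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the target's 64-bit fenv_t.  */
typedef double my_fenv_t;

typedef union
{
  my_fenv_t fenv;
  unsigned int l[2];
} my_fenv_union_t;

int
main (void)
{
  _Static_assert (sizeof (my_fenv_t) == 2 * sizeof (unsigned int),
                  "fenv_t stand-in must hold two 32-bit words");

  my_fenv_union_t u;
  u.l[0] = 0x00100000;   /* e.g. a prctl enable mask.  */
  u.l[1] = 0x0000003c;   /* e.g. an SPEFSCR image.  */

  my_fenv_t env = u.fenv;   /* What "*envp = u.fenv" does.  */

  my_fenv_union_t v;
  v.fenv = env;             /* What "u.fenv = *envp" does.  */
  assert (v.l[0] == u.l[0] && v.l[1] == u.l[1]);
  puts ("two-word round trip OK");
  return 0;
}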
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetenv.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetenv.c
new file mode 100644
index 0000000000..185bcdb051
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetenv.c
@@ -0,0 +1,50 @@
+/* Install given floating-point environment.  e500 version.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+int
+__fesetenv (const fenv_t *envp)
+{
+  fenv_union_t u;
+  INTERNAL_SYSCALL_DECL (err);
+  int r;
+
+  u.fenv = *envp;
+
+  fesetenv_register (u.l[1]);
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
+			u.l[0] | PR_FP_EXC_SW_ENABLE);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  /* Success.  */
+  return 0;
+}
+
+#include <shlib-compat.h>
+#if SHLIB_COMPAT (libm, GLIBC_2_1, GLIBC_2_2)
+strong_alias (__fesetenv, __old_fesetenv)
+compat_symbol (libm, __old_fesetenv, fesetenv, GLIBC_2_1);
+#endif
+
+libm_hidden_def (__fesetenv)
+libm_hidden_ver (__fesetenv, fesetenv)
+versioned_symbol (libm, __fesetenv, fesetenv, GLIBC_2_2);
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetexcept.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetexcept.c
new file mode 100644
index 0000000000..688583a6e7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetexcept.c
@@ -0,0 +1,37 @@
+/* Set given exception flags.  e500 version.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+int
+fesetexcept (int excepts)
+{
+  unsigned long old_spefscr, spefscr;
+  int excepts_spe = __fexcepts_to_spe (excepts);
+
+  old_spefscr = fegetenv_register ();
+  spefscr = old_spefscr | excepts_spe;
+  fesetenv_register (spefscr);
+
+  /* If the state of the "invalid" or "underflow" flag has changed,
+     inform the kernel.  */
+  if (((spefscr ^ old_spefscr) & (SPEFSCR_FINVS | SPEFSCR_FUNFS)) != 0)
+    __fe_note_change ();
+
+  return 0;
+}
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetmode.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetmode.c
new file mode 100644
index 0000000000..360e500b27
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetmode.c
@@ -0,0 +1,43 @@
+/* Install given floating-point control modes.  e500 version.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sysdep.h>
+#include <sys/prctl.h>
+
+#define SPEFSCR_STATUS 0xff3eff00
+
+int
+fesetmode (const femode_t *modep)
+{
+  fenv_union_t u;
+  INTERNAL_SYSCALL_DECL (err);
+  int r;
+
+  u.fenv = *modep;
+  unsigned int spefscr = fegetenv_register ();
+  spefscr = (spefscr & SPEFSCR_STATUS) | (u.l[1] & ~SPEFSCR_STATUS);
+
+  fesetenv_register (spefscr);
+  r = INTERNAL_SYSCALL (prctl, err, 2, PR_SET_FPEXC,
+			u.l[0] | PR_FP_EXC_SW_ENABLE);
+  if (INTERNAL_SYSCALL_ERROR_P (r, err))
+    return -1;
+
+  return 0;
+}
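fesetmode keeps the status half of the live SPEFSCR and takes only the control ("mode") half from the saved femode_t; SPEFSCR_STATUS is the mask of bits treated as status.  A quick arithmetic check that the complement of that mask covers the rounding field, the trap enables and the mode bit, and none of the sticky exception flags, using values copied from fenv_libc.h:

#include <assert.h>
#include <stdio.h>

/* Values copied from fenv_libc.h in this patch.  */
enum
{
  SPEFSCR_MODE              = 0x00010000,
  SPEFSCR_ALL_EXCEPT        = 0x003e0000,
  SPEFSCR_ALL_EXCEPT_ENABLE = 0x0000007c,
  SPEFSCR_FRMC              = 0x00000003
};

#define SPEFSCR_STATUS 0xff3eff00

int
main (void)
{
  unsigned int control = ~SPEFSCR_STATUS;

  /* The control half contains the rounding mode, the trap-enable bits
     and the embedded-FP mode bit...  */
  assert ((control & (SPEFSCR_FRMC | SPEFSCR_ALL_EXCEPT_ENABLE | SPEFSCR_MODE))
          == (SPEFSCR_FRMC | SPEFSCR_ALL_EXCEPT_ENABLE | SPEFSCR_MODE));
  /* ... and none of the sticky exception flags.  */
  assert ((control & SPEFSCR_ALL_EXCEPT) == 0);

  printf ("control mask: %#010x\n", control);
  return 0;
}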
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetround.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetround.c
new file mode 100644
index 0000000000..15aaa62079
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fesetround.c
@@ -0,0 +1,37 @@
+/* Set current rounding direction.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+int
+__fesetround (int round)
+{
+  unsigned long fpescr;
+
+  if ((unsigned int) round > 3)
+    return 1;
+
+  fpescr = fegetenv_register ();
+  fpescr = (fpescr & ~SPEFSCR_FRMC) | (round & 3);
+  fesetenv_register (fpescr);
+
+  return 0;
+}
+libm_hidden_def (__fesetround)
+weak_alias (__fesetround, fesetround)
+libm_hidden_weak (fesetround)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fetestexceptflag.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fetestexceptflag.c
new file mode 100644
index 0000000000..9d42d919ec
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fetestexceptflag.c
@@ -0,0 +1,25 @@
+/* Test exception in saved exception state.  e500 version.
+   Copyright (C) 2016-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+int
+fetestexceptflag (const fexcept_t *flagp, int excepts)
+{
+  return __fexcepts_from_spe (*flagp) & excepts & FE_ALL_EXCEPT;
+}
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feupdateenv.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feupdateenv.c
new file mode 100644
index 0000000000..54de708449
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/feupdateenv.c
@@ -0,0 +1,48 @@
+/* Install given floating-point environment and raise exceptions.
+   e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+int
+__feupdateenv (const fenv_t *envp)
+{
+  int exc;
+
+  /* Save the currently set exceptions.  */
+  exc = fegetenv_register () & SPEFSCR_ALL_EXCEPT;
+
+  /* Install new environment.  */
+  __fesetenv (envp);
+
+  /* Raise (if appropriate) saved exceptions.  */
+  __feraiseexcept_spe (exc);
+
+  /* Success.  */
+  return 0;
+}
+
+#include <shlib-compat.h>
+#if SHLIB_COMPAT (libm, GLIBC_2_1, GLIBC_2_2)
+strong_alias (__feupdateenv, __old_feupdateenv)
+compat_symbol (libm, __old_feupdateenv, feupdateenv, GLIBC_2_1);
+#endif
+
+libm_hidden_def (__feupdateenv)
+libm_hidden_ver (__feupdateenv, feupdateenv)
+versioned_symbol (libm, __feupdateenv, feupdateenv, GLIBC_2_2);
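__feupdateenv is the second half of the usual "hold, compute, merge" idiom.  A portable sketch of that idiom using only standard <fenv.h> calls (guarded_inverse is an illustrative helper, not part of this patch; link with -lm):

#include <fenv.h>
#include <stdio.h>

/* Compute 1/x without letting a divide-by-zero trap this function,
   while still merging the raised flag back into the caller's
   environment, which is exactly what feupdateenv is specified to do.  */
static double
guarded_inverse (double x)
{
  fenv_t env;
  volatile double xv = x;

  feholdexcept (&env);   /* Save environment, clear flags, go non-stop.  */

  double r = 1.0 / xv;
  if (fetestexcept (FE_DIVBYZERO))
    r = 0.0;             /* Locally chosen fallback value.  */

  feupdateenv (&env);    /* Restore env, then re-raise the held flags.  */
  return r;
}

int
main (void)
{
  printf ("%g %g\n", guarded_inverse (4.0), guarded_inverse (0.0));
  printf ("caller still sees FE_DIVBYZERO: %d\n",
          fetestexcept (FE_DIVBYZERO) != 0);
  return 0;
}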
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_from_prctl.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_from_prctl.c
new file mode 100644
index 0000000000..b260fc8df1
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_from_prctl.c
@@ -0,0 +1,42 @@
+/* Convert floating-point exceptions from prctl form.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sys/prctl.h>
+
+/* Convert EXCEPTS from prctl bits to FE_* form, returning the
+   converted value.  */
+
+int
+__fexcepts_from_prctl (int excepts)
+{
+  int result = 0;
+  if (excepts & PR_FP_EXC_OVF)
+    result |= FE_OVERFLOW;
+  if (excepts & PR_FP_EXC_UND)
+    result |= FE_UNDERFLOW;
+  if (excepts & PR_FP_EXC_INV)
+    result |= FE_INVALID;
+  if (excepts & PR_FP_EXC_DIV)
+    result |= FE_DIVBYZERO;
+  if (excepts & PR_FP_EXC_RES)
+    result |= FE_INEXACT;
+  return result;
+}
+
+libm_hidden_def (__fexcepts_from_prctl)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_from_spe.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_from_spe.c
new file mode 100644
index 0000000000..a925fe4c37
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_from_spe.c
@@ -0,0 +1,41 @@
+/* Convert floating-point exceptions from SPEFSCR form.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+/* Convert EXCEPTS from SPEFSCR bits to FE_* form, returning the
+   converted value.  */
+
+int
+__fexcepts_from_spe (int excepts)
+{
+  int result = 0;
+  if (excepts & SPEFSCR_FINXS)
+    result |= FE_INEXACT;
+  if (excepts & SPEFSCR_FDBZS)
+    result |= FE_DIVBYZERO;
+  if (excepts & SPEFSCR_FUNFS)
+    result |= FE_UNDERFLOW;
+  if (excepts & SPEFSCR_FOVFS)
+    result |= FE_OVERFLOW;
+  if (excepts & SPEFSCR_FINVS)
+    result |= FE_INVALID;
+  return result;
+}
+
+libm_hidden_def (__fexcepts_from_spe)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_to_prctl.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_to_prctl.c
new file mode 100644
index 0000000000..e4626312ee
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_to_prctl.c
@@ -0,0 +1,42 @@
+/* Convert floating-point exceptions to prctl form.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <sys/prctl.h>
+
+/* Convert EXCEPTS from FE_* form to prctl bits, returning the
+   converted value.  */
+
+int
+__fexcepts_to_prctl (int excepts)
+{
+  int result = 0;
+  if (excepts & FE_INEXACT)
+    result |= PR_FP_EXC_RES;
+  if (excepts & FE_DIVBYZERO)
+    result |= PR_FP_EXC_DIV;
+  if (excepts & FE_UNDERFLOW)
+    result |= PR_FP_EXC_UND;
+  if (excepts & FE_OVERFLOW)
+    result |= PR_FP_EXC_OVF;
+  if (excepts & FE_INVALID)
+    result |= PR_FP_EXC_INV;
+  return result;
+}
+
+libm_hidden_def (__fexcepts_to_prctl)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_to_spe.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_to_spe.c
new file mode 100644
index 0000000000..3eed4ae6e9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fexcepts_to_spe.c
@@ -0,0 +1,41 @@
+/* Convert floating-point exceptions to SPEFSCR form.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+/* Convert EXCEPTS from FE_* form to SPEFSCR bits, returning the
+   converted value.  */
+
+int
+__fexcepts_to_spe (int excepts)
+{
+  int result = 0;
+  if (excepts & FE_INEXACT)
+    result |= SPEFSCR_FINXS;
+  if (excepts & FE_DIVBYZERO)
+    result |= SPEFSCR_FDBZS;
+  if (excepts & FE_UNDERFLOW)
+    result |= SPEFSCR_FUNFS;
+  if (excepts & FE_OVERFLOW)
+    result |= SPEFSCR_FOVFS;
+  if (excepts & FE_INVALID)
+    result |= SPEFSCR_FINVS;
+  return result;
+}
+
+libm_hidden_def (__fexcepts_to_spe)
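With both direction helpers now in place, the SPEFSCR mapping round-trips exactly on the five supported exceptions.  A standalone check that mirrors the two functions; to_spe and from_spe are local copies, the status-bit values come from fenv_libc.h in this patch, and the FE_* values are whatever the host's <fenv.h> defines:

#include <assert.h>
#include <fenv.h>
#include <stdio.h>

/* SPEFSCR sticky status bits, copied from fenv_libc.h.  */
enum
{
  SPEFSCR_FINXS = 0x00200000,
  SPEFSCR_FINVS = 0x00100000,
  SPEFSCR_FDBZS = 0x00080000,
  SPEFSCR_FUNFS = 0x00040000,
  SPEFSCR_FOVFS = 0x00020000
};

static int
to_spe (int excepts)    /* Mirrors __fexcepts_to_spe.  */
{
  int r = 0;
  if (excepts & FE_INEXACT)   r |= SPEFSCR_FINXS;
  if (excepts & FE_DIVBYZERO) r |= SPEFSCR_FDBZS;
  if (excepts & FE_UNDERFLOW) r |= SPEFSCR_FUNFS;
  if (excepts & FE_OVERFLOW)  r |= SPEFSCR_FOVFS;
  if (excepts & FE_INVALID)   r |= SPEFSCR_FINVS;
  return r;
}

static int
from_spe (int spe)      /* Mirrors __fexcepts_from_spe.  */
{
  int r = 0;
  if (spe & SPEFSCR_FINXS) r |= FE_INEXACT;
  if (spe & SPEFSCR_FDBZS) r |= FE_DIVBYZERO;
  if (spe & SPEFSCR_FUNFS) r |= FE_UNDERFLOW;
  if (spe & SPEFSCR_FOVFS) r |= FE_OVERFLOW;
  if (spe & SPEFSCR_FINVS) r |= FE_INVALID;
  return r;
}

int
main (void)
{
  static const int fe[] =
    { FE_INEXACT, FE_DIVBYZERO, FE_UNDERFLOW, FE_OVERFLOW, FE_INVALID };

  /* Round-trip every subset of the five exceptions.  */
  for (int mask = 0; mask < 32; mask++)
    {
      int excepts = 0;
      for (int i = 0; i < 5; i++)
        if (mask & (1 << i))
          excepts |= fe[i];
      assert (from_spe (to_spe (excepts)) == excepts);
    }
  puts ("SPEFSCR mapping round-trips");
  return 0;
}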
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fgetexcptflg.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fgetexcptflg.c
new file mode 100644
index 0000000000..cff4330a9c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fgetexcptflg.c
@@ -0,0 +1,41 @@
+/* Store current representation for exceptions.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+int
+__fegetexceptflag (fexcept_t *flagp, int excepts)
+{
+  unsigned long fpescr;
+
+  /* Get the current state.  */
+  fpescr = fegetenv_register ();
+
+  *flagp = fpescr & SPEFSCR_ALL_EXCEPT;
+
+  /* Success.  */
+  return 0;
+}
+
+#include <shlib-compat.h>
+#if SHLIB_COMPAT (libm, GLIBC_2_1, GLIBC_2_2)
+strong_alias (__fegetexceptflag, __old_fegetexceptflag)
+compat_symbol (libm, __old_fegetexceptflag, fegetexceptflag, GLIBC_2_1);
+#endif
+
+versioned_symbol (libm, __fegetexceptflag, fegetexceptflag, GLIBC_2_2);
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/flt-rounds.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/flt-rounds.c
new file mode 100644
index 0000000000..4fb8d034c4
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/flt-rounds.c
@@ -0,0 +1,39 @@
+/* Return current rounding mode as correct value for FLT_ROUNDS.  e500
+   version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <stdlib.h>
+
+int
+__flt_rounds (void)
+{
+  switch (fegetenv_register () & SPEFSCR_FRMC)
+    {
+    case FE_TOWARDZERO:
+      return 0;
+    case FE_TONEAREST:
+      return 1;
+    case FE_UPWARD:
+      return 2;
+    case FE_DOWNWARD:
+      return 3;
+    default:
+      abort ();
+    }
+}
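__flt_rounds maps the SPEFSCR rounding field onto the FLT_ROUNDS encoding (0 toward zero, 1 to nearest, 2 upward, 3 downward).  The same observable behaviour can be exercised portably; a sketch assuming the host defines all four rounding-mode macros and that its FLT_ROUNDS reflects the dynamic rounding mode (on some toolchains it is hardwired to 1):

#include <fenv.h>
#include <float.h>
#include <stdio.h>

static void
show (int mode, const char *name)
{
  if (fesetround (mode) != 0)
    {
      printf ("%s: not supported\n", name);
      return;
    }
  /* FLT_ROUNDS is re-evaluated on each use.  */
  printf ("%s: FLT_ROUNDS = %d\n", name, FLT_ROUNDS);
}

int
main (void)
{
  show (FE_TONEAREST, "FE_TONEAREST");     /* typically 1 */
  show (FE_TOWARDZERO, "FE_TOWARDZERO");   /* typically 0 */
  show (FE_UPWARD, "FE_UPWARD");           /* typically 2 */
  show (FE_DOWNWARD, "FE_DOWNWARD");       /* typically 3 */
  fesetround (FE_TONEAREST);
  return 0;
}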
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fraiseexcept-soft.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fraiseexcept-soft.c
new file mode 100644
index 0000000000..ef35a9426d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fraiseexcept-soft.c
@@ -0,0 +1,25 @@
+/* Raise given exceptions.  e500 version for use from soft-fp.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Aldy Hernandez <aldyh@redhat.com>, 2004.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+#include <libc-symbols.h>
+
+#define __FERAISEEXCEPT_INTERNAL __feraiseexcept_soft
+#include "spe-raise.c"
+libc_hidden_def (__feraiseexcept_soft)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fraiseexcpt.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fraiseexcpt.c
new file mode 100644
index 0000000000..915642a1dc
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fraiseexcpt.c
@@ -0,0 +1,41 @@
+/* Raise given exceptions.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+#define __FERAISEEXCEPT_INTERNAL __feraiseexcept_spe
+#include "spe-raise.c"
+
+libm_hidden_def (__feraiseexcept_spe)
+
+#undef feraiseexcept
+int
+__feraiseexcept (int excepts)
+{
+  return __feraiseexcept_spe (__fexcepts_to_spe (excepts));
+}
+
+#include <shlib-compat.h>
+#if SHLIB_COMPAT (libm, GLIBC_2_1, GLIBC_2_2)
+strong_alias (__feraiseexcept, __old_feraiseexcept)
+compat_symbol (libm, __old_feraiseexcept, feraiseexcept, GLIBC_2_1);
+#endif
+
+libm_hidden_def (__feraiseexcept)
+libm_hidden_ver (__feraiseexcept, feraiseexcept)
+versioned_symbol (libm, __feraiseexcept, feraiseexcept, GLIBC_2_2);
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fsetexcptflg.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fsetexcptflg.c
new file mode 100644
index 0000000000..f1e6a02681
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/fsetexcptflg.c
@@ -0,0 +1,55 @@
+/* Set floating-point environment exception handling.  e500 version.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+int
+__fesetexceptflag (const fexcept_t *flagp, int excepts)
+{
+  unsigned long old_spefscr, spefscr;
+  fexcept_t flag;
+  int excepts_spe = __fexcepts_to_spe (excepts);
+
+  /* Get the current state.  */
+  old_spefscr = fegetenv_register ();
+
+  /* Ignore exceptions not listed in 'excepts'.  */
+  flag = *flagp & excepts_spe;
+
+  /* Replace the exception status.  */
+  spefscr = (old_spefscr & ~excepts_spe) | flag;
+
+  /* Store the new status word (along with the rest of the environment).  */
+  fesetenv_register (spefscr);
+
+  /* If the state of the "invalid" or "underflow" flag has changed,
+     inform the kernel.  */
+  if (((spefscr ^ old_spefscr) & (SPEFSCR_FINVS | SPEFSCR_FUNFS)) != 0)
+    __fe_note_change ();
+
+  /* Success.  */
+  return 0;
+}
+
+#include <shlib-compat.h>
+#if SHLIB_COMPAT (libm, GLIBC_2_1, GLIBC_2_2)
+strong_alias (__fesetexceptflag, __old_fesetexceptflag)
+compat_symbol (libm, __old_fesetexceptflag, fesetexceptflag, GLIBC_2_1);
+#endif
+
+versioned_symbol (libm, __fesetexceptflag, fesetexceptflag, GLIBC_2_2);
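fegetexceptflag and fesetexceptflag work on an opaque fexcept_t snapshot rather than on FE_* masks; the e500 version stores raw SPEFSCR status bits in it and informs the kernel when the "invalid" or "underflow" state changes.  Portable usage looks like this (link with -lm):

#include <fenv.h>
#include <stdio.h>

int
main (void)
{
  fexcept_t saved;

  /* Capture the current overflow/inexact state.  */
  feraiseexcept (FE_OVERFLOW);
  fegetexceptflag (&saved, FE_OVERFLOW | FE_INEXACT);

  /* Clobber all flags, then restore only the captured subset.  */
  feclearexcept (FE_ALL_EXCEPT);
  fesetexceptflag (&saved, FE_OVERFLOW | FE_INEXACT);

  printf ("overflow restored: %d, invalid untouched: %d\n",
          fetestexcept (FE_OVERFLOW) != 0,
          fetestexcept (FE_INVALID) != 0);
  return 0;
}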
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/ftestexcept.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/ftestexcept.c
new file mode 100644
index 0000000000..05040d7224
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/ftestexcept.c
@@ -0,0 +1,31 @@
+/* Test exception in current environment.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+int
+fetestexcept (int excepts)
+{
+  unsigned long f;
+
+  /* Get the current state.  */
+  f = fegetenv_register ();
+
+  return __fexcepts_from_spe (f) & excepts;
+}
+libm_hidden_def (fetestexcept)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/get-rounding-mode.h b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/get-rounding-mode.h
new file mode 100644
index 0000000000..117e7331e9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/get-rounding-mode.h
@@ -0,0 +1,4 @@
+/* The generic version of get-rounding-mode.h using fpu_control.h, not
+   the one using the software rounding mode, is correct for e500.  */
+
+#include <sysdeps/generic/get-rounding-mode.h>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/s_fabsf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/s_fabsf.S
new file mode 100644
index 0000000000..9d00b62923
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/s_fabsf.S
@@ -0,0 +1,27 @@
+/* Floating-point absolute value.  e500 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+ENTRY (__fabsf)
+/* float [r3] fabsf (float [r3] x) ;  */
+	efsabs r3,r3
+	blr
+END (__fabsf)
+
+weak_alias (__fabsf, fabsf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/spe-raise.c b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/spe-raise.c
new file mode 100644
index 0000000000..cc13c67786
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/e500/nofpu/spe-raise.c
@@ -0,0 +1,53 @@
+/* Raise given exceptions, given the SPEFSCR bits for those exceptions.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <fenv_libc.h>
+
+int
+__FERAISEEXCEPT_INTERNAL (int excepts)
+{
+  unsigned long f;
+
+  f = fegetenv_register ();
+  f |= (excepts & SPEFSCR_ALL_EXCEPT);
+  fesetenv_register (f);
+
+  /* Force the operations that cause the exceptions.  */
+  if ((SPEFSCR_FINVS & excepts) != 0)
+    /* 0 / 0 */
+    asm volatile ("efsdiv %0,%0,%1" : : "r" (0), "r" (0));
+
+  if ((SPEFSCR_FDBZS & excepts) != 0)
+    /* 1.0 / 0.0 */
+    asm volatile ("efsdiv %0,%0,%1" : : "r" (1.0F), "r" (0));
+
+  if ((SPEFSCR_FOVFS & excepts) != 0)
+    /* Largest normalized number plus itself.  */
+    asm volatile ("efsadd %0,%0,%1" : : "r" (0x7f7fffff), "r" (0x7f7fffff));
+
+  if ((SPEFSCR_FUNFS & excepts) != 0)
+    /* Smallest normalized number times itself.  */
+    asm volatile ("efsmul %0,%0,%1" : : "r" (0x800000), "r" (0x800000));
+
+  if ((SPEFSCR_FINXS & excepts) != 0)
+    /* Smallest normalized minus 1.0 raises the inexact flag.  */
+    asm volatile ("efssub %0,%0,%1" : : "r" (0x00800000), "r" (1.0F));
+
+  /* Success.  */
+  return 0;
+}
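spe-raise.c first sets the sticky bits and then performs real SPE arithmetic so that any enabled traps actually fire.  The same "force the exception with arithmetic" idea can be written portably; a rough host-side analogue using ordinary IEEE double operations (volatile keeps the compiler from folding them away, report is an illustrative helper, and the exact flag set raised by each line can vary slightly between implementations):

#include <fenv.h>
#include <float.h>
#include <stdio.h>

static void
report (const char *what)
{
  int e = fetestexcept (FE_ALL_EXCEPT);
  printf ("%-8s invalid=%d divbyzero=%d overflow=%d underflow=%d inexact=%d\n",
          what,
          (e & FE_INVALID) != 0, (e & FE_DIVBYZERO) != 0,
          (e & FE_OVERFLOW) != 0, (e & FE_UNDERFLOW) != 0,
          (e & FE_INEXACT) != 0);
  feclearexcept (FE_ALL_EXCEPT);
}

int
main (void)
{
  volatile double zero = 0.0, one = 1.0;
  volatile double big = DBL_MAX, tiny = DBL_MIN;
  volatile double r;

  r = zero / zero;  report ("0/0");       /* invalid */
  r = one / zero;   report ("1/0");       /* divide-by-zero */
  r = big * big;    report ("max*max");   /* overflow (+ inexact) */
  r = tiny * tiny;  report ("min*min");   /* underflow (+ inexact) */
  r = one / 3.0;    report ("1/3");       /* inexact */
  (void) r;
  return 0;
}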
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/Makefile b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/Makefile
new file mode 100644
index 0000000000..e05073970d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(subdir),misc)
+sysdep_routines += fprsave fprrest
+endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
new file mode 100644
index 0000000000..c01c94dfb7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
@@ -0,0 +1,178 @@
+/* longjmp for PowerPC.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <stap-probe.h>
+#define _ASM
+#ifdef __NO_VMX__
+# include <novmxsetjmp.h>
+#else
+# include <jmpbuf-offsets.h>
+#endif
+
+	.machine	"altivec"
+ENTRY (__longjmp_symbol)
+#ifndef __NO_VMX__
+# ifdef PIC
+	mflr    r6
+	cfi_register (lr,r6)
+	SETUP_GOT_ACCESS(r5,got_label)
+	addis	r5,r5,_GLOBAL_OFFSET_TABLE_-got_label@ha
+	addi	r5,r5,_GLOBAL_OFFSET_TABLE_-got_label@l
+#  ifdef SHARED
+#   if IS_IN (rtld)
+	/* Inside ld.so we use the local alias to avoid runtime GOT
+	   relocations.  */
+	lwz     r5,_rtld_local_ro@got(r5)
+#   else
+	lwz     r5,_rtld_global_ro@got(r5)
+#   endif
+	mtlr    r6
+	cfi_same_value (lr)
+	lwz     r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET+LOWORD(r5)
+#  else
+	lwz     r5,_dl_hwcap@got(r5)
+	mtlr    r6
+	cfi_same_value (lr)
+	lwz     r5,LOWORD(r5)
+#  endif
+# else
+	lis	r5,(_dl_hwcap+LOWORD)@ha
+	lwz     r5,(_dl_hwcap+LOWORD)@l(r5)
+# endif
+	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
+	beq	L(no_vmx)
+	la	r5,((JB_VRS)*4)(3)
+	andi.	r6,r5,0xf
+	lwz	r0,((JB_VRSAVE)*4)(3)
+	mtspr	VRSAVE,r0
+	beq+	L(aligned_restore_vmx)
+	addi    r6,r5,16
+	lvsl	v0,0,r5
+	lvx	v1,0,r5
+	addi    r5,r5,32
+	lvx	v21,0,r6
+	vperm   v20,v1,v21,v0
+# define load_misaligned_vmx_lo_loaded(loadvr,lovr,shiftvr,loadgpr,addgpr) \
+	addi    addgpr,addgpr,32; \
+	lvx	lovr,0,loadgpr; \
+	vperm   loadvr,loadvr,lovr,shiftvr;
+	load_misaligned_vmx_lo_loaded(v21,v22,v0,r5,r6)
+	load_misaligned_vmx_lo_loaded(v22,v23,v0,r6,r5)
+	load_misaligned_vmx_lo_loaded(v23,v24,v0,r5,r6)
+	load_misaligned_vmx_lo_loaded(v24,v25,v0,r6,r5)
+	load_misaligned_vmx_lo_loaded(v25,v26,v0,r5,r6)
+	load_misaligned_vmx_lo_loaded(v26,v27,v0,r6,r5)
+	load_misaligned_vmx_lo_loaded(v27,v28,v0,r5,r6)
+	load_misaligned_vmx_lo_loaded(v28,v29,v0,r6,r5)
+	load_misaligned_vmx_lo_loaded(v29,v30,v0,r5,r6)
+	load_misaligned_vmx_lo_loaded(v30,v31,v0,r6,r5)
+	lvx	v1,0,r5
+	vperm   v31,v31,v1,v0
+	b       L(no_vmx)
+L(aligned_restore_vmx):
+	addi	r6,r5,16
+	lvx	v20,0,r5
+	addi	r5,r5,32
+	lvx	v21,0,r6
+	addi	r6,r6,32
+	lvx	v22,0,r5
+	addi	r5,r5,32
+	lvx	v23,0,r6
+	addi	r6,r6,32
+	lvx	v24,0,r5
+	addi	r5,r5,32
+	lvx	v25,0,r6
+	addi	r6,r6,32
+	lvx	v26,0,r5
+	addi	r5,r5,32
+	lvx	v27,0,r6
+	addi	r6,r6,32
+	lvx	v28,0,r5
+	addi	r5,r5,32
+	lvx	v29,0,r6
+	addi	r6,r6,32
+	lvx	v30,0,r5
+	lvx	v31,0,r6
+L(no_vmx):
+#endif
+#if defined PTR_DEMANGLE || defined CHECK_SP
+	lwz r24,(JB_GPR1*4)(r3)
+# ifdef CHECK_SP
+#  ifdef PTR_DEMANGLE
+	PTR_DEMANGLE3 (r24, r24, r25)
+#  endif
+	CHECK_SP (r24)
+	mr r1,r24
+# endif
+#else
+	lwz r1,(JB_GPR1*4)(r3)
+#endif
+	lwz r0,(JB_LR*4)(r3)
+	lwz r14,((JB_GPRS+0)*4)(r3)
+	lfd fp14,((JB_FPRS+0*2)*4)(r3)
+	lwz r15,((JB_GPRS+1)*4)(r3)
+	lfd fp15,((JB_FPRS+1*2)*4)(r3)
+	lwz r16,((JB_GPRS+2)*4)(r3)
+	lfd fp16,((JB_FPRS+2*2)*4)(r3)
+	lwz r17,((JB_GPRS+3)*4)(r3)
+	lfd fp17,((JB_FPRS+3*2)*4)(r3)
+	lwz r18,((JB_GPRS+4)*4)(r3)
+	lfd fp18,((JB_FPRS+4*2)*4)(r3)
+	lwz r19,((JB_GPRS+5)*4)(r3)
+	lfd fp19,((JB_FPRS+5*2)*4)(r3)
+	lwz r20,((JB_GPRS+6)*4)(r3)
+	lfd fp20,((JB_FPRS+6*2)*4)(r3)
+#ifdef PTR_DEMANGLE
+# ifndef CHECK_SP
+	PTR_DEMANGLE3 (r1, r24, r25)
+# endif
+	PTR_DEMANGLE2 (r0, r25)
+#endif
+	/* longjmp/longjmp_target probe expects longjmp first argument (4@3),
+	   second argument (-4@4), and target address (4@0), respectively.  */
+	LIBC_PROBE (longjmp, 3, 4@3, -4@4, 4@0)
+	mtlr r0
+	lwz r21,((JB_GPRS+7)*4)(r3)
+	lfd fp21,((JB_FPRS+7*2)*4)(r3)
+	lwz r22,((JB_GPRS+8)*4)(r3)
+	lfd fp22,((JB_FPRS+8*2)*4)(r3)
+	lwz r5,(JB_CR*4)(r3)
+	lwz r23,((JB_GPRS+9)*4)(r3)
+	lfd fp23,((JB_FPRS+9*2)*4)(r3)
+	lwz r24,((JB_GPRS+10)*4)(r3)
+	lfd fp24,((JB_FPRS+10*2)*4)(r3)
+	lwz r25,((JB_GPRS+11)*4)(r3)
+	lfd fp25,((JB_FPRS+11*2)*4)(r3)
+	mtcrf 0xFF,r5
+	lwz r26,((JB_GPRS+12)*4)(r3)
+	lfd fp26,((JB_FPRS+12*2)*4)(r3)
+	lwz r27,((JB_GPRS+13)*4)(r3)
+	lfd fp27,((JB_FPRS+13*2)*4)(r3)
+	lwz r28,((JB_GPRS+14)*4)(r3)
+	lfd fp28,((JB_FPRS+14*2)*4)(r3)
+	lwz r29,((JB_GPRS+15)*4)(r3)
+	lfd fp29,((JB_FPRS+15*2)*4)(r3)
+	lwz r30,((JB_GPRS+16)*4)(r3)
+	lfd fp30,((JB_FPRS+16*2)*4)(r3)
+	lwz r31,((JB_GPRS+17)*4)(r3)
+	lfd fp31,((JB_FPRS+17*2)*4)(r3)
+	LIBC_PROBE (longjmp_target, 3, 4@3, -4@4, 4@0)
+	mr r3,r4
+	blr
+END (__longjmp_symbol)
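The restore path above is gated entirely on the AltiVec bit in _dl_hwcap: only when PPC_FEATURE_HAS_ALTIVEC is set are VRSAVE and the non-volatile vector registers v20-v31 reloaded from the jmp_buf, with a separate lvsl/vperm path for save areas that are not 16-byte aligned. A minimal C sketch of that runtime gate, assuming getauxval() and PPC_FEATURE_HAS_ALTIVEC from <sys/auxv.h>; the assembly reads the same bit through the GOT or _rtld_global_ro instead:

#include <stdbool.h>
#include <sys/auxv.h>

/* Sketch only: decide whether the VMX part of a jmp_buf is live.  */
static bool
must_restore_vmx (void)
{
  return (getauxval (AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC) != 0;
}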
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/__longjmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/__longjmp.S
new file mode 100644
index 0000000000..0e62245927
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/__longjmp.S
@@ -0,0 +1,40 @@
+/* AltiVec/VMX (new) version of __longjmp for PowerPC.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <libc-symbols.h>
+#include <rtld-global-offsets.h>
+#include <shlib-compat.h>
+
+#if !IS_IN (libc)
+/* Build a non-versioned object for rtld-*.  */
+# define __longjmp_symbol __longjmp
+# include "__longjmp-common.S"
+
+#else /* IS_IN (libc) */
+strong_alias (__vmx__longjmp, __longjmp)
+# define __longjmp_symbol  __vmx__longjmp
+# include "__longjmp-common.S"
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+#  define __NO_VMX__
+#  undef JB_SIZE
+#  undef __longjmp_symbol
+#  define __longjmp_symbol  __novmx__longjmp
+#  include "__longjmp-common.S"
+# endif
+#endif /* IS_IN (libc) */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/configure b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/configure
new file mode 100644
index 0000000000..98c6f30ca3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/configure
@@ -0,0 +1,56 @@
+# This file is generated from configure.ac by Autoconf.  DO NOT EDIT!
+ # Local configure fragment for sysdeps/powerpc/powerpc32/fpu.
+
+# Test whether integer to floating point conversions use fcfid.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fcfid use" >&5
+$as_echo_n "checking for fcfid use... " >&6; }
+if ${libc_cv_ppc_fcfid+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  echo 'double foo (int x) { return (double) x; }' > conftest.c
+libc_cv_ppc_fcfid=no
+if { ac_try='${CC-cc} -S $CFLAGS conftest.c -o conftest.s 1>&5'
+  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+  (eval $ac_try) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+  if grep '[ 	]fcfid' conftest.s > /dev/null 2>&1; then
+    libc_cv_ppc_fcfid=yes
+  fi
+fi
+rm -rf conftest*
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_ppc_fcfid" >&5
+$as_echo "$libc_cv_ppc_fcfid" >&6; }
+if test $libc_cv_ppc_fcfid = yes; then
+  $as_echo "#define HAVE_PPC_FCFID 1" >>confdefs.h
+
+fi
+
+# Test whether floating point to long long conversions use fctidz.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fctidz use" >&5
+$as_echo_n "checking for fctidz use... " >&6; }
+if ${libc_cv_ppc_fctidz+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  echo 'long long int foo (double x) { return (long long int) x; }' > conftest.c
+libc_cv_ppc_fctidz=no
+if { ac_try='${CC-cc} -S $CFLAGS conftest.c -o conftest.s 1>&5'
+  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+  (eval $ac_try) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+  if grep '[ 	]fctidz' conftest.s > /dev/null 2>&1; then
+    libc_cv_ppc_fctidz=yes
+  fi
+fi
+rm -rf conftest*
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_ppc_fctidz" >&5
+$as_echo "$libc_cv_ppc_fctidz" >&6; }
+if test $libc_cv_ppc_fctidz = yes; then
+  $as_echo "#define HAVE_PPC_FCTIDZ 1" >>confdefs.h
+
+fi
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/configure.ac b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/configure.ac
new file mode 100644
index 0000000000..1899705aab
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/configure.ac
@@ -0,0 +1,34 @@
+GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
+# Local configure fragment for sysdeps/powerpc/powerpc32/fpu.
+
+# Test whether integer to floating point conversions use fcfid.
+AC_CACHE_CHECK([for fcfid use], [libc_cv_ppc_fcfid], [dnl
+echo 'double foo (int x) { return (double) x; }' > conftest.c
+libc_cv_ppc_fcfid=no
+if AC_TRY_COMMAND(${CC-cc} -S $CFLAGS conftest.c -o conftest.s 1>&AS_MESSAGE_LOG_FD); then
+changequote(,)dnl
+  if grep '[ 	]fcfid' conftest.s > /dev/null 2>&1; then
+    libc_cv_ppc_fcfid=yes
+  fi
+changequote([,])dnl
+fi
+rm -rf conftest*])
+if test $libc_cv_ppc_fcfid = yes; then
+  AC_DEFINE([HAVE_PPC_FCFID])
+fi
+
+# Test whether floating point to long long conversions use fctidz.
+AC_CACHE_CHECK([for fctidz use], [libc_cv_ppc_fctidz], [dnl
+echo 'long long int foo (double x) { return (long long int) x; }' > conftest.c
+libc_cv_ppc_fctidz=no
+if AC_TRY_COMMAND(${CC-cc} -S $CFLAGS conftest.c -o conftest.s 1>&AS_MESSAGE_LOG_FD); then
+changequote(,)dnl
+  if grep '[ 	]fctidz' conftest.s > /dev/null 2>&1; then
+    libc_cv_ppc_fctidz=yes
+  fi
+changequote([,])dnl
+fi
+rm -rf conftest*])
+if test $libc_cv_ppc_fctidz = yes; then
+  AC_DEFINE([HAVE_PPC_FCTIDZ])
+fi
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fix-int-fp-convert-zero.h b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fix-int-fp-convert-zero.h
new file mode 100644
index 0000000000..bd3eb62bf5
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fix-int-fp-convert-zero.h
@@ -0,0 +1,28 @@
+/* Fix for conversion of integer 0 to floating point.  PowerPC version.
+   Copyright (C) 2015-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef FIX_INT_FP_CONVERT_ZERO_H
+#define FIX_INT_FP_CONVERT_ZERO_H	1
+
+/* The code sequences GCC generates for conversion of integers to
+   floating point result in -0 instead of +0 in FE_DOWNWARD mode when
+   the fcfid instruction is not used, as of GCC 5.  See
+   <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67771>.  */
+#define FIX_INT_FP_CONVERT_ZERO (!HAVE_PPC_FCFID)
+
+#endif /* fix-int-fp-convert-zero.h */
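To make the macro's intent concrete, here is a hedged sketch of how a conversion helper could use it; int_to_double is an illustrative name, not a glibc function, and HAVE_PPC_FCFID is assumed to come from the configure check added earlier in this patch:

#include <fix-int-fp-convert-zero.h>

/* Illustrative only: force +0.0 when the fcfid-less code sequence
   would produce -0.0 for a zero input in FE_DOWNWARD mode.  */
double
int_to_double (int i)
{
  if (FIX_INT_FP_CONVERT_ZERO && i == 0)
    return 0.0;
  return (double) i;
}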
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fprrest.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fprrest.S
new file mode 100644
index 0000000000..7b2346471b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fprrest.S
@@ -0,0 +1,94 @@
+/* Copyright (C) 2000-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/*
+    Floating Point Registers (FPRs) restore routine
+*/
+
+#include <sysdep.h>
+
+ENTRY(_restfpr_all)
+		.globl C_TEXT(_restf14)
+		.globl C_TEXT(_restfpr_14)
+C_TEXT(_restf14):
+C_TEXT(_restfpr_14):	lfd	fp14,-144(r1)
+		.globl C_TEXT(_restf15)
+		.globl C_TEXT(_restfpr_15)
+C_TEXT(_restf15):
+C_TEXT(_restfpr_15):	lfd	fp15,-136(r1)
+		.globl C_TEXT(_restf16)
+		.globl C_TEXT(_restfpr_16)
+C_TEXT(_restf16):
+C_TEXT(_restfpr_16):	lfd	fp16,-128(r1)
+		.globl C_TEXT(_restf17)
+		.globl C_TEXT(_restfpr_17)
+C_TEXT(_restf17):
+C_TEXT(_restfpr_17):	lfd	fp17,-120(r1)
+		.globl C_TEXT(_restf18)
+		.globl C_TEXT(_restfpr_18)
+C_TEXT(_restf18):
+C_TEXT(_restfpr_18):	lfd	fp18,-112(r1)
+		.globl C_TEXT(_restf19)
+		.globl C_TEXT(_restfpr_19)
+C_TEXT(_restf19):
+C_TEXT(_restfpr_19):	lfd	fp19,-104(r1)
+		.globl C_TEXT(_restf20)
+		.globl C_TEXT(_restfpr_20)
+C_TEXT(_restf20):
+C_TEXT(_restfpr_20):	lfd	fp20,-96(r1)
+		.globl C_TEXT(_restf21)
+		.globl C_TEXT(_restfpr_21)
+C_TEXT(_restf21):
+C_TEXT(_restfpr_21):	lfd	fp21,-88(r1)
+		.globl C_TEXT(_restf22)
+		.globl C_TEXT(_restfpr_22)
+C_TEXT(_restf22):
+C_TEXT(_restfpr_22):	lfd	fp22,-80(r1)
+		.globl C_TEXT(_restf23)
+		.globl C_TEXT(_restfpr_23)
+C_TEXT(_restf23):
+C_TEXT(_restfpr_23):	lfd	fp23,-72(r1)
+		.globl C_TEXT(_restf24)
+		.globl C_TEXT(_restfpr_24)
+C_TEXT(_restf24):
+C_TEXT(_restfpr_24):	lfd	fp24,-64(r1)
+		.globl C_TEXT(_restf25)
+		.globl C_TEXT(_restfpr_25)
+C_TEXT(_restf25):
+C_TEXT(_restfpr_25):	lfd	fp25,-56(r1)
+		.globl C_TEXT(_restf26)
+		.globl C_TEXT(_restfpr_26)
+C_TEXT(_restf26):
+C_TEXT(_restfpr_26):	lfd	fp26,-48(r1)
+		.globl C_TEXT(_restf27)
+		.globl C_TEXT(_restfpr_27)
+C_TEXT(_restf27):
+C_TEXT(_restfpr_27):	lfd	fp27,-40(r1)
+		.globl C_TEXT(_restf28)
+		.globl C_TEXT(_restfpr_28)
+C_TEXT(_restf28):
+C_TEXT(_restfpr_28):	lfd	fp28,-32(r1)
+		.globl C_TEXT(_restf29)
+		.globl C_TEXT(_restfpr_29)
+C_TEXT(_restf29):
+C_TEXT(_restfpr_29):	lwz	r0,8(r1)	#get return address from frame
+			lfd	fp29,-24(r1)    #restore f29
+			mtlr	r0		#move return address to LR
+			lfd	fp30,-16(r1)	#restore f30
+			lfd	fp31,-8(r1)	#restore f31
+			blr			#return
+END (_restfpr_all)
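These out-of-line helpers are the register restore entry points compilers can call when optimizing for size; _restfpr_14 falls through to _restfpr_15 and so on, each fpN sits at a fixed offset below the caller's stack pointer, and the return address is reloaded from 8(r1). A small sketch of the offset rule implied by the layout above:

/* Sketch: _savefpr_N/_restfpr_N place fpN at -8 * (32 - N) bytes from r1,
   so fp14 sits at -144(r1) and fp31 at -8(r1).  */
static inline int
fpr_save_offset (int n)   /* n in [14, 31] */
{
  return -8 * (32 - n);
}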
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fprsave.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fprsave.S
new file mode 100644
index 0000000000..975a8216cb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/fprsave.S
@@ -0,0 +1,111 @@
+/* Copyright (C) 2000-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/*
+    Floating Point Registers (FPRs) save routine
+*/
+
+#include <sysdep.h>
+
+ENTRY(_savefpr_all)
+                .globl C_TEXT(_savef14)
+		.globl C_TEXT(_savefpr_14)
+C_TEXT(_savef14):
+C_TEXT(_savefpr_14):	stfd	fp14,-144(r1)
+			cfi_offset(fp14,-144)
+		.globl C_TEXT(_savef15)
+		.globl C_TEXT(_savefpr_15)
+C_TEXT(_savef15):
+C_TEXT(_savefpr_15):	stfd	fp15,-136(r1)
+			cfi_offset(fp15,-136)
+		.globl C_TEXT(_savef16)
+		.globl C_TEXT(_savefpr_16)
+C_TEXT(_savef16):
+C_TEXT(_savefpr_16):	stfd	fp16,-128(r1)
+			cfi_offset(fp16,-128)
+		.globl C_TEXT(_savef17)
+		.globl C_TEXT(_savefpr_17)
+C_TEXT(_savef17):
+C_TEXT(_savefpr_17):	stfd	fp17,-120(r1)
+			cfi_offset(fp17,-120)
+		.globl C_TEXT(_savef18)
+		.globl C_TEXT(_savefpr_18)
+C_TEXT(_savef18):
+C_TEXT(_savefpr_18):	stfd	fp18,-112(r1)
+			cfi_offset(fp18,-112)
+		.globl C_TEXT(_savef19)
+		.globl C_TEXT(_savefpr_19)
+C_TEXT(_savef19):
+C_TEXT(_savefpr_19):	stfd	fp19,-104(r1)
+			cfi_offset(fp19,-104)
+		.globl C_TEXT(_savef20)
+		.globl C_TEXT(_savefpr_20)
+C_TEXT(_savef20):
+C_TEXT(_savefpr_20):	stfd	fp20,-96(r1)
+			cfi_offset(fp20,-96)
+		.globl C_TEXT(_savef21)
+		.globl C_TEXT(_savefpr_21)
+C_TEXT(_savef21):
+C_TEXT(_savefpr_21):	stfd	fp21,-88(r1)
+			cfi_offset(fp21,-88)
+		.globl C_TEXT(_savef22)
+		.globl C_TEXT(_savefpr_22)
+C_TEXT(_savef22):
+C_TEXT(_savefpr_22):	stfd	fp22,-80(r1)
+			cfi_offset(fp22,-80)
+		.globl C_TEXT(_savef23)
+		.globl C_TEXT(_savefpr_23)
+C_TEXT(_savef23):
+C_TEXT(_savefpr_23):	stfd	fp23,-72(r1)
+			cfi_offset(fp23,-72)
+		.globl C_TEXT(_savef24)
+		.globl C_TEXT(_savefpr_24)
+C_TEXT(_savef24):
+C_TEXT(_savefpr_24):	stfd	fp24,-64(r1)
+			cfi_offset(fp24,-64)
+		.globl C_TEXT(_savef25)
+		.globl C_TEXT(_savefpr_25)
+C_TEXT(_savef25):
+C_TEXT(_savefpr_25):	stfd	fp25,-56(r1)
+			cfi_offset(fp25,-56)
+		.globl C_TEXT(_savef26)
+		.globl C_TEXT(_savefpr_26)
+C_TEXT(_savef26):
+C_TEXT(_savefpr_26):	stfd	fp26,-48(r1)
+			cfi_offset(fp26,-48)
+		.globl C_TEXT(_savef27)
+		.globl C_TEXT(_savefpr_27)
+C_TEXT(_savef27):
+C_TEXT(_savefpr_27):	stfd	fp27,-40(r1)
+			cfi_offset(fp27,-40)
+		.globl C_TEXT(_savef28)
+		.globl C_TEXT(_savefpr_28)
+C_TEXT(_savef28):
+C_TEXT(_savefpr_28):	stfd	fp28,-32(r1)
+			cfi_offset(fp28,-32)
+		.globl C_TEXT(_savef29)
+		.globl C_TEXT(_savefpr_29)
+C_TEXT(_savef29):
+C_TEXT(_savefpr_29):	stfd	fp29,-24(r1)	#save f29
+			stfd	fp30,-16(r1)	#save f30
+			stfd	fp31,-8(r1)	#save f31
+			cfi_offset(fp29,-24)
+			cfi_offset(fp30,-16)
+			cfi_offset(fp31,-8)
+			stw	r0,8(r1)	#save LR in caller's frame
+			blr			#return
+END (_savefpr_all)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_ceil.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
new file mode 100644
index 0000000000..51b8c21027
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
@@ -0,0 +1,83 @@
+/* ceil function.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 2**52 */
+	.long 0x59800000
+
+	.section	".text"
+ENTRY (__ceil)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfs	fp13,.LC0-got_label@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsub	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO52)  */
+	mffs	fp11		/* Save current FPU rounding mode and
+				   "inexact" state.  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl-	cr7,.L10
+	mtfsfi	7,2		/* Set rounding mode toward +inf.  */
+	ble-	cr6,.L4
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fsub	fp1,fp1,fp13	/* x-= TWO52;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = 0.0; */
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L4:
+	bge-	cr6,.L9		/* if (x < 0.0)  */
+	fsub	fp1,fp1,fp13	/* x-= TWO52;  */
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = -0.0; */
+.L9:
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadd	fp1,fp1,fp1
+	blr
+	END (__ceil)
+
+weak_alias (__ceil, ceil)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__ceil, ceill)
+strong_alias (__ceil, __ceill)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __ceil, ceill, GLIBC_2_0)
+#endif
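The core of __ceil is the classic 2**52 trick: for |x| < 2**52, adding and then subtracting TWO52 in round-toward-+inf mode leaves exactly ceil(x), and the final fabs/fnabs fixes the sign of a zero result. A C rendering of the same idea, assuming <fenv.h> and <math.h>; this is a sketch of the technique, not the exact glibc code path (the assembly restores the whole FPSCR, which also discards the "inexact" flag raised by the additions):

#include <fenv.h>
#include <math.h>

static double
ceil_sketch (double x)
{
  const double TWO52 = 0x1p52;
  if (isnan (x))
    return x + x;                      /* turn an sNaN into a qNaN */
  if (!(fabs (x) < TWO52))
    return x;                          /* already integral (or infinite) */
  fenv_t env;
  fegetenv (&env);
  fesetround (FE_UPWARD);              /* round toward +inf */
  if (x > 0.0)
    {
      volatile double t = x + TWO52;   /* volatile keeps the compiler from folding */
      x = fabs (t - TWO52);            /* a zero result must be +0.0 */
    }
  else if (x < 0.0)
    {
      volatile double t = x - TWO52;
      x = -fabs (t + TWO52);           /* ceil of (-1, 0) is -0.0 */
    }
  fesetenv (&env);                     /* restore the mode, drop "inexact" */
  return x;
}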
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
new file mode 100644
index 0000000000..9d8d8aa294
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
@@ -0,0 +1,75 @@
+/* float ceil function.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 2**23 */
+	.long 0x4b000000
+
+	.section	".text"
+ENTRY (__ceilf)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfs	fp13,.LC0-got_label@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsubs	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO23)  */
+	mffs	fp11		/* Save current FPU rounding mode and
+				   "inexact" state.  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl-	cr7,.L10
+	mtfsfi	7,2		/* Set rounding mode toward +inf.  */
+	ble-	cr6,.L4
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fsubs	fp1,fp1,fp13	/* x-= TWO23;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = 0.0; */
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L4:
+	bge-	cr6,.L9		/* if (x < 0.0)  */
+	fsubs	fp1,fp1,fp13	/* x-= TWO23;  */
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = -0.0; */
+.L9:
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadds	fp1,fp1,fp1
+	blr
+	END (__ceilf)
+
+weak_alias (__ceilf, ceilf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysign.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysign.S
new file mode 100644
index 0000000000..850dded3b6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysign.S
@@ -0,0 +1,59 @@
+/* Copy a sign bit between floating-point values.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This has been coded in assembler because GCC makes such a mess of it
+   when it's coded in C.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ENTRY(__copysign)
+/* double [f1] copysign (double [f1] x, double [f2] y);
+   copysign(x,y) returns a value with the magnitude of x and
+   with the sign bit of y.  */
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	stfd	fp2,8(r1)
+	lwz	r3,8+HIWORD(r1)
+	cmpwi   r3,0
+	addi    r1,r1,16
+	cfi_adjust_cfa_offset (-16)
+	blt     L(0)
+	fabs    fp1,fp1
+	blr
+L(0):	fnabs   fp1,fp1
+	blr
+	END (__copysign)
+
+weak_alias (__copysign,copysign)
+
+/* It turns out that it's safe to use this code even for single-precision.  */
+weak_alias (__copysign,copysignf)
+strong_alias(__copysign,__copysignf)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__copysign,copysignl)
+strong_alias(__copysign,__copysignl)
+#endif
+#if IS_IN (libm)
+# if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __copysign, copysignl, GLIBC_2_0)
+# endif
+#elif LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __copysign, copysignl, GLIBC_2_0)
+#endif
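The reason for going through memory above is that only the sign bit of y matters, so testing the high word with an integer compare avoids any floating-point comparison (and therefore any exception on signaling NaNs). A hedged C sketch of the same test; the memcpy-based bit access and the name copysign_sketch are illustrative, glibc itself uses its own word-access macros:

#include <math.h>
#include <stdint.h>
#include <string.h>

static double
copysign_sketch (double x, double y)
{
  uint64_t ybits;
  memcpy (&ybits, &y, sizeof ybits);           /* reinterpret y's bits */
  return (ybits >> 63) != 0 ? -fabs (x) : fabs (x);
}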
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysignf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysignf.S
new file mode 100644
index 0000000000..e05438ae7d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysignf.S
@@ -0,0 +1 @@
+/* __copysignf is in s_copysign.S  */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysignl.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysignl.S
new file mode 100644
index 0000000000..272032b49e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_copysignl.S
@@ -0,0 +1,66 @@
+/* Copy a sign bit between floating-point values.
+   IBM extended format long double version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ENTRY(__copysignl)
+/* long double [f1,f2] copysign (long double [f1,f2] x, long double [f3,f4] y);
+   copysign(x,y) returns a value with the magnitude of x and
+   with the sign bit of y.  */
+#ifdef _ARCH_PPCGR
+	/* fsel available.  */
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	stfd	fp3,8(r1)
+	fmr	fp0,fp1
+	fabs	fp1,fp1
+	lwz	r3,8+HIWORD(r1)
+	cmpwi	cr6,r3,0
+	addi	r1,r1,16
+	cfi_adjust_cfa_offset (-16)
+	fneg	fp3,fp2
+	fsel	fp2,fp0,fp2,fp3
+	bgelr	cr6
+	fneg	fp1,fp1
+	fneg	fp2,fp2
+	blr
+#else
+	stwu	r1,-32(r1)
+	cfi_adjust_cfa_offset (32)
+	stfd	fp3,8(r1)
+	stfd	fp1,16(r1)
+	lwz	r3,8+HIWORD(r1)
+	lwz	r4,16+HIWORD(r1)
+	xor	r3,r3,r4
+	cmpwi	cr6,r3,0
+	addi	r1,r1,32
+	cfi_adjust_cfa_offset (-32)
+	bgelr	cr6
+	fneg	fp1,fp1
+	fneg	fp2,fp2
+	blr
+#endif
+END (__copysignl)
+
+#if IS_IN (libm)
+long_double_symbol (libm, __copysignl, copysignl)
+#else
+long_double_symbol (libc, __copysignl, copysignl)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fabs.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fabs.S
new file mode 100644
index 0000000000..53d21301ee
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fabs.S
@@ -0,0 +1,5 @@
+#include <math_ldbl_opt.h>
+#include <sysdeps/powerpc/fpu/s_fabs.S>
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __fabs, fabsl, GLIBC_2_0)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fabsl.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fabsl.S
new file mode 100644
index 0000000000..75608ec70c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fabsl.S
@@ -0,0 +1,52 @@
+/* Absolute value of a floating-point value.
+   IBM extended format long double version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ENTRY(__fabsl)
+/* long double [f1,f2] fabs (long double [f1,f2] x);
+   fabs(x) returns the absolute value of x.  For IBM long double the
+   low double must change sign together with the high double so that
+   hi + lo keeps the intended magnitude.  */
+#ifdef _ARCH_PPCGR
+	/* fsel available.  */
+	fmr	fp0,fp1
+#else
+	/* Use integer operations to test sign of high part to avoid
+	   exceptions on sNaNs.  */
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	stfd	fp1,8(r1)
+#endif
+	fabs	fp1,fp1
+#ifdef _ARCH_PPCGR
+	fneg	fp3,fp2
+	fsel	fp2,fp0,fp2,fp3
+#else
+	lwz	r3,8+HIWORD(r1)
+	cmpwi	cr6,r3,0
+	addi	r1,r1,16
+	cfi_adjust_cfa_offset (-16)
+	bgelr	cr6
+	fneg	fp2,fp2
+#endif
+	blr
+END (__fabsl)
+
+long_double_symbol (libm, __fabsl, fabsl)
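Because an IBM long double is the sum of two doubles, taking the absolute value of the high part is not enough: whenever the high part changes sign, the low part has to change sign as well so that hi + lo still equals |x|. A C sketch of that rule, assuming a hypothetical ibm128 struct of two doubles (the real code keeps the halves in fp1/fp2):

#include <math.h>
#include <stdint.h>
#include <string.h>

typedef struct { double hi, lo; } ibm128;   /* illustrative layout */

static ibm128
fabsl_sketch (ibm128 x)
{
  uint64_t hbits;
  memcpy (&hbits, &x.hi, sizeof hbits);     /* integer sign test, no FP traps */
  x.hi = fabs (x.hi);
  if ((hbits >> 63) != 0)                   /* original high part was negative */
    x.lo = -x.lo;
  return x;
}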
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_floor.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_floor.S
new file mode 100644
index 0000000000..90a1b184df
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_floor.S
@@ -0,0 +1,83 @@
+/* Floor function.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 2**52 */
+	.long 0x59800000
+
+	.section	".text"
+ENTRY (__floor)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfs	fp13,.LC0-got_label@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsub	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO52)  */
+	mffs	fp11		/* Save current FPU rounding mode and
+				   "inexact" state.  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl-	cr7,.L10
+	mtfsfi	7,3		/* Set rounding mode toward -inf.  */
+	ble-	cr6,.L4
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fsub	fp1,fp1,fp13	/* x-= TWO52;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = 0.0; */
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L4:
+	bge-	cr6,.L9		/* if (x < 0.0)  */
+	fsub	fp1,fp1,fp13	/* x-= TWO52;  */
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = -0.0; */
+.L9:
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadd	fp1,fp1,fp1
+	blr
+	END (__floor)
+
+weak_alias (__floor, floor)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__floor, floorl)
+strong_alias (__floor, __floorl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __floor, floorl, GLIBC_2_0)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_floorf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
new file mode 100644
index 0000000000..b87e3bf33e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
@@ -0,0 +1,75 @@
+/* float Floor function.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 2**23 */
+	.long 0x4b000000
+
+	.section	".text"
+ENTRY (__floorf)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfs	fp13,.LC0-got_label@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsubs	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO23)  */
+	mffs	fp11		/* Save current FPU rounding mode and
+				   "inexact" state.  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl-	cr7,.L10
+	mtfsfi	7,3		/* Set rounding mode toward -inf.  */
+	ble-	cr6,.L4
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fsubs	fp1,fp1,fp13	/* x-= TWO23;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = 0.0; */
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L4:
+	bge-	cr6,.L9		/* if (x < 0.0)  */
+	fsubs	fp1,fp1,fp13	/* x-= TWO23;  */
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = -0.0; */
+.L9:
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadds	fp1,fp1,fp1
+	blr
+	END (__floorf)
+
+weak_alias (__floorf, floorf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fma.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fma.S
new file mode 100644
index 0000000000..d40695c633
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_fma.S
@@ -0,0 +1,5 @@
+#include <math_ldbl_opt.h>
+#include <sysdeps/powerpc/fpu/s_fma.S>
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __fma, fmal, GLIBC_2_1)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_isnan.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_isnan.S
new file mode 100644
index 0000000000..363535dcdb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_isnan.S
@@ -0,0 +1,57 @@
+/* isnan().  PowerPC32 version.
+   Copyright (C) 2008-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* int __isnan(x)  */
+	.machine power4
+EALIGN (__isnan, 4, 0)
+	mffs	fp0
+	mtfsb0	4*cr6+lt /* reset_fpscr_bit (FPSCR_VE) */
+	fcmpu	cr7,fp1,fp1
+	mtfsf	255,fp0
+	li	r3,0
+	beqlr+	cr7	/* (x == x) then not a NAN */
+	li	r3,1	/* else must be a NAN */
+	blr
+	END (__isnan)
+
+hidden_def (__isnan)
+weak_alias (__isnan, isnan)
+
+/* It turns out that the 'double' version will also always work for
+   single-precision.  */
+#ifndef __isnan
+strong_alias (__isnan, __isnanf)
+hidden_def (__isnanf)
+weak_alias (__isnanf, isnanf)
+#endif
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__isnan, __isnanl)
+weak_alias (__isnan, isnanl)
+#endif
+
+#if !IS_IN (libm)
+# if LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __isnan, __isnanl, GLIBC_2_0);
+compat_symbol (libc, isnan, isnanl, GLIBC_2_0);
+# endif
+#endif
+
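The whole test reduces to "a NaN is the only value that does not compare equal to itself"; the mffs/mtfsb0/mtfsf sequence around the compare only ensures that an enabled invalid-operation trap is not taken, and no sticky flag is left behind, when the input is a signaling NaN. The core idea in C, as a sketch only, since plain C has no portable equivalent of the FPSCR save/restore:

static int
isnan_sketch (double x)
{
  return x != x;    /* only NaNs are unequal to themselves */
}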
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llrint.c b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llrint.c
new file mode 100644
index 0000000000..13d150cd68
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llrint.c
@@ -0,0 +1,63 @@
+/* Round a double value to a long long in the current rounding mode.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <limits.h>
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <math_private.h>
+#include <stdint.h>
+
+long long int
+__llrint (double x)
+{
+  double rx = __rint (x);
+  if (HAVE_PPC_FCTIDZ || rx != x)
+    return (long long int) rx;
+  else
+    {
+      /* Avoid incorrect exceptions from libgcc conversions (as of GCC
+	 5): <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59412>.  */
+      if (fabs (rx) < 0x1p31)
+	return (long long int) (long int) rx;
+      uint64_t i0;
+      EXTRACT_WORDS64 (i0, rx);
+      int exponent = ((i0 >> 52) & 0x7ff) - 0x3ff;
+      if (exponent < 63)
+	{
+	  unsigned long long int mant
+	    = (i0 & ((1ULL << 52) - 1)) | (1ULL << 52);
+	  if (exponent < 52)
+	    mant >>= 52 - exponent;
+	  else
+	    mant <<= exponent - 52;
+	  return (long long int) ((i0 & (1ULL << 63)) != 0 ? -mant : mant);
+	}
+      else if (rx == (double) LLONG_MIN)
+	return LLONG_MIN;
+      else
+	return (long long int) (long int) rx << 32;
+    }
+}
+weak_alias (__llrint, llrint)
+#ifdef NO_LONG_DOUBLE
+strong_alias (__llrint, __llrintl)
+weak_alias (__llrint, llrintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __llrint, llrintl, GLIBC_2_1);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llrintf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llrintf.c
new file mode 100644
index 0000000000..46365452a8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llrintf.c
@@ -0,0 +1,46 @@
+/* Round a float value to a long long in the current rounding mode.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_private.h>
+#include <stdint.h>
+
+long long int
+__llrintf (float x)
+{
+  float rx = __rintf (x);
+  if (HAVE_PPC_FCTIDZ || rx != x)
+    return (long long int) rx;
+  else
+    {
+      float arx = fabsf (rx);
+      /* Avoid incorrect exceptions from libgcc conversions (as of GCC
+	 5): <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59412>.  */
+      if (arx < 0x1p31f)
+	return (long long int) (long int) rx;
+      else if (!(arx < 0x1p55f))
+	return (long long int) (long int) (rx * 0x1p-32f) << 32;
+      uint32_t i0;
+      GET_FLOAT_WORD (i0, rx);
+      int exponent = ((i0 >> 23) & 0xff) - 0x7f;
+      unsigned long long int mant = (i0 & 0x7fffff) | 0x800000;
+      mant <<= exponent - 23;
+      return (long long int) ((i0 & 0x80000000) != 0 ? -mant : mant);
+    }
+}
+weak_alias (__llrintf, llrintf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llround.c b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llround.c
new file mode 100644
index 0000000000..5e5a237b0c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llround.c
@@ -0,0 +1,90 @@
+/* Round double value to long long int.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <limits.h>
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <math_private.h>
+#include <stdint.h>
+
+/* Round to the nearest integer, with values exactly on a 0.5 boundary
+   rounded away from zero, regardless of the current rounding mode.
+   If the conversion (long long) x clips at LLONG_MAX or LLONG_MIN
+   when x is out of range of long long, then this implementation also
+   clips.  */
+
+long long int
+__llround (double x)
+{
+  long long xr;
+  if (HAVE_PPC_FCTIDZ)
+    xr = (long long) x;
+  else
+    {
+      /* Avoid incorrect exceptions from libgcc conversions (as of GCC
+	 5): <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59412>.  */
+      if (fabs (x) < 0x1p31)
+	xr = (long long int) (long int) x;
+      else
+	{
+	  uint64_t i0;
+	  EXTRACT_WORDS64 (i0, x);
+	  int exponent = ((i0 >> 52) & 0x7ff) - 0x3ff;
+	  if (exponent < 63)
+	    {
+	      unsigned long long int mant
+		= (i0 & ((1ULL << 52) - 1)) | (1ULL << 52);
+	      if (exponent < 52)
+		/* llround is not required to raise "inexact".  */
+		mant >>= 52 - exponent;
+	      else
+		mant <<= exponent - 52;
+	      xr = (long long int) ((i0 & (1ULL << 63)) != 0 ? -mant : mant);
+	    }
+	  else if (x == (double) LLONG_MIN)
+	    xr = LLONG_MIN;
+	  else
+	    xr = (long long int) (long int) x << 32;
+	}
+    }
+  /* Avoid spurious "inexact" converting LLONG_MAX to double, and from
+     subtraction when the result is out of range, by returning early
+     for arguments large enough that no rounding is needed.  */
+  if (!(fabs (x) < 0x1p52))
+    return xr;
+  double xrf = (double) xr;
+
+  if (x >= 0.0)
+    {
+      if (x - xrf >= 0.5)
+	xr += (long long) ((unsigned long long) xr + 1) > 0;
+    }
+  else
+    {
+      if (xrf - x >= 0.5)
+	xr -= (long long) ((unsigned long long) xr - 1) < 0;
+    }
+  return xr;
+}
+weak_alias (__llround, llround)
+#ifdef NO_LONG_DOUBLE
+strong_alias (__llround, __llroundl)
+weak_alias (__llround, llroundl)
+#endif
+#if LONG_DOUBLE_COMPAT (libm, GLIBC_2_1)
+compat_symbol (libm, __llround, llroundl, GLIBC_2_1);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llroundf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llroundf.c
new file mode 100644
index 0000000000..55452bac73
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_llroundf.c
@@ -0,0 +1,72 @@
+/* Round float value to long long int.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_private.h>
+#include <stdint.h>
+
+/* Round to the nearest integer, with values exactly on a 0.5 boundary
+   rounded away from zero, regardless of the current rounding mode.
+   If the conversion (long long) x clips at LLONG_MAX or LLONG_MIN
+   when x is out of range of long long, then this implementation also
+   clips.  */
+
+long long int
+__llroundf (float x)
+{
+  long long xr;
+  if (HAVE_PPC_FCTIDZ)
+    xr = (long long) x;
+  else
+    {
+      float ax = fabsf (x);
+      /* Avoid incorrect exceptions from libgcc conversions (as of GCC
+	 5): <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59412>.  */
+      if (ax < 0x1p31f)
+	xr = (long long int) (long int) x;
+      else if (!(ax < 0x1p55f))
+	xr = (long long int) (long int) (x * 0x1p-32f) << 32;
+      else
+	{
+	  uint32_t i0;
+	  GET_FLOAT_WORD (i0, x);
+	  int exponent = ((i0 >> 23) & 0xff) - 0x7f;
+	  unsigned long long int mant = (i0 & 0x7fffff) | 0x800000;
+	  mant <<= exponent - 23;
+	  xr = (long long int) ((i0 & 0x80000000) != 0 ? -mant : mant);
+	}
+    }
+  /* Avoid spurious "inexact" converting LLONG_MAX to float, and from
+     subtraction when the result is out of range, by returning early
+     for arguments large enough that no rounding is needed.  */
+  if (!(fabsf (x) < 0x1p23f))
+    return xr;
+  float xrf = (float) xr;
+
+  if (x >= 0.0)
+    {
+      if (x - xrf >= 0.5)
+	xr += (long long) ((unsigned long long) xr + 1) > 0;
+    }
+  else
+    {
+      if (xrf - x >= 0.5)
+	xr -= (long long) ((unsigned long long) xr - 1) < 0;
+    }
+  return xr;
+}
+weak_alias (__llroundf, llroundf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lrint.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lrint.S
new file mode 100644
index 0000000000..8d54d95b6a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lrint.S
@@ -0,0 +1,46 @@
+/* Round double to long int.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* long int[r3] __lrint (double x[fp1])  */
+ENTRY (__lrint)
+	stwu	r1,-16(r1)
+	fctiw	fp13,fp1
+	stfd	fp13,8(r1)
+	nop	/* Ensure the following load is in a different dispatch group */
+	nop	/* to avoid pipe stall on POWER4&5.  */
+	nop
+	lwz	r3,8+LOWORD(r1)
+	addi	r1,r1,16
+	blr
+	END (__lrint)
+
+weak_alias (__lrint, lrint)
+
+strong_alias (__lrint, __lrintf)
+weak_alias (__lrint, lrintf)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__lrint, __lrintl)
+weak_alias (__lrint, lrintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __lrint, lrintl, GLIBC_2_1)
+#endif
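fctiw converts fp1 to a 32-bit integer using the current rounding mode and leaves the result in the low word of the target FPR, which is then stored and reloaded as the return value (the nops keep the dependent load in a later dispatch group on POWER4/5). Functionally this is close to the following sketch, assuming <math.h>; overflow and exception-flag behaviour of the real fctiw path differ in detail:

#include <math.h>

static long int
lrint_sketch (double x)
{
  /* rint() rounds to an integral value in the current rounding mode,
     matching what fctiw does before the result is stored.  */
  return (long int) rint (x);
}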
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lround.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lround.S
new file mode 100644
index 0000000000..e4ec1bb0b6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lround.S
@@ -0,0 +1,129 @@
+/* lround function.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 0.5 */
+	.long 0x3f000000
+.LC1:	/* 2^52.  */
+	.long 0x59800000
+	.section	.rodata.cst8,"aM",@progbits,8
+	.align	3
+.LC2:	/* 0x7fffffff.8p0.  */
+	.long 0x41dfffff
+	.long 0xffe00000
+.LC3:	/* -0x80000000.8p0.  */
+	.long 0xc1e00000
+	.long 0x00100000
+	.section	".text"
+
+/* long [r3] lround (double x [fp1])
+   IEEE 1003.1 lround function.  IEEE specifies "round to the nearest
+   integer value, rounding halfway cases away from zero, regardless of
+   the current rounding mode."  However PowerPC Architecture defines
+   "round to Nearest" as "Choose the best approximation. In case of a
+   tie, choose the one that is even (least significant bit 0).".
+   So we can't use the PowerPC "round to Nearest" mode. Instead we set
+   "round toward Zero" mode and round by adding +-0.5 before rounding
+   to the integer value.  It is necessary to detect when x is
+   (+-)0x1.fffffffffffffp-2 because adding +-0.5 in this case will
+   cause an erroneous shift, carry and round.  We simply return 0 if
+   0.5 > x > -0.5.  */
+
+ENTRY (__lround)
+	stwu    r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r10,r9,.LC0-got_label@ha
+	lfs	fp10,.LC0-got_label@l(r10)
+	addis	r10,r9,.LC1-got_label@ha
+	lfs	fp11,.LC1-got_label@l(r10)
+	addis	r10,r9,.LC2-got_label@ha
+	lfd	fp9,.LC2-got_label@l(r10)
+	addis	r10,r9,.LC3-got_label@ha
+	lfd	fp8,.LC3-got_label@l(r10)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp10,.LC0@l(r9)
+	lis	r9,.LC1@ha
+	lfs	fp11,.LC1@l(r9)
+	lis	r9,.LC2@ha
+	lfd	fp9,.LC2@l(r9)
+	lis	r9,.LC3@ha
+	lfd	fp8,.LC3@l(r9)
+#endif
+	fabs	fp2, fp1	/* Get the absolute value of x.  */
+	fsub	fp12,fp10,fp10	/* Compute 0.0.  */
+	fcmpu	cr6, fp2, fp10	/* if |x| < 0.5  */
+	fcmpu	cr5, fp1, fp9	/* if x >= 0x7fffffff.8p0  */
+	fcmpu	cr1, fp1, fp8	/* if x <= -0x80000000.8p0  */
+	fcmpu	cr7, fp1, fp12	/* x is negative? x < 0.0  */
+	blt-	cr6,.Lretzero
+	bge-	cr5,.Loflow
+	ble-	cr1,.Loflow
+	/* Test whether x is an integer to avoid a spurious "inexact".  */
+	fadd	fp3,fp2,fp11
+	fsub	fp3,fp3,fp11
+	fcmpu	cr5, fp2, fp3
+	beq	cr5,.Lnobias
+	fadd	fp3,fp2,fp10	/* |x|+=0.5 bias to prepare to round.  */
+	bge	cr7,.Lconvert	/* x is positive so don't negate x.  */
+	fnabs	fp3,fp3		/* -(|x|+=0.5)  */
+.Lconvert:
+	fctiwz	fp4,fp3		/* Convert to integer word, rounding toward zero.  */
+	stfd	fp4,8(r1)
+	nop	/* Ensure the following load is in a different dispatch  */
+	nop	/* group to avoid pipe stall on POWER4&5.  */
+	nop
+	lwz	r3,8+LOWORD(r1)	/* Load return as integer.  */
+.Lout:
+	addi	r1,r1,16
+	blr
+.Lretzero:			/* when 0.5 > x > -0.5  */
+	li	r3,0		/* return 0.  */
+	b	.Lout
+.Lnobias:
+	fmr	fp3,fp1
+	b	.Lconvert
+.Loflow:
+	fmr	fp3,fp11
+	bge	cr7,.Lconvert
+	fnabs	fp3,fp3
+	b	.Lconvert
+	END (__lround)
+
+weak_alias (__lround, lround)
+
+strong_alias (__lround, __lroundf)
+weak_alias (__lround, lroundf)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__lround, lroundl)
+strong_alias (__lround, __lroundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __lround, lroundl, GLIBC_2_1)
+#endif
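Putting the comment at the top of the file into C: the conversion itself always truncates (fctiwz), so rounding halfway cases away from zero is obtained by adding 0.5 to |x| first, skipping the bias when x is already an integer (to avoid a spurious "inexact") and short-circuiting |x| < 0.5 and the overflow cases. A sketch under those assumptions; the real code lets fctiwz saturate on overflow instead of returning LONG_MAX/LONG_MIN explicitly, and NaN handling is left out here:

#include <limits.h>
#include <math.h>

static long int
lround_sketch (double x)
{
  if (x >= 0x7fffffff.8p0)
    return LONG_MAX;                    /* fctiwz would saturate */
  if (x <= -0x80000000.8p0)
    return LONG_MIN;
  double ax = fabs (x);
  if (ax < 0.5)
    return 0;                           /* covers 0x1.fffffffffffffp-2 too */
  const double TWO52 = 0x1p52;
  if ((ax + TWO52) - TWO52 != ax)       /* not already an integer */
    ax += 0.5;                          /* bias before truncating */
  /* Truncation below matches fctiwz (round toward zero).  */
  return (long int) (x < 0.0 ? -ax : ax);
}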
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lroundf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lroundf.S
new file mode 100644
index 0000000000..6289e0be58
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_lroundf.S
@@ -0,0 +1 @@
+/* __lroundf is in s_lround.S */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_nearbyint.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_nearbyint.S
new file mode 100644
index 0000000000..df590e08bd
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_nearbyint.S
@@ -0,0 +1,87 @@
+/* Round to int floating-point values.  PowerPC32 version.
+   Copyright (C) 2011-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Adhemerval Zanella <azanella@br.ibm.com>, 2011
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This has been coded in assembler because GCC makes such a mess of it
+   when it's coded in C.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+
+/* double [fp1] nearbyint(double [fp1] x) */
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 2**52 */
+	.long 0x59800000	/* TWO52: 2**52 */
+
+	.section	".text"
+ENTRY (__nearbyint)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	lfs	fp13,.LC0-1b@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsub    fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO52)  */
+	bge	cr7,.L10
+	fcmpu	cr7,fp1,fp12	/* if (x > 0.0)  */
+	ble	cr7,L(lessthanzero)
+	mffs	fp11
+	mtfsb0	4*cr7+lt	/* Disable FE_INEXACT exception */
+	fadd	fp1,fp1,fp13	/* x += TWO52 */
+	fsub	fp1,fp1,fp13	/* x -= TWO52 */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+	mtfsf	0xff,fp11	/* Restore FE_INEXACT state.  */
+	blr
+L(lessthanzero):
+	bgelr	cr7
+	mffs	fp11
+	mtfsb0	4*cr7+lt	/* Disable FE_INEXACT exception */
+	fsub	fp1,fp1,fp13	/* x -= TWO52 */
+	fadd	fp1,fp1,fp13	/* x += TWO52 */
+	fnabs	fp1,fp1		/* if (x == 0.0) */
+	mtfsf	0xff,fp11	/* Restore FE_INEXACT state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadd	fp1,fp1,fp1
+	blr
+END (__nearbyint)
+
+weak_alias (__nearbyint, nearbyint)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__nearbyint, nearbyintl)
+strong_alias (__nearbyint, __nearbyintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __nearbyint, nearbyintl, GLIBC_2_1)
+#endif
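__nearbyint is __rint with the "inexact" exception suppressed: the FPSCR is captured with mffs, the inexact-enable bit is cleared so no trap fires, the usual TWO52 add/subtract does the rounding, and writing the saved FPSCR back discards the newly set inexact flag. The same contract expressed in C, as a sketch assuming <fenv.h> and rint():

#include <fenv.h>
#include <math.h>

static double
nearbyint_sketch (double x)
{
  fenv_t env;
  feholdexcept (&env);   /* save environment, clear flags, disable traps */
  double r = rint (x);   /* may set "inexact" while rounding */
  fesetenv (&env);       /* restore, dropping the "inexact" just raised */
  return r;
}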
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_nearbyintf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_nearbyintf.S
new file mode 100644
index 0000000000..fb4c6e4cee
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_nearbyintf.S
@@ -0,0 +1,78 @@
+/* Round to int floating-point values.  PowerPC32 version.
+   Copyright (C) 2011-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Adhemerval Zanella <azanella@br.ibm.com>, 2011
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This has been coded in assembler because GCC makes such a mess of it
+   when it's coded in C.  */
+
+#include <sysdep.h>
+
+
+/* float [fp1] nearbyintf(float [fp1] x) */
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:
+	.long 0x4B000000	/* TWO23: 2**23 */
+
+	.section	".text"
+ENTRY (__nearbyintf)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	lfs	fp13,.LC0-1b@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsub    fp12,fp13,fp13		/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13		/* if (fabs(x) > TWO23)  */
+	bge	cr7,.L10
+	fcmpu	cr7,fp1,fp12		/* if (x > 0.0)  */
+	ble	cr7,L(lessthanzero)
+	mffs	fp11
+	mtfsb0	4*cr7+lt		/* Disable FE_INEXACT exception */
+	fadds	fp1,fp1,fp13		/* x += TWO23 */
+	fsubs	fp1,fp1,fp13		/* x -= TWO23 */
+	fabs	fp1,fp1			/* if (x == 0.0) */
+	mtfsf	0xff,fp11		/* Restore FE_INEXACT state.  */
+	blr
+L(lessthanzero):
+	bgelr	cr7
+	mffs	fp11
+	mtfsb0	4*cr7+lt		/* Disable FE_INEXACT exception */
+	fsubs	fp1,fp1,fp13		/* x -= TWO23 */
+	fadds	fp1,fp1,fp13		/* x += TWO23 */
+	fnabs	fp1,fp1			/* if (x == 0.0) */
+	mtfsf	0xff,fp11		/* Restore FE_INEXACT state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadds	fp1,fp1,fp1
+	blr
+END (__nearbyintf)
+
+weak_alias (__nearbyintf, nearbyintf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_rint.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_rint.S
new file mode 100644
index 0000000000..a1c3116447
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_rint.S
@@ -0,0 +1,76 @@
+/* Round to int floating-point values.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This has been coded in assembler because GCC makes such a mess of it
+   when it's coded in C.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 2**52 */
+	.long 0x59800000
+
+	.section	".text"
+ENTRY (__rint)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfs	fp13,.LC0-got_label@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsub	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO52)  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl	cr7,.L10
+	bng	cr6,.L4
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fsub	fp1,fp1,fp13	/* x-= TWO52;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+	blr			/* x = 0.0; */
+.L4:
+	bnllr	cr6		/* if (x < 0.0)  */
+	fsub	fp1,fp1,fp13	/* x-= TWO52;  */
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+	blr			/* x = -0.0; */
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadd	fp1,fp1,fp1
+	blr
+	END (__rint)
+
+weak_alias (__rint, rint)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__rint, rintl)
+strong_alias (__rint, __rintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __rint, rintl, GLIBC_2_0)
+#endif
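
The logic matches __nearbyint above except that the FPSCR is not saved and restored, so FE_INEXACT stays set whenever the result differs from the argument.  A small illustrative test program (not part of this patch; compile without -ffast-math and link with -lm) makes the difference visible:

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  volatile double x = 1.5;	/* volatile so the calls are not folded away  */
  volatile double r;

  feclearexcept (FE_INEXACT);
  r = rint (x);			/* 1.5 is not integral, so FE_INEXACT is raised.  */
  printf ("rint:      %g inexact=%d\n", r, fetestexcept (FE_INEXACT) != 0);

  feclearexcept (FE_INEXACT);
  r = nearbyint (x);		/* Same value, but FE_INEXACT stays clear.  */
  printf ("nearbyint: %g inexact=%d\n", r, fetestexcept (FE_INEXACT) != 0);
  return 0;
}
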
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_rintf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
new file mode 100644
index 0000000000..70e52e894d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
@@ -0,0 +1,65 @@
+/* Round float to int floating-point values.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 2**23 */
+	.long 0x4b000000
+
+	.section	".text"
+ENTRY (__rintf)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfs	fp13,.LC0-got_label@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsubs	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO23)  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl	cr7,.L10
+	bng	cr6,.L4
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fsubs	fp1,fp1,fp13	/* x-= TWO23;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+	blr			/* x = 0.0; */
+.L4:
+	bnllr	cr6		/* if (x < 0.0)  */
+	fsubs	fp1,fp1,fp13	/* x-= TWO23;  */
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+	blr			/* x = -0.0; */
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadds	fp1,fp1,fp1
+	blr
+	END (__rintf)
+
+weak_alias (__rintf, rintf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_round.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_round.S
new file mode 100644
index 0000000000..f539890b17
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_round.S
@@ -0,0 +1,104 @@
+/* round function.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.section	.rodata.cst8,"aM",@progbits,8
+	.align	2
+.LC0:	/* 2**52 */
+	.long 0x59800000
+.LC1:	/* 0.5 */
+	.long 0x3f000000
+
+/* double [fp1] round (double x [fp1])
+   IEEE 1003.1 round function.  IEEE specifies "round to the nearest
+   integer value, rounding halfway cases away from zero, regardless of
+   the current rounding mode."  However, the PowerPC architecture defines
+   "Round to Nearest" as "Choose the best approximation.  In case of a
+   tie, choose the one that is even (least significant bit 0)".
+   So we can't use the PowerPC "Round to Nearest" mode.  Instead we set
+   "Round toward Zero" mode and round by adding +-0.5 before rounding
+   to the integer value.  */
+
+	.section	".text"
+ENTRY (__round)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	addi	r9,r9,.LC0-got_label@l
+	mtlr	r11
+	cfi_same_value (lr)
+	lfs	fp13,0(r9)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsub	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO52)  */
+	mffs	fp11		/* Save current FPU rounding mode and
+				   "inexact" state.  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl-	cr7,.L10
+	mtfsfi	7,1		/* Set rounding mode toward 0.  */
+#ifdef SHARED
+	lfs	fp10,.LC1-.LC0(r9)
+#else
+	lis	r9,.LC1@ha
+	lfs	fp10,.LC1@l(r9)
+#endif
+	ble-	cr6,.L4
+	fadd	fp1,fp1,fp10	/* x+= 0.5;  */
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fsub	fp1,fp1,fp13	/* x-= TWO52;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = 0.0; */
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L4:
+	fsub	fp9,fp1,fp10	/* x-= 0.5;  */
+	bge-	cr6,.L9		/* if (x < 0.0)  */
+	fsub	fp1,fp9,fp13	/* x-= TWO52;  */
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = -0.0; */
+.L9:
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadd	fp1,fp1,fp1
+	blr
+	END (__round)
+
+weak_alias (__round, round)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__round, roundl)
+strong_alias (__round, __roundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __round, roundl, GLIBC_2_1)
+#endif
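
A C sketch of the algorithm the comment above describes; the function name, the <fenv.h> usage and the constants are illustrative, and real code would additionally need the FENV_ACCESS / -frounding-math care that is omitted here:

#include <fenv.h>
#include <math.h>

/* Round halfway cases away from zero: bias by +-0.5, then truncate via
   the 2**52 trick with the rounding mode forced to "toward zero".  */
static double
round_sketch (double x)
{
  const double TWO52 = 0x1p52;
  fenv_t env;

  if (!(fabs (x) < TWO52))
    return x == x ? x : x + x;	/* Already integral, or a NaN to quiet.  */

  feholdexcept (&env);		/* mffs  */
  fesetround (FE_TOWARDZERO);	/* mtfsfi 7,1  */
  if (x > 0.0)
    x = fabs (((x + 0.5) + TWO52) - TWO52);
  else if (x < 0.0)
    x = -fabs (((x - 0.5) - TWO52) + TWO52);
  fesetenv (&env);		/* Restore rounding mode and "inexact".  */
  return x;
}
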
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_roundf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
new file mode 100644
index 0000000000..5daf84b598
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
@@ -0,0 +1,95 @@
+/* roundf  function.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.section	.rodata.cst8,"aM",@progbits,8
+	.align	3
+.LC0:	/* 2**23 */
+	.long 0x4b000000
+.LC1:	/* 0.5 */
+	.long 0x3f000000
+
+/* float [fp1] roundf  (float x [fp1])
+   IEEE 1003.1 round function.  IEEE specifies "round to the nearest
+   integer value, rounding halfway cases away from zero, regardless of
+   the current rounding mode."  However, the PowerPC architecture defines
+   "Round to Nearest" as "Choose the best approximation.  In case of a
+   tie, choose the one that is even (least significant bit 0)".
+   So we can't use the PowerPC "Round to Nearest" mode.  Instead we set
+   "Round toward Zero" mode and round by adding +-0.5 before rounding
+   to the integer value.  */
+
+	.section	".text"
+ENTRY (__roundf)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	addi	r9,r9,.LC0-got_label@l
+	mtlr	r11
+	cfi_same_value (lr)
+	lfs	fp13,0(r9)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsubs	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO23)  */
+	mffs	fp11		/* Save current FPU rounding mode and
+				   "inexact" state.  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl-	cr7,.L10
+	mtfsfi	7,1		/* Set rounding mode toward 0.  */
+#ifdef SHARED
+	lfs	fp10,.LC1-.LC0(r9)
+#else
+	lis	r9,.LC1@ha
+	lfs	fp10,.LC1@l(r9)
+#endif
+	ble-	cr6,.L4
+	fadds	fp1,fp1,fp10	/* x+= 0.5;  */
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fsubs	fp1,fp1,fp13	/* x-= TWO23;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = 0.0; */
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L4:
+	fsubs	fp9,fp1,fp10	/* x-= 0.5;  */
+	bge-	cr6,.L9		/* if (x < 0.0)  */
+	fsubs	fp1,fp9,fp13	/* x-= TWO23;  */
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = -0.0; */
+.L9:
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadds	fp1,fp1,fp1
+	blr
+	END (__roundf)
+
+weak_alias (__roundf, roundf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_trunc.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
new file mode 100644
index 0000000000..85d292c03c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
@@ -0,0 +1,90 @@
+/* trunc function.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 2**52 */
+	.long 0x59800000
+
+/* double [fp1] trunc (double x [fp1])
+   IEEE 1003.1 trunc function.  IEEE specifies "trunc to the integer
+   value, in floating format, nearest to but no larger in magnitude
+   than the argument."
+   We set "round toward Zero" mode and truncate by adding +-2**52 then
+   subtracting +-2**52.  */
+
+	.section	".text"
+ENTRY (__trunc)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfs	fp13,.LC0-got_label@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsub	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO52)  */
+	mffs	fp11		/* Save current FPU rounding mode and
+				   "inexact" state.  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl-	cr7,.L10
+	mtfsfi	7,1		/* Set rounding toward 0 mode.  */
+	ble-	cr6,.L4
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fsub	fp1,fp1,fp13	/* x-= TWO52;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = 0.0; */
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L4:
+	bge-	cr6,.L9		/* if (x < 0.0)  */
+	fsub	fp1,fp1,fp13	/* x-= TWO52;  */
+	fadd	fp1,fp1,fp13	/* x+= TWO52;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = -0.0; */
+.L9:
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadd	fp1,fp1,fp1
+	blr
+	END (__trunc)
+
+weak_alias (__trunc, trunc)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__trunc, truncl)
+strong_alias (__trunc, __truncl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __trunc, truncl, GLIBC_2_1)
+#endif
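
As a worked example of the comment above, take x = 5.75 with round-toward-zero in effect: 5.75 + 2**52 = 4503599627370501.75, which is not representable (doubles in [2**52, 2**53) are spaced exactly 1 apart), so it truncates to 4503599627370501; subtracting 2**52 leaves 5.0, i.e. trunc(5.75).  For x = -5.75 the code subtracts and then re-adds 2**52 instead, giving -5.0, and the final fnabs forces -0.0 for inputs in (-1, 0).
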
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_truncf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
new file mode 100644
index 0000000000..9b91e3f0a5
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
@@ -0,0 +1,82 @@
+/* truncf function.  PowerPC32 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.section	.rodata.cst4,"aM",@progbits,4
+	.align	2
+.LC0:	/* 2**23 */
+	.long 0x4b000000
+
+/* float [fp1] truncf (float x [fp1])
+   IEEE 1003.1 trunc function.  IEEE specifies "trunc to the integer
+   value, in floating format, nearest to but no larger in magnitude
+   than the argument."
+   We set "round toward Zero" mode and truncate by adding +-2**23 then
+   subtracting +-2**23.  */
+
+	.section	".text"
+ENTRY (__truncf)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfs	fp13,.LC0-got_label@l(r9)
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfs	fp13,.LC0@l(r9)
+#endif
+	fabs	fp0,fp1
+	fsubs	fp12,fp13,fp13	/* generate 0.0  */
+	fcmpu	cr7,fp0,fp13	/* if (fabs(x) > TWO23)  */
+	mffs	fp11		/* Save current FPU rounding mode and
+				   "inexact" state.  */
+	fcmpu	cr6,fp1,fp12	/* if (x > 0.0)  */
+	bnl-	cr7,.L10
+	mtfsfi	7,1		/* Set rounding toward 0 mode.  */
+	ble-	cr6,.L4
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fsubs	fp1,fp1,fp13	/* x-= TWO23;  */
+	fabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = 0.0; */
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L4:
+	bge-	cr6,.L9		/* if (x < 0.0)  */
+	fsubs	fp1,fp1,fp13	/* x-= TWO23;  */
+	fadds	fp1,fp1,fp13	/* x+= TWO23;  */
+	fnabs	fp1,fp1		/* if (x == 0.0)  */
+				/* x = -0.0; */
+.L9:
+	mtfsf	0xff,fp11	/* Restore previous rounding mode and
+				   "inexact" state.  */
+	blr
+.L10:
+	/* Ensure sNaN input is converted to qNaN.  */
+	fcmpu	cr7,fp1,fp1
+	beqlr	cr7
+	fadds	fp1,fp1,fp1
+	blr
+	END (__truncf)
+
+weak_alias (__truncf, truncf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
new file mode 100644
index 0000000000..19ad07ee56
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
@@ -0,0 +1,183 @@
+/* setjmp for PowerPC.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <stap-probe.h>
+#define _ASM
+#ifdef __NO_VMX__
+# include <novmxsetjmp.h>
+#else
+# include <jmpbuf-offsets.h>
+#endif
+
+	.machine	"altivec"
+ENTRY (__sigsetjmp_symbol)
+
+#ifdef PTR_MANGLE
+	mr   r5,r1
+	PTR_MANGLE(r5, r6)
+	stw  r5,(JB_GPR1*4)(3)
+#else
+	stw  r1,(JB_GPR1*4)(3)
+#endif
+	mflr r0
+	/* setjmp probe expects longjmp first argument (4@3), second argument
+	   (-4@4), and target address (4@0), respectively.  */
+	LIBC_PROBE (setjmp, 3, 4@3, -4@4, 4@0)
+	stw  r14,((JB_GPRS+0)*4)(3)
+	stfd fp14,((JB_FPRS+0*2)*4)(3)
+#ifdef PTR_MANGLE
+	PTR_MANGLE2 (r0, r6)
+#endif
+	stw  r0,(JB_LR*4)(3)
+	stw  r15,((JB_GPRS+1)*4)(3)
+	stfd fp15,((JB_FPRS+1*2)*4)(3)
+	mfcr r0
+	stw  r16,((JB_GPRS+2)*4)(3)
+	stfd fp16,((JB_FPRS+2*2)*4)(3)
+	stw  r0,(JB_CR*4)(3)
+	stw  r17,((JB_GPRS+3)*4)(3)
+	stfd fp17,((JB_FPRS+3*2)*4)(3)
+	stw  r18,((JB_GPRS+4)*4)(3)
+	stfd fp18,((JB_FPRS+4*2)*4)(3)
+	stw  r19,((JB_GPRS+5)*4)(3)
+	stfd fp19,((JB_FPRS+5*2)*4)(3)
+	stw  r20,((JB_GPRS+6)*4)(3)
+	stfd fp20,((JB_FPRS+6*2)*4)(3)
+	stw  r21,((JB_GPRS+7)*4)(3)
+	stfd fp21,((JB_FPRS+7*2)*4)(3)
+	stw  r22,((JB_GPRS+8)*4)(3)
+	stfd fp22,((JB_FPRS+8*2)*4)(3)
+	stw  r23,((JB_GPRS+9)*4)(3)
+	stfd fp23,((JB_FPRS+9*2)*4)(3)
+	stw  r24,((JB_GPRS+10)*4)(3)
+	stfd fp24,((JB_FPRS+10*2)*4)(3)
+	stw  r25,((JB_GPRS+11)*4)(3)
+	stfd fp25,((JB_FPRS+11*2)*4)(3)
+	stw  r26,((JB_GPRS+12)*4)(3)
+	stfd fp26,((JB_FPRS+12*2)*4)(3)
+	stw  r27,((JB_GPRS+13)*4)(3)
+	stfd fp27,((JB_FPRS+13*2)*4)(3)
+	stw  r28,((JB_GPRS+14)*4)(3)
+	stfd fp28,((JB_FPRS+14*2)*4)(3)
+	stw  r29,((JB_GPRS+15)*4)(3)
+	stfd fp29,((JB_FPRS+15*2)*4)(3)
+	stw  r30,((JB_GPRS+16)*4)(3)
+	stfd fp30,((JB_FPRS+16*2)*4)(3)
+	stw  r31,((JB_GPRS+17)*4)(3)
+	stfd fp31,((JB_FPRS+17*2)*4)(3)
+#ifndef __NO_VMX__
+# ifdef PIC
+	mflr    r6
+	cfi_register(lr,r6)
+	SETUP_GOT_ACCESS(r5,got_label)
+	addis	r5,r5,_GLOBAL_OFFSET_TABLE_-got_label@ha
+	addi	r5,r5,_GLOBAL_OFFSET_TABLE_-got_label@l
+	mtlr	r6
+	cfi_same_value (lr)
+#  ifdef SHARED
+#   if IS_IN (rtld)
+	/* Inside ld.so we use the local alias to avoid runtime GOT
+	   relocations.  */
+	lwz     r5,_rtld_local_ro@got(r5)
+#   else
+	lwz     r5,_rtld_global_ro@got(r5)
+#   endif
+	lwz     r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET+LOWORD(r5)
+#  else
+	lwz     r5,_dl_hwcap@got(r5)
+	lwz     r5,LOWORD(r5)
+#  endif
+# else
+	lis	r6,(_dl_hwcap+LOWORD)@ha
+	lwz     r5,(_dl_hwcap+LOWORD)@l(r6)
+# endif
+	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
+	beq	L(no_vmx)
+	la	r5,((JB_VRS)*4)(3)
+	andi.	r6,r5,0xf
+	mfspr	r0,VRSAVE
+	stw	r0,((JB_VRSAVE)*4)(3)
+	addi	r6,r5,16
+	beq+	L(aligned_save_vmx)
+
+	lvsr	v0,0,r5
+	lvsl	v1,0,r5
+	addi	r6,r5,-16
+
+# define save_misaligned_vmx(savevr,prevvr,shiftvr,tmpvr,savegpr,addgpr) \
+	addi	addgpr,addgpr,32;					 \
+	vperm	tmpvr,prevvr,savevr,shiftvr;				 \
+	stvx	tmpvr,0,savegpr
+
+	/*
+	 * We have to be careful not to corrupt the data below v20 and
+	 * above v31. To keep things simple we just rotate both ends in
+	 * the opposite direction to our main permute so we can use
+	 * the common macro.
+	 */
+
+	/* load and rotate data below v20 */
+	lvx	v2,0,r5
+	vperm	v2,v2,v2,v1
+	save_misaligned_vmx(v20,v2,v0,v3,r5,r6)
+	save_misaligned_vmx(v21,v20,v0,v3,r6,r5)
+	save_misaligned_vmx(v22,v21,v0,v3,r5,r6)
+	save_misaligned_vmx(v23,v22,v0,v3,r6,r5)
+	save_misaligned_vmx(v24,v23,v0,v3,r5,r6)
+	save_misaligned_vmx(v25,v24,v0,v3,r6,r5)
+	save_misaligned_vmx(v26,v25,v0,v3,r5,r6)
+	save_misaligned_vmx(v27,v26,v0,v3,r6,r5)
+	save_misaligned_vmx(v28,v27,v0,v3,r5,r6)
+	save_misaligned_vmx(v29,v28,v0,v3,r6,r5)
+	save_misaligned_vmx(v30,v29,v0,v3,r5,r6)
+	save_misaligned_vmx(v31,v30,v0,v3,r6,r5)
+	/* load and rotate data above v31 */
+	lvx	v2,0,r6
+	vperm	v2,v2,v2,v1
+	save_misaligned_vmx(v2,v31,v0,v3,r5,r6)
+
+	b	L(no_vmx)
+
+L(aligned_save_vmx):
+	stvx	20,0,r5
+	addi	r5,r5,32
+	stvx	21,0,r6
+	addi	r6,r6,32
+	stvx	22,0,r5
+	addi	r5,r5,32
+	stvx	23,0,r6
+	addi	r6,r6,32
+	stvx	24,0,r5
+	addi	r5,r5,32
+	stvx	25,0,r6
+	addi	r6,r6,32
+	stvx	26,0,r5
+	addi	r5,r5,32
+	stvx	27,0,r6
+	addi	r6,r6,32
+	stvx	28,0,r5
+	addi	r5,r5,32
+	stvx	29,0,r6
+	addi	r6,r6,32
+	stvx	30,0,r5
+	stvx	31,0,r6
+L(no_vmx):
+#endif
+	b __sigjmp_save_symbol@local
+END (__sigsetjmp_symbol)
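
The andis. test above checks the AltiVec bit of the hardware-capability word that the dynamic loader captured from the aux vector (_dl_hwcap / _rtld_global_ro).  In ordinary application code the equivalent query would look roughly like the sketch below; the bit value is written out literally as an assumption standing in for PPC_FEATURE_HAS_ALTIVEC rather than pulled from a kernel header:

#include <sys/auxv.h>

#define HAS_ALTIVEC_BIT 0x10000000UL	/* assumed PPC_FEATURE_HAS_ALTIVEC  */

static int
have_altivec (void)
{
  unsigned long hwcap = getauxval (AT_HWCAP);	/* same word _dl_hwcap caches  */
  return (hwcap & HAS_ALTIVEC_BIT) != 0;
}
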
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/setjmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/setjmp.S
new file mode 100644
index 0000000000..02b17d3467
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/fpu/setjmp.S
@@ -0,0 +1,47 @@
+/* Non-AltiVec (old) version of setjmp for PowerPC.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <libc-symbols.h>
+#include <rtld-global-offsets.h>
+#include <shlib-compat.h>
+
+#if !IS_IN (libc)
+/* Build a non-versioned object for rtld-*.  */
+# define __sigsetjmp_symbol __sigsetjmp
+# define __sigjmp_save_symbol __sigjmp_save
+# include "setjmp-common.S"
+
+#else /* IS_IN (libc) */
+/* Build a versioned object for libc.  */
+versioned_symbol (libc, __vmx__sigsetjmp, __sigsetjmp, GLIBC_2_3_4)
+# define __sigsetjmp_symbol __vmx__sigsetjmp
+# define __sigjmp_save_symbol __vmx__sigjmp_save
+# include "setjmp-common.S"
+libc_hidden_ver (__vmx__sigsetjmp, __sigsetjmp)
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+#  define __NO_VMX__
+#  undef __sigsetjmp_symbol
+#  undef __sigjmp_save_symbol
+#  undef JB_SIZE
+compat_symbol (libc, __novmx__sigsetjmp, __sigsetjmp, GLIBC_2_0)
+#  define __sigsetjmp_symbol __novmx__sigsetjmp
+#  define __sigjmp_save_symbol __novmx__sigjmp_save
+#  include "setjmp-common.S"
+# endif
+#endif /* IS_IN (libc) */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/gprrest0.S b/REORG.TODO/sysdeps/powerpc/powerpc32/gprrest0.S
new file mode 100644
index 0000000000..a379e0248e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/gprrest0.S
@@ -0,0 +1,69 @@
+/* Copyright (C) 2000-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/*
+    General Purpose Register (GPR) restore routine
+    when Floating Point Registers (FPRs) are not saved
+
+    Note: This restore routine must not be called when GPR30 or
+    GPR31, or both, are the only registers being saved.  In these
+    cases, the saving and restoring must be done inline.
+*/
+
+#include <sysdep.h>
+
+ENTRY(_restgpr0_all)
+		.globl C_TEXT(_restgpr0_13)
+C_TEXT(_restgpr0_13):	lwz	r13,-76(r1)
+		.globl C_TEXT(_restgpr0_14)
+C_TEXT(_restgpr0_14):	lwz	r14,-72(r1)
+		.globl C_TEXT(_restgpr0_15)
+C_TEXT(_restgpr0_15):	lwz	r15,-68(r1)
+		.globl C_TEXT(_restgpr0_16)
+C_TEXT(_restgpr0_16):	lwz	r16,-64(r1)
+		.globl C_TEXT(_restgpr0_17)
+C_TEXT(_restgpr0_17):	lwz	r17,-60(r1)
+		.globl C_TEXT(_restgpr0_18)
+C_TEXT(_restgpr0_18):	lwz	r18,-56(r1)
+		.globl C_TEXT(_restgpr0_19)
+C_TEXT(_restgpr0_19):	lwz	r19,-52(r1)
+		.globl C_TEXT(_restgpr0_20)
+C_TEXT(_restgpr0_20):	lwz	r20,-48(r1)
+		.globl C_TEXT(_restgpr0_21)
+C_TEXT(_restgpr0_21):	lwz	r21,-44(r1)
+		.globl C_TEXT(_restgpr0_22)
+C_TEXT(_restgpr0_22):	lwz	r22,-40(r1)
+		.globl C_TEXT(_restgpr0_23)
+C_TEXT(_restgpr0_23):	lwz	r23,-36(r1)
+		.globl C_TEXT(_restgpr0_24)
+C_TEXT(_restgpr0_24):	lwz	r24,-32(r1)
+		.globl C_TEXT(_restgpr0_25)
+C_TEXT(_restgpr0_25):	lwz	r25,-28(r1)
+		.globl C_TEXT(_restgpr0_26)
+C_TEXT(_restgpr0_26):	lwz	r26,-24(r1)
+		.globl C_TEXT(_restgpr0_27)
+C_TEXT(_restgpr0_27):	lwz	r27,-20(r1)
+		.globl C_TEXT(_restgpr0_28)
+C_TEXT(_restgpr0_28):	lwz	r28,-16(r1)
+		.globl C_TEXT(_restgpr0_29)
+C_TEXT(_restgpr0_29):	lwz	r0,8(r1)	#get return address from frame
+			lwz	r29,-12(r1)	#restore r29
+			mtlr	r0		#move return address to LR
+			lwz	r30,-8(r1)	#restore r30
+			lwz	r31,-4(r1)	#restore r31
+			blr			#return
+END (_restgpr0_all)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/gprrest1.S b/REORG.TODO/sysdeps/powerpc/powerpc32/gprrest1.S
new file mode 100644
index 0000000000..c3ac120ca5
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/gprrest1.S
@@ -0,0 +1,63 @@
+/* Copyright (C) 2000-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/*
+    General Purpose Register (GPR) restore routine
+    when Floating Point Registers (FPRs) are saved
+*/
+
+#include <sysdep.h>
+
+ENTRY(_restgpr1_all)
+		.globl	C_TEXT(_restgpr1_13)
+C_TEXT(_restgpr1_13):	lwz	r13,-76(r12)
+		.globl	C_TEXT(_restgpr1_14)
+C_TEXT(_restgpr1_14):	lwz	r14,-72(r12)
+		.globl	C_TEXT(_restgpr1_15)
+C_TEXT(_restgpr1_15):	lwz	r15,-68(r12)
+		.globl	C_TEXT(_restgpr1_16)
+C_TEXT(_restgpr1_16):	lwz	r16,-64(r12)
+		.globl	C_TEXT(_restgpr1_17)
+C_TEXT(_restgpr1_17):	lwz	r17,-60(r12)
+		.globl	C_TEXT(_restgpr1_18)
+C_TEXT(_restgpr1_18):	lwz	r18,-56(r12)
+		.globl	C_TEXT(_restgpr1_19)
+C_TEXT(_restgpr1_19):	lwz	r19,-52(r12)
+		.globl	C_TEXT(_restgpr1_20)
+C_TEXT(_restgpr1_20):	lwz	r20,-48(r12)
+		.globl	C_TEXT(_restgpr1_21)
+C_TEXT(_restgpr1_21):	lwz	r21,-44(r12)
+		.globl	C_TEXT(_restgpr1_22)
+C_TEXT(_restgpr1_22):	lwz	r22,-40(r12)
+		.globl	C_TEXT(_restgpr1_23)
+C_TEXT(_restgpr1_23):	lwz	r23,-36(r12)
+		.globl	C_TEXT(_restgpr1_24)
+C_TEXT(_restgpr1_24):	lwz	r24,-32(r12)
+		.globl	C_TEXT(_restgpr1_25)
+C_TEXT(_restgpr1_25):	lwz	r25,-28(r12)
+		.globl	C_TEXT(_restgpr1_26)
+C_TEXT(_restgpr1_26):	lwz	r26,-24(r12)
+		.globl	C_TEXT(_restgpr1_27)
+C_TEXT(_restgpr1_27):	lwz	r27,-20(r12)
+		.globl	C_TEXT(_restgpr1_28)
+C_TEXT(_restgpr1_28):	lwz	r28,-16(r12)
+		.globl	C_TEXT(_restgpr1_29)
+C_TEXT(_restgpr1_29):	lwz	r29,-12(r12)	#restore r29
+			lwz	r30,-8(r12)	#restore r30
+			lwz	r31,-4(r12)	#restore r31
+			blr			#return
+END (_restgpr1_all)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/gprsave0.S b/REORG.TODO/sysdeps/powerpc/powerpc32/gprsave0.S
new file mode 100644
index 0000000000..4c474514c6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/gprsave0.S
@@ -0,0 +1,87 @@
+/* Copyright (C) 2000-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/*
+    General Purpose Register (GPR) save routine
+    when Floating Point Registers (FPRs) are not saved
+
+    Note: This save routine must not be called when GPR30 or
+    GPR31, or both, are the only registers being saved.  In these
+    cases, the saving and restoring must be done inline.
+*/
+
+#include <sysdep.h>
+
+ENTRY(_savegpr0_all)
+		.globl C_TEXT(_savegpr0_13)
+C_TEXT(_savegpr0_13):	stw	r13,-76(r1)
+			cfi_offset(r13,-76)
+		.globl C_TEXT(_savegpr0_14)
+C_TEXT(_savegpr0_14):	stw	r14,-72(r1)
+			cfi_offset(r14,-72)
+		.globl C_TEXT(_savegpr0_15)
+C_TEXT(_savegpr0_15):	stw	r15,-68(r1)
+			cfi_offset(r15,-68)
+		.globl C_TEXT(_savegpr0_16)
+C_TEXT(_savegpr0_16):	stw	r16,-64(r1)
+			cfi_offset(r16,-64)
+		.globl C_TEXT(_savegpr0_17)
+C_TEXT(_savegpr0_17):	stw	r17,-60(r1)
+			cfi_offset(r17,-60)
+		.globl C_TEXT(_savegpr0_18)
+C_TEXT(_savegpr0_18):	stw	r18,-56(r1)
+			cfi_offset(r18,-56)
+		.globl C_TEXT(_savegpr0_19)
+C_TEXT(_savegpr0_19):	stw	r19,-52(r1)
+			cfi_offset(r19,-52)
+		.globl C_TEXT(_savegpr0_20)
+C_TEXT(_savegpr0_20):	stw	r20,-48(r1)
+			cfi_offset(r20,-48)
+		.globl C_TEXT(_savegpr0_21)
+C_TEXT(_savegpr0_21):	stw	r21,-44(r1)
+			cfi_offset(r21,-44)
+		.globl C_TEXT(_savegpr0_22)
+C_TEXT(_savegpr0_22):	stw	r22,-40(r1)
+			cfi_offset(r22,-40)
+		.globl C_TEXT(_savegpr0_23)
+C_TEXT(_savegpr0_23):	stw	r23,-36(r1)
+			cfi_offset(r23,-36)
+		.globl C_TEXT(_savegpr0_24)
+C_TEXT(_savegpr0_24):	stw	r24,-32(r1)
+			cfi_offset(r24,-32)
+		.globl C_TEXT(_savegpr0_25)
+C_TEXT(_savegpr0_25):	stw	r25,-28(r1)
+			cfi_offset(r25,-28)
+		.globl C_TEXT(_savegpr0_26)
+C_TEXT(_savegpr0_26):	stw	r26,-24(r1)
+			cfi_offset(r26,-24)
+		.globl C_TEXT(_savegpr0_27)
+C_TEXT(_savegpr0_27):	stw	r27,-20(r1)
+			cfi_offset(r27,-20)
+		.globl C_TEXT(_savegpr0_28)
+C_TEXT(_savegpr0_28):	stw	r28,-16(r1)
+			cfi_offset(r28,-16)
+		.globl C_TEXT(_savegpr0_29)
+C_TEXT(_savegpr0_29):	stw	r29,-12(r1) #save r29
+			stw	r30,-8(r1)  #save r30
+			stw	r31,-4(r1)  #save r31
+			cfi_offset(r29,-12)
+			cfi_offset(r30,-8)
+			cfi_offset(r31,-4)
+			stw	r0,8(r1)    #save LR in callers frame
+			blr		    #return
+END (_savegpr0_all)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/gprsave1.S b/REORG.TODO/sysdeps/powerpc/powerpc32/gprsave1.S
new file mode 100644
index 0000000000..89da657070
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/gprsave1.S
@@ -0,0 +1,63 @@
+/* Copyright (C) 2000-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/*
+    General Purpose Register (GPR) save routine
+    when Floating Point Registers (FPRs) are saved
+*/
+
+#include <sysdep.h>
+
+ENTRY(_savegpr1_all)
+		.globl C_TEXT(_savegpr1_13)
+C_TEXT(_savegpr1_13):	stw	r13,-76(r12)
+		.globl C_TEXT(_savegpr1_14)
+C_TEXT(_savegpr1_14):	stw	r14,-72(r12)
+		.globl C_TEXT(_savegpr1_15)
+C_TEXT(_savegpr1_15):	stw	r15,-68(r12)
+		.globl C_TEXT(_savegpr1_16)
+C_TEXT(_savegpr1_16):	stw	r16,-64(r12)
+		.globl C_TEXT(_savegpr1_17)
+C_TEXT(_savegpr1_17):	stw	r17,-60(r12)
+		.globl C_TEXT(_savegpr1_18)
+C_TEXT(_savegpr1_18):	stw	r18,-56(r12)
+		.globl C_TEXT(_savegpr1_19)
+C_TEXT(_savegpr1_19):	stw	r19,-52(r12)
+		.globl C_TEXT(_savegpr1_20)
+C_TEXT(_savegpr1_20):	stw	r20,-48(r12)
+		.globl C_TEXT(_savegpr1_21)
+C_TEXT(_savegpr1_21):	stw	r21,-44(r12)
+		.globl C_TEXT(_savegpr1_22)
+C_TEXT(_savegpr1_22):	stw	r22,-40(r12)
+		.globl C_TEXT(_savegpr1_23)
+C_TEXT(_savegpr1_23):	stw	r23,-36(r12)
+		.globl C_TEXT(_savegpr1_24)
+C_TEXT(_savegpr1_24):	stw	r24,-32(r12)
+		.globl C_TEXT(_savegpr1_25)
+C_TEXT(_savegpr1_25):	stw	r25,-28(r12)
+		.globl C_TEXT(_savegpr1_26)
+C_TEXT(_savegpr1_26):	stw	r26,-24(r12)
+		.globl C_TEXT(_savegpr1_27)
+C_TEXT(_savegpr1_27):	stw	r27,-20(r12)
+		.globl C_TEXT(_savegpr1_28)
+C_TEXT(_savegpr1_28):	stw	r28,-16(r12)
+		.globl C_TEXT(_savegpr1_29)
+C_TEXT(_savegpr1_29):	stw	r29,-12(r12)	#save r29
+			stw	r30,-8(r12)	#save r30
+			stw	r31,-4(r12)	#save r31
+			blr			#return
+END (_savegpr1_all)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/libgcc-compat.S b/REORG.TODO/sysdeps/powerpc/powerpc32/libgcc-compat.S
new file mode 100644
index 0000000000..431ccaa4b3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/libgcc-compat.S
@@ -0,0 +1,137 @@
+/* pre-.hidden libgcc compatibility
+   Copyright (C) 2002-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+	.file	"libgcc-compat.S"
+
+#include <shlib-compat.h>
+
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_2_6)
+
+	.symver __ashldi3_v_glibc20,__ashldi3@GLIBC_2.0
+	.symver __ashrdi3_v_glibc20,__ashrdi3@GLIBC_2.0
+	.symver __lshrdi3_v_glibc20,__lshrdi3@GLIBC_2.0
+	.symver __cmpdi2_v_glibc20,__cmpdi2@GLIBC_2.0
+	.symver __ucmpdi2_v_glibc20,__ucmpdi2@GLIBC_2.0
+#if !defined _SOFT_FLOAT && !defined __NO_FPRS__
+	.symver __fixdfdi_v_glibc20,__fixdfdi@GLIBC_2.0
+	.symver __fixunsdfdi_v_glibc20,__fixunsdfdi@GLIBC_2.0
+	.symver __fixsfdi_v_glibc20,__fixsfdi@GLIBC_2.0
+	.symver __fixunssfdi_v_glibc20,__fixunssfdi@GLIBC_2.0
+	.symver __floatdidf_v_glibc20,__floatdidf@GLIBC_2.0
+	.symver __floatdisf_v_glibc20,__floatdisf@GLIBC_2.0
+#endif
+
+#ifdef HAVE_DOT_HIDDEN
+	.hidden __ashldi3
+	.hidden __ashrdi3
+	.hidden __lshrdi3
+	.hidden __cmpdi2
+	.hidden __ucmpdi2
+# if !defined _SOFT_FLOAT && !defined __NO_FPRS__
+	.hidden __fixdfdi
+	.hidden __fixsfdi
+	.hidden __fixunsdfdi
+	.hidden __fixunssfdi
+	.hidden __floatdidf
+	.hidden __floatdisf
+# endif
+#endif
+
+	.section	".text"
+
+	.align 2
+	.globl __ashldi3_v_glibc20
+	.type	__ashldi3_v_glibc20,@function
+__ashldi3_v_glibc20:
+	b __ashldi3@local
+.Lfe5:
+	.size	__ashldi3_v_glibc20,.Lfe5-__ashldi3_v_glibc20
+	.align 2
+	.globl __ashrdi3_v_glibc20
+	.type	__ashrdi3_v_glibc20,@function
+__ashrdi3_v_glibc20:
+	b __ashrdi3@local
+.Lfe6:
+	.size	__ashrdi3_v_glibc20,.Lfe6-__ashrdi3_v_glibc20
+	.align 2
+	.globl __lshrdi3_v_glibc20
+	.type	__lshrdi3_v_glibc20,@function
+__lshrdi3_v_glibc20:
+	b __lshrdi3@local
+.Lfe7:
+	.size	__lshrdi3_v_glibc20,.Lfe7-__lshrdi3_v_glibc20
+	.align 2
+	.globl __cmpdi2_v_glibc20
+	.type	__cmpdi2_v_glibc20,@function
+__cmpdi2_v_glibc20:
+	b __cmpdi2@local
+.Lfe8:
+	.size	__cmpdi2_v_glibc20,.Lfe8-__cmpdi2_v_glibc20
+	.align 2
+	.globl __ucmpdi2_v_glibc20
+	.type	__ucmpdi2_v_glibc20,@function
+__ucmpdi2_v_glibc20:
+	b __ucmpdi2@local
+.Lfe9:
+	.size	__ucmpdi2_v_glibc20,.Lfe9-__ucmpdi2_v_glibc20
+#if !defined _SOFT_FLOAT && !defined __NO_FPRS__
+	.align 2
+	.globl __fixdfdi_v_glibc20
+	.type	__fixdfdi_v_glibc20,@function
+__fixdfdi_v_glibc20:
+	b __fixdfdi@local
+.Lfe10:
+	.size	__fixdfdi_v_glibc20,.Lfe10-__fixdfdi_v_glibc20
+	.align 2
+	.globl __fixunsdfdi_v_glibc20
+	.type	__fixunsdfdi_v_glibc20,@function
+__fixunsdfdi_v_glibc20:
+	b __fixunsdfdi@local
+.Lfe11:
+	.size	__fixunsdfdi_v_glibc20,.Lfe11-__fixunsdfdi_v_glibc20
+	.align 2
+	.globl __fixsfdi_v_glibc20
+	.type	__fixsfdi_v_glibc20,@function
+__fixsfdi_v_glibc20:
+	b __fixsfdi@local
+.Lfe12:
+	.size	__fixsfdi_v_glibc20,.Lfe12-__fixsfdi_v_glibc20
+	.align 2
+	.globl __fixunssfdi_v_glibc20
+	.type	__fixunssfdi_v_glibc20,@function
+__fixunssfdi_v_glibc20:
+	b __fixunssfdi@local
+.Lfe13:
+	.size	__fixunssfdi_v_glibc20,.Lfe13-__fixunssfdi_v_glibc20
+	.align 2
+	.globl __floatdidf_v_glibc20
+	.type	__floatdidf_v_glibc20,@function
+__floatdidf_v_glibc20:
+	b __floatdidf@local
+.Lfe14:
+	.size	__floatdidf_v_glibc20,.Lfe14-__floatdidf_v_glibc20
+	.align 2
+	.globl __floatdisf_v_glibc20
+	.type	__floatdisf_v_glibc20,@function
+__floatdisf_v_glibc20:
+	b __floatdisf@local
+.Lfe15:
+	.size	__floatdisf_v_glibc20,.Lfe15-__floatdisf_v_glibc20
+#endif
+
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/lshift.S b/REORG.TODO/sysdeps/powerpc/powerpc32/lshift.S
new file mode 100644
index 0000000000..78c151f101
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/lshift.S
@@ -0,0 +1,125 @@
+/* Shift a limb left, low level routine.
+   Copyright (C) 1996-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* mp_limb_t mpn_lshift (mp_ptr wp, mp_srcptr up, mp_size_t usize,
+			 unsigned int cnt)  */
+
+EALIGN (__mpn_lshift, 3, 0)
+
+	mtctr	r5		# copy size into CTR
+	cmplwi	cr0,r5,16	# is size < 16
+	slwi	r0,r5,2
+	add	r7,r3,r0	# make r7 point at end of res
+	add	r4,r4,r0	# make r4 point at end of s1
+	lwzu	r11,-4(r4)	# load first s1 limb
+	subfic	r8,r6,32
+	srw	r3,r11,r8	# compute function return value
+	bge	cr0,L(big)	# branch if size >= 16
+
+	bdz	L(end1)
+
+L(0):	lwzu	r10,-4(r4)
+	slw	r9,r11,r6
+	srw	r12,r10,r8
+	or	r9,r9,r12
+	stwu	r9,-4(r7)
+	bdz	L(end2)
+	lwzu	r11,-4(r4)
+	slw	r9,r10,r6
+	srw	r12,r11,r8
+	or	r9,r9,r12
+	stwu	r9,-4(r7)
+	bdnz	L(0)
+
+L(end1):slw	r0,r11,r6
+	stw	r0,-4(r7)
+	blr
+
+
+/* Guaranteed not to succeed.  */
+L(boom): tweq    r0,r0
+
+/* We imitate a case statement by using (yuk!) fixed-length code chunks
+   of size 4*12 bytes.  We have to do this (or something) to make this PIC.  */
+L(big):	mflr    r9
+	cfi_register(lr,r9)
+	bltl-   cr0,L(boom)	# Never taken, only used to set LR.
+	slwi    r10,r6,4
+	mflr    r12
+	add     r10,r12,r10
+	slwi	r8,r6,5
+	add     r10,r8,r10
+	mtctr   r10
+	addi	r5,r5,-1
+	mtlr	r9
+	cfi_same_value (lr)
+	bctr
+
+L(end2):slw	r0,r10,r6
+	stw	r0,-4(r7)
+	blr
+
+#define DO_LSHIFT(n) \
+	mtctr	r5;							\
+L(n):	lwzu	r10,-4(r4);						\
+	slwi	r9,r11,n;						\
+	inslwi	r9,r10,n,32-n;					\
+	stwu	r9,-4(r7);						\
+	bdz-	L(end2);						\
+	lwzu	r11,-4(r4);						\
+	slwi	r9,r10,n;						\
+	inslwi	r9,r11,n,32-n;					\
+	stwu	r9,-4(r7);						\
+	bdnz	L(n);							\
+	b	L(end1)
+
+	DO_LSHIFT(1)
+	DO_LSHIFT(2)
+	DO_LSHIFT(3)
+	DO_LSHIFT(4)
+	DO_LSHIFT(5)
+	DO_LSHIFT(6)
+	DO_LSHIFT(7)
+	DO_LSHIFT(8)
+	DO_LSHIFT(9)
+	DO_LSHIFT(10)
+	DO_LSHIFT(11)
+	DO_LSHIFT(12)
+	DO_LSHIFT(13)
+	DO_LSHIFT(14)
+	DO_LSHIFT(15)
+	DO_LSHIFT(16)
+	DO_LSHIFT(17)
+	DO_LSHIFT(18)
+	DO_LSHIFT(19)
+	DO_LSHIFT(20)
+	DO_LSHIFT(21)
+	DO_LSHIFT(22)
+	DO_LSHIFT(23)
+	DO_LSHIFT(24)
+	DO_LSHIFT(25)
+	DO_LSHIFT(26)
+	DO_LSHIFT(27)
+	DO_LSHIFT(28)
+	DO_LSHIFT(29)
+	DO_LSHIFT(30)
+	DO_LSHIFT(31)
+
+END (__mpn_lshift)
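
A plain C rendering of what the main loop (and each unrolled DO_LSHIFT case) computes, valid for 0 < cnt < 32 and usize >= 1; the limb typedef is local to the sketch:

typedef unsigned int mp_limb_t;		/* 32-bit limb on powerpc32  */

/* Shift {up, n} left by cnt bits into {wp, n}, working downward from the
   most significant limb, and return the bits shifted out at the top.  */
static mp_limb_t
mpn_lshift_sketch (mp_limb_t *wp, const mp_limb_t *up, long n,
		   unsigned int cnt)
{
  unsigned int tnc = 32 - cnt;
  mp_limb_t high = up[n - 1];
  mp_limb_t retval = high >> tnc;	/* the srw that sets r3 above  */

  for (long i = n - 1; i > 0; i--)
    {
      mp_limb_t low = up[i - 1];
      wp[i] = (high << cnt) | (low >> tnc);
      high = low;
    }
  wp[0] = high << cnt;
  return retval;
}
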
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/mcount.c b/REORG.TODO/sysdeps/powerpc/powerpc32/mcount.c
new file mode 100644
index 0000000000..d8c063222a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/mcount.c
@@ -0,0 +1,17 @@
+#include <shlib-compat.h>
+
+#define __mcount_internal ___mcount_internal
+
+#include <gmon/mcount.c>
+
+#undef __mcount_internal
+
+/* __mcount_internal was added in glibc 2.15 with version GLIBC_PRIVATE,
+   but it should have been put in version GLIBC_2.15.  Mark the
+   GLIBC_PRIVATE version obsolete and add it to GLIBC_2.16 instead.  */
+versioned_symbol (libc, ___mcount_internal, __mcount_internal, GLIBC_2_16);
+
+#if SHLIB_COMPAT (libc, GLIBC_2_15, GLIBC_2_16)
+strong_alias (___mcount_internal, ___mcount_internal_private);
+symbol_version (___mcount_internal_private, __mcount_internal, GLIBC_PRIVATE);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/memset.S b/REORG.TODO/sysdeps/powerpc/powerpc32/memset.S
new file mode 100644
index 0000000000..8913a02698
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/memset.S
@@ -0,0 +1,307 @@
+/* Optimized memset implementation for PowerPC.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
+   Returns 's'.
+
+   The memset is done in four sizes: byte (8 bits), word (32 bits),
+   32-byte blocks (256 bits) and __cache_line_size (128, 256, 1024 bits).
+   There is a special case for setting whole cache lines to 0, which
+   takes advantage of the dcbz instruction.  */
+
+	.section	".text"
+EALIGN (memset, 5, 1)
+
+#define rTMP	r0
+#define rRTN	r3	/* initial value of 1st argument */
+#define rMEMP0	r3	/* original value of 1st arg */
+#define rCHR	r4	/* char to set in each byte */
+#define rLEN	r5	/* length of region to set */
+#define rMEMP	r6	/* address at which we are storing */
+#define rALIGN	r7	/* number of bytes we are setting now (when aligning) */
+#define rMEMP2	r8
+
+#define rPOS32	r7	/* constant +32 for clearing with dcbz */
+#define rNEG64	r8	/* constant -64 for clearing with dcbz */
+#define rNEG32	r9	/* constant -32 for clearing with dcbz */
+
+#define rGOT	r9	/* Address of the Global Offset Table.  */
+#define rCLS	r8	/* Cache line size obtained from static.  */
+#define rCLM	r9	/* Cache line size mask to check for cache alignment.  */
+
+/* take care of case for size <= 4  */
+	cmplwi	cr1, rLEN, 4
+	andi.	rALIGN, rMEMP0, 3
+	mr	rMEMP, rMEMP0
+	ble-	cr1, L(small)
+/* align to word boundary  */
+	cmplwi	cr5, rLEN, 31
+	rlwimi	rCHR, rCHR, 8, 16, 23
+	beq+	L(aligned)	/* 8th instruction from .align */
+	mtcrf	0x01, rMEMP0
+	subfic	rALIGN, rALIGN, 4
+	add	rMEMP, rMEMP, rALIGN
+	sub	rLEN, rLEN, rALIGN
+	bf+	31, L(g0)
+	stb	rCHR, 0(rMEMP0)
+	bt	30, L(aligned)
+L(g0):	sth	rCHR, -2(rMEMP)	/* 16th instruction from .align */
+/* take care of case for size < 31 */
+L(aligned):
+	mtcrf	0x01, rLEN
+	rlwimi	rCHR, rCHR, 16, 0, 15
+	ble	cr5, L(medium)
+/* align to cache line boundary...  */
+	andi.	rALIGN, rMEMP, 0x1C
+	subfic	rALIGN, rALIGN, 0x20
+	beq	L(caligned)
+	mtcrf	0x01, rALIGN
+	add	rMEMP, rMEMP, rALIGN
+	sub	rLEN, rLEN, rALIGN
+	cmplwi	cr1, rALIGN, 0x10
+	mr	rMEMP2, rMEMP
+	bf	28, L(a1)
+	stw	rCHR, -4(rMEMP2)
+	stwu	rCHR, -8(rMEMP2)
+L(a1):	blt	cr1, L(a2)
+	stw	rCHR, -4(rMEMP2) /* 32nd instruction from .align */
+	stw	rCHR, -8(rMEMP2)
+	stw	rCHR, -12(rMEMP2)
+	stwu	rCHR, -16(rMEMP2)
+L(a2):	bf	29, L(caligned)
+	stw	rCHR, -4(rMEMP2)
+/* now aligned to a cache line.  */
+L(caligned):
+	cmplwi	cr1, rCHR, 0
+	clrrwi.	rALIGN, rLEN, 5
+	mtcrf	0x01, rLEN	/* 40th instruction from .align */
+
+/* Check if we can use the special case for clearing memory using dcbz.
+   This requires that we know the correct cache line size for this
+   processor.  Getting the __cache_line_size may require establishing GOT
+   addressability, so branch out of line to set this up.  */
+	beq	cr1, L(checklinesize)
+
+/* Store blocks of 32-bytes (256-bits) starting on a 32-byte boundary.
+   Can't assume that rCHR is zero or that the cache line size is either
+   32-bytes or even known.  */
+L(nondcbz):
+	srwi	rTMP, rALIGN, 5
+	mtctr	rTMP
+	beq	L(medium)	/* we may not actually get to do a full line */
+	clrlwi.	rLEN, rLEN, 27
+	add	rMEMP, rMEMP, rALIGN
+	li	rNEG64, -0x40
+	bdz	L(cloopdone)	/* 48th instruction from .align */
+
+/* We can't use dcbz here as we don't know the cache line size.  We can
+   use "data cache block touch for store", which is safe.  */
+L(c3):	dcbtst	rNEG64, rMEMP
+	stw	rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	stw	rCHR, -12(rMEMP)
+	stw	rCHR, -16(rMEMP)
+	nop			/* let 601 fetch last 4 instructions of loop */
+	stw	rCHR, -20(rMEMP)
+	stw	rCHR, -24(rMEMP) /* 56th instruction from .align */
+	nop			/* let 601 fetch first 8 instructions of loop */
+	stw	rCHR, -28(rMEMP)
+	stwu	rCHR, -32(rMEMP)
+	bdnz	L(c3)
+L(cloopdone):
+	stw	rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	stw	rCHR, -12(rMEMP)
+	stw	rCHR, -16(rMEMP) /* 64th instruction from .align */
+	stw	rCHR, -20(rMEMP)
+	cmplwi	cr1, rLEN, 16
+	stw	rCHR, -24(rMEMP)
+	stw	rCHR, -28(rMEMP)
+	stwu	rCHR, -32(rMEMP)
+	beqlr
+	add	rMEMP, rMEMP, rALIGN
+	b	L(medium_tail2)	/* 72nd instruction from .align */
+
+	.align	5
+	nop
+/* Clear cache lines of memory in 128-byte chunks.
+   This code is optimized for processors with 32-byte cache lines.
+   It is further optimized for the 601 processor, which requires
+   some care in how the code is aligned in the i-cache.  */
+L(zloopstart):
+	clrlwi	rLEN, rLEN, 27
+	mtcrf	0x02, rALIGN
+	srwi.	rTMP, rALIGN, 7
+	mtctr	rTMP
+	li	rPOS32, 0x20
+	li	rNEG64, -0x40
+	cmplwi	cr1, rLEN, 16	/* 8 */
+	bf	26, L(z0)
+	dcbz	0, rMEMP
+	addi	rMEMP, rMEMP, 0x20
+L(z0):	li	rNEG32, -0x20
+	bf	25, L(z1)
+	dcbz	0, rMEMP
+	dcbz	rPOS32, rMEMP
+	addi	rMEMP, rMEMP, 0x40 /* 16 */
+L(z1):	cmplwi	cr5, rLEN, 0
+	beq	L(medium)
+L(zloop):
+	dcbz	0, rMEMP
+	dcbz	rPOS32, rMEMP
+	addi	rMEMP, rMEMP, 0x80
+	dcbz	rNEG64, rMEMP
+	dcbz	rNEG32, rMEMP
+	bdnz	L(zloop)
+	beqlr	cr5
+	b	L(medium_tail2)
+
+	.align	5
+L(small):
+/* Memset of 4 bytes or less.  */
+	cmplwi	cr5, rLEN, 1
+	cmplwi	cr1, rLEN, 3
+	bltlr	cr5
+	stb	rCHR, 0(rMEMP)
+	beqlr	cr5
+	nop
+	stb	rCHR, 1(rMEMP)
+	bltlr	cr1
+	stb	rCHR, 2(rMEMP)
+	beqlr	cr1
+	nop
+	stb	rCHR, 3(rMEMP)
+	blr
+
+/* Memset of 0-31 bytes.  */
+	.align	5
+L(medium):
+	cmplwi	cr1, rLEN, 16
+L(medium_tail2):
+	add	rMEMP, rMEMP, rLEN
+L(medium_tail):
+	bt-	31, L(medium_31t)
+	bt-	30, L(medium_30t)
+L(medium_30f):
+	bt-	29, L(medium_29t)
+L(medium_29f):
+	bge-	cr1, L(medium_27t)
+	bflr-	28
+	stw	rCHR, -4(rMEMP)	/* 8th instruction from .align */
+	stw	rCHR, -8(rMEMP)
+	blr
+
+L(medium_31t):
+	stbu	rCHR, -1(rMEMP)
+	bf-	30, L(medium_30f)
+L(medium_30t):
+	sthu	rCHR, -2(rMEMP)
+	bf-	29, L(medium_29f)
+L(medium_29t):
+	stwu	rCHR, -4(rMEMP)
+	blt-	cr1, L(medium_27f) /* 16th instruction from .align */
+L(medium_27t):
+	stw	rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	stw	rCHR, -12(rMEMP)
+	stwu	rCHR, -16(rMEMP)
+L(medium_27f):
+	bflr-	28
+L(medium_28t):
+	stw	rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	blr
+
+L(checklinesize):
+#ifdef SHARED
+	mflr	rTMP
+/* If the remaining length is less than 32 bytes then don't bother getting
+   the cache line size.  */
+	beq	L(medium)
+/* Establishes GOT addressability so we can load __cache_line_size
+   from static. This value was set from the aux vector during startup.  */
+	SETUP_GOT_ACCESS(rGOT,got_label)
+	addis	rGOT,rGOT,__cache_line_size-got_label@ha
+	lwz	rCLS,__cache_line_size-got_label@l(rGOT)
+	mtlr	rTMP
+#else
+/* Load __cache_line_size from static. This value was set from the
+   aux vector during startup.  */
+	lis	rCLS,__cache_line_size@ha
+/* If the remaining length is less than 32 bytes then don't bother getting
+   the cache line size.  */
+	beq	L(medium)
+	lwz	rCLS,__cache_line_size@l(rCLS)
+#endif
+
+/* If the cache line size was not set then go to L(nondcbz), which is
+   safe for any cache line size.  */
+	cmplwi	cr1,rCLS,0
+	beq	cr1,L(nondcbz)
+
+/* If the cache line size is 32 bytes then go to L(zloopstart),
+   which is coded specifically for 32-byte lines (and 601).  */
+	cmplwi	cr1,rCLS,32
+	beq	cr1,L(zloopstart)
+
+/* Now we know the cache line size and it is not 32 bytes.  However
+   we may not yet be aligned to the cache line and may have a partial
+   line to fill.  Touch it first to fetch the cache line.  */
+	dcbtst	0,rMEMP
+
+	addi	rCLM,rCLS,-1
+L(getCacheAligned):
+	cmplwi	cr1,rLEN,32
+	and.	rTMP,rCLM,rMEMP
+	blt	cr1,L(handletail32)
+	beq	L(cacheAligned)
+/* We are not aligned to the start of a cache line yet.  Store 32 bytes
+   of data and test again.  */
+	addi	rMEMP,rMEMP,32
+	addi	rLEN,rLEN,-32
+	stw	rCHR,-32(rMEMP)
+	stw	rCHR,-28(rMEMP)
+	stw	rCHR,-24(rMEMP)
+	stw	rCHR,-20(rMEMP)
+	stw	rCHR,-16(rMEMP)
+	stw	rCHR,-12(rMEMP)
+	stw	rCHR,-8(rMEMP)
+	stw	rCHR,-4(rMEMP)
+	b	L(getCacheAligned)
+
+/* Now we are aligned to the cache line and can use dcbz.  */
+L(cacheAligned):
+	cmplw	cr1,rLEN,rCLS
+	blt	cr1,L(handletail32)
+	dcbz	0,rMEMP
+	subf	rLEN,rCLS,rLEN
+	add	rMEMP,rMEMP,rCLS
+	b	L(cacheAligned)
+
+/* We are here because the cache line size was set, it was not
+   32 bytes, and the remainder (rLEN) is now less than the actual cache
+   line size.  Set up the preconditions for L(nondcbz) and go there to
+   store the remaining bytes.  */
+L(handletail32):
+	clrrwi.	rALIGN, rLEN, 5
+	b	L(nondcbz)
+
+END (memset)
+libc_hidden_builtin_def (memset)
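
The control flow above is easier to follow against a high-level outline.  This is a sketch of the strategy only, not a drop-in memset: the byte loops stand in for the word stores, dcbtst and dcbz of the assembly, and cache_line_size stands in for __cache_line_size:

#include <stddef.h>
#include <stdint.h>

static unsigned long cache_line_size;	/* filled from the aux vector at startup  */

static void *
memset_sketch (void *s, int c, size_t n)
{
  unsigned char *p = s;

  /* L(small): four bytes or fewer are stored directly.  */
  if (n <= 4)
    {
      for (size_t i = 0; i < n; i++)
	p[i] = (unsigned char) c;
      return s;
    }

  /* Align to a word and then to a 32-byte boundary, storing as we go.  */
  while (n > 0 && ((uintptr_t) p & 31) != 0)
    {
      *p++ = (unsigned char) c;
      n--;
    }

  /* When clearing and the line size is known, reach a cache-line boundary
     with 32-byte blocks, then zero whole lines (one dcbz per line).  */
  if (c == 0 && cache_line_size != 0)
    {
      while (n >= 32 && ((uintptr_t) p & (cache_line_size - 1)) != 0)
	{
	  for (int i = 0; i < 32; i++)
	    p[i] = 0;
	  p += 32;
	  n -= 32;
	}
      while (n >= cache_line_size)
	{
	  for (unsigned long i = 0; i < cache_line_size; i++)
	    p[i] = 0;
	  p += cache_line_size;
	  n -= cache_line_size;
	}
    }

  /* Everything that remains (the 32-byte blocks and the sub-32-byte tail
     handled by L(nondcbz)/L(medium) in the assembly).  */
  while (n-- > 0)
    *p++ = (unsigned char) c;
  return s;
}
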
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/mul_1.S b/REORG.TODO/sysdeps/powerpc/powerpc32/mul_1.S
new file mode 100644
index 0000000000..c3093e2bc6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/mul_1.S
@@ -0,0 +1,45 @@
+/* Multiply a limb vector by a limb, for PowerPC.
+   Copyright (C) 1993-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* mp_limb_t mpn_mul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
+                        mp_size_t s1_size, mp_limb_t s2_limb)
+   Calculate s1*s2 and put result in res_ptr; return carry.  */
+
+ENTRY (__mpn_mul_1)
+	mtctr	r5
+
+	lwz	r0,0(r4)
+	mullw	r7,r0,r6
+	mulhwu	r10,r0,r6
+	addi	r3,r3,-4		# adjust res_ptr
+	addic	r5,r5,0			# clear cy with dummy insn
+	bdz	L(1)
+
+L(0):	lwzu	r0,4(r4)
+	stwu	r7,4(r3)
+	mullw	r8,r0,r6
+	adde	r7,r8,r10
+	mulhwu	r10,r0,r6
+	bdnz	L(0)
+
+L(1):	stw	r7,4(r3)
+	addze	r3,r10
+	blr
+END (__mpn_mul_1)
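
In C, the mullw/mulhwu pair plus the adde carry chain above amounts to the following sketch (limb typedef local to the example):

typedef unsigned int mp_limb_t;		/* 32-bit limb on powerpc32  */

/* Multiply {up, n} by v, store the low limbs at {rp, n}, and return the
   final carry limb.  */
static mp_limb_t
mpn_mul_1_sketch (mp_limb_t *rp, const mp_limb_t *up, long n, mp_limb_t v)
{
  mp_limb_t carry = 0;

  for (long i = 0; i < n; i++)
    {
      unsigned long long prod = (unsigned long long) up[i] * v + carry;
      rp[i] = (mp_limb_t) prod;			/* low half: mullw  */
      carry = (mp_limb_t) (prod >> 32);		/* high half + carry: mulhwu/adde  */
    }
  return carry;
}
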
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/Implies
new file mode 100644
index 0000000000..a372141bb7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/Implies
@@ -0,0 +1,2 @@
+powerpc/power4/fpu
+powerpc/power4
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/Makefile b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/Makefile
new file mode 100644
index 0000000000..ba06adb5d0
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/Makefile
@@ -0,0 +1,6 @@
+# Makefile fragment for POWER4/5/5+.
+
+ifeq ($(subdir),string)
+CFLAGS-wordcopy.c += --param max-variable-expansions-in-unroller=2 --param max-unroll-times=2 -funroll-loops -fpeel-loops
+CFLAGS-memmove.c += --param max-variable-expansions-in-unroller=2 --param max-unroll-times=2 -funroll-loops -fpeel-loops
+endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/Makefile b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/Makefile
new file mode 100644
index 0000000000..5afbade15f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/Makefile
@@ -0,0 +1,43 @@
+ifeq ($(subdir),math)
+sysdep_routines += s_isnan-power7 s_isnan-power6 s_isnan-power5 s_isnan-ppc32 \
+		   s_isnanf-power6 s_isnanf-power5 s_isinf-power7 \
+		   s_isinf-ppc32 s_isinff-ppc32 s_finite-power7 \
+		   s_finite-ppc32 s_finitef-ppc32 s_copysign-power6 \
+		   s_copysign-ppc32 s_modf-power5+ s_modf-ppc32 \
+		   s_modff-power5+ s_modff-ppc32
+
+libm-sysdep_routines += s_llrintf-power6 s_llrintf-ppc32 s_llrint-power6 \
+			s_llrint-ppc32 s_llround-power6 s_llround-power5+ \
+			s_llround-ppc32 s_isnan-power7 \
+			w_sqrt_compat-power5 w_sqrt_compat-ppc32 \
+			w_sqrtf_compat-power5 w_sqrtf_compat-ppc32 \
+			s_isnan-power6 s_isnan-power5 s_isnan-ppc32 \
+			s_isnanf-power6 s_isnanf-power5 s_isinf-power7 \
+			s_isinf-ppc32 s_isinff-ppc32 s_finite-power7 \
+			s_finite-ppc32 s_finitef-ppc32 s_ceil-power5+ \
+			s_ceil-ppc32 s_ceilf-power5+ s_ceilf-ppc32 \
+			s_floor-power5+ s_floor-ppc32 s_floorf-power5+ \
+			s_floorf-ppc32 s_round-power5+ s_round-ppc32 \
+			s_roundf-power5+ s_roundf-ppc32 s_trunc-power5+ \
+			s_trunc-ppc32 s_truncf-power5+ s_truncf-ppc32 \
+			s_copysign-power6 s_copysign-ppc32 s_lround-power6x \
+			s_lround-power5+ s_lround-ppc32 s_lrint-power6x \
+			s_lrint-ppc32 s_modf-power5+ s_modf-ppc32 \
+			s_modff-power5+ s_modff-ppc32 s_logbl-power7 \
+			s_logbl-ppc32 s_logb-power7 s_logb-ppc32 \
+			s_logbf-power7 s_logbf-ppc32 e_hypot-power7 \
+			e_hypot-ppc32 e_hypotf-power7 e_hypotf-ppc32
+
+CFLAGS-s_modf-power5+.c = -mcpu=power5+
+CFLAGS-s_modff-power5+.c = -mcpu=power5+
+CFLAGS-s_logbl-power7.c = -mcpu=power7
+CFLAGS-s_logb-power7.c = -mcpu=power7
+CFLAGS-s_logbf-power7.c = -mcpu=power7
+CFLAGS-e_hypot-power7.c = -mcpu=power7
+CFLAGS-e_hypotf-power7.c = -mcpu=power7
+
+# These files quiet sNaNs in a way that is optimized away without
+# -fsignaling-nans.
+CFLAGS-s_modf-ppc32.c += -fsignaling-nans
+CFLAGS-s_modff-ppc32.c += -fsignaling-nans
+endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot-power7.c
new file mode 100644
index 0000000000..d62b7c4f7b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot-power7.c
@@ -0,0 +1,26 @@
+/* __ieee754_hypot() POWER7 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef strong_alias
+#define strong_alias(a, b)
+
+#define __ieee754_hypot __ieee754_hypot_power7
+
+#include <sysdeps/powerpc/fpu/e_hypot.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot-ppc32.c
new file mode 100644
index 0000000000..25984b724c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot-ppc32.c
@@ -0,0 +1,26 @@
+/* __ieee754_hypot() PowerPC32 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef strong_alias
+#define strong_alias(a, b)
+
+#define __ieee754_hypot __ieee754_hypot_ppc32
+
+#include <sysdeps/powerpc/fpu/e_hypot.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot.c
new file mode 100644
index 0000000000..5b8a06b4d1
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypot.c
@@ -0,0 +1,32 @@
+/* Multiple versions of ieee754_hypot.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__ieee754_hypot) __ieee754_hypot_ppc32 attribute_hidden;
+extern __typeof (__ieee754_hypot) __ieee754_hypot_power7 attribute_hidden;
+
+libc_ifunc (__ieee754_hypot,
+	    (hwcap & PPC_FEATURE_ARCH_2_06)
+	    ? __ieee754_hypot_power7
+            : __ieee754_hypot_ppc32);
+
+strong_alias (__ieee754_hypot, __hypot_finite)
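The libc_ifunc selector above binds __ieee754_hypot once per process from the AT_HWCAP bits.  A rough standalone sketch of the same dispatch idea, with placeholder names (my_hypot_*, MY_ARCH_2_06_BIT) and a constructor standing in for glibc's IFUNC machinery, which performs the selection in the dynamic linker instead:

#include <math.h>
#include <sys/auxv.h>   /* getauxval, AT_HWCAP */

/* Placeholder for the PPC_FEATURE_ARCH_2_06 bit tested above.  */
#define MY_ARCH_2_06_BIT  (1UL << 8)

/* Stand-ins for the tuned and generic implementations being selected.  */
static double my_hypot_power7 (double x, double y) { return sqrt (x * x + y * y); }
static double my_hypot_ppc32  (double x, double y) { return sqrt (x * x + y * y); }

/* One function pointer chosen per process; glibc reaches the same effect
   through an IFUNC relocation resolved at load time.  */
double (*my_hypot) (double, double);

__attribute__ ((constructor)) static void
select_my_hypot (void)
{
  unsigned long hwcap = getauxval (AT_HWCAP);
  my_hypot = (hwcap & MY_ARCH_2_06_BIT) ? my_hypot_power7 : my_hypot_ppc32;
}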
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf-power7.c
new file mode 100644
index 0000000000..f52bc635d1
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf-power7.c
@@ -0,0 +1,26 @@
+/* __ieee754_hypotf() POWER7 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef strong_alias
+#define strong_alias(a, b)
+
+#define __ieee754_hypotf __ieee754_hypotf_power7
+
+#include <sysdeps/powerpc/fpu/e_hypotf.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf-ppc32.c
new file mode 100644
index 0000000000..d9f86163c9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf-ppc32.c
@@ -0,0 +1,26 @@
+/* __ieee754_hypotf() PowerPC32 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef strong_alias
+#define strong_alias(a, b)
+
+#define __ieee754_hypotf __ieee754_hypotf_ppc32
+
+#include <sysdeps/ieee754/flt-32/e_hypotf.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf.c
new file mode 100644
index 0000000000..d1fc0ce532
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/e_hypotf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of ieee754_hypotf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__ieee754_hypotf) __ieee754_hypotf_ppc32 attribute_hidden;
+extern __typeof (__ieee754_hypotf) __ieee754_hypotf_power7 attribute_hidden;
+
+libc_ifunc (__ieee754_hypotf,
+	    (hwcap & PPC_FEATURE_ARCH_2_06)
+	    ? __ieee754_hypotf_power7
+            : __ieee754_hypotf_ppc32);
+
+strong_alias (__ieee754_hypotf, __hypotf_finite)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil-power5+.S
new file mode 100644
index 0000000000..670d6bbffb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil-power5+.S
@@ -0,0 +1,33 @@
+/* ceil function.  PowerPC32/power5+ version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __ceil __ceil_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil-ppc32.S
new file mode 100644
index 0000000000..77d43c5de7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil-ppc32.S
@@ -0,0 +1,31 @@
+/* ceil function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __ceil __ceil_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_ceil.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil.c
new file mode 100644
index 0000000000..4e3d980ce6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceil.c
@@ -0,0 +1,40 @@
+/* Multiple versions of ceil.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__ceil) __ceil_ppc32 attribute_hidden;
+extern __typeof (__ceil) __ceil_power5plus attribute_hidden;
+
+libc_ifunc (__ceil,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __ceil_power5plus
+            : __ceil_ppc32);
+
+weak_alias (__ceil, ceil)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__ceil, __ceill)
+weak_alias (__ceil, ceill)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __ceil, ceill, GLIBC_2_0);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf-power5+.S
new file mode 100644
index 0000000000..089261460e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf-power5+.S
@@ -0,0 +1,26 @@
+/* ceilf function.  PowerPC32/power5+ version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#define __ceilf __ceilf_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf-ppc32.S
new file mode 100644
index 0000000000..c783919f3a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf-ppc32.S
@@ -0,0 +1,27 @@
+/* ceilf function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __ceilf __ceilf_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_ceilf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf.c
new file mode 100644
index 0000000000..9674001caa
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_ceilf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of ceilf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__ceilf) __ceilf_ppc32 attribute_hidden;
+extern __typeof (__ceilf) __ceilf_power5plus attribute_hidden;
+
+libc_ifunc (__ceilf,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __ceilf_power5plus
+            : __ceilf_ppc32);
+
+weak_alias (__ceilf, ceilf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign-power6.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign-power6.S
new file mode 100644
index 0000000000..1f58420763
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign-power6.S
@@ -0,0 +1,33 @@
+/* copysign().  PowerPC32/POWER6 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __copysign __copysign_power6
+
+#include <sysdeps/powerpc/powerpc32/power6/fpu/s_copysign.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign-ppc32.S
new file mode 100644
index 0000000000..5d46f0379a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign-ppc32.S
@@ -0,0 +1,34 @@
+/* copysign().  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a, b, c, d)
+
+#define __copysign __copysign_ppc32
+#undef hidden_def
+#define hidden_def(name) \
+  strong_alias (__copysign_ppc32, __GI___copysign)
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_copysign.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign.c
new file mode 100644
index 0000000000..bddc1ab3c5
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysign.c
@@ -0,0 +1,51 @@
+/* Multiple versions of copysign.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Redefine copysign so that the compiler won't complain about the type
+   mismatch with the IFUNC selector in strong_alias below.  */
+#undef __copysign
+#define __copysign __redirect_copysign
+#include <math.h>
+#include <math_ldbl_opt.h>
+#undef __copysign
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__redirect_copysign) __copysign_ppc32 attribute_hidden;
+extern __typeof (__redirect_copysign) __copysign_power6 attribute_hidden;
+
+extern __typeof (__redirect_copysign) __libm_copysign;
+libc_ifunc (__libm_copysign,
+	    (hwcap & PPC_FEATURE_ARCH_2_05)
+	    ? __copysign_power6
+            : __copysign_ppc32);
+
+strong_alias (__libm_copysign, __copysign)
+weak_alias (__copysign, copysign)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__copysign, copysignl)
+strong_alias (__copysign, __copysignl)
+#endif
+#if IS_IN (libm)
+# if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __copysign, copysignl, GLIBC_2_0);
+# endif
+#elif LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __copysign, copysignl, GLIBC_2_0);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysignf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysignf.c
new file mode 100644
index 0000000000..7709e08968
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_copysignf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of copysignf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+/* It is safe to use the double-precision implementation for single precision.  */
+extern __typeof (__copysignf) __copysign_ppc32 attribute_hidden;
+extern __typeof (__copysignf) __copysign_power6 attribute_hidden;
+
+libc_ifunc (__copysignf,
+	    (hwcap & PPC_FEATURE_ARCH_2_05)
+	    ? __copysign_power6
+            : __copysign_ppc32);
+
+weak_alias (__copysignf, copysignf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite-power7.S
new file mode 100644
index 0000000000..eb23f73e6e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite-power7.S
@@ -0,0 +1,33 @@
+/* finite().  PowerPC32/POWER7 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __finite __finite_power7
+
+#include <sysdeps/powerpc/powerpc32/power7/fpu/s_finite.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite-ppc32.c
new file mode 100644
index 0000000000..495dd36d0b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite-ppc32.c
@@ -0,0 +1,33 @@
+/* finite().  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef weak_alias
+#define weak_alias(a, b)
+#undef strong_alias
+#define strong_alias(a, b)
+
+#define FINITE __finite_ppc32
+#ifdef SHARED
+# undef hidden_def
+# define hidden_def(a) \
+   __hidden_ver1 (__finite_ppc32, __GI___finite, __finite_ppc32);
+#endif
+
+#include <sysdeps/ieee754/dbl-64/s_finite.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite.c
new file mode 100644
index 0000000000..d3d0755bc0
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finite.c
@@ -0,0 +1,57 @@
+/* Multiple versions of finite.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define __finite __redirect___finite
+#define __finitef __redirect___finitef
+#define __finitel __redirect___finitel
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__finite) __finite_ppc32 attribute_hidden;
+extern __typeof (__finite) __finite_power7 attribute_hidden;
+#undef __finite
+#undef __finitef
+#undef __finitel
+
+libc_ifunc_redirected (__redirect___finite,  __finite,
+		       (hwcap & PPC_FEATURE_ARCH_2_06)
+		       ? __finite_power7
+		       : __finite_ppc32);
+
+weak_alias (__finite, finite)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__finite, __finitel)
+weak_alias (__finite, finitel)
+#endif
+
+#if IS_IN (libm)
+# if LONG_DOUBLE_COMPAT (libm, GLIBC_2_0)
+compat_symbol (libm, finite, finitel, GLIBC_2_0);
+# endif
+# if LONG_DOUBLE_COMPAT (libm, GLIBC_2_1)
+compat_symbol (libm, __finite, __finitel, GLIBC_2_1);
+# endif
+#else
+# if LONG_DOUBLE_COMPAT (libc, GLIBC_2_0)
+compat_symbol (libc, __finite, __finitel, GLIBC_2_0);
+compat_symbol (libc, finite, finitel, GLIBC_2_0);
+# endif
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finitef-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finitef-ppc32.c
new file mode 100644
index 0000000000..4a52949066
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finitef-ppc32.c
@@ -0,0 +1,31 @@
+/* finitef().  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+#define FINITEF __finitef_ppc32
+#ifdef SHARED
+# undef hidden_def
+# define hidden_def(a) \
+   __hidden_ver1 (__finitef_ppc32, __GI___finitef, __finitef_ppc32);
+#endif
+
+#include <sysdeps/ieee754/flt-32/s_finitef.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finitef.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finitef.c
new file mode 100644
index 0000000000..fa214f37ae
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_finitef.c
@@ -0,0 +1,34 @@
+/* Multiple versions of finitef.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define __finitef __redirect___finitef
+#include <math.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__finitef) __finitef_ppc32 attribute_hidden;
+/* The power7 finite(double) works for float.  */
+extern __typeof (__finitef) __finite_power7 attribute_hidden;
+#undef __finitef
+
+libc_ifunc_redirected  (__redirect___finitef, __finitef,
+			(hwcap & PPC_FEATURE_ARCH_2_06)
+			? __finite_power7
+			: __finitef_ppc32);
+
+weak_alias (__finitef, finitef)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor-power5+.S
new file mode 100644
index 0000000000..dfecd1c59e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor-power5+.S
@@ -0,0 +1,33 @@
+/* floor function.  PowerPC32/power5+ version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __floor __floor_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor-ppc32.S
new file mode 100644
index 0000000000..3af604ca25
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor-ppc32.S
@@ -0,0 +1,31 @@
+/* floor function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __floor __floor_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_floor.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor.c
new file mode 100644
index 0000000000..0da528c922
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floor.c
@@ -0,0 +1,40 @@
+/* Multiple versions of floor.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__floor) __floor_ppc32 attribute_hidden;
+extern __typeof (__floor) __floor_power5plus attribute_hidden;
+
+libc_ifunc (__floor,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __floor_power5plus
+            : __floor_ppc32);
+
+weak_alias (__floor, floor)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__floor, __floorl)
+weak_alias (__floor, floorl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __floor, floorl, GLIBC_2_0);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf-power5+.S
new file mode 100644
index 0000000000..fd2fa331bb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf-power5+.S
@@ -0,0 +1,26 @@
+/* floorf function.  PowerPC32/power5+ version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#define __floorf __floorf_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf-ppc32.S
new file mode 100644
index 0000000000..55bee8652b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf-ppc32.S
@@ -0,0 +1,27 @@
+/* floorf function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __floorf __floorf_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_floorf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf.c
new file mode 100644
index 0000000000..56375097f7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_floorf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of floorf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__floorf) __floorf_ppc32 attribute_hidden;
+extern __typeof (__floorf) __floorf_power5plus attribute_hidden;
+
+libc_ifunc (__floorf,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __floorf_power5plus
+            : __floorf_ppc32);
+
+weak_alias (__floorf, floorf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf-power7.S
new file mode 100644
index 0000000000..f7c7510649
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf-power7.S
@@ -0,0 +1,33 @@
+/* isinf().  PowerPC32/POWER7 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __isinf __isinf_power7
+
+#include <sysdeps/powerpc/powerpc32/power7/fpu/s_isinf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf-ppc32.c
new file mode 100644
index 0000000000..0d1cb75cf3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf-ppc32.c
@@ -0,0 +1,33 @@
+/* isinf().  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef weak_alias
+#define weak_alias(a, b)
+#undef strong_alias
+#define strong_alias(a, b)
+
+#define __isinf __isinf_ppc32
+#ifdef SHARED
+# undef hidden_def
+# define hidden_def(a) \
+   __hidden_ver1 (__isinf_ppc32, __GI___isinf, __isinf_ppc32);
+#endif
+
+#include <sysdeps/ieee754/dbl-64/s_isinf.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf.c
new file mode 100644
index 0000000000..c7d7568ce0
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinf.c
@@ -0,0 +1,50 @@
+/* Multiple versions of isinf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define __isinf __redirect___isinf
+#define __isinff __redirect___isinff
+#define __isinfl __redirect___isinfl
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__isinf) __isinf_ppc32 attribute_hidden;
+extern __typeof (__isinf) __isinf_power7 attribute_hidden;
+#undef __isinf
+#undef __isinff
+#undef __isinfl
+
+libc_ifunc_redirected (__redirect___isinf,  __isinf,
+		       (hwcap & PPC_FEATURE_ARCH_2_06)
+		       ? __isinf_power7
+		       : __isinf_ppc32);
+
+weak_alias (__isinf, isinf)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__isinf, __isinfl)
+weak_alias (__isinf, isinfl)
+#endif
+
+#if !IS_IN (libm)
+# if LONG_DOUBLE_COMPAT (libc, GLIBC_2_0)
+compat_symbol (libc, __isinf, __isinfl, GLIBC_2_0);
+compat_symbol (libc, isinf, isinfl, GLIBC_2_0);
+# endif
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinff-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinff-ppc32.c
new file mode 100644
index 0000000000..25fd22d0c3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinff-ppc32.c
@@ -0,0 +1,31 @@
+/* isinff().  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+#define __isinff __isinff_ppc32
+#ifdef SHARED
+# undef hidden_def
+# define hidden_def(a) \
+   __hidden_ver1 (__isinff_ppc32, __GI___isinff, __isinff_ppc32);
+#endif
+
+#include <sysdeps/ieee754/flt-32/s_isinff.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinff.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinff.c
new file mode 100644
index 0000000000..fd6e9983f6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isinff.c
@@ -0,0 +1,35 @@
+/* Multiple versions of isinff.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define __isinff __redirect___isinff
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__isinff) __isinff_ppc32 attribute_hidden;
+/* The power7 isinf(double) works for float.  */
+extern __typeof (__isinff) __isinf_power7 attribute_hidden;
+#undef __isinff
+
+libc_ifunc_redirected (__redirect___isinff,  __isinff,
+		       (hwcap & PPC_FEATURE_ARCH_2_06)
+		       ? __isinf_power7
+		       : __isinff_ppc32);
+
+weak_alias (__isinff, isinff)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power5.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power5.S
new file mode 100644
index 0000000000..36d6709ab0
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power5.S
@@ -0,0 +1,33 @@
+/* isnan().  PowerPC32/POWER5 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, symbol, ver)
+
+#define __isnan __isnan_power5
+
+#include <sysdeps/powerpc/powerpc32/power5/fpu/s_isnan.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power6.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power6.S
new file mode 100644
index 0000000000..0ee970330c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power6.S
@@ -0,0 +1,33 @@
+/* isnan().  PowerPC32/POWER6 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, symbol, ver)
+
+#define __isnan __isnan_power6
+
+#include <sysdeps/powerpc/powerpc32/power6/fpu/s_isnan.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power7.S
new file mode 100644
index 0000000000..24d5a21d73
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-power7.S
@@ -0,0 +1,33 @@
+/* isnan().  PowerPC32/POWER7 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, symbol, ver)
+
+#define __isnan __isnan_power7
+
+#include <sysdeps/powerpc/powerpc32/power7/fpu/s_isnan.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-ppc32.S
new file mode 100644
index 0000000000..175229edd6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan-ppc32.S
@@ -0,0 +1,32 @@
+/* isnan().  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a, b)
+#undef compat_symbol
+#define compat_symbol(a, b, c, d)
+
+#define __isnan __isnan_ppc32
+#undef hidden_def
+#define hidden_def(name) \
+  strong_alias (__isnan_ppc32, __GI___isnan)
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_isnan.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan.c
new file mode 100644
index 0000000000..79447af535
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnan.c
@@ -0,0 +1,56 @@
+/* Multiple versions of isnan.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define __isnan __redirect___isnan
+#define __isnanf __redirect___isnanf
+#define __isnanl __redirect___isnanl
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__isnan) __isnan_ppc32 attribute_hidden;
+extern __typeof (__isnan) __isnan_power5 attribute_hidden;
+extern __typeof (__isnan) __isnan_power6 attribute_hidden;
+extern __typeof (__isnan) __isnan_power7 attribute_hidden;
+#undef __isnan
+#undef __isnanf
+#undef __isnanl
+
+libc_ifunc_redirected (__redirect___isnan, __isnan,
+		       (hwcap & PPC_FEATURE_ARCH_2_06)
+		       ? __isnan_power7
+		       : (hwcap & PPC_FEATURE_ARCH_2_05)
+			 ? __isnan_power6
+			 : (hwcap & PPC_FEATURE_POWER5)
+			   ? __isnan_power5
+			   : __isnan_ppc32);
+
+weak_alias (__isnan, isnan)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__isnan, __isnanl)
+weak_alias (__isnan, isnanl)
+#endif
+
+#if !IS_IN (libm)
+# if LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __isnan, __isnanl, GLIBC_2_0);
+compat_symbol (libc, isnan, isnanl, GLIBC_2_0);
+# endif
+#endif
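
The file above is the dispatch point for __isnan: libc_ifunc_redirected installs an IFUNC resolver that inspects the AT_HWCAP bits once, at load time, and binds __isnan to one of the four implementations. As a rough standalone illustration of that cascade (not the actual macro expansion; the resolve_isnan helper, the placeholder bodies and the hard-coded hwcap values below are assumptions made for this sketch):

/* Illustrative sketch of the hwcap cascade used by the ifunc above.
   Standalone demo, not part of glibc; the resolver here is called by
   hand, whereas in glibc the dynamic loader invokes the generated
   IFUNC resolver during relocation.  */
#include <stdio.h>

#define PPC_FEATURE_POWER5    0x00040000  /* hwcap bit values assumed from */
#define PPC_FEATURE_ARCH_2_05 0x00001000  /* the Linux powerpc ABI; glibc  */
#define PPC_FEATURE_ARCH_2_06 0x00000100  /* gets them from its headers.   */

static int isnan_ppc32  (double x) { return x != x; }  /* placeholder bodies; */
static int isnan_power5 (double x) { return x != x; }  /* the real ones are   */
static int isnan_power6 (double x) { return x != x; }  /* the assembly files  */
static int isnan_power7 (double x) { return x != x; }  /* in this directory.  */

typedef int (*isnan_fn) (double);

/* Same newest-first cascade as the libc_ifunc_redirected call above.  */
static isnan_fn
resolve_isnan (unsigned long hwcap)
{
  return (hwcap & PPC_FEATURE_ARCH_2_06) ? isnan_power7
	 : (hwcap & PPC_FEATURE_ARCH_2_05) ? isnan_power6
	 : (hwcap & PPC_FEATURE_POWER5) ? isnan_power5
	 : isnan_ppc32;
}

int
main (void)
{
  isnan_fn f = resolve_isnan (PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_POWER5);
  printf ("selected the POWER6 path; 0/0 is NaN: %d\n", f (0.0 / 0.0));
  return 0;
}
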
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf-power5.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf-power5.S
new file mode 100644
index 0000000000..4e57289794
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf-power5.S
@@ -0,0 +1,28 @@
+/* isnanf().  PowerPC32/POWER5 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#define __isnanf __isnanf_power5
+
+#include <sysdeps/powerpc/powerpc32/power5/fpu/s_isnanf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf-power6.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf-power6.S
new file mode 100644
index 0000000000..40687b5f43
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf-power6.S
@@ -0,0 +1,28 @@
+/* isnanf().  PowerPC32/POWER6 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#define __isnanf __isnanf_power6
+
+#include <sysdeps/powerpc/powerpc32/power6/fpu/s_isnanf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf.c
new file mode 100644
index 0000000000..12bdcffcec
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_isnanf.c
@@ -0,0 +1,39 @@
+/* Multiple versions of isnanf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+/* The ppc32 and power7 isnan (double) implementations also work for float.  */
+extern __typeof (__isnanf) __isnan_ppc32 attribute_hidden;
+extern __typeof (__isnanf) __isnanf_power5 attribute_hidden;
+extern __typeof (__isnanf) __isnanf_power6 attribute_hidden;
+extern __typeof (__isnanf) __isnan_power7 attribute_hidden;
+
+libc_ifunc_hidden (__isnanf, __isnanf,
+		   (hwcap & PPC_FEATURE_ARCH_2_06)
+		   ? __isnan_power7
+		   : (hwcap & PPC_FEATURE_ARCH_2_05)
+		     ? __isnanf_power6
+		     : (hwcap & PPC_FEATURE_POWER5)
+		       ? __isnanf_power5
+		       : __isnan_ppc32);
+
+hidden_def (__isnanf)
+weak_alias (__isnanf, isnanf)
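
The comment above relies on a property of the conversion rules: widening a float NaN to double still yields a NaN (the exponent field stays all-ones while the payload is widened), which is why the double-precision __isnan_ppc32 and __isnan_power7 can stand in for the float variant. A tiny standalone check of that property (ordinary ISO C, not glibc code):

/* Demo: a float NaN survives promotion to double, so a double isnan
   routine gives the right answer for float arguments as well.  */
#include <assert.h>
#include <math.h>

int
main (void)
{
  float fnan = nanf ("");     /* quiet float NaN */
  double widened = fnan;      /* implicit float -> double conversion */
  assert (isnan (widened));   /* still a NaN after widening */
  return 0;
}
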
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint-power6.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint-power6.S
new file mode 100644
index 0000000000..07c0f94a2f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint-power6.S
@@ -0,0 +1,31 @@
+/* Round double to long int.  PowerPC32/Power6.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __llrint __llrint_power6
+
+#include <sysdeps/powerpc/powerpc32/power6/fpu/s_llrint.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint-ppc32.S
new file mode 100644
index 0000000000..390cd9a8bc
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint-ppc32.S
@@ -0,0 +1,31 @@
+/* llrint function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __llrint __llrint_ppc32
+
+#include <sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint.c
new file mode 100644
index 0000000000..88357ebdd9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrint.c
@@ -0,0 +1,40 @@
+/* Multiple versions of llrint.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__llrint) __llrint_ppc32 attribute_hidden;
+extern __typeof (__llrint) __llrint_power6 attribute_hidden;
+
+libc_ifunc (__llrint,
+	    (hwcap & PPC_FEATURE_ARCH_2_05)
+	    ? __llrint_power6
+            : __llrint_ppc32);
+
+weak_alias (__llrint, llrint)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__llrint, __llrintl)
+weak_alias (__llrint, llrintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __llrint, llrintl, GLIBC_2_1);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf-power6.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf-power6.S
new file mode 100644
index 0000000000..8ebbefd3dd
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf-power6.S
@@ -0,0 +1,26 @@
+/* Round float to long int.  PowerPC32/POWER6 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __llrintf __llrintf_power6
+
+#include <sysdeps/powerpc/powerpc32/power6/fpu/s_llrintf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf-ppc32.S
new file mode 100644
index 0000000000..aa66e1ed9d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf-ppc32.S
@@ -0,0 +1,26 @@
+/* llrintf function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __llrintf __llrintf_ppc32
+
+#include <sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf.c
new file mode 100644
index 0000000000..f513e61944
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llrintf.c
@@ -0,0 +1,31 @@
+/* Multiple versions of llrintf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__llrintf) __llrintf_ppc32 attribute_hidden;
+extern __typeof (__llrintf) __llrintf_power6 attribute_hidden;
+
+libc_ifunc (__llrintf,
+	    (hwcap & PPC_FEATURE_ARCH_2_05)
+	    ? __llrintf_power6
+            : __llrintf_ppc32);
+
+weak_alias (__llrintf, llrintf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-power5+.S
new file mode 100644
index 0000000000..16e3124a3c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-power5+.S
@@ -0,0 +1,31 @@
+/* llround function.  PowerPC32/POWER5+ version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __llround __llround_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_llround.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-power6.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-power6.S
new file mode 100644
index 0000000000..508c6b7a29
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-power6.S
@@ -0,0 +1,31 @@
+/* llround function.  PowerPC32/POWER6 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __llround __llround_power6
+
+#include <sysdeps/powerpc/powerpc32/power6/fpu/s_llround.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-ppc32.S
new file mode 100644
index 0000000000..4ecd2a266f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround-ppc32.S
@@ -0,0 +1,31 @@
+/* llround function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __llround __llround_ppc32
+
+#include <sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround.c
new file mode 100644
index 0000000000..caf8953c81
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llround.c
@@ -0,0 +1,43 @@
+/* Multiple versions of llround.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__llround) __llround_ppc32 attribute_hidden;
+extern __typeof (__llround) __llround_power5plus attribute_hidden;
+extern __typeof (__llround) __llround_power6 attribute_hidden;
+
+libc_ifunc (__llround,
+	    (hwcap & PPC_FEATURE_ARCH_2_05)
+	    ? __llround_power6
+	    : (hwcap & PPC_FEATURE_POWER5_PLUS)
+	      ? __llround_power5plus
+	      : __llround_ppc32);
+
+weak_alias (__llround, llround)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__llround, __llroundl)
+weak_alias (__llround, llroundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __llround, llroundl, GLIBC_2_1);
+#endif
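
None of this dispatch is visible to callers: a program simply calls llround, and the call is bound to whichever __llround_* variant the resolver selected when libm was loaded. A trivial caller, for illustration (the file name demo.c and build line "gcc demo.c -lm" are just an example):

/* Ordinary caller; the IFUNC machinery above is transparent here.  */
#include <math.h>
#include <stdio.h>

int
main (void)
{
  /* llround rounds halfway cases away from zero, so this prints "3 -3".  */
  printf ("%lld %lld\n", llround (2.5), llround (-2.5));
  return 0;
}
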
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llroundf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llroundf.c
new file mode 100644
index 0000000000..1b7e45653a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_llroundf.c
@@ -0,0 +1,34 @@
+/* Multiple versions of llroundf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include "init-arch.h"
+
+/* It's safe to use the double-precision implementation for single-precision
+   values.  */
+extern __typeof (__llroundf) __llround_ppc32 attribute_hidden;
+extern __typeof (__llroundf) __llround_power5plus attribute_hidden;
+extern __typeof (__llroundf) __llround_power6 attribute_hidden;
+
+libc_ifunc (__llroundf,
+	    (hwcap & PPC_FEATURE_ARCH_2_05)
+	    ? __llround_power6
+	    : (hwcap & PPC_FEATURE_POWER5_PLUS)
+	      ? __llround_power5plus
+	      : __llround_ppc32);
+
+weak_alias (__llroundf, llroundf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb-power7.c
new file mode 100644
index 0000000000..20fd02a5ee
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb-power7.c
@@ -0,0 +1,31 @@
+/* logb(). PowerPC32/POWER7 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a, b)
+#undef strong_alias
+#define strong_alias(a, b)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __logb __logb_power7
+
+#include <sysdeps/powerpc/power7/fpu/s_logb.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb-ppc32.c
new file mode 100644
index 0000000000..3920579dbc
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb-ppc32.c
@@ -0,0 +1,28 @@
+/* logb().  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef weak_alias
+#define weak_alias(a, b)
+#undef strong_alias
+#define strong_alias(a, b)
+
+#define __logb __logb_ppc32
+
+#include <sysdeps/ieee754/dbl-64/s_logb.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb.c
new file mode 100644
index 0000000000..fddd1ecbec
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logb.c
@@ -0,0 +1,41 @@
+/* Multiple versions of logb.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__logb) __logb_ppc32 attribute_hidden;
+extern __typeof (__logb) __logb_power7 attribute_hidden;
+
+libc_ifunc (__logb,
+	    (hwcap & PPC_FEATURE_ARCH_2_06)
+	    ? __logb_power7
+            : __logb_ppc32);
+
+weak_alias (__logb, logb)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__logb, __logbl)
+weak_alias (__logb, logbl)
+#endif
+
+#if LONG_DOUBLE_COMPAT (libm, GLIBC_2_0)
+compat_symbol (libm, logb, logbl, GLIBC_2_0);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf-power7.c
new file mode 100644
index 0000000000..6e064bedbf
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf-power7.c
@@ -0,0 +1,26 @@
+/* logbf(). PowerPC32/POWER7 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+#define __logbf __logbf_power7
+
+#include <sysdeps/powerpc/power7/fpu/s_logbf.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf-ppc32.c
new file mode 100644
index 0000000000..ca9865d784
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf-ppc32.c
@@ -0,0 +1,26 @@
+/* logbf().  PowerPC32 default implementation.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+#define __logbf __logbf_ppc32
+
+#include <sysdeps/ieee754/flt-32/s_logbf.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf.c
new file mode 100644
index 0000000000..3b9de174bd
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of logbf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__logbf) __logbf_ppc32 attribute_hidden;
+extern __typeof (__logbf) __logbf_power7 attribute_hidden;
+
+libc_ifunc (__logbf,
+	    (hwcap & PPC_FEATURE_ARCH_2_06)
+	    ? __logbf_power7
+            : __logbf_ppc32);
+
+weak_alias (__logbf, logbf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl-power7.c
new file mode 100644
index 0000000000..547664dd4b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl-power7.c
@@ -0,0 +1,21 @@
+/* logbl(). PowerPC32/POWER7 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define __logbl __logbl_power7
+
+#include <sysdeps/powerpc/power7/fpu/s_logbl.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl-ppc32.c
new file mode 100644
index 0000000000..c4361226dd
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl-ppc32.c
@@ -0,0 +1,21 @@
+/* logbl().  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define __logbl __logbl_ppc32
+
+#include <sysdeps/ieee754/ldbl-128ibm/s_logbl.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl.c
new file mode 100644
index 0000000000..167e9535cb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_logbl.c
@@ -0,0 +1,32 @@
+/* Multiple versions of logbl.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__logbl) __logbl_ppc32 attribute_hidden;
+extern __typeof (__logbl) __logbl_power7 attribute_hidden;
+
+libc_ifunc (__logbl,
+	    (hwcap & PPC_FEATURE_ARCH_2_06)
+	    ? __logbl_power7
+            : __logbl_ppc32);
+
+long_double_symbol (libm, __logbl, logbl);
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint-power6x.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint-power6x.S
new file mode 100644
index 0000000000..3be812e5dc
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint-power6x.S
@@ -0,0 +1,33 @@
+/* Round double to long int.  POWER6x PowerPC32 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __lrint __lrint_power6x
+
+#include <sysdeps/powerpc/powerpc32/power6x/fpu/s_lrint.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint-ppc32.S
new file mode 100644
index 0000000000..ee5725db03
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint-ppc32.S
@@ -0,0 +1,31 @@
+/* Round double to long int.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __lrint __lrint_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_lrint.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint.c
new file mode 100644
index 0000000000..ec7c991464
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrint.c
@@ -0,0 +1,40 @@
+/* Multiple versions of lrint.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__lrint) __lrint_ppc32 attribute_hidden;
+extern __typeof (__lrint) __lrint_power6x attribute_hidden;
+
+libc_ifunc (__lrint,
+	    (hwcap & PPC_FEATURE_POWER6_EXT)
+	    ? __lrint_power6x
+	    : __lrint_ppc32);
+
+weak_alias (__lrint, lrint)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__lrint, lrintl)
+strong_alias (__lrint, __lrintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __lrint, lrintl, GLIBC_2_1);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrintf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrintf.c
new file mode 100644
index 0000000000..4a7fa4bcfa
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lrintf.c
@@ -0,0 +1,31 @@
+/* Multiple versions of lrintf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include "init-arch.h"
+
+/* It's safe to use the double-precision implementation for single-precision
+   values.  */
+extern __typeof (__lrintf) __lrint_ppc32 attribute_hidden;
+extern __typeof (__lrintf) __lrint_power6x attribute_hidden;
+
+libc_ifunc (__lrintf,
+	    (hwcap & PPC_FEATURE_POWER6_EXT)
+	    ? __lrint_power6x
+	    : __lrint_ppc32);
+
+weak_alias (__lrintf, lrintf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-power5+.S
new file mode 100644
index 0000000000..7aa2364183
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-power5+.S
@@ -0,0 +1,33 @@
+/* lround function.  POWER5+, PowerPC32 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __lround __lround_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_lround.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-power6x.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-power6x.S
new file mode 100644
index 0000000000..a9d54d560d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-power6x.S
@@ -0,0 +1,33 @@
+/* lround function.  POWER6x, PowerPC32 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __lround __lround_power6x
+
+#include <sysdeps/powerpc/powerpc32/power6x/fpu/s_lround.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-ppc32.S
new file mode 100644
index 0000000000..78a931238a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround-ppc32.S
@@ -0,0 +1,31 @@
+/* lround function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __lround __lround_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_lround.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround.c
new file mode 100644
index 0000000000..fdc0c3dd8d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lround.c
@@ -0,0 +1,43 @@
+/* Multiple versions of lround.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__lround) __lround_ppc32 attribute_hidden;
+extern __typeof (__lround) __lround_power5plus attribute_hidden;
+extern __typeof (__lround) __lround_power6x attribute_hidden;
+
+libc_ifunc (__lround,
+	    (hwcap & PPC_FEATURE_POWER6_EXT)
+	    ? __lround_power6x
+	    : (hwcap & PPC_FEATURE_POWER5_PLUS)
+	      ? __lround_power5plus
+	      : __lround_ppc32);
+
+weak_alias (__lround, lround)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__lround, lroundl)
+strong_alias (__lround, __lroundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __lround, lroundl, GLIBC_2_1);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lroundf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lroundf.c
new file mode 100644
index 0000000000..ff61dd6ca7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_lroundf.c
@@ -0,0 +1,34 @@
+/* Multiple versions of lroundf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include "init-arch.h"
+
+/* It's safe to use the double-precision implementation for single-precision
+   values.  */
+extern __typeof (__lroundf) __lround_ppc32 attribute_hidden;
+extern __typeof (__lroundf) __lround_power5plus attribute_hidden;
+extern __typeof (__lroundf) __lround_power6x attribute_hidden;
+
+libc_ifunc (__lroundf,
+	    (hwcap & PPC_FEATURE_POWER6_EXT)
+	    ? __lround_power6x
+	    : (hwcap & PPC_FEATURE_POWER5_PLUS)
+	      ? __lround_power5plus
+	      : __lround_ppc32);
+
+weak_alias (__lroundf, lroundf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf-power5+.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf-power5+.c
new file mode 100644
index 0000000000..955b265045
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf-power5+.c
@@ -0,0 +1,31 @@
+/* PowerPC/POWER5+ implementation for modf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __modf __modf_power5plus
+
+#include <sysdeps/powerpc/power5+/fpu/s_modf.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf-ppc32.c
new file mode 100644
index 0000000000..6561fdf8e1
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf-ppc32.c
@@ -0,0 +1,29 @@
+/* PowerPC32 default implementation for modf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+
+#define __modf __modf_ppc32
+
+#include <sysdeps/ieee754/dbl-64/s_modf.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf.c
new file mode 100644
index 0000000000..537592ab16
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modf.c
@@ -0,0 +1,44 @@
+/* Multiple versions of modf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__modf) __modf_ppc32 attribute_hidden;
+extern __typeof (__modf) __modf_power5plus attribute_hidden;
+
+libc_ifunc (__modf,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __modf_power5plus
+            : __modf_ppc32);
+
+weak_alias (__modf, modf)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__modf, __modfl)
+weak_alias (__modf, modfl)
+#endif
+#if IS_IN (libm)
+# if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __modf, modfl, GLIBC_2_0);
+# endif
+#elif LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __modf, modfl, GLIBC_2_0);
+#endif
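
As with the other wrappers, the dispatch does not change functional behaviour: modf splits a value into integral and fractional parts, both carrying the sign of the argument. A short standalone example:

/* modf () demo: 3.75 splits into 3 and 0.75; -3.75 into -3 and -0.75.  */
#include <math.h>
#include <stdio.h>

int
main (void)
{
  double ipart;
  double frac = modf (3.75, &ipart);
  printf ("%g + %g\n", ipart, frac);      /* prints "3 + 0.75" */
  frac = modf (-3.75, &ipart);
  printf ("%g + %g\n", ipart, frac);      /* prints "-3 + -0.75" */
  return 0;
}
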
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff-power5+.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff-power5+.c
new file mode 100644
index 0000000000..f5a12a282a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff-power5+.c
@@ -0,0 +1,27 @@
+/* PowerPC/POWER5+ implementation for modff.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __modff __modff_power5plus
+
+#include <sysdeps/powerpc/power5+/fpu/s_modff.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff-ppc32.c
new file mode 100644
index 0000000000..9b9fa971bf
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff-ppc32.c
@@ -0,0 +1,26 @@
+/* PowerPC32 default implementation for modff.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __modff __modff_ppc32
+
+#include <sysdeps/ieee754/flt-32/s_modff.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff.c
new file mode 100644
index 0000000000..7ae682d124
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_modff.c
@@ -0,0 +1,30 @@
+/* Multiple versions of modff.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include "init-arch.h"
+
+extern __typeof (__modff) __modff_ppc32 attribute_hidden;
+extern __typeof (__modff) __modff_power5plus attribute_hidden;
+
+libc_ifunc (__modff,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __modff_power5plus
+            : __modff_ppc32);
+
+weak_alias (__modff, modff)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round-power5+.S
new file mode 100644
index 0000000000..02ab78b33c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round-power5+.S
@@ -0,0 +1,33 @@
+/* round function.  PowerPC32/power5+ version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __round __round_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round-ppc32.S
new file mode 100644
index 0000000000..b9e5bb6170
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round-ppc32.S
@@ -0,0 +1,31 @@
+/* round function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __round __round_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_round.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round.c
new file mode 100644
index 0000000000..46102862ac
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_round.c
@@ -0,0 +1,40 @@
+/* Multiple versions of round.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__round) __round_ppc32 attribute_hidden;
+extern __typeof (__round) __round_power5plus attribute_hidden;
+
+libc_ifunc (__round,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __round_power5plus
+            : __round_ppc32);
+
+weak_alias (__round, round)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__round, __roundl)
+weak_alias (__round, roundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __round, roundl, GLIBC_2_1);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf-power5+.S
new file mode 100644
index 0000000000..442af4c1ea
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf-power5+.S
@@ -0,0 +1,26 @@
+/* roundf function.  PowerPC32/power5+ version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#define __roundf __roundf_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf-ppc32.S
new file mode 100644
index 0000000000..abe74e2e1a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf-ppc32.S
@@ -0,0 +1,27 @@
+/* roundf function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __roundf __roundf_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_roundf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf.c
new file mode 100644
index 0000000000..0a2e6d53cc
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_roundf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of roundf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__roundf) __roundf_ppc32 attribute_hidden;
+extern __typeof (__roundf) __roundf_power5plus attribute_hidden;
+
+libc_ifunc (__roundf,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __roundf_power5plus
+            : __roundf_ppc32);
+
+weak_alias (__roundf, roundf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc-power5+.S
new file mode 100644
index 0000000000..129570ca34
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc-power5+.S
@@ -0,0 +1,33 @@
+/* trunc function.  PowerPC32/power5+ version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef hidden_def
+#define hidden_def(name)
+#undef weak_alias
+#define weak_alias(name, alias)
+#undef strong_alias
+#define strong_alias(name, alias)
+#undef compat_symbol
+#define compat_symbol(lib, name, alias, ver)
+
+#define __trunc __trunc_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc-ppc32.S
new file mode 100644
index 0000000000..5e74248a9f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc-ppc32.S
@@ -0,0 +1,31 @@
+/* trunc function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __trunc __trunc_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_trunc.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc.c
new file mode 100644
index 0000000000..110e701218
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc.c
@@ -0,0 +1,40 @@
+/* Multiple versions of trunc.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__trunc) __trunc_ppc32 attribute_hidden;
+extern __typeof (__trunc) __trunc_power5plus attribute_hidden;
+
+libc_ifunc (__trunc,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __trunc_power5plus
+            : __trunc_ppc32);
+
+weak_alias (__trunc, trunc)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__trunc, __truncl)
+weak_alias (__trunc, truncl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __trunc, truncl, GLIBC_2_1);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf-power5+.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf-power5+.S
new file mode 100644
index 0000000000..57ab878876
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf-power5+.S
@@ -0,0 +1,26 @@
+/* truncf function.  PowerPC32/power5+ version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#define __truncf __truncf_power5plus
+
+#include <sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf-ppc32.S
new file mode 100644
index 0000000000..4dd0a6021a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf-ppc32.S
@@ -0,0 +1,27 @@
+/* truncf function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __truncf __truncf_ppc32
+
+#include <sysdeps/powerpc/powerpc32/fpu/s_truncf.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf.c
new file mode 100644
index 0000000000..ef6e97d000
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf.c
@@ -0,0 +1,32 @@
+/* Multiple versions of truncf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__truncf) __truncf_ppc32 attribute_hidden;
+extern __typeof (__truncf) __truncf_power5plus attribute_hidden;
+
+libc_ifunc (__truncf,
+	    (hwcap & PPC_FEATURE_POWER5_PLUS)
+	    ? __truncf_power5plus
+            : __truncf_ppc32);
+
+weak_alias (__truncf, truncf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat-power5.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat-power5.S
new file mode 100644
index 0000000000..7c5a504177
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat-power5.S
@@ -0,0 +1,31 @@
+/* sqrt function.  PowerPC32/POWER5 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __sqrt __sqrt_power5
+
+#include <sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt_compat.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat-ppc32.S
new file mode 100644
index 0000000000..534e934ac9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat-ppc32.S
@@ -0,0 +1,31 @@
+/* sqrt function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+#undef strong_alias
+#define strong_alias(a,b)
+#undef compat_symbol
+#define compat_symbol(a,b,c,d)
+
+#define __sqrt __sqrt_ppc32
+
+#include <sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt_compat.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat.c
new file mode 100644
index 0000000000..1e1892034e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrt_compat.c
@@ -0,0 +1,40 @@
+/* Multiple versions of sqrt.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__sqrt) __sqrt_ppc32 attribute_hidden;
+extern __typeof (__sqrt) __sqrt_power5 attribute_hidden;
+
+libc_ifunc (__sqrt,
+	    (hwcap & PPC_FEATURE_POWER5)
+	    ? __sqrt_power5
+	    : __sqrt_ppc32);
+
+weak_alias (__sqrt, sqrt)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__sqrt, __sqrtl)
+weak_alias (__sqrt, sqrtl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __sqrt, sqrtl, GLIBC_2_0);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat-power5.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat-power5.S
new file mode 100644
index 0000000000..eacc042850
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat-power5.S
@@ -0,0 +1,26 @@
+/* sqrtf function.  PowerPC32/POWER5 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __sqrtf __sqrtf_power5
+
+#include <sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf_compat.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat-ppc32.S
new file mode 100644
index 0000000000..72191fc9a5
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat-ppc32.S
@@ -0,0 +1,26 @@
+/* sqrtf function.  PowerPC32 default version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef weak_alias
+#define weak_alias(a,b)
+
+#define __sqrtf __sqrtf_ppc32
+
+#include <sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf_compat.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat.c
new file mode 100644
index 0000000000..bbab4d4f93
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/multiarch/w_sqrtf_compat.c
@@ -0,0 +1,32 @@
+/* Multiple versions of sqrtf.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <math_ldbl_opt.h>
+#include <shlib-compat.h>
+#include "init-arch.h"
+
+extern __typeof (__sqrtf) __sqrtf_ppc32 attribute_hidden;
+extern __typeof (__sqrtf) __sqrtf_power5 attribute_hidden;
+
+libc_ifunc (__sqrtf,
+	    (hwcap & PPC_FEATURE_POWER5)
+	    ? __sqrtf_power5
+	    : __sqrtf_ppc32);
+
+weak_alias (__sqrtf, sqrtf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S
new file mode 100644
index 0000000000..d16dbb8406
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llrint.S
@@ -0,0 +1,46 @@
+/* Round double to long long int.  PowerPC32 on PowerPC64 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* long long int[r3, r4] __llrint (double x[fp1])  */
+ENTRY (__llrint)
+	CALL_MCOUNT
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	fctid	fp13,fp1
+	stfd	fp13,8(r1)
+	nop	/* Ensure the following load is in a different dispatch group */
+	nop	/* to avoid pipe stall on POWER4&5.  */
+	nop
+	lwz	r3,8+HIWORD(r1)
+	lwz	r4,8+LOWORD(r1)
+	addi	r1,r1,16
+	blr
+	END (__llrint)
+
+weak_alias (__llrint, llrint)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__llrint, __llrintl)
+weak_alias (__llrint, llrintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __llrint, llrintl, GLIBC_2_1)
+#endif
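
The conversion idiom above (fctid into a floating-point register, stfd to the stack, then two lwz loads into r3/r4) is needed because 32-bit PowerPC has no direct FPR-to-GPR move. A rough C rendition of the same idea, illustrative only, assuming GCC inline asm on a POWER4-or-later 32-bit target where fctid is available:

/* Illustration only: convert in the FPU, then reinterpret the 64-bit
   result through memory, mirroring the stfd + lwz/lwz sequence above.  */
static long long
llrint_sketch (double x)
{
  union { double d; long long ll; } u;

  /* fctid rounds x to a 64-bit integer (honouring the current rounding
     mode) and leaves the bit pattern in a floating-point register.  */
  __asm__ ("fctid %0,%1" : "=f" (u.d) : "f" (x));
  return u.ll;		/* the store/reload is the FPR->GPR path */
}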
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S
new file mode 100644
index 0000000000..9c3dd77863
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llrintf.S
@@ -0,0 +1,38 @@
+/* Round float to long long int.  PowerPC32 on PowerPC64 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* long long int[r3, r4] __llrintf (float x[fp1])  */
+ENTRY (__llrintf)
+	CALL_MCOUNT
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	fctid	fp13,fp1
+	stfd	fp13,8(r1)
+	nop	/* Ensure the following load is in a different dispatch group */
+	nop	/* to avoid pipe stall on POWER4&5.  */
+	nop
+	lwz	r3,8+HIWORD(r1)
+	lwz	r4,8+LOWORD(r1)
+	addi	r1,r1,16
+	blr
+	END (__llrintf)
+
+weak_alias (__llrintf, llrintf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S
new file mode 100644
index 0000000000..24bd533748
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llround.S
@@ -0,0 +1,106 @@
+/* llround function.  PowerPC32 on PowerPC64 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+ .section .rodata.cst8,"aM",@progbits,8
+ .align 3
+ .LC0:	.long (52+127)<<23 /* 0x1p+52  */
+	.long (-1+127)<<23 /* 0.5  */
+
+	.section	".text"
+
+/* long long [r3, r4] __llround (double x [fp1])
+   IEEE 1003.1 llround function.  IEEE specifies "round to the nearest
+   integer value, rounding halfway cases away from zero, regardless of
+   the current rounding mode."  However, the PowerPC architecture defines
+   "Round to Nearest" as "Choose the best approximation.  In case of a
+   tie, choose the one that is even (least significant bit 0)."
+   So we can't use the PowerPC "Round to Nearest" mode.  Instead we
+   convert with "round toward Zero" (fctidz) and bias the value by
+   adding +-0.5 before the conversion.
+
+   It is necessary to detect when x is (+-)0x1.fffffffffffffp-2
+   because adding +-0.5 in this case would cause an erroneous shift,
+   carry and round.  We simply return 0 if 0.5 > x > -0.5.  Likewise,
+   if x is an odd number between +-(2^52 and 2^53-1), a shift and
+   carry would erroneously round if biased with +-0.5.  Therefore, if
+   |x| is greater than or equal to 2^52 we don't bias the number with
+   +-0.5.  */
+
+ENTRY (__llround)
+	stwu    r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	addi	r9,r9,.LC0-got_label@l
+	mtlr	r11
+	cfi_same_value (lr)
+	lfs	fp9,0(r9)
+	lfs	fp10,4(r9)
+#else
+	lis r9,.LC0@ha
+	lfs fp9,.LC0@l(r9)	/* Load 2^52 into fpr9.  */
+	lfs fp10,.LC0@l+4(r9)	/* Load 0.5 into fpr10.  */
+#endif
+	fabs	fp2,fp1		/* Get the absolute value of x.  */
+	fsub	fp12,fp10,fp10	/* Compute 0.0 into fpr12.  */
+	fcmpu	cr6,fp2,fp10	/* if |x| < 0.5  */
+	fcmpu	cr7,fp2,fp9	/* if |x| >= 2^52  */
+	fcmpu	cr1,fp1,fp12	/* x is negative? x < 0.0  */
+	blt-	cr6,.Lretzero	/* |x| < 0.5, so just return 0.  */
+	bge-	cr7,.Lnobias	/* |x| >= 2^52, just convert with no bias.  */
+	fadd	fp3,fp2,fp10	/* |x|+=0.5 bias to prepare to round.  */
+	bge	cr1,.Lconvert	/* x is positive so don't negate x.  */
+	fnabs	fp3,fp3		/* -(|x|+=0.5)  */
+.Lconvert:
+	fctidz	fp4,fp3		/* Convert to Integer double word round toward 0.  */
+	stfd	fp4,8(r1)
+	nop
+	nop
+	nop
+	lwz	r3,8+HIWORD(r1)	/* Load return as integer.  */
+	lwz	r4,8+LOWORD(r1)
+.Lout:
+	addi	r1,r1,16
+	blr
+.Lretzero:			/* 0.5 > x > -0.5  */
+	li	r3,0		/* return 0.  */
+	li	r4,0
+	b	.Lout
+.Lnobias:
+	fmr	fp3,fp1
+	b	.Lconvert
+	END (__llround)
+
+weak_alias (__llround, llround)
+
+strong_alias (__llround, __llroundf)
+weak_alias (__llround, llroundf)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__llround, llroundl)
+strong_alias (__llround, __llroundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __llround, llroundl, GLIBC_2_1)
+#endif
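
The bias-and-truncate scheme described in the comment at the top of this file can be summarized in C. The sketch below is illustrative only (it ignores overflow and NaN handling, which the assembly leaves to the fctidz conversion) and is not the source the patch builds:

/* Sketch of the rounding scheme: bias |x| by +0.5 and truncate toward
   zero, skipping the bias whenever it could disturb the result.  */
#include <math.h>

long long
llround_sketch (double x)
{
  double ax = fabs (x);

  if (ax < 0.5)		/* 0.5 > x > -0.5: the bias could carry up.  */
    return 0;
  if (ax < 0x1p52)	/* at or above 2^52 every double is an integer */
    ax += 0.5;		/* bias so truncation rounds halfway cases away */

  long long r = (long long) ax;	/* the cast truncates, like fctidz */
  return x < 0.0 ? -r : r;
}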
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llroundf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llroundf.S
new file mode 100644
index 0000000000..72d6181541
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/s_llroundf.S
@@ -0,0 +1 @@
+/* __llroundf is in s_llround.S */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt_compat.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt_compat.S
new file mode 100644
index 0000000000..bb896a33cd
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrt_compat.S
@@ -0,0 +1,108 @@
+/* sqrt function.  PowerPC32 version.
+   Copyright (C) 2007-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* double [fp1] sqrt (double x [fp1])
+   Power4 (ISA V2.0) and above implement sqrt in hardware (not optional).
+   The fsqrt instruction generates the correct value for all inputs and
+   sets the appropriate floating point exceptions.  Extended checking is
+   only needed to set errno (via __kernel_standard) if the input value
+   is negative.
+
+   The fsqrt will set FPCC and FU (Floating Point Unordered or NaN)
+   to indicate that the input value was negative or NaN.  Use Move to
+   Condition Register from FPSCR to copy the FPCC field to cr1.  The
+   branch on summary overflow transfers control to w_sqrt to process
+   any error conditions. Otherwise we can return the result directly.
+
+   This part of the function is a leaf routine,  so no need to stack a
+   frame or execute prologue/epilogue code. This means it is safe to
+   transfer directly to w_sqrt as long as the input value (f1) is
+   preserved. Putting the sqrt result into f2 (double parameter 2)
+   allows passing both the input value and sqrt result into the extended
+   wrapper so there is no need to recompute.
+
+   This tactic avoids the overhead of stacking a frame for the normal
+   (non-error) case.  Until gcc supports prologue shrink-wrapping
+   this is the best we can do.  */
+
+	.section	".text"
+	.machine power4
+EALIGN (__sqrt, 5, 0)
+	fsqrt	fp2,fp1
+	mcrfs	cr1,4
+	bso-	cr1,.Lw_sqrt
+	fmr	fp1,fp2
+	blr
+	.align	4
+.Lw_sqrt:
+	mflr	r0
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset(16)
+	fmr	fp12,fp2
+	stw	r0,20(r1)
+	stw	r30,8(r1)
+	cfi_offset(lr,20-16)
+	cfi_offset(r30,8-16)
+#ifdef SHARED
+	SETUP_GOT_ACCESS(r30,got_label)
+	addis	r30,r30,_GLOBAL_OFFSET_TABLE_-got_label@ha
+	addi	r30,r30,_GLOBAL_OFFSET_TABLE_-got_label@l
+	lwz	r9,_LIB_VERSION@got(30)
+	lwz	r0,0(r9)
+#else
+	lis	r9,_LIB_VERSION@ha
+	lwz	r0,_LIB_VERSION@l(r9)
+#endif
+/*  if (_LIB_VERSION == _IEEE_) return z; */
+	cmpwi	cr7,r0,-1
+	beq-	cr7,.L4
+/*  if (x != x) return z; !isnan  */
+	fcmpu	cr7,fp1,fp1
+	bne-	cr7,.L4
+/*  if  (x < 0.0)
+    return __kernel_standard (x, x, 26) */
+	fmr	fp2,fp1
+	fabs	fp0,fp1
+	li	r3,26
+	fcmpu	cr7,fp1,fp0
+	bne- 	cr7,.L11
+.L4:
+	lwz	r0,20(r1)
+	fmr	fp1,fp12
+	lwz	r30,8(r1)
+	addi	r1,r1,16
+	mtlr 	r0
+	blr
+.L11:
+	bl	__kernel_standard@plt
+	fmr	fp12,fp1
+	b	.L4
+	END	(__sqrt)
+
+weak_alias (__sqrt, sqrt)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__sqrt, sqrtl)
+strong_alias (__sqrt, __sqrtl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __sqrt, sqrtl, GLIBC_2_0)
+#endif
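
In C terms, the error path described in the comment at the top of this file amounts to the wrapper logic below. This is an illustration under stated assumptions, not the built source: hardware_fsqrt stands in for the fsqrt instruction, and _LIB_VERSION, _IEEE_ (tested as -1 by the cmpwi above) and __kernel_standard are the SVID-compatibility internals the assembly references.

/* Illustration of the wrapper logic around the hardware fsqrt result.  */
extern int _LIB_VERSION;		  /* SVID error-handling mode */
#define _IEEE_ (-1)			  /* value tested by the cmpwi above */
extern double __kernel_standard (double x, double y, int type);
extern double hardware_fsqrt (double x);  /* placeholder for the fsqrt insn */

double
sqrt_wrapper_sketch (double x)
{
  double z = hardware_fsqrt (x);	/* fast path: hardware result */

  if (_LIB_VERSION == _IEEE_)		/* pure IEEE mode: no errno handling */
    return z;
  if (x != x)				/* NaN input: return the NaN */
    return z;
  if (x < 0.0)				/* domain error: set errno via SVID */
    return __kernel_standard (x, x, 26);  /* 26 is the code passed above */
  return z;
}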
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf_compat.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf_compat.S
new file mode 100644
index 0000000000..c304ab5ca2
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/fpu/w_sqrtf_compat.S
@@ -0,0 +1,100 @@
+/* sqrtf function.  PowerPC32 version.
+   Copyright (C) 2007-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* float [fp1] sqrtf (float x [fp1])
+   Power4 (ISA V2.0) and above implement sqrt in hardware (not optional).
+   The fsqrts instruction generates the correct value for all inputs and
+   sets the appropriate floating point exceptions.  Extended checking is
+   only needed to set errno (via __kernel_standard) if the input value
+   is negative.
+
+   The fsqrts will set FPCC and FU (Floating Point Unordered or NaN)
+   to indicate that the input value was negative or NaN.  Use Move to
+   Condition Register from FPSCR to copy the FPCC field to cr1.  The
+   branch on summary overflow transfers control to w_sqrt to process
+   any error conditions. Otherwise we can return the result directly.
+
+   This part of the function is a leaf routine,  so no need to stack a
+   frame or execute prologue/epilogue code. This means it is safe to
+   transfer directly to w_sqrt as long as the input value (f1) is
+   preserved. Putting the sqrt result into f2 (float parameter 2)
+   allows passing both the input value and sqrt result into the extended
+   wrapper so there is no need to recompute.
+
+   This tactic avoids the overhead of stacking a frame for the normal
+   (non-error) case.  Until gcc supports prologue shrink-wrapping
+   this is the best we can do.  */
+
+	.section	".text"
+	.machine power4
+EALIGN (__sqrtf, 5, 0)
+	fsqrts	fp2,fp1
+	mcrfs	cr1,4
+	bso-	cr1,.Lw_sqrtf
+	fmr	fp1,fp2
+	blr
+        .align 4
+.Lw_sqrtf:
+	mflr	r0
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset(16)
+	fmr	fp12,fp2
+	stw	r0,20(r1)
+	stw	r30,8(r1)
+	cfi_offset(lr,20-16)
+	cfi_offset(r30,8-16)
+#ifdef SHARED
+	SETUP_GOT_ACCESS(r30,got_label)
+	addis	r30,r30,_GLOBAL_OFFSET_TABLE_-got_label@ha
+	addi	r30,r30,_GLOBAL_OFFSET_TABLE_-got_label@l
+	lwz	r9,_LIB_VERSION@got(30)
+	lwz	r0,0(r9)
+#else
+	lis	r9,_LIB_VERSION@ha
+	lwz	r0,_LIB_VERSION@l(r9)
+#endif
+/*  if (_LIB_VERSION == _IEEE_) return z; */
+	cmpwi	cr7,r0,-1
+	beq-	cr7,.L4
+/*  if (x != x) return z; !isnan  */
+	fcmpu	cr7,fp1,fp1
+	bne-	cr7,.L4
+/*  if  (x < 0.0)
+    return __kernel_standard (x, x, 126) */
+	fmr	fp2,fp1
+	fabs	fp0,fp1
+	li	r3,126
+	fcmpu	cr7,fp1,fp0
+	bne- 	cr7,.L11
+.L4:
+	lwz	r0,20(r1)
+	fmr	fp1,fp12
+	lwz	r30,8(r1)
+	addi	r1,r1,16
+	mtlr 	r0
+	blr
+.L11:
+	bl	__kernel_standard@plt
+	fmr	fp12,fp1
+	b	.L4
+	END	(__sqrtf)
+
+weak_alias (__sqrtf, sqrtf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/hp-timing.h b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/hp-timing.h
new file mode 100644
index 0000000000..93cce4625a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/hp-timing.h
@@ -0,0 +1,54 @@
+/* High precision, low overhead timing functions.  PowerPC32/POWER4 version.
+   Copyright (C) 2005-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _HP_TIMING_H
+#define _HP_TIMING_H	1
+
+/* We always assume having the timestamp register.  */
+#define HP_TIMING_AVAIL		(1)
+#define HP_SMALL_TIMING_AVAIL	(1)
+
+/* We indeed have inlined functions.  */
+#define HP_TIMING_INLINE	(1)
+
+/* We use 64bit values for the times.  */
+typedef unsigned long long int hp_timing_t;
+
+/* That's quite simple.  Use the time base, read here via `mfspr' from
+   SPRs 268/269 (equivalent to `mftb'/`mftbu').  Note that the value
+   might not be 100% accurate since there might be some more instructions
+   still in flight at that moment.  This could be changed by using a
+   barrier like `lwsync' right before the read.  But we are not interested
+   in exact clock cycles here, so we don't do this.  */
+
+#define HP_TIMING_NOW(Var)						\
+  do {									\
+    unsigned int hi, lo, tmp;						\
+    __asm__ __volatile__ ("1:	mfspr	%0,269;"			\
+			  "	mfspr	%1,268;"			\
+			  "	mfspr	%2,269;"			\
+			  "	cmpw	%0,%2;"				\
+			  "	bne	1b;"				\
+			  : "=&r" (hi), "=&r" (lo), "=&r" (tmp)		\
+			  : : "cr0");					\
+    Var = ((hp_timing_t) hi << 32) | lo;				\
+  } while (0)
+
+#include <hp-timing-common.h>
+
+#endif	/* hp-timing.h */
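
As a usage sketch, assuming a glibc-internal compilation unit where this header is reachable as <hp-timing.h> and HP_TIMING_DIFF comes from the included <hp-timing-common.h>, timing a code region with the macro above looks like this:

/* Usage sketch only: measure the timebase ticks spent in fn ().  */
#include <hp-timing.h>

static hp_timing_t
time_region (void (*fn) (void))
{
  hp_timing_t start, end, diff;

  HP_TIMING_NOW (start);		/* read TBU/TBL with the retry loop */
  fn ();
  HP_TIMING_NOW (end);
  HP_TIMING_DIFF (diff, start, end);	/* diff = end - start, in ticks */
  return diff;
}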
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcmp.S
new file mode 100644
index 0000000000..c6270d347f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcmp.S
@@ -0,0 +1,1375 @@
+/* Optimized memcmp implementation for PowerPC32.
+   Copyright (C) 2003-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* int [r3] memcmp (const char *s1 [r3],
+		    const char *s2 [r4],
+		    size_t size [r5])  */
+
+	.machine power4
+EALIGN (memcmp, 4, 0)
+	CALL_MCOUNT
+
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rN	r5	/* max string length */
+#define rWORD1	r6	/* current word in s1 */
+#define rWORD2	r7	/* current word in s2 */
+#define rWORD3	r8	/* next word in s1 */
+#define rWORD4	r9	/* next word in s2 */
+#define rWORD5	r10	/* next word in s1 */
+#define rWORD6	r11	/* next word in s2 */
+#define rWORD7	r30	/* next word in s1 */
+#define rWORD8	r31	/* next word in s2 */
+
+	xor	r0, rSTR2, rSTR1
+	cmplwi	cr6, rN, 0
+	cmplwi	cr1, rN, 12
+	clrlwi.	r0, r0, 30
+	clrlwi	r12, rSTR1, 30
+	cmplwi	cr5, r12, 0
+	beq-	cr6, L(zeroLength)
+	dcbt	0, rSTR1
+	dcbt	0, rSTR2
+/* If less than 8 bytes or not aligned, use the unaligned
+   byte loop.  */
+	blt	cr1, L(bytealigned)
+	stwu	1, -64(r1)
+	cfi_adjust_cfa_offset(64)
+	stw	rWORD8, 48(r1)
+	stw	rWORD7, 44(r1)
+	cfi_offset(rWORD8, (48-64))
+	cfi_offset(rWORD7, (44-64))
+	bne	L(unaligned)
+/* At this point we know both strings have the same alignment and the
+   compare length is at least 8 bytes.  r12 contains the low order
+   2 bits of rSTR1 and cr5 contains the result of the logical compare
+   of r12 to 0.  If r12 == 0 then we are already word
+   aligned and can perform the word aligned loop.
+
+   Otherwise we know the two strings have the same alignment (but not
+   yet word aligned).  So we force the string addresses to the next lower
+   word boundary and special case this first word using shift left to
+   eliminate bits preceding the first byte.  Since we want to join the
+   normal (word aligned) compare loop, starting at the second word,
+   we need to adjust the length (rN) and special case the loop
+   versioning for the first word. This ensures that the loop count is
+   correct and the first word (shifted) is in the expected register pair. */
+	.align	4
+L(samealignment):
+	clrrwi	rSTR1, rSTR1, 2
+	clrrwi	rSTR2, rSTR2, 2
+	beq	cr5, L(Waligned)
+	add	rN, rN, r12
+	slwi	rWORD6, r12, 3
+	srwi	r0, rN, 4	/* Divide by 16 */
+	andi.	r12, rN, 12	/* Get the word remainder */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+#endif
+	cmplwi	cr1, r12, 8
+	cmplwi	cr7, rN, 16
+	clrlwi	rN, rN, 30
+	beq	L(dPs4)
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+	bgt	cr1, L(dPs3)
+	beq	cr1, L(dPs2)
+
+/* Remainder is 4 */
+	.align	3
+L(dsP1):
+	slw	rWORD5, rWORD1, rWORD6
+	slw	rWORD6, rWORD2, rWORD6
+	cmplw	cr5, rWORD5, rWORD6
+	blt	cr7, L(dP1x)
+/* Do something useful in this cycle since we have to branch anyway.  */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	b	L(dP1e)
+/* Remainder is 8 */
+	.align	4
+L(dPs2):
+	slw	rWORD5, rWORD1, rWORD6
+	slw	rWORD6, rWORD2, rWORD6
+	cmplw	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP2x)
+/* Do something useful in this cycle since we have to branch anyway.  */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 4(rSTR1)
+	lwz	rWORD8, 4(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	b	L(dP2e)
+/* Remainder is 12 */
+	.align	4
+L(dPs3):
+	slw	rWORD3, rWORD1, rWORD6
+	slw	rWORD4, rWORD2, rWORD6
+	cmplw	cr1, rWORD3, rWORD4
+	b	L(dP3e)
+/* Count is a multiple of 16, remainder is 0 */
+	.align	4
+L(dPs4):
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+	slw	rWORD1, rWORD1, rWORD6
+	slw	rWORD2, rWORD2, rWORD6
+	cmplw	cr7, rWORD1, rWORD2
+	b	L(dP4e)
+
+/* At this point we know both strings are word aligned and the
+   compare length is at least 8 bytes.  */
+	.align	4
+L(Waligned):
+	andi.	r12, rN, 12	/* Get the word remainder */
+	srwi	r0, rN, 4	/* Divide by 16 */
+	cmplwi	cr1, r12, 8
+	cmplwi	cr7, rN, 16
+	clrlwi	rN, rN, 30
+	beq	L(dP4)
+	bgt	cr1, L(dP3)
+	beq	cr1, L(dP2)
+
+/* Remainder is 4 */
+	.align	4
+L(dP1):
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+/* Normally we'd use rWORD7/rWORD8 here, but since we might exit early
+   (8-15 byte compare), we want to use only volatile registers.  This
+   means we can avoid restoring non-volatile registers since we did not
+   change any on the early exit path.  The key here is that the non-early
+   exit path cares only about the condition code (cr5), not about which
+   register pair was used.  */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 0(rSTR1)
+	lwz	rWORD6, 0(rSTR2)
+#endif
+	cmplw	cr5, rWORD5, rWORD6
+	blt	cr7, L(dP1x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+L(dP1e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5x)
+	bne	cr7, L(dLcr7x)
+
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 16(rSTR1)
+	lwzu	rWORD8, 16(rSTR2)
+#endif
+	bne	cr1, L(dLcr1)
+	cmplw	cr5, rWORD7, rWORD8
+	bdnz	L(dLoop)
+	bne	cr6, L(dLcr6)
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+	.align	3
+L(dP1x):
+	slwi.	r12, rN, 3
+	bne	cr5, L(dLcr5x)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	addi	1, 1, 64
+	cfi_adjust_cfa_offset(-64)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+
+/* Remainder is 8 */
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dP2):
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 0(rSTR1)
+	lwz	rWORD6, 0(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP2x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 4(rSTR1)
+	lwz	rWORD8, 4(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+L(dP2e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 8(rSTR1)
+	lwz	rWORD2, 8(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 12(rSTR1)
+	lwz	rWORD4, 12(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#endif
+	bne	cr6, L(dLcr6)
+	bne	cr5, L(dLcr5)
+	b	L(dLoop2)
+/* Again we are on an early exit path (16-23 byte compare), so we want
+   to use only volatile registers and avoid restoring non-volatile
+   registers.  */
+	.align	4
+L(dP2x):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 4(rSTR1)
+	lwz	rWORD4, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	slwi.	r12, rN, 3
+	bne	cr6, L(dLcr6x)
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#endif
+	bne	cr1, L(dLcr1x)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	addi	1, 1, 64
+	cfi_adjust_cfa_offset(-64)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+
+/* Remainder is 12 */
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dP3):
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 0(rSTR1)
+	lwz	rWORD4, 0(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+L(dP3e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 4(rSTR1)
+	lwz	rWORD6, 4(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP3x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 8(rSTR1)
+	lwz	rWORD8, 8(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 12(rSTR1)
+	lwz	rWORD2, 12(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+#endif
+	bne	cr1, L(dLcr1)
+	bne	cr6, L(dLcr6)
+	b	L(dLoop1)
+/* Again we are on an early exit path (24-31 byte compare); we want to
+   only use volatile registers and avoid restoring non-volatile
+   registers.  */
+	.align	4
+L(dP3x):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 8(rSTR1)
+	lwz	rWORD2, 8(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	slwi.	r12, rN, 3
+	bne	cr1, L(dLcr1x)
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+#endif
+	bne	cr6, L(dLcr6x)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	bne	cr7, L(dLcr7x)
+	addi	1, 1, 64
+	cfi_adjust_cfa_offset(-64)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+
+/* Count is a multiple of 16, remainder is 0 */
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dP4):
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+L(dP4e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 4(rSTR1)
+	lwz	rWORD4, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 8(rSTR1)
+	lwz	rWORD6, 8(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 12(rSTR1)
+	lwzu	rWORD8, 12(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr7, L(dLcr7)
+	bne	cr1, L(dLcr1)
+	bdz-	L(d24)		/* Adjust CTR as we start with +4 */
+/* This is the primary loop */
+	.align	4
+L(dLoop):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(dLcr6)
+L(dLoop1):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+L(dLoop2):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr7, L(dLcr7)
+L(dLoop3):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 16(rSTR1)
+	lwzu	rWORD8, 16(rSTR2)
+#endif
+	bne-	cr1, L(dLcr1)
+	cmplw	cr7, rWORD1, rWORD2
+	bdnz+	L(dLoop)
+
+L(dL4):
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(dLcr6)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+	cmplw	cr5, rWORD7, rWORD8
+L(d44):
+	bne	cr7, L(dLcr7)
+L(d34):
+	bne	cr1, L(dLcr1)
+L(d24):
+	bne	cr6, L(dLcr6)
+L(d14):
+	slwi.	r12, rN, 3
+	bne	cr5, L(dLcr5)
+L(d04):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+	addi	1, 1, 64
+	cfi_adjust_cfa_offset(-64)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	beq	L(zeroLength)
+/* At this point we have a remainder of 1 to 3 bytes to compare.  Since
+   we are aligned it is safe to load the whole word, and use
+   shift right to eliminate bits beyond the compare length.  */
+L(d00):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	rWORD1, rWORD1, rN
+	srw	rWORD2, rWORD2, rN
+	sub	rRTN, rWORD1, rWORD2
+	blr
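+
+/* Illustrative note (editorial, not from the original source): with
+   both strings word aligned and 1-3 bytes left, the code above is
+   roughly the C expression
+
+     (w1 >> sh) - (w2 >> sh),  with sh = 32 - 8 * bytes_remaining,
+
+   where w1/w2 are the full words loaded in big-endian significance
+   order (lwbrx provides that order on little-endian), so the bytes
+   beyond the compare length are shifted out before the subtract.  */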
+
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dLcr7):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+L(dLcr7x):
+	li	rRTN, 1
+	addi	1, 1, 64
+	cfi_adjust_cfa_offset(-64)
+	bgtlr	cr7
+	li	rRTN, -1
+	blr
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dLcr1):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+L(dLcr1x):
+	li	rRTN, 1
+	addi	1, 1, 64
+	cfi_adjust_cfa_offset(-64)
+	bgtlr	cr1
+	li	rRTN, -1
+	blr
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dLcr6):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+L(dLcr6x):
+	li	rRTN, 1
+	addi	1, 1, 64
+	cfi_adjust_cfa_offset(-64)
+	bgtlr	cr6
+	li	rRTN, -1
+	blr
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dLcr5):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+L(dLcr5x):
+	li	rRTN, 1
+	addi	1, 1, 64
+	cfi_adjust_cfa_offset(-64)
+	bgtlr	cr5
+	li	rRTN, -1
+	blr
+
+	.align	4
+L(bytealigned):
+	mtctr	rN	/* Power4 wants mtctr 1st in dispatch group */
+
+/* We need to prime this loop.  This loop is swing modulo scheduled
+   to avoid pipe delays.  The dependent instruction latencies (load to
+   compare to conditional branch) are 2 to 3 cycles.  In this loop each
+   dispatch group ends in a branch and takes 1 cycle.  Effectively
+   the first iteration of the loop only serves to load operands, and
+   the branches based on those compares are delayed until the next
+   iteration.
+
+   So we must precondition some registers and condition codes so that
+   we don't exit the loop early on the first iteration.  */
+
+	lbz	rWORD1, 0(rSTR1)
+	lbz	rWORD2, 0(rSTR2)
+	bdz-	L(b11)
+	cmplw	cr7, rWORD1, rWORD2
+	lbz	rWORD3, 1(rSTR1)
+	lbz	rWORD4, 1(rSTR2)
+	bdz-	L(b12)
+	cmplw	cr1, rWORD3, rWORD4
+	lbzu	rWORD5, 2(rSTR1)
+	lbzu	rWORD6, 2(rSTR2)
+	bdz-	L(b13)
+	.align	4
+L(bLoop):
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	bne-	cr7, L(bLcr7)
+
+	cmplw	cr6, rWORD5, rWORD6
+	bdz-	L(b3i)
+
+	lbzu	rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	bne-	cr1, L(bLcr1)
+
+	cmplw	cr7, rWORD1, rWORD2
+	bdz-	L(b2i)
+
+	lbzu	rWORD5, 1(rSTR1)
+	lbzu	rWORD6, 1(rSTR2)
+	bne-	cr6, L(bLcr6)
+
+	cmplw	cr1, rWORD3, rWORD4
+	bdnz+	L(bLoop)
+
+/* We speculatively load bytes before we have tested the previous
+   bytes.  But we must avoid overrunning the length (in the CTR) to
+   prevent these speculative loads from causing a segfault.  In that
+   case the loop will exit early, before all the pending bytes have
+   been tested, and we must complete the pending operations before
+   returning.  */
+L(b1i):
+	bne-	cr7, L(bLcr7)
+	bne-	cr1, L(bLcr1)
+	b	L(bx56)
+	.align	4
+L(b2i):
+	bne-	cr6, L(bLcr6)
+	bne-	cr7, L(bLcr7)
+	b	L(bx34)
+	.align	4
+L(b3i):
+	bne-	cr1, L(bLcr1)
+	bne-	cr6, L(bLcr6)
+	b	L(bx12)
+	.align	4
+L(bLcr7):
+	li	rRTN, 1
+	bgtlr	cr7
+	li	rRTN, -1
+	blr
+L(bLcr1):
+	li	rRTN, 1
+	bgtlr	cr1
+	li	rRTN, -1
+	blr
+L(bLcr6):
+	li	rRTN, 1
+	bgtlr	cr6
+	li	rRTN, -1
+	blr
+
+L(b13):
+	bne-	cr7, L(bx12)
+	bne-	cr1, L(bx34)
+L(bx56):
+	sub	rRTN, rWORD5, rWORD6
+	blr
+	nop
+L(b12):
+	bne-	cr7, L(bx12)
+L(bx34):
+	sub	rRTN, rWORD3, rWORD4
+	blr
+L(b11):
+L(bx12):
+	sub	rRTN, rWORD1, rWORD2
+	blr
+	.align	4
+L(zeroLength):
+	li	rRTN, 0
+	blr
+
+	.align	4
+/* At this point we know the strings have different alignment and the
+   compare length is at least 8 bytes.  r12 contains the low order
+   2 bits of rSTR1 and cr5 contains the result of the logical compare
+   of r12 to 0.  If r12 == 0 then rSTR1 is word aligned and we can
+   use the Wunaligned loop.
+
+   Otherwise we know that rSTR1 is not yet word aligned.
+   So we can force the string addresses to the next lower word
+   boundary and special case this first word using shift left to
+   eliminate bits preceding the first byte.  Since we want to join the
+   normal (Wunaligned) compare loop, starting at the second word,
+   we need to adjust the length (rN) and special case the loop
+   versioning for the first W.  This ensures that the loop count is
+   correct and the first W (shifted) is in the expected register pair.  */
+#define rSHL		r29	/* Unaligned shift left count.  */
+#define rSHR		r28	/* Unaligned shift right count.  */
+#define rWORD8_SHIFT	r27	/* Left rotation temp for rWORD2.  */
+#define rWORD2_SHIFT	r26	/* Left rotation temp for rWORD4.  */
+#define rWORD4_SHIFT	r25	/* Left rotation temp for rWORD6.  */
+#define rWORD6_SHIFT	r24	/* Left rotation temp for rWORD8.  */
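+
+/* Editorial sketch (not part of the original comment): in the loops
+   below each logical rSTR2 word is spliced from two adjacent aligned
+   loads, roughly
+
+     w = (prev << rSHL) | (cur >> rSHR)
+
+   where prev is the earlier aligned word (kept shifted in one of the
+   *_SHIFT registers), cur is the newly loaded one, rSHL is 8 times
+   the byte misalignment and rSHR is 32 - rSHL.  */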
+	cfi_adjust_cfa_offset(64)
+L(unaligned):
+	stw	rSHL, 40(r1)
+	cfi_offset(rSHL, (40-64))
+	clrlwi	rSHL, rSTR2, 30
+	stw	rSHR, 36(r1)
+	cfi_offset(rSHR, (36-64))
+	beq	cr5, L(Wunaligned)
+	stw	rWORD8_SHIFT, 32(r1)
+	cfi_offset(rWORD8_SHIFT, (32-64))
+/* Adjust the logical start of rSTR2 to compensate for the extra bits
+   in the 1st rSTR1 W.  */
+	sub	rWORD8_SHIFT, rSTR2, r12
+/* But do not attempt to address the W before the one that contains
+   the actual start of rSTR2.  */
+	clrrwi	rSTR2, rSTR2, 2
+	stw	rWORD2_SHIFT, 28(r1)
+/* Compute the left/right shift counts for the unaligned rSTR2,
+   compensating for the logical (W aligned) start of rSTR1.  */
+	clrlwi	rSHL, rWORD8_SHIFT, 30
+	clrrwi	rSTR1, rSTR1, 2
+	stw	rWORD4_SHIFT, 24(r1)
+	slwi	rSHL, rSHL, 3
+	cmplw	cr5, rWORD8_SHIFT, rSTR2
+	add	rN, rN, r12
+	slwi	rWORD6, r12, 3
+	stw	rWORD6_SHIFT, 20(r1)
+	cfi_offset(rWORD2_SHIFT, (28-64))
+	cfi_offset(rWORD4_SHIFT, (24-64))
+	cfi_offset(rWORD6_SHIFT, (20-64))
+	subfic	rSHR, rSHL, 32
+	srwi	r0, rN, 4	/* Divide by 16 */
+	andi.	r12, rN, 12	/* Get the W remainder */
+/* We normally need to load 2 Ws to start the unaligned rSTR2, but in
+   this special case those bits may be discarded anyway.  Also we
+   must avoid loading a W where none of the bits are part of rSTR2 as
+   this may cross a page boundary and cause a page fault.  */
+	li	rWORD8, 0
+	blt	cr5, L(dus0)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD8, 0(rSTR2)
+	addi	rSTR2, rSTR2, 4
+#endif
+	slw	rWORD8, rWORD8, rSHL
+
+L(dus0):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+#endif
+	cmplwi	cr1, r12, 8
+	cmplwi	cr7, rN, 16
+	srw	r12, rWORD2, rSHR
+	clrlwi	rN, rN, 30
+	beq	L(duPs4)
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+	or	rWORD8, r12, rWORD8
+	bgt	cr1, L(duPs3)
+	beq	cr1, L(duPs2)
+
+/* Remainder is 4 */
+	.align	4
+L(dusP1):
+	slw	rWORD8_SHIFT, rWORD2, rSHL
+	slw	rWORD7, rWORD1, rWORD6
+	slw	rWORD8, rWORD8, rWORD6
+	bge	cr7, L(duP1e)
+/* At this point we exit early with the first word compare
+   complete and remainder of 0 to 3 bytes.  See L(du14) for details on
+   how we handle the remaining bytes.  */
+	cmplw	cr5, rWORD7, rWORD8
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	b	L(dutrim)
+/* Remainder is 8 */
+	.align	4
+L(duPs2):
+	slw	rWORD6_SHIFT, rWORD2, rSHL
+	slw	rWORD5, rWORD1, rWORD6
+	slw	rWORD6, rWORD8, rWORD6
+	b	L(duP2e)
+/* Remainder is 12 */
+	.align	4
+L(duPs3):
+	slw	rWORD4_SHIFT, rWORD2, rSHL
+	slw	rWORD3, rWORD1, rWORD6
+	slw	rWORD4, rWORD8, rWORD6
+	b	L(duP3e)
+/* Count is a multiple of 16, remainder is 0 */
+	.align	4
+L(duPs4):
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+	or	rWORD8, r12, rWORD8
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	slw	rWORD1, rWORD1, rWORD6
+	slw	rWORD2, rWORD8, rWORD6
+	b	L(duP4e)
+
+/* At this point we know rSTR1 is word aligned and the
+   compare length is at least 8 bytes.  */
+	.align	4
+L(Wunaligned):
+	stw	rWORD8_SHIFT, 32(r1)
+	clrrwi	rSTR2, rSTR2, 2
+	stw	rWORD2_SHIFT, 28(r1)
+	srwi	r0, rN, 4	/* Divide by 16 */
+	stw	rWORD4_SHIFT, 24(r1)
+	andi.	r12, rN, 12	/* Get the W remainder */
+	stw	rWORD6_SHIFT, 20(r1)
+	cfi_offset(rWORD8_SHIFT, (32-64))
+	cfi_offset(rWORD2_SHIFT, (28-64))
+	cfi_offset(rWORD4_SHIFT, (24-64))
+	cfi_offset(rWORD6_SHIFT, (20-64))
+	slwi	rSHL, rSHL, 3
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD6, 0(rSTR2)
+	lwzu	rWORD8, 4(rSTR2)
+#endif
+	cmplwi	cr1, r12, 8
+	cmplwi	cr7, rN, 16
+	clrlwi	rN, rN, 30
+	subfic	rSHR, rSHL, 32
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	beq	L(duP4)
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+	bgt	cr1, L(duP3)
+	beq	cr1, L(duP2)
+
+/* Remainder is 4 */
+	.align	4
+L(duP1):
+	srw	r12, rWORD8, rSHR
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	addi	rSTR1, rSTR1, 4
+#else
+	lwz	rWORD7, 0(rSTR1)
+#endif
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	blt	cr7, L(duP1x)
+L(duP1e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	srw	r0, rWORD2, rSHR
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	or	rWORD2, r0, rWORD8_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	srw	r12, rWORD4, rSHR
+	slw	rWORD4_SHIFT, rWORD4, rSHL
+	bne	cr5, L(duLcr5)
+	or	rWORD4, r12, rWORD2_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	srw	r0, rWORD6, rSHR
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	bne	cr7, L(duLcr7)
+	or	rWORD6, r0, rWORD4_SHIFT
+	cmplw	cr6, rWORD5, rWORD6
+	b	L(duLoop3)
+	.align	4
+/* At this point we exit early with the first word compare
+   complete and remainder of 0 to 3 bytes.  See L(du14) for details on
+   how we handle the remaining bytes.  */
+L(duP1x):
+	cmplw	cr5, rWORD7, rWORD8
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 8(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	b	L(dutrim)
+/* Remainder is 8 */
+	.align	4
+L(duP2):
+	srw	r0, rWORD8, rSHR
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	addi	rSTR1, rSTR1, 4
+#else
+	lwz	rWORD5, 0(rSTR1)
+#endif
+	or	rWORD6, r0, rWORD6_SHIFT
+	slw	rWORD6_SHIFT, rWORD8, rSHL
+L(duP2e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 4(rSTR1)
+	lwz	rWORD8, 4(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	srw	r12, rWORD8, rSHR
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	blt	cr7, L(duP2x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 8(rSTR1)
+	lwz	rWORD2, 8(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	srw	r0, rWORD2, rSHR
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	or	rWORD2, r0, rWORD8_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 12(rSTR1)
+	lwz	rWORD4, 12(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	bne	cr5, L(duLcr5)
+	srw	r12, rWORD4, rSHR
+	slw	rWORD4_SHIFT, rWORD4, rSHL
+	or	rWORD4, r12, rWORD2_SHIFT
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	b	L(duLoop2)
+	.align	4
+L(duP2x):
+	cmplw	cr5, rWORD7, rWORD8
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#endif
+	bne	cr6, L(duLcr6)
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	b	L(dutrim)
+
+/* Remainder is 12 */
+	.align	4
+L(duP3):
+	srw	r12, rWORD8, rSHR
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	addi	rSTR1, rSTR1, 4
+#else
+	lwz	rWORD3, 0(rSTR1)
+#endif
+	slw	rWORD4_SHIFT, rWORD8, rSHL
+	or	rWORD4, r12, rWORD6_SHIFT
+L(duP3e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 4(rSTR1)
+	lwz	rWORD6, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	srw	r0, rWORD6, rSHR
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	or	rWORD6, r0, rWORD4_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 8(rSTR1)
+	lwz	rWORD8, 8(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr1, L(duLcr1)
+	srw	r12, rWORD8, rSHR
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	blt	cr7, L(duP3x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 12(rSTR1)
+	lwz	rWORD2, 12(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	srw	r0, rWORD2, rSHR
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	or	rWORD2, r0, rWORD8_SHIFT
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	b	L(duLoop1)
+	.align	4
+L(duP3x):
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+#endif
+#if 0
+/* Huh?  We've already branched on cr1!  */
+	bne	cr1, L(duLcr1)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	b	L(dutrim)
+
+/* Count is a multiple of 16, remainder is 0 */
+	.align	4
+L(duP4):
+	mtctr	r0	/* Power4 wants mtctr 1st in dispatch group */
+	srw	r0, rWORD8, rSHR
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	addi	rSTR1, rSTR1, 4
+#else
+	lwz	rWORD1, 0(rSTR1)
+#endif
+	slw	rWORD2_SHIFT, rWORD8, rSHL
+	or	rWORD2, r0, rWORD6_SHIFT
+L(duP4e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 4(rSTR1)
+	lwz	rWORD4, 4(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	srw	r12, rWORD4, rSHR
+	slw	rWORD4_SHIFT, rWORD4, rSHL
+	or	rWORD4, r12, rWORD2_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 8(rSTR1)
+	lwz	rWORD6, 8(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr7, L(duLcr7)
+	srw	r0, rWORD6, rSHR
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	or	rWORD6, r0, rWORD4_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 12(rSTR1)
+	lwzu	rWORD8, 12(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr1, L(duLcr1)
+	srw	r12, rWORD8, rSHR
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	cmplw	cr5, rWORD7, rWORD8
+	bdz-	L(du24)		/* Adjust CTR as we start with +4 */
+/* This is the primary loop */
+	.align	4
+L(duLoop):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(duLcr6)
+	srw	r0, rWORD2, rSHR
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	or	rWORD2, r0, rWORD8_SHIFT
+L(duLoop1):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(duLcr5)
+	srw	r12, rWORD4, rSHR
+	slw	rWORD4_SHIFT, rWORD4, rSHL
+	or	rWORD4, r12, rWORD2_SHIFT
+L(duLoop2):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr7, L(duLcr7)
+	srw	r0, rWORD6, rSHR
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	or	rWORD6, r0, rWORD4_SHIFT
+L(duLoop3):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 16(rSTR1)
+	lwzu	rWORD8, 16(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	bne-	cr1, L(duLcr1)
+	srw	r12, rWORD8, rSHR
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	bdnz+	L(duLoop)
+
+L(duL4):
+#if 0
+/* Huh?  We've already branched on cr1!  */
+	bne	cr1, L(duLcr1)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(duLcr6)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(duLcr5)
+	cmplw	cr5, rWORD7, rWORD8
+L(du44):
+	bne	cr7, L(duLcr7)
+L(du34):
+	bne	cr1, L(duLcr1)
+L(du24):
+	bne	cr6, L(duLcr6)
+L(du14):
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+/* At this point we have a remainder of 1 to 3 bytes to compare.  We use
+   shift right to eliminate bits beyond the compare length.
+   This allows the use of word subtract to compute the final result.
+
+   However it may not be safe to load rWORD2 which may be beyond the
+   string length. So we compare the bit length of the remainder to
+   the right shift count (rSHR). If the bit count is less than or equal
+   we do not need to load rWORD2 (all significant bits are already in
+   rWORD8_SHIFT).  */
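+/* Worked example (editorial note): with a 1-byte misalignment rSHL is
+   8 and rSHR is 24; if 2 bytes remain then rN is 16 bits, 16 <= 24,
+   so both remaining rSTR2 bytes are already in rWORD8_SHIFT and the
+   extra load is skipped.  */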
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	.align	4
+L(dutrim):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+#else
+	lwz	rWORD1, 4(rSTR1)
+#endif
+	lwz	rWORD8, 48(r1)
+	subfic	rN, rN, 32	/* Shift count is 32 - (rN * 8).  */
+	or	rWORD2, r0, rWORD8_SHIFT
+	lwz	rWORD7, 44(r1)
+	lwz	rSHL, 40(r1)
+	srw	rWORD1, rWORD1, rN
+	srw	rWORD2, rWORD2, rN
+	lwz	rSHR, 36(r1)
+	lwz	rWORD8_SHIFT, 32(r1)
+	sub	rRTN, rWORD1, rWORD2
+	b	L(dureturn26)
+	.align	4
+L(duLcr7):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+	li	rRTN, 1
+	bgt	cr7, L(dureturn29)
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	4
+L(duLcr1):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+	li	rRTN, 1
+	bgt	cr1, L(dureturn29)
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	4
+L(duLcr6):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+	li	rRTN, 1
+	bgt	cr6, L(dureturn29)
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	4
+L(duLcr5):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+	li	rRTN, 1
+	bgt	cr5, L(dureturn29)
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	3
+L(duZeroReturn):
+	li	rRTN, 0
+	.align	4
+L(dureturn):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+L(dureturn29):
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+L(dureturn27):
+	lwz	rWORD8_SHIFT, 32(r1)
+L(dureturn26):
+	lwz	rWORD2_SHIFT, 28(r1)
+L(dureturn25):
+	lwz	rWORD4_SHIFT, 24(r1)
+	lwz	rWORD6_SHIFT, 20(r1)
+	addi	1, 1, 64
+	cfi_adjust_cfa_offset(-64)
+	blr
+END (memcmp)
+
+libc_hidden_builtin_def (memcmp)
+weak_alias (memcmp, bcmp)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcopy.h b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcopy.h
new file mode 100644
index 0000000000..c76739e390
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcopy.h
@@ -0,0 +1,116 @@
+/* memcopy.h -- definitions for memory copy functions.  Generic C version.
+   Copyright (C) 1991-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Torbjorn Granlund (tege@sics.se).
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* The strategy of the memory functions is:
+
+     1. Copy bytes until the destination pointer is aligned.
+
+     2. Copy words in unrolled loops.  If the source and destination
+     are not aligned in the same way, use word memory operations,
+     but shift and merge two read words before writing.
+
+     3. Copy the few remaining bytes.
+
+   This is fast on processors that have at least 10 registers for
+   allocation by GCC, and that can access memory at reg+const in one
+   instruction.
+
+   I made an "exhaustive" test of this memmove when I wrote it,
+   exhaustive in the sense that I tried all alignment and length
+   combinations, with and without overlap.  */
+
+#include <sysdeps/generic/memcopy.h>
+
+/* The macros defined in this file are:
+
+   BYTE_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_to_copy)
+
+   BYTE_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_to_copy)
+
+   WORD_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_remaining, nbytes_to_copy)
+
+   WORD_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_remaining, nbytes_to_copy)
+
+   MERGE(old_word, sh_1, new_word, sh_2)
+     [I fail to understand.  I feel stupid.  --roland]
+*/
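+
+/* Editorial note on MERGE (not part of the original comment): it
+   splices two adjacent aligned source words into the single aligned
+   destination word that straddles them, conceptually
+
+     MERGE (old_word, sh_1, new_word, sh_2)
+       == (old_word shifted by sh_1) | (new_word shifted by sh_2)
+
+   where sh_1 + sh_2 equals the word size in bits; the concrete shift
+   directions depend on byte order and come from the generic
+   <sysdeps/generic/memcopy.h> included above.  */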
+
+
+/* Threshold value for when to enter the unrolled loops.  */
+#undef	OP_T_THRES
+#define OP_T_THRES 16
+
+/* Copy exactly NBYTES bytes from SRC_BP to DST_BP,
+   without any assumptions about alignment of the pointers.  */
+#undef BYTE_COPY_FWD
+#define BYTE_COPY_FWD(dst_bp, src_bp, nbytes)				      \
+  do									      \
+    {									      \
+      size_t __nbytes = (nbytes);					      \
+      if (__nbytes & 1)							      \
+        {								      \
+	  ((byte *) dst_bp)[0] =  ((byte *) src_bp)[0];			      \
+	  src_bp += 1;							      \
+	  dst_bp += 1;							      \
+	  __nbytes -= 1;						      \
+        }								      \
+      while (__nbytes > 0)						      \
+	{								      \
+	  byte __x = ((byte *) src_bp)[0];				      \
+	  byte __y = ((byte *) src_bp)[1];				      \
+	  src_bp += 2;							      \
+	  __nbytes -= 2;						      \
+	  ((byte *) dst_bp)[0] = __x;					      \
+	  ((byte *) dst_bp)[1] = __y;					      \
+	  dst_bp += 2;							      \
+	}								      \
+    } while (0)
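+
+/* Note on the loop shape above (editorial): one odd byte is peeled
+   first so the remaining count is even, then two bytes are moved per
+   iteration with both loads issued before either store; this is a
+   scheduling choice, not a functional requirement.  */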
+
+/* Copy exactly NBYTES_TO_COPY bytes from SRC_END_PTR to DST_END_PTR,
+   beginning at the bytes right before the pointers and continuing towards
+   smaller addresses.  Don't assume anything about alignment of the
+   pointers.  */
+#undef BYTE_COPY_BWD
+#define BYTE_COPY_BWD(dst_ep, src_ep, nbytes)				      \
+  do									      \
+    {									      \
+      size_t __nbytes = (nbytes);					      \
+      if (__nbytes & 1)							      \
+        {								      \
+	  src_ep -= 1;							      \
+	  dst_ep -= 1;							      \
+	  ((byte *) dst_ep)[0] =  ((byte *) src_ep)[0];			      \
+	  __nbytes -= 1;						      \
+        }								      \
+      while (__nbytes > 0)						      \
+	{								      \
+	  byte __x, __y;						      \
+	  src_ep -= 2;							      \
+	  __y = ((byte *) src_ep)[1];					      \
+	  __x = ((byte *) src_ep)[0];					      \
+	  dst_ep -= 2;							      \
+	  __nbytes -= 2;						      \
+	  ((byte *) dst_ep)[1] = __y;					      \
+	  ((byte *) dst_ep)[0] = __x;					      \
+	}								      \
+    } while (0)
+
+/* The powerpc memcpy implementation is safe to use for memmove.  */
+#undef MEMCPY_OK_FOR_FWD_MEMMOVE
+#define MEMCPY_OK_FOR_FWD_MEMMOVE 1
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcpy.S
new file mode 100644
index 0000000000..37bc712dd9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memcpy.S
@@ -0,0 +1,481 @@
+/* Optimized memcpy implementation for PowerPC32 on PowerPC64.
+   Copyright (C) 2003-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+   Returns 'dst'.
+
+   Memcpy handles short copies (< 32 bytes) using binary move blocks
+   (no loops) of lwz/stw.  The tail (remaining 1-3 bytes) is handled
+   with the appropriate combination of byte and halfword load/stores.
+   There is minimal effort to optimize the alignment of short moves.
+
+   Longer moves (>= 32-bytes) justify the effort to get at least the
+   destination word (4-byte) aligned.  Further optimization is
+   possible when both source and destination are word aligned.
+   Each case has an optimized unrolled loop.   */
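+
+/* Rough outline of the dispatch below (editorial sketch, not part of
+   the original header):
+
+     if (len < 32)   straight-line lwz/stw short move;
+     else            copy 0-3 bytes so dst is word aligned, then
+                     either the 16-byte unrolled aligned loop or the
+                     load/shift/merge loop at .L6 if src stays
+                     unaligned, then copy the 0-3 tail bytes.  */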
+
+	.machine power4
+EALIGN (memcpy, 5, 0)
+	CALL_MCOUNT
+
+    stwu  1,-32(1)
+    cfi_adjust_cfa_offset(32)
+    stw   30,20(1)
+    cfi_offset(30,(20-32))
+    mr    30,3
+    cmplwi cr1,5,31
+    stw   31,24(1)
+    cfi_offset(31,(24-32))
+    neg   0,3
+    andi. 11,3,3	/* check alignment of dst.  */
+    clrlwi 0,0,30	/* Number of bytes until the 1st word of dst.  */
+    clrlwi 10,4,30	/* check alignment of src.  */
+    cmplwi cr6,5,8
+    ble-  cr1,.L2	/* If move < 32 bytes use short move code.  */
+    cmplw cr6,10,11
+    mr    12,4
+    srwi  9,5,2		/* Number of full words remaining.  */
+    mtcrf 0x01,0
+    mr    31,5
+    beq   .L0
+
+    subf  31,0,5
+  /* Move 0-3 bytes as needed to get the destination word aligned.  */
+1:  bf    31,2f
+    lbz   6,0(12)
+    addi  12,12,1
+    stb   6,0(3)
+    addi  3,3,1
+2:  bf    30,0f
+    lhz   6,0(12)
+    addi  12,12,2
+    sth   6,0(3)
+    addi  3,3,2
+0:
+    clrlwi 10,12,30	/* check alignment of src again.  */
+    srwi  9,31,2	/* Number of full words remaining.  */
+
+  /* Copy words from source to destination, assuming the destination is
+     aligned on a word boundary.
+
+     At this point we know there are at least 25 bytes left (32-7) to copy.
+     The next step is to determine if the source is also word aligned.
+     If not branch to the unaligned move code at .L6. which uses
+     a load, shift, store strategy.
+
+     Otherwise source and destination are word aligned, and we can use
+     the optimized word copy loop.  */
+.L0:
+    clrlwi	11,31,30  /* calculate the number of tail bytes */
+    mtcrf 0x01,9
+    bne-  cr6,.L6   /* If source is not word aligned.  */
+
+  /* Move words where destination and source are word aligned.
+     Use an unrolled loop to copy 4 words (16-bytes) per iteration.
+     If the copy is not an exact multiple of 16 bytes, 1-3
+     words are copied as needed to set up the main loop.  After
+     the main loop exits there may be a tail of 1-3 bytes. These bytes are
+     copied a halfword/byte at a time as needed to preserve alignment.  */
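+
+  /* In effect the main copy loop below is (editorial sketch, with s
+     and d as 32-bit word pointers and count the 16-byte loop count):
+
+       do { d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
+            s += 4; d += 4; } while (--count);  */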
+
+    srwi  8,31,4    /* calculate the 16 byte loop count */
+    cmplwi	cr1,9,4
+    cmplwi	cr6,11,0
+    mr    11,12
+
+    bf    30,1f
+    lwz   6,0(12)
+    lwz   7,4(12)
+    addi  11,12,8
+    mtctr 8
+    stw   6,0(3)
+    stw   7,4(3)
+    addi  10,3,8
+    bf    31,4f
+    lwz   0,8(12)
+    stw   0,8(3)
+    blt   cr1,3f
+    addi  11,12,12
+    addi  10,3,12
+    b     4f
+    .align  4
+1:
+    mr    10,3
+    mtctr 8
+    bf    31,4f
+    lwz   6,0(12)
+    addi  11,12,4
+    stw   6,0(3)
+    addi  10,3,4
+
+    .align  4
+4:
+    lwz   6,0(11)
+    lwz   7,4(11)
+    lwz   8,8(11)
+    lwz   0,12(11)
+    stw   6,0(10)
+    stw   7,4(10)
+    stw   8,8(10)
+    stw   0,12(10)
+    addi  11,11,16
+    addi  10,10,16
+    bdnz  4b
+3:
+    clrrwi 0,31,2
+    mtcrf 0x01,31
+    beq   cr6,0f
+.L9:
+    add   3,3,0
+    add   12,12,0
+
+/*  At this point we have a tail of 0-3 bytes and we know that the
+    destination is word aligned.  */
+2:  bf    30,1f
+    lhz   6,0(12)
+    addi  12,12,2
+    sth   6,0(3)
+    addi  3,3,2
+1:  bf    31,0f
+    lbz   6,0(12)
+    stb   6,0(3)
+0:
+  /* Return original dst pointer.  */
+    mr  3,30
+    lwz 30,20(1)
+    lwz 31,24(1)
+    addi 1,1,32
+    blr
+
+/* Copy up to 31 bytes.  This is divided into two cases 0-8 bytes and
+   9-31 bytes.  Each case is handled without loops, using binary
+   (1,2,4,8) tests.
+
+   In the short (0-8 byte) case no attempt is made to force alignment
+   of either source or destination.  The hardware will handle the
+   unaligned load/stores with small delays for crossing 32-, 64-, and
+   4096-byte boundaries. Since these short moves are unlikely to be
+   unaligned or cross these boundaries, the overhead to force
+   alignment is not justified.
+
+   The longer (9-31 byte) move is more likely to cross 32- or 64-byte
+   boundaries.  Since only loads are sensitive to the 32-/64-byte
+   boundaries it is more important to align the source than the
+   destination.  If the source is not already word aligned, we first
+   move 1-3 bytes as needed.  While the destination and stores may
+   still be unaligned, this is only an issue for page (4096 byte
+   boundary) crossing, which should be rare for these short moves.
+   The hardware handles this case automatically with a small delay.  */
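+
+/* Worked example (editorial): a 13-byte move has length bits 0b1101,
+   so after mtcrf loads those bits into the CR the code moves 8 bytes,
+   then 4, then 1, each block guarded by one bit of the length.  */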
+
+    .align  4
+.L2:
+    mtcrf 0x01,5
+    neg   8,4
+    clrrwi 11,4,2
+    andi. 0,8,3
+    ble   cr6,.LE8	/* Handle moves of 0-8 bytes.  */
+/* At least 9 bytes left.  Get the source word aligned.  */
+    cmplwi	cr1,5,16
+    mr    10,5
+    mr    12,4
+    cmplwi	cr6,0,2
+    beq   .L3	/* If the source is already word aligned skip this.  */
+/* Copy 1-3 bytes to get source address word aligned.  */
+    lwz   6,0(11)
+    subf  10,0,5
+    add   12,4,0
+    blt   cr6,5f
+    srwi  7,6,16
+    bgt	  cr6,3f
+#ifdef __LITTLE_ENDIAN__
+    sth   7,0(3)
+#else
+    sth   6,0(3)
+#endif
+    b     7f
+    .align  4
+3:
+#ifdef __LITTLE_ENDIAN__
+    rotlwi 6,6,24
+    stb   6,0(3)
+    sth   7,1(3)
+#else
+    stb   7,0(3)
+    sth   6,1(3)
+#endif
+    b     7f
+    .align  4
+5:
+#ifdef __LITTLE_ENDIAN__
+    rotlwi 6,6,8
+#endif
+    stb   6,0(3)
+7:
+    cmplwi	cr1,10,16
+    add   3,3,0
+    mtcrf 0x01,10
+    .align  4
+.L3:
+/* At least 6 bytes left and the source is word aligned.  */
+    blt   cr1,8f
+16: /* Move 16 bytes.  */
+    lwz   6,0(12)
+    lwz   7,4(12)
+    stw   6,0(3)
+    lwz   6,8(12)
+    stw   7,4(3)
+    lwz   7,12(12)
+    addi  12,12,16
+    stw   6,8(3)
+    stw   7,12(3)
+    addi  3,3,16
+8:  /* Move 8 bytes.  */
+    bf    28,4f
+    lwz   6,0(12)
+    lwz   7,4(12)
+    addi  12,12,8
+    stw   6,0(3)
+    stw   7,4(3)
+    addi  3,3,8
+4:  /* Move 4 bytes.  */
+    bf    29,2f
+    lwz   6,0(12)
+    addi  12,12,4
+    stw   6,0(3)
+    addi  3,3,4
+2:  /* Move 2-3 bytes.  */
+    bf    30,1f
+    lhz   6,0(12)
+    sth   6,0(3)
+    bf    31,0f
+    lbz   7,2(12)
+    stb   7,2(3)
+    mr    3,30
+    lwz   30,20(1)
+    addi  1,1,32
+    blr
+1:  /* Move 1 byte.  */
+    bf    31,0f
+    lbz   6,0(12)
+    stb   6,0(3)
+0:
+  /* Return original dst pointer.  */
+    mr   3,30
+    lwz  30,20(1)
+    addi 1,1,32
+    blr
+
+/* Special case to copy 0-8 bytes.  */
+    .align  4
+.LE8:
+    mr    12,4
+    bne   cr6,4f
+    lwz   6,0(4)
+    lwz   7,4(4)
+    stw   6,0(3)
+    stw   7,4(3)
+  /* Return original dst pointer.  */
+    mr    3,30
+    lwz   30,20(1)
+    addi  1,1,32
+    blr
+    .align  4
+4:  bf    29,2b
+    lwz   6,0(4)
+    stw   6,0(3)
+6:
+    bf    30,5f
+    lhz   7,4(4)
+    sth   7,4(3)
+    bf    31,0f
+    lbz   8,6(4)
+    stb   8,6(3)
+    mr    3,30
+    lwz   30,20(1)
+    addi  1,1,32
+    blr
+    .align  4
+5:
+    bf    31,0f
+    lbz   6,4(4)
+    stb   6,4(3)
+    .align  4
+0:
+  /* Return original dst pointer.  */
+    mr   3,30
+    lwz  30,20(1)
+    addi 1,1,32
+    blr
+
+    .align  4
+.L6:
+
+  /* Copy words where the destination is aligned but the source is
+     not.  Use aligned word loads from the source, shifted to realign
+     the data, to allow aligned destination stores.
+     Use an unrolled loop to copy 4 words (16-bytes) per iteration.
+     A single word is retained for storing at loop exit to avoid walking
+     off the end of a page within the loop.
+     If the copy is not an exact multiple of 16 bytes, 1-3
+     words are copied as needed to set up the main loop.  After
+     the main loop exits there may be a tail of 1-3 bytes. These bytes are
+     copied a halfword/byte at a time as needed to preserve alignment.  */
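+
+  /* Editorial sketch of one merged store in the loop below: on a
+     big-endian target the word written out is
+
+       (w_prev << sh_left) | (w_next >> sh_right)
+
+     with sh_left = 8 * (source misalignment) and
+     sh_right = 32 - sh_left; the __LITTLE_ENDIAN__ branches swap the
+     shift directions because lwz presents the bytes in the opposite
+     significance order there.  */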
+
+
+    cmplwi  cr6,11,0  /* are there tail bytes left ? */
+    subf    5,10,12   /* back up src pointer to prev word alignment */
+    slwi    10,10,3   /* calculate number of bits to shift 1st word left */
+    addi    11,9,-1   /* we move one word after the loop */
+    srwi    8,11,2    /* calculate the 16 byte loop count */
+    lwz     6,0(5)    /* load 1st src word into R6 */
+    mr      4,3
+    lwz     7,4(5)    /* load 2nd src word into R7 */
+    mtcrf   0x01,11
+    subfic  9,10,32   /* number of bits to shift 2nd word right */
+    mtctr   8
+    bf      30,1f
+
+    /* there are at least two words to copy, so copy them */
+#ifdef __LITTLE_ENDIAN__
+    srw   0,6,10
+    slw   8,7,9
+#else
+    slw   0,6,10  /* shift 1st src word to left align it in R0 */
+    srw   8,7,9   /* shift 2nd src word to right align it in R8 */
+#endif
+    or    0,0,8   /* or them to get word to store */
+    lwz   6,8(5)  /* load the 3rd src word */
+    stw   0,0(4)  /* store the 1st dst word */
+#ifdef __LITTLE_ENDIAN__
+    srw   0,7,10
+    slw   8,6,9
+#else
+    slw   0,7,10  /* now left align 2nd src word into R0 */
+    srw   8,6,9   /* shift 3rd src word to right align it in R8 */
+#endif
+    or    0,0,8   /* or them to get word to store */
+    lwz   7,12(5)
+    stw   0,4(4)  /* store the 2nd dst word */
+    addi  4,4,8
+    addi  5,5,16
+    bf    31,4f
+    /* there is a third word to copy, so copy it */
+#ifdef __LITTLE_ENDIAN__
+    srw   0,6,10
+    slw   8,7,9
+#else
+    slw   0,6,10  /* shift 3rd src word to left align it in R0 */
+    srw   8,7,9   /* shift 4th src word to right align it in R8 */
+#endif
+    or    0,0,8   /* or them to get word to store */
+    stw   0,0(4)  /* store 3rd dst word */
+    mr    6,7
+    lwz   7,0(5)
+    addi  5,5,4
+    addi  4,4,4
+    b     4f
+    .align 4
+1:
+#ifdef __LITTLE_ENDIAN__
+    srw     0,6,10
+    slw     8,7,9
+#else
+    slw     0,6,10  /* shift 1st src word to left align it in R0 */
+    srw     8,7,9   /* shift 2nd src word to right align it in R8 */
+#endif
+    addi  5,5,8
+    or    0,0,8   /* or them to get word to store */
+    bf    31,4f
+    mr    6,7
+    lwz   7,0(5)
+    addi  5,5,4
+    stw   0,0(4)  /* store the 1st dst word */
+    addi  4,4,4
+
+    .align  4
+4:
+    /* copy 16 bytes at a time */
+#ifdef __LITTLE_ENDIAN__
+    srw   0,6,10
+    slw   8,7,9
+#else
+    slw   0,6,10
+    srw   8,7,9
+#endif
+    or    0,0,8
+    lwz   6,0(5)
+    stw   0,0(4)
+#ifdef __LITTLE_ENDIAN__
+    srw   0,7,10
+    slw   8,6,9
+#else
+    slw   0,7,10
+    srw   8,6,9
+#endif
+    or    0,0,8
+    lwz   7,4(5)
+    stw   0,4(4)
+#ifdef __LITTLE_ENDIAN__
+    srw   0,6,10
+    slw   8,7,9
+#else
+    slw   0,6,10
+    srw   8,7,9
+#endif
+    or    0,0,8
+    lwz   6,8(5)
+    stw   0,8(4)
+#ifdef __LITTLE_ENDIAN__
+    srw   0,7,10
+    slw   8,6,9
+#else
+    slw   0,7,10
+    srw   8,6,9
+#endif
+    or    0,0,8
+    lwz   7,12(5)
+    stw   0,12(4)
+    addi  5,5,16
+    addi  4,4,16
+    bdnz+ 4b
+8:
+    /* calculate and store the final word */
+#ifdef __LITTLE_ENDIAN__
+    srw   0,6,10
+    slw   8,7,9
+#else
+    slw   0,6,10
+    srw   8,7,9
+#endif
+    or    0,0,8
+    stw   0,0(4)
+3:
+    clrrwi 0,31,2
+    mtcrf 0x01,31
+    bne   cr6,.L9	/* If the tail is 0 bytes we are done!  */
+
+  /* Return original dst pointer.  */
+    mr   3,30
+    lwz  30,20(1)
+    lwz  31,24(1)
+    addi 1,1,32
+    blr
+END (memcpy)
+
+libc_hidden_builtin_def (memcpy)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memset.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memset.S
new file mode 100644
index 0000000000..25319f7233
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/memset.S
@@ -0,0 +1,226 @@
+/* Optimized memset implementation for PowerPC32/POWER4.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
+   Returns 's'.
+
+   The memset is done in three sizes: byte (8 bits), word (32 bits),
+   cache line (1024 bits). There is a special case for setting cache lines
+   to 0, to take advantage of the dcbz instruction.  */
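+
+/* Editorial note: before the word and cache-line stores the fill byte
+   is replicated across a word in two steps, effectively
+
+     c |= c << 8;  c |= c << 16;     (i.e. c *= 0x01010101)
+
+   which is what the two insrwi instructions below do.  */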
+
+	.machine power4
+EALIGN (memset, 5, 0)
+	CALL_MCOUNT
+
+#define rTMP	r0
+#define rRTN	r3	/* Initial value of 1st argument.  */
+#define rMEMP0	r3	/* Original value of 1st arg.  */
+#define rCHR	r4	/* Char to set in each byte.  */
+#define rLEN	r5	/* Length of region to set.  */
+#define rMEMP	r6	/* Address at which we are storing.  */
+#define rALIGN	r7	/* Number of bytes we are setting now (when aligning). */
+#define rMEMP2	r8
+
+#define rNEG64	r8	/* Constant -64 for clearing with dcbz.  */
+#define rCLS	r8	/* Cache line size (known to be 128).  */
+#define rCLM	r9	/* Cache line size mask to check for cache alignment.  */
+L(_memset):
+/* Take care of case for size <= 4.  */
+	cmplwi	cr1, rLEN, 4
+	andi.	rALIGN, rMEMP0, 3
+	mr	rMEMP, rMEMP0
+	ble-	cr1, L(small)
+
+/* Align to word boundary.  */
+	cmplwi	cr5, rLEN, 31
+	insrwi	rCHR, rCHR, 8, 16     /* Replicate byte to halfword.  */
+	beq+	L(aligned)
+	mtcrf	0x01, rMEMP0
+	subfic	rALIGN, rALIGN, 4
+	add	rMEMP, rMEMP, rALIGN
+	sub	rLEN, rLEN, rALIGN
+	bf+	31, L(g0)
+	stb	rCHR, 0(rMEMP0)
+	bt	30, L(aligned)
+L(g0):
+	sth	rCHR, -2(rMEMP)
+
+/* Handle the case of size < 31.  */
+L(aligned):
+	mtcrf	0x01, rLEN
+	insrwi	rCHR, rCHR, 16, 0    /* Replicate halfword to word.  */
+	ble	cr5, L(medium)
+/* Align to 32-byte boundary.  */
+	andi.	rALIGN, rMEMP, 0x1C
+	subfic	rALIGN, rALIGN, 0x20
+	beq	L(caligned)
+	mtcrf	0x01, rALIGN
+	add	rMEMP, rMEMP, rALIGN
+	sub	rLEN, rLEN, rALIGN
+	cmplwi	cr1, rALIGN, 0x10
+	mr	rMEMP2, rMEMP
+	bf	28, L(a1)
+        stw     rCHR, -4(rMEMP2)
+	stwu	rCHR, -8(rMEMP2)
+L(a1):	blt	cr1, L(a2)
+        stw     rCHR, -4(rMEMP2)
+	stw	rCHR, -8(rMEMP2)
+	stw	rCHR, -12(rMEMP2)
+	stwu	rCHR, -16(rMEMP2)
+L(a2):  bf      29, L(caligned)
+        stw     rCHR, -4(rMEMP2)
+
+/* Now aligned to a 32 byte boundary.  */
+L(caligned):
+	cmplwi	cr1, rCHR, 0
+	clrrwi.	rALIGN, rLEN, 5
+	mtcrf	0x01, rLEN
+	beq	cr1, L(zloopstart) /* Special case for clearing memory using dcbz.  */
+L(nondcbz):
+	srwi	rTMP, rALIGN, 5
+	mtctr	rTMP
+	beq	L(medium)	/* We may not actually get to do a full line.  */
+	clrlwi.	rLEN, rLEN, 27
+	add	rMEMP, rMEMP, rALIGN
+	li	rNEG64, -0x40
+	bdz	L(cloopdone)
+
+        .align 4
+L(c3): 	dcbtst	rNEG64, rMEMP
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+        stw     rCHR, -12(rMEMP)
+	stw	rCHR, -16(rMEMP)
+        stw     rCHR, -20(rMEMP)
+	stw	rCHR, -24(rMEMP)
+        stw     rCHR, -28(rMEMP)
+	stwu	rCHR, -32(rMEMP)
+	bdnz	L(c3)
+L(cloopdone):
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+        stw     rCHR, -12(rMEMP)
+	stw	rCHR, -16(rMEMP)
+	cmplwi	cr1, rLEN, 16
+        stw     rCHR, -20(rMEMP)
+	stw	rCHR, -24(rMEMP)
+        stw     rCHR, -28(rMEMP)
+	stwu	rCHR, -32(rMEMP)
+	beqlr
+	add	rMEMP, rMEMP, rALIGN
+	b	L(medium_tail2)
+
+	.align 5
+/* Clear lines of memory in 128-byte chunks.  */
+L(zloopstart):
+/* If the remaining length is less than 32 bytes, don't bother getting
+   the cache line size.  */
+	beq	L(medium)
+	li      rCLS,128  /* cache line size is 128 */
+	dcbt	0,rMEMP
+L(getCacheAligned):
+	cmplwi	cr1,rLEN,32
+	andi.	rTMP,rMEMP,127
+	blt	cr1,L(handletail32)
+	beq	L(cacheAligned)
+	addi	rMEMP,rMEMP,32
+	addi	rLEN,rLEN,-32
+	stw	rCHR,-32(rMEMP)
+        stw     rCHR,-28(rMEMP)
+	stw	rCHR,-24(rMEMP)
+	stw     rCHR,-20(rMEMP)
+	stw	rCHR,-16(rMEMP)
+        stw     rCHR,-12(rMEMP)
+	stw	rCHR,-8(rMEMP)
+        stw     rCHR,-4(rMEMP)
+	b	L(getCacheAligned)
+
+/* Now we are aligned to the cache line and can use dcbz.  */
+        .align 4
+L(cacheAligned):
+	cmplw	cr1,rLEN,rCLS
+	blt	cr1,L(handletail32)
+	dcbz	0,rMEMP
+	subf	rLEN,rCLS,rLEN
+	add	rMEMP,rMEMP,rCLS
+	b	L(cacheAligned)
+
+/* We are here because the cache line size was set and the remainder
+  (rLEN) is less than the actual cache line size.
+   So set up the preconditions for L(nondcbz) and go there.  */
+L(handletail32):
+	clrrwi.	rALIGN, rLEN, 5
+	b		L(nondcbz)
+
+	.align 5
+L(small):
+/* Memset of 4 bytes or less.  */
+	cmplwi	cr5, rLEN, 1
+	cmplwi	cr1, rLEN, 3
+	bltlr	cr5
+	stb	rCHR, 0(rMEMP)
+	beqlr	cr5
+	stb	rCHR, 1(rMEMP)
+	bltlr	cr1
+	stb	rCHR, 2(rMEMP)
+	beqlr	cr1
+	stb	rCHR, 3(rMEMP)
+	blr
+
+/* Memset of 0-31 bytes.  */
+	.align 5
+L(medium):
+	cmplwi	cr1, rLEN, 16
+L(medium_tail2):
+	add	rMEMP, rMEMP, rLEN
+L(medium_tail):
+	bt-	31, L(medium_31t)
+	bt-	30, L(medium_30t)
+L(medium_30f):
+	bt-	29, L(medium_29t)
+L(medium_29f):
+	bge-	cr1, L(medium_27t)
+	bflr-	28
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	blr
+
+L(medium_31t):
+	stbu	rCHR, -1(rMEMP)
+	bf-	30, L(medium_30f)
+L(medium_30t):
+	sthu	rCHR, -2(rMEMP)
+	bf-	29, L(medium_29f)
+L(medium_29t):
+	stwu	rCHR, -4(rMEMP)
+	blt-	cr1, L(medium_27f)
+L(medium_27t):
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+        stw     rCHR, -12(rMEMP)
+	stwu	rCHR, -16(rMEMP)
+L(medium_27f):
+	bflr-	28
+L(medium_28t):
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	blr
+END (memset)
+libc_hidden_builtin_def (memset)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/Makefile b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/Makefile
new file mode 100644
index 0000000000..bd9d360efa
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/Makefile
@@ -0,0 +1,30 @@
+ifeq ($(subdir),string)
+sysdep_routines += memcpy-power7 memcpy-a2 memcpy-power6 memcpy-cell \
+		   memcpy-ppc32 memcmp-power7 memcmp-ppc32 memset-power7 \
+		   memset-power6 memset-ppc32 bzero-power7 bzero-power6 \
+		   bzero-ppc32 mempcpy-power7 mempcpy-ppc32 memchr-power7 \
+		   memchr-ppc32 memrchr-power7 memrchr-ppc32 rawmemchr-power7 \
+		   rawmemchr-ppc32 strlen-power7 strlen-ppc32 strnlen-power7 \
+		   strnlen-ppc32 strncmp-power7 strncmp-ppc32 \
+		   strcasecmp-power7 strcasecmp_l-power7 strncase-power7 \
+		   strncase_l-power7 strchrnul-power7 strchrnul-ppc32 \
+		   strchr-power7 strchr-ppc32 \
+		   wordcopy-power7 wordcopy-ppc32 \
+		   memmove-power7 memmove-ppc
+
+CFLAGS-strncase-power7.c += -mcpu=power7 -funroll-loops
+CFLAGS-strncase_l-power7.c += -mcpu=power7 -funroll-loops
+endif
+
+ifeq ($(subdir),wcsmbs)
+sysdep_routines += wcschr-power7 wcschr-power6 wcschr-ppc32 \
+		   wcsrchr-power7 wcsrchr-power6 wcsrchr-ppc32 \
+		   wcscpy-power7 wcscpy-power6 wcscpy-ppc32
+
+CFLAGS-wcschr-power7.c += -mcpu=power7
+CFLAGS-wcschr-power6.c += -mcpu=power6
+CFLAGS-wcsrchr-power7.c += -mcpu=power7
+CFLAGS-wcsrchr-power6.c += -mcpu=power6
+CFLAGS-wcscpy-power7.c += -mcpu=power7
+CFLAGS-wcscpy-power6.c += -mcpu=power6
+endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-power6.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-power6.S
new file mode 100644
index 0000000000..4e000309cf
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-power6.S
@@ -0,0 +1,26 @@
+/* Optimized bzero implementation for PowerPC32/POWER6.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+ENTRY (__bzero_power6)
+        mr      r5,r4
+        li      r4,0
+        b       __memset_power6@local
+END (__bzero_power6)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-power7.S
new file mode 100644
index 0000000000..580da55166
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-power7.S
@@ -0,0 +1,26 @@
+/* Optimized bzero implementation for PowerPC32/POWER7.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+ENTRY (__bzero_power7)
+        mr      r5,r4
+        li      r4,0
+        b       __memset_power7@local
+END (__bzero_power7)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-ppc32.S
new file mode 100644
index 0000000000..33c69cbfb9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero-ppc32.S
@@ -0,0 +1,35 @@
+/* Optimized bzero implementation for PowerPC32/PPC32.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* The memset ifunc selector is not built for static builds, and in
+   shared builds memset@local would make the linker point the call at
+   the ifunc selector; so call __memset_ppc directly when SHARED and
+   plain memset otherwise.  */
+#ifdef SHARED
+# define MEMSET __memset_ppc
+#else
+# define MEMSET memset
+#endif
+
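+/* For reference (editorial note): __bzero_ppc below, like its power6
+   and power7 siblings, is the two-instruction equivalent of
+
+     void bzero (void *s, size_t n) { memset (s, 0, n); }
+
+   moving the length from r4 to r5, zeroing r4 and tail-calling the
+   selected memset.  */
+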
+ENTRY (__bzero_ppc)
+        mr      r5,r4
+        li      r4,0
+        b       MEMSET@local
+END (__bzero_ppc)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero.c
new file mode 100644
index 0000000000..865920ee26
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/bzero.c
@@ -0,0 +1,37 @@
+/* Multiple versions of bzero.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Define multiple versions only for definition in libc.  */
+#if IS_IN (libc)
+# include <string.h>
+# include <strings.h>
+# include "init-arch.h"
+
+extern __typeof (bzero) __bzero_ppc attribute_hidden;
+extern __typeof (bzero) __bzero_power6 attribute_hidden;
+extern __typeof (bzero) __bzero_power7 attribute_hidden;
+
+libc_ifunc (__bzero,
+            (hwcap & PPC_FEATURE_HAS_VSX)
+            ? __bzero_power7 :
+	      (hwcap & PPC_FEATURE_ARCH_2_05)
+		? __bzero_power6
+            : __bzero_ppc);
+
+weak_alias (__bzero, bzero)
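+
+/* The selection above, written out (editorial note):
+
+     if (hwcap & PPC_FEATURE_HAS_VSX)        __bzero = __bzero_power7;
+     else if (hwcap & PPC_FEATURE_ARCH_2_05) __bzero = __bzero_power6;
+     else                                    __bzero = __bzero_ppc;  */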
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/ifunc-impl-list.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/ifunc-impl-list.c
new file mode 100644
index 0000000000..1caf15a07d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/ifunc-impl-list.c
@@ -0,0 +1,224 @@
+/* Enumerate available IFUNC implementations of a function.  PowerPC32 version.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <assert.h>
+#include <string.h>
+#include <wchar.h>
+#include <ldsodefs.h>
+#include <ifunc-impl-list.h>
+
+/* Maximum number of IFUNC implementations.  */
+#define MAX_IFUNC	6
+
+size_t
+__libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
+			size_t max)
+{
+  assert (max >= MAX_IFUNC);
+
+  size_t i = 0;
+
+  unsigned long int hwcap = GLRO(dl_hwcap);
+  /* hwcap contains only the latest supported ISA; check which one it
+     is and fill in the previously supported ones.  */
+  if (hwcap & PPC_FEATURE_ARCH_2_06)
+    hwcap |= PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_POWER5_PLUS |
+             PPC_FEATURE_POWER5 | PPC_FEATURE_POWER4;
+  else if (hwcap & PPC_FEATURE_ARCH_2_05)
+    hwcap |= PPC_FEATURE_POWER5_PLUS | PPC_FEATURE_POWER5 | PPC_FEATURE_POWER4;
+  else if (hwcap & PPC_FEATURE_POWER5_PLUS)
+    hwcap |= PPC_FEATURE_POWER5 | PPC_FEATURE_POWER4;
+  else if (hwcap & PPC_FEATURE_POWER5)
+    hwcap |= PPC_FEATURE_POWER4;
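+  /* For instance (editorial note): if only PPC_FEATURE_ARCH_2_06
+     (ISA 2.06, i.e. POWER7) is reported, the fill-down above also sets
+     the 2_05/POWER5+/POWER5/POWER4 bits, so IFUNC_IMPL_ADD entries
+     guarded by the older bits are listed as well.  */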
+
+#ifdef SHARED
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/memcpy.c.  */
+  IFUNC_IMPL (i, name, memcpy,
+	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_HAS_VSX,
+			      __memcpy_power7)
+	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_ARCH_2_06,
+			      __memcpy_a2)
+	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_ARCH_2_05,
+			      __memcpy_power6)
+	      IFUNC_IMPL_ADD (array, i, memcpy, hwcap & PPC_FEATURE_CELL_BE,
+			      __memcpy_cell)
+	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/memmove.c.  */
+  IFUNC_IMPL (i, name, memmove,
+	      IFUNC_IMPL_ADD (array, i, memmove, hwcap & PPC_FEATURE_HAS_VSX,
+			      __memmove_power7)
+	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/memset.c.  */
+  IFUNC_IMPL (i, name, memset,
+	      IFUNC_IMPL_ADD (array, i, memset, hwcap & PPC_FEATURE_HAS_VSX,
+			      __memset_power7)
+	      IFUNC_IMPL_ADD (array, i, memset, hwcap & PPC_FEATURE_ARCH_2_05,
+			      __memset_power6)
+	      IFUNC_IMPL_ADD (array, i, memset, 1, __memset_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/bzero.c.  */
+  IFUNC_IMPL (i, name, bzero,
+	      IFUNC_IMPL_ADD (array, i, bzero, hwcap & PPC_FEATURE_HAS_VSX,
+			      __bzero_power7)
+	      IFUNC_IMPL_ADD (array, i, bzero, hwcap & PPC_FEATURE_ARCH_2_05,
+			      __bzero_power6)
+	      IFUNC_IMPL_ADD (array, i, bzero, 1, __bzero_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/strlen.c.  */
+  IFUNC_IMPL (i, name, strlen,
+	      IFUNC_IMPL_ADD (array, i, strlen, hwcap & PPC_FEATURE_HAS_VSX,
+			      __strlen_power7)
+	      IFUNC_IMPL_ADD (array, i, strlen, 1,
+			      __strlen_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/strnlen.c.  */
+  IFUNC_IMPL (i, name, strnlen,
+	      IFUNC_IMPL_ADD (array, i, strnlen, hwcap & PPC_FEATURE_HAS_VSX,
+			      __strnlen_power7)
+	      IFUNC_IMPL_ADD (array, i, strnlen, 1,
+			      __strnlen_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/multiarch/strncmp.c.  */
+  IFUNC_IMPL (i, name, strncmp,
+	      IFUNC_IMPL_ADD (array, i, strncmp, hwcap & PPC_FEATURE_HAS_VSX,
+			      __strncmp_power7)
+	      IFUNC_IMPL_ADD (array, i, strncmp, 1,
+			      __strncmp_ppc))
+#endif
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/memcmp.c.  */
+  IFUNC_IMPL (i, name, memcmp,
+	      IFUNC_IMPL_ADD (array, i, memcmp, hwcap & PPC_FEATURE_HAS_VSX,
+			      __memcmp_power7)
+	      IFUNC_IMPL_ADD (array, i, memcmp, 1, __memcmp_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy.c.  */
+  IFUNC_IMPL (i, name, mempcpy,
+	      IFUNC_IMPL_ADD (array, i, mempcpy,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __mempcpy_power7)
+	      IFUNC_IMPL_ADD (array, i, mempcpy, 1,
+			      __mempcpy_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/memchr.c.  */
+  IFUNC_IMPL (i, name, memchr,
+	      IFUNC_IMPL_ADD (array, i, memchr,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __memchr_power7)
+	      IFUNC_IMPL_ADD (array, i, memchr, 1,
+			      __memchr_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/memrchr.c.  */
+  IFUNC_IMPL (i, name, memrchr,
+	      IFUNC_IMPL_ADD (array, i, memrchr,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __memrchr_power7)
+	      IFUNC_IMPL_ADD (array, i, memrchr, 1,
+			      __memrchr_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr.c.  */
+  IFUNC_IMPL (i, name, rawmemchr,
+	      IFUNC_IMPL_ADD (array, i, rawmemchr,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __rawmemchr_power7)
+	      IFUNC_IMPL_ADD (array, i, rawmemchr, 1,
+			      __rawmemchr_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp.c.  */
+  IFUNC_IMPL (i, name, strcasecmp,
+	      IFUNC_IMPL_ADD (array, i, strcasecmp,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __strcasecmp_power7)
+	      IFUNC_IMPL_ADD (array, i, strcasecmp, 1, __strcasecmp_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp_l.c.  */
+  IFUNC_IMPL (i, name, strcasecmp_l,
+	      IFUNC_IMPL_ADD (array, i, strcasecmp_l,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __strcasecmp_l_power7)
+	      IFUNC_IMPL_ADD (array, i, strcasecmp_l, 1,
+			      __strcasecmp_l_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/strncase.c.  */
+  IFUNC_IMPL (i, name, strncasecmp,
+	      IFUNC_IMPL_ADD (array, i, strncasecmp,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __strncasecmp_power7)
+	      IFUNC_IMPL_ADD (array, i, strncasecmp, 1, __strncasecmp_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/strncase_l.c.  */
+  IFUNC_IMPL (i, name, strncasecmp_l,
+	      IFUNC_IMPL_ADD (array, i, strncasecmp_l,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __strncasecmp_l_power7)
+	      IFUNC_IMPL_ADD (array, i, strncasecmp_l, 1,
+			      __strncasecmp_l_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul.c.  */
+  IFUNC_IMPL (i, name, strchrnul,
+	      IFUNC_IMPL_ADD (array, i, strchrnul,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __strchrnul_power7)
+	      IFUNC_IMPL_ADD (array, i, strchrnul, 1,
+			      __strchrnul_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/strchr.c.  */
+  IFUNC_IMPL (i, name, strchr,
+	      IFUNC_IMPL_ADD (array, i, strchr,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __strchr_power7)
+	      IFUNC_IMPL_ADD (array, i, strchr, 1,
+			      __strchr_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/wcschr.c.  */
+  IFUNC_IMPL (i, name, wcschr,
+	      IFUNC_IMPL_ADD (array, i, wcschr,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __wcschr_power7)
+	      IFUNC_IMPL_ADD (array, i, wcschr,
+			      hwcap & PPC_FEATURE_ARCH_2_05,
+			      __wcschr_power6)
+	      IFUNC_IMPL_ADD (array, i, wcschr, 1,
+			      __wcschr_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr.c.  */
+  IFUNC_IMPL (i, name, wcsrchr,
+	      IFUNC_IMPL_ADD (array, i, wcsrchr,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __wcsrchr_power7)
+	      IFUNC_IMPL_ADD (array, i, wcsrchr,
+			      hwcap & PPC_FEATURE_ARCH_2_05,
+			      __wcsrchr_power6)
+	      IFUNC_IMPL_ADD (array, i, wcsrchr, 1,
+			      __wcsrchr_ppc))
+
+  /* Support sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy.c.  */
+  IFUNC_IMPL (i, name, wcscpy,
+	      IFUNC_IMPL_ADD (array, i, wcscpy,
+			      hwcap & PPC_FEATURE_HAS_VSX,
+			      __wcscpy_power7)
+	      IFUNC_IMPL_ADD (array, i, wcscpy,
+			      hwcap & PPC_FEATURE_ARCH_2_05,
+			      __wcscpy_power6)
+	      IFUNC_IMPL_ADD (array, i, wcscpy, 1,
+			      __wcscpy_ppc))
+
+  return i;
+}
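
Aside (illustration only, not part of the patch): each IFUNC_IMPL block above contributes one entry per candidate implementation — its name, its entry point, and whether the backfilled hwcap makes it usable on the running CPU — and the function returns how many entries were written. A consumer such as the string test harness just walks that array. A rough sketch of such a walk follows; the struct layout is inferred from how IFUNC_IMPL_ADD is used here, not quoted from the real ifunc-impl-list.h.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed shape of one list entry, mirroring the (name, fn, usable)
   triple that IFUNC_IMPL_ADD records above.  */
struct demo_ifunc_impl
{
  const char *name;
  void (*fn) (void);
  bool usable;
};

static void
demo_print_usable (const char *func, const struct demo_ifunc_impl *array,
                   size_t n)
{
  for (size_t i = 0; i < n; i++)
    if (array[i].usable)
      printf ("%s: %s\n", func, array[i].name);
}

int
main (void)
{
  struct demo_ifunc_impl impls[] = {
    { "__memcpy_power7", NULL, false },  /* usable only with VSX */
    { "__memcpy_ppc",    NULL, true  },  /* baseline, always usable */
  };
  demo_print_usable ("memcpy", impls, sizeof impls / sizeof impls[0]);
  return 0;
}
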
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/init-arch.h b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/init-arch.h
new file mode 100644
index 0000000000..f2e6a4b705
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/init-arch.h
@@ -0,0 +1,53 @@
+/* This file is part of the GNU C Library.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <ldsodefs.h>
+
+/* Check whether _rtld_global_ro has been relocated before accessing its
+   dl_hwcap field.  The inline asm keeps the compiler from optimizing away
+   the test (&_rtld_global_ro != NULL), which is always true in ISO C but
+   not here, because _rtld_global_ro might not have been relocated yet.  */
+#if defined(SHARED) && !IS_IN (rtld)
+# define __GLRO(value) \
+  ({ volatile void **__p = (volatile void**)(&_rtld_global_ro);	\
+    unsigned long int __ret;					\
+     asm ("# x in %0" : "+r" (__p));				\
+     __ret = (__p) ? GLRO(value) : 0;				\
+     __ret; })
+#else
+# define __GLRO(value)  GLRO(value)
+#endif
+
+/* dl_hwcap advertises only the newest supported ISA; the macro checks which
+   one that is and fills in the bits for the older ISAs it implies.  */
+#define INIT_ARCH() \
+  unsigned long int hwcap = __GLRO(dl_hwcap); 			\
+  unsigned long int __attribute__((unused)) hwcap2 = __GLRO(dl_hwcap2); \
+  if (hwcap & PPC_FEATURE_ARCH_2_06)				\
+    hwcap |= PPC_FEATURE_ARCH_2_05 |				\
+	     PPC_FEATURE_POWER5_PLUS |				\
+	     PPC_FEATURE_POWER5 |				\
+	     PPC_FEATURE_POWER4;				\
+  else if (hwcap & PPC_FEATURE_ARCH_2_05)			\
+    hwcap |= PPC_FEATURE_POWER5_PLUS |				\
+	     PPC_FEATURE_POWER5 |				\
+	     PPC_FEATURE_POWER4;				\
+  else if (hwcap & PPC_FEATURE_POWER5_PLUS)			\
+    hwcap |= PPC_FEATURE_POWER5 |				\
+	     PPC_FEATURE_POWER4;				\
+  else if (hwcap & PPC_FEATURE_POWER5)				\
+    hwcap |= PPC_FEATURE_POWER4;
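
Aside (illustration only, not part of the patch): because dl_hwcap carries only the newest ISA bit, a selector testing an older feature (say PPC_FEATURE_ARCH_2_05 on a POWER7 machine) would otherwise miss it; the cascade widens the mask so any single `hwcap & PPC_FEATURE_xxx` test works. The same backfill written as a plain function, with stand-in bit values rather than the kernel's PPC_FEATURE_* constants:

/* Stand-in feature bits; the real code uses PPC_FEATURE_* from the kernel.  */
#define DEMO_POWER4      (1UL << 0)
#define DEMO_POWER5      (1UL << 1)
#define DEMO_POWER5_PLUS (1UL << 2)
#define DEMO_ARCH_2_05   (1UL << 3)
#define DEMO_ARCH_2_06   (1UL << 4)

/* Widen hwcap so that every ISA implied by the reported one is set too.  */
unsigned long
demo_backfill_hwcap (unsigned long hwcap)
{
  if (hwcap & DEMO_ARCH_2_06)
    hwcap |= DEMO_ARCH_2_05 | DEMO_POWER5_PLUS | DEMO_POWER5 | DEMO_POWER4;
  else if (hwcap & DEMO_ARCH_2_05)
    hwcap |= DEMO_POWER5_PLUS | DEMO_POWER5 | DEMO_POWER4;
  else if (hwcap & DEMO_POWER5_PLUS)
    hwcap |= DEMO_POWER5 | DEMO_POWER4;
  else if (hwcap & DEMO_POWER5)
    hwcap |= DEMO_POWER4;
  return hwcap;
}

/* Example: an input of just DEMO_ARCH_2_06 comes back with all four
   older bits set as well.  */
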
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr-power7.S
new file mode 100644
index 0000000000..e7eb56a8fc
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr-power7.S
@@ -0,0 +1,40 @@
+/* Optimized memchr implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+ .globl C_SYMBOL_NAME(__memchr_power7);				\
+ .type C_SYMBOL_NAME(__memchr_power7),@function;		\
+ C_LABEL(__memchr_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__memchr_power7)
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/memchr.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr-ppc32.c
new file mode 100644
index 0000000000..1e4b88f9e9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr-ppc32.c
@@ -0,0 +1,34 @@
+/* PowerPC32 default implementation of memchr.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+
+#define MEMCHR  __memchr_ppc
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+#ifdef SHARED
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name) \
+  __hidden_ver1(__memchr_ppc, __GI_memchr, __memchr_ppc);
+#endif
+
+extern __typeof (memchr) __memchr_ppc attribute_hidden;
+
+#include <string/memchr.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr.c
new file mode 100644
index 0000000000..7eb4be7248
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memchr.c
@@ -0,0 +1,41 @@
+/* Multiple versions of memchr.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# undef memchr
+/* Redefine memchr so that the compiler won't make the weak_alias point
+   to the internal hidden definition (__GI_memchr), since PPC32 does not
+   support local IFUNC calls.  */
+# define memchr __redirect_memchr
+# include <string.h>
+# include "init-arch.h"
+
+extern __typeof (__redirect_memchr) __memchr_ppc attribute_hidden;
+extern __typeof (__redirect_memchr) __memchr_power7 attribute_hidden;
+
+extern __typeof (__redirect_memchr) __libc_memchr;
+
+libc_ifunc (__libc_memchr,
+	    (hwcap & PPC_FEATURE_HAS_VSX)
+            ? __memchr_power7
+            : __memchr_ppc);
+#undef memchr
+weak_alias (__libc_memchr, memchr)
+#else
+#include <string/memchr.c>
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp-power7.S
new file mode 100644
index 0000000000..e002aef057
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp-power7.S
@@ -0,0 +1,41 @@
+/* Optimized memcmp implementation for POWER7/PowerPC32.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+ .globl C_SYMBOL_NAME(__memcmp_power7);				\
+ .type C_SYMBOL_NAME(__memcmp_power7),@function;		\
+ .align ALIGNARG(alignt);					\
+ EALIGN_W_##words;						\
+ C_LABEL(__memcmp_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__memcmp_power7)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+#include <sysdeps/powerpc/powerpc32/power7/memcmp.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp-ppc32.S
new file mode 100644
index 0000000000..dc1f21bcb5
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp-ppc32.S
@@ -0,0 +1,45 @@
+/* Default memcmp implementation for PowerPC32.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#if IS_IN (libc)
+# undef EALIGN
+# define EALIGN(name, alignt, words)				\
+  .globl C_SYMBOL_NAME(__memcmp_ppc);				\
+  .type C_SYMBOL_NAME(__memcmp_ppc),@function;		\
+  .align ALIGNARG(alignt);					\
+  EALIGN_W_##words;						\
+  C_LABEL(__memcmp_ppc)					\
+  cfi_startproc;
+
+# undef END
+# define END(name)						\
+  cfi_endproc;							\
+  ASM_SIZE_DIRECTIVE(__memcmp_ppc)
+
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name)				\
+  .globl __GI_memcmp; __GI_memcmp = __memcmp_ppc
+
+# undef weak_alias
+# define weak_alias(a, b)					\
+  .weak b ; b = __memcmp_ppc
+#endif
+
+#include <sysdeps/powerpc/powerpc32/power4/memcmp.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp.c
new file mode 100644
index 0000000000..00bbcfaa4c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcmp.c
@@ -0,0 +1,36 @@
+/* Multiple versions of memcmp.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Define multiple versions only for definition in libc.  */
+#if IS_IN (libc)
+# define memcmp __redirect_memcmp
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (memcmp) __memcmp_ppc attribute_hidden;
+extern __typeof (memcmp) __memcmp_power7 attribute_hidden;
+# undef memcmp
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+libc_ifunc_redirected (__redirect_memcmp, memcmp,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __memcmp_power7
+		       : __memcmp_ppc);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-a2.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-a2.S
new file mode 100644
index 0000000000..17a31226c5
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-a2.S
@@ -0,0 +1,38 @@
+/* Optimized memcpy implementation for PowerPC A2.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+ .globl C_SYMBOL_NAME(__memcpy_a2);				\
+ .type C_SYMBOL_NAME(__memcpy_a2),@function;			\
+ .align ALIGNARG(alignt);					\
+ EALIGN_W_##words;						\
+ C_LABEL(__memcpy_a2)						\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__memcpy_a2)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/a2/memcpy.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-cell.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-cell.S
new file mode 100644
index 0000000000..59859c6b94
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-cell.S
@@ -0,0 +1,38 @@
+/* Optimized memcpy implementation for CELL BE PowerPC.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+ .globl C_SYMBOL_NAME(__memcpy_cell);				\
+ .type C_SYMBOL_NAME(__memcpy_cell),@function;			\
+ .align ALIGNARG(alignt);					\
+ EALIGN_W_##words;						\
+ C_LABEL(__memcpy_cell)						\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__memcpy_cell)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/cell/memcpy.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-power6.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-power6.S
new file mode 100644
index 0000000000..750151973b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-power6.S
@@ -0,0 +1,38 @@
+/* Optimized memcpy implementation for PowerPC32 on POWER6.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+ .globl C_SYMBOL_NAME(__memcpy_power6);				\
+ .type C_SYMBOL_NAME(__memcpy_power6),@function;		\
+ .align ALIGNARG(alignt);					\
+ EALIGN_W_##words;						\
+ C_LABEL(__memcpy_power6)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__memcpy_power6)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power6/memcpy.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-power7.S
new file mode 100644
index 0000000000..3ac7c32084
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-power7.S
@@ -0,0 +1,38 @@
+/* Optimized memcpy implementation for PowerPC32/POWER7.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+ .globl C_SYMBOL_NAME(__memcpy_power7);				\
+ .type C_SYMBOL_NAME(__memcpy_power7),@function;		\
+ .align ALIGNARG(alignt);					\
+ EALIGN_W_##words;						\
+ C_LABEL(__memcpy_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__memcpy_power7)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/memcpy.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-ppc32.S
new file mode 100644
index 0000000000..f018684155
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy-ppc32.S
@@ -0,0 +1,41 @@
+/* Default memcpy implementation for PowerPC32.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#if defined SHARED && IS_IN (libc)
+# undef EALIGN
+# define EALIGN(name, alignt, words)				\
+  .globl C_SYMBOL_NAME(__memcpy_ppc);				\
+  .type C_SYMBOL_NAME(__memcpy_ppc),@function;		\
+  .align ALIGNARG(alignt);					\
+  EALIGN_W_##words;						\
+  C_LABEL(__memcpy_ppc)					\
+  cfi_startproc;
+
+# undef END
+# define END(name)						\
+  cfi_endproc;							\
+  ASM_SIZE_DIRECTIVE(__memcpy_ppc)
+
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name)				\
+    .globl __GI_memcpy; __GI_memcpy = __memcpy_ppc
+#endif
+
+#include <sysdeps/powerpc/powerpc32/power4/memcpy.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy.c
new file mode 100644
index 0000000000..b414ba946b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memcpy.c
@@ -0,0 +1,48 @@
+/* Multiple versions of memcpy.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Define multiple versions only for the definition in libc and only for
+   the shared library (DSO); static binaries need memcpy before the IFUNC
+   machinery has been initialized.  */
+#if defined SHARED && IS_IN (libc)
+# undef memcpy
+# define memcpy __redirect_memcpy
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (memcpy) __memcpy_ppc attribute_hidden;
+extern __typeof (memcpy) __memcpy_cell attribute_hidden;
+extern __typeof (memcpy) __memcpy_power6 attribute_hidden;
+extern __typeof (memcpy) __memcpy_a2 attribute_hidden;
+extern __typeof (memcpy) __memcpy_power7 attribute_hidden;
+# undef memcpy
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+libc_ifunc_redirected (__redirect_memcpy, memcpy,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __memcpy_power7
+		       : (hwcap & PPC_FEATURE_ARCH_2_06)
+			 ? __memcpy_a2
+			 : (hwcap & PPC_FEATURE_ARCH_2_05)
+			   ? __memcpy_power6
+			   : (hwcap & PPC_FEATURE_CELL_BE)
+			     ? __memcpy_cell
+			     : __memcpy_ppc);
+#endif
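
Aside (illustration only, not part of the patch): the nested conditional expression above is a strict priority order — the newest implementation whose feature bit is present wins, and __memcpy_ppc is the unconditional fallback. The same selection order flattened into an if/else chain, with stand-in names and bits:

#include <stddef.h>
#include <string.h>

#define DEMO_HAS_VSX   (1UL << 0)
#define DEMO_ARCH_2_06 (1UL << 1)
#define DEMO_ARCH_2_05 (1UL << 2)
#define DEMO_CELL_BE   (1UL << 3)

typedef void *(*demo_memcpy_fn) (void *, const void *, size_t);

/* Stand-ins for the __memcpy_* variants declared above.  */
static void *demo_memcpy_power7 (void *d, const void *s, size_t n)
{ return memcpy (d, s, n); }
static void *demo_memcpy_a2 (void *d, const void *s, size_t n)
{ return memcpy (d, s, n); }
static void *demo_memcpy_power6 (void *d, const void *s, size_t n)
{ return memcpy (d, s, n); }
static void *demo_memcpy_cell (void *d, const void *s, size_t n)
{ return memcpy (d, s, n); }
static void *demo_memcpy_ppc (void *d, const void *s, size_t n)
{ return memcpy (d, s, n); }

/* Newest capability first; the last return is the unconditional baseline.  */
demo_memcpy_fn
demo_select_memcpy (unsigned long hwcap)
{
  if (hwcap & DEMO_HAS_VSX)
    return demo_memcpy_power7;
  if (hwcap & DEMO_ARCH_2_06)
    return demo_memcpy_a2;
  if (hwcap & DEMO_ARCH_2_05)
    return demo_memcpy_power6;
  if (hwcap & DEMO_CELL_BE)
    return demo_memcpy_cell;
  return demo_memcpy_ppc;
}
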
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove-power7.c
new file mode 100644
index 0000000000..12902fec3f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove-power7.c
@@ -0,0 +1,41 @@
+/* Power7 multiarch memmove.
+   Copyright (C) 2014-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include <memcopy.h>
+
+extern __typeof (_wordcopy_fwd_aligned) _wordcopy_fwd_aligned_power7;
+extern __typeof (_wordcopy_fwd_dest_aligned) _wordcopy_fwd_dest_aligned_power7;
+extern __typeof (_wordcopy_bwd_aligned) _wordcopy_bwd_aligned_power7;
+extern __typeof (_wordcopy_bwd_dest_aligned) _wordcopy_bwd_dest_aligned_power7;
+
+#define _wordcopy_fwd_aligned       _wordcopy_fwd_aligned_power7
+#define _wordcopy_fwd_dest_aligned  _wordcopy_fwd_dest_aligned_power7
+#define _wordcopy_bwd_aligned       _wordcopy_bwd_aligned_power7
+#define _wordcopy_bwd_dest_aligned  _wordcopy_bwd_dest_aligned_power7
+
+extern __typeof (memcpy) __memcpy_power7;
+#define memcpy __memcpy_power7
+
+extern __typeof (memmove) __memmove_power7;
+#define MEMMOVE __memmove_power7
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <string/memmove.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove-ppc.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove-ppc.c
new file mode 100644
index 0000000000..59f298507c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove-ppc.c
@@ -0,0 +1,44 @@
+/* Default memmove implementation for PowerPC32.
+   Copyright (C) 2014-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include <memcopy.h>
+
+extern __typeof (_wordcopy_fwd_aligned) _wordcopy_fwd_aligned_ppc;
+extern __typeof (_wordcopy_fwd_dest_aligned) _wordcopy_fwd_dest_aligned_ppc;
+extern __typeof (_wordcopy_bwd_aligned) _wordcopy_bwd_aligned_ppc;
+extern __typeof (_wordcopy_bwd_dest_aligned) _wordcopy_bwd_dest_aligned_ppc;
+
+#define _wordcopy_fwd_aligned       _wordcopy_fwd_aligned_ppc
+#define _wordcopy_fwd_dest_aligned  _wordcopy_fwd_dest_aligned_ppc
+#define _wordcopy_bwd_aligned       _wordcopy_bwd_aligned_ppc
+#define _wordcopy_bwd_dest_aligned  _wordcopy_bwd_dest_aligned_ppc
+
+extern __typeof (memcpy) __memcpy_ppc;
+#define memcpy __memcpy_ppc
+
+extern __typeof (memmove) __memmove_ppc;
+#define MEMMOVE __memmove_ppc
+
+#if defined SHARED
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name)  \
+  __hidden_ver1 (__memmove_ppc, __GI_memmove, __memmove_ppc);
+#endif
+
+#include <string/memmove.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove.c
new file mode 100644
index 0000000000..481139fae8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memmove.c
@@ -0,0 +1,36 @@
+/* Multiple versions of memmove. PowerPC32 version.
+   Copyright (C) 2014-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if defined SHARED && IS_IN (libc)
+/* Redefine memmove so that the compiler won't complain about the type
+   mismatch with the IFUNC selector below.  */
+# define memmove __redirect_memmove
+# include <string.h>
+# include "init-arch.h"
+
+extern __typeof (memmove) __memmove_ppc attribute_hidden;
+extern __typeof (memmove) __memmove_power7 attribute_hidden;
+# undef memmove
+
+libc_ifunc_redirected (__redirect_memmove, memmove,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __memmove_power7
+		       : __memmove_ppc);
+#else
+# include <string/memmove.c>
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy-power7.S
new file mode 100644
index 0000000000..a1a078dec6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy-power7.S
@@ -0,0 +1,35 @@
+/* Optimized mempcpy implementation for POWER7.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+ .globl C_SYMBOL_NAME(__mempcpy_power7);			\
+ .type C_SYMBOL_NAME(__mempcpy_power7),@function;		\
+ .align ALIGNARG(alignt);					\
+ EALIGN_W_##words;						\
+ C_LABEL(__mempcpy_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__mempcpy_power7)
+
+#include <sysdeps/powerpc/powerpc32/power7/mempcpy.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy-ppc32.c
new file mode 100644
index 0000000000..2a20060e5b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy-ppc32.c
@@ -0,0 +1,32 @@
+/* PowerPC32 default implementation of mempcpy.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define MEMPCPY  __mempcpy_ppc
+
+#undef libc_hidden_def
+#define libc_hidden_def(name)
+#undef weak_alias
+#define weak_alias(a, b)
+
+#if defined SHARED
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name)  \
+  __hidden_ver1 (__mempcpy_ppc, __GI_mempcpy, __mempcpy_ppc);
+#endif
+
+#include <string/mempcpy.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy.c
new file mode 100644
index 0000000000..0c7250a4bf
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/mempcpy.c
@@ -0,0 +1,44 @@
+/* Multiple versions of mempcpy.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# define mempcpy __redirect_mempcpy
+# define __mempcpy __redirect___mempcpy
+# define NO_MEMPCPY_STPCPY_REDIRECT
+/* Omit the mempcpy inline definitions because they would redefine mempcpy.  */
+# define _HAVE_STRING_ARCH_mempcpy 1
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (__mempcpy) __mempcpy_ppc attribute_hidden;
+extern __typeof (__mempcpy) __mempcpy_power7 attribute_hidden;
+# undef mempcpy
+# undef __mempcpy
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+libc_ifunc_redirected (__redirect___mempcpy,  __mempcpy,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __mempcpy_power7
+		       : __mempcpy_ppc);
+
+weak_alias (__mempcpy, mempcpy)
+#else
+# include <string/mempcpy.c>
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr-power7.S
new file mode 100644
index 0000000000..4c3f6af9f3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr-power7.S
@@ -0,0 +1,40 @@
+/* Optimized memrchr implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+ .globl C_SYMBOL_NAME(__memrchr_power7);			\
+ .type C_SYMBOL_NAME(__memrchr_power7),@function;		\
+ C_LABEL(__memrchr_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__memrchr_power7)
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/memrchr.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr-ppc32.c
new file mode 100644
index 0000000000..a0247f49c8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr-ppc32.c
@@ -0,0 +1,25 @@
+/* PowerPC32 default implementation of memrchr.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# define MEMRCHR  __memrchr_ppc
+# include <string.h>
+extern void *__memrchr_ppc (const void *, int, size_t);
+#endif
+
+#include <string/memrchr.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr.c
new file mode 100644
index 0000000000..fb09fdf89c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memrchr.c
@@ -0,0 +1,37 @@
+/* Multiple versions of memrchr.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (__memrchr) __memrchr_ppc attribute_hidden;
+extern __typeof (__memrchr) __memrchr_power7 attribute_hidden;
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+libc_ifunc (__memrchr,
+	    (hwcap & PPC_FEATURE_HAS_VSX)
+            ? __memrchr_power7
+            : __memrchr_ppc);
+
+weak_alias (__memrchr, memrchr)
+#else
+#include <string/memrchr.c>
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-power6.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-power6.S
new file mode 100644
index 0000000000..55ff437a20
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-power6.S
@@ -0,0 +1,38 @@
+/* Optimized 32-bit memset implementation for POWER6.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+ .globl C_SYMBOL_NAME(__memset_power6);				\
+ .type C_SYMBOL_NAME(__memset_power6),@function;		\
+ .align ALIGNARG(alignt);					\
+ EALIGN_W_##words;						\
+ C_LABEL(__memset_power6)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__memset_power6)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power6/memset.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-power7.S
new file mode 100644
index 0000000000..ced4cb015b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-power7.S
@@ -0,0 +1,38 @@
+/* Optimized memset implementation for PowerPC32/POWER7.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+ .globl C_SYMBOL_NAME(__memset_power7);				\
+ .type C_SYMBOL_NAME(__memset_power7),@function;		\
+ .align ALIGNARG(alignt);					\
+ EALIGN_W_##words;						\
+ C_LABEL(__memset_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__memset_power7)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/memset.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-ppc32.S
new file mode 100644
index 0000000000..63cd5b4eea
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset-ppc32.S
@@ -0,0 +1,41 @@
+/* Default memset implementation for PowerPC32.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#if defined SHARED && IS_IN (libc)
+# undef EALIGN
+# define EALIGN(name, alignt, words)				\
+  .globl C_SYMBOL_NAME(__memset_ppc);				\
+  .type C_SYMBOL_NAME(__memset_ppc),@function;		\
+  .align ALIGNARG(alignt);					\
+  EALIGN_W_##words;						\
+  C_LABEL(__memset_ppc)					\
+  cfi_startproc;
+
+# undef END
+# define END(name)						\
+  cfi_endproc;							\
+  ASM_SIZE_DIRECTIVE(__memset_ppc)
+
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name)				\
+    .globl __GI_memset; __GI_memset = __memset_ppc
+#endif
+
+#include <sysdeps/powerpc/powerpc32/power4/memset.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset.c
new file mode 100644
index 0000000000..afcca12c78
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/memset.c
@@ -0,0 +1,39 @@
+/* Multiple versions of memset.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Define multiple versions only for definition in libc.  */
+#if defined SHARED && IS_IN (libc)
+# define memset __redirect_memset
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (memset) __memset_ppc attribute_hidden;
+extern __typeof (memset) __memset_power6 attribute_hidden;
+extern __typeof (memset) __memset_power7 attribute_hidden;
+# undef memset
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+libc_ifunc_redirected (__redirect_memset, memset,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __memset_power7
+		       : (hwcap & PPC_FEATURE_ARCH_2_05)
+			 ? __memset_power6
+			 : __memset_ppc);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr-power7.S
new file mode 100644
index 0000000000..e088c6b046
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr-power7.S
@@ -0,0 +1,40 @@
+/* Optimized rawmemchr implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+ .globl C_SYMBOL_NAME(__rawmemchr_power7);			\
+ .type C_SYMBOL_NAME(__rawmemchr_power7),@function;		\
+ C_LABEL(__rawmemchr_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__rawmemchr_power7)
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/rawmemchr.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr-ppc32.c
new file mode 100644
index 0000000000..bce76cbe75
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr-ppc32.c
@@ -0,0 +1,32 @@
+/* PowerPC32 default implementation of rawmemchr.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+
+#define RAWMEMCHR  __rawmemchr_ppc
+#undef weak_alias
+#define weak_alias(a, b)
+#ifdef SHARED
+# undef libc_hidden_def
+# define libc_hidden_def(name)  \
+  __hidden_ver1 (__rawmemchr_ppc, __GI___rawmemchr, __rawmemchr_ppc);
+#endif
+
+extern __typeof (rawmemchr) __rawmemchr_ppc attribute_hidden;
+
+#include <string/rawmemchr.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr.c
new file mode 100644
index 0000000000..6ea56db0af
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rawmemchr.c
@@ -0,0 +1,38 @@
+/* Multiple versions of rawmemchr.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# define __rawmemchr __redirect___rawmemchr
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (__rawmemchr) __rawmemchr_ppc attribute_hidden;
+extern __typeof (__rawmemchr) __rawmemchr_power7 attribute_hidden;
+# undef __rawmemchr
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+libc_ifunc_redirected (__redirect___rawmemchr, __rawmemchr,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __rawmemchr_power7
+		       : __rawmemchr_ppc);
+weak_alias (__rawmemchr, rawmemchr)
+#else
+#include <string/rawmemchr.c>
+#endif
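
The dispatcher above is the piece user code actually reaches: <string.h> is included with __rawmemchr temporarily #defined to __redirect___rawmemchr so the public prototype does not clash with the ifunc definition, both variants are declared, and libc_ifunc_redirected emits an IFUNC whose resolver returns __rawmemchr_power7 when the hwcap word has PPC_FEATURE_HAS_VSX set and __rawmemchr_ppc otherwise. The following is a minimal sketch of the same runtime-selection mechanism outside glibc, using GCC's ifunc attribute; the *_demo names and the has_fast_variant() check are made up, and glibc's real resolver is generated by the libc_ifunc* macros and tests the hwcap bits handed over by the dynamic loader.

#include <stdio.h>

typedef void *(*rawmemchr_fn) (const void *, int);

static void *rawmemchr_portable (const void *s, int c)
{
  const unsigned char *p = s;
  while (*p != (unsigned char) c)
    ++p;
  return (void *) p;
}

/* Stand-in for an optimized variant; same behaviour here.  */
static void *rawmemchr_optimized (const void *s, int c)
{
  return rawmemchr_portable (s, c);
}

/* Stand-in for a CPU-feature test; glibc's powerpc resolvers test bits such
   as PPC_FEATURE_HAS_VSX in the hwcap word instead of calling anything at
   resolve time.  */
static int has_fast_variant (void)
{
  return 0;
}

/* Resolver: runs while relocations are processed and returns the chosen
   implementation, which is then bound to rawmemchr_demo.  */
static rawmemchr_fn resolve_rawmemchr (void)
{
  return has_fast_variant () ? rawmemchr_optimized : rawmemchr_portable;
}

void *rawmemchr_demo (const void *, int)
     __attribute__ ((ifunc ("resolve_rawmemchr")));

int main (void)
{
  printf ("%s\n", (char *) rawmemchr_demo ("glibc", 'i'));
  return 0;
}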
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-memcmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-memcmp.S
new file mode 100644
index 0000000000..b676dd147e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-memcmp.S
@@ -0,0 +1,19 @@
+/* Loader memcmp implementation for PowerPC32.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdeps/powerpc/powerpc32/power4/memcmp.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-memset.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-memset.S
new file mode 100644
index 0000000000..b9eb81328f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-memset.S
@@ -0,0 +1,18 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdeps/powerpc/powerpc32/power4/memset.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-strchr.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-strchr.S
new file mode 100644
index 0000000000..5d197557af
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-strchr.S
@@ -0,0 +1,18 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdeps/powerpc/powerpc32/strchr.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-strnlen.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-strnlen.c
new file mode 100644
index 0000000000..79704aa2d9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/rtld-strnlen.c
@@ -0,0 +1,18 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string/strnlen.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp-power7.S
new file mode 100644
index 0000000000..f9324a972e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp-power7.S
@@ -0,0 +1,39 @@
+/* Optimized strcasecmp implementation for PowerPC32.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+ .globl C_SYMBOL_NAME(__strcasecmp_power7);			\
+ .type C_SYMBOL_NAME(__strcasecmp_power7),@function;		\
+ C_LABEL(__strcasecmp_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__strcasecmp_power7)
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/strcasecmp.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp.c
new file mode 100644
index 0000000000..da7d8415d2
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp.c
@@ -0,0 +1,41 @@
+/* Multiple versions of strcasecmp.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# include <string.h>
+# define strcasecmp __strcasecmp_ppc
+
+extern __typeof (__strcasecmp) __strcasecmp_ppc attribute_hidden;
+extern __typeof (__strcasecmp) __strcasecmp_power7 attribute_hidden;
+#endif
+
+#include <string/strcasecmp.c>
+#undef strcasecmp
+
+#if IS_IN (libc)
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (__strcasecmp) __libc_strcasecmp;
+libc_ifunc (__libc_strcasecmp,
+	    (hwcap & PPC_FEATURE_HAS_VSX)
+            ? __strcasecmp_power7
+            : __strcasecmp_ppc);
+
+weak_alias (__libc_strcasecmp, strcasecmp)
+#endif
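
The C-side variants follow a different recipe from the assembly wrappers: string/strcasecmp.c is compiled with strcasecmp renamed to __strcasecmp_ppc, then libc_ifunc builds __libc_strcasecmp as the selector and weak_alias publishes it under the ANSI name. A rough sketch of that alias wiring with plain GCC attributes follows; the *_demo names are hypothetical, and the forwarding function merely stands in for the IFUNC selector.

#include <ctype.h>
#include <stdio.h>

/* Reference behaviour, standing in for string/strcasecmp.c compiled under
   the name __strcasecmp_ppc.  */
static int strcasecmp_generic (const char *a, const char *b)
{
  while (*a && tolower ((unsigned char) *a) == tolower ((unsigned char) *b))
    a++, b++;
  return tolower ((unsigned char) *a) - tolower ((unsigned char) *b);
}

/* In glibc this symbol is the IFUNC that picks __strcasecmp_power7 or
   __strcasecmp_ppc; here it simply forwards.  */
int __libc_strcasecmp_demo (const char *a, const char *b)
{
  return strcasecmp_generic (a, b);
}

/* Roughly what weak_alias (__libc_strcasecmp, strcasecmp) expands to: the
   public name is a weak ELF alias for the internal one.  */
int strcasecmp_demo (const char *, const char *)
    __attribute__ ((weak, alias ("__libc_strcasecmp_demo")));

int main (void)
{
  printf ("%d\n", strcasecmp_demo ("GLIBC", "glibc"));
  return 0;
}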
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp_l-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp_l-power7.S
new file mode 100644
index 0000000000..66e0584139
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp_l-power7.S
@@ -0,0 +1,41 @@
+/* Optimized strcasecmp_l implementation for PowerPC32/POWER7.
+   Copyright (C) 2011-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+ .globl C_SYMBOL_NAME(__strcasecmp_l_power7);			\
+ .type C_SYMBOL_NAME(__strcasecmp_l_power7),@function;		\
+ C_LABEL(__strcasecmp_l_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__strcasecmp_l_power7)
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#define USE_IN_EXTENDED_LOCALE_MODEL
+
+#include <sysdeps/powerpc/powerpc32/power7/strcasecmp.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp_l.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp_l.c
new file mode 100644
index 0000000000..85411f5558
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strcasecmp_l.c
@@ -0,0 +1,41 @@
+/* Multiple versions of strcasecmp_l.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# include <string.h>
+# define strcasecmp_l __strcasecmp_l_ppc
+
+extern __typeof (__strcasecmp_l) __strcasecmp_l_ppc attribute_hidden;
+extern __typeof (__strcasecmp_l) __strcasecmp_l_power7 attribute_hidden;
+#endif
+
+#include <string/strcasecmp_l.c>
+#undef strcasecmp_l
+
+#if IS_IN (libc)
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (__strcasecmp_l) __libc_strcasecmp_l;
+libc_ifunc (__libc_strcasecmp_l,
+	    (hwcap & PPC_FEATURE_HAS_VSX)
+            ? __strcasecmp_l_power7
+            : __strcasecmp_l_ppc);
+
+weak_alias (__libc_strcasecmp_l, strcasecmp_l)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr-power7.S
new file mode 100644
index 0000000000..7624a27bbd
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr-power7.S
@@ -0,0 +1,39 @@
+/* Optimized strchr implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+ .globl C_SYMBOL_NAME(__strchr_power7);			\
+ .type C_SYMBOL_NAME(__strchr_power7),@function;		\
+ C_LABEL(__strchr_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__strchr_power7)
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/strchr.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr-ppc32.S
new file mode 100644
index 0000000000..7dcd55af59
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr-ppc32.S
@@ -0,0 +1,41 @@
+/* PowerPC32 default implementation of strchr.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#ifdef SHARED
+# undef ENTRY
+# define ENTRY(name)						\
+    .globl C_SYMBOL_NAME(__strchr_ppc);			\
+    .type C_SYMBOL_NAME(__strchr_ppc),@function;		\
+    .align ALIGNARG(2);						\
+    C_LABEL(__strchr_ppc)					\
+    cfi_startproc;						\
+    CALL_MCOUNT
+
+# undef END
+# define END(name)						\
+    cfi_endproc;						\
+    ASM_SIZE_DIRECTIVE(__strchr_ppc)				\
+
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name)				\
+    .globl __GI_strchr; __GI_strchr = __strchr_ppc
+#endif
+
+#include <sysdeps/powerpc/powerpc32/strchr.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr.c
new file mode 100644
index 0000000000..712bc1a4b8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchr.c
@@ -0,0 +1,39 @@
+/* Multiple versions of strchr.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Define multiple versions only for definition in libc.  */
+#if defined SHARED && IS_IN (libc)
+# define strchr __redirect_strchr
+/* Omit the strchr inline definitions because they would redefine strchr.  */
+# define __NO_STRING_INLINES
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (strchr) __strchr_ppc attribute_hidden;
+extern __typeof (strchr) __strchr_power7 attribute_hidden;
+# undef strchr
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+libc_ifunc_redirected (__redirect_strchr,  strchr,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __strchr_power7
+		       : __strchr_ppc);
+weak_alias (strchr, index)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul-power7.S
new file mode 100644
index 0000000000..3baad50818
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul-power7.S
@@ -0,0 +1,39 @@
+/* Optimized strchrnul implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+ .globl C_SYMBOL_NAME(__strchrnul_power7);			\
+ .type C_SYMBOL_NAME(__strchrnul_power7),@function;		\
+ C_LABEL(__strchrnul_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__strchrnul_power7)
+
+#undef weak_alias
+#define weak_alias(name, alias)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/strchrnul.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul-ppc32.c
new file mode 100644
index 0000000000..c981eb67f3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul-ppc32.c
@@ -0,0 +1,28 @@
+/* PowerPC32 default implementation of strchrnul.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+
+#define STRCHRNUL  __strchrnul_ppc
+
+#undef weak_alias
+#define weak_alias(a, b)
+
+extern __typeof (strchrnul) __strchrnul_ppc attribute_hidden;
+
+#include <string/strchrnul.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul.c
new file mode 100644
index 0000000000..b8f853d8e4
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strchrnul.c
@@ -0,0 +1,37 @@
+/* Multiple versions of strchrnul.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (__strchrnul) __strchrnul_ppc attribute_hidden;
+extern __typeof (__strchrnul) __strchrnul_power7 attribute_hidden;
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+libc_ifunc (__strchrnul,
+	    (hwcap & PPC_FEATURE_HAS_VSX)
+            ? __strchrnul_power7
+            : __strchrnul_ppc);
+
+weak_alias (__strchrnul, strchrnul)
+#else
+#include <string/strchrnul.c>
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen-power7.S
new file mode 100644
index 0000000000..7681b827d6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen-power7.S
@@ -0,0 +1,36 @@
+/* Optimized strlen implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+ .globl C_SYMBOL_NAME(__strlen_power7);				\
+ .type C_SYMBOL_NAME(__strlen_power7),@function;		\
+ C_LABEL(__strlen_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__strlen_power7)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/strlen.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen-ppc32.S
new file mode 100644
index 0000000000..b665977e17
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen-ppc32.S
@@ -0,0 +1,41 @@
+/* Default strlen implementation for PowerPC32.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if defined SHARED && IS_IN (libc)
+
+#include <sysdep.h>
+
+# undef ENTRY
+# define ENTRY(name)						\
+  .globl C_SYMBOL_NAME(__strlen_ppc);				\
+  .type C_SYMBOL_NAME(__strlen_ppc),@function;		\
+  C_LABEL(__strlen_ppc)					\
+  cfi_startproc;
+
+# undef END
+# define END(name)						\
+  cfi_endproc;							\
+  ASM_SIZE_DIRECTIVE(__strlen_ppc)
+
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name)				\
+  .globl __GI_strlen; __GI_strlen = __strlen_ppc
+
+#endif
+
+#include <sysdeps/powerpc/powerpc32/strlen.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen.c
new file mode 100644
index 0000000000..c13940e999
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strlen.c
@@ -0,0 +1,33 @@
+/* Multiple versions of strlen.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if defined SHARED && IS_IN (libc)
+# define strlen __redirect_strlen
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (strlen) __strlen_ppc attribute_hidden;
+extern __typeof (strlen) __strlen_power7 attribute_hidden;
+# undef strlen
+
+libc_ifunc_redirected (__redirect_strlen, strlen,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __strlen_power7
+		       : __strlen_ppc);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase-power7.c
new file mode 100644
index 0000000000..a49bed9278
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase-power7.c
@@ -0,0 +1,26 @@
+/* Optimized strncasecmp implementation for POWER7.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+
+#include <string.h>
+
+#define __strncasecmp __strncasecmp_power7
+
+extern __typeof (strncasecmp) __strncasecmp_power7 attribute_hidden;
+
+#include <string/strncase.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase.c
new file mode 100644
index 0000000000..089faa9853
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase.c
@@ -0,0 +1,41 @@
+/* Multiple versions of strncasecmp.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# include <string.h>
+# define strncasecmp __strncasecmp_ppc
+extern __typeof (__strncasecmp) __strncasecmp_ppc attribute_hidden;
+extern __typeof (__strncasecmp) __strncasecmp_power7 attribute_hidden;
+#endif
+
+#include <string/strncase.c>
+#undef strncasecmp
+
+#if IS_IN (libc)
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+extern __typeof (__strncasecmp) __libc_strncasecmp;
+libc_ifunc (__libc_strncasecmp,
+	     (hwcap & PPC_FEATURE_HAS_VSX)
+             ? __strncasecmp_power7
+             : __strncasecmp_ppc);
+weak_alias (__libc_strncasecmp, strncasecmp)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase_l-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase_l-power7.c
new file mode 100644
index 0000000000..80f7d48133
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase_l-power7.c
@@ -0,0 +1,26 @@
+/* Optimized strncasecmp_l implementation for POWER7.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+
+#define __strncasecmp_l __strncasecmp_l_power7
+#define USE_IN_EXTENDED_LOCALE_MODEL    1
+
+extern __typeof (strncasecmp_l) __strncasecmp_l_power7 attribute_hidden;
+
+#include <string/strncase.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase_l.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase_l.c
new file mode 100644
index 0000000000..c988c8dd3f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncase_l.c
@@ -0,0 +1,42 @@
+/* Multiple versions of strncasecmp_l.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# include <string.h>
+# define strncasecmp_l __strncasecmp_l_ppc
+extern __typeof (__strncasecmp_l) __strncasecmp_l_ppc attribute_hidden;
+extern __typeof (__strncasecmp_l) __strncasecmp_l_power7 attribute_hidden;
+#endif
+
+#include <string/strncase_l.c>
+#undef strncasecmp_l
+
+#if IS_IN (libc)
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+extern __typeof (__strncasecmp_l) __libc_strncasecmp_l;
+libc_ifunc (__libc_strncasecmp_l,
+	     (hwcap & PPC_FEATURE_HAS_VSX)
+             ? __strncasecmp_l_power7
+             : __strncasecmp_l_ppc);
+
+weak_alias (__libc_strncasecmp_l, strncasecmp_l)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp-power7.S
new file mode 100644
index 0000000000..cbe969a5a4
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp-power7.S
@@ -0,0 +1,38 @@
+/* Optimized strncmp implementation for POWER7/PowerPC32.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef EALIGN
+#define EALIGN(name, alignt, words)				\
+ .globl C_SYMBOL_NAME(__strncmp_power7);			\
+ .type C_SYMBOL_NAME(__strncmp_power7),@function;		\
+ .align ALIGNARG(alignt);					\
+ EALIGN_W_##words;						\
+ C_LABEL(__strncmp_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__strncmp_power7)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/strncmp.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp-ppc32.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp-ppc32.S
new file mode 100644
index 0000000000..2f5d2d3651
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp-ppc32.S
@@ -0,0 +1,40 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#if defined SHARED && IS_IN (libc)
+# undef EALIGN
+# define EALIGN(name, alignt, words)				\
+  .globl C_SYMBOL_NAME(__strncmp_ppc);			\
+  .type C_SYMBOL_NAME(__strncmp_ppc),@function;		\
+  .align ALIGNARG(alignt);					\
+  EALIGN_W_##words;						\
+  C_LABEL(__strncmp_ppc)					\
+  cfi_startproc;
+
+# undef END
+# define END(name)						\
+  cfi_endproc;							\
+  ASM_SIZE_DIRECTIVE(__strncmp_ppc)
+
+# undef libc_hidden_builtin_def
+# define libc_hidden_builtin_def(name)				\
+    .globl __GI_strncmp; __GI_strncmp = __strncmp_ppc
+#endif
+
+#include <sysdeps/powerpc/powerpc32/power4/strncmp.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp.c
new file mode 100644
index 0000000000..bb4e892df8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strncmp.c
@@ -0,0 +1,39 @@
+/* Multiple versions of strncmp.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Define multiple versions only for definition in libc.  */
+#if defined SHARED && IS_IN (libc)
+# define strncmp __redirect_strncmp
+/* Omit the strncmp inline definitions because they would redefine strncmp.  */
+# define __NO_STRING_INLINES
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (strncmp) __strncmp_ppc attribute_hidden;
+extern __typeof (strncmp) __strncmp_power4 attribute_hidden;
+extern __typeof (strncmp) __strncmp_power7 attribute_hidden;
+# undef strncmp
+
+/* Avoid DWARF definition DIE on ifunc symbol so that GDB can handle
+   ifunc symbol properly.  */
+libc_ifunc_redirected (__redirect_strncmp, strncmp,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __strncmp_power7
+		       : __strncmp_ppc);
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen-power7.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen-power7.S
new file mode 100644
index 0000000000..3f5a32f0c6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen-power7.S
@@ -0,0 +1,40 @@
+/* Optimized strnlen implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#undef ENTRY
+#define ENTRY(name)						\
+ .globl C_SYMBOL_NAME(__strnlen_power7);			\
+ .type C_SYMBOL_NAME(__strnlen_power7),@function;		\
+ C_LABEL(__strnlen_power7)					\
+ cfi_startproc;
+
+#undef END
+#define END(name)						\
+ cfi_endproc;							\
+ ASM_SIZE_DIRECTIVE(__strnlen_power7)
+
+#undef libc_hidden_def
+#define libc_hidden_def(name)
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc32/power7/strnlen.S>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen-ppc32.c
new file mode 100644
index 0000000000..b41fc9d359
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen-ppc32.c
@@ -0,0 +1,28 @@
+/* Default strnlen implementation for PowerPC32.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define STRNLEN  __strnlen_ppc
+#ifdef SHARED
+# undef libc_hidden_def
+# define libc_hidden_def(name)  \
+    __hidden_ver1 (__strnlen_ppc, __GI_strnlen, __strnlen_ppc); \
+    strong_alias (__strnlen_ppc, __strnlen_ppc_1); \
+    __hidden_ver1 (__strnlen_ppc_1, __GI___strnlen, __strnlen_ppc_1);
+#endif
+
+#include <string/strnlen.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen.c
new file mode 100644
index 0000000000..f2883e69eb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/strnlen.c
@@ -0,0 +1,36 @@
+/* Multiple versions of strnlen.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# define strnlen __redirect_strnlen
+# define __strnlen __redirect___strnlen
+# include <string.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (__strnlen) __strnlen_ppc attribute_hidden;
+extern __typeof (__strnlen) __strnlen_power7 attribute_hidden;
+# undef strnlen
+# undef __strnlen
+
+libc_ifunc_redirected (__redirect___strnlen, __strnlen,
+		       (hwcap & PPC_FEATURE_HAS_VSX)
+		       ? __strnlen_power7
+		       : __strnlen_ppc);
+weak_alias (__strnlen, strnlen)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-power6.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-power6.c
new file mode 100644
index 0000000000..6610b5ef82
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-power6.c
@@ -0,0 +1,26 @@
+/* wcschr.c - Wide Character Search for powerpc32/power6.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <wchar.h>
+
+#define WCSCHR __wcschr_power6
+
+#undef libc_hidden_def
+#define libc_hidden_def(name)
+
+#include <sysdeps/powerpc/power6/wcschr.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-power7.c
new file mode 100644
index 0000000000..7e22c441ac
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-power7.c
@@ -0,0 +1,26 @@
+/* wcschr.c - Wide Character Search for powerpc32/power7.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <wchar.h>
+
+#define WCSCHR __wcschr_power7
+
+#undef libc_hidden_def
+#define libc_hidden_def(name)
+
+#include <sysdeps/powerpc/power6/wcschr.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-ppc32.c
new file mode 100644
index 0000000000..777ec080b2
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr-ppc32.c
@@ -0,0 +1,43 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <wchar.h>
+
+#if IS_IN (libc)
+# undef libc_hidden_weak
+# define libc_hidden_weak(name)
+
+# undef weak_alias
+# undef libc_hidden_def
+
+# ifdef SHARED
+#  define libc_hidden_def(name)  \
+    __hidden_ver1 (__wcschr_ppc, __GI_wcschr, __wcschr_ppc); \
+    strong_alias (__wcschr_ppc, __wcschr_ppc_1); \
+    __hidden_ver1 (__wcschr_ppc_1, __GI___wcschr, __wcschr_ppc_1);
+#  define weak_alias(name,alias)
+# else
+#  define weak_alias(name, alias) \
+    _weak_alias(__wcschr_ppc, __wcschr)
+#  define libc_hidden_def(name)
+# endif /* SHARED  */
+#endif
+
+extern __typeof (wcschr) __wcschr_ppc;
+
+#define WCSCHR  __wcschr_ppc
+#include <wcsmbs/wcschr.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr.c
new file mode 100644
index 0000000000..059665f1b1
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcschr.c
@@ -0,0 +1,41 @@
+/* Multiple versions of wcschr.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# define wcschr __redirect_wcschr
+# include <wchar.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (__redirect_wcschr) __wcschr_ppc attribute_hidden;
+extern __typeof (__redirect_wcschr) __wcschr_power6 attribute_hidden;
+extern __typeof (__redirect_wcschr) __wcschr_power7 attribute_hidden;
+
+extern __typeof (__redirect_wcschr) __libc_wcschr;
+
+libc_ifunc (__libc_wcschr,
+	     (hwcap & PPC_FEATURE_HAS_VSX)
+             ? __wcschr_power7 :
+	       (hwcap & PPC_FEATURE_ARCH_2_05)
+	       ? __wcschr_power6
+             : __wcschr_ppc);
+#undef wcschr
+weak_alias (__libc_wcschr, wcschr)
+#else
+#include <wcsmbs/wcschr.c>
+#endif
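
wcschr is the first routine here with a three-way choice: the selector prefers __wcschr_power7 when VSX is available, falls back to __wcschr_power6 on ISA 2.05 hardware, and otherwise uses the generic __wcschr_ppc, so the most capable variant must be tested first. A small self-contained sketch of that tiered selection is given below; the DEMO_HWCAP_* bits and *_demo names are placeholders for PPC_FEATURE_HAS_VSX, PPC_FEATURE_ARCH_2_05 and the real symbols.

#include <wchar.h>

typedef wchar_t *(*wcschr_fn) (const wchar_t *, wchar_t);

/* Generic body standing in for __wcschr_ppc; the "optimized" variants just
   forward to it here.  */
static wchar_t *wcschr_ppc_demo (const wchar_t *s, wchar_t c)
{
  do
    if (*s == c)
      return (wchar_t *) s;
  while (*s++ != L'\0');
  return NULL;
}

static wchar_t *wcschr_power6_demo (const wchar_t *s, wchar_t c)
{
  return wcschr_ppc_demo (s, c);
}

static wchar_t *wcschr_power7_demo (const wchar_t *s, wchar_t c)
{
  return wcschr_ppc_demo (s, c);
}

/* Placeholder hwcap bits; glibc tests PPC_FEATURE_HAS_VSX and
   PPC_FEATURE_ARCH_2_05 in the real hwcap word.  */
#define DEMO_HWCAP_VSX       0x1UL
#define DEMO_HWCAP_ARCH_2_05 0x2UL

/* Most capable variant first, then the older one, then the fallback.  */
static wcschr_fn select_wcschr (unsigned long hwcap)
{
  return (hwcap & DEMO_HWCAP_VSX)       ? wcschr_power7_demo
       : (hwcap & DEMO_HWCAP_ARCH_2_05) ? wcschr_power6_demo
       : wcschr_ppc_demo;
}

int main (void)
{
  wcschr_fn f = select_wcschr (DEMO_HWCAP_ARCH_2_05);
  wprintf (L"%ls\n", f (L"powerpc", L'w'));
  return 0;
}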
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-power6.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-power6.c
new file mode 100644
index 0000000000..8e732fc80c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-power6.c
@@ -0,0 +1,22 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <wchar.h>
+
+#define WCSCPY __wcscpy_power6
+
+#include <sysdeps/powerpc/power6/wcscpy.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-power7.c
new file mode 100644
index 0000000000..dece1024f4
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-power7.c
@@ -0,0 +1,22 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <wchar.h>
+
+#define WCSCPY __wcscpy_power7
+
+#include <sysdeps/powerpc/power6/wcscpy.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-ppc32.c
new file mode 100644
index 0000000000..b48ff54d92
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy-ppc32.c
@@ -0,0 +1,26 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <wchar.h>
+
+#if IS_IN (libc)
+# define WCSCPY  __wcscpy_ppc
+#endif
+
+extern __typeof (wcscpy) __wcscpy_ppc;
+
+#include <wcsmbs/wcscpy.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy.c
new file mode 100644
index 0000000000..a59e794f03
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcscpy.c
@@ -0,0 +1,36 @@
+/* Multiple versions of wcscpy.
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# include <wchar.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (wcscpy) __wcscpy_ppc attribute_hidden;
+extern __typeof (wcscpy) __wcscpy_power6 attribute_hidden;
+extern __typeof (wcscpy) __wcscpy_power7 attribute_hidden;
+
+libc_ifunc (wcscpy,
+	     (hwcap & PPC_FEATURE_HAS_VSX)
+             ? __wcscpy_power7 :
+	       (hwcap & PPC_FEATURE_ARCH_2_05)
+	       ? __wcscpy_power6
+             : __wcscpy_ppc);
+#else
+#include <wcsmbs/wcscpy.c>
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-power6.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-power6.c
new file mode 100644
index 0000000000..0391e12442
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-power6.c
@@ -0,0 +1,20 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define WCSRCHR      __wcsrchr_power6
+
+#include <sysdeps/powerpc/power6/wcsrchr.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-power7.c
new file mode 100644
index 0000000000..1167a75734
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-power7.c
@@ -0,0 +1,20 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define WCSRCHR      __wcsrchr_power7
+
+#include <sysdeps/powerpc/power6/wcsrchr.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-ppc32.c
new file mode 100644
index 0000000000..1c8e12eefb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr-ppc32.c
@@ -0,0 +1,26 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <wchar.h>
+
+#if IS_IN (libc)
+# define WCSRCHR  __wcsrchr_ppc
+#endif
+
+extern __typeof (wcsrchr) __wcsrchr_ppc;
+
+#include <wcsmbs/wcsrchr.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr.c
new file mode 100644
index 0000000000..10820443b4
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wcsrchr.c
@@ -0,0 +1,36 @@
+/* Multiple versions of wcsrchr
+   Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# include <wchar.h>
+# include <shlib-compat.h>
+# include "init-arch.h"
+
+extern __typeof (wcsrchr) __wcsrchr_ppc attribute_hidden;
+extern __typeof (wcsrchr) __wcsrchr_power6 attribute_hidden;
+extern __typeof (wcsrchr) __wcsrchr_power7 attribute_hidden;
+
+libc_ifunc (wcsrchr,
+	     (hwcap & PPC_FEATURE_HAS_VSX)
+             ? __wcsrchr_power7 :
+	       (hwcap & PPC_FEATURE_ARCH_2_05)
+	       ? __wcsrchr_power6
+             : __wcsrchr_ppc);
+#else
+#include <wcsmbs/wcsrchr.c>
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wordcopy-power7.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wordcopy-power7.c
new file mode 100644
index 0000000000..d2095d85ac
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wordcopy-power7.c
@@ -0,0 +1,23 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define WORDCOPY_FWD_ALIGNED      _wordcopy_fwd_aligned_power7
+#define WORDCOPY_FWD_DEST_ALIGNED _wordcopy_fwd_dest_aligned_power7
+#define WORDCOPY_BWD_ALIGNED      _wordcopy_bwd_aligned_power7
+#define WORDCOPY_BWD_DEST_ALIGNED _wordcopy_bwd_dest_aligned_power7
+
+#include <sysdeps/powerpc/power6/wordcopy.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wordcopy-ppc32.c b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wordcopy-ppc32.c
new file mode 100644
index 0000000000..ecdc2fa73d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/multiarch/wordcopy-ppc32.c
@@ -0,0 +1,27 @@
+/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#if IS_IN (libc)
+# define WORDCOPY_FWD_ALIGNED      _wordcopy_fwd_aligned_ppc
+# define WORDCOPY_FWD_DEST_ALIGNED _wordcopy_fwd_dest_aligned_ppc
+# define WORDCOPY_BWD_ALIGNED      _wordcopy_bwd_aligned_ppc
+# define WORDCOPY_BWD_DEST_ALIGNED _wordcopy_bwd_dest_aligned_ppc
+
+# include <sysdeps/powerpc/power4/wordcopy.c>
+#else
+# include <string/wordcopy.c>
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power4/strncmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/strncmp.S
new file mode 100644
index 0000000000..42a67e7e8a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power4/strncmp.S
@@ -0,0 +1,196 @@
+/* Optimized strncmp implementation for PowerPC32.
+   Copyright (C) 2003-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* See strlen.S for comments on how the end-of-string testing works.  */
+
+/* int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5])  */
+
+EALIGN (strncmp, 4, 0)
+
+#define rTMP2	r0
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rN	r5	/* max string length */
+#define rWORD1	r6	/* current word in s1 */
+#define rWORD2	r7	/* current word in s2 */
+#define rWORD3  r10
+#define rWORD4  r11
+#define rFEFE	r8	/* constant 0xfefefeff (-0x01010101) */
+#define r7F7F	r9	/* constant 0x7f7f7f7f */
+#define rNEG	r10	/* ~(word in s1 | 0x7f7f7f7f) */
+#define rBITDIF	r11	/* bits that differ in s1 & s2 words */
+#define rTMP	r12
+
+	dcbt	0,rSTR1
+	or	rTMP, rSTR2, rSTR1
+	lis	r7F7F, 0x7f7f
+	dcbt	0,rSTR2
+	clrlwi.	rTMP, rTMP, 30
+	cmplwi	cr1, rN, 0
+	lis	rFEFE, -0x101
+	bne	L(unaligned)
+/* We are word aligned so set up for two loops.  First a word
+   loop, then fall into the byte loop if there is any residual.  */
+	srwi.	rTMP, rN, 2
+	clrlwi	rN, rN, 30
+	addi	rFEFE, rFEFE, -0x101
+	addi	r7F7F, r7F7F, 0x7f7f
+	cmplwi	cr1, rN, 0
+	beq	L(unaligned)
+
+	mtctr	rTMP	/* Power4 wants mtctr 1st in dispatch group.  */
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+	b	L(g1)
+
+L(g0):
+	lwzu	rWORD1, 4(rSTR1)
+	bne-	cr1, L(different)
+	lwzu	rWORD2, 4(rSTR2)
+L(g1):	add	rTMP, rFEFE, rWORD1
+	nor	rNEG, r7F7F, rWORD1
+	bdz	L(tail)
+	and.	rTMP, rTMP, rNEG
+	cmpw	cr1, rWORD1, rWORD2
+	beq+	L(g0)
+
+/* OK. We've hit the end of the string. We need to be careful that
+   we don't compare two strings as different because of gunk beyond
+   the end of the strings...  */
+
+#ifdef __LITTLE_ENDIAN__
+L(endstring):
+	slwi	rTMP, rTMP, 1
+	addi    rTMP2, rTMP, -1
+	andc    rTMP2, rTMP2, rTMP
+	and	rWORD2, rWORD2, rTMP2		/* Mask off gunk.  */
+	and	rWORD1, rWORD1, rTMP2
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rldimi	rTMP2, rWORD2, 24, 32
+	rldimi	rTMP, rWORD1, 24, 32
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr+
+	ori	rRTN, rTMP2, 1
+	blr
+
+L(different):
+	lwz	rWORD1, -4(rSTR1)
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rldimi	rTMP2, rWORD2, 24, 32
+	rldimi	rTMP, rWORD1, 24, 32
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr+
+	ori	rRTN, rTMP2, 1
+	blr
+
+#else
+L(endstring):
+	and	rTMP, r7F7F, rWORD1
+	beq	cr1, L(equal)
+	add	rTMP, rTMP, r7F7F
+	xor.	rBITDIF, rWORD1, rWORD2
+	andc	rNEG, rNEG, rTMP
+	blt-	L(highbit)
+	cntlzw	rBITDIF, rBITDIF
+	cntlzw	rNEG, rNEG
+	addi	rNEG, rNEG, 7
+	cmpw	cr1, rNEG, rBITDIF
+	sub	rRTN, rWORD1, rWORD2
+	bgelr+	cr1
+L(equal):
+	li	rRTN, 0
+	blr
+
+L(different):
+	lwz	rWORD1, -4(rSTR1)
+	xor.	rBITDIF, rWORD1, rWORD2
+	sub	rRTN, rWORD1, rWORD2
+	bgelr+
+L(highbit):
+	ori	rRTN, rWORD2, 1
+	blr
+#endif
+
+/* Oh well.  In this case, we just do a byte-by-byte comparison.  */
+	.align 4
+L(tail):
+	and.	rTMP, rTMP, rNEG
+	cmpw	cr1, rWORD1, rWORD2
+	bne-	L(endstring)
+	addi	rSTR1, rSTR1, 4
+	bne-	cr1, L(different)
+	addi	rSTR2, rSTR2, 4
+	cmplwi	cr1, rN, 0
+L(unaligned):
+	mtctr   rN	/* Power4 wants mtctr 1st in dispatch group */
+	ble	cr1, L(ux)
+L(uz):
+	lbz	rWORD1, 0(rSTR1)
+	lbz	rWORD2, 0(rSTR2)
+	.align 4
+L(u1):
+	cmpwi	cr1, rWORD1, 0
+	bdz	L(u4)
+	cmpw	rWORD1, rWORD2
+	beq-	cr1, L(u4)
+	bne-	L(u4)
+	lbzu    rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	cmpwi	cr1, rWORD3, 0
+	bdz	L(u3)
+	cmpw	rWORD3, rWORD4
+	beq-    cr1, L(u3)
+	bne-    L(u3)
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	cmpwi	cr1, rWORD1, 0
+	bdz	L(u4)
+	cmpw	rWORD1, rWORD2
+	beq-	cr1, L(u4)
+	bne-	L(u4)
+	lbzu	rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	cmpwi	cr1, rWORD3, 0
+	bdz	L(u3)
+	cmpw	rWORD3, rWORD4
+	beq-    cr1, L(u3)
+	bne-	L(u3)
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	b       L(u1)
+
+L(u3):  sub     rRTN, rWORD3, rWORD4
+	blr
+L(u4):	sub	rRTN, rWORD1, rWORD2
+	blr
+L(ux):
+	li	rRTN, 0
+	blr
+END (strncmp)
+libc_hidden_builtin_def (strncmp)
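The aligned word loop in this strncmp relies on the classic zero-byte detector: rFEFE holds -0x01010101 (0xfefefeff), r7F7F holds 0x7f7f7f7f, and the add/nor/and. sequence sets bits only in byte positions that hold 0x00.  A small C sketch of the test on a 32-bit word (the helper names here are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* (w - 0x01010101) & ~w & 0x80808080 is non-zero exactly when some
   byte of w is zero; the asm forms ~w & 0x80808080 as ~(w | 0x7f7f7f7f)
   with the nor instruction.  */
static int
has_zero_byte (uint32_t w)
{
  uint32_t tmp = w + 0xfefefeffu;      /* add rTMP, rFEFE, rWORD1 */
  uint32_t neg = ~(w | 0x7f7f7f7fu);   /* nor rNEG, r7F7F, rWORD1 */
  return (tmp & neg) != 0;             /* and. rTMP, rTMP, rNEG */
}

int
main (void)
{
  printf ("%d\n", has_zero_byte (0x61626364u));  /* "abcd" -> 0 */
  printf ("%d\n", has_zero_byte (0x61620064u));  /* has a NUL -> 1 */
  return 0;
}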
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/Implies
new file mode 100644
index 0000000000..02d222d22a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/Implies
@@ -0,0 +1,4 @@
+powerpc/power5+/fpu
+powerpc/power5+
+powerpc/powerpc32/power5/fpu
+powerpc/powerpc32/power5
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/multiarch/Implies
new file mode 100644
index 0000000000..76a985188e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power5/fpu/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S
new file mode 100644
index 0000000000..efe7be8242
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceil.S
@@ -0,0 +1,36 @@
+/* ceil function.  PowerPC32/power5+ version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__ceil, 4, 0)
+	frip	fp1, fp1
+	blr
+	END (__ceil)
+
+weak_alias (__ceil, ceil)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__ceil, ceill)
+strong_alias (__ceil, __ceill)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __ceil, ceill, GLIBC_2_0)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S
new file mode 100644
index 0000000000..cff058e7ef
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_ceilf.S
@@ -0,0 +1,29 @@
+/* ceilf function.  PowerPC32/power5+ version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__ceilf, 4, 0)
+	frip	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__ceilf)
+
+weak_alias (__ceilf, ceilf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S
new file mode 100644
index 0000000000..9f040d8457
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_floor.S
@@ -0,0 +1,36 @@
+/* floor function.  PowerPC32/power5+ version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__floor, 4, 0)
+	frim	fp1, fp1
+	blr
+	END (__floor)
+
+weak_alias (__floor, floor)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__floor, floorl)
+strong_alias (__floor, __floorl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __floor, floorl, GLIBC_2_0)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S
new file mode 100644
index 0000000000..b84e4c64fb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_floorf.S
@@ -0,0 +1,29 @@
+/* floorf function.  PowerPC32/power5+ version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__floorf, 4, 0)
+	frim	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__floorf)
+
+weak_alias (__floorf, floorf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_llround.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_llround.S
new file mode 100644
index 0000000000..adbc7ebe18
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_llround.S
@@ -0,0 +1,59 @@
+/* llround function.  POWER5+, PowerPC32 version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* long long [r3, r4] llround (double x [fp1])
+   IEEE 1003.1 llround function.  IEEE specifies "round to the nearest
+   integer value, rounding halfway cases away from zero, regardless of
+   the current rounding mode."  However the PowerPC Architecture defines
+   "Round to Nearest" as "Choose the best approximation.  In case of a
+   tie, choose the one that is even (least significant bit 0)."
+   So we pre-round using the V2.02 Floating Round to Integer Nearest
+   instruction before we use the Floating Convert to Integer Doubleword
+   with round toward zero instruction.  */
+
+	.machine	"power5"
+ENTRY (__llround)
+	stwu    r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	frin	fp2,fp1
+	fctidz	fp3,fp2		/* Convert to integer doubleword, round toward 0.  */
+	stfd	fp3,8(r1)
+	nop	/* Ensure the following load is in a different dispatch  */
+	nop	/* group to avoid pipe stall on POWER4&5.  */
+	nop
+	lwz	r3,8+HIWORD(r1)
+	lwz	r4,8+LOWORD(r1)
+	addi	r1,r1,16
+	blr
+	END (__llround)
+
+weak_alias (__llround, llround)
+
+strong_alias (__llround, __llroundf)
+weak_alias (__llround, llroundf)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__llround, llroundl)
+strong_alias (__llround, __llroundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __llround, llroundl, GLIBC_2_1)
+#endif
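The frin/fctidz pair above gives llround its required ties-away-from-zero behaviour independently of the FPSCR rounding mode: frin rounds to the nearest integral value with ties away from zero, and the convert-with-truncation that follows is then exact.  Ignoring out-of-range inputs, a C sketch of the same idea (round() here plays the role of frin):

#include <math.h>
#include <stdio.h>

/* Pre-round half-away-from-zero, then truncate; the truncation is
   harmless because the value is already integral.  Out-of-range
   inputs (which would overflow long long) are not handled here.  */
static long long
llround_sketch (double x)
{
  return (long long) round (x);
}

int
main (void)
{
  printf ("%lld %lld\n", llround_sketch (2.5), llround_sketch (-2.5));  /* 3 -3 */
  return 0;
}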
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_llroundf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_llroundf.S
new file mode 100644
index 0000000000..030d2fdff8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_llroundf.S
@@ -0,0 +1 @@
+/* __llroundf is in s_llround.S  */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_lround.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_lround.S
new file mode 100644
index 0000000000..f61846331d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_lround.S
@@ -0,0 +1,57 @@
+/* lround function.  POWER5+, PowerPC32 version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* long [r3] lround (double x [fp1])
+   IEEE 1003.1 lround function.  IEEE specifies "round to the nearest
+   integer value, rounding halfway cases away from zero, regardless of
+   the current rounding mode."  However the PowerPC Architecture defines
+   "Round to Nearest" as "Choose the best approximation.  In case of a
+   tie, choose the one that is even (least significant bit 0)."
+   So we pre-round using the V2.02 Floating Round to Integer Nearest
+   instruction before we use the Floating Convert to Integer Word with
+   round toward zero instruction.  */
+
+	.machine	"power5"
+ENTRY (__lround)
+	stwu    r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	frin	fp2,fp1
+	fctiwz	fp3,fp2		/* Convert to integer word, round toward 0.  */
+	stfd	fp3,8(r1)
+	nop	/* Ensure the following load is in a different dispatch  */
+	nop	/* group to avoid pipe stall on POWER4&5.  */
+	nop
+	lwz	r3,8+LOWORD(r1)
+	addi	r1,r1,16
+	blr
+	END (__lround)
+
+weak_alias (__lround, lround)
+
+strong_alias (__lround, __lroundf)
+weak_alias (__lround, lroundf)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__lround, lroundl)
+strong_alias (__lround, __lroundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __lround, lroundl, GLIBC_2_1)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S
new file mode 100644
index 0000000000..91b42352f3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_round.S
@@ -0,0 +1,36 @@
+/* round function.  PowerPC32/power5+ version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__round, 4, 0)
+	frin	fp1, fp1
+	blr
+	END (__round)
+
+weak_alias (__round, round)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__round, roundl)
+strong_alias (__round, __roundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __round, roundl, GLIBC_2_1)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S
new file mode 100644
index 0000000000..4e0c7e5cec
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_roundf.S
@@ -0,0 +1,29 @@
+/* roundf function.  PowerPC32/power5+ version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__roundf, 4, 0)
+	frin	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__roundf)
+
+weak_alias (__roundf, roundf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S
new file mode 100644
index 0000000000..ceca529826
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_trunc.S
@@ -0,0 +1,36 @@
+/* trunc function.  PowerPC32/power5+ version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power5"
+EALIGN (__trunc, 4, 0)
+	friz	fp1, fp1
+	blr
+	END (__trunc)
+
+weak_alias (__trunc, trunc)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__trunc, truncl)
+strong_alias (__trunc, __truncl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __trunc, truncl, GLIBC_2_1)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S
new file mode 100644
index 0000000000..60be314c28
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/fpu/s_truncf.S
@@ -0,0 +1,29 @@
+/* truncf function.  PowerPC32/power5+ version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+	.machine	"power5"
+EALIGN (__truncf, 4, 0)
+	friz	fp1, fp1	/* The rounding instructions are double.  */
+	frsp	fp1, fp1	/* But we need to set overflow for float.  */
+	blr
+	END (__truncf)
+
+weak_alias (__truncf, truncf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/multiarch/Implies
new file mode 100644
index 0000000000..54b3931625
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5+/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power5/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/Implies
new file mode 100644
index 0000000000..17139bf21c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/power4/fpu
+powerpc/powerpc32/power4
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/multiarch/Implies
new file mode 100644
index 0000000000..c6c090a60e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power4/fpu/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/s_isnan.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/s_isnan.S
new file mode 100644
index 0000000000..09a2fe3865
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/s_isnan.S
@@ -0,0 +1,61 @@
+/* isnan().  PowerPC32 version.
+   Copyright (C) 2008-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* int __isnan(x)  */
+	.machine power5
+EALIGN (__isnan, 4, 0)
+	stwu	r1,-32(r1)
+	cfi_adjust_cfa_offset (32)
+	ori	r1,r1,0
+	stfd	fp1,24(r1)	/* copy FPR to GPR */
+	ori	r1,r1,0
+	lwz	r4,24+HIWORD(r1)
+	lwz	r5,24+LOWORD(r1)
+	lis	r0,0x7ff0	/* const long r0 0x7ff00000 00000000 */
+	clrlwi	r4,r4,1		/* x = fabs(x) */
+	cmpw	cr7,r4,r0	/* if (fabs(x) <= inf) */
+	cmpwi	cr6,r5,0
+	li	r3,0		/* then return 0 */
+	addi	r1,r1,32
+	cfi_adjust_cfa_offset (-32)
+	bltlr+	cr7
+	bgt-	cr7,L(NaN)
+	beqlr+	cr6
+L(NaN):
+	li	r3,1		/* else return 1 */
+	blr
+	END (__isnan)
+
+hidden_def (__isnan)
+weak_alias (__isnan, isnan)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__isnan, __isnanl)
+weak_alias (__isnan, isnanl)
+#endif
+
+#if !IS_IN (libm)
+# if LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __isnan, __isnanl, GLIBC_2_0);
+compat_symbol (libc, isnan, isnanl, GLIBC_2_0);
+# endif
+#endif
+
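The __isnan above never touches the FPSCR: it spills the double to the stack, reloads the two halves into GPRs, clears the sign bit of the high word and compares against the +infinity encoding (0x7ff00000 00000000), treating anything strictly larger as a NaN.  A C sketch of the same bit test, assuming IEEE-754 binary64 doubles (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* After clearing the sign bit, every encoding strictly above +infinity
   (exponent all ones, mantissa non-zero) is a NaN.  */
static int
isnan_bits (double x)
{
  uint64_t bits;
  memcpy (&bits, &x, sizeof bits);       /* stfd + lwz/lwz in the asm */
  bits &= 0x7fffffffffffffffULL;         /* clrlwi: drop the sign bit */
  return bits > 0x7ff0000000000000ULL;
}

int
main (void)
{
  double inf_val, nan_val;
  uint64_t inf_bits = 0x7ff0000000000000ULL;
  uint64_t nan_bits = 0x7ff8000000000000ULL;
  memcpy (&inf_val, &inf_bits, sizeof inf_val);
  memcpy (&nan_val, &nan_bits, sizeof nan_val);
  printf ("%d %d\n", isnan_bits (inf_val), isnan_bits (nan_val));  /* 0 1 */
  return 0;
}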
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/s_isnanf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/s_isnanf.S
new file mode 100644
index 0000000000..7948f52e84
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/s_isnanf.S
@@ -0,0 +1,45 @@
+/* isnan().  PowerPC32 version.
+   Copyright (C) 2008-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* int __isnanf(x)  */
+	.machine power5
+EALIGN (__isnanf, 4, 0)
+	stwu	r1,-32(r1)
+	cfi_adjust_cfa_offset (32)
+	stfs	fp1,28(r1)	/* copy FPR to GPR */
+	nop
+	nop
+	lwz	r4,28(r1)
+	lis	r0,0x7f80	/* const long r0 0x7f800000 */
+	clrlwi	r4,r4,1		/* x = fabs(x) */
+	cmpw	cr7,r4,r0	/* if (fabs(x) <= inf) */
+	li	r3,0		/* then return 0 */
+	addi	r1,r1,32
+	cfi_adjust_cfa_offset (-32)
+	blelr+	cr7
+L(NaN):
+	li	r3,1		/* else return 1 */
+	blr
+	END (__isnanf)
+
+hidden_def (__isnanf)
+weak_alias (__isnanf, isnanf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt_compat.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt_compat.S
new file mode 100644
index 0000000000..93625c5aa9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrt_compat.S
@@ -0,0 +1,106 @@
+/* sqrt function.  PowerPC32 version.
+   Copyright (C) 2007-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* double [fp1] sqrt (double x [fp1])
+   Power4 (ISA V2.0) and above implement sqrt in hardware (not optional).
+   The fsqrt instruction generates the correct value for all inputs and
+   sets the appropriate floating point exceptions.  Extended checking is
+   only needed to set errno (via __kernel_standard) if the input value
+   is negative.
+
+   So compare the input value against the absolute value of itself.
+   This will compare equal unless the value is negative (EDOM) or a NAN,
+   in which case we branch to the extended wrapper.  If equal we can return
+   the result directly.
+
+   This part of the function looks like a leaf routine,  so no need to
+   stack a frame or execute prologue/epilogue code. It is safe to
+   branch directly to w_sqrt as long as the input value (f1) is
+   preserved. Putting the sqrt result into f2 (float parameter 2)
+   allows passing both the input value and sqrt result into the extended
+   wrapper so there is no need to recompute.
+
+   This tactic avoids the overhead of stacking a frame for the normal
+   (non-error) case.  Until gcc supports prologue shrink-wrapping
+   this is the best we can do.  */
+
+	.section	".text"
+	.machine power4
+EALIGN (__sqrt, 5, 0)
+	fabs	fp0,fp1
+	fsqrt	fp2,fp1
+	fcmpu	cr1,fp0,fp1
+	bne-	cr1,.Lw_sqrt
+	fmr	fp1,fp2
+	blr
+	.align	4
+.Lw_sqrt:
+	mflr	r0
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset(16)
+	fmr	fp12,fp2
+	stw	r0,20(r1)
+	stw	r30,8(r1)
+	cfi_offset(lr,20-16)
+	cfi_offset(r30,8-16)
+#ifdef SHARED
+	SETUP_GOT_ACCESS(r30,got_label)
+	addis	r30,r30,_GLOBAL_OFFSET_TABLE_-got_label@ha
+	addi	r30,r30,_GLOBAL_OFFSET_TABLE_-got_label@l
+	lwz	r9,_LIB_VERSION@got(30)
+	lwz	r0,0(r9)
+#else
+	lis	r9,_LIB_VERSION@ha
+	lwz	r0,_LIB_VERSION@l(r9)
+#endif
+/*  if (_LIB_VERSION == _IEEE_) return z; */
+	cmpwi	cr7,r0,-1
+	beq-	cr7,.L4
+/*  if (x != x) return z; !isnan*/
+	fcmpu	cr7,fp1,fp1
+	bne-	cr7,.L4
+/*  if  (x < 0.0)
+    return __kernel_standard (x, x, 26) */
+	fmr	fp2,fp1
+	li	r3,26
+	bne- 	cr1,.L11
+.L4:
+	lwz	r0,20(r1)
+	fmr	fp1,fp12
+	lwz	r30,8(r1)
+	addi	r1,r1,16
+	mtlr 	r0
+	blr
+.L11:
+	bl	__kernel_standard@plt
+	fmr	fp12,fp1
+	b	.L4
+	END	(__sqrt)
+
+weak_alias (__sqrt, sqrt)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__sqrt, sqrtl)
+strong_alias (__sqrt, __sqrtl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_0)
+compat_symbol (libm, __sqrt, sqrtl, GLIBC_2_0)
+#endif
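The wrapper above only leaves the fast path when fabs(x) != x, i.e. when x is negative or a NaN, and even then it returns the raw fsqrt result unless SVID error handling asks for __kernel_standard.  A reduced C sketch of that dispatch, with sqrt() standing in for the fsqrt instruction and the glibc-internal _LIB_VERSION/__kernel_standard machinery collapsed into setting errno (an illustration of the control flow, not the installed wrapper):

#include <errno.h>
#include <math.h>
#include <stdio.h>

static double
sqrt_sketch (double x)
{
  double z = sqrt (x);   /* stands in for the fsqrt result in fp2 */
  if (fabs (x) == x)     /* x is +/-0.0, positive, or +inf: fast path */
    return z;
  if (x != x)            /* NaN: return the NaN result unchanged */
    return z;
  errno = EDOM;          /* x < 0: the compat wrapper reports EDOM */
  return z;
}

int
main (void)
{
  errno = 0;
  double r = sqrt_sketch (-1.0);
  printf ("result=%f errno=%d\n", r, errno);   /* NaN result, errno == EDOM */
  return 0;
}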
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf_compat.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf_compat.S
new file mode 100644
index 0000000000..2ca86b6155
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/fpu/w_sqrtf_compat.S
@@ -0,0 +1,98 @@
+/* sqrtf function.  PowerPC32 version.
+   Copyright (C) 2007-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* float [fp1] sqrtf (float x [fp1])
+   Power4 (ISA V2.0) and above implement sqrt in hardware (not optional).
+   The fsqrts instruction generates the correct value for all inputs and
+   sets the appropriate floating point exceptions.  Extended checking is
+   only needed to set errno (via __kernel_standard) if the input value
+   is negative.
+
+   So compare the input value against the absolute value of itself.
+   This will compare equal unless the value is negative (EDOM) or a NAN,
+   in which case we branch to the extended wrapper.  If equal we can return
+   the result directly.
+
+   This part of the function looks like a leaf routine,  so no need to
+   stack a frame or execute prologue/epilogue code. It is safe to
+   branch directly to w_sqrt as long as the input value (f1) is
+   preserved. Putting the sqrt result into f2 (float parameter 2)
+   allows passing both the input value and sqrt result into the extended
+   wrapper so there is no need to recompute.
+
+   This tactic avoids the overhead of stacking a frame for the normal
+   (non-error) case.  Until gcc supports prologue shrink-wrapping
+   this is the best we can do.  */
+
+	.section	".text"
+	.machine power4
+EALIGN (__sqrtf, 5, 0)
+	fabs	fp0,fp1
+	fsqrts	fp2,fp1
+	fcmpu	cr1,fp0,fp1
+	bne-	cr1,.Lw_sqrtf
+	fmr	fp1,fp2
+	blr
+        .align 4
+.Lw_sqrtf:
+	mflr	r0
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset(16)
+	fmr	fp12,fp2
+	stw	r0,20(r1)
+	stw	r30,8(r1)
+	cfi_offset(lr,20-16)
+	cfi_offset(r30,8-16)
+#ifdef SHARED
+	SETUP_GOT_ACCESS(r30,got_label)
+	addis	r30,r30,_GLOBAL_OFFSET_TABLE_-got_label@ha
+	addi	r30,r30,_GLOBAL_OFFSET_TABLE_-got_label@l
+	lwz	r9,_LIB_VERSION@got(30)
+	lwz	r0,0(r9)
+#else
+	lis	r9,_LIB_VERSION@ha
+	lwz	r0,_LIB_VERSION@l(r9)
+#endif
+/*  if (_LIB_VERSION == _IEEE_) return z; */
+	cmpwi	cr7,r0,-1
+	beq-	cr7,.L4
+/*  if (x != x) return z; !isnan */
+	fcmpu	cr7,fp1,fp1
+	bne-	cr7,.L4
+/*  if  (x < 0.0)
+    return __kernel_standard (x, x, 126) */
+	fmr	fp2,fp1
+	li	r3,126
+	bne- 	cr1,.L11
+.L4:
+	lwz	r0,20(r1)
+	fmr	fp1,fp12
+	lwz	r30,8(r1)
+	addi	r1,r1,16
+	mtlr 	r0
+	blr
+.L11:
+	bl	__kernel_standard@plt
+	fmr	fp12,fp1
+	b	.L4
+	END	(__sqrtf)
+
+weak_alias (__sqrtf, sqrtf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power5/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/multiarch/Implies
new file mode 100644
index 0000000000..d29e3853ab
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power5/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power4/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/Implies
new file mode 100644
index 0000000000..8e5b58a57a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/power5+/fpu
+powerpc/powerpc32/power5+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/multiarch/Implies
new file mode 100644
index 0000000000..c66805ee63
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power5+/fpu/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_copysign.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_copysign.S
new file mode 100644
index 0000000000..d6cc8011ae
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_copysign.S
@@ -0,0 +1,58 @@
+/* copysign().  PowerPC32/POWER6 version.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* double [f1] copysign (double [f1] x, double [f2] y);
+   copysign(x,y) returns a value with the magnitude of x and
+   with the sign bit of y.  */
+
+	.section    ".text"
+	.type	    __copysign, @function
+	.machine    power6
+EALIGN (__copysign, 4, 0)
+	CALL_MCOUNT
+	fcpsgn	fp1,fp2,fp1
+	blr
+END (__copysign)
+
+hidden_def (__copysign)
+weak_alias (__copysign, copysign)
+
+/* It turns out that the 'double' version will also always work for
+   single-precision.  */
+strong_alias (__copysign, __copysignf)
+hidden_def (__copysignf)
+weak_alias (__copysignf, copysignf)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__copysign, __copysignl)
+weak_alias (__copysign, copysignl)
+#endif
+
+#if IS_IN (libm)
+# if LONG_DOUBLE_COMPAT (libm, GLIBC_2_0)
+compat_symbol (libm, copysign, copysignl, GLIBC_2_0)
+# endif
+#else
+# if LONG_DOUBLE_COMPAT (libc, GLIBC_2_0)
+compat_symbol (libc, copysign, copysignl, GLIBC_2_0);
+# endif
+#endif
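fcpsgn merges the magnitude of one operand with the sign of the other in a single instruction, which is also why the same entry point can serve the float aliases.  A bit-level C sketch of the operation, assuming IEEE-754 binary64 (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Keep exponent and mantissa of x, take the sign bit of y.  */
static double
copysign_sketch (double x, double y)
{
  uint64_t xb, yb;
  memcpy (&xb, &x, sizeof xb);
  memcpy (&yb, &y, sizeof yb);
  xb = (xb & 0x7fffffffffffffffULL) | (yb & 0x8000000000000000ULL);
  memcpy (&x, &xb, sizeof x);
  return x;
}

int
main (void)
{
  printf ("%g %g\n", copysign_sketch (3.5, -0.0), copysign_sketch (-2.0, 1.0));  /* -3.5 2 */
  return 0;
}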
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_copysignf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_copysignf.S
new file mode 100644
index 0000000000..d4aa702d07
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_copysignf.S
@@ -0,0 +1 @@
+/* This function uses the same code as s_copysign.S.  */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_isnan.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_isnan.S
new file mode 100644
index 0000000000..5b19433cbf
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_isnan.S
@@ -0,0 +1,61 @@
+/* isnan().  PowerPC32 version.
+   Copyright (C) 2008-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* int __isnan(x)  */
+	.machine power6
+EALIGN (__isnan, 4, 0)
+	stwu	r1,-32(r1)
+	cfi_adjust_cfa_offset (32)
+	ori	r1,r1,0
+	stfd	fp1,24(r1)	/* copy FPR to GPR */
+	ori	r1,r1,0
+	lwz	r4,24+HIWORD(r1)
+	lwz	r5,24+LOWORD(r1)
+	lis	r0,0x7ff0	/* const long r0 0x7ff00000 00000000 */
+	clrlwi	r4,r4,1		/* x = fabs(x) */
+	cmpw	cr7,r4,r0	/* if (fabs(x) <= inf) */
+	cmpwi	cr6,r5,0
+	li	r3,0		/* then return 0 */
+	addi	r1,r1,32
+	cfi_adjust_cfa_offset (-32)
+	bltlr+	cr7
+	bgt-	cr7,L(NaN)
+	beqlr+	cr6
+L(NaN):
+	li	r3,1		/* else return 1 */
+	blr
+	END (__isnan)
+
+hidden_def (__isnan)
+weak_alias (__isnan, isnan)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__isnan, __isnanl)
+weak_alias (__isnan, isnanl)
+#endif
+
+#if !IS_IN (libm)
+# if LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __isnan, __isnanl, GLIBC_2_0);
+compat_symbol (libc, isnan, isnanl, GLIBC_2_0);
+# endif
+#endif
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_isnanf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_isnanf.S
new file mode 100644
index 0000000000..7a19ed86d2
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_isnanf.S
@@ -0,0 +1,44 @@
+/* isnanf().  PowerPC32 version.
+   Copyright (C) 2008-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* int __isnanf(x)  */
+	.machine power6
+EALIGN (__isnanf, 4, 0)
+	stwu	r1,-32(r1)
+	cfi_adjust_cfa_offset (32)
+	ori	r1,r1,0
+	stfs	fp1,24(r1)	/* copy FPR to GPR */
+	ori	r1,r1,0
+	lwz	r4,24(r1)
+	lis	r0,0x7f80	/* const long r0 0x7f800000 */
+	clrlwi	r4,r4,1		/* x = fabs(x) */
+	cmpw	cr7,r4,r0	/* if (fabs(x) <= inf) */
+	li	r3,0		/* then return 0 */
+	addi	r1,r1,32
+	cfi_adjust_cfa_offset (-32)
+	blelr+	cr7
+L(NaN):
+	li	r3,1		/* else return 1 */
+	blr
+	END (__isnanf)
+
+hidden_def (__isnanf)
+weak_alias (__isnanf, isnanf)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llrint.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llrint.S
new file mode 100644
index 0000000000..326e77361b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llrint.S
@@ -0,0 +1,46 @@
+/* Round double to long long int.  PowerPC32 on PowerPC64 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* long long int[r3, r4] __llrint (double x[fp1])  */
+ENTRY (__llrint)
+	CALL_MCOUNT
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	fctid	fp13,fp1
+	stfd	fp13,8(r1)
+/* Ensure the following load is in a different dispatch group by
+   inserting "group ending nop".  */
+	ori	r1,r1,0
+	lwz	r3,8+HIWORD(r1)
+	lwz	r4,8+LOWORD(r1)
+	addi	r1,r1,16
+	blr
+	END (__llrint)
+
+weak_alias (__llrint, llrint)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__llrint, __llrintl)
+weak_alias (__llrint, llrintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __llrint, llrintl, GLIBC_2_1)
+#endif
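Because this is the 32-bit ABI, the 64-bit fctid result comes back as a register pair: the doubleword is stored with stfd and reloaded as two words, and the HIWORD/LOWORD offsets pick the endian-correct halves for r3 (high) and r4 (low).  A C sketch of the split, with llrint() standing in for fctid (the helper name is illustrative):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Round using the current rounding mode (llrint semantics), then split
   the 64-bit result into the two 32-bit halves returned in r3/r4.  */
static void
llrint_split (double x, uint32_t *hi, uint32_t *lo)
{
  uint64_t u = (uint64_t) (int64_t) llrint (x);  /* fctid + stfd */
  *hi = (uint32_t) (u >> 32);                    /* lwz r3,8+HIWORD(r1) */
  *lo = (uint32_t) u;                            /* lwz r4,8+LOWORD(r1) */
}

int
main (void)
{
  uint32_t hi, lo;
  llrint_split (-2.5, &hi, &lo);   /* default ties-to-even gives -2 */
  printf ("0x%08x 0x%08x\n", hi, lo);
  return 0;
}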
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llrintf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llrintf.S
new file mode 100644
index 0000000000..0950e7e7c7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llrintf.S
@@ -0,0 +1,38 @@
+/* Round float to long long int.  PowerPC32 on PowerPC64 version.
+   Copyright (C) 2004-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* long long int[r3, r4] __llrintf (float x[fp1])  */
+ENTRY (__llrintf)
+	CALL_MCOUNT
+	stwu	r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	fctid	fp13,fp1
+	stfd	fp13,8(r1)
+/* Ensure the following load is in a different dispatch group by
+   inserting "group ending nop".  */
+	ori	r1,r1,0
+	lwz	r3,8+HIWORD(r1)
+	lwz	r4,8+LOWORD(r1)
+	addi	r1,r1,16
+	blr
+	END (__llrintf)
+
+weak_alias (__llrintf, llrintf)
+
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llround.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llround.S
new file mode 100644
index 0000000000..83ba999a39
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llround.S
@@ -0,0 +1,59 @@
+/* llround function.  POWER5+, PowerPC32 version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* long long [r3, r4] llround (double x [fp1])
+   IEEE 1003.1 llround function.  IEEE specifies "round to the nearest
+   integer value, rounding halfway cases away from zero, regardless of
+   the current rounding mode."  However the PowerPC Architecture defines
+   "Round to Nearest" as "Choose the best approximation.  In case of a
+   tie, choose the one that is even (least significant bit 0)."
+   So we pre-round using the V2.02 Floating Round to Integer Nearest
+   instruction before we use the Floating Convert to Integer Doubleword
+   with round toward zero instruction.  */
+
+	.machine	"power5"
+ENTRY (__llround)
+	stwu    r1,-16(r1)
+	cfi_adjust_cfa_offset (16)
+	frin	fp2,fp1
+	fctidz	fp3,fp2		/* Convert to integer doubleword, round toward 0.  */
+	stfd	fp3,8(r1)
+/* Ensure the following load is in a different dispatch group by
+   inserting "group ending nop".  */
+	ori	r1,r1,0
+	lwz	r3,8+HIWORD(r1)
+	lwz	r4,8+LOWORD(r1)
+	addi	r1,r1,16
+	blr
+	END (__llround)
+
+weak_alias (__llround, llround)
+
+strong_alias (__llround, __llroundf)
+weak_alias (__llround, llroundf)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__llround, llroundl)
+strong_alias (__llround, __llroundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __llround, llroundl, GLIBC_2_1)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llroundf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llroundf.S
new file mode 100644
index 0000000000..030d2fdff8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/fpu/s_llroundf.S
@@ -0,0 +1 @@
+/* __llroundf is in s_llround.S  */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/memcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/memcpy.S
new file mode 100644
index 0000000000..81b62cba21
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/memcpy.S
@@ -0,0 +1,907 @@
+/* Optimized memcpy implementation for PowerPC32 on POWER6.
+   Copyright (C) 2003-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+   Returns 'dst'.
+
+   Memcpy handles short copies (< 32 bytes) using binary move blocks
+   (no loops) of lwz/stw.  The tail (remaining 1-3 bytes) is handled
+   with the appropriate combination of byte and halfword load/stores.
+   There is minimal effort to optimize the alignment of short moves.
+
+   Longer moves (>= 32-bytes) justify the effort to get at least the
+   destination word (4-byte) aligned.  Further optimization is
+   possible when both source and destination are word aligned.
+   Each case has an optimized unrolled loop.   */
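+
+/* Illustrative C sketch of the dispatch above (an approximation only;
+   copy_short, copy_words_aligned and copy_words_shifted are hypothetical
+   names for the assembly code paths in this file):
+
+     #include <stddef.h>
+     #include <stdint.h>
+
+     void *
+     memcpy_sketch (void *dst, const void *src, size_t len)
+     {
+       unsigned char *d = dst;
+       const unsigned char *s = src;
+       if (len < 32)
+         {
+           copy_short (d, s, len);               // branchy lwz/stw moves
+           return dst;
+         }
+       while (((uintptr_t) d & 3) != 0)          // 0-3 bytes to align dst
+         { *d++ = *s++; len--; }
+       if (((uintptr_t) s & 3) == 0)
+         copy_words_aligned (d, s, len);         // unrolled word loop
+       else
+         copy_words_shifted (d, s, len);         // load, shift, or scheme
+       return dst;
+     }
+ */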
+
+	.machine power6
+EALIGN (memcpy, 5, 0)
+	CALL_MCOUNT
+
+    stwu   1,-32(1)
+    cfi_adjust_cfa_offset(32)
+    cmplwi cr1,5,31     /* check for short move.  */
+    neg    0,3
+    cmplwi cr1,5,31
+    clrlwi 10,4,30	/* check alignment of src.  */
+    andi.  11,3,3	/* check alignment of dst.  */
+    clrlwi 0,0,30	/* Number of bytes until the 1st word of dst.  */
+    ble-   cr1,L(word_unaligned_short)	/* If move < 32 bytes.  */
+    cmplw  cr6,10,11
+    stw    31,24(1)
+    stw    30,20(1)
+    cfi_offset(31,(24-32))
+    cfi_offset(30,(20-32))
+    mr     30,3
+    beq    .L0
+    mtcrf  0x01,0
+    subf  31,0,5        /* Length after alignment.  */
+    add   12,4,0        /* Compute src addr after alignment.  */
+  /* Move 0-3 bytes as needed to get the destination word aligned.  */
+1:  bf    31,2f
+    lbz   6,0(4)
+    bf    30,3f
+    lhz   7,1(4)
+    stb   6,0(3)
+    sth   7,1(3)
+    addi  3,3,3
+    b     0f
+3:
+    stb   6,0(3)
+    addi  3,3,1
+    b     0f
+2:  bf    30,0f
+    lhz   6,0(4)
+    sth   6,0(3)
+    addi  3,3,2
+0:
+    clrlwi 10,12,30	/* check alignment of src again.  */
+    srwi   9,31,2	/* Number of full words remaining.  */
+    bne-   cr6,L(wdu)   /* If source is not word aligned. .L6 */
+    clrlwi 11,31,30  /* calculate the number of tail bytes */
+    b      L(word_aligned)
+  /* Copy words from source to destination, assuming the destination is
+     aligned on a word boundary.
+
+     At this point we know there are at least 29 bytes left (32-3) to copy.
+     The next step is to determine if the source is also word aligned.
+     If not, branch to the unaligned move code at L(wdu), which uses
+     a load, shift, store strategy.
+
+     Otherwise source and destination are word aligned, and we can use
+     the optimized word copy loop.  */
+    .align  4
+.L0:
+    mr     31,5
+    mr     12,4
+    bne-   cr6,L(wdu)   /* If source is not word aligned. .L6 */
+    srwi   9,5,2	/* Number of full words remaining.  */
+    clrlwi 11,5,30      /* calculate the number of tail bytes */
+
+  /* Move words where destination and source are word aligned.
+     Use an unrolled loop to copy 4 words (16-bytes) per iteration.
+     If the copy is not an exact multiple of 16 bytes, 1-3
+     words are copied as needed to set up the main loop.  After
+     the main loop exits there may be a tail of 1-3 bytes. These bytes are
+     copied a halfword/byte at a time as needed to preserve alignment.  */
+L(word_aligned):
+    mtcrf 0x01,9
+    srwi  8,31,4    /* calculate the 16 byte loop count */
+    cmplwi	cr1,9,4
+    cmplwi	cr6,11,0
+    mr    11,12
+
+    bf    30,1f
+    lwz   6,0(12)
+    lwz   7,4(12)
+    addi  11,12,8
+    mtctr 8
+    stw   6,0(3)
+    stw   7,4(3)
+    addi  10,3,8
+    bf    31,4f
+    lwz   0,8(12)
+    stw   0,8(3)
+    blt   cr1,3f
+    addi  11,12,12
+    addi  10,3,12
+    b     4f
+    .align  4
+1:
+    mr    10,3
+    mtctr 8
+    bf    31,4f
+    lwz   6,0(12)
+    addi  11,12,4
+    stw   6,0(3)
+    addi  10,3,4
+
+    .align  4
+4:
+    lwz   6,0(11)
+    lwz   7,4(11)
+    lwz   8,8(11)
+    lwz   0,12(11)
+    stw   6,0(10)
+    stw   7,4(10)
+    stw   8,8(10)
+    stw   0,12(10)
+    addi  11,11,16
+    addi  10,10,16
+    bdnz  4b
+3:
+    clrrwi 0,31,2
+    mtcrf 0x01,31
+    beq   cr6,0f
+.L9:
+    add   3,3,0
+    add   12,12,0
+
+/*  At this point we have a tail of 0-3 bytes and we know that the
+    destination is word aligned.  */
+2:  bf    30,1f
+    lhz   6,0(12)
+    addi  12,12,2
+    sth   6,0(3)
+    addi  3,3,2
+1:  bf    31,0f
+    lbz   6,0(12)
+    stb   6,0(3)
+0:
+  /* Return original dst pointer.  */
+    mr  3,30
+    lwz 30,20(1)
+    lwz 31,24(1)
+    addi 1,1,32
+    blr
+
+/* Copy up to 31 bytes.  This is divided into two cases: 0-8 bytes and 9-31
+   bytes.  Each case is handled without loops, using binary (1,2,4,8)
+   tests.
+
+   In the short (0-8 byte) case no attempt is made to force alignment
+   of either source or destination.  The hardware will handle the
+   unaligned load/stores with small delays for crossing 32-, 128-, and
+   4096-byte boundaries.  Since these short moves are unlikely to be
+   unaligned or cross these boundaries, the overhead to force
+   alignment is not justified.
+
+   The longer (9-31 byte) move is more likely to cross 32- or 128-byte
+   boundaries.  Since only loads are sensitive to the 32-/128-byte
+   boundaries it is more important to align the source than the
+   destination.  If the source is not already word aligned, we first
+   move 1-3 bytes as needed.  Since we are only word aligned we don't
+   use double word load/stores to ensure that all loads are aligned.
+   While the destination and stores may still be unaligned, this
+   is only an issue for page (4096 byte boundary) crossing, which
+   should be rare for these short moves.  The hardware handles this
+   case automatically with a small (~20 cycle) delay.  */
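+
+/* The 0-8 byte case in C terms (an illustrative sketch; the length bits
+   are tested directly instead of looping):
+
+     static void
+     copy_0_7_sketch (unsigned char *d, const unsigned char *s, size_t len)
+     {
+       if (len & 4) { d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
+                      d += 4; s += 4; }
+       if (len & 2) { d[0] = s[0]; d[1] = s[1]; d += 2; s += 2; }
+       if (len & 1) d[0] = s[0];
+     }
+
+   A length of exactly 8 is special-cased (two word copies) in
+   L(wus_8) below.  */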
+    .align  4
+
+    cfi_same_value (31)
+    cfi_same_value (30)
+L(word_unaligned_short):
+    mtcrf 0x01,5
+    cmplwi cr6,5,8
+    neg   8,4
+    clrrwi	9,4,2
+    andi. 0,8,3
+    beq   cr6,L(wus_8)	/* Handle moves of 8 bytes.  */
+/* At least 9 bytes left.  Get the source word aligned.  */
+    cmplwi	cr1,5,16
+    mr    12,4
+    ble   cr6,L(wus_4)  /* Handle moves of 0-8 bytes.  */
+    mr    11,3
+    mr    10,5
+    cmplwi	cr6,0,2
+    beq   L(wus_tail)	/* If the source is already word aligned skip this.  */
+/* Copy 1-3 bytes to get source address word aligned.  */
+    lwz   6,0(9)
+    subf  10,0,5
+    add   12,4,0
+    blt   cr6,5f
+    srwi  7,6,16
+    bgt	  cr6,3f
+#ifdef __LITTLE_ENDIAN__
+    sth   7,0(3)
+#else
+    sth   6,0(3)
+#endif
+    b     7f
+    .align  4
+3:
+#ifdef __LITTLE_ENDIAN__
+    rotlwi 6,6,24
+    stb   6,0(3)
+    sth   7,1(3)
+#else
+    stb   7,0(3)
+    sth   6,1(3)
+#endif
+    b     7f
+    .align  4
+5:
+#ifdef __LITTLE_ENDIAN__
+    rotlwi 6,6,8
+#endif
+    stb   6,0(3)
+7:
+    cmplwi	cr1,10,16
+    add   11,3,0
+    mtcrf 0x01,10
+    .align  4
+L(wus_tail):
+/* At least 6 bytes left and the source is word aligned.  This allows
+   some speculative loads up front.  */
+/* We need to special case the fall-through because the biggest delays
+   are due to address computation not being ready in time for the
+   AGEN.  */
+    lwz   6,0(12)
+    lwz   7,4(12)
+    blt   cr1,L(wus_tail8)
+    cmplwi	cr0,10,24
+L(wus_tail16): /* Move 16 bytes.  */
+    stw   6,0(11)
+    stw   7,4(11)
+    lwz   6,8(12)
+    lwz   7,12(12)
+    stw   6,8(11)
+    stw   7,12(11)
+/* Move 8 bytes more.  */
+    bf    28,L(wus_tail16p8)
+    cmplwi	cr1,10,28
+    lwz   6,16(12)
+    lwz   7,20(12)
+    stw   6,16(11)
+    stw   7,20(11)
+/* Move 4 bytes more.  */
+    bf    29,L(wus_tail16p4)
+    lwz   6,24(12)
+    stw   6,24(11)
+    addi  12,12,28
+    addi  11,11,28
+    bgt   cr1,L(wus_tail2)
+ /* exactly 28 bytes.  Return original dst pointer and exit.  */
+    addi  1,1,32
+    blr
+    .align  4
+L(wus_tail16p8):  /* less than 8 bytes left.  */
+    beq   cr1,L(wus_tailX) /* exactly 16 bytes, early exit.  */
+    cmplwi	cr1,10,20
+    bf    29,L(wus_tail16p2)
+/* Move 4 bytes more.  */
+    lwz   6,16(12)
+    stw   6,16(11)
+    addi  12,12,20
+    addi  11,11,20
+    bgt   cr1,L(wus_tail2)
+ /* exactly 20 bytes.  Return original dst pointer and exit.  */
+    addi  1,1,32
+    blr
+    .align  4
+L(wus_tail16p4):  /* less than 4 bytes left.  */
+    addi  12,12,24
+    addi  11,11,24
+    bgt   cr0,L(wus_tail2)
+ /* exactly 24 bytes.  Return original dst pointer and exit.  */
+    addi  1,1,32
+    blr
+    .align  4
+L(wus_tail16p2):  /* 16 bytes moved, less than 4 bytes left.  */
+    addi  12,12,16
+    addi  11,11,16
+    b     L(wus_tail2)
+
+    .align  4
+L(wus_tail8):  /* Move 8 bytes.  */
+/*  r6, r7 already loaded speculatively.  */
+    cmplwi	cr1,10,8
+    cmplwi	cr0,10,12
+    bf    28,L(wus_tail4)
+    stw   6,0(11)
+    stw   7,4(11)
+/* Move 4 bytes more.  */
+    bf    29,L(wus_tail8p4)
+    lwz   6,8(12)
+    stw   6,8(11)
+    addi  12,12,12
+    addi  11,11,12
+    bgt   cr0,L(wus_tail2)
+ /* exactly 12 bytes.  Return original dst pointer and exit.  */
+    addi  1,1,32
+    blr
+    .align  4
+L(wus_tail8p4):  /* less than 4 bytes left.  */
+    addi  12,12,8
+    addi  11,11,8
+    bgt   cr1,L(wus_tail2)
+ /* exactly 8 bytes.  Return original dst pointer and exit.  */
+    addi  1,1,32
+    blr
+
+    .align  4
+L(wus_tail4):  /* Move 4 bytes.  */
+/*  r6 already loaded speculatively.  If we are here we know there are
+    more than 4 bytes left.  So there is no need to test.  */
+    addi  12,12,4
+    stw   6,0(11)
+    addi  11,11,4
+L(wus_tail2):  /* Move 2-3 bytes.  */
+    bf    30,L(wus_tail1)
+    lhz   6,0(12)
+    sth   6,0(11)
+    bf    31,L(wus_tailX)
+    lbz   7,2(12)
+    stb   7,2(11)
+    addi  1,1,32
+    blr
+L(wus_tail1):  /* Move 1 byte.  */
+    bf    31,L(wus_tailX)
+    lbz   6,0(12)
+    stb   6,0(11)
+L(wus_tailX):
+  /* Return original dst pointer.  */
+    addi  1,1,32
+    blr
+
+/* Special case to copy 0-8 bytes.  */
+    .align  4
+L(wus_8):
+    lwz   6,0(4)
+    lwz   7,4(4)
+    stw   6,0(3)
+    stw   7,4(3)
+  /* Return original dst pointer.  */
+    addi  1,1,32
+    blr
+    .align  4
+L(wus_4):
+    bf    29,L(wus_2)
+    lwz   6,0(4)
+    stw   6,0(3)
+    bf    30,L(wus_5)
+    lhz   7,4(4)
+    sth   7,4(3)
+    bf    31,L(wus_0)
+    lbz   8,6(4)
+    stb   8,6(3)
+    addi  1,1,32
+    blr
+    .align  4
+L(wus_5):
+    bf    31,L(wus_0)
+    lbz   6,4(4)
+    stb   6,4(3)
+  /* Return original dst pointer.  */
+    addi 1,1,32
+    blr
+    .align  4
+L(wus_2):  /* Move 2-3 bytes.  */
+    bf    30,L(wus_1)
+    lhz   6,0(4)
+    sth   6,0(3)
+    bf    31,L(wus_0)
+    lbz   7,2(4)
+    stb   7,2(3)
+    addi  1,1,32
+    blr
+    .align  4
+L(wus_1):  /* Move 1 byte.  */
+    bf    31,L(wus_0)
+    lbz   6,0(4)
+    stb   6,0(3)
+    .align  3
+L(wus_0):
+  /* Return original dst pointer.  */
+    addi  1,1,32
+    blr
+
+    .align  4
+    cfi_offset(31,(24-32))
+    cfi_offset(30,(20-32))
+L(wdu):
+
+  /* Copy words where the destination is aligned but the source is
+     not.  For power4, power5 and power6 machines there is penalty for
+     unaligned loads (src) that cross 32-byte, cacheline, or page
+     boundaries. So we want to use simple (unaligned) loads where
+     possible but avoid them where we know the load would span a 32-byte
+     boundary.
+
+     At this point we know we have at least 29 (32-3) bytes to copy,
+     the src is unaligned, and we may cross at least one 32-byte
+     boundary.  Also we have the following register values:
+     r3 == adjusted dst, word aligned
+     r4 == unadjusted src
+     r5 == unadjusted len
+     r9 == adjusted Word length
+     r10 == src alignment (1-3)
+     r12 == adjusted src, not aligned
+     r31 == adjusted len
+
+     First we need to copy words up to but not crossing the next 32-byte
+     boundary. Then perform aligned loads just before and just after
+     the boundary and use shifts and ors to generate the next aligned
+     word for dst. If more than 32 bytes remain we copy (unaligned src)
+     the next 7 words and repeat the loop until less than 32 bytes
+     remain.
+
+     Then if more than 4 bytes remain we again use aligned loads,
+     shifts and ors to generate the next dst word. We then process the
+     remaining words using unaligned loads as needed. Finally we check
+     if there are any (1-3) bytes remaining and use halfword and/or
+     byte load/stores to complete the copy.
+*/
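+
+/* The core load/shift/or step, roughly in C (an illustrative sketch,
+   big-endian form shown; the assembly also handles little-endian and
+   only falls back to this form around 32-byte boundaries):
+
+     #include <stddef.h>
+     #include <stdint.h>
+
+     static void
+     copy_words_shifted_sketch (uint32_t *dst, const unsigned char *src,
+                                size_t nwords)
+     {
+       unsigned int off = (uintptr_t) src & 3;    // 1, 2 or 3
+       const uint32_t *s = (const uint32_t *) (src - off);
+       uint32_t hi = *s++ << (8 * off);
+       while (nwords-- > 0)
+         {
+           uint32_t lo = *s++;
+           *dst++ = hi | (lo >> (32 - 8 * off));  // merge two aligned loads
+           hi = lo << (8 * off);
+         }
+     }
+ */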
+    mr      4,12      /* restore unaligned adjusted src ptr */
+    clrlwi  0,12,27   /* Find dist from previous 32-byte boundary.  */
+    slwi    10,10,3   /* calculate number of bits to shift 1st word left */
+    cmplwi  cr5,0,16
+    subfic  8,0,32   /* Number of bytes to next 32-byte boundary.  */
+
+    mtcrf   0x01,8
+    cmplwi  cr1,10,16
+    subfic  9,10,32  /* number of bits to shift 2nd word right */
+/*  This test is reversed because the timing to compare the bytes to the
+    next 32-byte boundary could not be met.  So we compare the bytes from
+    the previous 32-byte boundary and invert the test.  */
+    bge     cr5,L(wdu_h32_8)
+    .align  4
+    lwz   6,0(4)
+    lwz   7,4(4)
+    addi  12,4,16    /* generate alternate pointers to avoid agen */
+    addi  11,3,16    /* timing issues downstream.  */
+    stw   6,0(3)
+    stw   7,4(3)
+    subi  31,31,16
+    lwz   6,8(4)
+    lwz   7,12(4)
+    addi  4,4,16
+    stw   6,8(3)
+    stw   7,12(3)
+    addi  3,3,16
+    bf    28,L(wdu_h32_4)
+    lwz   6,0(12)
+    lwz   7,4(12)
+    subi  31,31,8
+    addi  4,4,8
+    stw   6,0(11)
+    stw   7,4(11)
+    addi  3,3,8
+    bf    29,L(wdu_h32_0)
+    lwz   6,8(12)
+    addi  4,4,4
+    subi  31,31,4
+    stw   6,8(11)
+    addi  3,3,4
+    b     L(wdu_h32_0)
+    .align  4
+L(wdu_h32_8):
+    bf    28,L(wdu_h32_4)
+    lwz   6,0(4)
+    lwz   7,4(4)
+    subi  31,31,8
+    bf    29,L(wdu_h32_8x)
+    stw   6,0(3)
+    stw   7,4(3)
+    lwz   6,8(4)
+    addi  4,4,12
+    subi  31,31,4
+    stw   6,8(3)
+    addi  3,3,12
+    b     L(wdu_h32_0)
+    .align  4
+L(wdu_h32_8x):
+    addi  4,4,8
+    stw   6,0(3)
+    stw   7,4(3)
+    addi  3,3,8
+    b     L(wdu_h32_0)
+    .align  4
+L(wdu_h32_4):
+    bf    29,L(wdu_h32_0)
+    lwz   6,0(4)
+    subi  31,31,4
+    addi  4,4,4
+    stw   6,0(3)
+    addi  3,3,4
+    .align  4
+L(wdu_h32_0):
+/*  set up for 32-byte boundary crossing word move and possibly 32-byte
+    move loop.  */
+    clrrwi  12,4,2
+    cmplwi  cr5,31,32
+    bge     cr1,L(wdu2_32)
+#if 0
+    b       L(wdu1_32)
+/*
+    cmplwi  cr1,10,8
+    beq     cr1,L(wdu1_32)
+    cmplwi  cr1,10,16
+    beq     cr1,L(wdu2_32)
+    cmplwi  cr1,10,24
+    beq     cr1,L(wdu3_32)
+*/
+L(wdu_32):
+    lwz     6,0(12)
+    cmplwi  cr6,31,4
+    srwi    8,31,5    /* calculate the 32 byte loop count */
+    slw     0,6,10
+    clrlwi  31,31,27   /* The remaining bytes, < 32.  */
+    blt     cr5,L(wdu_32tail)
+    mtctr   8
+    cmplwi  cr6,31,4
+    .align  4
+L(wdu_loop32):
+    /* copy 32 bytes at a time */
+    lwz   8,4(12)
+    addi  12,12,32
+    lwz   7,4(4)
+    srw   8,8,9
+    or    0,0,8
+    stw   0,0(3)
+    stw   7,4(3)
+    lwz   6,8(4)
+    lwz   7,12(4)
+    stw   6,8(3)
+    stw   7,12(3)
+    lwz   6,16(4)
+    lwz   7,20(4)
+    stw   6,16(3)
+    stw   7,20(3)
+    lwz   6,24(4)
+    lwz   7,28(4)
+    lwz   8,0(12)
+    addi  4,4,32
+    stw   6,24(3)
+    stw   7,28(3)
+    addi  3,3,32
+    slw   0,8,10
+    bdnz+ L(wdu_loop32)
+
+L(wdu_32tail):
+    mtcrf   0x01,31
+    cmplwi  cr5,31,16
+    blt     cr6,L(wdu_4tail)
+    /* calculate and store the final word */
+    lwz   8,4(12)
+    srw   8,8,9
+    or    6,0,8
+    b     L(wdu_32tailx)
+#endif
+    .align  4
+L(wdu1_32):
+    lwz     6,-1(4)
+    cmplwi  cr6,31,4
+    srwi    8,31,5    /* calculate the 32 byte loop count */
+#ifdef __LITTLE_ENDIAN__
+    srwi    6,6,8
+#else
+    slwi    6,6,8
+#endif
+    clrlwi  31,31,27   /* The remaining bytes, < 32.  */
+    blt     cr5,L(wdu1_32tail)
+    mtctr   8
+    cmplwi  cr6,31,4
+
+    lwz   8,3(4)
+    lwz   7,4(4)
+#ifdef __LITTLE_ENDIAN__
+    rldimi 6,8,24,32
+#else
+/*  Equivalent to: srwi   8,8,32-8;  or    6,6,8 */
+    rlwimi 6,8,8,(32-8),31
+#endif
+    b      L(wdu1_loop32x)
+    .align  4
+L(wdu1_loop32):
+    /* copy 32 bytes at a time */
+    lwz   8,3(4)
+    lwz   7,4(4)
+    stw   10,-8(3)
+    stw   11,-4(3)
+#ifdef __LITTLE_ENDIAN__
+    rldimi 6,8,24,32
+#else
+/*  Equivalent to  srwi   8,8,32-8; or    6,6,8 */
+    rlwimi 6,8,8,(32-8),31
+#endif
+L(wdu1_loop32x):
+    lwz   10,8(4)
+    lwz   11,12(4)
+    stw   6,0(3)
+    stw   7,4(3)
+    lwz   6,16(4)
+    lwz   7,20(4)
+    stw   10,8(3)
+    stw   11,12(3)
+    lwz   10,24(4)
+    lwz   11,28(4)
+    lwz   8,32-1(4)
+    addi  4,4,32
+    stw   6,16(3)
+    stw   7,20(3)
+    addi  3,3,32
+#ifdef __LITTLE_ENDIAN__
+    srwi  6,8,8
+#else
+    slwi  6,8,8
+#endif
+    bdnz+ L(wdu1_loop32)
+    stw   10,-8(3)
+    stw   11,-4(3)
+
+L(wdu1_32tail):
+    mtcrf   0x01,31
+    cmplwi  cr5,31,16
+    blt     cr6,L(wdu_4tail)
+    /* calculate and store the final word */
+    lwz   8,3(4)
+#ifdef __LITTLE_ENDIAN__
+    rldimi 6,8,24,32
+#else
+/*  Equivalent to: srwi   8,8,32-8;  or    6,6,8  */
+    rlwimi 6,8,8,(32-8),31
+#endif
+    b     L(wdu_32tailx)
+
+L(wdu2_32):
+    bgt     cr1,L(wdu3_32)
+    lwz     6,-2(4)
+    cmplwi  cr6,31,4
+    srwi    8,31,5    /* calculate the 32 byte loop count */
+#ifdef __LITTLE_ENDIAN__
+    srwi    6,6,16
+#else
+    slwi    6,6,16
+#endif
+    clrlwi  31,31,27   /* The remaining bytes, < 32.  */
+    blt     cr5,L(wdu2_32tail)
+    mtctr   8
+    cmplwi  cr6,31,4
+
+    lwz   8,2(4)
+    lwz   7,4(4)
+#ifdef __LITTLE_ENDIAN__
+    rldimi 6,8,16,32
+#else
+    rlwimi 6,8,16,(32-16),31
+#endif
+    b      L(wdu2_loop32x)
+    .align  4
+L(wdu2_loop32):
+    /* copy 32 bytes at a time */
+    lwz   8,2(4)
+    lwz   7,4(4)
+    stw   10,-8(3)
+    stw   11,-4(3)
+#ifdef __LITTLE_ENDIAN__
+    rldimi 6,8,16,32
+#else
+    rlwimi 6,8,16,(32-16),31
+#endif
+L(wdu2_loop32x):
+    lwz   10,8(4)
+    lwz   11,12(4)
+    stw   6,0(3)
+    stw   7,4(3)
+    lwz   6,16(4)
+    lwz   7,20(4)
+    stw   10,8(3)
+    stw   11,12(3)
+    lwz   10,24(4)
+    lwz   11,28(4)
+/*    lwz   8,0(12) */
+    lwz   8,32-2(4)
+    addi  4,4,32
+    stw   6,16(3)
+    stw   7,20(3)
+    addi  3,3,32
+#ifdef __LITTLE_ENDIAN__
+    srwi  6,8,16
+#else
+    slwi  6,8,16
+#endif
+    bdnz+ L(wdu2_loop32)
+    stw   10,-8(3)
+    stw   11,-4(3)
+
+L(wdu2_32tail):
+    mtcrf   0x01,31
+    cmplwi  cr5,31,16
+    blt     cr6,L(wdu_4tail)
+    /* calculate and store the final word */
+    lwz   8,2(4)
+#ifdef __LITTLE_ENDIAN__
+    rldimi 6,8,16,32
+#else
+    rlwimi 6,8,16,(32-16),31
+#endif
+    b     L(wdu_32tailx)
+
+L(wdu3_32):
+/*    lwz     6,0(12) */
+    lwz     6,-3(4)
+    cmplwi  cr6,31,4
+    srwi    8,31,5    /* calculate the 32 byte loop count */
+#ifdef __LITTLE_ENDIAN__
+    srwi    6,6,24
+#else
+    slwi    6,6,24
+#endif
+    clrlwi  31,31,27   /* The remaining bytes, < 32.  */
+    blt     cr5,L(wdu3_32tail)
+    mtctr   8
+    cmplwi  cr6,31,4
+
+    lwz   8,1(4)
+    lwz   7,4(4)
+#ifdef __LITTLE_ENDIAN__
+    rldimi 6,8,8,32
+#else
+    rlwimi 6,8,24,(32-24),31
+#endif
+    b      L(wdu3_loop32x)
+    .align  4
+L(wdu3_loop32):
+    /* copy 32 bytes at a time */
+    lwz   8,1(4)
+    lwz   7,4(4)
+    stw   10,-8(3)
+    stw   11,-4(3)
+#ifdef __LITTLE_ENDIAN__
+    rldimi 6,8,8,32
+#else
+    rlwimi 6,8,24,(32-24),31
+#endif
+L(wdu3_loop32x):
+    lwz   10,8(4)
+    lwz   11,12(4)
+    stw   6,0(3)
+    stw   7,4(3)
+    lwz   6,16(4)
+    lwz   7,20(4)
+    stw   10,8(3)
+    stw   11,12(3)
+    lwz   10,24(4)
+    lwz   11,28(4)
+    lwz   8,32-3(4)
+    addi  4,4,32
+    stw   6,16(3)
+    stw   7,20(3)
+    addi  3,3,32
+#ifdef __LITTLE_ENDIAN__
+    srwi  6,8,24
+#else
+    slwi  6,8,24
+#endif
+    bdnz+ L(wdu3_loop32)
+    stw   10,-8(3)
+    stw   11,-4(3)
+
+L(wdu3_32tail):
+    mtcrf   0x01,31
+    cmplwi  cr5,31,16
+    blt     cr6,L(wdu_4tail)
+    /* calculate and store the final word */
+    lwz   8,1(4)
+#ifdef __LITTLE_ENDIAN__
+    rldimi 6,8,8,32
+#else
+    rlwimi 6,8,24,(32-24),31
+#endif
+    b     L(wdu_32tailx)
+    .align  4
+L(wdu_32tailx):
+    blt     cr5,L(wdu_t32_8)
+    lwz   7,4(4)
+    addi  12,4,16    /* generate alternate pointers to avoid agen */
+    addi  11,3,16    /* timing issues downstream.  */
+    stw   6,0(3)
+    stw   7,4(3)
+    subi  31,31,16
+    lwz   6,8(4)
+    lwz   7,12(4)
+    addi  4,4,16
+    stw   6,8(3)
+    stw   7,12(3)
+    addi  3,3,16
+    bf    28,L(wdu_t32_4x)
+    lwz   6,0(12)
+    lwz   7,4(12)
+    addi  4,4,8
+    subi  31,31,8
+    stw   6,0(11)
+    stw   7,4(11)
+    addi  3,3,8
+    bf    29,L(wdu_t32_0)
+    lwz   6,8(12)
+    addi  4,4,4
+    subi  31,31,4
+    stw   6,8(11)
+    addi  3,3,4
+    b     L(wdu_t32_0)
+    .align  4
+L(wdu_t32_4x):
+    bf    29,L(wdu_t32_0)
+    lwz   6,0(4)
+    addi  4,4,4
+    subi  31,31,4
+    stw   6,0(3)
+    addi  3,3,4
+    b     L(wdu_t32_0)
+    .align  4
+L(wdu_t32_8):
+    bf    28,L(wdu_t32_4)
+    lwz   7,4(4)
+    subi  31,31,8
+    bf    29,L(wdu_t32_8x)
+    stw   6,0(3)
+    stw   7,4(3)
+    lwz   6,8(4)
+    subi  31,31,4
+    addi  4,4,12
+    stw   6,8(3)
+    addi  3,3,12
+    b     L(wdu_t32_0)
+    .align  4
+L(wdu_t32_8x):
+    addi  4,4,8
+    stw   6,0(3)
+    stw   7,4(3)
+    addi  3,3,8
+    b     L(wdu_t32_0)
+    .align  4
+L(wdu_t32_4):
+    subi  31,31,4
+    stw   6,0(3)
+    addi  4,4,4
+    addi  3,3,4
+    .align  4
+L(wdu_t32_0):
+L(wdu_4tail):
+    cmplwi  cr6,31,0
+    beq   cr6,L(wdus_0)	/* If the tail is 0 bytes we are done!  */
+    bf    30,L(wdus_3)
+    lhz   7,0(4)
+    sth   7,0(3)
+    bf    31,L(wdus_0)
+    lbz   8,2(4)
+    stb   8,2(3)
+    mr    3,30
+    lwz   30,20(1)
+    lwz   31,24(1)
+    addi  1,1,32
+    blr
+    .align  4
+L(wdus_3):
+    bf    31,L(wus_0)
+    lbz   6,0(4)
+    stb   6,0(3)
+    .align  4
+L(wdus_0):
+  /* Return original dst pointer.  */
+    mr   3,30
+    lwz  30,20(1)
+    lwz  31,24(1)
+    addi 1,1,32
+    blr
+END (memcpy)
+
+libc_hidden_builtin_def (memcpy)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/memset.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/memset.S
new file mode 100644
index 0000000000..f221c32d2f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/memset.S
@@ -0,0 +1,539 @@
+/* Optimized 32-bit memset implementation for POWER6.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
+   Returns 's'.
+
+   The memset is done in three sizes: byte (8 bits), word (32 bits),
+   cache line (1024 bits). There is a special case for setting cache lines
+   to 0, to take advantage of the dcbz instruction.  */
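+
+/* The tiers, roughly in C (an illustrative sketch only; the 32-byte
+   grouping, group-ending nops and the dcbz path are handled in the
+   assembly below):
+
+     #include <stddef.h>
+     #include <stdint.h>
+
+     void *
+     memset_sketch (void *s, int c, size_t n)
+     {
+       unsigned char *p = s;
+       uint32_t w = (unsigned char) c * 0x01010101u;  // replicate byte
+       while (n > 0 && ((uintptr_t) p & 3) != 0)      // bytes to word align
+         { *p++ = (unsigned char) c; n--; }
+       for (; n >= 4; n -= 4, p += 4)                 // word stores
+         *(uint32_t *) p = w;
+       for (; n > 0; n--)                             // 1-3 byte tail
+         *p++ = (unsigned char) c;
+       return s;
+     }
+ */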
+
+	.machine power6
+EALIGN (memset, 7, 0)
+	CALL_MCOUNT
+
+#define rTMP	r0
+#define rRTN	r3	/* Initial value of 1st argument.  */
+#define rMEMP0	r3	/* Original value of 1st arg.  */
+#define rCHR	r4	/* Char to set in each byte.  */
+#define rLEN	r5	/* Length of region to set.  */
+#define rMEMP	r6	/* Address at which we are storing.  */
+#define rALIGN	r7	/* Number of bytes we are setting now (when aligning). */
+#define rMEMP2	r8
+
+#define rNEG64	r8	/* Constant -64 for clearing with dcbz.  */
+#define rMEMP3	r9	/* Alt mem pointer.  */
+L(_memset):
+/* Take care of case for size <= 4.  */
+	cmplwi	cr1, rLEN, 4
+	andi.	rALIGN, rMEMP0, 3
+	mr	rMEMP, rMEMP0
+	ble-	cr1, L(small)
+/* Align to word boundary.  */
+	cmplwi	cr5, rLEN, 31
+	insrwi	rCHR, rCHR, 8, 16	/* Replicate byte to halfword.  */
+	beq+	L(aligned)
+	mtcrf	0x01, rMEMP0
+	subfic	rALIGN, rALIGN, 4
+	add	rMEMP, rMEMP, rALIGN
+	sub	rLEN, rLEN, rALIGN
+	bf+	31, L(g0)
+	stb	rCHR, 0(rMEMP0)
+	bt	30, L(aligned)
+L(g0):
+	sth	rCHR, -2(rMEMP)
+
+        .align 4
+/* Handle the case of size < 31.  */
+L(aligned):
+	mtcrf	0x01, rLEN
+	insrwi	rCHR, rCHR, 16, 0	/* Replicate halfword to word.  */
+	ble	cr5, L(medium)
+/* Align to 32-byte boundary.  */
+	andi.	rALIGN, rMEMP, 0x1C
+	subfic	rALIGN, rALIGN, 0x20
+	beq	L(caligned)
+	mtcrf	0x01, rALIGN
+	add	rMEMP, rMEMP, rALIGN
+	sub	rLEN, rLEN, rALIGN
+	cmplwi	cr1, rALIGN, 0x10
+	mr	rMEMP2, rMEMP
+	bf	28, L(a1)
+        stw     rCHR, -4(rMEMP2)
+	stwu	rCHR, -8(rMEMP2)
+	nop
+L(a1):	blt	cr1, L(a2)
+        stw     rCHR, -4(rMEMP2)
+	stw	rCHR, -8(rMEMP2)
+	stw	rCHR, -12(rMEMP2)
+	stwu	rCHR, -16(rMEMP2)
+L(a2):  bf      29, L(caligned)
+        stw     rCHR, -4(rMEMP2)
+
+        .align 3
+/* Now aligned to a 32 byte boundary.  */
+L(caligned):
+	cmplwi	cr1, rCHR, 0
+	clrrwi.	rALIGN, rLEN, 5
+	mtcrf	0x01, rLEN
+	beq	cr1, L(zloopstart) /* Special case for clearing memory using dcbz.  */
+L(nondcbz):
+	beq	L(medium)	/* We may not actually get to do a full line.  */
+	nop
+/* Storing a non-zero "c" value. We are aligned at a sector (32-byte)
+   boundary but may not be at a cache line (128-byte) boundary.  */
+L(nzloopstart):
+/* memset in 32-byte chunks until we get to a cache line boundary.
+   If rLEN is less than the distance to the next cache-line boundary use
+   cacheAligned1 code to finish the tail.  */
+	cmplwi	cr1,rLEN,128
+
+	andi.	rTMP,rMEMP,127
+	blt	cr1,L(cacheAligned1)
+	addi	rMEMP3,rMEMP,32
+	beq	L(nzCacheAligned)
+	addi	rLEN,rLEN,-32
+	stw	rCHR,0(rMEMP)
+        stw     rCHR,4(rMEMP)
+	stw	rCHR,8(rMEMP)
+	stw     rCHR,12(rMEMP)
+	stw	rCHR,16(rMEMP)
+        stw     rCHR,20(rMEMP)
+	addi	rMEMP,rMEMP,32
+	andi.	rTMP,rMEMP3,127
+	stw	rCHR,-8(rMEMP3)
+        stw     rCHR,-4(rMEMP3)
+
+	beq	L(nzCacheAligned)
+	addi	rLEN,rLEN,-32
+	stw	rCHR,0(rMEMP3)
+        stw     rCHR,4(rMEMP3)
+	addi	rMEMP,rMEMP,32
+	stw	rCHR,8(rMEMP3)
+	stw     rCHR,12(rMEMP3)
+	andi.	rTMP,rMEMP,127
+	stw	rCHR,16(rMEMP3)
+        stw     rCHR,20(rMEMP3)
+	stw	rCHR,24(rMEMP3)
+        stw     rCHR,28(rMEMP3)
+
+	beq	L(nzCacheAligned)
+	addi	rLEN,rLEN,-32
+/* At this point we can overrun the store queue (pipe reject) so it is
+   time to slow things down. The store queue can merge two adjacent
+   stores into a single L1/L2 op, but the L2 is clocked at 1/2 the CPU.
+   So we add "group ending nops" to guarantee that we dispatch only two
+   stores every other cycle. */
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,32(rMEMP3)
+        stw     rCHR,36(rMEMP3)
+	addi	rMEMP,rMEMP,32
+	cmplwi	cr1,rLEN,128
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,40(rMEMP3)
+	stw     rCHR,44(rMEMP3)
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,48(rMEMP3)
+        stw     rCHR,52(rMEMP3)
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,56(rMEMP3)
+        stw     rCHR,60(rMEMP3)
+	blt	cr1,L(cacheAligned1)
+	b	L(nzCacheAligned)
+
+/* Now we are aligned to the cache line and can use dcbtst.  */
+        .align 5
+L(nzCacheAligned):
+	cmplwi	cr1,rLEN,128
+	cmplwi	cr6,rLEN,256
+	blt	cr1,L(cacheAligned1)
+	blt	cr6,L(nzCacheAligned128)
+        .align 4
+L(nzCacheAligned128):
+	nop
+	addi	rMEMP3,rMEMP,64
+	stw	rCHR,0(rMEMP)
+        stw     rCHR,4(rMEMP)
+	stw	rCHR,8(rMEMP)
+	stw     rCHR,12(rMEMP)
+	stw	rCHR,16(rMEMP)
+        stw     rCHR,20(rMEMP)
+	stw	rCHR,24(rMEMP)
+        stw     rCHR,28(rMEMP)
+	stw	rCHR,32(rMEMP)
+        stw     rCHR,36(rMEMP)
+	stw	rCHR,40(rMEMP)
+	stw     rCHR,44(rMEMP)
+	stw	rCHR,48(rMEMP)
+        stw     rCHR,52(rMEMP)
+	stw	rCHR,56(rMEMP)
+        stw     rCHR,60(rMEMP)
+	addi	rMEMP,rMEMP3,64
+	addi	rLEN,rLEN,-128
+/* At this point we can overrun the store queue (pipe reject) so it is
+   time to slow things down. The store queue can merge two adjacent
+   stores into a single L1/L2 op, but the L2 is clocked at 1/2 the CPU.
+   So we add "group ending nops" to guarantee that we dispatch only one
+   store per cycle. */
+	stw	rCHR,0(rMEMP3)
+	ori	r1,r1,0
+        stw     rCHR,4(rMEMP3)
+	ori	r1,r1,0
+	stw	rCHR,8(rMEMP3)
+	ori	r1,r1,0
+	stw     rCHR,12(rMEMP3)
+	ori	r1,r1,0
+	stw	rCHR,16(rMEMP3)
+	ori	r1,r1,0
+        stw     rCHR,20(rMEMP3)
+	ori	r1,r1,0
+	stw	rCHR,24(rMEMP3)
+	ori	r1,r1,0
+        stw     rCHR,28(rMEMP3)
+	ori	r1,r1,0
+	stw	rCHR,32(rMEMP3)
+	ori	r1,r1,0
+        stw     rCHR,36(rMEMP3)
+	ori	r1,r1,0
+	stw	rCHR,40(rMEMP3)
+	ori	r1,r1,0
+	stw     rCHR,44(rMEMP3)
+	ori	r1,r1,0
+	stw	rCHR,48(rMEMP3)
+	ori	r1,r1,0
+        stw     rCHR,52(rMEMP3)
+	ori	r1,r1,0
+	stw	rCHR,56(rMEMP3)
+	ori	r1,r1,0
+        stw     rCHR,60(rMEMP3)
+	blt	cr6,L(cacheAligned1)
+#if IS_IN (libc)
+	lfd	0,-128(rMEMP)
+#endif
+	b	L(nzCacheAligned256)
+        .align 5
+L(nzCacheAligned256):
+	cmplwi	cr1,rLEN,256
+	addi	rMEMP3,rMEMP,64
+#if !IS_IN (libc)
+/* When we are not in libc we should use only GPRs to avoid the FPU lock
+   interrupt.  */
+	stw	rCHR,0(rMEMP)
+        stw     rCHR,4(rMEMP)
+	stw	rCHR,8(rMEMP)
+	stw     rCHR,12(rMEMP)
+	stw	rCHR,16(rMEMP)
+        stw     rCHR,20(rMEMP)
+	stw	rCHR,24(rMEMP)
+        stw     rCHR,28(rMEMP)
+	stw	rCHR,32(rMEMP)
+        stw     rCHR,36(rMEMP)
+	stw	rCHR,40(rMEMP)
+	stw     rCHR,44(rMEMP)
+	stw	rCHR,48(rMEMP)
+        stw     rCHR,52(rMEMP)
+	stw	rCHR,56(rMEMP)
+        stw     rCHR,60(rMEMP)
+	addi	rMEMP,rMEMP3,64
+	addi	rLEN,rLEN,-128
+	stw	rCHR,0(rMEMP3)
+        stw     rCHR,4(rMEMP3)
+	stw	rCHR,8(rMEMP3)
+	stw     rCHR,12(rMEMP3)
+	stw	rCHR,16(rMEMP3)
+        stw     rCHR,20(rMEMP3)
+	stw	rCHR,24(rMEMP3)
+        stw     rCHR,28(rMEMP3)
+	stw	rCHR,32(rMEMP3)
+        stw     rCHR,36(rMEMP3)
+	stw	rCHR,40(rMEMP3)
+	stw     rCHR,44(rMEMP3)
+	stw	rCHR,48(rMEMP3)
+        stw     rCHR,52(rMEMP3)
+	stw	rCHR,56(rMEMP3)
+        stw     rCHR,60(rMEMP3)
+#else
+/* We are in libc and this is a long memset so we can use FPRs and can afford
+   occasional FPU locked interrupts.  */
+	stfd	0,0(rMEMP)
+	stfd	0,8(rMEMP)
+	stfd	0,16(rMEMP)
+	stfd	0,24(rMEMP)
+	stfd	0,32(rMEMP)
+	stfd	0,40(rMEMP)
+	stfd	0,48(rMEMP)
+	stfd	0,56(rMEMP)
+	addi	rMEMP,rMEMP3,64
+	addi	rLEN,rLEN,-128
+	stfd	0,0(rMEMP3)
+	stfd	0,8(rMEMP3)
+	stfd	0,16(rMEMP3)
+	stfd	0,24(rMEMP3)
+	stfd	0,32(rMEMP3)
+	stfd	0,40(rMEMP3)
+	stfd	0,48(rMEMP3)
+	stfd	0,56(rMEMP3)
+#endif
+	bge	cr1,L(nzCacheAligned256)
+	dcbtst	0,rMEMP
+	b	L(cacheAligned1)
+
+	.align 4
+/* Storing a zero "c" value. We are aligned at a sector (32-byte)
+   boundary but may not be at a cache line (128-byte) boundary.  If the
+   remaining length spans a full cache line we can use the Data cache
+   block zero instruction. */
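+
+/* The dcbz fast path, roughly (an illustrative sketch using GCC inline
+   asm; assumes 128-byte cache lines and a cache-line-aligned pointer):
+
+     #include <stddef.h>
+
+     static void
+     zero_cache_lines_sketch (unsigned char *p, size_t nlines)
+     {
+       while (nlines-- > 0)
+         {
+           __asm__ volatile ("dcbz 0,%0" : : "r" (p) : "memory");
+           p += 128;
+         }
+     }
+ */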
+L(zloopstart):
+/* memset in 32-byte chunks until we get to a cache line boundary.
+   If rLEN is less than the distance to the next cache-line boundary use
+   cacheAligned1 code to finish the tail.  */
+	cmplwi	cr1,rLEN,128
+	beq	L(medium)
+L(getCacheAligned):
+	andi.	rTMP,rMEMP,127
+	blt	cr1,L(cacheAligned1)
+	addi	rMEMP3,rMEMP,32
+	beq	L(cacheAligned)
+	addi	rLEN,rLEN,-32
+	stw	rCHR,0(rMEMP)
+        stw     rCHR,4(rMEMP)
+	stw	rCHR,8(rMEMP)
+	stw     rCHR,12(rMEMP)
+	stw	rCHR,16(rMEMP)
+        stw     rCHR,20(rMEMP)
+	addi	rMEMP,rMEMP,32
+	andi.	rTMP,rMEMP3,127
+	stw	rCHR,-8(rMEMP3)
+        stw     rCHR,-4(rMEMP3)
+L(getCacheAligned2):
+	beq	L(cacheAligned)
+	addi	rLEN,rLEN,-32
+	addi	rMEMP,rMEMP,32
+	stw	rCHR,0(rMEMP3)
+        stw     rCHR,4(rMEMP3)
+	stw	rCHR,8(rMEMP3)
+	stw     rCHR,12(rMEMP3)
+	andi.	rTMP,rMEMP,127
+	nop
+	stw	rCHR,16(rMEMP3)
+        stw     rCHR,20(rMEMP3)
+	stw	rCHR,24(rMEMP3)
+        stw     rCHR,28(rMEMP3)
+L(getCacheAligned3):
+	beq	L(cacheAligned)
+/* At this point we can overrun the store queue (pipe reject) so it is
+   time to slow things down. The store queue can merge two adjacent
+   stores into a single L1/L2 op, but the L2 is clocked at 1/2 the CPU.
+   So we add "group ending nops" to guarantee that we dispatch only two
+   stores every other cycle. */
+	addi	rLEN,rLEN,-32
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,32(rMEMP3)
+        stw     rCHR,36(rMEMP3)
+	addi	rMEMP,rMEMP,32
+	cmplwi	cr1,rLEN,128
+	ori	r1,r1,0
+	stw	rCHR,40(rMEMP3)
+	stw     rCHR,44(rMEMP3)
+	cmplwi	cr6,rLEN,256
+	li	rMEMP2,128
+	ori	r1,r1,0
+	stw	rCHR,48(rMEMP3)
+        stw     rCHR,52(rMEMP3)
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,56(rMEMP3)
+        stw     rCHR,60(rMEMP3)
+	blt	cr1,L(cacheAligned1)
+	blt	cr6,L(cacheAligned128)
+	b	L(cacheAlignedx)
+
+/* Now we are aligned to the cache line and can use dcbz.  */
+        .align 4
+L(cacheAligned):
+	cmplwi	cr1,rLEN,128
+	cmplwi	cr6,rLEN,256
+	blt	cr1,L(cacheAligned1)
+	li	rMEMP2,128
+L(cacheAlignedx):
+	cmplwi	cr5,rLEN,640
+	blt	cr6,L(cacheAligned128)
+	bgt	cr5,L(cacheAligned512)
+	cmplwi	cr6,rLEN,512
+	dcbz	0,rMEMP
+	cmplwi	cr1,rLEN,384
+	dcbz	rMEMP2,rMEMP
+	addi	rMEMP,rMEMP,256
+	addi	rLEN,rLEN,-256
+	blt	cr1,L(cacheAligned1)
+	blt	cr6,L(cacheAligned128)
+	b	L(cacheAligned256)
+	.align 5
+/* A simple loop for the longer (>640 bytes) lengths.  This form limits
+   the branch mispredictions to exactly 1 at loop exit.  */
+L(cacheAligned512):
+	cmplwi	cr1,rLEN,128
+	blt	cr1,L(cacheAligned1)
+	dcbz	0,rMEMP
+	addi	rLEN,rLEN,-128
+	addi	rMEMP,rMEMP,128
+	b	L(cacheAligned512)
+        .align 5
+L(cacheAligned256):
+	cmplwi	cr6,rLEN,512
+	dcbz	0,rMEMP
+	cmplwi	cr1,rLEN,384
+	dcbz	rMEMP2,rMEMP
+	addi	rMEMP,rMEMP,256
+	addi	rLEN,rLEN,-256
+	bge	cr6,L(cacheAligned256)
+	blt	cr1,L(cacheAligned1)
+        .align 4
+L(cacheAligned128):
+	dcbz	0,rMEMP
+	addi	rMEMP,rMEMP,128
+	addi	rLEN,rLEN,-128
+        .align 4
+L(cacheAligned1):
+	cmplwi	cr1,rLEN,32
+	blt	cr1,L(handletail32)
+	addi	rMEMP3,rMEMP,32
+	addi	rLEN,rLEN,-32
+	stw	rCHR,0(rMEMP)
+        stw     rCHR,4(rMEMP)
+	stw	rCHR,8(rMEMP)
+	stw     rCHR,12(rMEMP)
+	stw	rCHR,16(rMEMP)
+        stw     rCHR,20(rMEMP)
+	addi	rMEMP,rMEMP,32
+	cmplwi	cr1,rLEN,32
+	stw	rCHR,-8(rMEMP3)
+        stw     rCHR,-4(rMEMP3)
+L(cacheAligned2):
+	blt	cr1,L(handletail32)
+	addi	rLEN,rLEN,-32
+	stw	rCHR,0(rMEMP3)
+        stw     rCHR,4(rMEMP3)
+	stw	rCHR,8(rMEMP3)
+	stw     rCHR,12(rMEMP3)
+	addi	rMEMP,rMEMP,32
+	cmplwi	cr1,rLEN,32
+	stw	rCHR,16(rMEMP3)
+        stw     rCHR,20(rMEMP3)
+	stw	rCHR,24(rMEMP3)
+        stw     rCHR,28(rMEMP3)
+	nop
+L(cacheAligned3):
+	blt	cr1,L(handletail32)
+/* At this point we can overrun the store queue (pipe reject) so it is
+   time to slow things down. The store queue can merge two adjacent
+   stores into a single L1/L2 op, but the L2 is clocked at 1/2 the CPU.
+   So we add "group ending nops" to guarantee that we dispatch only two
+   stores every other cycle. */
+	ori	r1,r1,0
+	ori	r1,r1,0
+	addi	rMEMP,rMEMP,32
+	addi	rLEN,rLEN,-32
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,32(rMEMP3)
+        stw     rCHR,36(rMEMP3)
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,40(rMEMP3)
+	stw     rCHR,44(rMEMP3)
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,48(rMEMP3)
+        stw     rCHR,52(rMEMP3)
+	ori	r1,r1,0
+	ori	r1,r1,0
+	stw	rCHR,56(rMEMP3)
+        stw     rCHR,60(rMEMP3)
+
+/* We are here because the length or remainder (rLEN) is less than the
+   cache line/sector size and does not justify aggressive loop unrolling.
+   So set up the preconditions for L(medium) and go there.  */
+        .align 3
+L(handletail32):
+	cmplwi	cr1,rLEN,0
+	beqlr   cr1
+	b	L(medium)
+
+	.align 4
+L(small):
+/* Memset of 4 bytes or less.  */
+	cmplwi	cr5, rLEN, 1
+	cmplwi	cr1, rLEN, 3
+	bltlr	cr5
+	stb	rCHR, 0(rMEMP)
+	beqlr	cr5
+	stb	rCHR, 1(rMEMP)
+	bltlr	cr1
+	stb	rCHR, 2(rMEMP)
+	beqlr	cr1
+	stb	rCHR, 3(rMEMP)
+	blr
+
+/* Memset of 0-31 bytes.  */
+	.align 5
+L(medium):
+	cmplwi	cr1, rLEN, 16
+L(medium_tail2):
+	add	rMEMP, rMEMP, rLEN
+L(medium_tail):
+	bt-	31, L(medium_31t)
+	bt-	30, L(medium_30t)
+L(medium_30f):
+	bt	29, L(medium_29t)
+L(medium_29f):
+	bge	cr1, L(medium_27t)
+	bflr	28
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	blr
+
+L(medium_31t):
+	stbu	rCHR, -1(rMEMP)
+	bf-	30, L(medium_30f)
+L(medium_30t):
+	sthu	rCHR, -2(rMEMP)
+	bf-	29, L(medium_29f)
+L(medium_29t):
+	stwu	rCHR, -4(rMEMP)
+	blt	cr1, L(medium_27f)
+L(medium_27t):
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+        stw     rCHR, -12(rMEMP)
+	stwu	rCHR, -16(rMEMP)
+L(medium_27f):
+	bflr	28
+L(medium_28t):
+        stw     rCHR, -4(rMEMP)
+	stw	rCHR, -8(rMEMP)
+	blr
+END (memset)
+libc_hidden_builtin_def (memset)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/multiarch/Implies
new file mode 100644
index 0000000000..ff9f999749
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power5+/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/Implies
new file mode 100644
index 0000000000..c0e1bea435
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/power6/fpu
+powerpc/powerpc32/power6
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/Implies
new file mode 100644
index 0000000000..d53ce2573c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power6/fpu
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/multiarch/Implies
new file mode 100644
index 0000000000..c66805ee63
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power5+/fpu/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/s_lrint.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/s_lrint.S
new file mode 100644
index 0000000000..cb780516b5
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/s_lrint.S
@@ -0,0 +1,41 @@
+/* Round double to long int.  POWER6x PowerPC32 version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+	.machine	"power6"
+/* long int[r3] __lrint (double x[fp1])  */
+ENTRY (__lrint)
+	fctiw	fp13,fp1
+	mftgpr  r3,fp13
+	blr
+	END (__lrint)
+
+weak_alias (__lrint, lrint)
+
+strong_alias (__lrint, __lrintf)
+weak_alias (__lrint, lrintf)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__lrint, __lrintl)
+weak_alias (__lrint, lrintl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __lrint, lrintl, GLIBC_2_1)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/s_lround.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/s_lround.S
new file mode 100644
index 0000000000..05b13cd34c
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/fpu/s_lround.S
@@ -0,0 +1,51 @@
+/* lround function.  POWER6x, PowerPC32 version.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* long [r3] lround (double x [fp1])
+   IEEE 1003.1 lround function.  IEEE specifies "round to the nearest
+   integer value, rounding halfway cases away from zero, regardless of
+   the current rounding mode."  However the PowerPC Architecture defines
+   "Round to Nearest" as "Choose the best approximation.  In case of a
+   tie, choose the one that is even (least significant bit 0)".
+   So we pre-round using the V2.02 Floating Round to Integer Nearest
+   instruction before we use the Floating Convert to Integer Word with
+   round to zero instruction.  */
+
+	.machine	"power6"
+ENTRY (__lround)
+	frin	fp2,fp1	/* Pre-round +-0.5.  */
+	fctiwz	fp3,fp2	/* Convert To Integer Word, round toward 0.  */
+	mftgpr	r3,fp3	/* Transfer fpr3 to r3.  */
+	blr
+	END (__lround)
+
+weak_alias (__lround, lround)
+
+strong_alias (__lround, __lroundf)
+weak_alias (__lround, lroundf)
+
+#ifdef NO_LONG_DOUBLE
+weak_alias (__lround, lroundl)
+strong_alias (__lround, __lroundl)
+#endif
+#if LONG_DOUBLE_COMPAT(libm, GLIBC_2_1)
+compat_symbol (libm, __lround, lroundl, GLIBC_2_1)
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/multiarch/Implies
new file mode 100644
index 0000000000..ff9f999749
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power6x/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power5+/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/Implies
new file mode 100644
index 0000000000..c0e1bea435
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/power6/fpu
+powerpc/powerpc32/power6
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/Makefile b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/Makefile
new file mode 100644
index 0000000000..5e8f4a28ba
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/Makefile
@@ -0,0 +1,4 @@
+ifeq ($(subdir),string)
+CFLAGS-strncase.c += -funroll-loops
+CFLAGS-strncase_l.c += -funroll-loops
+endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/multiarch/Implies
new file mode 100644
index 0000000000..45cbaede9f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power6/fpu/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_finite.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_finite.S
new file mode 100644
index 0000000000..da4efa0fb9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_finite.S
@@ -0,0 +1,93 @@
+/* finite().  PowerPC32/POWER7 version.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* int __finite(x)  */
+	.section    .rodata.cst8,"aM",@progbits,8
+	.align 3
+.LC0:   /* 1.0 */
+	.quad	    0x3ff0000000000000
+
+	.section    ".text"
+	.type	    __finite, @function
+	.machine    power7
+ENTRY (__finite)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfd	fp0,.LC0-got_label@l(r9)
+
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfd	fp0,.LC0@l(r9)
+#endif
+	ftdiv	cr7,fp1,fp0
+	li	r3,1
+	bflr	30
+
+	/* We have -INF/+INF/NaN or a denormal.  */
+
+	stwu	r1,-16(r1)    /* Allocate stack space.  */
+	stfd    fp1,8(r1)     /* Transfer FP to GPR's.  */
+
+	ori	2,2,0	      /* Force a new dispatch group.  */
+	lhz	r0,8+HISHORT(r1) /* Fetch the upper 16 bits of the FP value
+				    (biased exponent and sign bit).  */
+	clrlwi	r0,r0,17      /* r0 = abs(r0).  */
+	addi	r1,r1,16      /* Reset the stack pointer.  */
+	cmpwi	cr7,r0,0x7ff0 /* r0 == 0x7ff0?  */
+	bltlr	cr7	      /* LT means we have a denormal.  */
+	li	r3,0
+	blr
+	END (__finite)
+
+hidden_def (__finite)
+weak_alias (__finite, finite)
+
+/* It turns out that the 'double' version will also always work for
+   single-precision.  */
+strong_alias (__finite, __finitef)
+hidden_def (__finitef)
+weak_alias (__finitef, finitef)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__finite, __finitel)
+weak_alias (__finite, finitel)
+#endif
+
+#if IS_IN (libm)
+# if LONG_DOUBLE_COMPAT (libm, GLIBC_2_0)
+compat_symbol (libm, finite, finitel, GLIBC_2_0)
+# endif
+# if LONG_DOUBLE_COMPAT (libm, GLIBC_2_1)
+compat_symbol (libm, __finite, __finitel, GLIBC_2_1)
+# endif
+#else
+# if LONG_DOUBLE_COMPAT (libc, GLIBC_2_0)
+compat_symbol (libc, __finite, __finitel, GLIBC_2_0);
+compat_symbol (libc, finite, finitel, GLIBC_2_0);
+# endif
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_finitef.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_finitef.S
new file mode 100644
index 0000000000..54bd94176d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_finitef.S
@@ -0,0 +1 @@
+/* This function uses the same code as s_finite.S.  */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isinf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isinf.S
new file mode 100644
index 0000000000..668815761a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isinf.S
@@ -0,0 +1,85 @@
+/* isinf().  PowerPC32/POWER7 version.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* int __isinf(x)  */
+	.section    .rodata.cst8,"aM",@progbits,8
+	.align 3
+.LC0:   /* 1.0 */
+	.quad	    0x3ff0000000000000
+
+	.section    ".text"
+	.type	    __isinf, @function
+	.machine    power7
+ENTRY (__isinf)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfd	fp0,.LC0-got_label@l(r9)
+
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfd	fp0,.LC0@l(r9)
+#endif
+	ftdiv	cr7,fp1,fp0
+	li	r3,0
+	bflr    29	      /* If not INF, return.  */
+
+	/* Either we have +INF or -INF.  */
+
+	stwu    r1,-16(r1)    /* Allocate stack space.  */
+	stfd    fp1,8(r1)     /* Transfer FP to GPR's.  */
+	ori	2,2,0	      /* Force a new dispatch group.  */
+	lhz	r4,8+HISHORT(r1) /* Fetch the upper 16 bits of the FP value
+				    (biased exponent and sign bit).  */
+	addi	r1,r1,16      /* Reset the stack pointer.  */
+	cmpwi	cr7,r4,0x7ff0 /* r4 == 0x7ff0?  */
+	li	r3,1
+	beqlr   cr7	      /* EQ means INF, otherwise -INF.  */
+	li      r3,-1
+	blr
+	END (__isinf)
+
+hidden_def (__isinf)
+weak_alias (__isinf, isinf)
+
+/* It turns out that the 'double' version will also always work for
+   single-precision.  */
+strong_alias (__isinf, __isinff)
+hidden_def (__isinff)
+weak_alias (__isinff, isinff)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__isinf, __isinfl)
+weak_alias (__isinf, isinfl)
+#endif
+
+#if !IS_IN (libm)
+# if LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __isinf, __isinfl, GLIBC_2_0);
+compat_symbol (libc, isinf, isinfl, GLIBC_2_0);
+# endif
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isinff.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isinff.S
new file mode 100644
index 0000000000..be759e091e
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isinff.S
@@ -0,0 +1 @@
+/* This function uses the same code as s_isinf.S.  */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isnan.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isnan.S
new file mode 100644
index 0000000000..433137f1c4
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isnan.S
@@ -0,0 +1,90 @@
+/* isnan().  PowerPC32/POWER7 version.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <math_ldbl_opt.h>
+
+/* int __isnan(x)  */
+	.section    .rodata.cst8,"aM",@progbits,8
+	.align 3
+.LC0:   /* 1.0 */
+	.quad	    0x3ff0000000000000
+
+	.section    ".text"
+	.type	    __isnan, @function
+	.machine    power7
+ENTRY (__isnan)
+#ifdef SHARED
+	mflr	r11
+	cfi_register(lr,r11)
+
+	SETUP_GOT_ACCESS(r9,got_label)
+	addis	r9,r9,.LC0-got_label@ha
+	lfd	fp0,.LC0-got_label@l(r9)
+
+	mtlr	r11
+	cfi_same_value (lr)
+#else
+	lis	r9,.LC0@ha
+	lfd	fp0,.LC0@l(r9)
+#endif
+	ftdiv	cr7,fp1,fp0
+	li	r3,0
+	bflr	30	      /* If not NaN or Inf, finish. */
+
+	/* We have -INF/+INF/NaN or a denormal.  */
+
+	stwu	r1,-16(r1)    /* Allocate stack space.  */
+	stfd	fp1,8(r1)     /* Transfer FP to GPR's.  */
+	ori	2,2,0	      /* Force a new dispatch group.  */
+	lwz     r4,8+HIWORD(r1) /* Load the upper half of the FP value.  */
+	lwz     r5,8+LOWORD(r1) /* Load the lower half of the FP value.  */
+	addi	r1,r1,16      /* Reset the stack pointer.  */
+	lis     r0,0x7ff0     /* Load the upper portion for an INF/NaN.  */
+	clrlwi  r4,r4,1	      /* r4 = abs(r4).  */
+	cmpw    cr7,r4,r0     /* if (abs(r4) <= inf).  */
+	cmpwi   cr6,r5,0      /* r5 == 0x00000000?  */
+	bltlr	cr7	      /* LT means we have a denormal.  */
+	bgt	cr7,L(NaN)    /* GT means we have a NaN.  */
+	beqlr	cr6	      /* EQ means we have +/-INF.  */
+L(NaN):
+	li      r3,1	      /* x == NaN?  */
+	blr
+	END (__isnan)
+
+hidden_def (__isnan)
+weak_alias (__isnan, isnan)
+
+/* It turns out that the 'double' version will also always work for
+   single-precision.  */
+strong_alias (__isnan, __isnanf)
+hidden_def (__isnanf)
+weak_alias (__isnanf, isnanf)
+
+#ifdef NO_LONG_DOUBLE
+strong_alias (__isnan, __isnanl)
+weak_alias (__isnan, isnanl)
+#endif
+
+#if !IS_IN (libm)
+# if LONG_DOUBLE_COMPAT(libc, GLIBC_2_0)
+compat_symbol (libc, __isnan, __isnanl, GLIBC_2_0);
+compat_symbol (libc, isnan, isnanl, GLIBC_2_0);
+# endif
+#endif
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isnanf.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isnanf.S
new file mode 100644
index 0000000000..b48c85e0d3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/fpu/s_isnanf.S
@@ -0,0 +1 @@
+/* This function uses the same code as s_isnan.S.  */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memchr.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memchr.S
new file mode 100644
index 0000000000..9ce8507a82
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memchr.S
@@ -0,0 +1,193 @@
+/* Optimized memchr implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* void * [r3] memchr (char *s [r3], int byte [r4], int size [r5])  */
+	.machine  power7
+ENTRY (__memchr)
+	CALL_MCOUNT
+	dcbt	0,r3
+	clrrwi  r8,r3,2
+	insrwi	r4,r4,8,16    /* Replicate byte to word.  */
+
+	/* Calculate the last acceptable address and check for possible
+	   addition overflow by using saturated math:
+	   r7 = r3 + r5
+	   r7 |= -(r7 < r3)  */
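+	/* In C terms (an illustrative sketch):
+
+	     #include <stdint.h>
+
+	     static uintptr_t
+	     last_addr_sketch (uintptr_t s, uintptr_t n)
+	     {
+	       uintptr_t end = s + n;
+	       if (end < s)            // wrapped: saturate to the max address
+	         end = (uintptr_t) -1;
+	       return end;
+	     }
+	 */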
+	add     r7,r3,r5
+	subfc   r6,r3,r7
+	subfe   r9,r9,r9
+	or      r7,r7,r9
+
+	insrwi	r4,r4,16,0
+	cmplwi	r5,16
+	li	r9, -1
+	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
+	addi	r7,r7,-1
+#ifdef __LITTLE_ENDIAN__
+	slw	r9,r9,r6
+#else
+	srw	r9,r9,r6
+#endif
+	ble	L(small_range)
+
+	lwz	r12,0(r8)     /* Load word from memory.  */
+	cmpb	r3,r12,r4     /* Check for BYTEs in WORD1.  */
+	and	r3,r3,r9
+	clrlwi	r5,r7,30      /* Byte count - 1 in last word.  */
+	clrrwi	r7,r7,2       /* Address of last word.  */
+	cmplwi	cr7,r3,0      /* If r3 == 0, no BYTEs have been found.  */
+	bne	cr7,L(done)
+
+	mtcrf   0x01,r8
+	/* Are we now aligned to a doubleword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+	bt	29,L(loop_setup)
+
+	/* Handle WORD2 of pair.  */
+	lwzu	r12,4(r8)
+	cmpb	r3,r12,r4
+	cmplwi	cr7,r3,0
+	bne	cr7,L(done)
+
+L(loop_setup):
+	/* The last word we want to read in the loop below is the one
+	   containing the last byte of the string, ie. the word at
+	   (s + size - 1) & ~3, or r7.  The first word read is at
+	   r8 + 4, we read 2 * cnt words, so the last word read will
+	   be at r8 + 4 + 8 * cnt - 4.  Solving for cnt gives
+	   cnt = (r7 - r8) / 8  */
+	sub	r6,r7,r8
+	srwi	r6,r6,3	      /* Number of loop iterations.  */
+	mtctr	r6            /* Setup the counter.  */
+
+	/* Main loop to look for BYTE in the string.  Since
+	   it's a small loop (8 instructions), align it to 32-bytes.  */
+	.align	5
+L(loop):
+	/* Load two words, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the byte-checking process for bigger strings.  */
+	lwz	r12,4(r8)
+	lwzu	r11,8(r8)
+	cmpb	r3,r12,r4
+	cmpb	r9,r11,r4
+	or	r6,r9,r3      /* Merge everything in one word.  */
+	cmplwi	cr7,r6,0
+	bne	cr7,L(found)
+	bdnz	L(loop)
+
+	/* We may have one more word to read.  */
+	cmplw	r8,r7
+	beqlr
+
+	lwzu	r12,4(r8)
+	cmpb	r3,r12,r4
+	cmplwi	cr6,r3,0
+	bne	cr6,L(done)
+	blr
+
+	.align	4
+L(found):
+	/* OK, one (or both) of the words contains BYTE.  Check
+	   the first word and decrement the address in case the first
+	   word really contains BYTE.  */
+	cmplwi	cr6,r3,0
+	addi	r8,r8,-4
+	bne	cr6,L(done)
+
+	/* BYTE must be in the second word.  Adjust the address
+	   again and move the result of cmpb to r3 so we can calculate the
+	   pointer.  */
+
+	mr	r3,r9
+	addi	r8,r8,4
+
+	/* r3 has the output of the cmpb instruction, that is, it contains
+	   0xff in the same position as BYTE in the original
+	   word from the string.  Use that to calculate the pointer.
+	   We need to make sure BYTE is *before* the end of the range.  */
+L(done):
+#ifdef __LITTLE_ENDIAN__
+	addi    r0,r3,-1
+	andc    r0,r0,r3
+	popcntw	r0,r0	      /* Count trailing zeros.  */
+#else
+	cntlzw	r0,r3	      /* Count leading zeros before the match.  */
+#endif
+	cmplw	r8,r7         /* Are we on the last word?  */
+	srwi	r0,r0,3	      /* Convert leading/trailing zeros to bytes.  */
+	add	r3,r8,r0
+	cmplw	cr7,r0,r5     /* If on the last word, check byte offset.  */
+	bnelr
+	blelr	cr7
+	li	r3,0
+	blr
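A hedged C model of the pointer math in L(done) above: the cmpb mask holds
0xff in every byte lane that matched, and the match closest to the lowest
address is found with a leading- or trailing-zero count depending on
endianness.  The mask must be nonzero here; the builtins and names are only
for illustration:

	char *
	first_match (char *word_addr, unsigned int mask, int little_endian)
	{
	  unsigned int bits = little_endian ? __builtin_ctz (mask)
					    : __builtin_clz (mask);
	  return word_addr + bits / 8;	/* r8 + r0 in the code above.  */
	}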
+
+	.align	4
+L(null):
+	li	r3,0
+	blr
+
+/* Deals with size <= 16.  */
+	.align	4
+L(small_range):
+	cmplwi	r5,0
+	beq	L(null)
+	lwz	r12,0(r8)     /* Load word from memory.  */
+	cmpb	r3,r12,r4     /* Check for BYTE in WORD1.  */
+	and	r3,r3,r9
+	cmplwi	cr7,r3,0
+	clrlwi	r5,r7,30      /* Byte count - 1 in last word.  */
+	clrrwi	r7,r7,2       /* Address of last word.  */
+	cmplw	r8,r7         /* Are we done already?  */
+	bne	cr7,L(done)
+	beqlr
+
+	lwzu	r12,4(r8)
+	cmpb	r3,r12,r4
+	cmplwi	cr6,r3,0
+	cmplw	r8,r7
+	bne	cr6,L(done)
+	beqlr
+
+	lwzu	r12,4(r8)
+	cmpb	r3,r12,r4
+	cmplwi	cr6,r3,0
+	cmplw	r8,r7
+	bne	cr6,L(done)
+	beqlr
+
+	lwzu	r12,4(r8)
+	cmpb	r3,r12,r4
+	cmplwi	cr6,r3,0
+	cmplw	r8,r7
+	bne	cr6,L(done)
+	beqlr
+
+	lwzu	r12,4(r8)
+	cmpb	r3,r12,r4
+	cmplwi	cr6,r3,0
+	bne	cr6,L(done)
+	blr
+
+END (__memchr)
+weak_alias (__memchr, memchr)
+libc_hidden_builtin_def (memchr)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memcmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memcmp.S
new file mode 100644
index 0000000000..09c9b9bf4d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memcmp.S
@@ -0,0 +1,1375 @@
+/* Optimized memcmp implementation for POWER7/PowerPC32.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* int [r3] memcmp (const char *s1 [r3],
+		    const char *s2 [r4],
+		    size_t size [r5])  */
+
+	.machine power7
+EALIGN (memcmp, 4, 0)
+	CALL_MCOUNT
+
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rN	r5	/* max string length */
+#define rWORD1	r6	/* current word in s1 */
+#define rWORD2	r7	/* current word in s2 */
+#define rWORD3	r8	/* next word in s1 */
+#define rWORD4	r9	/* next word in s2 */
+#define rWORD5	r10	/* next word in s1 */
+#define rWORD6	r11	/* next word in s2 */
+#define rWORD7	r30	/* next word in s1 */
+#define rWORD8	r31	/* next word in s2 */
+
+	xor	r0, rSTR2, rSTR1
+	cmplwi	cr6, rN, 0
+	cmplwi	cr1, rN, 12
+	clrlwi.	r0, r0, 30
+	clrlwi	r12, rSTR1, 30
+	cmplwi	cr5, r12, 0
+	beq-	cr6, L(zeroLength)
+	dcbt	0, rSTR1
+	dcbt	0, rSTR2
+/* If less than 12 bytes or not aligned, use the unaligned
+   byte loop.  */
+	blt	cr1, L(bytealigned)
+	stwu	1, -64(r1)
+	cfi_adjust_cfa_offset(64)
+	stw	rWORD8, 48(r1)
+	stw	rWORD7, 44(r1)
+	cfi_offset(rWORD8, (48-64))
+	cfi_offset(rWORD7, (44-64))
+	bne	L(unaligned)
+/* At this point we know both strings have the same alignment and the
+   compare length is at least 8 bytes.  r12 contains the low order
+   2 bits of rSTR1 and cr5 contains the result of the logical compare
+   of r12 to 0.  If r12 == 0 then we are already word
+   aligned and can perform the word aligned loop.
+
+   Otherwise we know the two strings have the same alignment (but not
+   yet word aligned).  So we force the string addresses to the next lower
+   word boundary and special case this first word using shift left to
+   eliminate bits preceding the first byte.  Since we want to join the
+   normal (word aligned) compare loop, starting at the second word,
+   we need to adjust the length (rN) and special case the loop
+   versioning for the first word. This ensures that the loop count is
+   correct and the first word (shifted) is in the expected register pair. */
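Not the algorithm used below, just a hedged C sketch of the word-at-a-time
idea the comment describes: compare aligned 32-bit words and drop back to a
byte compare for the word that differs (endianness handling omitted):

	#include <stddef.h>
	#include <string.h>

	int
	wordwise_memcmp (const unsigned char *a, const unsigned char *b, size_t n)
	{
	  while (n >= 4)
	    {
	      unsigned int wa, wb;
	      memcpy (&wa, a, 4);	/* Aligned word loads in the real code.  */
	      memcpy (&wb, b, 4);
	      if (wa != wb)
		break;			/* Resolve the difference bytewise.  */
	      a += 4; b += 4; n -= 4;
	    }
	  for (; n > 0; a++, b++, n--)
	    if (*a != *b)
	      return *a - *b;
	  return 0;
	}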
+	.align	4
+L(samealignment):
+	clrrwi	rSTR1, rSTR1, 2
+	clrrwi	rSTR2, rSTR2, 2
+	beq	cr5, L(Waligned)
+	add	rN, rN, r12
+	slwi	rWORD6, r12, 3
+	srwi	r0, rN, 4	/* Divide by 16 */
+	andi.	r12, rN, 12	/* Get the word remainder */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+#endif
+	cmplwi	cr1, r12, 8
+	cmplwi	cr7, rN, 16
+	clrlwi	rN, rN, 30
+	beq	L(dPs4)
+	mtctr	r0
+	bgt	cr1, L(dPs3)
+	beq	cr1, L(dPs2)
+
+/* Remainder is 4 */
+	.align	3
+L(dsP1):
+	slw	rWORD5, rWORD1, rWORD6
+	slw	rWORD6, rWORD2, rWORD6
+	cmplw	cr5, rWORD5, rWORD6
+	blt	cr7, L(dP1x)
+/* Do something useful in this cycle since we have to branch anyway.  */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	b	L(dP1e)
+/* Remainder is 8 */
+	.align	4
+L(dPs2):
+	slw	rWORD5, rWORD1, rWORD6
+	slw	rWORD6, rWORD2, rWORD6
+	cmplw	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP2x)
+/* Do something useful in this cycle since we have to branch anyway.  */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 4(rSTR1)
+	lwz	rWORD8, 4(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	b	L(dP2e)
+/* Remainder is 12 */
+	.align	4
+L(dPs3):
+	slw	rWORD3, rWORD1, rWORD6
+	slw	rWORD4, rWORD2, rWORD6
+	cmplw	cr1, rWORD3, rWORD4
+	b	L(dP3e)
+/* Count is a multiple of 16, remainder is 0 */
+	.align	4
+L(dPs4):
+	mtctr	r0
+	slw	rWORD1, rWORD1, rWORD6
+	slw	rWORD2, rWORD2, rWORD6
+	cmplw	cr7, rWORD1, rWORD2
+	b	L(dP4e)
+
+/* At this point we know both strings are word aligned and the
+   compare length is at least 8 bytes.  */
+	.align	4
+L(Waligned):
+	andi.	r12, rN, 12	/* Get the word remainder */
+	srwi	r0, rN, 4	/* Divide by 16 */
+	cmplwi	cr1, r12, 8
+	cmplwi	cr7, rN, 16
+	clrlwi	rN, rN, 30
+	beq	L(dP4)
+	bgt	cr1, L(dP3)
+	beq	cr1, L(dP2)
+
+/* Remainder is 4 */
+	.align	4
+L(dP1):
+	mtctr	r0
+/* Normally we'd use rWORD7/rWORD8 here, but since we might exit early
+   (8-15 byte compare), we want to use only volatile registers.  This
+   means we can avoid restoring non-volatile registers since we did not
+   change any on the early exit path.  The key here is the non-early
+   exit path only cares about the condition code (cr5), not about which
+   register pair was used.  */
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 0(rSTR1)
+	lwz	rWORD6, 0(rSTR2)
+#endif
+	cmplw	cr5, rWORD5, rWORD6
+	blt	cr7, L(dP1x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+L(dP1e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5x)
+	bne	cr7, L(dLcr7x)
+
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 16(rSTR1)
+	lwzu	rWORD8, 16(rSTR2)
+#endif
+	bne	cr1, L(dLcr1)
+	cmplw	cr5, rWORD7, rWORD8
+	bdnz	L(dLoop)
+	bne	cr6, L(dLcr6)
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+	.align	3
+L(dP1x):
+	slwi.	r12, rN, 3
+	bne	cr5, L(dLcr5x)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	addi	r1, r1, 64
+	cfi_adjust_cfa_offset(-64)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+
+/* Remainder is 8 */
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dP2):
+	mtctr	r0
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 0(rSTR1)
+	lwz	rWORD6, 0(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP2x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 4(rSTR1)
+	lwz	rWORD8, 4(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+L(dP2e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 8(rSTR1)
+	lwz	rWORD2, 8(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 12(rSTR1)
+	lwz	rWORD4, 12(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#endif
+	bne	cr6, L(dLcr6)
+	bne	cr5, L(dLcr5)
+	b	L(dLoop2)
+/* Again we are on an early exit path (16-23 byte compare), so we want
+   to use only volatile registers and avoid restoring non-volatile
+   registers.  */
+	.align	4
+L(dP2x):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 4(rSTR1)
+	lwz	rWORD4, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	slwi.	r12, rN, 3
+	bne	cr6, L(dLcr6x)
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#endif
+	bne	cr1, L(dLcr1x)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	addi	r1, r1, 64
+	cfi_adjust_cfa_offset(-64)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+
+/* Remainder is 12 */
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dP3):
+	mtctr	r0
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 0(rSTR1)
+	lwz	rWORD4, 0(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+L(dP3e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 4(rSTR1)
+	lwz	rWORD6, 4(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	blt	cr7, L(dP3x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 8(rSTR1)
+	lwz	rWORD8, 8(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 12(rSTR1)
+	lwz	rWORD2, 12(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+#endif
+	bne	cr1, L(dLcr1)
+	bne	cr6, L(dLcr6)
+	b	L(dLoop1)
+/* Again we are on an early exit path (24-31 byte compare), so we want
+   to use only volatile registers and avoid restoring non-volatile
+   registers.  */
+	.align	4
+L(dP3x):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 8(rSTR1)
+	lwz	rWORD2, 8(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	slwi.	r12, rN, 3
+	bne	cr1, L(dLcr1x)
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+#endif
+	bne	cr6, L(dLcr6x)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	bne	cr7, L(dLcr7x)
+	addi	r1, r1, 64
+	cfi_adjust_cfa_offset(-64)
+	bne	L(d00)
+	li	rRTN, 0
+	blr
+
+/* Count is a multiple of 16, remainder is 0 */
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dP4):
+	mtctr	r0
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+L(dP4e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 4(rSTR1)
+	lwz	rWORD4, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 8(rSTR1)
+	lwz	rWORD6, 8(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 12(rSTR1)
+	lwzu	rWORD8, 12(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr7, L(dLcr7)
+	bne	cr1, L(dLcr1)
+	bdz-	L(d24)		/* Adjust CTR as we start with +4 */
+/* This is the primary loop */
+	.align	4
+L(dLoop):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(dLcr6)
+L(dLoop1):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+L(dLoop2):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr7, L(dLcr7)
+L(dLoop3):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 16(rSTR1)
+	lwzu	rWORD8, 16(rSTR2)
+#endif
+	bne	cr1, L(dLcr1)
+	cmplw	cr7, rWORD1, rWORD2
+	bdnz	L(dLoop)
+
+L(dL4):
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(dLcr6)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(dLcr5)
+	cmplw	cr5, rWORD7, rWORD8
+L(d44):
+	bne	cr7, L(dLcr7)
+L(d34):
+	bne	cr1, L(dLcr1)
+L(d24):
+	bne	cr6, L(dLcr6)
+L(d14):
+	slwi.	r12, rN, 3
+	bne	cr5, L(dLcr5)
+L(d04):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+	addi	r1, r1, 64
+	cfi_adjust_cfa_offset(-64)
+	subfic	rN, r12, 32	/* Shift count is 32 - (rN * 8).  */
+	beq	L(zeroLength)
+/* At this point we have a remainder of 1 to 3 bytes to compare.  Since
+   we are aligned it is safe to load the whole word, and use
+   shift right to eliminate bits beyond the compare length.  */
+L(d00):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	rWORD1, rWORD1, rN
+	srw	rWORD2, rWORD2, rN
+	sub	rRTN, rWORD1, rWORD2
+	blr
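A hedged C model of the L(d00) tail above: the remaining 1-3 bytes are
compared by loading the whole aligned word and shifting out the bytes beyond
the compare length (the little-endian build uses byte-reversed loads so the
numeric compare still orders bytes by address):

	int
	tail_compare (unsigned int w1, unsigned int w2, unsigned int tail_bytes)
	{
	  unsigned int shift = 32 - tail_bytes * 8;	/* rN after the subfic.  */
	  return (int) ((w1 >> shift) - (w2 >> shift));
	}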
+
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dLcr7):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+L(dLcr7x):
+	li	rRTN, 1
+	addi	r1, r1, 64
+	cfi_adjust_cfa_offset(-64)
+	bgtlr	cr7
+	li	rRTN, -1
+	blr
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dLcr1):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+L(dLcr1x):
+	li	rRTN, 1
+	addi	r1, r1, 64
+	cfi_adjust_cfa_offset(-64)
+	bgtlr	cr1
+	li	rRTN, -1
+	blr
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dLcr6):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+L(dLcr6x):
+	li	rRTN, 1
+	addi	r1, r1, 64
+	cfi_adjust_cfa_offset(-64)
+	bgtlr	cr6
+	li	rRTN, -1
+	blr
+	.align	4
+	cfi_adjust_cfa_offset(64)
+L(dLcr5):
+	lwz	rWORD7, 44(r1)
+	lwz	rWORD8, 48(r1)
+L(dLcr5x):
+	li	rRTN, 1
+	addi	r1, r1, 64
+	cfi_adjust_cfa_offset(-64)
+	bgtlr	cr5
+	li	rRTN, -1
+	blr
+
+	.align	4
+L(bytealigned):
+	mtctr	rN
+
+/* We need to prime this loop.  This loop is swing modulo scheduled
+   to avoid pipe delays.  The dependent instruction latencies (load to
+   compare to conditional branch) are 2 to 3 cycles.  In this loop each
+   dispatch group ends in a branch and takes 1 cycle.  Effectively
+   the first iteration of the loop only serves to load operands and
+   branches based on compares are delayed until the next iteration.
+
+   So we must precondition some registers and condition codes so that
+   we don't exit the loop early on the first iteration.  */
+
+	lbz	rWORD1, 0(rSTR1)
+	lbz	rWORD2, 0(rSTR2)
+	bdz	L(b11)
+	cmplw	cr7, rWORD1, rWORD2
+	lbz	rWORD3, 1(rSTR1)
+	lbz	rWORD4, 1(rSTR2)
+	bdz	L(b12)
+	cmplw	cr1, rWORD3, rWORD4
+	lbzu	rWORD5, 2(rSTR1)
+	lbzu	rWORD6, 2(rSTR2)
+	bdz	L(b13)
+	.align	4
+L(bLoop):
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	bne	cr7, L(bLcr7)
+
+	cmplw	cr6, rWORD5, rWORD6
+	bdz	L(b3i)
+
+	lbzu	rWORD3, 1(rSTR1)
+	lbzu	rWORD4, 1(rSTR2)
+	bne	cr1, L(bLcr1)
+
+	cmplw	cr7, rWORD1, rWORD2
+	bdz	L(b2i)
+
+	lbzu	rWORD5, 1(rSTR1)
+	lbzu	rWORD6, 1(rSTR2)
+	bne	cr6, L(bLcr6)
+
+	cmplw	cr1, rWORD3, rWORD4
+	bdnz	L(bLoop)
+
+/* We speculatively load bytes before we have tested the previous
+   bytes.  But we must avoid overrunning the length (held in the CTR) to
+   prevent these speculative loads from causing a segfault.  In that
+   case the loop will exit early (before all the pending bytes have
+   been tested), and we must complete the pending operations
+   before returning.  */
+L(b1i):
+	bne	cr7, L(bLcr7)
+	bne	cr1, L(bLcr1)
+	b	L(bx56)
+	.align	4
+L(b2i):
+	bne	cr6, L(bLcr6)
+	bne	cr7, L(bLcr7)
+	b	L(bx34)
+	.align	4
+L(b3i):
+	bne	cr1, L(bLcr1)
+	bne	cr6, L(bLcr6)
+	b	L(bx12)
+	.align	4
+L(bLcr7):
+	li	rRTN, 1
+	bgtlr	cr7
+	li	rRTN, -1
+	blr
+L(bLcr1):
+	li	rRTN, 1
+	bgtlr	cr1
+	li	rRTN, -1
+	blr
+L(bLcr6):
+	li	rRTN, 1
+	bgtlr	cr6
+	li	rRTN, -1
+	blr
+
+L(b13):
+	bne	cr7, L(bx12)
+	bne	cr1, L(bx34)
+L(bx56):
+	sub	rRTN, rWORD5, rWORD6
+	blr
+	nop
+L(b12):
+	bne	cr7, L(bx12)
+L(bx34):
+	sub	rRTN, rWORD3, rWORD4
+	blr
+L(b11):
+L(bx12):
+	sub	rRTN, rWORD1, rWORD2
+	blr
+	.align	4
+L(zeroLength):
+	li	rRTN, 0
+	blr
+
+	.align	4
+/* At this point we know the strings have different alignment and the
+   compare length is at least 8 bytes.  r12 contains the low order
+   2 bits of rSTR1 and cr5 contains the result of the logical compare
+   of r12 to 0.  If r12 == 0 then rSTR1 is word aligned and we can
+   perform the Wunaligned loop.
+
+   Otherwise we know that rSTR1 is not yet word aligned.
+   So we force the string addresses to the next lower word
+   boundary and special case this first word using shift left to
+   eliminate bits preceding the first byte.  Since we want to join the
+   normal (Wunaligned) compare loop, starting at the second word,
+   we need to adjust the length (rN) and special case the loop
+   versioning for the first W.  This ensures that the loop count is
+   correct and the first W (shifted) is in the expected register pair.  */
+#define rSHL		r29	/* Unaligned shift left count.  */
+#define rSHR		r28	/* Unaligned shift right count.  */
+#define rWORD8_SHIFT	r27	/* Left rotation temp for rWORD2.  */
+#define rWORD2_SHIFT	r26	/* Left rotation temp for rWORD4.  */
+#define rWORD4_SHIFT	r25	/* Left rotation temp for rWORD6.  */
+#define rWORD6_SHIFT	r24	/* Left rotation temp for rWORD8.  */
+	cfi_adjust_cfa_offset(64)
+L(unaligned):
+	stw	rSHL, 40(r1)
+	cfi_offset(rSHL, (40-64))
+	clrlwi	rSHL, rSTR2, 30
+	stw	rSHR, 36(r1)
+	cfi_offset(rSHR, (36-64))
+	beq	cr5, L(Wunaligned)
+	stw	rWORD8_SHIFT, 32(r1)
+	cfi_offset(rWORD8_SHIFT, (32-64))
+/* Adjust the logical start of rSTR2 to compensate for the extra bits
+   in the 1st rSTR1 W.  */
+	sub	rWORD8_SHIFT, rSTR2, r12
+/* But do not attempt to address the W before the one that contains
+   the actual start of rSTR2.  */
+	clrrwi	rSTR2, rSTR2, 2
+	stw	rWORD2_SHIFT, 28(r1)
+/* Compute the left/right shift counts for the unaligned rSTR2,
+   compensating for the logical (W aligned) start of rSTR1.  */
+	clrlwi	rSHL, rWORD8_SHIFT, 30
+	clrrwi	rSTR1, rSTR1, 2
+	stw	rWORD4_SHIFT, 24(r1)
+	slwi	rSHL, rSHL, 3
+	cmplw	cr5, rWORD8_SHIFT, rSTR2
+	add	rN, rN, r12
+	slwi	rWORD6, r12, 3
+	stw	rWORD6_SHIFT, 20(r1)
+	cfi_offset(rWORD2_SHIFT, (28-64))
+	cfi_offset(rWORD4_SHIFT, (24-64))
+	cfi_offset(rWORD6_SHIFT, (20-64))
+	subfic	rSHR, rSHL, 32
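With rSHL and rSHR now set, every word of the unaligned rSTR2 below is
rebuilt from two aligned loads.  A hedged C sketch of that merge, as done by
the srw/slw/or triples throughout the L(du*) code (assumes 0 < shl < 32,
which holds because the two strings have different word offsets):

	unsigned int
	merge_unaligned (unsigned int prev_word, unsigned int next_word,
			 unsigned int shl, unsigned int shr)
	{
	  /* shr == 32 - shl; the more significant bytes come from the
	     previously loaded aligned word, the rest from the new one.  */
	  return (prev_word << shl) | (next_word >> shr);
	}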
+	srwi	r0, rN, 4	/* Divide by 16 */
+	andi.	r12, rN, 12	/* Get the W remainder */
+/* We normally need to load 2 Ws to start the unaligned rSTR2, but in
+   this special case those bits may be discarded anyway.  Also we
+   must avoid loading a W where none of the bits are part of rSTR2 as
+   this may cross a page boundary and cause a page fault.  */
+	li	rWORD8, 0
+	blt	cr5, L(dus0)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD8, 0(rSTR2)
+	addi	rSTR2, rSTR2, 4
+#endif
+	slw	rWORD8, rWORD8, rSHL
+
+L(dus0):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+#endif
+	cmplwi	cr1, r12, 8
+	cmplwi	cr7, rN, 16
+	srw	r12, rWORD2, rSHR
+	clrlwi	rN, rN, 30
+	beq	L(duPs4)
+	mtctr	r0
+	or	rWORD8, r12, rWORD8
+	bgt	cr1, L(duPs3)
+	beq	cr1, L(duPs2)
+
+/* Remainder is 4 */
+	.align	4
+L(dusP1):
+	slw	rWORD8_SHIFT, rWORD2, rSHL
+	slw	rWORD7, rWORD1, rWORD6
+	slw	rWORD8, rWORD8, rWORD6
+	bge	cr7, L(duP1e)
+/* At this point we exit early with the first word compare
+   complete and remainder of 0 to 3 bytes.  See L(du14) for details on
+   how we handle the remaining bytes.  */
+	cmplw	cr5, rWORD7, rWORD8
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	b	L(dutrim)
+/* Remainder is 8 */
+	.align	4
+L(duPs2):
+	slw	rWORD6_SHIFT, rWORD2, rSHL
+	slw	rWORD5, rWORD1, rWORD6
+	slw	rWORD6, rWORD8, rWORD6
+	b	L(duP2e)
+/* Remainder is 12 */
+	.align	4
+L(duPs3):
+	slw	rWORD4_SHIFT, rWORD2, rSHL
+	slw	rWORD3, rWORD1, rWORD6
+	slw	rWORD4, rWORD8, rWORD6
+	b	L(duP3e)
+/* Count is a multiple of 16, remainder is 0 */
+	.align	4
+L(duPs4):
+	mtctr	r0
+	or	rWORD8, r12, rWORD8
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	slw	rWORD1, rWORD1, rWORD6
+	slw	rWORD2, rWORD8, rWORD6
+	b	L(duP4e)
+
+/* At this point we know rSTR1 is word aligned and the
+   compare length is at least 8 bytes.  */
+	.align	4
+L(Wunaligned):
+	stw	rWORD8_SHIFT, 32(r1)
+	clrrwi	rSTR2, rSTR2, 2
+	stw	rWORD2_SHIFT, 28(r1)
+	srwi	r0, rN, 4	/* Divide by 16 */
+	stw	rWORD4_SHIFT, 24(r1)
+	andi.	r12, rN, 12	/* Get the W remainder */
+	stw	rWORD6_SHIFT, 20(r1)
+	cfi_offset(rWORD8_SHIFT, (32-64))
+	cfi_offset(rWORD2_SHIFT, (28-64))
+	cfi_offset(rWORD4_SHIFT, (24-64))
+	cfi_offset(rWORD6_SHIFT, (20-64))
+	slwi	rSHL, rSHL, 3
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD6, 0(rSTR2)
+	lwzu	rWORD8, 4(rSTR2)
+#endif
+	cmplwi	cr1, r12, 8
+	cmplwi	cr7, rN, 16
+	clrlwi	rN, rN, 30
+	subfic	rSHR, rSHL, 32
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	beq	L(duP4)
+	mtctr	r0
+	bgt	cr1, L(duP3)
+	beq	cr1, L(duP2)
+
+/* Remainder is 4 */
+	.align	4
+L(duP1):
+	srw	r12, rWORD8, rSHR
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	addi	rSTR1, rSTR1, 4
+#else
+	lwz	rWORD7, 0(rSTR1)
+#endif
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	blt	cr7, L(duP1x)
+L(duP1e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	srw	r0, rWORD2, rSHR
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	or	rWORD2, r0, rWORD8_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	srw	r12, rWORD4, rSHR
+	slw	rWORD4_SHIFT, rWORD4, rSHL
+	bne	cr5, L(duLcr5)
+	or	rWORD4, r12, rWORD2_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	srw	r0, rWORD6, rSHR
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	bne	cr7, L(duLcr7)
+	or	rWORD6, r0, rWORD4_SHIFT
+	cmplw	cr6, rWORD5, rWORD6
+	b	L(duLoop3)
+	.align	4
+/* At this point we exit early with the first word compare
+   complete and remainder of 0 to 3 bytes.  See L(du14) for details on
+   how we handle the remaining bytes.  */
+L(duP1x):
+	cmplw	cr5, rWORD7, rWORD8
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 8(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	b	L(dutrim)
+/* Remainder is 8 */
+	.align	4
+L(duP2):
+	srw	r0, rWORD8, rSHR
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	addi	rSTR1, rSTR1, 4
+#else
+	lwz	rWORD5, 0(rSTR1)
+#endif
+	or	rWORD6, r0, rWORD6_SHIFT
+	slw	rWORD6_SHIFT, rWORD8, rSHL
+L(duP2e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 4(rSTR1)
+	lwz	rWORD8, 4(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	srw	r12, rWORD8, rSHR
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	blt	cr7, L(duP2x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 8(rSTR1)
+	lwz	rWORD2, 8(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	srw	r0, rWORD2, rSHR
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	or	rWORD2, r0, rWORD8_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 12(rSTR1)
+	lwz	rWORD4, 12(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	bne	cr5, L(duLcr5)
+	srw	r12, rWORD4, rSHR
+	slw	rWORD4_SHIFT, rWORD4, rSHL
+	or	rWORD4, r12, rWORD2_SHIFT
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	b	L(duLoop2)
+	.align	4
+L(duP2x):
+	cmplw	cr5, rWORD7, rWORD8
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#endif
+	bne	cr6, L(duLcr6)
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	b	L(dutrim)
+
+/* Remainder is 12 */
+	.align	4
+L(duP3):
+	srw	r12, rWORD8, rSHR
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	addi	rSTR1, rSTR1, 4
+#else
+	lwz	rWORD3, 0(rSTR1)
+#endif
+	slw	rWORD4_SHIFT, rWORD8, rSHL
+	or	rWORD4, r12, rWORD6_SHIFT
+L(duP3e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 4(rSTR1)
+	lwz	rWORD6, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	srw	r0, rWORD6, rSHR
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	or	rWORD6, r0, rWORD4_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD7, 8(rSTR1)
+	lwz	rWORD8, 8(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr1, L(duLcr1)
+	srw	r12, rWORD8, rSHR
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	blt	cr7, L(duP3x)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 12(rSTR1)
+	lwz	rWORD2, 12(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	srw	r0, rWORD2, rSHR
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	or	rWORD2, r0, rWORD8_SHIFT
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	b	L(duLoop1)
+	.align	4
+L(duP3x):
+#ifndef __LITTLE_ENDIAN__
+	addi	rSTR1, rSTR1, 8
+	addi	rSTR2, rSTR2, 8
+#endif
+#if 0
+/* Huh?  We've already branched on cr1!  */
+	bne	cr1, L(duLcr1)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr6, L(duLcr6)
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	b	L(dutrim)
+
+/* Count is a multiple of 16, remainder is 0 */
+	.align	4
+L(duP4):
+	mtctr	r0
+	srw	r0, rWORD8, rSHR
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	addi	rSTR1, rSTR1, 4
+#else
+	lwz	rWORD1, 0(rSTR1)
+#endif
+	slw	rWORD2_SHIFT, rWORD8, rSHL
+	or	rWORD2, r0, rWORD6_SHIFT
+L(duP4e):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 4(rSTR1)
+	lwz	rWORD4, 4(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	srw	r12, rWORD4, rSHR
+	slw	rWORD4_SHIFT, rWORD4, rSHL
+	or	rWORD4, r12, rWORD2_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 8(rSTR1)
+	lwz	rWORD6, 8(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr7, L(duLcr7)
+	srw	r0, rWORD6, rSHR
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	or	rWORD6, r0, rWORD4_SHIFT
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 12(rSTR1)
+	lwzu	rWORD8, 12(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr1, L(duLcr1)
+	srw	r12, rWORD8, rSHR
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	cmplw	cr5, rWORD7, rWORD8
+	bdz	L(du24)		/* Adjust CTR as we start with +4 */
+/* This is the primary loop */
+	.align	4
+L(duLoop):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD1, 4(rSTR1)
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(duLcr6)
+	srw	r0, rWORD2, rSHR
+	slw	rWORD2_SHIFT, rWORD2, rSHL
+	or	rWORD2, r0, rWORD8_SHIFT
+L(duLoop1):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD3, 0, rSTR1
+	lwbrx	rWORD4, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD3, 8(rSTR1)
+	lwz	rWORD4, 8(rSTR2)
+#endif
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(duLcr5)
+	srw	r12, rWORD4, rSHR
+	slw	rWORD4_SHIFT, rWORD4, rSHL
+	or	rWORD4, r12, rWORD2_SHIFT
+L(duLoop2):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD5, 0, rSTR1
+	lwbrx	rWORD6, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD5, 12(rSTR1)
+	lwz	rWORD6, 12(rSTR2)
+#endif
+	cmplw	cr5, rWORD7, rWORD8
+	bne	cr7, L(duLcr7)
+	srw	r0, rWORD6, rSHR
+	slw	rWORD6_SHIFT, rWORD6, rSHL
+	or	rWORD6, r0, rWORD4_SHIFT
+L(duLoop3):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD7, 0, rSTR1
+	lwbrx	rWORD8, 0, rSTR2
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+#else
+	lwzu	rWORD7, 16(rSTR1)
+	lwzu	rWORD8, 16(rSTR2)
+#endif
+	cmplw	cr7, rWORD1, rWORD2
+	bne	cr1, L(duLcr1)
+	srw	r12, rWORD8, rSHR
+	slw	rWORD8_SHIFT, rWORD8, rSHL
+	or	rWORD8, r12, rWORD6_SHIFT
+	bdnz	L(duLoop)
+
+L(duL4):
+#if 0
+/* Huh?  We've already branched on cr1!  */
+	bne	cr1, L(duLcr1)
+#endif
+	cmplw	cr1, rWORD3, rWORD4
+	bne	cr6, L(duLcr6)
+	cmplw	cr6, rWORD5, rWORD6
+	bne	cr5, L(duLcr5)
+	cmplw	cr5, rWORD7, rWORD8
+L(du44):
+	bne	cr7, L(duLcr7)
+L(du34):
+	bne	cr1, L(duLcr1)
+L(du24):
+	bne	cr6, L(duLcr6)
+L(du14):
+	slwi.	rN, rN, 3
+	bne	cr5, L(duLcr5)
+/* At this point we have a remainder of 1 to 3 bytes to compare.  We use
+   shift right to eliminate bits beyond the compare length.
+   This allows the use of word subtract to compute the final result.
+
+   However it may not be safe to load rWORD2, which may lie beyond the
+   end of the string.  So we compare the bit length of the remainder to
+   the right shift count (rSHR). If the bit count is less than or equal
+   we do not need to load rWORD2 (all significant bits are already in
+   rWORD8_SHIFT).  */
+	cmplw	cr7, rN, rSHR
+	beq	L(duZeroReturn)
+	li	r0, 0
+	ble	cr7, L(dutrim)
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD2, 0, rSTR2
+	addi	rSTR2, rSTR2, 4
+#else
+	lwz	rWORD2, 4(rSTR2)
+#endif
+	srw	r0, rWORD2, rSHR
+	.align	4
+L(dutrim):
+#ifdef __LITTLE_ENDIAN__
+	lwbrx	rWORD1, 0, rSTR1
+#else
+	lwz	rWORD1, 4(rSTR1)
+#endif
+	lwz	rWORD8, 48(r1)
+	subfic	rN, rN, 32	/* Shift count is 32 - (rN * 8).  */
+	or	rWORD2, r0, rWORD8_SHIFT
+	lwz	rWORD7, 44(r1)
+	lwz	rSHL, 40(r1)
+	srw	rWORD1, rWORD1, rN
+	srw	rWORD2, rWORD2, rN
+	lwz	rSHR, 36(r1)
+	lwz	rWORD8_SHIFT, 32(r1)
+	sub	rRTN, rWORD1, rWORD2
+	b	L(dureturn26)
+	.align	4
+L(duLcr7):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+	li	rRTN, 1
+	bgt	cr7, L(dureturn29)
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	4
+L(duLcr1):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+	li	rRTN, 1
+	bgt	cr1, L(dureturn29)
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	4
+L(duLcr6):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+	li	rRTN, 1
+	bgt	cr6, L(dureturn29)
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	4
+L(duLcr5):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+	li	rRTN, 1
+	bgt	cr5, L(dureturn29)
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+	li	rRTN, -1
+	b	L(dureturn27)
+	.align	3
+L(duZeroReturn):
+	li	rRTN, 0
+	.align	4
+L(dureturn):
+	lwz	rWORD8, 48(r1)
+	lwz	rWORD7, 44(r1)
+L(dureturn29):
+	lwz	rSHL, 40(r1)
+	lwz	rSHR, 36(r1)
+L(dureturn27):
+	lwz	rWORD8_SHIFT, 32(r1)
+L(dureturn26):
+	lwz	rWORD2_SHIFT, 28(r1)
+L(dureturn25):
+	lwz	rWORD4_SHIFT, 24(r1)
+	lwz	rWORD6_SHIFT, 20(r1)
+	addi	r1, r1, 64
+	cfi_adjust_cfa_offset(-64)
+	blr
+END (memcmp)
+
+libc_hidden_builtin_def (memcmp)
+weak_alias (memcmp, bcmp)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memcpy.S
new file mode 100644
index 0000000000..8e33c1d733
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memcpy.S
@@ -0,0 +1,538 @@
+/* Optimized memcpy implementation for PowerPC32/POWER7.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+   Returns 'dst'.  */
+
+	.machine  power7
+EALIGN (memcpy, 5, 0)
+	CALL_MCOUNT
+
+	stwu    1,-32(1)
+	cfi_adjust_cfa_offset(32)
+	stw	30,20(1)
+	cfi_offset(30,(20-32))
+	stw	31,24(1)
+	mr      30,3
+	cmplwi  cr1,5,31
+	neg	0,3
+	cfi_offset(31,-8)
+	ble	cr1, L(copy_LT_32)  /* If move < 32 bytes use short move
+				    code.  */
+
+	andi.   11,3,15	      /* Check alignment of DST.  */
+	clrlwi  10,4,28	      /* Check alignment of SRC.  */
+	cmplw   cr6,10,11     /* SRC and DST alignments match?  */
+	mr	12,4
+	mr	31,5
+	bne	cr6,L(copy_GE_32_unaligned)
+
+	srwi    9,5,3	      /* Number of full doublewords remaining.  */
+
+	beq	L(copy_GE_32_aligned_cont)
+
+	clrlwi  0,0,29
+	mtcrf   0x01,0
+	subf    31,0,5
+
+	/* Get the SRC aligned to 8 bytes.  */
+
+1:	bf	31,2f
+	lbz	6,0(12)
+	addi    12,12,1
+	stb	6,0(3)
+	addi    3,3,1
+2:	bf      30,4f
+	lhz     6,0(12)
+	addi    12,12,2
+	sth     6,0(3)
+	addi    3,3,2
+4:	bf      29,0f
+	lwz     6,0(12)
+	addi    12,12,4
+	stw     6,0(3)
+	addi    3,3,4
+0:
+	clrlwi  10,12,29      /* Check alignment of SRC again.  */
+	srwi    9,31,3	      /* Number of full doublewords remaining.  */
+
+L(copy_GE_32_aligned_cont):
+
+	clrlwi  11,31,29
+	mtcrf   0x01,9
+
+	srwi    8,31,5
+	cmplwi  cr1,9,4
+	cmplwi  cr6,11,0
+	mr	11,12
+
+	/* Copy 1~3 doublewords so the main loop starts
+	at a multiple of 32 bytes.  */
+
+	bf	30,1f
+	lfd     6,0(12)
+	lfd     7,8(12)
+	addi    11,12,16
+	mtctr   8
+	stfd    6,0(3)
+	stfd    7,8(3)
+	addi    10,3,16
+	bf      31,4f
+	lfd     0,16(12)
+	stfd    0,16(3)
+	blt     cr1,3f
+	addi    11,12,24
+	addi    10,3,24
+	b       4f
+
+	.align  4
+1:	/* Copy 1 doubleword and set the counter.  */
+	mr	10,3
+	mtctr   8
+	bf      31,4f
+	lfd     6,0(12)
+	addi    11,12,8
+	stfd    6,0(3)
+	addi    10,3,8
+
+L(aligned_copy):
+	/* Main aligned copy loop. Copies up to 128-bytes at a time. */
+	.align  4
+4:
+	/* Check for any 32-byte or 64-byte lumps that are outside of a
+	   nice 128-byte range.  R8 contains the number of 32-byte
+	   lumps, so drop this into the CR, and use the SO/EQ bits to help
+	   handle the 32- or 64-byte lumps.  Then handle the rest with an
+	   unrolled 128-bytes-at-a-time copy loop.  */
+	mtocrf	1,8
+	li	6,16	# 16() index
+	li	7,32	# 32() index
+	li	8,48	# 48() index
+
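A hedged C model of the dispatch above (not of the copy loop itself): r8
holds len / 32, its two low bits select one optional 32-byte and one optional
64-byte chunk via the CR bits, and the rest moves 128 bytes per iteration.
memcpy stands in for the lxvd2x/stxvd2x pairs and the sub-32-byte tail is
ignored, purely for illustration:

	#include <stddef.h>
	#include <string.h>

	void
	copy_lumps (unsigned char *dst, const unsigned char *src, size_t len)
	{
	  size_t lumps32 = len / 32;
	  if (lumps32 & 1)		/* CR7[SO]: one 32-byte lump.  */
	    { memcpy (dst, src, 32); dst += 32; src += 32; }
	  if (lumps32 & 2)		/* CR7[EQ]: one 64-byte lump.  */
	    { memcpy (dst, src, 64); dst += 64; src += 64; }
	  for (size_t i = 0; i < len / 128; i++)	/* The unrolled loop.  */
	    { memcpy (dst, src, 128); dst += 128; src += 128; }
	}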
+L(aligned_32byte):
+	/* if the SO bit (indicating a 32-byte lump) is not set, move along. */
+	bns	cr7,L(aligned_64byte)
+	lxvd2x	6,0,11
+	lxvd2x	7,11,6
+	addi	11,11,32
+	stxvd2x	6,0,10
+	stxvd2x	7,10,6
+	addi	10,10,32
+
+L(aligned_64byte):
+	/* if the EQ bit (indicating a 64-byte lump) is not set, move along. */
+	bne	cr7,L(aligned_128setup)
+	lxvd2x	6,0,11
+	lxvd2x	7,11,6
+	lxvd2x	8,11,7
+	lxvd2x	9,11,8
+	addi	11,11,64
+	stxvd2x	6,0,10
+	stxvd2x	7,10,6
+	stxvd2x	8,10,7
+	stxvd2x	9,10,8
+	addi	10,10,64
+
+L(aligned_128setup):
+	/* Set up for the 128-byte at a time copy loop.  */
+	srwi	8,31,7
+	cmpwi	8,0	# Any 4x lumps left?
+	beq	3f	# if not, move along.
+	lxvd2x	6,0,11
+	lxvd2x	7,11,6
+	mtctr	8	# otherwise, load the ctr and begin.
+	li	8,48	# 48() index
+	b	L(aligned_128loop)
+
+	/* For the 2nd and later iterations of this loop.  */
+	/* for the 2nd + iteration of this loop. */
+	lxvd2x	6,0,11
+	lxvd2x	7,11,6
+L(aligned_128loop):
+	lxvd2x	8,11,7
+	lxvd2x	9,11,8
+	stxvd2x	6,0,10
+	addi	11,11,64
+	stxvd2x	7,10,6
+	stxvd2x	8,10,7
+	stxvd2x	9,10,8
+	lxvd2x	6,0,11
+	lxvd2x	7,11,6
+	addi	10,10,64
+	lxvd2x	8,11,7
+	lxvd2x	9,11,8
+	addi	11,11,64
+	stxvd2x	6,0,10
+	stxvd2x	7,10,6
+	stxvd2x	8,10,7
+	stxvd2x	9,10,8
+	addi	10,10,64
+	bdnz	L(aligned_128head)
+
+3:
+	/* Check for tail bytes.  */
+	clrrwi  0,31,3
+	mtcrf   0x01,31
+	beq	cr6,0f
+
+.L9:
+	add	3,3,0
+	add	12,12,0
+
+	/*  At this point we have a tail of 0-7 bytes and we know that the
+	destination is doubleword-aligned.  */
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	lwz     6,0(12)
+	addi    12,12,4
+	stw     6,0(3)
+	addi    3,3,4
+2:	/* Copy 2 bytes.  */
+	bf	30,1f
+
+	lhz     6,0(12)
+	addi    12,12,2
+	sth     6,0(3)
+	addi    3,3,2
+1:	/* Copy 1 byte.  */
+	bf	31,0f
+
+	lbz	6,0(12)
+	stb	6,0(3)
+0:	/* Return original DST pointer.  */
+	mr	3,30
+	lwz	30,20(1)
+	lwz     31,24(1)
+	addi    1,1,32
+	blr
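The tail copy above is driven by the low bits of the remaining length, which
mtcrf dropped into CR7.  A hedged C equivalent of that bit-by-bit tail (CR
bit 29 selects 4 bytes, bit 30 selects 2, bit 31 selects 1):

	#include <stddef.h>
	#include <string.h>

	void
	copy_tail (unsigned char *dst, const unsigned char *src, size_t rem)
	{
	  if (rem & 4) { memcpy (dst, src, 4); dst += 4; src += 4; }
	  if (rem & 2) { memcpy (dst, src, 2); dst += 2; src += 2; }
	  if (rem & 1) *dst = *src;
	}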
+
+	/* Handle copies of 0~31 bytes.  */
+	.align  4
+L(copy_LT_32):
+	cmplwi  cr6,5,8
+	mr	12,4
+	mtcrf   0x01,5
+	ble	cr6,L(copy_LE_8)
+
+	/* At least 9 bytes to go.  */
+	neg	8,4
+	clrrwi  11,4,2
+	andi.   0,8,3
+	cmplwi  cr1,5,16
+	mr	10,5
+	beq	L(copy_LT_32_aligned)
+
+	/* Force 4-bytes alignment for SRC.  */
+	mtocrf  0x01,0
+	subf    10,0,5
+2:	bf	30,1f
+
+	lhz	6,0(12)
+	addi    12,12,2
+	sth	6,0(3)
+	addi    3,3,2
+1:	bf	31,L(end_4bytes_alignment)
+
+	lbz	6,0(12)
+	addi    12,12,1
+	stb	6,0(3)
+	addi    3,3,1
+
+	.align  4
+L(end_4bytes_alignment):
+	cmplwi  cr1,10,16
+	mtcrf   0x01,10
+
+L(copy_LT_32_aligned):
+	/* At least 6 bytes to go, and SRC is word-aligned.  */
+	blt	cr1,8f
+
+	/* Copy 16 bytes.  */
+	lwz	6,0(12)
+	lwz     7,4(12)
+	stw     6,0(3)
+	lwz     8,8(12)
+	stw     7,4(3)
+	lwz     6,12(12)
+	addi    12,12,16
+	stw     8,8(3)
+	stw     6,12(3)
+	addi    3,3,16
+8:	/* Copy 8 bytes.  */
+	bf	28,4f
+
+	lwz     6,0(12)
+	lwz     7,4(12)
+	addi    12,12,8
+	stw     6,0(3)
+	stw     7,4(3)
+	addi    3,3,8
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	lwz     6,0(12)
+	addi    12,12,4
+	stw     6,0(3)
+	addi    3,3,4
+2:	/* Copy 2-3 bytes.  */
+	bf	30,1f
+
+	lhz     6,0(12)
+	sth     6,0(3)
+	bf      31,0f
+	lbz     7,2(12)
+	stb     7,2(3)
+
+	/* Return original DST pointer.  */
+	mr      3,30
+	lwz     30,20(1)
+	addi    1,1,32
+	blr
+
+	.align  4
+1:	/* Copy 1 byte.  */
+	bf	31,0f
+
+	lbz	6,0(12)
+	stb	6,0(3)
+0:	/* Return original DST pointer.  */
+	mr	3,30
+	lwz	30,20(1)
+	addi    1,1,32
+	blr
+
+	/* Handles copies of 0~8 bytes.  */
+	.align  4
+L(copy_LE_8):
+	bne	cr6,4f
+
+	/* Though we could've used lfd/stfd here, they are still
+	slow for unaligned cases.  */
+
+	lwz	6,0(4)
+	lwz     7,4(4)
+	stw     6,0(3)
+	stw     7,4(3)
+
+	/* Return original DST pointer.  */
+	mr      3,30
+	lwz     30,20(1)
+	addi    1,1,32
+	blr
+
+	.align  4
+4:	/* Copies 4~7 bytes.  */
+	bf	29,2b
+
+	lwz	6,0(4)
+	stw     6,0(3)
+	bf      30,5f
+	lhz     7,4(4)
+	sth     7,4(3)
+	bf      31,0f
+	lbz     8,6(4)
+	stb     8,6(3)
+
+	/* Return original DST pointer.  */
+	mr      3,30
+	lwz     30,20(1)
+	addi    1,1,32
+	blr
+
+	.align  4
+5:	/* Copy 1 byte.  */
+	bf	31,0f
+
+	lbz	6,4(4)
+	stb	6,4(3)
+
+0:	/* Return original DST pointer.  */
+	mr	3,30
+	lwz     30,20(1)
+	addi    1,1,32
+	blr
+
+	/* Handle copies of 32+ bytes where DST is aligned (to quadword) but
+	SRC is not. Use aligned quadword loads from SRC, shifted to realign
+	the data, allowing for aligned DST stores.  */
+	.align  4
+L(copy_GE_32_unaligned):
+	andi.   11,3,15	      /* Check alignment of DST.  */
+	clrlwi  0,0,28	      /* Number of bytes until the 1st
+			      quadword of DST.  */
+	srwi    9,5,4	      /* Number of full quadwords remaining.  */
+
+	beq    L(copy_GE_32_unaligned_cont)
+
+	/* DST is not quadword aligned, get it aligned.  */
+
+	mtcrf   0x01,0
+	subf    31,0,5
+
+	/* Vector instructions work best when proper alignment (16-bytes)
+	is present.  Move 0~15 bytes as needed to get DST quadword-aligned.  */
+1:	/* Copy 1 byte.  */
+	bf	31,2f
+
+	lbz	6,0(12)
+	addi    12,12,1
+	stb	6,0(3)
+	addi    3,3,1
+2:	/* Copy 2 bytes.  */
+	bf	    30,4f
+
+	lhz     6,0(12)
+	addi    12,12,2
+	sth     6,0(3)
+	addi    3,3,2
+4:	/* Copy 4 bytes.  */
+	bf	29,8f
+
+	lwz     6,0(12)
+	addi    12,12,4
+	stw     6,0(3)
+	addi    3,3,4
+8:	/* Copy 8 bytes.  */
+	bf	28,0f
+
+	lfd	6,0(12)
+	addi    12,12,8
+	stfd    6,0(3)
+	addi    3,3,8
+0:
+	clrlwi  10,12,28      /* Check alignment of SRC.  */
+	srwi    9,31,4	      /* Number of full quadwords remaining.  */
+
+	/* The proper alignment is present, it is OK to copy the bytes now.  */
+L(copy_GE_32_unaligned_cont):
+
+	/* Setup two indexes to speed up the indexed vector operations.  */
+	clrlwi  11,31,28
+	li      6,16	      /* Index for 16-bytes offsets.  */
+	li	7,32	      /* Index for 32-bytes offsets.  */
+	cmplwi  cr1,11,0
+	srwi    8,31,5	      /* Setup the loop counter.  */
+	mr      10,3
+	mr      11,12
+	mtcrf   0x01,9
+	cmplwi  cr6,9,1
+#ifdef __LITTLE_ENDIAN__
+	lvsr    5,0,12
+#else
+	lvsl    5,0,12
+#endif
+	lvx     3,0,12
+	bf      31,L(setup_unaligned_loop)
+
+	/* Copy another 16 bytes to align to 32 bytes due to the loop.  */
+	lvx     4,12,6
+#ifdef __LITTLE_ENDIAN__
+	vperm   6,4,3,5
+#else
+	vperm   6,3,4,5
+#endif
+	addi    11,12,16
+	addi    10,3,16
+	stvx    6,0,3
+	vor	3,4,4
+
+L(setup_unaligned_loop):
+	mtctr   8
+	ble     cr6,L(end_unaligned_loop)
+
+	/* Copy 32 bytes at a time using vector instructions.  */
+	.align  4
+L(unaligned_loop):
+
+	/* Note: vr6/vr10 may contain data that was already copied,
+	but in order to get proper alignment, we may have to copy
+	some portions again. This is faster than having unaligned
+	vector instructions though.  */
+
+	lvx	4,11,6	      /* vr4 = r11+16.  */
+#ifdef __LITTLE_ENDIAN__
+	vperm   6,4,3,5
+#else
+	vperm   6,3,4,5
+#endif
+	lvx	3,11,7	      /* vr3 = r11+32.  */
+#ifdef __LITTLE_ENDIAN__
+	vperm   10,3,4,5
+#else
+	vperm   10,4,3,5
+#endif
+	addi    11,11,32
+	stvx    6,0,10
+	stvx    10,10,6
+	addi    10,10,32
+
+	bdnz    L(unaligned_loop)
+
+	.align  4
+L(end_unaligned_loop):
+
+	/* Check for tail bytes.  */
+	clrrwi  0,31,4
+	mtcrf   0x01,31
+	beq	cr1,0f
+
+	add	3,3,0
+	add	12,12,0
+
+	/*  We have 1~15 tail bytes to copy, and DST is quadword aligned.  */
+8:	/* Copy 8 bytes.  */
+	bf	28,4f
+
+	lwz	6,0(12)
+	lwz	7,4(12)
+	addi    12,12,8
+	stw	6,0(3)
+	stw	7,4(3)
+	addi    3,3,8
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	lwz	6,0(12)
+	addi    12,12,4
+	stw	6,0(3)
+	addi    3,3,4
+2:	/* Copy 2~3 bytes.  */
+	bf	30,1f
+
+	lhz	6,0(12)
+	addi    12,12,2
+	sth	6,0(3)
+	addi    3,3,2
+1:	/* Copy 1 byte.  */
+	bf	31,0f
+
+	lbz	6,0(12)
+	stb	6,0(3)
+0:	/* Return original DST pointer.  */
+	mr	3,30
+	lwz     30,20(1)
+	lwz	31,24(1)
+	addi    1,1,32
+	blr
+
+END (memcpy)
+libc_hidden_builtin_def (memcpy)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/mempcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/mempcpy.S
new file mode 100644
index 0000000000..1682fbcd2a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/mempcpy.S
@@ -0,0 +1,482 @@
+/* Optimized mempcpy implementation for POWER7.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] __mempcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+	Returns 'dst' + 'len'.  */
+
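For reference, the interface described above is equivalent to this hedged
one-liner built on memcpy (not the optimized implementation in this file):

	#include <string.h>

	void *
	mempcpy_ref (void *dst, const void *src, size_t len)
	{
	  return (char *) memcpy (dst, src, len) + len;
	}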
+	.machine  power7
+EALIGN (__mempcpy, 5, 0)
+	CALL_MCOUNT
+
+	stwu	1,-32(1)
+	cfi_adjust_cfa_offset(32)
+	stw	30,20(1)
+	cfi_offset(30,(20-32))
+	stw	31,24(1)
+	mr	30,3
+	cmplwi	cr1,5,31
+	neg	0,3
+	cfi_offset(31,-8)
+	ble	cr1,L(copy_LT_32)  /* If move < 32 bytes use short move
+					code.  */
+
+	andi.	11,3,7	      /* Check alignment of DST.  */
+	clrlwi	10,4,29	      /* Check alignment of SRC.  */
+	cmplw	cr6,10,11     /* SRC and DST alignments match?  */
+	mr	12,4
+	mr	31,5
+	bne	cr6,L(copy_GE_32_unaligned)
+
+	srwi	9,5,3	      /* Number of full doublewords remaining.  */
+
+	beq	L(copy_GE_32_aligned_cont)
+
+	clrlwi	0,0,29
+	mtcrf	0x01,0
+	subf	31,0,5
+
+	/* Get the SRC aligned to 8 bytes.  */
+
+1:	bf	31,2f
+	lbz	6,0(12)
+	addi	12,12,1
+	stb	6,0(3)
+	addi	3,3,1
+2:	bf	30,4f
+	lhz	6,0(12)
+	addi	12,12,2
+	sth	6,0(3)
+	addi	3,3,2
+4:	bf	29,0f
+	lwz	6,0(12)
+	addi	12,12,4
+	stw	6,0(3)
+	addi	3,3,4
+0:
+	clrlwi	10,12,29      /* Check alignment of SRC again.  */
+	srwi	9,31,3	      /* Number of full doublewords remaining.  */
+
+L(copy_GE_32_aligned_cont):
+
+	clrlwi	11,31,29
+	mtcrf	0x01,9
+
+	srwi	8,31,5
+	cmplwi	cr1,9,4
+	cmplwi	cr6,11,0
+	mr	11,12
+
+	/* Copy 1~3 doublewords so the main loop starts
+	at a multiple of 32 bytes.  */
+
+	bf	30,1f
+	lfd	6,0(12)
+	lfd	7,8(12)
+	addi	11,12,16
+	mtctr	8
+	stfd	6,0(3)
+	stfd	7,8(3)
+	addi	10,3,16
+	bf	31,4f
+	lfd	0,16(12)
+	stfd	0,16(3)
+	blt	cr1,3f
+	addi	11,12,24
+	addi	10,3,24
+	b	4f
+
+	.align	4
+1:	/* Copy 1 doubleword and set the counter.  */
+	mr	10,3
+	mtctr	8
+	bf	31,4f
+	lfd	6,0(12)
+	addi	11,12,8
+	stfd	6,0(3)
+	addi	10,3,8
+
+	.align	4
+4:	/* Main aligned copy loop. Copies 32-bytes at a time.  */
+	lfd	6,0(11)
+	lfd	7,8(11)
+	lfd	8,16(11)
+	lfd	0,24(11)
+	addi	11,11,32
+
+	stfd	6,0(10)
+	stfd	7,8(10)
+	stfd	8,16(10)
+	stfd	0,24(10)
+	addi	10,10,32
+	bdnz	4b
+3:
+
+	/* Check for tail bytes.  */
+
+	clrrwi	0,31,3
+	mtcrf	0x01,31
+	beq	cr6,0f
+
+.L9:
+	add	3,3,0
+	add	12,12,0
+
+	/*  At this point we have a tail of 0-7 bytes and we know that the
+	destination is doubleword-aligned.  */
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	lwz	6,0(12)
+	addi	12,12,4
+	stw	6,0(3)
+	addi	3,3,4
+2:	/* Copy 2 bytes.  */
+	bf	30,1f
+
+	lhz	6,0(12)
+	addi	12,12,2
+	sth	6,0(3)
+	addi	3,3,2
+1:	/* Copy 1 byte.  */
+	bf	31,0f
+
+	lbz	6,0(12)
+	stb	6,0(3)
+0:	/* Return DST + LEN pointer.  */
+	add	3,30,5
+	lwz	30,20(1)
+	lwz	31,24(1)
+	addi	1,1,32
+	blr
+
+	/* Handle copies of 0~31 bytes.  */
+	.align	4
+L(copy_LT_32):
+	cmplwi	cr6,5,8
+	mr	12,4
+	mtcrf	0x01,5
+	ble	cr6,L(copy_LE_8)
+
+	/* At least 9 bytes to go.  */
+	neg	8,4
+	clrrwi	11,4,2
+	andi.	0,8,3
+	cmplwi	cr1,5,16
+	mr	10,5
+	beq	L(copy_LT_32_aligned)
+
+	/* Force 4-bytes alignment for SRC.  */
+	mtocrf  0x01,0
+	subf	10,0,5
+2:	bf	30,1f
+
+	lhz	6,0(12)
+	addi	12,12,2
+	sth	6,0(3)
+	addi	3,3,2
+1:	bf	31,L(end_4bytes_alignment)
+
+	lbz	6,0(12)
+	addi	12,12,1
+	stb	6,0(3)
+	addi	3,3,1
+
+	.align	4
+L(end_4bytes_alignment):
+	cmplwi	cr1,10,16
+	mtcrf	0x01,10
+
+L(copy_LT_32_aligned):
+	/* At least 6 bytes to go, and SRC is word-aligned.  */
+	blt	cr1,8f
+
+	/* Copy 16 bytes.  */
+	lwz	6,0(12)
+	lwz	7,4(12)
+	stw	6,0(3)
+	lwz	8,8(12)
+	stw	7,4(3)
+	lwz	6,12(12)
+	addi	12,12,16
+	stw	8,8(3)
+	stw	6,12(3)
+	addi	3,3,16
+8:	/* Copy 8 bytes.  */
+	bf	28,4f
+
+	lwz	6,0(12)
+	lwz	7,4(12)
+	addi	12,12,8
+	stw	6,0(3)
+	stw	7,4(3)
+	addi	3,3,8
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	lwz	6,0(12)
+	addi	12,12,4
+	stw	6,0(3)
+	addi	3,3,4
+2:	/* Copy 2-3 bytes.  */
+	bf	30,1f
+
+	lhz	6,0(12)
+	sth	6,0(3)
+	bf	31,0f
+	lbz	7,2(12)
+	stb	7,2(3)
+
+	/* Return DST + LEN pointer.  */
+	add	3,30,5
+	lwz	30,20(1)
+	addi	1,1,32
+	blr
+
+	.align	4
+1:	/* Copy 1 byte.  */
+	bf	31,0f
+
+	lbz	6,0(12)
+	stb	6,0(3)
+0:	/* Return DST + LEN pointer.  */
+	add	3,30,5
+	lwz	30,20(1)
+	addi	1,1,32
+	blr
+
+	/* Handles copies of 0~8 bytes.  */
+	.align	4
+L(copy_LE_8):
+	bne	cr6,4f
+
+	/* Though we could've used lfd/stfd here, they are still
+	slow for unaligned cases.  */
+
+	lwz	6,0(4)
+	lwz	7,4(4)
+	stw	6,0(3)
+	stw	7,4(3)
+
+	/* Return DST + LEN pointer.  */
+	add	3,30,5
+	lwz	30,20(1)
+	addi	1,1,32
+	blr
+
+	.align	4
+4:	/* Copies 4~7 bytes.  */
+	bf	29,2b
+
+	lwz	6,0(4)
+	stw	6,0(3)
+	bf	30,5f
+	lhz	7,4(4)
+	sth	7,4(3)
+	bf	31,0f
+	lbz	8,6(4)
+	stb	8,6(3)
+
+	/* Return DST + LEN pointer.  */
+	add	3,30,5
+	lwz	30,20(1)
+	addi	1,1,32
+	blr
+
+	.align	4
+5:	/* Copy 1 byte.  */
+	bf	31,0f
+
+	lbz	6,4(4)
+	stb	6,4(3)
+
+0:	/* Return DST + LEN pointer.  */
+	add	3,30,5
+	lwz	30,20(1)
+	addi	1,1,32
+	blr
+
+	/* Handle copies of 32+ bytes where DST is aligned (to quadword) but
+	SRC is not. Use aligned quadword loads from SRC, shifted to realign
+	the data, allowing for aligned DST stores.  */
+	.align	4
+L(copy_GE_32_unaligned):
+	andi.	11,3,15	      /* Check alignment of DST.  */
+	clrlwi	0,0,28	      /* Number of bytes until the 1st
+				 quadword of DST.  */
+	srwi	9,5,4	      /* Number of full quadwords remaining.  */
+
+	beq	L(copy_GE_32_unaligned_cont)
+
+	/* DST is not quadword aligned, get it aligned.  */
+
+	mtcrf	0x01,0
+	subf	31,0,5
+
+	/* Vector instructions work best when proper alignment (16-bytes)
+	is present.  Move 0~15 bytes as needed to get DST quadword-aligned.  */
+1:	/* Copy 1 byte.  */
+	bf	31,2f
+
+	lbz	6,0(12)
+	addi	12,12,1
+	stb	6,0(3)
+	addi	3,3,1
+2:	/* Copy 2 bytes.  */
+	bf		30,4f
+
+	lhz	6,0(12)
+	addi	12,12,2
+	sth	6,0(3)
+	addi	3,3,2
+4:	/* Copy 4 bytes.  */
+	bf	29,8f
+
+	lwz	6,0(12)
+	addi	12,12,4
+	stw	6,0(3)
+	addi	3,3,4
+8:	/* Copy 8 bytes.  */
+	bf	28,0f
+
+	lfd	6,0(12)
+	addi	12,12,8
+	stfd	6,0(3)
+	addi	3,3,8
+0:
+	clrlwi	10,12,28      /* Check alignment of SRC.  */
+	srwi	9,31,4	      /* Number of full quadwords remaining.  */
+
+	/* The proper alignment is present, it is OK to copy the bytes now.  */
+L(copy_GE_32_unaligned_cont):
+
+	/* Setup two indexes to speed up the indexed vector operations.  */
+	clrlwi	11,31,28
+	li	6,16	      /* Index for 16-bytes offsets.  */
+	li	7,32	      /* Index for 32-bytes offsets.  */
+	cmplwi	cr1,11,0
+	srwi	8,31,5	      /* Setup the loop counter.  */
+	mr	10,3
+	mr	11,12
+	mtcrf	0x01,9
+	cmplwi	cr6,9,1
+#ifdef __LITTLE_ENDIAN__
+	lvsr    5,0,12
+#else
+	lvsl    5,0,12
+#endif
+	lvx	3,0,12
+	bf	31,L(setup_unaligned_loop)
+
+	/* Copy another 16 bytes to align to 32 bytes due to the loop.  */
+	lvx	4,12,6
+#ifdef __LITTLE_ENDIAN__
+	vperm   6,4,3,5
+#else
+	vperm   6,3,4,5
+#endif
+	addi	11,12,16
+	addi	10,3,16
+	stvx	6,0,3
+	vor	3,4,4
+
+L(setup_unaligned_loop):
+	mtctr	8
+	ble	cr6,L(end_unaligned_loop)
+
+	/* Copy 32 bytes at a time using vector instructions.  */
+	.align	4
+L(unaligned_loop):
+
+	/* Note: vr6/vr10 may contain data that was already copied,
+	but in order to get proper alignment, we may have to copy
+	some portions again. This is faster than having unaligned
+	vector instructions though.  */
+
+	lvx	4,11,6	      /* vr4 = r11+16.  */
+#ifdef __LITTLE_ENDIAN__
+	vperm   6,4,3,5
+#else
+	vperm   6,3,4,5
+#endif
+	lvx	3,11,7	      /* vr3 = r11+32.  */
+#ifdef __LITTLE_ENDIAN__
+	vperm   10,3,4,5
+#else
+	vperm   10,4,3,5
+#endif
+	addi	11,11,32
+	stvx	6,0,10
+	stvx	10,10,6
+	addi	10,10,32
+
+	bdnz	L(unaligned_loop)
+
+	.align	4
+L(end_unaligned_loop):
+
+	/* Check for tail bytes.  */
+	clrrwi	0,31,4
+	mtcrf	0x01,31
+	beq	cr1,0f
+
+	add	3,3,0
+	add	12,12,0
+
+	/*  We have 1~15 tail bytes to copy, and DST is quadword aligned.  */
+8:	/* Copy 8 bytes.  */
+	bf	28,4f
+
+	lwz	6,0(12)
+	lwz	7,4(12)
+	addi	12,12,8
+	stw	6,0(3)
+	stw	7,4(3)
+	addi	3,3,8
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	lwz	6,0(12)
+	addi	12,12,4
+	stw	6,0(3)
+	addi	3,3,4
+2:	/* Copy 2~3 bytes.  */
+	bf	30,1f
+
+	lhz	6,0(12)
+	addi	12,12,2
+	sth	6,0(3)
+	addi	3,3,2
+1:	/* Copy 1 byte.  */
+	bf	31,0f
+
+	lbz	6,0(12)
+	stb	6,0(3)
+0:	/* Return DST + LEN pointer.  */
+	add	3,30,5
+	lwz	30,20(1)
+	lwz	31,24(1)
+	addi	1,1,32
+	blr
+
+END (__mempcpy)
+libc_hidden_def (__mempcpy)
+weak_alias (__mempcpy, mempcpy)
+libc_hidden_builtin_def (mempcpy)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memrchr.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memrchr.S
new file mode 100644
index 0000000000..eb0c1bb8eb
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memrchr.S
@@ -0,0 +1,196 @@
+/* Optimized memrchr implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* void * [r3] memrchr (const char *s [r3], int byte [r4], size_t size [r5])  */
+	.machine  power7
+ENTRY (__memrchr)
+	CALL_MCOUNT
+	add	r7,r3,r5      /* Calculate the last acceptable address.  */
+	neg	r0,r7
+	addi	r7,r7,-1
+	mr	r10,r3
+	clrrwi	r6,r7,7
+	li	r9,3<<5
+	dcbt	r9,r6,16      /* Stream hint, decreasing addresses.  */
+
+	/* Replicate BYTE to word.  */
+	insrwi	r4,r4,8,16
+	insrwi	r4,r4,16,0
+	li	r6,-4
+	li	r9,-1
+	rlwinm	r0,r0,3,27,28 /* Calculate padding.  */
+	clrrwi	r8,r7,2
+	srw	r9,r9,r0
+	cmplwi	r5,16
+	clrrwi	r0,r10,2
+	ble	L(small_range)
+
+#ifdef __LITTLE_ENDIAN__
+	lwzx	r12,0,r8
+#else
+	lwbrx	r12,0,r8      /* Load reversed word from memory.  */
+#endif
+	cmpb	r3,r12,r4     /* Check for BYTE in WORD1.  */
+	and	r3,r3,r9
+	cmplwi	cr7,r3,0      /* If r3 == 0, no BYTEs have been found.  */
+	bne	cr7,L(done)
+
+	mtcrf   0x01,r8
+	/* Are we now aligned to a doubleword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+	bf	29,L(loop_setup)
+
+	/* Handle WORD2 of pair.  */
+#ifdef __LITTLE_ENDIAN__
+	lwzx	r12,r8,r6
+#else
+	lwbrx	r12,r8,r6
+#endif
+	addi	r8,r8,-4
+	cmpb	r3,r12,r4
+	cmplwi	cr7,r3,0
+	bne	cr7,L(done)
+
+L(loop_setup):
+	/* The last word we want to read in the loop below is the one
+	   containing the first byte of the string, ie. the word at
+	   s & ~3, or r0.  The first word read is at r8 - 4, we
+	   read 2 * cnt words, so the last word read will be at
+	   r8 - 4 - 8 * cnt + 4.  Solving for cnt gives
+	   cnt = (r8 - r0) / 8  */
+	sub	r5,r8,r0
+	addi	r8,r8,-4
+	srwi	r9,r5,3       /* Number of loop iterations.  */
+	mtctr	r9	      /* Setup the counter.  */
+
+	/* Main loop to look for BYTE backwards in the string.
+	   FIXME: Investigate whether 32 byte align helps with this
+	   9 instruction loop.  */
+	.align	5
+L(loop):
+	/* Load two words, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the byte-checking process for bigger strings.  */
+
+#ifdef __LITTLE_ENDIAN__
+	lwzx	r12,0,r8
+	lwzx	r11,r8,r6
+#else
+	lwbrx	r12,0,r8
+	lwbrx	r11,r8,r6
+#endif
+	cmpb	r3,r12,r4
+	cmpb	r9,r11,r4
+	or	r5,r9,r3      /* Merge everything in one word.  */
+	cmplwi	cr7,r5,0
+	bne	cr7,L(found)
+	addi	r8,r8,-8
+	bdnz	L(loop)
+
+	/* We may have one more word to read.  */
+	cmplw	r8,r0
+	bnelr
+
+#ifdef __LITTLE_ENDIAN__
+	lwzx	r12,0,r8
+#else
+	lwbrx	r12,0,r8
+#endif
+	cmpb	r3,r12,r4
+	cmplwi	cr7,r3,0
+	bne	cr7,L(done)
+	blr
+
+	.align	4
+L(found):
+	/* OK, one (or both) of the words contains BYTE.  Check
+	   the first word.  */
+	cmplwi	cr6,r3,0
+	bne	cr6,L(done)
+
+	/* BYTE must be in the second word.  Adjust the address
+	   again and move the result of cmpb to r3 so we can calculate the
+	   pointer.  */
+
+	mr	r3,r9
+	addi	r8,r8,-4
+
+	/* r3 has the output of the cmpb instruction, that is, it contains
+	   0xff in the same position as BYTE in the original
+	   word from the string.  Use that to calculate the pointer.
+	   We need to make sure BYTE is *before* the end of the
+	   range.  */
+L(done):
+	cntlzw	r9,r3	      /* Count leading zeros before the match.  */
+	cmplw	r8,r0         /* Are we on the last word?  */
+	srwi	r6,r9,3	      /* Convert leading zeros to bytes.  */
+	addi	r0,r6,-3
+	sub	r3,r8,r0
+	cmplw	cr7,r3,r10
+	bnelr
+	bgelr	cr7
+	li	r3,0
+	blr
+
+	.align	4
+L(null):
+	li	r3,0
+	blr
+
+/* Deals with size <= 16.  */
+	.align	4
+L(small_range):
+	cmplwi	r5,0
+	beq	L(null)
+
+#ifdef __LITTLE_ENDIAN__
+	lwzx	r12,0,r8
+#else
+	lwbrx	r12,0,r8      /* Load reversed word from memory.  */
+#endif
+	cmpb	r3,r12,r4     /* Check for BYTE in WORD1.  */
+	and	r3,r3,r9
+	cmplwi	cr7,r3,0
+	bne	cr7,L(done)
+
+	/* Are we done already?  */
+	cmplw	r8,r0
+	addi	r8,r8,-4
+	beqlr
+
+	.align	5
+L(loop_small):
+#ifdef __LITTLE_ENDIAN__
+	lwzx	r12,0,r8
+#else
+	lwbrx	r12,0,r8
+#endif
+	cmpb	r3,r12,r4
+	cmplw	r8,r0
+	cmplwi	cr7,r3,0
+	bne	cr7,L(done)
+	addi	r8,r8,-4
+	bne	L(loop_small)
+	blr
+
+END (__memrchr)
+weak_alias (__memrchr, memrchr)
+libc_hidden_builtin_def (memrchr)
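
Both endian paths above arrange the load (lwzx on little-endian, lwbrx on
big-endian) so that the byte at the highest address lands in the most
significant byte of the register, and cntlzw in L(done) then locates the
last match.  A hedged C sketch of that address computation (MASK is assumed
nonzero here; the helper name is illustrative, not a glibc interface):

  #include <stdint.h>

  /* MASK is the cmpb result: 0xff in each matching byte position, with the
     highest-addressed byte in the most significant position.  WP is the
     address of the aligned word (r8 above).  The caller must still check
     that the result lies inside the searched range, as the final
     cmplw/bgelr in L(done) does.  */
  static const char *
  last_match_addr (uint32_t mask, const char *wp)
  {
    unsigned lz_bytes = (unsigned) __builtin_clz (mask) >> 3; /* cntlzw; srwi.  */
    return wp + 3 - lz_bytes;                                 /* addi; sub.  */
  }
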
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memset.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memset.S
new file mode 100644
index 0000000000..b431f5086d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/memset.S
@@ -0,0 +1,431 @@
+/* Optimized memset implementation for PowerPC32/POWER7.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
+   Returns 's'.  */
+
+	.machine  power7
+EALIGN (memset, 5, 0)
+	CALL_MCOUNT
+
+	.align	4
+L(_memset):
+	cmplwi	cr7,5,31
+	cmplwi	cr6,5,8
+	mr	10,3		/* Save original argument for later.  */
+	mr	7,1		/* Save original r1 for later.  */
+	cfi_offset(31,-8)
+
+	/* Replicate byte to word.  */
+	insrwi	4,4,8,16
+	insrwi	4,4,16,0
+
+	ble	cr6,L(small)	/* If length <= 8, use short copy code.  */
+
+	neg	0,3
+	ble	cr7,L(medium)	/* If length < 32, use medium copy code.  */
+
+	/* Save our word twice to create a doubleword that we will later
+	   copy to a FPR.  */
+	stwu	1,-32(1)
+	andi.	11,10,7		/* Check alignment of DST.  */
+	mr	12,5
+	stw	4,24(1)
+	stw	4,28(1)
+	beq	L(big_aligned)
+
+	clrlwi	0,0,29
+	mtocrf	0x01,0
+	subf	5,0,5
+
+	/* Get DST aligned to 8 bytes.  */
+1:	bf	31,2f
+
+	stb	4,0(10)
+	addi	10,10,1
+2:	bf	30,4f
+
+	sth	4,0(10)
+	addi	10,10,2
+4:	bf	29,L(big_aligned)
+
+	stw	4,0(10)
+	addi	10,10,4
+
+	.align	4
+L(big_aligned):
+	cmplwi	cr5,5,255
+	li	0,32
+	cmplwi	cr1,5,160
+	dcbtst	0,10
+	cmplwi	cr6,4,0
+	srwi	9,5,3		/* Number of full doublewords remaining.  */
+	crand	27,26,21
+	mtocrf	0x01,9
+	bt	27,L(huge)
+
+	/* From this point on, we'll copy 32+ bytes and the value
+	   isn't 0 (so we can't use dcbz).  */
+
+	srwi	8,5,5
+	clrlwi	11,5,29
+	cmplwi	cr6,11,0
+	cmplwi	cr1,9,4
+	mtctr	8
+
+	/* Copy 1~3 doublewords so the main loop starts
+	at a multiple of 32 bytes.  */
+
+	bf	30,1f
+
+	stw	4,0(10)
+	stw	4,4(10)
+	stw	4,8(10)
+	stw	4,12(10)
+	addi	10,10,16
+	bf	31,L(big_loop)
+
+	stw	4,0(10)
+	stw	4,4(10)
+	addi	10,10,8
+	mr	12,10
+	blt	cr1,L(tail_bytes)
+
+	b	L(big_loop)
+
+	.align	4
+1:	/* Copy 1 doubleword.  */
+	bf	31,L(big_loop)
+
+	stw	4,0(10)
+	stw	4,4(10)
+	addi	10,10,8
+
+	/* First use a 32-bytes loop with stw's to try and avoid the LHS due
+	   to the lfd we will do next.  Also, ping-pong through r10 and r12
+	   to avoid AGEN delays.  */
+	.align	4
+L(big_loop):
+	addi	12,10,32
+	stw	4,0(10)
+	stw	4,4(10)
+	stw	4,8(10)
+	stw	4,12(10)
+	stw	4,16(10)
+	stw	4,20(10)
+	stw	4,24(10)
+	stw	4,28(10)
+	bdz	L(tail_bytes)
+
+	addi	10,10,64
+	stw	4,0(12)
+	stw	4,4(12)
+	stw	4,8(12)
+	stw	4,12(12)
+	stw	4,16(12)
+	stw	4,20(12)
+	stw	4,24(12)
+	stw	4,28(12)
+	bdnz	L(big_loop_fast_setup)
+
+	mr	12,10
+	b	L(tail_bytes)
+
+	/* Now that we're probably past the LHS window, use the VSX to
+	   speed up the loop.  */
+L(big_loop_fast_setup):
+	li	11,24
+	li	6,16
+	lxvdsx	4,1,11
+
+	.align	4
+L(big_loop_fast):
+	addi	12,10,32
+	stxvd2x	4,0,10
+	stxvd2x	4,10,6
+	bdz	L(tail_bytes)
+
+	addi	10,10,64
+	stxvd2x	4,0,12
+	stxvd2x	4,12,6
+	bdnz	L(big_loop_fast)
+
+	mr	12,10
+
+	.align	4
+L(tail_bytes):
+
+	/* Check for tail bytes.  */
+	mr	1,7		/* Restore r1.  */
+	beqlr	cr6
+
+	clrlwi	0,5,29
+	mtocrf	0x01,0
+
+	/*  At this point we have a tail of 0-7 bytes and we know that the
+	destination is doubleword-aligned.  */
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	stw	4,0(12)
+	addi	12,12,4
+2:	/* Copy 2 bytes.  */
+	bf	30,1f
+
+	sth	4,0(12)
+	addi	12,12,2
+1:	/* Copy 1 byte.  */
+	bflr	31
+
+	stb	4,0(12)
+	blr
+
+
+	/* Special case when value is 0 and we have a long length to deal
+	   with.  Use dcbz to zero out 128 bytes at a time.  Before using
+	   dcbz though, we need to get the destination 128-byte aligned.  */
+	.align	4
+L(huge):
+	lfd	4,24(1)
+	andi.	11,10,127
+	neg	0,10
+	beq	L(huge_aligned)
+
+	clrlwi	0,0,25
+	subf	5,0,5
+	srwi	0,0,3
+	mtocrf  0x01,0
+
+	/* Get DST aligned to 128 bytes.  */
+8:	bf	28,4f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	stfd	4,16(10)
+	stfd	4,24(10)
+	stfd	4,32(10)
+	stfd	4,40(10)
+	stfd	4,48(10)
+	stfd	4,56(10)
+	addi	10,10,64
+	.align	4
+4:	bf	29,2f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	stfd	4,16(10)
+	stfd	4,24(10)
+	addi	10,10,32
+	.align	4
+2:	bf	30,1f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	addi	10,10,16
+	.align	4
+1:	bf	31,L(huge_aligned)
+
+	stfd	4,0(10)
+	addi	10,10,8
+
+L(huge_aligned):
+	srwi	8,5,7
+	clrlwi	11,5,25
+	cmplwi	cr6,11,0
+	mtctr	8
+
+	/* Zero out 128 bytes at a time.  */
+	.align	4
+L(huge_loop):
+	dcbz	0,10
+	addi	10,10,128
+	bdnz	L(huge_loop)
+
+	/* We have a tail of 0~127 bytes to handle.  */
+	mr	1,7		/* Restore r1.  */
+	beqlr	cr6
+
+	subf	9,3,10
+	subf	5,9,12
+	srwi	8,5,3
+	cmplwi	cr6,8,0
+	mtocrf	0x01,8
+
+	/* We have a tail of 1~127 bytes.  Copy up to 15 doublewords for
+	   speed.  We'll handle the resulting tail bytes later.  */
+	beq	cr6,L(tail)
+
+8:	bf	28,4f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	stfd	4,16(10)
+	stfd	4,24(10)
+	stfd	4,32(10)
+	stfd	4,40(10)
+	stfd	4,48(10)
+	stfd	4,56(10)
+	addi	10,10,64
+	.align	4
+4:	bf	29,2f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	stfd	4,16(10)
+	stfd	4,24(10)
+	addi	10,10,32
+	.align	4
+2:	bf	30,1f
+
+	stfd	4,0(10)
+	stfd	4,8(10)
+	addi	10,10,16
+	.align	4
+1:	bf	31,L(tail)
+
+	stfd	4,0(10)
+	addi	10,10,8
+
+	/* Handle the rest of the tail bytes here.  */
+L(tail):
+	mtocrf	0x01,5
+
+	.align	4
+4:	bf	29,2f
+
+	stw	4,0(10)
+	addi	10,10,4
+	.align	4
+2:	bf	30,1f
+
+	sth	4,0(10)
+	addi	10,10,2
+	.align	4
+1:	bflr	31
+
+	stb	4,0(10)
+	blr
+
+
+	/* Expanded tree to copy tail bytes without increments.  */
+	.align	4
+L(copy_tail):
+	bf	29,L(FXX)
+
+	stw	4,0(10)
+	bf	30,L(TFX)
+
+	sth	4,4(10)
+	bflr	31
+
+	stb	4,6(10)
+	blr
+
+	.align	4
+L(FXX):	bf	30,L(FFX)
+
+	sth	4,0(10)
+	bflr	31
+
+	stb	4,2(10)
+	blr
+
+	.align	4
+L(TFX):	bflr	31
+
+	stb	4,4(10)
+	blr
+
+	.align	4
+L(FFX):	bflr	31
+
+	stb	4,0(10)
+	blr
+
+	/* Handle copies of 9~31 bytes.  */
+	.align	4
+L(medium):
+	/* At least 9 bytes to go.  */
+	andi.	11,10,3
+	clrlwi	0,0,30
+	beq	L(medium_aligned)
+
+	/* Force 4-bytes alignment for DST.  */
+	mtocrf	0x01,0
+	subf	5,0,5
+1:	/* Copy 1 byte.  */
+	bf	31,2f
+
+	stb	4,0(10)
+	addi	10,10,1
+2:	/* Copy 2 bytes.  */
+	bf	30,L(medium_aligned)
+
+	sth	4,0(10)
+	addi	10,10,2
+
+	.align	4
+L(medium_aligned):
+	/* At least 6 bytes to go, and DST is word-aligned.  */
+	cmplwi	cr1,5,16
+	mtocrf	0x01,5
+	blt	cr1,8f
+
+	/* Copy 16 bytes.  */
+	stw	4,0(10)
+	stw	4,4(10)
+	stw	4,8(10)
+	stw	4,12(10)
+	addi	10,10,16
+8:	/* Copy 8 bytes.  */
+	bf	28,4f
+
+	stw	4,0(10)
+	stw	4,4(10)
+	addi	10,10,8
+4:	/* Copy 4 bytes.  */
+	bf	29,2f
+
+	stw	4,0(10)
+	addi	10,10,4
+2:	/* Copy 2-3 bytes.  */
+	bf	30,1f
+
+	sth	4,0(10)
+	addi	10,10,2
+1:	/* Copy 1 byte.  */
+	bflr	31
+
+	stb	4,0(10)
+	blr
+
+	/* Handles copies of 0~8 bytes.  */
+	.align	4
+L(small):
+	mtocrf	0x01,5
+	bne	cr6,L(copy_tail)
+
+	stw	4,0(10)
+	stw	4,4(10)
+	blr
+
+END (memset)
+libc_hidden_builtin_def (memset)
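
A rough C view of the L(huge) path above, assuming (as the code does) a
128-byte dcbz line size and a length large enough to reach the alignment
target; dcbz has no portable C equivalent, so plain memset calls stand in
for both the store ladders and the cache-line clears:

  #include <stdint.h>
  #include <string.h>

  /* Sketch only: zero N bytes starting at S the way the huge path does.  */
  static void
  zero_huge (void *s, size_t n)
  {
    uint8_t *p = s;
    size_t pad = (size_t) (-(uintptr_t) p & 127); /* neg/clrlwi above.  */
    memset (p, 0, pad);              /* The stfd ladder up to 128-byte alignment.  */
    p += pad;
    n -= pad;
    for (size_t lines = n >> 7; lines != 0; lines--, p += 128)
      memset (p, 0, 128);            /* dcbz 0,r10 in L(huge_loop).  */
    memset (p, 0, n & 127);          /* Trailing bytes, as in L(tail).  */
  }
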
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/multiarch/Implies
new file mode 100644
index 0000000000..22c12fd393
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power6/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/rawmemchr.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/rawmemchr.S
new file mode 100644
index 0000000000..22edcfb209
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/rawmemchr.S
@@ -0,0 +1,110 @@
+/* Optimized rawmemchr implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* void * [r3] rawmemchr (void *s [r3], int c [r4])  */
+	.machine  power7
+ENTRY (__rawmemchr)
+	CALL_MCOUNT
+	dcbt	0,r3
+	clrrwi	r8,r3,2	      /* Align the address to word boundary.  */
+
+	/* Replicate byte to word.  */
+	insrwi	r4,r4,8,16
+	insrwi	r4,r4,16,0
+
+	/* Now r4 has a word of c bytes.  */
+
+	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
+	lwz	r12,0(r8)     /* Load word from memory.  */
+	cmpb	r5,r12,r4     /* Compare each byte against c byte.  */
+#ifdef __LITTLE_ENDIAN__
+	srw	r5,r5,r6
+	slw	r5,r5,r6
+#else
+	slw	r5,r5,r6      /* Move left to discard ignored bits.  */
+	srw	r5,r5,r6      /* Bring the bits back as zeros.  */
+#endif
+	cmpwi	cr7,r5,0      /* If r5 == 0, no c bytes have been found.  */
+	bne	cr7,L(done)
+
+	mtcrf   0x01,r8
+
+	/* Are we now aligned to a doubleword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+
+	bt	29,L(loop)
+
+	/* Handle WORD2 of pair.  */
+	lwzu	r12,4(r8)
+	cmpb	r5,r12,r4
+	cmpwi	cr7,r5,0
+	bne	cr7,L(done)
+	b	L(loop)	      /* We branch here (rather than falling through)
+				 to skip the nops due to heavy alignment
+				 of the loop below.  */
+
+	/* Main loop to look for the end of the string.  Since it's a
+	   small loop (< 8 instructions), align it to 32-bytes.  */
+	.p2align  5
+L(loop):
+	/* Load two words, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the byte-checking process for bigger strings.  */
+	lwz	r12,4(r8)
+	lwzu	r11,8(r8)
+	cmpb	r5,r12,r4
+	cmpb	r6,r11,r4
+	or	r7,r5,r6
+	cmpwi	cr7,r7,0
+	beq	cr7,L(loop)
+
+	/* OK, one (or both) of the words contains a 'c' byte.  Check
+	   the first word and decrement the address in case the first
+	   word really contains a c byte.  */
+
+	cmpwi	cr6,r5,0
+	addi	r8,r8,-4
+	bne	cr6,L(done)
+
+	/* The 'c' byte must be in the second word.  Adjust the address
+	   again and move the result of cmpb to r10 so we can calculate the
+	   pointer.  */
+	mr	r5,r6
+	addi	r8,r8,4
+
+	/* r5 has the output of the cmpb instruction, that is, it contains
+	   0xff in the same position as the 'c' byte in the original
+	   word from the string.  Use that fact to find out what is
+	   the position of the byte inside the string.  */
+L(done):
+#ifdef __LITTLE_ENDIAN__
+	addi    r0,r5,-1
+	andc    r0,r0,r5
+	popcntw	r0,r0
+#else
+	cntlzw	r0,r5	      /* Count leading zeros before the match.  */
+#endif
+	srwi	r0,r0,3	      /* Convert leading zeros to bytes.  */
+	add	r3,r8,r0      /* Return address of the matching char.  */
+	blr
+END (__rawmemchr)
+weak_alias (__rawmemchr,rawmemchr)
+libc_hidden_builtin_def (__rawmemchr)
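
The two insrwi instructions above replicate the search byte into every byte
of a word, and cmpb then flags the equal bytes.  The same two steps in
portable C (helper names are illustrative, not glibc interfaces):

  #include <stdint.h>

  /* What the insrwi pair computes: C repeated in all four byte lanes.  */
  static inline uint32_t
  repeat_byte (unsigned char c)
  {
    return (uint32_t) c * 0x01010101u;
  }

  /* Software stand-in for cmpb: nonzero iff some byte of WORD equals the
     corresponding byte of PAT (cmpb itself yields 0xff per matching byte).  */
  static inline uint32_t
  byte_match (uint32_t word, uint32_t pat)
  {
    uint32_t x = word ^ pat;                      /* Matching bytes become 0.  */
    return (x - 0x01010101u) & ~x & 0x80808080u;  /* Classic zero-byte test.  */
  }
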
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strcasecmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strcasecmp.S
new file mode 100644
index 0000000000..964875a13b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strcasecmp.S
@@ -0,0 +1,129 @@
+/* Optimized strcasecmp implementation for PowerPC32.
+   Copyright (C) 2011-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <locale-defines.h>
+
+/* int [r3] strcasecmp (const char *s1 [r3], const char *s2 [r4] )
+
+   or if defined USE_IN_EXTENDED_LOCALE_MODEL:
+
+   int [r3] strcasecmp_l (const char *s1 [r3], const char *s2 [r4],
+                          __locale_t loc [r5]) */
+
+#ifndef STRCMP
+# define __STRCMP __strcasecmp
+# define STRCMP   strcasecmp
+#endif
+
+ENTRY (__STRCMP)
+
+#define rRTN	r3	/* Return value */
+#define rSTR1	r5	/* 1st string */
+#define rSTR2	r4	/* 2nd string */
+#define rLOCARG	r5	/* 3rd argument: locale_t */
+#define rCHAR1	r6	/* Byte read from 1st string */
+#define rCHAR2	r7	/* Byte read from 2nd string */
+#define rADDR1	r8	/* Address of tolower(rCHAR1) */
+#define rADDR2	r12	/* Address of tolower(rCHAR2) */
+#define rLWR1	r8	/* Byte tolower(rCHAR1) */
+#define rLWR2	r12	/* Byte tolower(rCHAR2) */
+#define rTMP	r0
+#define rGOT	r9	/* Address of the Global Offset Table */
+#define rLOC	r11	/* Default locale address */
+
+	cmpw    cr7, r3, r4
+#ifndef USE_IN_EXTENDED_LOCALE_MODEL
+# ifdef SHARED
+	mflr	rTMP
+	bcl	20,31,.L1
+.L1:	mflr	rGOT
+	addis	rGOT, rGOT, _GLOBAL_OFFSET_TABLE_-.L1@ha
+	addi 	rGOT, rGOT, _GLOBAL_OFFSET_TABLE_-.L1@l
+	lwz	rLOC, __libc_tsd_LOCALE@got@tprel(rGOT)
+	add 	rLOC, rLOC, __libc_tsd_LOCALE@tls
+	lwz	rLOC, 0(rLOC)
+	mtlr	rTMP
+# else
+	lis	rTMP,_GLOBAL_OFFSET_TABLE_@ha
+	la	rLOC,_GLOBAL_OFFSET_TABLE_@l(rTMP)
+	lwz	rLOC, __libc_tsd_LOCALE@got@tprel(rGOT)
+	add	rLOC, rLOC, __libc_tsd_LOCALE@tls
+	lwz	rLOC, 0(rLOC)
+# endif /* SHARED */
+#else
+	mr	rLOC, rLOCARG
+#endif
+	mr	rSTR1, rRTN
+	lwz	rLOC, LOCALE_CTYPE_TOLOWER(rLOC)
+	li	rRTN, 0
+	beqlr	cr7
+
+	/* Unrolled loop for POWER: loads are done with 'lbz' plus an
+	   immediate offset, and the string pointers are only updated at
+	   the end of the unrolled sequence.  */
+
+L(loop):
+	lbz	rCHAR1, 0(rSTR1)	/* Load char from s1 */
+	lbz	rCHAR2, 0(rSTR2)	/* Load char from s2 */
+	sldi	rADDR1, rCHAR1, 2	/* Calculate address for tolower(*s1) */
+	sldi	rADDR2, rCHAR2, 2	/* Calculate address for tolower(*s2) */
+	lwzx	rLWR1, rLOC, rADDR1	/* Load tolower(*s1) */
+	lwzx	rLWR2, rLOC, rADDR2	/* Load tolower(*s2) */
+	cmpwi	cr7, rCHAR1, 0		/* *s1 == '\0' ? */
+	subf.	r3, rLWR2, rLWR1
+	bnelr
+	beqlr	cr7
+	lbz	rCHAR1, 1(rSTR1)
+	lbz	rCHAR2, 1(rSTR2)
+	sldi	rADDR1, rCHAR1, 2
+	sldi	rADDR2, rCHAR2, 2
+	lwzx	rLWR1, rLOC, rADDR1
+	lwzx	rLWR2, rLOC, rADDR2
+	cmpwi	cr7, rCHAR1, 0
+	subf.	r3, rLWR2, rLWR1
+	bnelr
+	beqlr	cr7
+	lbz	rCHAR1, 2(rSTR1)
+	lbz	rCHAR2, 2(rSTR2)
+	sldi	rADDR1, rCHAR1, 2
+	sldi	rADDR2, rCHAR2, 2
+	lwzx	rLWR1, rLOC, rADDR1
+	lwzx	rLWR2, rLOC, rADDR2
+	cmpwi	cr7, rCHAR1, 0
+	subf.	r3, rLWR2, rLWR1
+	bnelr
+	beqlr	cr7
+	lbz	rCHAR1, 3(rSTR1)
+	lbz	rCHAR2, 3(rSTR2)
+	/* Increment both string descriptors */
+	addi	rSTR1, rSTR1, 4
+	addi	rSTR2, rSTR2, 4
+	sldi	rADDR1, rCHAR1, 2
+	sldi	rADDR2, rCHAR2, 2
+	lwzx	rLWR1, rLOC, rADDR1
+	lwzx	rLWR2, rLOC, rADDR2
+	cmpwi	cr7, rCHAR1, 0
+	subf.	r3, rLWR2, rLWR1
+	bnelr
+	bne	cr7,L(loop)
+	blr
+END (__STRCMP)
+
+weak_alias (__STRCMP, STRCMP)
+libc_hidden_builtin_def (__STRCMP)
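
In C terms the unrolled loop above performs a table-driven case-insensitive
compare, terminating on the first NUL in s1 or on the first pair that still
differs after case folding.  A minimal sketch using the standard tolower
interface (the assembly instead indexes the locale's tolower table directly
with lwzx); the function name is illustrative only:

  #include <ctype.h>

  static int
  strcasecmp_sketch (const char *s1, const char *s2)
  {
    if (s1 == s2)
      return 0;                        /* cmpw cr7,r3,r4 / beqlr cr7.  */
    for (;;)
      {
        unsigned char c1 = (unsigned char) *s1++;
        unsigned char c2 = (unsigned char) *s2++;
        int diff = tolower (c1) - tolower (c2);
        if (diff != 0)
          return diff;                 /* subf. r3,rLWR2,rLWR1 / bnelr.  */
        if (c1 == '\0')
          return 0;                    /* cmpwi cr7,rCHAR1,0 / beqlr cr7.  */
      }
  }
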
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strcasecmp_l.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strcasecmp_l.S
new file mode 100644
index 0000000000..c13c4ebcb8
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strcasecmp_l.S
@@ -0,0 +1,5 @@
+#define USE_IN_EXTENDED_LOCALE_MODEL
+#define STRCMP   strcasecmp_l
+#define __STRCMP __strcasecmp_l
+
+#include "strcasecmp.S"
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strchr.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strchr.S
new file mode 100644
index 0000000000..75ca6acb98
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strchr.S
@@ -0,0 +1,225 @@
+/* Optimized strchr implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* char * [r3] strchr (char *s [r3], int c [r4])  */
+	.machine  power7
+ENTRY (strchr)
+	CALL_MCOUNT
+	dcbt	0,r3
+	clrrwi	r8,r3,2	      /* Align the address to word boundary.  */
+	cmpwi	cr7,r4,0
+	lwz	r12,0(r8)     /* Load word from memory.  */
+	li	r0,0	      /* Word with null chars to use
+				 with cmpb.  */
+
+	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
+
+	beq	cr7,L(null_match)
+
+	/* Replicate byte to word.  */
+	insrwi	r4,r4,8,16
+	insrwi	r4,r4,16,0
+
+	/* Now r4 has a word of c bytes and r0 has
+	   a word of null bytes.  */
+
+	cmpb	r10,r12,r4     /* Compare each byte against c byte.  */
+	cmpb	r11,r12,r0     /* Compare each byte against null byte.  */
+
+	/* Move the words left and right to discard the bits that are
+	   not part of the string and to bring them back as zeros.  */
+#ifdef __LITTLE_ENDIAN__
+	srw	r10,r10,r6
+	srw	r11,r11,r6
+	slw	r10,r10,r6
+	slw	r11,r11,r6
+#else
+	slw	r10,r10,r6
+	slw	r11,r11,r6
+	srw	r10,r10,r6
+	srw	r11,r11,r6
+#endif
+	or	r5,r10,r11    /* OR the results to speed things up.  */
+	cmpwi	cr7,r5,0      /* If r5 == 0, no c or null bytes
+				 have been found.  */
+	bne	cr7,L(done)
+
+	mtcrf   0x01,r8
+
+	/* Are we now aligned to a doubleword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+
+	bt	29,L(loop)
+
+	/* Handle WORD2 of pair.  */
+	lwzu	r12,4(r8)
+	cmpb	r10,r12,r4
+	cmpb	r11,r12,r0
+	or	r5,r10,r11
+	cmpwi	cr7,r5,0
+	bne	cr7,L(done)
+	b	L(loop)	      /* We branch here (rather than falling through)
+				 to skip the nops due to heavy alignment
+				 of the loop below.  */
+
+	.p2align  5
+L(loop):
+	/* Load two words, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the null-checking process for bigger strings.  */
+	lwz	r12,4(r8)
+	lwzu	r9,8(r8)
+	cmpb	r10,r12,r4
+	cmpb	r11,r12,r0
+	cmpb	r6,r9,r4
+	cmpb	r7,r9,r0
+	or	r12,r10,r11
+	or	r9,r6,r7
+	or	r5,r12,r9
+	cmpwi	cr7,r5,0
+	beq	cr7,L(loop)
+
+	/* OK, one (or both) of the words contains a c/null byte.  Check
+	   the first word and decrement the address in case the first
+	   word really contains a c/null byte.  */
+
+	cmpwi	cr6,r12,0
+	addi	r8,r8,-4
+	bne	cr6,L(done)
+
+	/* The c/null byte must be in the second word.  Adjust the address
+	   again and move the result of cmpb to r10/r11 so we can calculate
+	   the pointer.  */
+
+	mr	r10,r6
+	mr	r11,r7
+	addi	r8,r8,4
+
+	/* r10/r11 have the output of the cmpb instructions, that is,
+	   0xff in the same position as the c/null byte in the original
+	   word from the string.  Use that to calculate the pointer.  */
+L(done):
+#ifdef __LITTLE_ENDIAN__
+	addi    r3,r10,-1
+	andc    r3,r3,r10
+	popcntw	r0,r3
+	addi    r4,r11,-1
+	andc    r4,r4,r11
+	cmplw	cr7,r3,r4
+	bgt	cr7,L(no_match)
+#else
+	cntlzw	r0,r10	      /* Count leading zeros before c matches.  */
+	cmplw	cr7,r11,r10
+	bgt	cr7,L(no_match)
+#endif
+	srwi	r0,r0,3	      /* Convert leading zeros to bytes.  */
+	add	r3,r8,r0      /* Return address of the matching c byte
+				 or null in case c was not found.  */
+	blr
+
+	.align	4
+L(no_match):
+	li	r3,0
+	blr
+
+/* We are here because strchr was called with a null byte.  */
+	.align	4
+L(null_match):
+	/* r0 has a word of null bytes.  */
+
+	cmpb	r5,r12,r0     /* Compare each byte against null bytes.  */
+
+	/* Move the words left and right to discard the bits that are
+	   not part of the string and bring them back as zeros.  */
+#ifdef __LITTLE_ENDIAN__
+	srw	r5,r5,r6
+	slw	r5,r5,r6
+#else
+	slw	r5,r5,r6
+	srw	r5,r5,r6
+#endif
+	cmpwi	cr7,r5,0      /* If r5 == 0, no null bytes
+				 have been found.  */
+	bne	cr7,L(done_null)
+
+	mtcrf   0x01,r8
+
+	/* Are we now aligned to a doubleword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+
+	bt	29,L(loop_null)
+
+	/* Handle WORD2 of pair.  */
+	lwzu	r12,4(r8)
+	cmpb    r5,r12,r0
+	cmpwi	cr7,r5,0
+	bne	cr7,L(done_null)
+	b	L(loop_null)  /* We branch here (rather than falling through)
+				 to skip the nops due to heavy alignment
+				 of the loop below.  */
+
+	/* Main loop to look for the end of the string.  Since it's a
+	   small loop (< 8 instructions), align it to 32-bytes.  */
+	.p2align  5
+L(loop_null):
+	/* Load two words, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the null-checking process for bigger strings.  */
+	lwz	r12,4(r8)
+	lwzu    r11,8(r8)
+	cmpb	r5,r12,r0
+	cmpb	r10,r11,r0
+	or	r6,r5,r10
+	cmpwi	cr7,r6,0
+	beq	cr7,L(loop_null)
+
+	/* OK, one (or both) of the words contains a null byte.  Check
+	   the first word and decrement the address in case the first
+	   word really contains a null byte.  */
+
+	cmpwi	cr6,r5,0
+	addi	r8,r8,-4
+	bne	cr6,L(done_null)
+
+	/* The null byte must be in the second word.  Adjust the address
+	   again and move the result of cmpb to r10 so we can calculate the
+	   pointer.  */
+
+	mr	r5,r10
+	addi	r8,r8,4
+
+	/* r5 has the output of the cmpb instruction, that is, it contains
+	   0xff in the same position as the null byte in the original
+	   word from the string.  Use that to calculate the pointer.  */
+L(done_null):
+#ifdef __LITTLE_ENDIAN__
+	addi    r0,r5,-1
+	andc    r0,r0,r5
+	popcntw	r0,r0
+#else
+	cntlzw	r0,r5	      /* Count leading zeros before the match.  */
+#endif
+	srwi	r0,r0,3	      /* Convert leading zeros to bytes.  */
+	add	r3,r8,r0      /* Return address of the matching null byte.  */
+	blr
+END (strchr)
+weak_alias (strchr, index)
+libc_hidden_builtin_def (strchr)
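
The endian-specific code in L(done) above reduces to one question: does the
first NUL in the word come before the first match of c?  A hedged C sketch of
that decision (the helper is illustrative; both masks come from cmpb and at
least one of them is nonzero at this point):

  #include <stddef.h>
  #include <stdint.h>

  /* C_MASK/NUL_MASK are the cmpb results for the word at WP; return the
     address of the first c match, or NULL if the NUL comes first.  */
  static char *
  first_hit (uint32_t c_mask, uint32_t nul_mask, char *wp, int little_endian)
  {
    unsigned c_idx, nul_idx;
    if (little_endian)                 /* addi/andc/popcntw path.  */
      {
        c_idx = c_mask ? (unsigned) __builtin_ctz (c_mask) >> 3 : 4;
        nul_idx = nul_mask ? (unsigned) __builtin_ctz (nul_mask) >> 3 : 4;
      }
    else                               /* cntlzw path.  */
      {
        c_idx = c_mask ? (unsigned) __builtin_clz (c_mask) >> 3 : 4;
        nul_idx = nul_mask ? (unsigned) __builtin_clz (nul_mask) >> 3 : 4;
      }
    return nul_idx < c_idx ? NULL : wp + c_idx;
  }
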
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strchrnul.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strchrnul.S
new file mode 100644
index 0000000000..426137e11d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strchrnul.S
@@ -0,0 +1,127 @@
+/* Optimized strchrnul implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* char * [r3] strchrnul (char *s [r3], int c [r4])  */
+	.machine  power7
+ENTRY (__strchrnul)
+	CALL_MCOUNT
+	dcbt	0,r3
+	clrrwi	r8,r3,2	      /* Align the address to word boundary.  */
+
+	/* Replicate byte to word.  */
+	insrwi  r4,r4,8,16
+	insrwi  r4,r4,16,0
+
+	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
+	lwz	r12,0(r8)     /* Load word from memory.  */
+	li	r0,0	      /* Word with null chars to use
+				 with cmpb.  */
+
+	/* Now r4 has a word of c bytes and r0 has
+	   a word of null bytes.  */
+
+	cmpb	r10,r12,r0    /* Compare each byte against null byte.  */
+	cmpb	r9,r12,r4     /* Compare each byte against c byte.  */
+
+	/* Move the words left and right to discard the bits that are
+	   not part of the string and bring them back as zeros.  */
+#ifdef __LITTLE_ENDIAN__
+	srw	r10,r10,r6
+	srw	r9,r9,r6
+	slw	r10,r10,r6
+	slw	r9,r9,r6
+#else
+	slw	r10,r10,r6
+	slw	r9,r9,r6
+	srw	r10,r10,r6
+	srw	r9,r9,r6
+#endif
+	or	r5,r9,r10     /* OR the results to speed things up.  */
+	cmpwi	cr7,r5,0      /* If r5 == 0, no c or null bytes
+				 have been found.  */
+	bne	cr7,L(done)
+
+	mtcrf   0x01,r8
+
+	/* Are we now aligned to a doubleword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+
+	bt	29,L(loop)
+
+	/* Handle WORD2 of pair.  */
+	lwzu	r12,4(r8)
+	cmpb	r10,r12,r0
+	cmpb	r9,r12,r4
+	or	r5,r9,r10
+	cmpwi	cr7,r5,0
+	bne	cr7,L(done)
+	b	L(loop)	      /* We branch here (rather than falling through)
+				 to skip the nops due to heavy alignment
+				 of the loop below.  */
+
+	.p2align  5
+L(loop):
+	/* Load two words, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the null-checking process for bigger strings.  */
+	lwz	r12,4(r8)
+	lwzu	r11,8(r8)
+	cmpb	r10,r12,r0
+	cmpb	r9,r12,r4
+	cmpb	r6,r11,r0
+	cmpb	r7,r11,r4
+	or	r5,r9,r10
+	or	r10,r6,r7
+	or	r11,r5,r10
+	cmpwi	cr7,r11,0
+	beq	cr7,L(loop)
+
+	/* OK, one (or both) of the words contains a c/null byte.  Check
+	   the first word and decrement the address in case the first
+	   word really contains a c/null byte.  */
+
+	cmpwi	cr6,r5,0
+	addi	r8,r8,-4
+	bne	cr6,L(done)
+
+	/* The c/null byte must be in the second word.  Adjust the address
+	   again and move the result of cmpb to r5 so we can calculate the
+	   pointer.  */
+	mr	r5,r10
+	addi	r8,r8,4
+
+	/* r5 has the output of the cmpb instruction, that is, it contains
+	   0xff in the same position as the c/null byte in the original
+	   word from the string.  Use that to calculate the pointer.  */
+L(done):
+#ifdef __LITTLE_ENDIAN__
+	addi    r0,r5,-1
+	andc    r0,r0,r5
+	popcntw	r0,r0
+#else
+	cntlzw	r0,r5	      /* Count leading zeros before the match.  */
+#endif
+	srwi	r0,r0,3	      /* Convert leading zeros to bytes.  */
+	add	r3,r8,r0      /* Return address of matching c/null byte.  */
+	blr
+END (__strchrnul)
+weak_alias (__strchrnul,strchrnul)
+libc_hidden_builtin_def (__strchrnul)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strlen.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strlen.S
new file mode 100644
index 0000000000..3699791fa6
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strlen.S
@@ -0,0 +1,102 @@
+/* Optimized strlen implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* size_t [r3] strlen (char *s [r3])  */
+	.machine  power7
+ENTRY (strlen)
+	CALL_MCOUNT
+	dcbt	0,r3
+	clrrwi	r4,r3,2	      /* Align the address to word boundary.  */
+	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
+	li	r0,0	      /* Word with null chars to use with cmpb.  */
+	li	r5,-1	      /* MASK = 0xffffffffffffffff.  */
+	lwz	r12,0(r4)     /* Load word from memory.  */
+#ifdef __LITTLE_ENDIAN__
+	slw	r5,r5,r6
+#else
+	srw	r5,r5,r6      /* MASK = MASK >> padding.  */
+#endif
+	orc	r9,r12,r5     /* Mask bits that are not part of the string.  */
+	cmpb	r10,r9,r0     /* Check for null bytes in WORD1.  */
+	cmpwi	cr7,r10,0     /* If r10 == 0, no null's have been found.  */
+	bne	cr7,L(done)
+
+	mtcrf   0x01,r4
+
+	/* Are we now aligned to a doubleword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+
+	bt	29,L(loop)
+
+	/* Handle WORD2 of pair.  */
+	lwzu	r12,4(r4)
+	cmpb	r10,r12,r0
+	cmpwi	cr7,r10,0
+	bne	cr7,L(done)
+
+	/* Main loop to look for the end of the string.  Since it's a
+	   small loop (< 8 instructions), align it to 32-bytes.  */
+	.p2align  5
+L(loop):
+	/* Load two words, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the null-checking process for bigger strings.  */
+
+	lwz	r12, 4(r4)
+	lwzu	r11, 8(r4)
+	cmpb	r10,r12,r0
+	cmpb	r9,r11,r0
+	or	r8,r9,r10     /* Merge everything in one word.  */
+	cmpwi	cr7,r8,0
+	beq	cr7,L(loop)
+
+	/* OK, one (or both) of the words contains a null byte.  Check
+	   the first word and decrement the address in case the first
+	   word really contains a null byte.  */
+
+	cmpwi	cr6,r10,0
+	addi	r4,r4,-4
+	bne	cr6,L(done)
+
+	/* The null byte must be in the second word.  Adjust the address
+	   again and move the result of cmpb to r10 so we can calculate the
+	   length.  */
+
+	mr	r10,r9
+	addi	r4,r4,4
+
+	/* r10 has the output of the cmpb instruction, that is, it contains
+	   0xff in the same position as the null byte in the original
+	   word from the string.  Use that to calculate the length.  */
+L(done):
+#ifdef __LITTLE_ENDIAN__
+	addi	r9, r10, -1   /* Form a mask from trailing zeros.  */
+	andc	r9, r9, r10
+	popcntw r0, r9	      /* Count the bits in the mask.  */
+#else
+	cntlzw	r0,r10	      /* Count leading zeros before the match.  */
+#endif
+	subf	r5,r3,r4
+	srwi	r0,r0,3	      /* Convert leading zeros to bytes.  */
+	add	r3,r5,r0      /* Compute final length.  */
+	blr
+END (strlen)
+libc_hidden_builtin_def (strlen)
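
The first-word handling above shifts an all-ones mask by the padding and then
ORs its complement into the loaded word (orc), so bytes that precede the start
of the string can never look like a NUL.  A small C rendering of that step
(illustrative helper; PAD is the byte offset of the string within its aligned
word, 0..3):

  #include <stdint.h>

  static uint32_t
  mask_leading_bytes (uint32_t word, unsigned pad, int little_endian)
  {
    uint32_t mask = 0xffffffffu;
    if (little_endian)
      mask <<= 8 * pad;        /* slw r5,r5,r6.  */
    else
      mask >>= 8 * pad;        /* srw r5,r5,r6: MASK >> padding.  */
    return word | ~mask;       /* orc r9,r12,r5: ignored bytes forced nonzero.  */
  }
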
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strncmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strncmp.S
new file mode 100644
index 0000000000..d4598e1930
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strncmp.S
@@ -0,0 +1,199 @@
+/* Optimized strncmp implementation for POWER7/PowerPC32.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* See strlen.S for comments on how the end-of-string testing works.  */
+
+/* int [r3] strncmp (const char *s1 [r3],
+		     const char *s2 [r4],
+		     size_t size [r5])  */
+
+EALIGN (strncmp,5,0)
+
+#define rTMP2	r0
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rN	r5	/* max string length */
+#define rWORD1	r6	/* current word in s1 */
+#define rWORD2	r7	/* current word in s2 */
+#define rWORD3	r10
+#define rWORD4	r11
+#define rFEFE	r8	/* constant 0xfefefeff (-0x01010101) */
+#define r7F7F	r9	/* constant 0x7f7f7f7f */
+#define rNEG	r10	/* ~(word in s1 | 0x7f7f7f7f) */
+#define rBITDIF	r11	/* bits that differ in s1 & s2 words */
+#define rTMP	r12
+
+	dcbt	0,rSTR1
+	nop
+	or	rTMP,rSTR2,rSTR1
+	lis	r7F7F,0x7f7f
+	dcbt	0,rSTR2
+	nop
+	clrlwi.	rTMP,rTMP,30
+	cmplwi	cr1,rN,0
+	lis	rFEFE,-0x101
+	bne	L(unaligned)
+/* We are word aligned so set up for two loops.  first a word
+   loop, then fall into the byte loop if any residual.  */
+	srwi.	rTMP,rN,2
+	clrlwi	rN,rN,30
+	addi	rFEFE,rFEFE,-0x101
+	addi	r7F7F,r7F7F,0x7f7f
+	cmplwi	cr1,rN,0
+	beq	L(unaligned)
+
+	mtctr	rTMP
+	lwz	rWORD1,0(rSTR1)
+	lwz	rWORD2,0(rSTR2)
+	b	L(g1)
+
+L(g0):
+	lwzu	rWORD1,4(rSTR1)
+	bne	cr1,L(different)
+	lwzu	rWORD2,4(rSTR2)
+L(g1):	add	rTMP,rFEFE,rWORD1
+	nor	rNEG,r7F7F,rWORD1
+	bdz	L(tail)
+	and.	rTMP,rTMP,rNEG
+	cmpw	cr1,rWORD1,rWORD2
+	beq	L(g0)
+
+/* OK. We've hit the end of the string. We need to be careful that
+   we don't compare two strings as different because of gunk beyond
+   the end of the strings...  */
+#ifdef __LITTLE_ENDIAN__
+L(endstring):
+	slwi	rTMP, rTMP, 1
+	addi    rTMP2, rTMP, -1
+	andc    rTMP2, rTMP2, rTMP
+	and	rWORD2, rWORD2, rTMP2		/* Mask off gunk.  */
+	and	rWORD1, rWORD1, rTMP2
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rldimi	rTMP2, rWORD2, 24, 32
+	rldimi	rTMP, rWORD1, 24, 32
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr
+	ori	rRTN, rTMP2, 1
+	blr
+
+L(different):
+	lwz	rWORD1, -4(rSTR1)
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rldimi	rTMP2, rWORD2, 24, 32
+	rldimi	rTMP, rWORD1, 24, 32
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr
+	ori	rRTN, rTMP2, 1
+	blr
+
+#else
+L(endstring):
+	and	rTMP,r7F7F,rWORD1
+	beq	cr1,L(equal)
+	add	rTMP,rTMP,r7F7F
+	xor.	rBITDIF,rWORD1,rWORD2
+	andc	rNEG,rNEG,rTMP
+	blt	L(highbit)
+	cntlzw	rBITDIF,rBITDIF
+	cntlzw	rNEG,rNEG
+	addi	rNEG,rNEG,7
+	cmpw	cr1,rNEG,rBITDIF
+	sub	rRTN,rWORD1,rWORD2
+	bgelr	cr1
+L(equal):
+	li	rRTN,0
+	blr
+
+L(different):
+	lwz	rWORD1,-4(rSTR1)
+	xor.	rBITDIF,rWORD1,rWORD2
+	sub	rRTN,rWORD1,rWORD2
+	bgelr
+L(highbit):
+	ori	rRTN, rWORD2, 1
+	blr
+#endif
+
+/* Oh well. In this case, we just do a byte-by-byte comparison.  */
+	.align	4
+L(tail):
+	and.	rTMP,rTMP,rNEG
+	cmpw	cr1,rWORD1,rWORD2
+	bne	L(endstring)
+	addi	rSTR1,rSTR1,4
+	bne	cr1,L(different)
+	addi	rSTR2,rSTR2,4
+	cmplwi	cr1,rN,0
+L(unaligned):
+	mtctr	rN
+	ble	cr1,L(ux)
+L(uz):
+	lbz	rWORD1,0(rSTR1)
+	lbz	rWORD2,0(rSTR2)
+	.align	4
+L(u1):
+	cmpwi	cr1,rWORD1,0
+	bdz	L(u4)
+	cmpw	rWORD1,rWORD2
+	beq	cr1,L(u4)
+	bne	L(u4)
+	lbzu	rWORD3,1(rSTR1)
+	lbzu	rWORD4,1(rSTR2)
+	cmpwi	cr1,rWORD3,0
+	bdz	L(u3)
+	cmpw	rWORD3,rWORD4
+	beq	cr1,L(u3)
+	bne	L(u3)
+	lbzu	rWORD1,1(rSTR1)
+	lbzu	rWORD2,1(rSTR2)
+	cmpwi	cr1,rWORD1,0
+	bdz	L(u4)
+	cmpw	rWORD1,rWORD2
+	beq	cr1,L(u4)
+	bne	L(u4)
+	lbzu	rWORD3,1(rSTR1)
+	lbzu	rWORD4,1(rSTR2)
+	cmpwi	cr1,rWORD3,0
+	bdz	L(u3)
+	cmpw	rWORD3,rWORD4
+	beq	cr1,L(u3)
+	bne	L(u3)
+	lbzu	rWORD1,1(rSTR1)
+	lbzu	rWORD2,1(rSTR2)
+	b	L(u1)
+
+L(u3):  sub	rRTN,rWORD3,rWORD4
+	blr
+L(u4):	sub	rRTN,rWORD1,rWORD2
+	blr
+L(ux):
+	li	rRTN,0
+	blr
+END (strncmp)
+libc_hidden_builtin_def (strncmp)
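
The aligned loop above detects end-of-string without cmpb, using the magic
constants 0xfefefeff (that is, -0x01010101) in rFEFE and 0x7f7f7f7f in r7F7F.
The add/nor/and. sequence at L(g1) is the classic zero-byte test; in C:

  #include <stdint.h>

  /* Nonzero iff WORD contains a zero byte: exactly
     (WORD + 0xfefefeff) & ~(WORD | 0x7f7f7f7f).  */
  static inline uint32_t
  has_zero_byte (uint32_t word)
  {
    return (word + 0xfefefeffu)          /* add  rTMP,rFEFE,rWORD1  */
           & ~(word | 0x7f7f7f7fu);      /* nor  rNEG,r7F7F,rWORD1; and.  */
  }
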
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strnlen.S b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strnlen.S
new file mode 100644
index 0000000000..6019d5be5b
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power7/strnlen.S
@@ -0,0 +1,176 @@
+/* Optimized strnlen implementation for PowerPC32/POWER7 using cmpb insn.
+   Copyright (C) 2010-2017 Free Software Foundation, Inc.
+   Contributed by Luis Machado <luisgpm@br.ibm.com>.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* size_t [r3] strnlen (char *s [r3], size_t size [r4])  */
+	.machine  power7
+ENTRY (__strnlen)
+	CALL_MCOUNT
+	dcbt	0,r3
+	clrrwi	r8,r3,2	      /* Align the address to word boundary.  */
+	add	r7,r3,r4      /* Calculate the last acceptable address.  */
+	cmplwi	r4,16
+	li	r0,0	      /* Word with null chars.  */
+	addi	r7,r7,-1
+	ble	L(small_range)
+
+	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
+	lwz	r12,0(r8)     /* Load word from memory.  */
+	cmpb	r10,r12,r0    /* Check for null bytes in WORD1.  */
+#ifdef __LITTLE_ENDIAN__
+	srw	r10,r10,r6
+	slw	r10,r10,r6
+#else
+	slw	r10,r10,r6
+	srw	r10,r10,r6
+#endif
+	cmplwi	cr7,r10,0     /* If r10 == 0, no null's have been found.  */
+	bne	cr7,L(done)
+
+	clrrwi	r7,r7,2       /* Address of last word.  */
+	mtcrf   0x01,r8
+	/* Are we now aligned to a doubleword boundary?  If so, skip to
+	   the main loop.  Otherwise, go through the alignment code.  */
+
+	bt	29,L(loop_setup)
+
+	/* Handle WORD2 of pair.  */
+	lwzu	r12,4(r8)
+	cmpb	r10,r12,r0
+	cmplwi	cr7,r10,0
+	bne	cr7,L(done)
+
+L(loop_setup):
+	/* The last word we want to read in the loop below is the one
+	   containing the last byte of the string, ie. the word at
+	   (s + size - 1) & ~3, or r7.  The first word read is at
+	   r8 + 4, we read 2 * cnt words, so the last word read will
+	   be at r8 + 4 + 8 * cnt - 4.  Solving for cnt gives
+	   cnt = (r7 - r8) / 8  */
+	sub	r5,r7,r8
+	srwi	r6,r5,3	      /* Number of loop iterations.  */
+	mtctr	r6	      /* Setup the counter.  */
+
+	/* Main loop to look for the null byte in the string.  Since
+	   it's a small loop (< 8 instructions), align it to 32-bytes.  */
+	.p2align  5
+L(loop):
+	/* Load two words, compare and merge in a
+	   single register for speed.  This is an attempt
+	   to speed up the null-checking process for bigger strings.  */
+
+	lwz	r12,4(r8)
+	lwzu	r11,8(r8)
+	cmpb	r10,r12,r0
+	cmpb	r9,r11,r0
+	or	r5,r9,r10     /* Merge everything in one word.  */
+	cmplwi	cr7,r5,0
+	bne	cr7,L(found)
+	bdnz	L(loop)
+
+	/* We may have one more word to read.  */
+	cmplw	cr6,r8,r7
+	beq	cr6,L(end_max)
+
+	lwzu	r12,4(r8)
+	cmpb	r10,r12,r0
+	cmplwi	cr6,r10,0
+	bne	cr6,L(done)
+
+L(end_max):
+	mr	r3,r4
+	blr
+
+	/* OK, one (or both) of the words contains a null byte.  Check
+	   the first word and decrement the address in case the first
+	   word really contains a null byte.  */
+	.align	4
+L(found):
+	cmplwi	cr6,r10,0
+	addi	r8,r8,-4
+	bne	cr6,L(done)
+
+	/* The null byte must be in the second word.  Adjust the address
+	   again and move the result of cmpb to r10 so we can calculate the
+	   length.  */
+
+	mr	r10,r9
+	addi	r8,r8,4
+
+	/* r10 has the output of the cmpb instruction, that is, it contains
+	   0xff in the same position as the null byte in the original
+	   word from the string.  Use that to calculate the length.
+	   We need to make sure the null char is *before* the end of the
+	   range.  */
+L(done):
+#ifdef __LITTLE_ENDIAN__
+	addi	r0,r10,-1
+	andc	r0,r0,r10
+	popcntw	r0,r0
+#else
+	cntlzw	r0,r10	      /* Count leading zeros before the match.  */
+#endif
+	sub	r3,r8,r3
+	srwi	r0,r0,3	      /* Convert leading/trailing zeros to bytes.  */
+	add	r3,r3,r0      /* Length until the match.  */
+	cmplw	r3,r4
+	blelr
+	mr	r3,r4
+	blr
+
+/* Deals with size <= 16.  */
+	.align	4
+L(small_range):
+	cmplwi	r4,0
+	beq	L(end_max)
+
+	clrrwi	r7,r7,2       /* Address of last word.  */
+
+	rlwinm	r6,r3,3,27,28 /* Calculate padding.  */
+	lwz	r12,0(r8)     /* Load word from memory.  */
+	cmpb	r10,r12,r0    /* Check for null bytes in WORD1.  */
+#ifdef __LITTLE_ENDIAN__
+	srw	r10,r10,r6
+	slw	r10,r10,r6
+#else
+	slw	r10,r10,r6
+	srw	r10,r10,r6
+#endif
+	cmplwi	cr7,r10,0
+	bne	cr7,L(done)
+
+	cmplw	r8,r7
+	beq	L(end_max)
+
+	.p2align  5
+L(loop_small):
+	lwzu	r12,4(r8)
+	cmpb	r10,r12,r0
+	cmplwi	cr6,r10,0
+	bne	cr6,L(done)
+	cmplw	r8,r7
+	bne	L(loop_small)
+	mr	r3,r4
+	blr
+
+END (__strnlen)
+libc_hidden_def (__strnlen)
+weak_alias (__strnlen, strnlen)
+libc_hidden_builtin_def (strnlen)
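
The L(loop_setup) comment above derives the unrolled-loop trip count from two
addresses: r8, the last word already examined, and r7, the word holding the
last byte of the range.  A small sketch of that arithmetic (an odd trailing
word, if any, is handled by the extra read after the loop):

  #include <stdint.h>

  /* Each iteration reads two words (8 bytes) starting at CUR + 4, and the
     last word that may be needed is LAST; so cnt = (LAST - CUR) / 8,
     matching the sub/srwi pair in L(loop_setup).  */
  static unsigned
  loop_iterations (uintptr_t cur_word, uintptr_t last_word)
  {
    return (unsigned) ((last_word - cur_word) >> 3);
  }
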
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power8/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power8/Implies
new file mode 100644
index 0000000000..083f3e950a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power8/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/power7/fpu
+powerpc/powerpc32/power7
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power8/fpu/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power8/fpu/multiarch/Implies
new file mode 100644
index 0000000000..43a3b83e2a
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power8/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power7/fpu/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power8/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power8/multiarch/Implies
new file mode 100644
index 0000000000..f18504408f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power8/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power7/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power9/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power9/Implies
new file mode 100644
index 0000000000..066dea2798
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power9/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/power8/fpu
+powerpc/powerpc32/power8
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power9/fpu/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power9/fpu/multiarch/Implies
new file mode 100644
index 0000000000..4393b56872
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power9/fpu/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power8/fpu/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/power9/multiarch/Implies b/REORG.TODO/sysdeps/powerpc/powerpc32/power9/multiarch/Implies
new file mode 100644
index 0000000000..1a46ef0035
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/power9/multiarch/Implies
@@ -0,0 +1 @@
+powerpc/powerpc32/power8/multiarch
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/ppc-mcount.S b/REORG.TODO/sysdeps/powerpc/powerpc32/ppc-mcount.S
new file mode 100644
index 0000000000..8a6b205c37
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/ppc-mcount.S
@@ -0,0 +1,104 @@
+/* PowerPC-specific implementation of profiling support.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* Profiling _mcount itself would recurse endlessly, so make sure PROF is
+   not defined while this file is built.  */
+#ifdef PROF
+#undef PROF
+#endif
+
+#include <sysdep.h>
+
+/* We do profiling as described in the SYSV ELF ABI, except that glibc
+   _mcount manages its own counters.  The caller has put the address the
+   caller will return to in the usual place on the stack, 4(r1).  _mcount
+   is responsible for ensuring that when it returns no argument-passing
+   registers are disturbed, and that the LR is set back to (what the
+   caller sees as) 4(r1).
+
+   This is intended so that the following code can be inserted at the
+   front of any routine without changing the routine:
+
+	.data
+	mflr	r0
+	stw	r0,4(r1)
+	bl	_mcount
+*/
+
+ENTRY(_mcount)
+#if defined PIC && !defined SHARED
+# define CALLER_LR_OFFSET 68
+	stwu	r1,-64(r1)
+	cfi_adjust_cfa_offset (64)
+	stw	r30, 48(r1)
+	cfi_rel_offset (r30, 48)
+#else
+# define CALLER_LR_OFFSET 52
+	stwu	r1,-48(r1)
+	cfi_adjust_cfa_offset (48)
+#endif
+/* We need to save the parameter-passing registers.  */
+	stw	r3, 12(r1)
+	stw	r4, 16(r1)
+	stw	r5, 20(r1)
+	stw	r6, 24(r1)
+	mflr	r4
+#if defined PIC && !defined SHARED
+	bcl	20,31,0f
+0:
+	mflr	r30
+	addis	r30, r30, _GLOBAL_OFFSET_TABLE_-0b@ha
+	addi	r30, r30, _GLOBAL_OFFSET_TABLE_-0b@l
+#endif
+	lwz	r3, CALLER_LR_OFFSET(r1)
+	mfcr	r5
+	stw	r7, 28(r1)
+	stw	r8, 32(r1)
+	stw	r9, 36(r1)
+	stw	r10,40(r1)
+	stw	r4, 44(r1)
+	cfi_rel_offset (lr, 44)
+	stw	r5,  8(r1)
+#ifndef SHARED
+	bl	JUMPTARGET(__mcount_internal)
+#else
+	bl	__mcount_internal@local
+#endif
+ /* Restore the registers...  */
+	lwz     r6,  8(r1)
+	lwz	r0, 44(r1)
+	lwz	r3, 12(r1)
+	mtctr	r0
+	lwz	r4, 16(r1)
+	mtcrf	0xff,r6
+	lwz	r5, 20(r1)
+	lwz	r6, 24(r1)
+	lwz	r0, CALLER_LR_OFFSET(r1)
+	lwz	r7, 28(r1)
+	lwz	r8, 32(r1)
+	mtlr	r0
+	lwz	r9, 36(r1)
+	lwz	r10,40(r1)
+ /* ...unwind the stack frame, and return to your usual programming.  */
+#if defined PIC && !defined SHARED
+	lwz	r30, 48(r1)
+	addi	r1,r1,64
+#else
+	addi	r1,r1,48
+#endif
+	bctr
+END(_mcount)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/register-dump.h b/REORG.TODO/sysdeps/powerpc/powerpc32/register-dump.h
new file mode 100644
index 0000000000..6e533a75a3
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/register-dump.h
@@ -0,0 +1,120 @@
+/* Dump registers.
+   Copyright (C) 1998-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sys/uio.h>
+#include <_itoa.h>
+
+/* This prints out the information in the following form: */
+static const char dumpform[] = "\
+Register dump:\n\
+fp0-3:   0000030%0000031% 0000032%0000033% 0000034%0000035% 0000036%0000037%\n\
+fp4-7:   0000038%0000039% 000003a%000003b% 000003c%000003d% 000003e%000003f%\n\
+fp8-11:  0000040%0000041% 0000042%0000043% 0000044%0000045% 0000046%0000047%\n\
+fp12-15: 0000048%0000049% 000004a%000004b% 000004c%000004d% 000004e%000004f%\n\
+fp16-19: 0000050%0000051% 0000052%0000053% 0000054%0000055% 0000056%0000057%\n\
+fp20-23: 0000058%0000059% 000005a%000005b% 000005c%000005d% 000005e%000005f%\n\
+fp24-27: 0000060%0000061% 0000062%0000063% 0000064%0000065% 0000066%0000067%\n\
+fp28-31: 0000068%0000069% 000006a%000006b% 000006c%000006d% 000006e%000006f%\n\
+r0 =0000000% sp =0000001% r2 =0000002% r3 =0000003%  trap=0000028%\n\
+r4 =0000004% r5 =0000005% r6 =0000006% r7 =0000007%   sr0=0000020% sr1=0000021%\n\
+r8 =0000008% r9 =0000009% r10=000000a% r11=000000b%   dar=0000029% dsi=000002a%\n\
+r12=000000c% r13=000000d% r14=000000e% r15=000000f%   r3*=0000022%\n\
+r16=0000010% r17=0000011% r18=0000012% r19=0000013%\n\
+r20=0000014% r21=0000015% r22=0000016% r23=0000017%    lr=0000024% xer=0000025%\n\
+r24=0000018% r25=0000019% r26=000001a% r27=000001b%    mq=0000027% ctr=0000023%\n\
+r28=000001c% r29=000001d% r30=000001e% r31=000001f%  fscr=0000071% ccr=0000026%\n\
+";
+
+/* Most of the fields are self-explanatory.  'sr0' is the next
+   instruction to execute, from SRR0, which may have some relationship
+   with the instruction that caused the exception.  'r3*' is the value
+   that will be returned in register 3 when the current system call
+   returns.  'sr1' is SRR1, bits 16-31 of which are copied from the MSR:
+
+   16 - External interrupt enable
+   17 - Privilege level (1=user, 0=supervisor)
+   18 - FP available
+   19 - Machine check enable (if clear, processor locks up on machine check)
+   20 - FP exception mode bit 0 (FP exceptions recoverable)
+   21 - Single-step trace enable
+   22 - Branch trace enable
+   23 - FP exception mode bit 1
+   25 - exception prefix (if set, exceptions are taken from 0xFFFnnnnn,
+        otherwise from 0x000nnnnn).
+   26 - Instruction address translation enabled.
+   27 - Data address translation enabled.
+   30 - Exception is recoverable (otherwise, don't try to return).
+   31 - Little-endian mode enable.
+
+   'Trap' is the address of the exception:
+
+   00200 - Machine check exception (memory parity error, for instance)
+   00300 - Data access exception (memory not mapped, see dsisr for why)
+   00400 - Instruction access exception (memory not mapped)
+   00500 - External interrupt
+   00600 - Alignment exception (see dsisr for more information)
+   00700 - Program exception (illegal/trap instruction, FP exception)
+   00800 - FP unavailable (should not be seen by user code)
+   00900 - Decrementer exception (for instance, SIGALRM)
+   00A00 - I/O controller interface exception
+   00C00 - System call exception (for instance, kill(3)).
+   00E00 - FP assist exception (optional FP instructions, etc.)
+
+   'dar' is the memory location, for traps 00300, 00400, 00600, 00A00.
+   'dsisr' has the following bits under trap 00300:
+   0 - direct-store error exception
+   1 - no page table entry for page
+   4 - memory access not permitted
+   5 - trying to access I/O controller space or using lwarx/stwcx on
+       non-write-cached memory
+   6 - access was store
+   9 - data access breakpoint hit
+   10 - segment table search failed to find translation (64-bit ppcs only)
+   11 - I/O controller instruction not permitted
+   For trap 00400, the same bits are set in SRR1 instead.
+   For trap 00600, bits 12-31 of the DSISR set to allow emulation of
+   the instruction without actually having to read it from memory.
+*/
+
+#define xtoi(x) (x >= 'a' ? x + 10 - 'a' : x - '0')
+
+static void
+register_dump (int fd, struct sigcontext *ctx)
+{
+  char buffer[sizeof(dumpform)];
+  char *bufferpos;
+  unsigned regno;
+  unsigned *regs = (unsigned *)(ctx->regs);
+
+  memcpy(buffer, dumpform, sizeof(dumpform));
+
+  /* Generate the output.  */
+  while ((bufferpos = memchr (buffer, '%', sizeof(dumpform))))
+    {
+      regno = xtoi (bufferpos[-1]) | xtoi (bufferpos[-2]) << 4;
+      memset (bufferpos-2, '0', 3);
+      _itoa_word (regs[regno], bufferpos+1, 16, 0);
+    }
+
+  /* Write the output.  */
+  write (fd, buffer, sizeof(buffer) - 1);
+}
+
+
+#define REGISTER_DUMP \
+  register_dump (fd, ctx)
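
The dumpform template above encodes, in the two characters before each '%',
the hexadecimal index of the register slot to print, and register_dump
rewrites each field in place.  A worked example, written as a C comment (my
reading of the code, not part of it):

  /* For the field "r3 =0000003%": bufferpos points at the '%', so
     bufferpos[-2..-1] are "03" and
         regno = xtoi ('3') | xtoi ('0') << 4 == 0x03.
     _itoa_word (regs[3], bufferpos + 1, 16, 0) then writes the value
     backwards so its last hex digit replaces the '%' itself, while the
     '0' padding already in the template (plus the small memset) supplies
     any leading zeros.  */
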
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/rshift.S b/REORG.TODO/sysdeps/powerpc/powerpc32/rshift.S
new file mode 100644
index 0000000000..58abdcad59
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/rshift.S
@@ -0,0 +1,55 @@
+/* Shift a limb right, low level routine.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* INPUT PARAMETERS
+   res_ptr	r3
+   s1_ptr	r4
+   size		r5
+   cnt		r6  */
+
+ENTRY (__mpn_rshift)
+	mtctr	r5		# copy size into CTR
+	addi	r7,r3,-4	# move adjusted res_ptr to free return reg
+	subfic	r8,r6,32
+	lwz	r11,0(r4)	# load first s1 limb
+	slw	r3,r11,r8	# compute function return value
+	bdz	L(1)
+
+L(0):	lwzu	r10,4(r4)
+	srw	r9,r11,r6
+	slw	r12,r10,r8
+	or	r9,r9,r12
+	stwu	r9,4(r7)
+	bdz	L(2)
+	lwzu	r11,4(r4)
+	srw	r9,r10,r6
+	slw	r12,r11,r8
+	or	r9,r9,r12
+	stwu	r9,4(r7)
+	bdnz	L(0)
+
+L(1):	srw	r0,r11,r6
+	stw	r0,4(r7)
+	blr
+
+L(2):	srw	r0,r10,r6
+	stw	r0,4(r7)
+	blr
+END (__mpn_rshift)
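
What the unrolled loop above computes can be written in a few lines of C. This is only a reference sketch of the mpn_rshift contract (ref_rshift is a made-up name); it assumes 1 <= cnt <= 31 and size >= 1, which the assembly also requires since it shifts by 32-cnt.

    #include <stddef.h>
    #include <stdint.h>

    /* res_ptr[] gets s1_ptr[] shifted right by cnt bits; the return value is
       the bits shifted out of the low limb, left-justified in a limb.  */
    static uint32_t
    ref_rshift (uint32_t *res_ptr, const uint32_t *s1_ptr,
                size_t size, unsigned cnt)
    {
      uint32_t retval = s1_ptr[0] << (32 - cnt);
      for (size_t i = 0; i + 1 < size; i++)
        res_ptr[i] = (s1_ptr[i] >> cnt) | (s1_ptr[i + 1] << (32 - cnt));
      res_ptr[size - 1] = s1_ptr[size - 1] >> cnt;
      return retval;
    }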
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/rtld-memset.c b/REORG.TODO/sysdeps/powerpc/powerpc32/rtld-memset.c
new file mode 100644
index 0000000000..f3ed8ad1e7
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/rtld-memset.c
@@ -0,0 +1,4 @@
+/* PPCA2 has a different cache-line size than the usual 128 bytes.  To avoid
+   using code that assumes cache-line size to be 128 bytes (with dcbz
+   instructions) we use the generic code instead.  */
+#include <string/memset.c>
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/setjmp-common.S b/REORG.TODO/sysdeps/powerpc/powerpc32/setjmp-common.S
new file mode 100644
index 0000000000..c74c492cec
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/setjmp-common.S
@@ -0,0 +1,78 @@
+/* setjmp for PowerPC.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <stap-probe.h>
+#define _ASM
+#ifdef __NO_VMX__
+# include <novmxsetjmp.h>
+#else
+# include <jmpbuf-offsets.h>
+#endif
+
+#if defined __SPE__ || (defined __NO_FPRS__ && !defined _SOFT_FLOAT)
+# define SAVE_GP(N)	evstdd r##N,((JB_FPRS+((N)-14)*2)*4)(3)
+#else
+# define SAVE_GP(N)	stw r##N,((JB_GPRS+(N)-14)*4)(3)
+#endif
+
+ENTRY (__sigsetjmp_symbol)
+
+#ifdef PTR_MANGLE
+	mr   r5,r1
+	PTR_MANGLE(r5, r10)
+	stw  r5,(JB_GPR1*4)(3)
+#else
+	stw  r1,(JB_GPR1*4)(3)
+#endif
+	mflr r0
+	/* setjmp probe expects longjmp first argument (4@3), second argument
+	   (-4@4), and target address (4@0), respectively.  */
+	LIBC_PROBE (setjmp, 3, 4@3, -4@4, 4@0)
+	SAVE_GP (14)
+#ifdef PTR_MANGLE
+	PTR_MANGLE2 (r0, r10)
+	li   r10,0
+#endif
+	stw  r0,(JB_LR*4)(3)
+	SAVE_GP (15)
+	mfcr r0
+	SAVE_GP (16)
+	stw  r0,(JB_CR*4)(3)
+	SAVE_GP (17)
+	SAVE_GP (18)
+	SAVE_GP (19)
+	SAVE_GP (20)
+	SAVE_GP (21)
+	SAVE_GP (22)
+	SAVE_GP (23)
+	SAVE_GP (24)
+	SAVE_GP (25)
+	SAVE_GP (26)
+	SAVE_GP (27)
+	SAVE_GP (28)
+	SAVE_GP (29)
+	SAVE_GP (30)
+	SAVE_GP (31)
+#if IS_IN (rtld)
+	li   r3,0
+	blr
+#else
+	b __sigjmp_save_symbol@local
+#endif
+END (__sigsetjmp_symbol)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/setjmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/setjmp.S
new file mode 100644
index 0000000000..2800466276
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/setjmp.S
@@ -0,0 +1,46 @@
+/* non altivec (old) version of setjmp for PowerPC.
+   Copyright (C) 1995-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <shlib-compat.h>
+#include <libc-symbols.h>
+
+#if !IS_IN (libc)
+/* Build a non-versioned object for rtld-*.  */
+# define __sigsetjmp_symbol __sigsetjmp
+# define __sigjmp_save_symbol __sigjmp_save
+# include "setjmp-common.S"
+
+#else /* IS_IN (libc) */
+/* Build a versioned object for libc.  */
+versioned_symbol (libc, __vmx__sigsetjmp, __sigsetjmp, GLIBC_2_3_4)
+# define __sigsetjmp_symbol __vmx__sigsetjmp
+# define __sigjmp_save_symbol __vmx__sigjmp_save
+# include "setjmp-common.S"
+libc_hidden_ver (__vmx__sigsetjmp, __sigsetjmp)
+
+# if defined SHARED && SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+#  define __NO_VMX__
+#  undef __sigsetjmp_symbol
+#  undef __sigjmp_save_symbol
+#  undef JB_SIZE
+compat_symbol (libc, __novmx__sigsetjmp, __sigsetjmp, GLIBC_2_0)
+#  define __sigsetjmp_symbol __novmx__sigsetjmp
+#  define __sigjmp_save_symbol __novmx__sigjmp_save
+#  include "setjmp-common.S"
+# endif
+#endif /* IS_IN (libc) */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/stackguard-macros.h b/REORG.TODO/sysdeps/powerpc/powerpc32/stackguard-macros.h
new file mode 100644
index 0000000000..b3d0af830f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/stackguard-macros.h
@@ -0,0 +1,14 @@
+#include <stdint.h>
+
+#define STACK_CHK_GUARD \
+  ({ uintptr_t x; asm ("lwz %0,-28680(2)" : "=r" (x)); x; })
+
+#define POINTER_CHK_GUARD \
+  ({												\
+     uintptr_t x;										\
+     asm ("lwz %0,%1(2)"									\
+	  : "=r" (x)										\
+	  : "i" (offsetof (tcbhead_t, pointer_guard) - TLS_TCB_OFFSET - sizeof (tcbhead_t))	\
+         );											\
+     x;												\
+   })
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/start.S b/REORG.TODO/sysdeps/powerpc/powerpc32/start.S
new file mode 100644
index 0000000000..d510a56c0f
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/start.S
@@ -0,0 +1,95 @@
+/* Startup code for programs linked with GNU libc.
+   Copyright (C) 1998-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   In addition to the permissions in the GNU Lesser General Public
+   License, the Free Software Foundation gives you unlimited
+   permission to link the compiled version of this file with other
+   programs, and to distribute those programs without any restriction
+   coming from the use of this file. (The GNU Lesser General Public
+   License restrictions do apply in other respects; for example, they
+   cover modification of the file, and distribution when not linked
+   into another program.)
+
+   Note that people who make modified versions of this file are not
+   obligated to grant this special exception for their modified
+   versions; it is their choice whether to do so. The GNU Lesser
+   General Public License gives permission to release a modified
+   version without this exception; this exception also makes it
+   possible to release a modified version which carries forward this
+   exception.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* We do not want .eh_frame info for crt1.o since crt1.o is linked
+   before crtbegin.o, the file defining __EH_FRAME_BEGIN__.  */
+#undef cfi_startproc
+#define cfi_startproc
+#undef cfi_endproc
+#define cfi_endproc
+
+ /* These are the various addresses we require.  */
+#ifdef PIC
+	.section ".data"
+#else
+	.section ".rodata"
+#endif
+	.align	2
+L(start_addresses):
+	.long	_SDA_BASE_
+	.long	main
+	.long 	__libc_csu_init
+	.long 	__libc_csu_fini
+	ASM_SIZE_DIRECTIVE(L(start_addresses))
+
+	.section ".text"
+ENTRY(_start)
+ /* Save the stack pointer, in case we're statically linked under Linux.  */
+	mr	r9,r1
+ /* Set up an initial stack frame, and clear the LR.  */
+	clrrwi	r1,r1,4
+#ifdef PIC
+	SETUP_GOT_ACCESS(r13,got_label)
+	li	r0,0
+#else
+	li	r0,0
+#endif
+	stwu	r1,-16(r1)
+	mtlr	r0
+	stw	r0,0(r1)
+ /* Set r13 to point at the 'small data area', and put the address of
+    start_addresses in r8.  Also load the GOT pointer so that new PLT
+    calls work, like the one to __libc_start_main.  */
+#ifdef PIC
+	addis	r30,r13,_GLOBAL_OFFSET_TABLE_-got_label@ha
+	addis	r8,r13,L(start_addresses)-got_label@ha
+	addi	r30,r30,_GLOBAL_OFFSET_TABLE_-got_label@l
+	lwzu	r13, L(start_addresses)-got_label@l(r8)
+#else
+	lis	r8,L(start_addresses)@ha
+	lwzu	r13,L(start_addresses)@l(r8)
+#endif
+ /* and continue in libc-start, in glibc.  */
+	b	JUMPTARGET(__libc_start_main)
+END(_start)
+
+/* Define a symbol for the first piece of initialized data.  */
+	.section ".data"
+	.globl	__data_start
+__data_start:
+	.long	0
+weak_alias (__data_start, data_start)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/stpcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/stpcpy.S
new file mode 100644
index 0000000000..6ef249f80d
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/stpcpy.S
@@ -0,0 +1,119 @@
+/* Optimized stpcpy implementation for PowerPC.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* See strlen.s for comments on how the end-of-string testing works.  */
+
+/* char * [r3] stpcpy (char *dest [r3], const char *src [r4])  */
+
+EALIGN (__stpcpy, 4, 0)
+
+#define rTMP	r0
+#define rRTN	r3
+#define rDEST	r3		/* pointer to previous word in dest */
+#define rSRC	r4		/* pointer to previous word in src */
+#define rWORD	r6		/* current word from src */
+#define rFEFE	r7		/* 0xfefefeff */
+#define r7F7F	r8		/* 0x7f7f7f7f */
+#define rNEG	r9		/* ~(word in src | 0x7f7f7f7f) */
+#define rALT	r10		/* alternate word from src */
+
+
+	or	rTMP, rSRC, rDEST
+	clrlwi.	rTMP, rTMP, 30
+	addi	rDEST, rDEST, -4
+	bne	L(unaligned)
+
+	lis	rFEFE, -0x101
+	lis	r7F7F, 0x7f7f
+	lwz	rWORD, 0(rSRC)
+	addi	rFEFE, rFEFE, -0x101
+	addi	r7F7F, r7F7F, 0x7f7f
+	b	L(g2)
+
+L(g0):	lwzu	rALT, 4(rSRC)
+	stwu	rWORD, 4(rDEST)
+	add	rTMP, rFEFE, rALT
+	nor	rNEG, r7F7F, rALT
+	and.	rTMP, rTMP, rNEG
+	bne-	L(g1)
+	lwzu	rWORD, 4(rSRC)
+	stwu	rALT, 4(rDEST)
+L(g2):	add	rTMP, rFEFE, rWORD
+	nor	rNEG, r7F7F, rWORD
+	and.	rTMP, rTMP, rNEG
+	beq+	L(g0)
+
+	mr	rALT, rWORD
+/* We've hit the end of the string.  Do the rest byte-by-byte.  */
+L(g1):
+#ifdef __LITTLE_ENDIAN__
+	rlwinm.	rTMP, rALT, 0, 24, 31
+	stbu	rALT, 4(rDEST)
+	beqlr-
+	rlwinm.	rTMP, rALT, 24, 24, 31
+	stbu	rTMP, 1(rDEST)
+	beqlr-
+	rlwinm.	rTMP, rALT, 16, 24, 31
+	stbu	rTMP, 1(rDEST)
+	beqlr-
+	rlwinm	rTMP, rALT, 8, 24, 31
+	stbu	rTMP, 1(rDEST)
+	blr
+#else
+	rlwinm.	rTMP, rALT, 8, 24, 31
+	stbu	rTMP, 4(rDEST)
+	beqlr-
+	rlwinm.	rTMP, rALT, 16, 24, 31
+	stbu	rTMP, 1(rDEST)
+	beqlr-
+	rlwinm.	rTMP, rALT, 24, 24, 31
+	stbu	rTMP, 1(rDEST)
+	beqlr-
+	stbu	rALT, 1(rDEST)
+	blr
+#endif
+
+/* Oh well.  In this case, we just do a byte-by-byte copy.  */
+	.align 4
+	nop
+L(unaligned):
+	lbz	rWORD, 0(rSRC)
+	addi	rDEST, rDEST, 3
+	cmpwi	rWORD, 0
+	beq-	L(u2)
+
+L(u0):	lbzu	rALT, 1(rSRC)
+	stbu	rWORD, 1(rDEST)
+	cmpwi	rALT, 0
+	beq-	L(u1)
+	nop		/* Let 601 load start of loop.  */
+	lbzu	rWORD, 1(rSRC)
+	stbu	rALT, 1(rDEST)
+	cmpwi	rWORD, 0
+	bne+	L(u0)
+L(u2):	stbu	rWORD, 1(rDEST)
+	blr
+L(u1):	stbu	rALT, 1(rDEST)
+	blr
+END (__stpcpy)
+
+weak_alias (__stpcpy, stpcpy)
+libc_hidden_def (__stpcpy)
+libc_hidden_builtin_def (stpcpy)
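
The aligned fast path above copies a word at a time until the (x + 0xfefefeff) & ~(x | 0x7f7f7f7f) test (explained in strlen.S below) reports a zero byte, then finishes byte by byte and leaves the destination pointer on the terminating NUL. A simplified C sketch of that idea, under the assumption that src is word-aligned so the 4-byte reads never run past the NUL onto an unmapped page (the real code also has a separate unaligned path):

    #include <stdint.h>
    #include <string.h>

    static char *
    stpcpy_wordwise (char *dest, const char *src)
    {
      for (;;)
        {
          uint32_t w;
          memcpy (&w, src, 4);                        /* aligned word load */
          if (((w + 0xfefefeffu) & ~(w | 0x7f7f7f7fu)) != 0)
            break;                                    /* word contains a NUL */
          memcpy (dest, &w, 4);
          src += 4;
          dest += 4;
        }
      while ((*dest = *src++) != '\0')
        dest++;
      return dest;                                    /* points at the NUL */
    }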
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/strchr.S b/REORG.TODO/sysdeps/powerpc/powerpc32/strchr.S
new file mode 100644
index 0000000000..868cbd46aa
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/strchr.S
@@ -0,0 +1,146 @@
+/* Optimized strchr implementation for PowerPC.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* See strlen.s for comments on how this works.  */
+
+/* char * [r3] strchr (const char *s [r3] , int c [r4] )  */
+
+ENTRY (strchr)
+
+#define rTMP1	r0
+#define rRTN	r3	/* outgoing result */
+#define rSTR	r8	/* current word pointer */
+#define rCHR	r4	/* byte we're looking for, spread over the whole word */
+#define rWORD	r5	/* the current word */
+#define rCLZB	rCHR	/* leading zero byte count */
+#define rFEFE	r6	/* constant 0xfefefeff (-0x01010101) */
+#define r7F7F	r7	/* constant 0x7f7f7f7f */
+#define rTMP2	r9
+#define rIGN	r10	/* number of bits we should ignore in the first word */
+#define rMASK	r11	/* mask with the bits to ignore set to 0 */
+#define rTMP3	r12
+#define rTMP4	rIGN
+#define rTMP5	rMASK
+
+
+	rlwimi	rCHR, rCHR, 8, 16, 23
+	li	rMASK, -1
+	rlwimi	rCHR, rCHR, 16, 0, 15
+	rlwinm	rIGN, rRTN, 3, 27, 28
+	lis	rFEFE, -0x101
+	lis	r7F7F, 0x7f7f
+	clrrwi	rSTR, rRTN, 2
+	addi	rFEFE, rFEFE, -0x101
+	addi	r7F7F, r7F7F, 0x7f7f
+/* Test the first (partial?) word.  */
+	lwz	rWORD, 0(rSTR)
+#ifdef __LITTLE_ENDIAN__
+	slw	rMASK, rMASK, rIGN
+#else
+	srw	rMASK, rMASK, rIGN
+#endif
+	orc	rWORD, rWORD, rMASK
+	add	rTMP1, rFEFE, rWORD
+	nor	rTMP2, r7F7F, rWORD
+	and.	rTMP4, rTMP1, rTMP2
+	xor	rTMP3, rCHR, rWORD
+	orc	rTMP3, rTMP3, rMASK
+	b	L(loopentry)
+
+/* The loop.  */
+
+L(loop):
+	lwzu	rWORD, 4(rSTR)
+	and.	rTMP5, rTMP1, rTMP2
+/* Test for 0.	*/
+	add	rTMP1, rFEFE, rWORD /* x - 0x01010101.  */
+	nor	rTMP2, r7F7F, rWORD /* ~(x | 0x7f7f7f7f) == ~x & 0x80808080.  */
+	bne	L(foundit)
+	and.	rTMP4, rTMP1, rTMP2 /* (x - 0x01010101) & ~x & 0x80808080.  */
+/* Start test for the bytes we're looking for.  */
+	xor	rTMP3, rCHR, rWORD
+L(loopentry):
+	add	rTMP1, rFEFE, rTMP3
+	nor	rTMP2, r7F7F, rTMP3
+	beq	L(loop)
+
+/* There is a zero byte in the word, but there may also be a matching byte
+   (either before or after the zero byte).  In fact, we may be looking for a
+   zero byte, in which case we return a match.  */
+	and.	rTMP5, rTMP1, rTMP2
+	li	rRTN, 0
+	beqlr
+/* At this point:
+   rTMP5 bytes are 0x80 for each match of c, 0 otherwise.
+   rTMP4 bytes are 0x80 for each match of 0, 0 otherwise.
+   But there may be false matches in the next most significant byte from
+   a true match due to carries.  This means we need to recalculate the
+   matches using a longer method for big-endian.  */
+#ifdef __LITTLE_ENDIAN__
+	addi	rTMP1, rTMP5, -1
+	andc	rTMP1, rTMP1, rTMP5
+	cntlzw	rCLZB, rTMP1
+	addi	rTMP2, rTMP4, -1
+	andc	rTMP2, rTMP2, rTMP4
+	cmplw	rTMP1, rTMP2
+	bgtlr
+	subfic	rCLZB, rCLZB, 32-7
+#else
+/* I think we could reduce this by two instructions by keeping the "nor"
+   results from the loop for reuse here.  See strlen.S tail.  Similarly
+   one instruction could be pruned from L(foundit).  */
+	and	rFEFE, r7F7F, rWORD
+	or	rTMP5, r7F7F, rWORD
+	and	rTMP1, r7F7F, rTMP3
+	or	rTMP4, r7F7F, rTMP3
+	add	rFEFE, rFEFE, r7F7F
+	add	rTMP1, rTMP1, r7F7F
+	nor	rWORD, rTMP5, rFEFE
+	nor	rTMP2, rTMP4, rTMP1
+	cntlzw	rCLZB, rTMP2
+	cmplw	rWORD, rTMP2
+	bgtlr
+#endif
+	srwi	rCLZB, rCLZB, 3
+	add	rRTN, rSTR, rCLZB
+	blr
+
+L(foundit):
+#ifdef __LITTLE_ENDIAN__
+	addi	rTMP1, rTMP5, -1
+	andc	rTMP1, rTMP1, rTMP5
+	cntlzw	rCLZB, rTMP1
+	subfic	rCLZB, rCLZB, 32-7-32
+	srawi	rCLZB, rCLZB, 3
+#else
+	and	rTMP1, r7F7F, rTMP3
+	or	rTMP4, r7F7F, rTMP3
+	add	rTMP1, rTMP1, r7F7F
+	nor	rTMP2, rTMP4, rTMP1
+	cntlzw	rCLZB, rTMP2
+	subi	rSTR, rSTR, 4
+	srwi	rCLZB, rCLZB, 3
+#endif
+	add	rRTN, rSTR, rCLZB
+	blr
+END (strchr)
+
+weak_alias (strchr, index)
+libc_hidden_builtin_def (strchr)
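
The core idea above is to replicate the target byte across a word (the two rlwimi instructions) and then run the zero-byte test twice per word: once on the word itself, to catch the end of the string, and once on the word XORed with the replicated character, to catch matches. A hedged C sketch follows, with the cntlzw-based index computation replaced by a byte-wise settle and the same word-alignment assumption as the assembly; strchr_wordwise is a made-up name.

    #include <stdint.h>
    #include <string.h>

    static char *
    strchr_wordwise (const char *s, int c)
    {
      uint32_t cccc = 0x01010101u * (unsigned char) c;   /* c in every byte */
      for (;;)
        {
          uint32_t w, x;
          memcpy (&w, s, 4);                             /* aligned word load */
          x = w ^ cccc;
          if (((w + 0xfefefeffu) & ~(w | 0x7f7f7f7fu)) != 0   /* NUL here? */
              || ((x + 0xfefefeffu) & ~(x | 0x7f7f7f7fu)) != 0) /* match here? */
            break;
          s += 4;
        }
      for (;; s++)
        {
          if (*s == (char) c)
            return (char *) s;
          if (*s == '\0')
            return NULL;
        }
    }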
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/strcmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/strcmp.S
new file mode 100644
index 0000000000..52c4bbc1f2
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/strcmp.S
@@ -0,0 +1,150 @@
+/* Optimized strcmp implementation for PowerPC.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* See strlen.s for comments on how the end-of-string testing works.  */
+
+/* int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])  */
+
+EALIGN (strcmp, 4, 0)
+
+#define rTMP2	r0
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rWORD1	r5	/* current word in s1 */
+#define rWORD2	r6	/* current word in s2 */
+#define rFEFE	r7	/* constant 0xfefefeff (-0x01010101) */
+#define r7F7F	r8	/* constant 0x7f7f7f7f */
+#define rNEG	r9	/* ~(word in s1 | 0x7f7f7f7f) */
+#define rBITDIF	r10	/* bits that differ in s1 & s2 words */
+#define rTMP	r11
+
+
+	or	rTMP, rSTR2, rSTR1
+	clrlwi.	rTMP, rTMP, 30
+	lis	rFEFE, -0x101
+	bne	L(unaligned)
+
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+	lis	r7F7F, 0x7f7f
+	addi	rFEFE, rFEFE, -0x101
+	addi	r7F7F, r7F7F, 0x7f7f
+	b	L(g1)
+
+L(g0):	lwzu	rWORD1, 4(rSTR1)
+	bne	cr1, L(different)
+	lwzu	rWORD2, 4(rSTR2)
+L(g1):	add	rTMP, rFEFE, rWORD1
+	nor	rNEG, r7F7F, rWORD1
+	and.	rTMP, rTMP, rNEG
+	cmpw	cr1, rWORD1, rWORD2
+	beq+	L(g0)
+
+/* OK. We've hit the end of the string. We need to be careful that
+   we don't compare two strings as different because of gunk beyond
+   the end of the strings...  */
+#ifdef __LITTLE_ENDIAN__
+L(endstring):
+	addi    rTMP2, rTMP, -1
+	andc    rTMP2, rTMP2, rTMP
+	rlwimi	rTMP2, rTMP2, 1, 0, 30
+	and	rWORD2, rWORD2, rTMP2		/* Mask off gunk.  */
+	and	rWORD1, rWORD1, rTMP2
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rlwimi	rTMP2, rWORD2, 24, 0, 7
+	rlwimi	rTMP, rWORD1, 24, 0, 7
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr+
+	ori	rRTN, rTMP2, 1
+	blr
+
+L(different):
+	lwz	rWORD1, -4(rSTR1)
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rlwimi	rTMP2, rWORD2, 24, 0, 7
+	rlwimi	rTMP, rWORD1, 24, 0, 7
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr+
+	ori	rRTN, rTMP2, 1
+	blr
+
+#else
+L(endstring):
+	and	rTMP, r7F7F, rWORD1
+	beq	cr1, L(equal)
+	add	rTMP, rTMP, r7F7F
+	xor.	rBITDIF, rWORD1, rWORD2
+	andc	rNEG, rNEG, rTMP
+	blt-	L(highbit)
+	cntlzw	rBITDIF, rBITDIF
+	cntlzw	rNEG, rNEG
+	addi	rNEG, rNEG, 7
+	cmpw	cr1, rNEG, rBITDIF
+	sub	rRTN, rWORD1, rWORD2
+	bgelr+	cr1
+L(equal):
+	li	rRTN, 0
+	blr
+
+L(different):
+	lwz	rWORD1, -4(rSTR1)
+	xor.	rBITDIF, rWORD1, rWORD2
+	sub	rRTN, rWORD1, rWORD2
+	bgelr+
+L(highbit):
+	ori	rRTN, rWORD2, 1
+	blr
+#endif
+
+/* Oh well.  In this case, we just do a byte-by-byte comparison.  */
+	.align 4
+L(unaligned):
+	lbz	rWORD1, 0(rSTR1)
+	lbz	rWORD2, 0(rSTR2)
+	b	L(u1)
+
+L(u0):	lbzu	rWORD1, 1(rSTR1)
+	bne-	L(u4)
+	lbzu	rWORD2, 1(rSTR2)
+L(u1):	cmpwi	cr1, rWORD1, 0
+	beq-	cr1, L(u3)
+	cmpw	rWORD1, rWORD2
+	bne-	L(u3)
+	lbzu	rWORD1, 1(rSTR1)
+	lbzu	rWORD2, 1(rSTR2)
+	cmpwi	cr1, rWORD1, 0
+	cmpw	rWORD1, rWORD2
+	bne+	cr1, L(u0)
+L(u3):	sub	rRTN, rWORD1, rWORD2
+	blr
+L(u4):	lbz	rWORD1, -1(rSTR1)
+	sub	rRTN, rWORD1, rWORD2
+	blr
+END (strcmp)
+libc_hidden_builtin_def (strcmp)
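
The aligned loop above compares a word at a time and leaves the loop once the words differ or the first string's word may contain a NUL; the endstring/different paths then mask off the bytes beyond the NUL (the "gunk") and, on little-endian, byte-reverse the words before the final subtraction. A simplified C sketch that settles byte by byte instead of masking (strcmp_wordwise is a made-up name; both pointers are assumed word-aligned):

    #include <stdint.h>
    #include <string.h>

    static int
    strcmp_wordwise (const char *s1, const char *s2)
    {
      for (;;)
        {
          uint32_t w1, w2;
          memcpy (&w1, s1, 4);
          memcpy (&w2, s2, 4);
          if (w1 != w2
              || ((w1 + 0xfefefeffu) & ~(w1 | 0x7f7f7f7fu)) != 0)
            break;                       /* difference or NUL in this word */
          s1 += 4;
          s2 += 4;
        }
      while (*s1 != '\0' && *s1 == *s2)
        {
          s1++;
          s2++;
        }
      return (unsigned char) *s1 - (unsigned char) *s2;
    }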
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/strcpy.S b/REORG.TODO/sysdeps/powerpc/powerpc32/strcpy.S
new file mode 100644
index 0000000000..c7af830dda
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/strcpy.S
@@ -0,0 +1,117 @@
+/* Optimized strcpy implementation for PowerPC.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* See strlen.s for comments on how the end-of-string testing works.  */
+
+/* char * [r3] strcpy (char *dest [r3], const char *src [r4])  */
+
+EALIGN (strcpy, 4, 0)
+
+#define rTMP	r0
+#define rRTN	r3	/* incoming DEST arg preserved as result */
+#define rSRC	r4	/* pointer to previous word in src */
+#define rDEST	r5	/* pointer to previous word in dest */
+#define rWORD	r6	/* current word from src */
+#define rFEFE	r7	/* constant 0xfefefeff (-0x01010101) */
+#define r7F7F	r8	/* constant 0x7f7f7f7f */
+#define rNEG	r9	/* ~(word in s1 | 0x7f7f7f7f) */
+#define rALT	r10	/* alternate word from src */
+
+
+	or	rTMP, rSRC, rRTN
+	clrlwi.	rTMP, rTMP, 30
+	addi	rDEST, rRTN, -4
+	bne	L(unaligned)
+
+	lis	rFEFE, -0x101
+	lis	r7F7F, 0x7f7f
+	lwz	rWORD, 0(rSRC)
+	addi	rFEFE, rFEFE, -0x101
+	addi	r7F7F, r7F7F, 0x7f7f
+	b	L(g2)
+
+L(g0):	lwzu	rALT, 4(rSRC)
+	stwu	rWORD, 4(rDEST)
+	add	rTMP, rFEFE, rALT
+	nor	rNEG, r7F7F, rALT
+	and.	rTMP, rTMP, rNEG
+	bne-	L(g1)
+	lwzu	rWORD, 4(rSRC)
+	stwu	rALT, 4(rDEST)
+L(g2):	add	rTMP, rFEFE, rWORD
+	nor	rNEG, r7F7F, rWORD
+	and.	rTMP, rTMP, rNEG
+	beq+	L(g0)
+
+	mr	rALT, rWORD
+/* We've hit the end of the string.  Do the rest byte-by-byte.  */
+L(g1):
+#ifdef __LITTLE_ENDIAN__
+	rlwinm.	rTMP, rALT, 0, 24, 31
+	stb	rALT, 4(rDEST)
+	beqlr-
+	rlwinm.	rTMP, rALT, 24, 24, 31
+	stb	rTMP, 5(rDEST)
+	beqlr-
+	rlwinm.	rTMP, rALT, 16, 24, 31
+	stb	rTMP, 6(rDEST)
+	beqlr-
+	rlwinm	rTMP, rALT, 8, 24, 31
+	stb	rTMP, 7(rDEST)
+	blr
+#else
+	rlwinm.	rTMP, rALT, 8, 24, 31
+	stb	rTMP, 4(rDEST)
+	beqlr-
+	rlwinm.	rTMP, rALT, 16, 24, 31
+	stb	rTMP, 5(rDEST)
+	beqlr-
+	rlwinm.	rTMP, rALT, 24, 24, 31
+	stb	rTMP, 6(rDEST)
+	beqlr-
+	stb	rALT, 7(rDEST)
+	blr
+#endif
+
+/* Oh well.  In this case, we just do a byte-by-byte copy.  */
+	.align 4
+	nop
+L(unaligned):
+	lbz	rWORD, 0(rSRC)
+	addi	rDEST, rRTN, -1
+	cmpwi	rWORD, 0
+	beq-	L(u2)
+
+L(u0):	lbzu	rALT, 1(rSRC)
+	stbu	rWORD, 1(rDEST)
+	cmpwi	rALT, 0
+	beq-	L(u1)
+	nop		/* Let 601 load start of loop.  */
+	lbzu	rWORD, 1(rSRC)
+	stbu	rALT, 1(rDEST)
+	cmpwi	rWORD, 0
+	bne+	L(u0)
+L(u2):	stb	rWORD, 1(rDEST)
+	blr
+L(u1):	stb	rALT, 1(rDEST)
+	blr
+
+END (strcpy)
+libc_hidden_builtin_def (strcpy)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/strlen.S b/REORG.TODO/sysdeps/powerpc/powerpc32/strlen.S
new file mode 100644
index 0000000000..fa245f0760
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/strlen.S
@@ -0,0 +1,190 @@
+/* Optimized strlen implementation for PowerPC.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* The algorithm here uses the following techniques:
+
+   1) Given a word 'x', we can test to see if it contains any 0 bytes
+      by subtracting 0x01010101, and seeing if any of the high bits of each
+      byte changed from 0 to 1. This works because the least significant
+      0 byte must have had no incoming carry (otherwise it's not the least
+      significant), so it is 0x00 - 0x01 == 0xff. For all other
+      byte values, either they have the high bit set initially, or when
+      1 is subtracted you get a value in the range 0x00-0x7f, none of which
+      have their high bit set. The expression here is
+      (x + 0xfefefeff) & ~(x | 0x7f7f7f7f), which gives 0x00000000 when
+      there were no 0x00 bytes in the word.  You get 0x80 in bytes that
+      match, but possibly false 0x80 matches in the next more significant
+      byte to a true match due to carries.  For little-endian this is
+      of no consequence since the least significant match is the one
+      we're interested in, but big-endian needs method 2 to find which
+      byte matches.
+
+   2) Given a word 'x', we can test to see _which_ byte was zero by
+      calculating ~(((x & 0x7f7f7f7f) + 0x7f7f7f7f) | x | 0x7f7f7f7f).
+      This produces 0x80 in each byte that was zero, and 0x00 in all
+      the other bytes. The '| 0x7f7f7f7f' clears the low 7 bits in each
+      byte, and the '| x' part ensures that bytes with the high bit set
+      produce 0x00. The addition will carry into the high bit of each byte
+      iff that byte had one of its low 7 bits set. We can then just see
+      which was the most significant bit set and divide by 8 to find how
+      many to add to the index.
+      This is from the book 'The PowerPC Compiler Writer's Guide',
+      by Steve Hoxey, Faraydon Karim, Bill Hay and Hank Warren.
+
+   We deal with strings not aligned to a word boundary by taking the
+   first word and ensuring that bytes not part of the string
+   are treated as nonzero. To allow for memory latency, we unroll the
+   loop a few times, being careful to ensure that we do not read ahead
+   across cache line boundaries.
+
+   Questions to answer:
+   1) How long are strings passed to strlen? If they're often really long,
+   we should probably use cache management instructions and/or unroll the
+   loop more. If they're often quite short, it might be better to use
+   fact (2) in the inner loop than have to recalculate it.
+   2) How popular are bytes with the high bit set? If they are very rare,
+   on some processors it might be useful to use the simpler expression
+   ~((x - 0x01010101) | 0x7f7f7f7f) (that is, on processors with only one
+   ALU), but this fails when any character has its high bit set.  */
+
+/* Some notes on register usage: Under the SVR4 ABI, we can use registers
+   0 and 3 through 12 (so long as we don't call any procedures) without
+   saving them. We can also use registers 14 through 31 if we save them.
+   We can't use r1 (it's the stack pointer), r2 nor r13 because the user
+   program may expect them to hold their usual value if we get sent
+   a signal. Integer parameters are passed in r3 through r10.
+   We can use condition registers cr0, cr1, cr5, cr6, and cr7 without saving
+   them, the others we must save.  */
+
+/* int [r3] strlen (char *s [r3])  */
+
+ENTRY (strlen)
+
+#define rTMP4	r0
+#define rRTN	r3	/* incoming STR arg, outgoing result */
+#define rSTR	r4	/* current string position */
+#define rPADN	r5	/* number of padding bits we prepend to the
+			   string to make it start at a word boundary */
+#define rFEFE	r6	/* constant 0xfefefeff (-0x01010101) */
+#define r7F7F	r7	/* constant 0x7f7f7f7f */
+#define rWORD1	r8	/* current string word */
+#define rWORD2	r9	/* next string word */
+#define rMASK	r9	/* mask for first string word */
+#define rTMP1	r10
+#define rTMP2	r11
+#define rTMP3	r12
+
+
+	clrrwi	rSTR, rRTN, 2
+	lis	r7F7F, 0x7f7f
+	rlwinm	rPADN, rRTN, 3, 27, 28
+	lwz	rWORD1, 0(rSTR)
+	li	rMASK, -1
+	addi	r7F7F, r7F7F, 0x7f7f
+/* We use method (2) on the first two words, because rFEFE isn't
+   required, which reduces setup overhead.  It also gives a faster return
+   for small strings on big-endian, which would otherwise need to
+   recalculate with method (2) anyway.  */
+#ifdef __LITTLE_ENDIAN__
+	slw	rMASK, rMASK, rPADN
+#else
+	srw	rMASK, rMASK, rPADN
+#endif
+	and	rTMP1, r7F7F, rWORD1
+	or	rTMP2, r7F7F, rWORD1
+	add	rTMP1, rTMP1, r7F7F
+	nor	rTMP3, rTMP2, rTMP1
+	and.	rTMP3, rTMP3, rMASK
+	mtcrf	0x01, rRTN
+	bne	L(done0)
+	lis	rFEFE, -0x101
+	addi	rFEFE, rFEFE, -0x101
+/* Are we now aligned to a doubleword boundary?  */
+	bt	29, L(loop)
+
+/* Handle second word of pair.  */
+/* Perhaps use method (1) here for little-endian, saving one instruction?  */
+	lwzu	rWORD1, 4(rSTR)
+	and	rTMP1, r7F7F, rWORD1
+	or	rTMP2, r7F7F, rWORD1
+	add	rTMP1, rTMP1, r7F7F
+	nor.	rTMP3, rTMP2, rTMP1
+	bne	L(done0)
+
+/* The loop.  */
+
+L(loop):
+	lwz	rWORD1, 4(rSTR)
+	lwzu	rWORD2, 8(rSTR)
+	add	rTMP1, rFEFE, rWORD1
+	nor	rTMP2, r7F7F, rWORD1
+	and.	rTMP1, rTMP1, rTMP2
+	add	rTMP3, rFEFE, rWORD2
+	nor	rTMP4, r7F7F, rWORD2
+	bne	L(done1)
+	and.	rTMP3, rTMP3, rTMP4
+	beq	L(loop)
+
+#ifndef __LITTLE_ENDIAN__
+	and	rTMP1, r7F7F, rWORD2
+	add	rTMP1, rTMP1, r7F7F
+	andc	rTMP3, rTMP4, rTMP1
+	b	L(done0)
+
+L(done1):
+	and	rTMP1, r7F7F, rWORD1
+	subi	rSTR, rSTR, 4
+	add	rTMP1, rTMP1, r7F7F
+	andc	rTMP3, rTMP2, rTMP1
+
+/* When we get to here, rSTR points to the first word in the string that
+   contains a zero byte, and rTMP3 has 0x80 for bytes that are zero,
+   and 0x00 otherwise.  */
+L(done0):
+	cntlzw	rTMP3, rTMP3
+	subf	rTMP1, rRTN, rSTR
+	srwi	rTMP3, rTMP3, 3
+	add	rRTN, rTMP1, rTMP3
+	blr
+#else
+
+L(done0):
+	addi	rTMP1, rTMP3, -1	/* Form a mask from trailing zeros.  */
+	andc	rTMP1, rTMP1, rTMP3
+	cntlzw	rTMP1, rTMP1		/* Count bits not in the mask.  */
+	subf	rTMP3, rRTN, rSTR
+	subfic	rTMP1, rTMP1, 32-7
+	srwi	rTMP1, rTMP1, 3
+	add	rRTN, rTMP1, rTMP3
+	blr
+
+L(done1):
+	addi	rTMP3, rTMP1, -1
+	andc	rTMP3, rTMP3, rTMP1
+	cntlzw	rTMP3, rTMP3
+	subf	rTMP1, rRTN, rSTR
+	subfic	rTMP3, rTMP3, 32-7-32
+	srawi	rTMP3, rTMP3, 3
+	add	rRTN, rTMP1, rTMP3
+	blr
+#endif
+
+END (strlen)
+libc_hidden_builtin_def (strlen)
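
The two expressions described in the long comment above translate directly into C. Method (1) is only a yes/no test for a zero byte; method (2) produces 0x80 exactly in the zero bytes, so counting leading (big-endian) or trailing (little-endian) zero bits locates the first one. The sketch below assumes the string pointer is word-aligned (the real code handles the unaligned head by masking); strlen_wordwise and the helper names are made up.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Method (1): non-zero iff the word contains a zero byte (there can be
       stray 0x80 bits above a real match, but never a false non-zero).  */
    static inline int
    word_has_nul (uint32_t x)
    {
      return ((x + 0xfefefeffu) & ~(x | 0x7f7f7f7fu)) != 0;
    }

    /* Method (2): exactly 0x80 in each byte of X that is zero.  */
    static inline uint32_t
    nul_byte_mask (uint32_t x)
    {
      return ~(((x & 0x7f7f7f7fu) + 0x7f7f7f7fu) | x | 0x7f7f7f7fu);
    }

    static size_t
    strlen_wordwise (const char *s)
    {
      const char *p = s;
      uint32_t w, m;

      for (;;)
        {
          memcpy (&w, p, 4);             /* assumes s is word-aligned */
          if (word_has_nul (w))
            break;
          p += 4;
        }
      m = nul_byte_mask (w);
    #if defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
      return (size_t) (p - s) + __builtin_ctz (m) / 8;   /* first zero byte */
    #else
      return (size_t) (p - s) + __builtin_clz (m) / 8;
    #endif
    }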
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/strncmp.S b/REORG.TODO/sysdeps/powerpc/powerpc32/strncmp.S
new file mode 100644
index 0000000000..dadc90d661
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/strncmp.S
@@ -0,0 +1,181 @@
+/* Optimized strncmp implementation for PowerPC32.
+   Copyright (C) 2003-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* See strlen.s for comments on how the end-of-string testing works.  */
+
+/* int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t size [r5])  */
+
+EALIGN (strncmp, 4, 0)
+
+#define rTMP2	r0
+#define rRTN	r3
+#define rSTR1	r3	/* first string arg */
+#define rSTR2	r4	/* second string arg */
+#define rN	r5	/* max string length */
+#define rWORD1	r6	/* current word in s1 */
+#define rWORD2	r7	/* current word in s2 */
+#define rFEFE	r8	/* constant 0xfefefeff (-0x01010101) */
+#define r7F7F	r9	/* constant 0x7f7f7f7f */
+#define rNEG	r10	/* ~(word in s1 | 0x7f7f7f7f) */
+#define rBITDIF	r11	/* bits that differ in s1 & s2 words */
+#define rTMP	r12
+
+	dcbt	0,rSTR1
+	or	rTMP, rSTR2, rSTR1
+	lis	r7F7F, 0x7f7f
+	dcbt	0,rSTR2
+	clrlwi.	rTMP, rTMP, 30
+	cmplwi	cr1, rN, 0
+	lis	rFEFE, -0x101
+	bne	L(unaligned)
+/* We are word aligned, so set up for two loops: first a word
+   loop, then fall into the byte loop for any residual bytes.  */
+	srwi.	rTMP, rN, 2
+	clrlwi	rN, rN, 30
+	addi	rFEFE, rFEFE, -0x101
+	addi	r7F7F, r7F7F, 0x7f7f
+	cmplwi	cr1, rN, 0
+	beq	L(unaligned)
+
+	mtctr	rTMP	/* Power4 wants mtctr 1st in dispatch group.  */
+	lwz	rWORD1, 0(rSTR1)
+	lwz	rWORD2, 0(rSTR2)
+	b	L(g1)
+
+L(g0):
+	lwzu	rWORD1, 4(rSTR1)
+	bne-	cr1, L(different)
+	lwzu	rWORD2, 4(rSTR2)
+L(g1):	add	rTMP, rFEFE, rWORD1
+	nor	rNEG, r7F7F, rWORD1
+	bdz	L(tail)
+	and.	rTMP, rTMP, rNEG
+	cmpw	cr1, rWORD1, rWORD2
+	beq+	L(g0)
+
+/* OK. We've hit the end of the string. We need to be careful that
+   we don't compare two strings as different because of gunk beyond
+   the end of the strings...  */
+
+#ifdef __LITTLE_ENDIAN__
+L(endstring):
+	slwi	rTMP, rTMP, 1
+	addi    rTMP2, rTMP, -1
+	andc    rTMP2, rTMP2, rTMP
+	and	rWORD2, rWORD2, rTMP2		/* Mask off gunk.  */
+	and	rWORD1, rWORD1, rTMP2
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rlwimi	rTMP2, rWORD2, 24, 0, 7
+	rlwimi	rTMP, rWORD1, 24, 0, 7
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr+
+	ori	rRTN, rTMP2, 1
+	blr
+
+L(different):
+	lwz	rWORD1, -4(rSTR1)
+	rlwinm	rTMP2, rWORD2, 8, 0xffffffff	/* Byte reverse word.  */
+	rlwinm	rTMP, rWORD1, 8, 0xffffffff
+	rlwimi	rTMP2, rWORD2, 24, 0, 7
+	rlwimi	rTMP, rWORD1, 24, 0, 7
+	rlwimi	rTMP2, rWORD2, 24, 16, 23
+	rlwimi	rTMP, rWORD1, 24, 16, 23
+	xor.	rBITDIF, rTMP, rTMP2
+	sub	rRTN, rTMP, rTMP2
+	bgelr+
+	ori	rRTN, rTMP2, 1
+	blr
+
+#else
+L(endstring):
+	and	rTMP, r7F7F, rWORD1
+	beq	cr1, L(equal)
+	add	rTMP, rTMP, r7F7F
+	xor.	rBITDIF, rWORD1, rWORD2
+	andc	rNEG, rNEG, rTMP
+	blt-	L(highbit)
+	cntlzw	rBITDIF, rBITDIF
+	cntlzw	rNEG, rNEG
+	addi	rNEG, rNEG, 7
+	cmpw	cr1, rNEG, rBITDIF
+	sub	rRTN, rWORD1, rWORD2
+	bgelr+	cr1
+L(equal):
+	li	rRTN, 0
+	blr
+
+L(different):
+	lwz	rWORD1, -4(rSTR1)
+	xor.	rBITDIF, rWORD1, rWORD2
+	sub	rRTN, rWORD1, rWORD2
+	bgelr+
+L(highbit):
+	ori	rRTN, rWORD2, 1
+	blr
+#endif
+
+/* Oh well.  In this case, we just do a byte-by-byte comparison.  */
+	.align 4
+L(tail):
+	and.	rTMP, rTMP, rNEG
+	cmpw	cr1, rWORD1, rWORD2
+	bne-	L(endstring)
+	addi	rSTR1, rSTR1, 4
+	bne-	cr1, L(different)
+	addi	rSTR2, rSTR2, 4
+	cmplwi	cr1, rN, 0
+L(unaligned):
+	mtctr   rN	/* Power4 wants mtctr 1st in dispatch group */
+	bgt	cr1, L(uz)
+L(ux):
+	li	rRTN, 0
+	blr
+	.align 4
+L(uz):
+	lbz	rWORD1, 0(rSTR1)
+	lbz	rWORD2, 0(rSTR2)
+	nop
+	b	L(u1)
+L(u0):
+	lbzu	rWORD2, 1(rSTR2)
+L(u1):
+	bdz	L(u3)
+	cmpwi	cr1, rWORD1, 0
+	cmpw	rWORD1, rWORD2
+	beq-	cr1, L(u3)
+	lbzu	rWORD1, 1(rSTR1)
+	bne-	L(u2)
+	lbzu	rWORD2, 1(rSTR2)
+	bdz	L(u3)
+	cmpwi	cr1, rWORD1, 0
+	cmpw	rWORD1, rWORD2
+	bne-	L(u3)
+	lbzu	rWORD1, 1(rSTR1)
+	bne+	cr1, L(u0)
+
+L(u2):	lbzu	rWORD1, -1(rSTR1)
+L(u3):	sub	rRTN, rWORD1, rWORD2
+	blr
+END (strncmp)
+libc_hidden_builtin_def (strncmp)
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/sub_n.S b/REORG.TODO/sysdeps/powerpc/powerpc32/sub_n.S
new file mode 100644
index 0000000000..659f348079
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/sub_n.S
@@ -0,0 +1,68 @@
+/* Subtract two limb vectors of equal, non-zero length for PowerPC.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* mp_limb_t mpn_sub_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr,
+                        mp_size_t size)
+   Calculate s1-s2 and put result in res_ptr; return borrow, 0 or 1.  */
+
+/* Note on optimisation: This code is optimal for the 601.  Almost every other
+   possible 2-unrolled inner loop will not be.  Also, watch out for the
+   alignment...  */
+
+EALIGN (__mpn_sub_n, 3, 1)
+
+/* Set up for loop below.  */
+	mtcrf 0x01,r6
+	srwi. r7,r6,1
+	mtctr r7
+	bt    31,L(2)
+
+/* Set the carry (clear the borrow).  */
+	subfc r0,r0,r0
+/* Adjust pointers for loop.  */
+	addi  r3,r3,-4
+	addi  r4,r4,-4
+	addi  r5,r5,-4
+	b     L(0)
+
+L(2):	lwz   r7,0(r5)
+	lwz   r6,0(r4)
+	subfc r6,r7,r6
+	stw   r6,0(r3)
+        beq   L(1)
+
+/* Align start of loop to an odd word boundary to guarantee that the
+   last two words can be fetched in one access (for 601).  This turns
+   out to be important.  */
+L(0):
+	lwz   r9,4(r4)
+	lwz   r8,4(r5)
+	lwzu  r6,8(r4)
+	lwzu  r7,8(r5)
+	subfe r8,r8,r9
+	stw   r8,4(r3)
+	subfe r6,r7,r6
+	stwu  r6,8(r3)
+	bdnz  L(0)
+/* Return the borrow. */
+L(1):	subfe r3,r3,r3
+	neg   r3,r3
+	blr
+END (__mpn_sub_n)
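
For reference, the value the routine above computes (s1 minus s2 limb-wise, with the borrow carried through subfe and returned at L(1)) looks like this in plain C; ref_sub_n is a made-up name, and size >= 1 is assumed as for all mpn routines.

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t
    ref_sub_n (uint32_t *res_ptr, const uint32_t *s1_ptr,
               const uint32_t *s2_ptr, size_t size)
    {
      uint32_t borrow = 0;
      for (size_t i = 0; i < size; i++)
        {
          uint32_t a = s1_ptr[i], b = s2_ptr[i];
          uint32_t d = a - b - borrow;
          /* A borrow occurs iff b plus the incoming borrow exceeds a.  */
          borrow = (a < b) || (a == b && borrow);
          res_ptr[i] = d;
        }
      return borrow;          /* 0 or 1 */
    }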
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/submul_1.S b/REORG.TODO/sysdeps/powerpc/powerpc32/submul_1.S
new file mode 100644
index 0000000000..c1183c40f9
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/submul_1.S
@@ -0,0 +1,51 @@
+/* Multiply a limb vector by a single limb, for PowerPC.
+   Copyright (C) 1993-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+/* mp_limb_t mpn_submul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
+                           mp_size_t s1_size, mp_limb_t s2_limb)
+   Calculate res-s1*s2 and put result back in res; return carry.  */
+
+ENTRY (__mpn_submul_1)
+	mtctr	r5
+
+	lwz	r0,0(r4)
+	mullw	r7,r0,r6
+	mulhwu	r10,r0,r6
+	lwz     r9,0(r3)
+	subf 	r8,r7,r9
+	addc    r7,r7,r8		# invert cy (r7 is junk)
+	addi	r3,r3,-4		# adjust res_ptr
+	bdz	L(1)
+
+L(0):	lwzu	r0,4(r4)
+	stwu	r8,4(r3)
+	mullw	r8,r0,r6
+	adde	r7,r8,r10
+	mulhwu	r10,r0,r6
+	lwz     r9,4(r3)
+	addze   r10,r10
+	subf    r8,r7,r9
+	addc    r7,r7,r8		# invert cy (r7 is junk)
+	bdnz	L(0)
+
+L(1):	stw	r8,4(r3)
+	addze	r3,r10
+	blr
+END (__mpn_submul_1)
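
The loop above keeps the product's high limb in r10 and turns the subtraction's borrow into a carry with the addc trick; the overall effect is the standard submul_1 operation. A plain-C reference sketch (ref_submul_1 is a made-up name) using a 64-bit intermediate in place of mullw/mulhwu:

    #include <stddef.h>
    #include <stdint.h>

    /* {res_ptr, s1_size} -= {s1_ptr, s1_size} * s2_limb; return the limb
       that could not be subtracted from above the most significant limb.  */
    static uint32_t
    ref_submul_1 (uint32_t *res_ptr, const uint32_t *s1_ptr,
                  size_t s1_size, uint32_t s2_limb)
    {
      uint32_t borrow = 0;
      for (size_t i = 0; i < s1_size; i++)
        {
          uint64_t prod = (uint64_t) s1_ptr[i] * s2_limb + borrow;
          uint32_t lo = (uint32_t) prod;
          borrow = (uint32_t) (prod >> 32) + (res_ptr[i] < lo);
          res_ptr[i] -= lo;
        }
      return borrow;
    }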
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/symbol-hacks.h b/REORG.TODO/sysdeps/powerpc/powerpc32/symbol-hacks.h
new file mode 100644
index 0000000000..dbb3141621
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/symbol-hacks.h
@@ -0,0 +1,21 @@
+/* Hacks needed for symbol manipulation.  powerpc version.
+   Copyright (C) 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdeps/wordsize-32/divdi3-symbol-hacks.h>
+
+#include_next "symbol-hacks.h"
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/sysdep.h b/REORG.TODO/sysdeps/powerpc/powerpc32/sysdep.h
new file mode 100644
index 0000000000..f92ab2cded
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/sysdep.h
@@ -0,0 +1,174 @@
+/* Assembly macros for 32-bit PowerPC.
+   Copyright (C) 1999-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdeps/powerpc/sysdep.h>
+
+#ifdef __ASSEMBLER__
+
+/* If compiled for profiling, call `_mcount' at the start of each
+   function.  */
+#ifdef	PROF
+/* The mcount code relies on the return address being on the stack
+   so that it can locate our caller and restore it; store one here
+   just for its benefit.  */
+# define CALL_MCOUNT							      \
+  mflr  r0;								      \
+  stw   r0,4(r1);							      \
+  cfi_offset (lr, 4);							      \
+  bl    JUMPTARGET(_mcount);
+#else  /* PROF */
+# define CALL_MCOUNT		/* Do nothing.  */
+#endif /* PROF */
+
+#define	ENTRY(name)							      \
+  .globl C_SYMBOL_NAME(name);						      \
+  .type C_SYMBOL_NAME(name),@function;					      \
+  .align ALIGNARG(2);							      \
+  C_LABEL(name)								      \
+  cfi_startproc;							      \
+  CALL_MCOUNT
+
+/* Helper macro for accessing the 32-bit PowerPC GOT.  */
+
+#define	SETUP_GOT_ACCESS(regname,GOT_LABEL)				      \
+	bcl	20,31,GOT_LABEL	;					      \
+GOT_LABEL:			;					      \
+	mflr	(regname)
+
+#define EALIGN_W_0  /* No words to insert.  */
+#define EALIGN_W_1  nop
+#define EALIGN_W_2  nop;nop
+#define EALIGN_W_3  nop;nop;nop
+#define EALIGN_W_4  EALIGN_W_3;nop
+#define EALIGN_W_5  EALIGN_W_4;nop
+#define EALIGN_W_6  EALIGN_W_5;nop
+#define EALIGN_W_7  EALIGN_W_6;nop
+
+/* EALIGN is like ENTRY, but does alignment to 'words'*4 bytes
+   past a 2^align boundary.  */
+#ifdef PROF
+# define EALIGN(name, alignt, words)					      \
+  .globl C_SYMBOL_NAME(name);						      \
+  .type C_SYMBOL_NAME(name),@function;					      \
+  .align ALIGNARG(2);							      \
+  C_LABEL(name)								      \
+  cfi_startproc;							      \
+  CALL_MCOUNT								      \
+  b 0f;									      \
+  .align ALIGNARG(alignt);						      \
+  EALIGN_W_##words;							      \
+  0:
+#else /* PROF */
+# define EALIGN(name, alignt, words)					      \
+  .globl C_SYMBOL_NAME(name);						      \
+  .type C_SYMBOL_NAME(name),@function;					      \
+  .align ALIGNARG(alignt);						      \
+  EALIGN_W_##words;							      \
+  C_LABEL(name)								      \
+  cfi_startproc;
+#endif
+
+#undef	END
+#define END(name)							      \
+  cfi_endproc;								      \
+  ASM_SIZE_DIRECTIVE(name)
+
+#if ! IS_IN(rtld) && defined (ENABLE_LOCK_ELISION)
+# define ABORT_TRANSACTION \
+    cmpwi    2,0;		\
+    beq      1f;		\
+    lwz      0,TM_CAPABLE(2);	\
+    cmpwi    0,0;		\
+    beq	     1f;		\
+    li       11,_ABORT_SYSCALL;	\
+    tabort.  11;		\
+    .align 4;			\
+1:
+#else
+# define ABORT_TRANSACTION
+#endif
+
+#define DO_CALL(syscall)						      \
+    ABORT_TRANSACTION							      \
+    li 0,syscall;							      \
+    sc
+
+#undef JUMPTARGET
+#ifdef PIC
+# define JUMPTARGET(name) name##@plt
+#else
+# define JUMPTARGET(name) name
+#endif
+
+#if defined SHARED && defined PIC && !defined NO_HIDDEN
+# undef HIDDEN_JUMPTARGET
+# define HIDDEN_JUMPTARGET(name) __GI_##name##@local
+#endif
+
+#define PSEUDO(name, syscall_name, args)				      \
+  .section ".text";							      \
+  ENTRY (name)								      \
+    DO_CALL (SYS_ify (syscall_name));
+
+#define PSEUDO_RET							      \
+    bnslr+;								      \
+    b __syscall_error@local
+#define ret PSEUDO_RET
+
+#undef	PSEUDO_END
+#define	PSEUDO_END(name)						      \
+  END (name)
+
+#define PSEUDO_NOERRNO(name, syscall_name, args)			      \
+  .section ".text";							      \
+  ENTRY (name)								      \
+    DO_CALL (SYS_ify (syscall_name));
+
+#define PSEUDO_RET_NOERRNO						      \
+    blr
+#define ret_NOERRNO PSEUDO_RET_NOERRNO
+
+#undef	PSEUDO_END_NOERRNO
+#define	PSEUDO_END_NOERRNO(name)					      \
+  END (name)
+
+#define PSEUDO_ERRVAL(name, syscall_name, args)				      \
+  .section ".text";							      \
+  ENTRY (name)								      \
+    DO_CALL (SYS_ify (syscall_name));
+
+#define PSEUDO_RET_ERRVAL						      \
+    blr
+#define ret_ERRVAL PSEUDO_RET_ERRVAL
+
+#undef	PSEUDO_END_ERRVAL
+#define	PSEUDO_END_ERRVAL(name)						      \
+  END (name)
+
+/* Local labels stripped out by the linker.  */
+#undef L
+#define L(x) .L##x
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE (a,b)
+#define GENERATE_GOT_LABEL(name) GLUE (.got_label, name)
+
+/* Label in text section.  */
+#define C_TEXT(name) name
+
+#endif	/* __ASSEMBLER__ */
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/tls-macros.h b/REORG.TODO/sysdeps/powerpc/powerpc32/tls-macros.h
new file mode 100644
index 0000000000..ee0eac4858
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/tls-macros.h
@@ -0,0 +1,49 @@
+/* Include sysdeps/powerpc/tls-macros.h for __TLS_CALL_CLOBBERS.  */
+#include_next "tls-macros.h"
+
+/* PowerPC32 Local Exec TLS access.  */
+#define TLS_LE(x)							      \
+  ({ int *__result;							      \
+     asm ("addi %0,2," #x "@tprel"					      \
+	  : "=r" (__result));						      \
+     __result; })
+
+/* PowerPC32 Initial Exec TLS access.  */
+#define TLS_IE(x)							      \
+  ({ int *__result;							      \
+     asm ("bcl 20,31,1f\n1:\t"						      \
+	  "mflr %0\n\t"							      \
+	  "addis %0,%0,_GLOBAL_OFFSET_TABLE_-1b@ha\n\t"			      \
+	  "addi %0,%0,_GLOBAL_OFFSET_TABLE_-1b@l\n\t"			      \
+	  "lwz %0," #x "@got@tprel(%0)\n\t"				      \
+	  "add %0,%0," #x "@tls"					      \
+	  : "=b" (__result) :						      \
+	  : "lr");							      \
+     __result; })
+
+/* PowerPC32 Local Dynamic TLS access.  */
+#define TLS_LD(x)							      \
+  ({ int *__result;							      \
+     asm ("bcl 20,31,1f\n1:\t"						      \
+	  "mflr 3\n\t"							      \
+	  "addis 3,3,_GLOBAL_OFFSET_TABLE_-1b@ha\n\t"			      \
+	  "addi 3,3,_GLOBAL_OFFSET_TABLE_-1b@l\n\t"			      \
+	  "addi 3,3," #x "@got@tlsld\n\t"				      \
+	  "bl __tls_get_addr@plt\n\t"					      \
+	  "addi %0,3," #x "@dtprel"					      \
+	  : "=r" (__result) :						      \
+	  : "3", __TLS_CALL_CLOBBERS);					      \
+     __result; })
+
+/* PowerPC32 General Dynamic TLS access.  */
+#define TLS_GD(x)							      \
+  ({ register int *__result __asm__ ("r3");				      \
+     asm ("bcl 20,31,1f\n1:\t"						      \
+	  "mflr 3\n\t"							      \
+	  "addis 3,3,_GLOBAL_OFFSET_TABLE_-1b@ha\n\t"			      \
+	  "addi 3,3,_GLOBAL_OFFSET_TABLE_-1b@l\n\t"			      \
+	  "addi 3,3," #x "@got@tlsgd\n\t"				      \
+	  "bl __tls_get_addr@plt"					      \
+	  : "=r" (__result) :						      \
+	  : __TLS_CALL_CLOBBERS);					      \
+     __result; })
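
These macros are meant to be dropped into TLS test code; each one expands to inline asm for the named access model and evaluates to the variable's address. A minimal usage sketch (tls_var and get_tls_var_addr are hypothetical names; the real users are the elf/tst-tls* tests, which include this header):

    __thread int tls_var;

    int *
    get_tls_var_addr (void)
    {
      return TLS_LE (tls_var);   /* or TLS_IE / TLS_LD / TLS_GD (tls_var) */
    }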
diff --git a/REORG.TODO/sysdeps/powerpc/powerpc32/tst-audit.h b/REORG.TODO/sysdeps/powerpc/powerpc32/tst-audit.h
new file mode 100644
index 0000000000..2e5e0c91d5
--- /dev/null
+++ b/REORG.TODO/sysdeps/powerpc/powerpc32/tst-audit.h
@@ -0,0 +1,25 @@
+/* Definitions for testing PLT entry/exit auditing.  PowerPC32 version.
+
+   Copyright (C) 2012-2017 Free Software Foundation, Inc.
+
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define pltenter la_ppc32_gnu_pltenter
+#define pltexit la_ppc32_gnu_pltexit
+#define La_regs La_ppc32_regs
+#define La_retval La_ppc32_retval
+#define int_retval lrv_r3
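
These defines let a generic audit test module be written once and compiled per architecture; on powerpc32 the integer return value lives in r3, hence int_retval mapping to lrv_r3. A hedged sketch of how such a module might use the generic names, loosely modelled on the elf/tst-auditmod*.c files (the prototype shown is an assumption taken from those tests and may differ in detail):

    #include <link.h>

    unsigned int
    pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook,
             uintptr_t *defcook, const La_regs *inregs, La_retval *outregs,
             const char *symname)
    {
      /* outregs->int_retval is lrv_r3 here, i.e. the integer return value
         of the function whose PLT call is being audited.  */
      return 0;
    }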