author     Ulrich Drepper <drepper@redhat.com>  2004-03-07 19:53:49 +0000
committer  Ulrich Drepper <drepper@redhat.com>  2004-03-07 19:53:49 +0000
commit     ef690addb555ce45696315a2558cde6f70971f1a
tree       8ea57cd3a3ee44fcb95d37e613effd505b5c29ea
parent     41661489123c7da38c1239281e42bc2b1449d547
Update.
2004-03-07  Ulrich Drepper  <drepper@redhat.com>

	* sysdeps/powerpc/elf/rtld-global-offsets.sym: Adjust for moving
	_dl_hwcap into _rtld_global_ro.
	* sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S: Likewise.
	* sysdeps/powerpc/powerpc32/fpu/setjmp-common.S: Likewise.
	* sysdeps/powerpc/powerpc64/__longjmp-common.S: Likewise.
	* sysdeps/powerpc/powerpc64/setjmp-common.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc32/getcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc32/setcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc32/swapcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc64/getcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S: Likewise.
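
All of the changed files follow the same pattern: the .sym file is run through glibc's gen-as-const machinery, which evaluates each expression at build time and emits it as an assembler constant, and after this patch the hwcap offset is computed against struct rtld_global_ro instead of struct rtld_global. The sketch below is only an illustration of that mechanism; the struct and its members are simplified stand-ins, not the real ldsodefs.h definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for glibc's struct rtld_global_ro (hypothetical
   layout, for illustration only).  */
struct rtld_global_ro_sketch
{
  int _dl_debug_mask;            /* placeholder member */
  unsigned long _dl_hwcap;       /* the field the assembly code needs */
  unsigned long _dl_hwcap_mask;  /* placeholder member */
};

int
main (void)
{
  /* The real build turns the .sym entry into a line like
     "RTLD_GLOBAL_RO_DL_HWCAP_OFFSET <value>" in a generated header;
     this just prints the equivalent offsetof value.  */
  printf ("RTLD_GLOBAL_RO_DL_HWCAP_OFFSET\t%zu\n",
          offsetof (struct rtld_global_ro_sketch, _dl_hwcap));
  return 0;
}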
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/powerpc/elf/rtld-global-offsets.sym              |   4
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S         |   6
-rw-r--r--  sysdeps/powerpc/powerpc32/fpu/setjmp-common.S            |   6
-rw-r--r--  sysdeps/powerpc/powerpc64/__longjmp-common.S             |  18
-rw-r--r--  sysdeps/powerpc/powerpc64/setjmp-common.S                |  18
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc32/getcontext.S   |   8
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc32/setcontext.S   |  56
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc32/swapcontext.S  |  16
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc64/getcontext.S   |   8
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S   | 218
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S  |  10
11 files changed, 184 insertions(+), 184 deletions(-)
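
Before the per-file hunks, it may help to state what the changed assembly sequences compute: in a SHARED build the hwcap word is loaded from ld.so's read-only _rtld_global_ro block at the generated offset, while in a static build the plain extern _dl_hwcap is read directly. A minimal C sketch of that selection follows; the names are simplified stand-ins (the real definitions are glibc internals), and the hwcap value is chosen only so the AltiVec test below fires.

#include <stdio.h>

/* Hypothetical stand-ins for the glibc internals referenced in the diff.  */
struct rtld_global_ro_sketch { unsigned long _dl_hwcap; };
struct rtld_global_ro_sketch rtld_global_ro_sketch = { 0x10000000 };
unsigned long dl_hwcap_sketch = 0x10000000;

/* Kernel value of PPC_FEATURE_HAS_ALTIVEC; it matches the
   "andis. rX,rX,(PPC_FEATURE_HAS_ALTIVEC >> 16)" mask in the hunks.  */
#define PPC_FEATURE_HAS_ALTIVEC 0x10000000

static unsigned long
get_hwcap (void)
{
#ifdef SHARED
  /* Shared case: read the field out of the read-only rtld global block,
     which is what the RTLD_GLOBAL_RO_DL_HWCAP_OFFSET loads implement.  */
  return rtld_global_ro_sketch._dl_hwcap;
#else
  /* Static case: read the plain extern _dl_hwcap, corresponding to the
     "lwz r,0(r)" / "ld r,0(r)" paths in the diff.  */
  return dl_hwcap_sketch;
#endif
}

int
main (void)
{
  if (get_hwcap () & PPC_FEATURE_HAS_ALTIVEC)
    puts ("AltiVec bit set: the VMX save/restore paths would run");
  else
    puts ("no AltiVec: the VMX paths are skipped");
  return 0;
}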
diff --git a/sysdeps/powerpc/elf/rtld-global-offsets.sym b/sysdeps/powerpc/elf/rtld-global-offsets.sym
index 6bc8aaac55..830106ba21 100644
--- a/sysdeps/powerpc/elf/rtld-global-offsets.sym
+++ b/sysdeps/powerpc/elf/rtld-global-offsets.sym
@@ -2,6 +2,6 @@
 
 #include <ldsodefs.h>
 
-#define rtdl_global_offsetof(mem) offsetof (struct rtld_global, mem)
+#define rtdl_global_ro_offsetof(mem) offsetof (struct rtld_global_ro, mem)
 
-RTLD_GLOBAL_DL_HWCAP_OFFSET	rtdl_global_offsetof (_dl_hwcap)
+RTLD_GLOBAL_RO_DL_HWCAP_OFFSET	rtdl_global_ro_offsetof (_dl_hwcap)
diff --git a/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S b/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
index 60f1df7d46..dbb28aef21 100644
--- a/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
@@ -36,11 +36,11 @@ ENTRY (BP_SYM (__longjmp))
 	bl      _GLOBAL_OFFSET_TABLE_@local-4
 	mflr    r5
 #  ifdef SHARED
-	lwz     r5,_rtld_global@got(r5)
+	lwz     r5,_rtld_global_ro@got(r5)
 	mtlr    r6
-	lwz     r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5)
+	lwz     r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
 #  else
-	lwz     r5,_rtld_global@got(r5)
+	lwz     r5,_rtld_global_ro@got(r5)
 	mtlr    r6
 	lwz     r5,0(r5)
 #  endif
diff --git a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
index b69ce33755..6f63512e61 100644
--- a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
@@ -79,11 +79,11 @@ ENTRY (BP_SYM (__sigsetjmp))
 	bl      _GLOBAL_OFFSET_TABLE_@local-4
 	mflr    r5
 #ifdef SHARED
-	lwz     r5,_rtld_global@got(r5)
+	lwz     r5,_rtld_global_ro@got(r5)
 	mtlr    r6
-	lwz     r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5)
+	lwz     r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
 #else
-	lwz     r5,_rtld_global@got(r5)
+	lwz     r5,_rtld_global_ro@got(r5)
 	mtlr    r6
 	lwz     r5,0(r5)
 #endif
diff --git a/sysdeps/powerpc/powerpc64/__longjmp-common.S b/sysdeps/powerpc/powerpc64/__longjmp-common.S
index df316234d9..0e2f0d580f 100644
--- a/sysdeps/powerpc/powerpc64/__longjmp-common.S
+++ b/sysdeps/powerpc/powerpc64/__longjmp-common.S
@@ -32,11 +32,11 @@
 #ifndef __NO_VMX__
 	.section	".toc","aw"
 .LC__dl_hwcap:
-#ifdef SHARED
-	.tc _rtld_global[TC],_rtld_global
-#else
+# ifdef SHARED
+	.tc _rtld_global_ro[TC],_rtld_global_ro
+# else
 	.tc _dl_hwcap[TC],_dl_hwcap
-#endif
+# endif
 	.section ".text"
 #endif
 
@@ -44,12 +44,12 @@ ENTRY (BP_SYM (__longjmp))
 	CHECK_BOUNDS_BOTH_WIDE_LIT (r3, r8, r9, JB_SIZE)
 #ifndef __NO_VMX__
 	ld    r5,.LC__dl_hwcap@toc(r2)
-#ifdef SHARED
+# ifdef SHARED
 	/* Load _rtld-global._dl_hwcap.  */
-	ld    r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5)
-#else
+	ld    r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
+# else
 	ld    r5,0(r5) /* Load extern _dl_hwcap.  */
-#endif
+# endif
 	andis.  r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
 	beq	no_vmx
 	la	r5,((JB_VRS)*8)(3)
@@ -63,7 +63,7 @@ ENTRY (BP_SYM (__longjmp))
 	addi    r5,r5,32
 	lvx	v21,0,r6
 	vperm   v20,v1,v21,v0
-#define load_misaligned_vmx_lo_loaded(loadvr,lovr,shiftvr,loadgpr,addgpr) \
+# define load_misaligned_vmx_lo_loaded(loadvr,lovr,shiftvr,loadgpr,addgpr) \
 	addi    addgpr,addgpr,32; \
 	lvx	lovr,0,loadgpr; \
 	vperm   loadvr,loadvr,lovr,shiftvr;
diff --git a/sysdeps/powerpc/powerpc64/setjmp-common.S b/sysdeps/powerpc/powerpc64/setjmp-common.S
index 541b5d4276..85c840348d 100644
--- a/sysdeps/powerpc/powerpc64/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc64/setjmp-common.S
@@ -31,11 +31,11 @@
 #ifndef __NO_VMX__
 	.section	".toc","aw"
 .LC__dl_hwcap:
-#ifdef SHARED
-	.tc _rtld_global[TC],_rtld_global
-#else
+# ifdef SHARED
+	.tc _rtld_global_ro[TC],_rtld_global_ro
+# else
 	.tc _dl_hwcap[TC],_dl_hwcap
-#endif
+# endif
 	.section ".text"
 #endif
 
@@ -85,12 +85,12 @@ ENTRY (BP_SYM (__sigsetjmp))
 	stfd fp31,((JB_FPRS+17)*8)(3)
 #ifndef __NO_VMX__
 	ld    r5,.LC__dl_hwcap@toc(r2)
-#ifdef SHARED
+# ifdef SHARED
 	/* Load _rtld-global._dl_hwcap.  */
-	ld    r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5)
-#else
+	ld    r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
+# else
 	ld    r5,0(r5) /* Load extern _dl_hwcap.  */
-#endif
+# endif
 	andis.  r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
 	beq	no_vmx
 	la	r5,((JB_VRS)*8)(3)
@@ -114,7 +114,7 @@ ENTRY (BP_SYM (__sigsetjmp))
 	vsel    v20,v20,v2,v3
 	stvx    v5,0,r5
 
-#define save_2vmx_partial(savevr,prev_savevr,hivr,shiftvr,maskvr,savegpr,addgpr) \
+# define save_2vmx_partial(savevr,prev_savevr,hivr,shiftvr,maskvr,savegpr,addgpr) \
 	addi    addgpr,addgpr,32; \
 	vperm   savevr,savevr,savevr,shiftvr; \
 	vsel    hivr,prev_savevr,savevr,maskvr; \
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc32/getcontext.S b/sysdeps/unix/sysv/linux/powerpc/powerpc32/getcontext.S
index d3e9b49cfb..0c04df5119 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc32/getcontext.S
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc32/getcontext.S
@@ -121,15 +121,15 @@ ENTRY(__getcontext)
 	mflr    r8
 	bl      _GLOBAL_OFFSET_TABLE_@local-4
 	mflr    r7
-#ifdef SHARED
+# ifdef SHARED
 	lwz     r7,_rtld_global_ro@got(r7)
 	mtlr    r8
-	lwz     r7,RTLD_GLOBAL_DL_HWCAP_OFFSET(r7)
-#else
+	lwz     r7,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r7)
+# else
 	lwz     r7,_dl_hwcap@got(r7)
 	mtlr    r8
 	lwz     r7,0(r7)
-#endif
+# endif
 #else
 	lis	r7,_dl_hwcap@ha
 	lwz     r7,_dl_hwcap@l(r7)
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc32/setcontext.S b/sysdeps/unix/sysv/linux/powerpc/powerpc32/setcontext.S
index cb2779731e..16d5a3b745 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc32/setcontext.S
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc32/setcontext.S
@@ -54,20 +54,20 @@ ENTRY(__setcontext)
 	bl	JUMPTARGET(sigprocmask)
 	cmpwi	r3,0
 	bne	L(error_exit)
-	
+
 #ifdef PIC
 	mflr    r8
 	bl      _GLOBAL_OFFSET_TABLE_@local-4
 	mflr    r7
-#ifdef SHARED	
-	lwz     r7,_rtld_global@got(r7)
+# ifdef SHARED
+	lwz     r7,_rtld_global_ro@got(r7)
 	mtlr    r8
-	lwz     r7,RTLD_GLOBAL_DL_HWCAP_OFFSET(r7)
-#else	
+	lwz     r7,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r7)
+# else
 	lwz     r7,_dl_hwcap@got(r7)
 	mtlr    r8
 	lwz     r7,0(r7)
-#endif
+# endif
 #else
 	lis	r7,_dl_hwcap@ha
 	lwz     r7,_dl_hwcap@l(r7)
@@ -75,100 +75,100 @@ ENTRY(__setcontext)
 	andis.	r7,r7,(PPC_FEATURE_HAS_ALTIVEC >> 16)
 	la	r10,(_UC_VREGS)(r31)
 	beq	L(has_no_vec)
-	
+
 	lwz   r0,(32*16)(r10)
 	li    r9,(32*16)
 	cmpwi r0,0
 	mtspr VRSAVE,r0
-	beq   L(has_no_vec)  
+	beq   L(has_no_vec)
 
 	lvx   v19,r9,r10
 	la    r9,(16)(r10)
 
-	lvx   v0,0,r10  
+	lvx   v0,0,r10
 	lvx   v1,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
 	mtvscr  v19
-	lvx   v2,0,r10  
+	lvx   v2,0,r10
 	lvx   v3,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v4,0,r10  
+	lvx   v4,0,r10
 	lvx   v5,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v6,0,r10  
+	lvx   v6,0,r10
 	lvx   v7,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v8,0,r10  
+	lvx   v8,0,r10
 	lvx   v9,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v10,0,r10  
+	lvx   v10,0,r10
 	lvx   v11,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v12,0,r10  
+	lvx   v12,0,r10
 	lvx   v13,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v14,0,r10  
+	lvx   v14,0,r10
 	lvx   v15,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v16,0,r10  
+	lvx   v16,0,r10
 	lvx   v17,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v18,0,r10  
+	lvx   v18,0,r10
 	lvx   v11,0,r9
 	addi  r19,r10,32
 	addi  r9,r9,32
 
-	lvx   v20,0,r10  
+	lvx   v20,0,r10
 	lvx   v21,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v22,0,r10  
+	lvx   v22,0,r10
 	lvx   v23,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v24,0,r10  
+	lvx   v24,0,r10
 	lvx   v25,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v26,0,r10  
+	lvx   v26,0,r10
 	lvx   v27,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v28,0,r10  
+	lvx   v28,0,r10
 	lvx   v29,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v30,0,r10  
+	lvx   v30,0,r10
 	lvx   v31,0,r9
 	addi  r10,r10,32
 	addi  r9,r9,32
 
-	lvx   v10,0,r10  
+	lvx   v10,0,r10
 	lvx   v11,0,r9
-	
+
 L(has_no_vec):
 	/* Restore the floating-point registers */
 	lfd	fp31,_UC_FREGS+(32*8)(r31)
@@ -254,7 +254,7 @@ L(error_exit):
 	addi	r1,r1,16
 	mtlr	r0
 	blr
-	
+
 L(do_sigret):
 	addi	r1,r3,-0xd0
 	li	r0,SYS_ify(rt_sigreturn)
@@ -381,7 +381,7 @@ L(novec_error_exit):
 	addi	r1,r1,16
 	mtlr	r0
 	blr
-	
+
 L(novec_do_sigret):
 	addi	r1,r3,-0xd0
 	li	r0,SYS_ify(rt_sigreturn)
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc32/swapcontext.S b/sysdeps/unix/sysv/linux/powerpc/powerpc32/swapcontext.S
index 4c92dd7583..607f282a9a 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc32/swapcontext.S
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc32/swapcontext.S
@@ -123,15 +123,15 @@ ENTRY(__swapcontext)
 	mflr    r8
 	bl      _GLOBAL_OFFSET_TABLE_@local-4
 	mflr    r7
-#ifdef SHARED
+# ifdef SHARED
 	lwz     r7,_rtld_global_ro@got(r7)
 	mtlr    r8
-	lwz     r7,RTLD_GLOBAL_DL_HWCAP_OFFSET(r7)
-#else
+	lwz     r7,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r7)
+# else
 	lwz     r7,_dl_hwcap@got(r7)
 	mtlr    r8
 	lwz     r7,0(r7)
-#endif
+# endif
 #else
 	lis	r7,_dl_hwcap@ha
 	lwz     r7,_dl_hwcap@l(r7)
@@ -265,15 +265,15 @@ L(no_vec):
 	mflr    r8
 	bl      _GLOBAL_OFFSET_TABLE_@local-4
 	mflr    r7
-#ifdef SHARED
+# ifdef SHARED
 	lwz     r7,_rtld_global_ro@got(r7)
 	mtlr    r8
-	lwz     r7,RTLD_GLOBAL_DL_HWCAP_OFFSET(r7)
-#else
+	lwz     r7,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r7)
+# else
 	lwz     r7,_dl_hwcap@got(r7)
 	mtlr    r8
 	lwz     r7,0(r7)
-#endif
+# endif
 #else
 	lis	r7,_dl_hwcap@ha
 	lwz     r7,_dl_hwcap@l(r7)
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/getcontext.S b/sysdeps/unix/sysv/linux/powerpc/powerpc64/getcontext.S
index c950316b2f..13f2039766 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/getcontext.S
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/getcontext.S
@@ -268,12 +268,12 @@ ENTRY(__getcontext)
 
   ld    r5,.LC__dl_hwcap@toc(r2)
   li    r10,0
-#ifdef SHARED
+# ifdef SHARED
 /* Load _rtld-global._dl_hwcap.  */
-  ld    r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5)
-#else
+  ld    r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
+# else
   ld    r5,0(r5) /* Load extern _dl_hwcap.  */
-#endif
+# endif
   andis.  r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
   beq   L(has_no_vec)
 
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S b/sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S
index 490eb27578..1ff9448764 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S
@@ -59,41 +59,41 @@ ENTRY(__novec_setcontext)
   bne   L(nv_error_exit)
 
   lfd  fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31)
-  lfd  fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31)	
+  lfd  fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31)
   lfd  fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31)
   mtfsf  0xff,fp0
-  lfd  fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31)	
+  lfd  fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31)
   lfd  fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31)
-  lfd  fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31)	
+  lfd  fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31)
   lfd  fp26,(SIGCONTEXT_FP_REGS+(PT_R26*8))(r31)
-  lfd  fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31)	
+  lfd  fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31)
   lfd  fp24,(SIGCONTEXT_FP_REGS+(PT_R24*8))(r31)
-  lfd  fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31)	
+  lfd  fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31)
   lfd  fp22,(SIGCONTEXT_FP_REGS+(PT_R22*8))(r31)
-  lfd  fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31)	
+  lfd  fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31)
   lfd  fp20,(SIGCONTEXT_FP_REGS+(PT_R20*8))(r31)
-  lfd  fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31)	
+  lfd  fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31)
   lfd  fp18,(SIGCONTEXT_FP_REGS+(PT_R18*8))(r31)
-  lfd  fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31)	
+  lfd  fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31)
   lfd  fp16,(SIGCONTEXT_FP_REGS+(PT_R16*8))(r31)
-  lfd  fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31)	
+  lfd  fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31)
   lfd  fp14,(SIGCONTEXT_FP_REGS+(PT_R14*8))(r31)
-  lfd  fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31)	
+  lfd  fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31)
   lfd  fp12,(SIGCONTEXT_FP_REGS+(PT_R12*8))(r31)
-  lfd  fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31)	
+  lfd  fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31)
   lfd  fp10,(SIGCONTEXT_FP_REGS+(PT_R10*8))(r31)
-  lfd  fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31)	
+  lfd  fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31)
   lfd  fp8,(SIGCONTEXT_FP_REGS+(PT_R8*8))(r31)
-  lfd  fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31)	
+  lfd  fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31)
   lfd  fp6,(SIGCONTEXT_FP_REGS+(PT_R6*8))(r31)
-  lfd  fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31)	
+  lfd  fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31)
   lfd  fp4,(SIGCONTEXT_FP_REGS+(PT_R4*8))(r31)
-  lfd  fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31)	
+  lfd  fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31)
   lfd  fp2,(SIGCONTEXT_FP_REGS+(PT_R2*8))(r31)
-  lfd  fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31)	
+  lfd  fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31)
   lfd  fp0,(SIGCONTEXT_FP_REGS+(PT_R0*8))(r31)
-  
-  ld   r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31)  
+
+  ld   r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31)
   ld   r1,(SIGCONTEXT_GP_REGS+(PT_R1*8))(r31)
   mtlr r0
   ld   r2,(SIGCONTEXT_GP_REGS+(PT_R2*8))(r31)
@@ -129,33 +129,33 @@ ENTRY(__novec_setcontext)
   ld   r28,(SIGCONTEXT_GP_REGS+(PT_R28*8))(r31)
   ld   r29,(SIGCONTEXT_GP_REGS+(PT_R29*8))(r31)
   ld   r30,(SIGCONTEXT_GP_REGS+(PT_R30*8))(r31)
-  
+
   /* Now we branch to the "Next Instruction Pointer" from the saved
-     context.  With the powerpc64 instruction set there is no good way to 
+     context.  With the powerpc64 instruction set there is no good way to
      do this (from user state) without clobbering either the LR or CTR.
-     The makecontext and swapcontext functions depend on the callers 
+     The makecontext and swapcontext functions depend on the callers
      LR being preserved so we use the CTR.  */
   ld   r0,(SIGCONTEXT_GP_REGS+(PT_NIP*8))(r31)
   mtctr r0
   ld   r0,(SIGCONTEXT_GP_REGS+(PT_R0*8))(r31)
   ld   r31,(SIGCONTEXT_GP_REGS+(PT_R31*8))(r31)
   bctr
-  
+
 L(nv_error_exit):
-  ld   r0,128+FRAME_LR_SAVE(r1)  
+  ld   r0,128+FRAME_LR_SAVE(r1)
   addi r1,r1,128
   mtlr r0
 	ld   r31,-8(r1)
   blr
 
-  /* At this point we assume that the ucontext was created by a 
-     rt_signal and we should use rt_sigreturn to restore the original 
-     state.  As of the 2.4.21 kernel the ucontext is the first thing 
-     (offset 0) in the rt_signal frame and rt_sigreturn expects the 
-     ucontext address in R1.  Normally the rt-signal trampoline handles 
-     this by popping dummy frame before the rt_signal syscall.  In our 
-     case the stack may not be in its original (signal handler return with 
-     R1 pointing at the dummy frame) state.  We do have the ucontext 
+  /* At this point we assume that the ucontext was created by a
+     rt_signal and we should use rt_sigreturn to restore the original
+     state.  As of the 2.4.21 kernel the ucontext is the first thing
+     (offset 0) in the rt_signal frame and rt_sigreturn expects the
+     ucontext address in R1.  Normally the rt-signal trampoline handles
+     this by popping dummy frame before the rt_signal syscall.  In our
+     case the stack may not be in its original (signal handler return with
+     R1 pointing at the dummy frame) state.  We do have the ucontext
      address in R3, so simply copy R3 to R1 before the syscall.  */
 L(nv_do_sigret):
   mr   r1,r3,
@@ -171,7 +171,7 @@ L(nv_do_sigret):
   bl   JUMPTARGET(__syscall_error)
   nop
   li   r3,-1
-  ld   r0,128+FRAME_LR_SAVE(r1)  
+  ld   r0,128+FRAME_LR_SAVE(r1)
   addi r1,r1,128
   mtlr r0
   blr
@@ -186,7 +186,7 @@ compat_symbol (libc, __novec_setcontext, setcontext, GLIBC_2_3)
 	.section	".toc","aw"
 .LC__dl_hwcap:
 #ifdef SHARED
-	.tc _rtld_global[TC],_rtld_global
+	.tc _rtld_global_ro[TC],_rtld_global_ro
 #else
 	.tc _dl_hwcap[TC],_dl_hwcap
 #endif
@@ -221,153 +221,153 @@ ENTRY(__setcontext)
   nop
   cmpdi r3,0
   bne   L(error_exit)
-  
+
   ld    r5,.LC__dl_hwcap@toc(r2)
   ld    r10,(SIGCONTEXT_V_REGS_PTR)(r31)
-#ifdef SHARED  
+# ifdef SHARED
 /* Load _rtld-global._dl_hwcap.  */
-  ld    r5,RTLD_GLOBAL_DL_HWCAP_OFFSET(r5) 
-#else  
+  ld    r5,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r5)
+# else
   ld    r5,0(r5) /* Load extern _dl_hwcap.  */
-#endif
+# endif
   andis.  r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
   beq   L(has_no_vec)
-  
+
   cmpdi r10,0
   beq   L(has_no_vec)
   lwz   r0,(33*16)(r10)
-  
+
   li    r9,(16*32)
   mtspr VRSAVE,r0
   cmpwi r0,0
-  beq   L(has_no_vec)  
-  
+  beq   L(has_no_vec)
+
   lvx   v19,r9,r10
   la    r9,(16)(r10)
-  
-  lvx   v0,0,r10  
+
+  lvx   v0,0,r10
   lvx   v1,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
+
   mtvscr  v19
-  lvx   v2,0,r10  
+  lvx   v2,0,r10
   lvx   v3,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v4,0,r10  
+
+  lvx   v4,0,r10
   lvx   v5,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v6,0,r10  
+
+  lvx   v6,0,r10
   lvx   v7,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v8,0,r10  
+
+  lvx   v8,0,r10
   lvx   v9,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v10,0,r10  
+
+  lvx   v10,0,r10
   lvx   v11,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v12,0,r10  
+
+  lvx   v12,0,r10
   lvx   v13,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v14,0,r10  
+
+  lvx   v14,0,r10
   lvx   v15,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v16,0,r10  
+
+  lvx   v16,0,r10
   lvx   v17,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v18,0,r10  
+
+  lvx   v18,0,r10
   lvx   v11,0,r9
   addi  r19,r10,32
   addi  r9,r9,32
-  
-  lvx   v20,0,r10  
+
+  lvx   v20,0,r10
   lvx   v21,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v22,0,r10  
+
+  lvx   v22,0,r10
   lvx   v23,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v24,0,r10  
+
+  lvx   v24,0,r10
   lvx   v25,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v26,0,r10  
+
+  lvx   v26,0,r10
   lvx   v27,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v28,0,r10  
+
+  lvx   v28,0,r10
   lvx   v29,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v30,0,r10  
+
+  lvx   v30,0,r10
   lvx   v31,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
-  lvx   v10,0,r10  
+
+  lvx   v10,0,r10
   lvx   v11,0,r9
   addi  r10,r10,32
   addi  r9,r9,32
-  
+
 L(has_no_vec):
   lfd  fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31)
-  lfd  fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31)	
+  lfd  fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31)
   lfd  fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31)
   mtfsf  0xff,fp0
-  lfd  fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31)	
+  lfd  fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31)
   lfd  fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31)
-  lfd  fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31)	
+  lfd  fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31)
   lfd  fp26,(SIGCONTEXT_FP_REGS+(PT_R26*8))(r31)
-  lfd  fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31)	
+  lfd  fp25,(SIGCONTEXT_FP_REGS+(PT_R25*8))(r31)
   lfd  fp24,(SIGCONTEXT_FP_REGS+(PT_R24*8))(r31)
-  lfd  fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31)	
+  lfd  fp23,(SIGCONTEXT_FP_REGS+(PT_R23*8))(r31)
   lfd  fp22,(SIGCONTEXT_FP_REGS+(PT_R22*8))(r31)
-  lfd  fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31)	
+  lfd  fp21,(SIGCONTEXT_FP_REGS+(PT_R21*8))(r31)
   lfd  fp20,(SIGCONTEXT_FP_REGS+(PT_R20*8))(r31)
-  lfd  fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31)	
+  lfd  fp19,(SIGCONTEXT_FP_REGS+(PT_R19*8))(r31)
   lfd  fp18,(SIGCONTEXT_FP_REGS+(PT_R18*8))(r31)
-  lfd  fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31)	
+  lfd  fp17,(SIGCONTEXT_FP_REGS+(PT_R17*8))(r31)
   lfd  fp16,(SIGCONTEXT_FP_REGS+(PT_R16*8))(r31)
-  lfd  fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31)	
+  lfd  fp15,(SIGCONTEXT_FP_REGS+(PT_R15*8))(r31)
   lfd  fp14,(SIGCONTEXT_FP_REGS+(PT_R14*8))(r31)
-  lfd  fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31)	
+  lfd  fp13,(SIGCONTEXT_FP_REGS+(PT_R13*8))(r31)
   lfd  fp12,(SIGCONTEXT_FP_REGS+(PT_R12*8))(r31)
-  lfd  fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31)	
+  lfd  fp11,(SIGCONTEXT_FP_REGS+(PT_R11*8))(r31)
   lfd  fp10,(SIGCONTEXT_FP_REGS+(PT_R10*8))(r31)
-  lfd  fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31)	
+  lfd  fp9,(SIGCONTEXT_FP_REGS+(PT_R9*8))(r31)
   lfd  fp8,(SIGCONTEXT_FP_REGS+(PT_R8*8))(r31)
-  lfd  fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31)	
+  lfd  fp7,(SIGCONTEXT_FP_REGS+(PT_R7*8))(r31)
   lfd  fp6,(SIGCONTEXT_FP_REGS+(PT_R6*8))(r31)
-  lfd  fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31)	
+  lfd  fp5,(SIGCONTEXT_FP_REGS+(PT_R5*8))(r31)
   lfd  fp4,(SIGCONTEXT_FP_REGS+(PT_R4*8))(r31)
-  lfd  fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31)	
+  lfd  fp3,(SIGCONTEXT_FP_REGS+(PT_R3*8))(r31)
   lfd  fp2,(SIGCONTEXT_FP_REGS+(PT_R2*8))(r31)
-  lfd  fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31)	
+  lfd  fp1,(SIGCONTEXT_FP_REGS+(PT_R1*8))(r31)
   lfd  fp0,(SIGCONTEXT_FP_REGS+(PT_R0*8))(r31)
-  
-  ld   r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31)  
+
+  ld   r0,(SIGCONTEXT_GP_REGS+(PT_LNK*8))(r31)
   ld   r1,(SIGCONTEXT_GP_REGS+(PT_R1*8))(r31)
   mtlr r0
   ld   r2,(SIGCONTEXT_GP_REGS+(PT_R2*8))(r31)
@@ -403,33 +403,33 @@ L(has_no_vec):
   ld   r28,(SIGCONTEXT_GP_REGS+(PT_R28*8))(r31)
   ld   r29,(SIGCONTEXT_GP_REGS+(PT_R29*8))(r31)
   ld   r30,(SIGCONTEXT_GP_REGS+(PT_R30*8))(r31)
-  
+
   /* Now we branch to the "Next Instruction Pointer" from the saved
-     context.  With the powerpc64 instruction set there is no good way to 
+     context.  With the powerpc64 instruction set there is no good way to
      do this (from user state) without clobbering either the LR or CTR.
-     The makecontext and swapcontext functions depend on the callers 
+     The makecontext and swapcontext functions depend on the callers
      LR being preserved so we use the CTR.  */
   ld   r0,(SIGCONTEXT_GP_REGS+(PT_NIP*8))(r31)
   mtctr r0
   ld   r0,(SIGCONTEXT_GP_REGS+(PT_R0*8))(r31)
   ld   r31,(SIGCONTEXT_GP_REGS+(PT_R31*8))(r31)
   bctr
-  
+
 L(error_exit):
-  ld   r0,128+FRAME_LR_SAVE(r1)  
+  ld   r0,128+FRAME_LR_SAVE(r1)
   addi r1,r1,128
   mtlr r0
 	ld   r31,-8(r1)
   blr
 
-  /* At this point we assume that the ucontext was created by a 
-     rt_signal and we should use rt_sigreturn to restore the original 
-     state.  As of the 2.4.21 kernel the ucontext is the first thing 
-     (offset 0) in the rt_signal frame and rt_sigreturn expects the 
-     ucontext address in R1.  Normally the rt-signal trampoline handles 
-     this by popping dummy frame before the rt_signal syscall.  In our 
-     case the stack may not be in its original (signal handler return with 
-     R1 pointing at the dummy frame) state.  We do have the ucontext 
+  /* At this point we assume that the ucontext was created by a
+     rt_signal and we should use rt_sigreturn to restore the original
+     state.  As of the 2.4.21 kernel the ucontext is the first thing
+     (offset 0) in the rt_signal frame and rt_sigreturn expects the
+     ucontext address in R1.  Normally the rt-signal trampoline handles
+     this by popping dummy frame before the rt_signal syscall.  In our
+     case the stack may not be in its original (signal handler return with
+     R1 pointing at the dummy frame) state.  We do have the ucontext
      address in R3, so simply copy R3 to R1 before the syscall.  */
 L(do_sigret):
   mr   r1,r3,
@@ -445,7 +445,7 @@ L(do_sigret):
   bl   JUMPTARGET(__syscall_error)
   nop
   li   r3,-1
-  ld   r0,128+FRAME_LR_SAVE(r1)  
+  ld   r0,128+FRAME_LR_SAVE(r1)
   addi r1,r1,128
   mtlr r0
   blr
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S b/sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S
index 8e2cc71b0a..64f29829e6 100644
--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S
@@ -393,7 +393,7 @@ ENTRY(__swapcontext)
   li    r10,0
 #ifdef SHARED
 /* Load _rtld-global._dl_hwcap.  */
-  ld    r8,RTLD_GLOBAL_DL_HWCAP_OFFSET(r8)
+  ld    r8,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r8)
 #else
   ld    r8,0(r8) /* Load extern _dl_hwcap.  */
 #endif
@@ -527,12 +527,12 @@ L(has_no_vec):
 
   ld    r8,.LC__dl_hwcap@toc(r2)
   ld    r10,(SIGCONTEXT_V_REGS_PTR)(r31)
-#ifdef SHARED
+# ifdef SHARED
 /* Load _rtld-global._dl_hwcap.  */
-  ld    r8,RTLD_GLOBAL_DL_HWCAP_OFFSET(r8)
-#else
+  ld    r8,RTLD_GLOBAL_RO_DL_HWCAP_OFFSET(r8)
+# else
   ld    r8,0(r8) /* Load extern _dl_hwcap.  */
-#endif
+# endif
   andis.  r8,r8,(PPC_FEATURE_HAS_ALTIVEC >> 16)
   beq   L(has_no_vec2)