From 372723065a1689b8943af9b3eb83f56577069eb1 Mon Sep 17 00:00:00 2001
From: Roland McGrath
Date: Wed, 15 Dec 2004 20:36:01 +0000
Subject: 2004-12-15  Steven Munroe

	* sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S: Make no_vmx
	symbol local.
	* sysdeps/powerpc/powerpc32/fpu/setjmp-common.S: Make no_vmx
	symbol local.
	* sysdeps/powerpc/powerpc64/__longjmp-common.S: Make no_vmx
	symbol local.
	* sysdeps/powerpc/powerpc64/setjmp-common.S: Make no_vmx and
	aligned_save_vmx symbol local.
---
 sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S |  6 +++---
 sysdeps/powerpc/powerpc32/fpu/setjmp-common.S    | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'sysdeps/powerpc/powerpc32/fpu')

diff --git a/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S b/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
index b61e127a99..a2415b9542 100644
--- a/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
@@ -50,7 +50,7 @@ ENTRY (BP_SYM (__longjmp))
 	lwz	r5,_dl_hwcap@l(r5)
 # endif
 	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
-	beq	no_vmx
+	beq	L(no_vmx)
 	la	r5,((JB_VRS)*4)(3)
 	andi.	r6,r5,0xf
 	lwz	r0,((JB_VRSAVE)*4)(3)
@@ -78,7 +78,7 @@ ENTRY (BP_SYM (__longjmp))
 	load_misaligned_vmx_lo_loaded(v30,v31,v0,r6,r5)
 	lvx	v1,0,r5
 	vperm	v31,v31,v1,v0
-	b	no_vmx
+	b	L(no_vmx)
 aligned_restore_vmx:
 	addi	r6,r5,16
 	lvx	v20,0,r5
@@ -103,7 +103,7 @@ aligned_restore_vmx:
 	addi	r6,r6,32
 	lvx	v30,0,r5
 	lvx	v31,0,r6
-no_vmx:
+L(no_vmx):
 #endif
 	lwz	r1,(JB_GPR1*4)(r3)
 	lwz	r0,(JB_LR*4)(r3)
diff --git a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
index 796d24f25c..77ee05f487 100644
--- a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
@@ -92,13 +92,13 @@ ENTRY (BP_SYM (__sigsetjmp))
 	lwz	r5,_dl_hwcap@l(r5)
 #endif
 	andis.	r5,r5,(PPC_FEATURE_HAS_ALTIVEC >> 16)
-	beq	no_vmx
+	beq	L(no_vmx)
 	la	r5,((JB_VRS)*4)(3)
 	andi.	r6,r5,0xf
 	mfspr	r0,VRSAVE
 	stw	r0,((JB_VRSAVE)*4)(3)
 	addi	r6,r5,16
-	beq+	aligned_save_vmx
+	beq+	L(aligned_save_vmx)
 	lvsr	v0,0,r5
 	vspltisb v1,-1		/* set v1 to all 1's */
 	vspltisb v2,0		/* set v2 to all 0's */
@@ -137,9 +137,9 @@ ENTRY (BP_SYM (__sigsetjmp))
 	stvx	v5,0,r6
 	vsel	v4,v31,v4,v3
 	stvx	v4,0,r5
-	b	no_vmx
+	b	L(no_vmx)

-aligned_save_vmx:
+L(aligned_save_vmx):
 	stvx	20,0,r5
 	addi	r5,r5,32
 	stvx	21,0,r6
@@ -162,7 +162,7 @@ aligned_save_vmx:
 	addi	r6,r6,32
 	stvx	30,0,r5
 	stvx	31,0,r6
-no_vmx:
+L(no_vmx):
 #endif
 	b JUMPTARGET (BP_SYM (__sigjmp_save))
 END (BP_SYM (__sigsetjmp))
--
cgit 1.4.1
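
For context on the mechanism (this note is not part of the commit): the patch wraps
the branch-target labels in glibc's L() macro from sysdep.h so that they become
assembler-local.  A minimal sketch follows, assuming the generic ELF definition of
L(); per-target definitions may differ:

	/* Generic ELF form of the macro: ".L"-prefixed names are
	   treated as local labels by GNU as.  */
	#define L(name) .L##name

	/* Before the patch: an ordinary label.  GNU as emits "no_vmx"
	   into the object file's symbol table as a local NOTYPE symbol,
	   where it shows up in nm/objdump output and can be mistaken for
	   a function boundary by tools that map addresses to symbols.  */
	no_vmx:
		blr

	/* After the patch: L(no_vmx) expands to ".Lno_vmx", which GNU as
	   keeps private to this object and omits from the symbol table
	   entirely.  */
	L(no_vmx):
		blr

The effect is easy to verify: running nm on an object assembled from the first form
lists a local no_vmx symbol, while the second form produces no entry for the label
at all.
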