Diffstat (limited to 'sysdeps/powerpc/powerpc64/setjmp-common.S')
-rw-r--r-- | sysdeps/powerpc/powerpc64/setjmp-common.S | 72
1 file changed, 34 insertions, 38 deletions
diff --git a/sysdeps/powerpc/powerpc64/setjmp-common.S b/sysdeps/powerpc/powerpc64/setjmp-common.S
index 58ec610620..1829b9ab65 100644
--- a/sysdeps/powerpc/powerpc64/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc64/setjmp-common.S
@@ -95,7 +95,7 @@ JUMPTARGET(GLUE(__sigsetjmp,_ent)):
 	mfcr r0
 	std r16,((JB_GPRS+2)*8)(3)
 	stfd fp16,((JB_FPRS+2)*8)(3)
-	std r0,(JB_CR*8)(3)
+	stw r0,((JB_CR*8)+4)(3)	/* 32-bit CR.  */
 	std r17,((JB_GPRS+3)*8)(3)
 	stfd fp17,((JB_FPRS+3)*8)(3)
 	std r18,((JB_GPRS+4)*8)(3)
@@ -139,50 +139,46 @@ JUMPTARGET(GLUE(__sigsetjmp,_ent)):
 	la	r5,((JB_VRS)*8)(3)
 	andi.	r6,r5,0xf
 	mfspr	r0,VRSAVE
-	stw	r0,((JB_VRSAVE)*8)(3)
+	stw	r0,((JB_VRSAVE)*8)(3)	/* 32-bit VRSAVE.  */
 	addi	r6,r5,16
 	beq+	L(aligned_save_vmx)
-	lvsr	v0,0,r5
-	vspltisb v1,-1		/* set v1 to all 1's */
-	vspltisb v2,0		/* set v2 to all 0's */
-	vperm	v3,v2,v1,v0	/* v3 contains shift mask with num all 1 bytes
-				   on left = misalignment  */
+	lvsr	v0,0,r5
+	lvsl	v1,0,r5
+	addi	r6,r5,-16
 
-	/* Special case for v20 we need to preserve what is in save area
-	   below v20 before obliterating it  */
-	lvx	v5,0,r5
-	vperm	v20,v20,v20,v0
-	vsel	v5,v5,v20,v3
-	vsel	v20,v20,v2,v3
-	stvx	v5,0,r5
+# define save_misaligned_vmx(savevr,prevvr,shiftvr,tmpvr,savegpr,addgpr) \
+	addi	addgpr,addgpr,32; \
+	vperm	tmpvr,prevvr,savevr,shiftvr; \
+	stvx	tmpvr,0,savegpr
 
-# define save_2vmx_partial(savevr,prev_savevr,hivr,shiftvr,maskvr,savegpr,addgpr) \
-	addi	addgpr,addgpr,32; \
-	vperm	savevr,savevr,savevr,shiftvr; \
-	vsel	hivr,prev_savevr,savevr,maskvr; \
-	stvx	hivr,0,savegpr;
+	/*
+	 * We have to be careful not to corrupt the data below v20 and
+	 * above v31. To keep things simple we just rotate both ends in
+	 * the opposite direction to our main permute so we can use
+	 * the common macro.
+	 */
 
-	save_2vmx_partial(v21,v20,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v22,v21,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v23,v22,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v24,v23,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v25,v24,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v26,v25,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v27,v26,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v28,v27,v5,v0,v3,r5,r6)
-	save_2vmx_partial(v29,v28,v5,v0,v3,r6,r5)
-	save_2vmx_partial(v30,v29,v5,v0,v3,r5,r6)
+	/* load and rotate data below v20 */
+	lvx	v2,0,r5
+	vperm	v2,v2,v2,v1
+	save_misaligned_vmx(v20,v2,v0,v3,r5,r6)
+	save_misaligned_vmx(v21,v20,v0,v3,r6,r5)
+	save_misaligned_vmx(v22,v21,v0,v3,r5,r6)
+	save_misaligned_vmx(v23,v22,v0,v3,r6,r5)
+	save_misaligned_vmx(v24,v23,v0,v3,r5,r6)
+	save_misaligned_vmx(v25,v24,v0,v3,r6,r5)
+	save_misaligned_vmx(v26,v25,v0,v3,r5,r6)
+	save_misaligned_vmx(v27,v26,v0,v3,r6,r5)
+	save_misaligned_vmx(v28,v27,v0,v3,r5,r6)
+	save_misaligned_vmx(v29,v28,v0,v3,r6,r5)
+	save_misaligned_vmx(v30,v29,v0,v3,r5,r6)
+	save_misaligned_vmx(v31,v30,v0,v3,r6,r5)
+	/* load and rotate data above v31 */
+	lvx	v2,0,r6
+	vperm	v2,v2,v2,v1
+	save_misaligned_vmx(v2,v31,v0,v3,r5,r6)
 
-	/* Special case for r31 we need to preserve what is in save area
-	   above v31 before obliterating it  */
-	addi	r5,r5,32
-	vperm	v31,v31,v31,v0
-	lvx	v4,0,r5
-	vsel	v5,v30,v31,v3
-	stvx	v5,0,r6
-	vsel	v4,v31,v4,v3
-	stvx	v4,0,r5
 	b	L(no_vmx)
 L(aligned_save_vmx):
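
Editor's note on the JB_CR hunk (a sketch, not part of the patch): mfcr leaves the 32-bit Condition Register in the low-order half of r0, so the old std wrote a doubleword whose upper half carried no CR content. The new stw stores only the CR word, at byte offset 4 within the 8-byte jmp_buf slot; presumably longjmp loads it back with an lwz from the same offset, so the two sides agree on which half of the slot is used. A minimal C model of the slot layout (all names below are invented):

	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint64_t jb_cr_slot = 0;	/* the 8-byte slot at JB_CR*8 */
		uint32_t cr = 0x20000000;	/* 32-bit CR value from mfcr */

		/* stw r0,((JB_CR*8)+4)(3): a 4-byte store at byte offset 4
		   of the slot; on big-endian ppc64 that is the low-order
		   word, matching where mfcr leaves the CR in r0.  */
		memcpy((unsigned char *)&jb_cr_slot + 4, &cr, sizeof cr);
		return 0;
	}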
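
Editor's note on the misaligned VMX path: the rewrite replaces the vsel-based save_2vmx_partial sequence with a single rotate-and-blend macro. The ingredients are that lvx/stvx ignore the low four address bits, that an lvsr permute control makes vperm blend the last m bytes of one register with the first 16-m bytes of the next (m being the misalignment), and that the opposite lvsl rotation is applied to the memory words at both ends so bytes outside the save area survive. A C model of the scheme as I read it from the diff (every name below is invented for illustration; this is not glibc code):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	/* vperm with an lvsr control: blend the last m bytes of prev with
	   the first 16-m bytes of cur, then write one aligned 16-byte
	   block, as stvx does (it ignores the low four address bits).  */
	static void blend_store(uint8_t *blk, const uint8_t *prev,
				const uint8_t *cur, unsigned m)
	{
		uint8_t out[16];
		memcpy(out, prev + 16 - m, m);
		memcpy(out + m, cur, 16 - m);
		memcpy(blk, out, 16);
	}

	/* vperm v2,v2,v2,v1 with an lvsl control: rotate left by m.  */
	static void rotl16(uint8_t *dst, const uint8_t *src, unsigned m)
	{
		for (unsigned j = 0; j < 16; j++)
			dst[j] = src[(j + m) % 16];
	}

	/* Save n 16-byte registers at dst, misaligned by m = dst & 0xf,
	   using only aligned block writes and preserving the bytes
	   outside [dst, dst + 16*n).  */
	static void store_vrs(uint8_t *dst, uint8_t regs[][16], unsigned n)
	{
		unsigned m = (uintptr_t)dst & 0xf;
		uint8_t *blk = dst - m;		/* first aligned block */
		uint8_t prev[16], tail[16];

		rotl16(prev, blk, m);		/* "load and rotate data below v20" */
		for (unsigned i = 0; i < n; i++) {
			blend_store(blk + 16 * i, prev, regs[i], m);
			memcpy(prev, regs[i], 16);
		}
		rotl16(tail, blk + 16 * n, m);	/* "load and rotate data above v31" */
		blend_store(blk + 16 * n, prev, tail, m);
	}

	int main(void)
	{
		_Alignas(16) uint8_t buf[224];
		uint8_t regs[12][16];		/* stand-ins for v20..v31 */

		memset(buf, 0xee, sizeof buf);	/* guard pattern */
		for (unsigned i = 0; i < 12; i++)
			memset(regs[i], i + 1, 16);

		store_vrs(buf + 20, regs, 12);	/* misaligned by 4 */

		assert(buf[19] == 0xee && buf[212] == 0xee);	/* ends intact */
		assert(buf[20] == 1 && buf[211] == 12);		/* data landed */
		return 0;
	}

The assembly streams this with one vperm and one stvx per register, alternating r5 and r6 as store pointers and bumping the idle one by 32 inside each save_misaligned_vmx call; it also branches to L(aligned_save_vmx) when the buffer is aligned rather than relying on the degenerate m == 0 blend as the model above would.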