author		Rajalakshmi Srinivasaraghavan <raji@linux.vnet.ibm.com>	2016-02-23 11:10:34 +0530
committer	Tulio Magno Quites Machado Filho <tuliom@linux.vnet.ibm.com>	2016-03-11 11:31:58 -0300
commit		869d7180dd4a462674971a25b66070da7f2eabcf (patch)
tree		d7eff84810c8790b631e2e2d1d7fc35add9be12a /sysdeps/powerpc/powerpc32
parent		f60ee13f5d078b29e7ccdaae57bb4e9f14946eb4 (diff)
powerpc: Rearrange cfi_offset calls
This patch moves cfi_offset() calls to after the last store so as to avoid extra DW_CFA_advance opcodes in the unwind information.
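For illustration, a minimal sketch of why the ordering matters (modeled on the memcpy.S hunk below, not taken verbatim from the patch): each CFI directive is recorded at the current code address, so placing a cfi_offset after every individual store forces a DW_CFA_advance_loc before each record, while grouping the cfi_offset calls after the final store lets all of the records share a single address advance.

	# Interleaved: each cfi_offset sits at a different code address,
	# so the unwind table needs a DW_CFA_advance_loc before each record.
	stw	31, 24(1)
	cfi_offset(31, (24-32))
	stw	30, 20(1)
	cfi_offset(30, (20-32))

	# Grouped after the last store: both records are emitted at the
	# same address and share one DW_CFA_advance_loc.
	stw	31, 24(1)
	stw	30, 20(1)
	cfi_offset(31, (24-32))
	cfi_offset(30, (20-32))

Both arrangements describe the same saved-register state; only the encoding size of the unwind information changes.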
Diffstat (limited to 'sysdeps/powerpc/powerpc32')
-rw-r--r--	sysdeps/powerpc/powerpc32/power4/memcmp.S	| 12
-rw-r--r--	sysdeps/powerpc/powerpc32/power6/memcpy.S	|  2
-rw-r--r--	sysdeps/powerpc/powerpc32/power7/memcmp.S	| 12
3 files changed, 13 insertions, 13 deletions
diff --git a/sysdeps/powerpc/powerpc32/power4/memcmp.S b/sysdeps/powerpc/powerpc32/power4/memcmp.S
index 602a7957ad..9cb116e232 100644
--- a/sysdeps/powerpc/powerpc32/power4/memcmp.S
+++ b/sysdeps/powerpc/powerpc32/power4/memcmp.S
@@ -54,8 +54,8 @@ EALIGN (memcmp, 4, 0)
 	stwu	1, -64(r1)
 	cfi_adjust_cfa_offset(64)
 	stw	rWORD8, 48(r1)
-	cfi_offset(rWORD8, (48-64))
 	stw	rWORD7, 44(r1)
+	cfi_offset(rWORD8, (48-64))
 	cfi_offset(rWORD7, (44-64))
 	bne	L(unaligned)
 /* At this point we know both strings have the same alignment and the
@@ -747,18 +747,18 @@ L(unaligned):
    the actual start of rSTR2.  */
 	clrrwi	rSTR2, rSTR2, 2
 	stw	rWORD2_SHIFT, 28(r1)
-	cfi_offset(rWORD2_SHIFT, (28-64))
 /* Compute the left/right shift counts for the unaligned rSTR2,
    compensating for the logical (W aligned) start of rSTR1.  */
 	clrlwi	rSHL, rWORD8_SHIFT, 30
 	clrrwi	rSTR1, rSTR1, 2
 	stw	rWORD4_SHIFT, 24(r1)
-	cfi_offset(rWORD4_SHIFT, (24-64))
 	slwi	rSHL, rSHL, 3
 	cmplw	cr5, rWORD8_SHIFT, rSTR2
 	add	rN, rN, r12
 	slwi	rWORD6, r12, 3
 	stw	rWORD6_SHIFT, 20(r1)
+	cfi_offset(rWORD2_SHIFT, (28-64))
+	cfi_offset(rWORD4_SHIFT, (24-64))
 	cfi_offset(rWORD6_SHIFT, (20-64))
 	subfic	rSHR, rSHL, 32
 	srwi	r0, rN, 4	/* Divide by 16 */
@@ -852,15 +852,15 @@ L(duPs4):
 	.align	4
 L(Wunaligned):
 	stw	rWORD8_SHIFT, 32(r1)
-	cfi_offset(rWORD8_SHIFT, (32-64))
 	clrrwi	rSTR2, rSTR2, 2
 	stw	rWORD2_SHIFT, 28(r1)
-	cfi_offset(rWORD2_SHIFT, (28-64))
 	srwi	r0, rN, 4	/* Divide by 16 */
 	stw	rWORD4_SHIFT, 24(r1)
-	cfi_offset(rWORD4_SHIFT, (24-64))
 	andi.	r12, rN, 12	/* Get the W remainder */
 	stw	rWORD6_SHIFT, 20(r1)
+	cfi_offset(rWORD8_SHIFT, (32-64))
+	cfi_offset(rWORD2_SHIFT, (28-64))
+	cfi_offset(rWORD4_SHIFT, (24-64))
 	cfi_offset(rWORD6_SHIFT, (20-64))
 	slwi	rSHL, rSHL, 3
 #ifdef __LITTLE_ENDIAN__
diff --git a/sysdeps/powerpc/powerpc32/power6/memcpy.S b/sysdeps/powerpc/powerpc32/power6/memcpy.S
index 6dff0ed6df..ae796a26c1 100644
--- a/sysdeps/powerpc/powerpc32/power6/memcpy.S
+++ b/sysdeps/powerpc/powerpc32/power6/memcpy.S
@@ -46,8 +46,8 @@ EALIGN (memcpy, 5, 0)
 	ble-	cr1,L(word_unaligned_short)	/* If move < 32 bytes.  */
 	cmplw	cr6,10,11
 	stw	31,24(1)
-	cfi_offset(31,(24-32))
 	stw	30,20(1)
+	cfi_offset(31,(24-32))
 	cfi_offset(30,(20-32))
 	mr	30,3
 	beq	.L0
diff --git a/sysdeps/powerpc/powerpc32/power7/memcmp.S b/sysdeps/powerpc/powerpc32/power7/memcmp.S
index 9c06a89491..13e8492106 100644
--- a/sysdeps/powerpc/powerpc32/power7/memcmp.S
+++ b/sysdeps/powerpc/powerpc32/power7/memcmp.S
@@ -54,8 +54,8 @@ EALIGN (memcmp, 4, 0)
 	stwu	1, -64(r1)
 	cfi_adjust_cfa_offset(64)
 	stw	rWORD8, 48(r1)
-	cfi_offset(rWORD8, (48-64))
 	stw	rWORD7, 44(r1)
+	cfi_offset(rWORD8, (48-64))
 	cfi_offset(rWORD7, (44-64))
 	bne	L(unaligned)
 /* At this point we know both strings have the same alignment and the
@@ -747,18 +747,18 @@ L(unaligned):
    the actual start of rSTR2.  */
 	clrrwi	rSTR2, rSTR2, 2
 	stw	rWORD2_SHIFT, 28(r1)
-	cfi_offset(rWORD2_SHIFT, (28-64))
 /* Compute the left/right shift counts for the unaligned rSTR2,
    compensating for the logical (W aligned) start of rSTR1.  */
 	clrlwi	rSHL, rWORD8_SHIFT, 30
 	clrrwi	rSTR1, rSTR1, 2
 	stw	rWORD4_SHIFT, 24(r1)
-	cfi_offset(rWORD4_SHIFT, (24-64))
 	slwi	rSHL, rSHL, 3
 	cmplw	cr5, rWORD8_SHIFT, rSTR2
 	add	rN, rN, r12
 	slwi	rWORD6, r12, 3
 	stw	rWORD6_SHIFT, 20(r1)
+	cfi_offset(rWORD2_SHIFT, (28-64))
+	cfi_offset(rWORD4_SHIFT, (24-64))
 	cfi_offset(rWORD6_SHIFT, (20-64))
 	subfic	rSHR, rSHL, 32
 	srwi	r0, rN, 4	/* Divide by 16 */
@@ -852,15 +852,15 @@ L(duPs4):
 	.align	4
 L(Wunaligned):
 	stw	rWORD8_SHIFT, 32(r1)
-	cfi_offset(rWORD8_SHIFT, (32-64))
 	clrrwi	rSTR2, rSTR2, 2
 	stw	rWORD2_SHIFT, 28(r1)
-	cfi_offset(rWORD2_SHIFT, (28-64))
 	srwi	r0, rN, 4	/* Divide by 16 */
 	stw	rWORD4_SHIFT, 24(r1)
-	cfi_offset(rWORD4_SHIFT, (24-64))
 	andi.	r12, rN, 12	/* Get the W remainder */
 	stw	rWORD6_SHIFT, 20(r1)
+	cfi_offset(rWORD8_SHIFT, (32-64))
+	cfi_offset(rWORD2_SHIFT, (28-64))
+	cfi_offset(rWORD4_SHIFT, (24-64))
 	cfi_offset(rWORD6_SHIFT, (20-64))
 	slwi	rSHL, rSHL, 3
 #ifdef __LITTLE_ENDIAN__