author | Sunil K Pandey <skpgkp2@gmail.com> | 2022-03-07 10:47:13 -0800
committer | Sunil K Pandey <skpgkp2@gmail.com> | 2022-03-07 21:14:11 -0800
commit | 214e4f0c200ef27ff57301ae863cd9b8cd6c3665 (patch)
tree | a9b51a7fafe1c7e71b166c3c359668107c5eb8b5
parent | 075dd8a017b87e0d5d5ba139ceb61e6cdd4b267b (diff)
x86_64: Fix svml_s_hypotf4_core_sse4.S code formatting
This commit contains the following formatting changes:

1. Instructions preceded by a tab.
2. Instructions less than 8 characters in length have a tab between the mnemonic and the first operand.
3. Instructions greater than 7 characters in length have a space between the mnemonic and the first operand.
4. Tabs after `#define`d names and their values.
5. 8 spaces at the beginning of a line replaced by a tab.
6. Comments indented with the code.
7. Redundant .text section removed.
8. 1 space between line content and line comment.
9. Space after all commas.

Reviewed-by: Noah Goldstein <goldstein.w.n@gmail.com>
-rw-r--r-- | sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S | 326
1 file changed, 162 insertions, 164 deletions
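Before the diff itself, a minimal before/after illustration of the rules listed in the commit message (added here for clarity, not part of the commit; spacing is approximate). In the old style, instructions such as these from this file were indented with 8 spaces and operands were aligned with spaces:

        movss     32(%rsp,%r14,4), %xmm0
        movmskps  %xmm4, %edx

In the new style a tab replaces the 8-space indent, a short mnemonic such as movss is followed by a tab, an 8-character mnemonic such as movmskps is followed by a space, and every comma is followed by a space:

	movss	32(%rsp, %r14, 4), %xmm0
	movmskps %xmm4, %edx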
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
index 95f040c831..5cf9b1c339 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_hypotf4_core_sse4.S
@@ -63,7 +63,7 @@
  * result = s - h * d
  *
  * EP version of the function can be implemented as y[i]=sqrt(a[i]^2+b[i]^2)
- * with all intermediate operations done in target precision for i=1,..,n.
+ * with all intermediate operations done in target precision for i=1, .., n.
  * It can return result y[i]=0 in case a[i]^2 and b[i]^2 underflow in target
  * precision (for some i). It can return result y[i]=NAN in case
  * a[i]^2+b[i]^2 overflow in target precision, for some i. It can return
@@ -74,192 +74,190 @@
 /* Offsets for data table __svml_shypot_data_internal */
-#define _sHiLoMask            0
-#define _sAbsMask             16
-#define _sHalf                32
-#define _LowBoundary          48
-#define _HighBoundary         64
+#define _sHiLoMask	0
+#define _sAbsMask	16
+#define _sHalf	32
+#define _LowBoundary	48
+#define _HighBoundary	64
 
 #include <sysdep.h>
 
-        .text
-        .section .text.sse4,"ax",@progbits
+	.section .text.sse4, "ax", @progbits
 ENTRY(_ZGVbN4vv_hypotf_sse4)
-        subq      $88, %rsp
-        cfi_def_cfa_offset(96)
-
-/*
- * Implementation
- * Multiprecision branch for _HA_ only
- * No multiprecision branch for _LA_
- * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
- */
-        movaps    %xmm0, %xmm8
-        movaps    %xmm1, %xmm2
-        mulps     %xmm0, %xmm8
-        mulps     %xmm1, %xmm2
-
-/*
- * Variables
- * Defines
- * Constants loading
- */
-        movups    _sHalf+__svml_shypot_data_internal(%rip), %xmm5
-        addps     %xmm2, %xmm8
-
-/* _s0 ~ 1.0/sqrt(_z) */
-        rsqrtps   %xmm8, %xmm10
-
-/* First iteration */
-        movaps    %xmm10, %xmm2
-        movaps    %xmm8, %xmm3
-        mulps     %xmm8, %xmm2
-        mulps     %xmm5, %xmm10
-        movaps    %xmm2, %xmm6
-        mulps     %xmm10, %xmm6
-
-/* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */
-        movdqu    _LowBoundary+__svml_shypot_data_internal(%rip), %xmm4
-        subps     %xmm6, %xmm5
-
-/* Second iteration */
-        movaps    %xmm5, %xmm7
-        pcmpgtd   %xmm8, %xmm4
-        mulps     %xmm2, %xmm5
-        mulps     %xmm10, %xmm7
-        addps     %xmm5, %xmm2
-        addps     %xmm7, %xmm10
-
-/* Finish second iteration in native precision for _LA_ */
-        movaps    %xmm2, %xmm9
-        mulps     %xmm2, %xmm9
-        pcmpgtd   _HighBoundary+__svml_shypot_data_internal(%rip), %xmm3
-        subps     %xmm8, %xmm9
-        mulps     %xmm9, %xmm10
-        por       %xmm3, %xmm4
-        movmskps  %xmm4, %edx
-        subps     %xmm10, %xmm2
-
-/* The end of implementation */
-        testl     %edx, %edx
-
-/* Go to special inputs processing branch */
-        jne       L(SPECIAL_VALUES_BRANCH)
-        # LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1 xmm2
-
-/* Restore registers
- * and exit the function
- */
+	subq	$88, %rsp
+	cfi_def_cfa_offset(96)
+
+	/*
+	 * Implementation
+	 * Multiprecision branch for _HA_ only
+	 * No multiprecision branch for _LA_
+	 * _z = _VARG1 * _VARG1 + _VARG2 * _VARG2
+	 */
+	movaps	%xmm0, %xmm8
+	movaps	%xmm1, %xmm2
+	mulps	%xmm0, %xmm8
+	mulps	%xmm1, %xmm2
+
+	/*
+	 * Variables
+	 * Defines
+	 * Constants loading
+	 */
+	movups	_sHalf+__svml_shypot_data_internal(%rip), %xmm5
+	addps	%xmm2, %xmm8
+
+	/* _s0 ~ 1.0/sqrt(_z) */
+	rsqrtps	%xmm8, %xmm10
+
+	/* First iteration */
+	movaps	%xmm10, %xmm2
+	movaps	%xmm8, %xmm3
+	mulps	%xmm8, %xmm2
+	mulps	%xmm5, %xmm10
+	movaps	%xmm2, %xmm6
+	mulps	%xmm10, %xmm6
+
+	/* Check _z exponent to be withing borders [1E3 ; 60A] else goto Callout */
+	movdqu	_LowBoundary+__svml_shypot_data_internal(%rip), %xmm4
+	subps	%xmm6, %xmm5
+
+	/* Second iteration */
+	movaps	%xmm5, %xmm7
+	pcmpgtd	%xmm8, %xmm4
+	mulps	%xmm2, %xmm5
+	mulps	%xmm10, %xmm7
+	addps	%xmm5, %xmm2
+	addps	%xmm7, %xmm10
+
+	/* Finish second iteration in native precision for _LA_ */
+	movaps	%xmm2, %xmm9
+	mulps	%xmm2, %xmm9
+	pcmpgtd	_HighBoundary+__svml_shypot_data_internal(%rip), %xmm3
+	subps	%xmm8, %xmm9
+	mulps	%xmm9, %xmm10
+	por	%xmm3, %xmm4
+	movmskps %xmm4, %edx
+	subps	%xmm10, %xmm2
+
+	/* The end of implementation */
+	testl	%edx, %edx
+
+	/* Go to special inputs processing branch */
+	jne	L(SPECIAL_VALUES_BRANCH)
+	# LOE rbx rbp r12 r13 r14 r15 edx xmm0 xmm1 xmm2
+
+	/* Restore registers
+	 * and exit the function
+	 */
 L(EXIT):
-        movaps    %xmm2, %xmm0
-        addq      $88, %rsp
-        cfi_def_cfa_offset(8)
-        ret
-        cfi_def_cfa_offset(96)
-
-/* Branch to process
- * special inputs
- */
+	movaps	%xmm2, %xmm0
+	addq	$88, %rsp
+	cfi_def_cfa_offset(8)
+	ret
+	cfi_def_cfa_offset(96)
+
+	/* Branch to process
+	 * special inputs
+	 */
 L(SPECIAL_VALUES_BRANCH):
-        movups    %xmm0, 32(%rsp)
-        movups    %xmm1, 48(%rsp)
-        movups    %xmm2, 64(%rsp)
-        # LOE rbx rbp r12 r13 r14 r15 edx
-
-        xorl      %eax, %eax
-        movq      %r12, 16(%rsp)
-        cfi_offset(12, -80)
-        movl      %eax, %r12d
-        movq      %r13, 8(%rsp)
-        cfi_offset(13, -88)
-        movl      %edx, %r13d
-        movq      %r14, (%rsp)
-        cfi_offset(14, -96)
-        # LOE rbx rbp r15 r12d r13d
-
-/* Range mask
- * bits check
- */
+	movups	%xmm0, 32(%rsp)
+	movups	%xmm1, 48(%rsp)
+	movups	%xmm2, 64(%rsp)
+	# LOE rbx rbp r12 r13 r14 r15 edx
+
+	xorl	%eax, %eax
+	movq	%r12, 16(%rsp)
+	cfi_offset(12, -80)
+	movl	%eax, %r12d
+	movq	%r13, 8(%rsp)
+	cfi_offset(13, -88)
+	movl	%edx, %r13d
+	movq	%r14, (%rsp)
+	cfi_offset(14, -96)
+	# LOE rbx rbp r15 r12d r13d
+
+	/* Range mask
+	 * bits check
+	 */
 L(RANGEMASK_CHECK):
-        btl       %r12d, %r13d
+	btl	%r12d, %r13d
 
-/* Call scalar math function */
-        jc        L(SCALAR_MATH_CALL)
-        # LOE rbx rbp r15 r12d r13d
+	/* Call scalar math function */
+	jc	L(SCALAR_MATH_CALL)
+	# LOE rbx rbp r15 r12d r13d
 
-/* Special inputs
- * processing loop
- */
+	/* Special inputs
+	 * processing loop
+	 */
 L(SPECIAL_VALUES_LOOP):
-        incl      %r12d
-        cmpl      $4, %r12d
-
-/* Check bits in range mask */
-        jl        L(RANGEMASK_CHECK)
-        # LOE rbx rbp r15 r12d r13d
-
-        movq      16(%rsp), %r12
-        cfi_restore(12)
-        movq      8(%rsp), %r13
-        cfi_restore(13)
-        movq      (%rsp), %r14
-        cfi_restore(14)
-        movups    64(%rsp), %xmm2
-
-/* Go to exit */
-        jmp       L(EXIT)
-        cfi_offset(12, -80)
-        cfi_offset(13, -88)
-        cfi_offset(14, -96)
-        # LOE rbx rbp r12 r13 r14 r15 xmm2
-
-/* Scalar math fucntion call
- * to process special input
- */
+	incl	%r12d
+	cmpl	$4, %r12d
+
+	/* Check bits in range mask */
+	jl	L(RANGEMASK_CHECK)
+	# LOE rbx rbp r15 r12d r13d
+
+	movq	16(%rsp), %r12
+	cfi_restore(12)
+	movq	8(%rsp), %r13
+	cfi_restore(13)
+	movq	(%rsp), %r14
+	cfi_restore(14)
+	movups	64(%rsp), %xmm2
+
+	/* Go to exit */
+	jmp	L(EXIT)
+	cfi_offset(12, -80)
+	cfi_offset(13, -88)
+	cfi_offset(14, -96)
+	# LOE rbx rbp r12 r13 r14 r15 xmm2
+
+	/* Scalar math fucntion call
+	 * to process special input
+	 */
 L(SCALAR_MATH_CALL):
-        movl      %r12d, %r14d
-        movss     32(%rsp,%r14,4), %xmm0
-        movss     48(%rsp,%r14,4), %xmm1
-        call      hypotf@PLT
-        # LOE rbx rbp r14 r15 r12d r13d xmm0
+	movl	%r12d, %r14d
+	movss	32(%rsp, %r14, 4), %xmm0
+	movss	48(%rsp, %r14, 4), %xmm1
+	call	hypotf@PLT
+	# LOE rbx rbp r14 r15 r12d r13d xmm0
 
-        movss     %xmm0, 64(%rsp,%r14,4)
+	movss	%xmm0, 64(%rsp, %r14, 4)
 
-/* Process special inputs in loop */
-        jmp       L(SPECIAL_VALUES_LOOP)
-        # LOE rbx rbp r15 r12d r13d
+	/* Process special inputs in loop */
+	jmp	L(SPECIAL_VALUES_LOOP)
+	# LOE rbx rbp r15 r12d r13d
 END(_ZGVbN4vv_hypotf_sse4)
 
-        .section .rodata, "a"
-        .align 16
+	.section .rodata, "a"
+	.align 16
 
 #ifdef __svml_shypot_data_internal_typedef
 typedef unsigned int VUINT32;
-typedef struct
-{
-        __declspec(align(16)) VUINT32 _sHiLoMask[4][1];
-        __declspec(align(16)) VUINT32 _sAbsMask[4][1];
-        __declspec(align(16)) VUINT32 _sHalf[4][1];
-        __declspec(align(16)) VUINT32 _LowBoundary[4][1];
-        __declspec(align(16)) VUINT32 _HighBoundary[4][1];
+typedef struct {
+	__declspec(align(16)) VUINT32 _sHiLoMask[4][1];
+	__declspec(align(16)) VUINT32 _sAbsMask[4][1];
+	__declspec(align(16)) VUINT32 _sHalf[4][1];
+	__declspec(align(16)) VUINT32 _LowBoundary[4][1];
+	__declspec(align(16)) VUINT32 _HighBoundary[4][1];
 } __svml_shypot_data_internal;
 #endif
 __svml_shypot_data_internal:
-        /* legacy algorithm */
-        .long 0xFFF80000, 0xFFF80000, 0xFFF80000, 0xFFF80000 /* _sHiLoMask */
-        .align 16
-        .long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _sAbsMask */
-        .align 16
-        .long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000 /* _sHalf */
-        .align 16
-        .long 0x1E300000, 0x1E300000, 0x1E300000, 0x1E300000 /* _LowBoundary */
-        .align 16
-        .long 0x60A00000, 0x60A00000, 0x60A00000, 0x60A00000 /* _HighBoundary */
-        .align 16
-        .type __svml_shypot_data_internal,@object
-        .size __svml_shypot_data_internal,.-__svml_shypot_data_internal
+	/* legacy algorithm */
+	.long 0xFFF80000, 0xFFF80000, 0xFFF80000, 0xFFF80000 /* _sHiLoMask */
+	.align 16
+	.long 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff /* _sAbsMask */
+	.align 16
+	.long 0x3f000000, 0x3f000000, 0x3f000000, 0x3f000000 /* _sHalf */
+	.align 16
+	.long 0x1E300000, 0x1E300000, 0x1E300000, 0x1E300000 /* _LowBoundary */
+	.align 16
+	.long 0x60A00000, 0x60A00000, 0x60A00000, 0x60A00000 /* _HighBoundary */
+	.align 16
+	.type __svml_shypot_data_internal, @object
+	.size __svml_shypot_data_internal, .-__svml_shypot_data_internal
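For readers tracing the unchanged arithmetic in the hunk above: the instruction sequence computes sqrt(_z) for _z = _VARG1 * _VARG1 + _VARG2 * _VARG2 by refining the rsqrtps seed, matching the file's own comments ("_s0 ~ 1.0/sqrt(_z)", "result = s - h * d"). A rough sketch of the quantities involved, in the comments' notation (this derivation is added here for clarity and is not part of the commit):

  s0 ~ 1/sqrt(z)                        (rsqrtps seed)
  s  = z * s0 ~ sqrt(z),                h = 0.5 * s0 ~ 1/(2*sqrt(z))
  e  = 0.5 - s * h                      (relative error of the seed)
  s  = s + s * e,                       h = h + h * e
  d  = s * s - z,                       result = s - h * d ~ sqrt(z) = hypotf(x, y)

Lanes where z falls outside the range guarded by _LowBoundary/_HighBoundary (the exponent check in the comment) are flagged by the two pcmpgtd compares and movmskps, and those elements are recomputed with the scalar hypotf call under L(SPECIAL_VALUES_BRANCH).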