Diffstat (limited to 'sysdeps')
-rw-r--r--   sysdeps/mips/memcpy.S                                        | 4
-rw-r--r--   sysdeps/mips/memset.S                                        | 2
-rw-r--r--   sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S | 2
-rw-r--r--   sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S    | 2
-rw-r--r--   sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S    | 2
5 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/sysdeps/mips/memcpy.S b/sysdeps/mips/memcpy.S
index af01d0dd73..2b84c75807 100644
--- a/sysdeps/mips/memcpy.S
+++ b/sysdeps/mips/memcpy.S
@@ -507,7 +507,7 @@ L(skip_pref):
 	move	a2,t8
 
 /* Here we have src and dest word-aligned but less than 64-bytes or
- * 128 bytes to go.  Check for a 32(64) byte chunk and copy if if there
+ * 128 bytes to go.  Check for a 32(64) byte chunk and copy if there
  * is one.  Otherwise jump down to L(chk1w) to handle the tail end of
  * the copy.
  */
@@ -736,7 +736,7 @@ L(ua_skip_pref):
 	move	a2,t8
 
 /* Here we have src and dest word-aligned but less than 64-bytes or
- * 128 bytes to go.  Check for a 32(64) byte chunk and copy if if there
+ * 128 bytes to go.  Check for a 32(64) byte chunk and copy if there
  * is one.  Otherwise jump down to L(ua_chk1w) to handle the tail end of
  * the copy.
  */
diff --git a/sysdeps/mips/memset.S b/sysdeps/mips/memset.S
index 04370a8660..f6bd624e6f 100644
--- a/sysdeps/mips/memset.S
+++ b/sysdeps/mips/memset.S
@@ -370,7 +370,7 @@ L(skip_pref):
 	move	a2,t8
 
 /* Here we have dest word-aligned but less than 64-bytes or 128 bytes to go.
-   Check for a 32(64) byte chunk and copy if if there is one.  Otherwise
+   Check for a 32(64) byte chunk and copy if there is one.  Otherwise
    jump down to L(chk1w) to handle the tail end of the copy.  */
 L(chkw):
 	andi	t8,a2,NSIZEMASK	/* is there a 32-byte/64-byte chunk.  */
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
index 8fa4255d6d..64c91e6e2c 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
@@ -41,7 +41,7 @@
       b) Calculate 2 polynomials for sin and cos:
          RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
          RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4))));
-      c) Swap RS & RC if if first bit of obtained value after
+      c) Swap RS & RC if first bit of obtained value after
          Right Shifting is set to 1.  Using And, Andnot & Or operations.
    3) Destination sign setting
       a) Set shifted destination sign using XOR operation:
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
index 74a6ac1157..748646e8d9 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
@@ -42,7 +42,7 @@ ENTRY (_ZGVbN4vl4l4_sincosf_sse4)
       b) Calculate 2 polynomials for sin and cos:
          RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
          RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4))));
-      c) Swap RS & RC if if first bit of obtained value after
+      c) Swap RS & RC if first bit of obtained value after
         Right Shifting is set to 1.  Using And, Andnot & Or operations.
    3) Destination sign setting
       a) Set shifted destination sign using XOR operation:
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
index 9e4e2c71c5..aadf45dcb3 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
@@ -42,7 +42,7 @@ ENTRY (_ZGVdN8vl4l4_sincosf_avx2)
       b) Calculate 2 polynomials for sin and cos:
         RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
         RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4))));
-      c) Swap RS & RC if if first bit of obtained value after
+      c) Swap RS & RC if first bit of obtained value after
         Right Shifting is set to 1.  Using And, Andnot & Or operations.
    3) Destination sign setting
       a) Set shifted destination sign using XOR operation:
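Note on the comment being corrected in the sincos kernels: the vector code evaluates both polynomials (RS for sin, RC for cos) and then swaps the two results without a branch whenever the quadrant bit (the "first bit of obtained value after Right Shifting") is set, using only And, Andnot and Or. Below is a minimal scalar C sketch of that selection idiom, not the actual glibc vector code; the function name swap_rs_rc_branchless and the swap_mask parameter are illustrative only, and swap_mask is assumed to be all-ones when the quadrant bit is set and all-zeros otherwise.

#include <stdint.h>
#include <string.h>

/* Scalar sketch of the And/Andnot/Or select used to swap RS and RC.
   Not the glibc implementation; illustrative names and layout only.  */
static void
swap_rs_rc_branchless (float *rs, float *rc, uint32_t swap_mask)
{
  uint32_t s, c;
  memcpy (&s, rs, sizeof s);	/* reinterpret the floats as bit patterns */
  memcpy (&c, rc, sizeof c);

  /* new_s = swap_mask ? c : s, built only from and/andnot/or;
     new_c is the mirror image, so a set mask swaps the two values.  */
  uint32_t new_s = (swap_mask & c) | (~swap_mask & s);
  uint32_t new_c = (swap_mask & s) | (~swap_mask & c);

  memcpy (rs, &new_s, sizeof new_s);
  memcpy (rc, &new_c, sizeof new_c);
}

The same mask-based select extends lane-wise to the SSE4, AVX2 and AVX-512 variants touched by this patch, where the mask is a per-lane all-ones/all-zeros vector and the scalar &, ~& and | become the corresponding packed and/andnot/or instructions.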