author     Ondřej Bílka <neleai@seznam.cz>  2013-08-30 18:09:30 +0200
committer  Ondřej Bílka <neleai@seznam.cz>  2013-08-30 18:10:31 +0200
commit     f24a6d086b96a65a73c68ecf349b03321cb03720 (patch)
tree       823c4db1322b11c81ba5ce05d77adbf4afbdbbc8 /sysdeps
parent     382466e04e347d3b3fa221897efb8f4cdc239105 (diff)
Fix then/than typos.
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/i386/stpncpy.S                      2
-rw-r--r--  sysdeps/powerpc/powerpc32/power6/memcpy.S  18
-rw-r--r--  sysdeps/powerpc/powerpc32/power6/memset.S   4
-rw-r--r--  sysdeps/powerpc/powerpc64/power6/memcpy.S  10
-rw-r--r--  sysdeps/powerpc/powerpc64/power6/memset.S   4
5 files changed, 19 insertions, 19 deletions
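
The first file touched, sysdeps/i386/stpncpy.S, opens with the comment being corrected: copy no more than N bytes from SRC to DEST and return the address of the terminating '\0' in DEST. A minimal C sketch of that contract (a hypothetical my_stpncpy for illustration, not glibc's hand-tuned i386 assembly):

#include <stddef.h>

/* Copy at most n bytes from src to dest, NUL-padding the remainder,
   and return the address of the terminating '\0' in dest, or dest + n
   if src has no '\0' within its first n bytes.  */
char *
my_stpncpy (char *dest, const char *src, size_t n)
{
  size_t i = 0;

  /* Copy until the source string ends or n bytes are written.  */
  for (; i < n && src[i] != '\0'; i++)
    dest[i] = src[i];

  char *end = dest + i;         /* where the '\0' lands, if any */

  /* NUL-pad the rest of the buffer, as stpncpy requires.  */
  for (; i < n; i++)
    dest[i] = '\0';

  return end;
}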
diff --git a/sysdeps/i386/stpncpy.S b/sysdeps/i386/stpncpy.S
index 11882738d9..b23e8208a0 100644
--- a/sysdeps/i386/stpncpy.S
+++ b/sysdeps/i386/stpncpy.S
@@ -1,4 +1,4 @@
-/* copy no more then N bytes from SRC to DEST, returning the address of
+/* copy no more than N bytes from SRC to DEST, returning the address of
    the terminating '\0' in DEST.
    For Intel 80x86, x>=3.
    Copyright (C) 1994-2013 Free Software Foundation, Inc.
diff --git a/sysdeps/powerpc/powerpc32/power6/memcpy.S b/sysdeps/powerpc/powerpc32/power6/memcpy.S
index c3d55b7681..c7868069ab 100644
--- a/sysdeps/powerpc/powerpc32/power6/memcpy.S
+++ b/sysdeps/powerpc/powerpc32/power6/memcpy.S
@@ -269,7 +269,7 @@ L(wus_tail16):  /* Move 16 bytes.  */
     addi  1,1,32
     blr
     .align  4
-L(wus_tail16p8):  /* less then 8 bytes left.  */
+L(wus_tail16p8):  /* less than 8 bytes left.  */
     beq   cr1,L(wus_tailX) /* exactly 16 bytes, early exit.  */
     cmplwi cr1,10,20
     bf    29,L(wus_tail16p2)
@@ -283,7 +283,7 @@ L(wus_tail16p8):  /* less then 8 bytes left.  */
     addi  1,1,32
     blr
     .align  4
-L(wus_tail16p4):  /* less then 4 bytes left.  */
+L(wus_tail16p4):  /* less than 4 bytes left.  */
     addi  12,12,24
     addi  11,11,24
     bgt   cr0,L(wus_tail2)
@@ -291,7 +291,7 @@ L(wus_tail16p4):  /* less then 4 bytes left.  */
     addi  1,1,32
     blr
     .align  4
-L(wus_tail16p2):  /* 16 bytes moved, less then 4 bytes left.  */
+L(wus_tail16p2):  /* 16 bytes moved, less than 4 bytes left.  */
     addi  12,12,16
     addi  11,11,16
     b     L(wus_tail2)
@@ -315,7 +315,7 @@ L(wus_tail8):  /* Move 8 bytes.  */
     addi  1,1,32
     blr
     .align  4
-L(wus_tail8p4):  /* less then 4 bytes left.  */
+L(wus_tail8p4):  /* less than 4 bytes left.  */
     addi  12,12,8
     addi  11,11,8
     bgt   cr1,L(wus_tail2)
@@ -326,7 +326,7 @@ L(wus_tail8p4):  /* less then 4 bytes left.  */
     .align  4
 L(wus_tail4):  /* Move 4 bytes.  */
 /*  r6 already loaded speculatively.  If we are here we know there is
-    more then 4 bytes left.  So there is no need to test.  */
+    more than 4 bytes left.  So there is no need to test.  */
     addi  12,12,4
     stw   6,0(11)
     addi  11,11,4
@@ -426,14 +426,14 @@ L(wdu):
      First we need to copy word up to but not crossing the next 32-byte
      boundary. Then perform aligned loads just before and just after
      the boundary and use shifts and or to generate the next aligned
-     word for dst. If more then 32 bytes remain we copy (unaligned src)
-     the next 7 words and repeat the loop until less then 32-bytes
+     word for dst. If more than 32 bytes remain we copy (unaligned src)
+     the next 7 words and repeat the loop until less than 32-bytes
      remain.
-     Then if more then 4 bytes remain we again use aligned loads,
+     Then if more than 4 bytes remain we again use aligned loads,
      shifts and or to generate the next dst word. We then process the
      remaining words using unaligned loads as needed. Finally we check
-     if there more then 0 bytes (1-3) bytes remaining and use
+     if there more than 0 bytes (1-3) bytes remaining and use
      halfword and or byte load/stores to complete the copy.
      */
     mr   4,12      /* restore unaligned adjusted src ptr */
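
The L(wdu) comment above describes the classic unaligned-copy trick: keep every load and store aligned, and splice each destination word out of two adjacent aligned source words with shifts and ORs. A rough C model of the technique, under stated assumptions (word-aligned dst; little-endian byte order, whereas the big-endian POWER original shifts the opposite way; C aliasing rules glossed over; copy_words_unaligned_src is a hypothetical name, not the glibc code):

#include <stddef.h>
#include <stdint.h>

/* Copy nwords 32-bit words to word-aligned dst from a possibly
   misaligned src, issuing only aligned word loads.  */
static void
copy_words_unaligned_src (uint32_t *dst, const unsigned char *src,
                          size_t nwords)
{
  size_t off = (uintptr_t) src % 4;     /* src misalignment: 0..3 */

  if (off == 0)                         /* aligned: plain word copy */
    {
      for (size_t i = 0; i < nwords; i++)
        dst[i] = ((const uint32_t *) src)[i];
      return;
    }

  const uint32_t *asrc = (const uint32_t *) (src - off);
  unsigned lsh = 8 * off;               /* bits taken from word i     */
  unsigned rsh = 32 - lsh;              /* bits taken from word i + 1 */

  uint32_t prev = asrc[0];              /* aligned load just before the data */
  for (size_t i = 0; i < nwords; i++)
    {
      uint32_t next = asrc[i + 1];      /* aligned load just after it */
      dst[i] = (prev >> lsh) | (next << rsh);   /* splice one dst word */
      prev = next;
    }
}

The assembly unrolls this loop seven words at a time and, as the comment says, finishes the sub-word tail with halfword and byte load/stores.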
diff --git a/sysdeps/powerpc/powerpc32/power6/memset.S b/sysdeps/powerpc/powerpc32/power6/memset.S
index ce06630014..8c23c8d136 100644
--- a/sysdeps/powerpc/powerpc32/power6/memset.S
+++ b/sysdeps/powerpc/powerpc32/power6/memset.S
@@ -101,7 +101,7 @@ L(nondcbz):
    boundary may not be at cache line (128-byte) boundary.  */
 L(nzloopstart):
 /* memset in 32-byte chunks until we get to a cache line boundary.
-   If rLEN is less then the distance to the next cache-line boundary use
+   If rLEN is less than the distance to the next cache-line boundary use
    cacheAligned1 code to finish the tail.  */
     cmplwi  cr1,rLEN,128
@@ -306,7 +306,7 @@ L(nzCacheAligned256):
    block zero instruction.  */
 L(zloopstart):
 /* memset in 32-byte chunks until we get to a cache line boundary.
-   If rLEN is less then the distance to the next cache-line boundary use
+   If rLEN is less than the distance to the next cache-line boundary use
    cacheAligned1 code to finish the tail.  */
     cmplwi  cr1,rLEN,128
     beq   L(medium)
diff --git a/sysdeps/powerpc/powerpc64/power6/memcpy.S b/sysdeps/powerpc/powerpc64/power6/memcpy.S
index db29e2b065..d6d242d293 100644
--- a/sysdeps/powerpc/powerpc64/power6/memcpy.S
+++ b/sysdeps/powerpc/powerpc64/power6/memcpy.S
@@ -450,7 +450,7 @@ L(dus_tail16):  /* Move 16 bytes.  */
     ld    3,-16(1)
     blr
     .align  4
-L(dus_tail16p8):  /* less then 8 bytes left.  */
+L(dus_tail16p8):  /* less than 8 bytes left.  */
     beq   cr1,L(dus_tailX) /* exactly 16 bytes, early exit.  */
     cmpldi cr1,10,20
     bf    29,L(dus_tail16p2)
@@ -464,7 +464,7 @@ L(dus_tail16p8):  /* less then 8 bytes left.  */
     ld    3,-16(1)
     blr
     .align  4
-L(dus_tail16p4):  /* less then 4 bytes left.  */
+L(dus_tail16p4):  /* less than 4 bytes left.  */
     addi  12,12,24
     addi  3,3,24
     bgt   cr0,L(dus_tail2)
@@ -472,7 +472,7 @@ L(dus_tail16p4):  /* less then 4 bytes left.  */
     ld    3,-16(1)
     blr
     .align  4
-L(dus_tail16p2):  /* 16 bytes moved, less then 4 bytes left.  */
+L(dus_tail16p2):  /* 16 bytes moved, less than 4 bytes left.  */
     addi  12,12,16
     addi  3,3,16
     b     L(dus_tail2)
@@ -497,7 +497,7 @@ L(dus_tail8):  /* Move 8 bytes.  */
     ld    3,-16(1)
     blr
     .align  4
-L(dus_tail8p4):  /* less then 4 bytes left.  */
+L(dus_tail8p4):  /* less than 4 bytes left.  */
     addi  12,12,8
     addi  3,3,8
     bgt   cr1,L(dus_tail2)
@@ -508,7 +508,7 @@ L(dus_tail8p4):  /* less then 4 bytes left.  */
     .align  4
 L(dus_tail4):  /* Move 4 bytes.  */
 /*  r6 already loaded speculatively.  If we are here we know there is
-    more then 4 bytes left.  So there is no need to test.  */
+    more than 4 bytes left.  So there is no need to test.  */
     addi  12,12,4
     stw   6,0(3)
     addi  3,3,4
diff --git a/sysdeps/powerpc/powerpc64/power6/memset.S b/sysdeps/powerpc/powerpc64/power6/memset.S
index 541a45fd36..3e8ae2d25e 100644
--- a/sysdeps/powerpc/powerpc64/power6/memset.S
+++ b/sysdeps/powerpc/powerpc64/power6/memset.S
@@ -110,7 +110,7 @@ L(caligned):
    boundary may not be at cache line (128-byte) boundary.  */
 L(nzloopstart):
 /* memset in 32-byte chunks until we get to a cache line boundary.
-   If rLEN is less then the distance to the next cache-line boundary use
+   If rLEN is less than the distance to the next cache-line boundary use
    cacheAligned1 code to finish the tail.  */
     cmpldi  cr1,rLEN,128
@@ -186,7 +186,7 @@ L(nzCacheAligned128):
    block zero instruction.  */
 L(zloopstart):
 /* memset in 32-byte chunks until we get to a cache line boundary.
-   If rLEN is less then the distance to the next cache-line boundary use
+   If rLEN is less than the distance to the next cache-line boundary use
    cacheAligned1 code to finish the tail.  */
     cmpldi  cr1,rLEN,128
     beq   L(medium)
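
Both memset variants share the structure their comments describe: store in 32-byte chunks up to the next 128-byte cache-line boundary, clear whole lines with the dcbz block-zero instruction, then finish the tail. A structural sketch in C, assuming a zero fill byte (zero_line is a hypothetical stand-in for dcbz, and memset stands in for the 32-byte chunk stores):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define CACHE_LINE 128          /* POWER6 cache-line size used by the asm */

static void
zero_line (void *p)             /* placeholder for `dcbz 0,p` */
{
  memset (p, 0, CACHE_LINE);
}

static void
bzero_cacheline (unsigned char *p, size_t len)
{
  /* Distance from p to the next cache-line boundary.  */
  size_t lead = (CACHE_LINE - (uintptr_t) p % CACHE_LINE) % CACHE_LINE;

  if (lead > len)               /* rLEN shorter than the distance:    */
    lead = len;                 /* the "cacheAligned1" tail path       */

  memset (p, 0, lead);          /* 32-byte chunks in the real code */
  p += lead;
  len -= lead;

  while (len >= CACHE_LINE)     /* whole lines: block-zero each one */
    {
      zero_line (p);
      p += CACHE_LINE;
      len -= CACHE_LINE;
    }

  memset (p, 0, len);           /* remaining tail */
}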