Diffstat (limited to 'sysdeps/tile/tilepro')
-rw-r--r--  sysdeps/tile/tilepro/Implies          |   2
-rw-r--r--  sysdeps/tile/tilepro/bits/atomic.h    |  81
-rw-r--r--  sysdeps/tile/tilepro/bits/wordsize.h  |   3
-rw-r--r--  sysdeps/tile/tilepro/memchr.c         |  72
-rw-r--r--  sysdeps/tile/tilepro/memcpy.S         | 397
-rw-r--r--  sysdeps/tile/tilepro/memset.c         | 151
-rw-r--r--  sysdeps/tile/tilepro/memusage.h       |  29
-rw-r--r--  sysdeps/tile/tilepro/rawmemchr.c      |  45
-rw-r--r--  sysdeps/tile/tilepro/strchr.c         |  68
-rw-r--r--  sysdeps/tile/tilepro/strchrnul.c      |  65
-rw-r--r--  sysdeps/tile/tilepro/strlen.c         |  39
-rw-r--r--  sysdeps/tile/tilepro/strrchr.c        |  73
12 files changed, 1025 insertions, 0 deletions
diff --git a/sysdeps/tile/tilepro/Implies b/sysdeps/tile/tilepro/Implies
new file mode 100644
index 0000000000..709e1dc122
--- /dev/null
+++ b/sysdeps/tile/tilepro/Implies
@@ -0,0 +1,2 @@
+tile
+wordsize-32
diff --git a/sysdeps/tile/tilepro/bits/atomic.h b/sysdeps/tile/tilepro/bits/atomic.h
new file mode 100644
index 0000000000..cbbf64cef1
--- /dev/null
+++ b/sysdeps/tile/tilepro/bits/atomic.h
@@ -0,0 +1,81 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _BITS_ATOMIC_H
+#define _BITS_ATOMIC_H 1
+
+#include <asm/unistd.h>
+
+/* 32-bit integer compare-and-exchange.  */
+static __inline __attribute__ ((always_inline))
+int __atomic_cmpxchg_32 (volatile int *mem, int newval, int oldval)
+{
+  int result;
+  __asm__ __volatile__ ("swint1"
+                        : "=R00" (result), "=m" (*mem)
+                        : "R10" (__NR_FAST_cmpxchg), "R00" (mem),
+                          "R01" (oldval), "R02" (newval), "m" (*mem)
+                        : "r20", "r21", "r22", "r23", "r24",
+                          "r25", "r26", "r27", "r28", "r29", "memory");
+  return result;
+}
+
+#define atomic_compare_and_exchange_val_acq(mem, n, o)                 \
+  ((__typeof (*(mem)))                                                 \
+   ((sizeof (*(mem)) == 4) ?                                           \
+    __atomic_cmpxchg_32 ((int *) (mem), (int) (n), (int) (o)) :        \
+    __atomic_error_bad_argument_size ()))
+
+/* Atomically compute:
+     int old = *ptr;
+     *ptr = (old & mask) + addend;
+     return old;  */
+
+static __inline __attribute__ ((always_inline))
+int __atomic_update_32 (volatile int *mem, int mask, int addend)
+{
+  int result;
+  __asm__ __volatile__ ("swint1"
+                        : "=R00" (result), "=m" (*mem)
+                        : "R10" (__NR_FAST_atomic_update), "R00" (mem),
+                          "R01" (mask), "R02" (addend), "m" (*mem)
+                        : "r20", "r21", "r22", "r23", "r24",
+                          "r25", "r26", "r27", "r28", "r29", "memory");
+  return result;
+}
+
+/* Size-checked version of __atomic_update_32.  */
+#define __atomic_update(mem, mask, addend)                             \
+  ((__typeof (*(mem)))                                                 \
+   ((sizeof (*(mem)) == 4) ?                                           \
+    __atomic_update_32 ((int *) (mem), (int) (mask), (int) (addend)) : \
+    __atomic_error_bad_argument_size ()))
+
+#define atomic_exchange_acq(mem, newvalue)      \
+  __atomic_update ((mem), 0, (newvalue))
+#define atomic_exchange_and_add(mem, value)     \
+  __atomic_update ((mem), -1, (value))
+#define atomic_and_val(mem, mask)               \
+  __atomic_update ((mem), (mask), 0)
+#define atomic_or_val(mem, mask)                \
+  ({ __typeof (mask) __att1_v = (mask);         \
+     __atomic_update ((mem), ~__att1_v, __att1_v); })
+
+#include <sysdeps/tile/bits/atomic.h>
+
+#endif /* bits/atomic.h */
diff --git a/sysdeps/tile/tilepro/bits/wordsize.h b/sysdeps/tile/tilepro/bits/wordsize.h
new file mode 100644
index 0000000000..da587a2f12
--- /dev/null
+++ b/sysdeps/tile/tilepro/bits/wordsize.h
@@ -0,0 +1,3 @@
+/* Determine the wordsize from the preprocessor defines.  */
+
+#define __WORDSIZE 32
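[Editor's note: bits/atomic.h above builds every atomic operation out of one kernel-assisted primitive: __atomic_update_32 returns the old value of *mem and stores (old & mask) + addend. The following is a minimal, non-atomic C model of that algebra (the helper name update_model and the test values are illustrative, not part of the patch; the real primitive traps into the kernel via swint1). It shows how exchange, fetch-and-add, and bitwise and/or all fall out of a single (mask, addend) pair.]

#include <assert.h>

/* Non-atomic model of the tilepro fast-syscall primitive:
   return the old value and store (old & mask) + addend.  */
static int
update_model (int *mem, int mask, int addend)
{
  int old = *mem;
  *mem = (old & mask) + addend;
  return old;
}

int
main (void)
{
  int v;

  /* exchange: mask 0 discards the old value; addend is the new one.  */
  v = 42; assert (update_model (&v, 0, 7) == 42 && v == 7);

  /* fetch-and-add: mask -1 keeps the old value; addend is added.  */
  v = 10; assert (update_model (&v, -1, 5) == 10 && v == 15);

  /* and: the mask is applied, nothing is added.  */
  v = 0xff; update_model (&v, 0x0f, 0); assert (v == 0x0f);

  /* or: clear the mask bits first, then add them back in.  */
  v = 0x01; update_model (&v, ~0x10, 0x10); assert (v == 0x11);

  return 0;
}

[The "or" case works because the bits being added are already cleared by the mask, so the addition cannot carry: (old & ~m) + m == old | m. That is exactly why atomic_or_val passes (~mask, mask) to __atomic_update.]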
diff --git a/sysdeps/tile/tilepro/memchr.c b/sysdeps/tile/tilepro/memchr.c
new file mode 100644
index 0000000000..87e64d2216
--- /dev/null
+++ b/sysdeps/tile/tilepro/memchr.c
@@ -0,0 +1,72 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include <stdint.h>
+
+void *
+__memchr (const void *s, int c, size_t n)
+{
+  const uint32_t *last_word_ptr;
+  const uint32_t *p;
+  const char *last_byte_ptr;
+  uintptr_t s_int;
+  uint32_t goal, before_mask, v, bits;
+  char *ret;
+
+  if (__builtin_expect (n == 0, 0))
+    {
+      /* Don't dereference any memory if the array is empty.  */
+      return NULL;
+    }
+
+  /* Get an aligned pointer.  */
+  s_int = (uintptr_t) s;
+  p = (const uint32_t *) (s_int & -4);
+
+  /* Create four copies of the byte for which we are looking.  */
+  goal = 0x01010101 * (uint8_t) c;
+
+  /* Read the first word, but munge it so that bytes before the array
+     will not match goal.  Note that this shift count expression works
+     because we know shift counts are taken mod 32.  */
+  before_mask = (1 << (s_int << 3)) - 1;
+  v = (*p | before_mask) ^ (goal & before_mask);
+
+  /* Compute the address of the last byte.  */
+  last_byte_ptr = (const char *) s + n - 1;
+
+  /* Compute the address of the word containing the last byte.  */
+  last_word_ptr = (const uint32_t *) ((uintptr_t) last_byte_ptr & -4);
+
+  while ((bits = __insn_seqb (v, goal)) == 0)
+    {
+      if (__builtin_expect (p == last_word_ptr, 0))
+        {
+          /* We already read the last word in the array, so give up.  */
+          return NULL;
+        }
+      v = *++p;
+    }
+
+  /* We found a match, but it might be in a byte past the end of the array.  */
+  ret = ((char *) p) + (__insn_ctz (bits) >> 3);
+  return (ret <= last_byte_ptr) ? ret : NULL;
+}
+weak_alias (__memchr, memchr)
+libc_hidden_builtin_def (memchr)
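[Editor's note: memchr.c leans on two tilepro intrinsics: __insn_seqb, which as used here writes a 0x01 flag into each result byte where the operands' bytes are equal, and __insn_ctz, a count-trailing-zeros. On a target without them, the same word-at-a-time scan is usually written with the classic zero-byte bit trick. A hedged sketch follows; these are portable stand-ins, not the code above, and a little-endian target is assumed, as on tilepro.]

#include <stdint.h>

/* Portable stand-in for __insn_seqb (v, goal): returns a mask with
   the high bit set in each byte of V that equals the corresponding
   byte of GOAL.  */
static uint32_t
byte_matches (uint32_t v, uint32_t goal)
{
  uint32_t x = v ^ goal;               /* matching bytes become 0 */
  return (x - 0x01010101u) & ~x & 0x80808080u;
}

/* Byte index of the first match; only valid if bits != 0.
   __builtin_ctz is the standard GCC builtin.  */
static unsigned
first_match (uint32_t bits)
{
  return (unsigned) __builtin_ctz (bits) >> 3;
}

[Bytes above the first match can be flagged spuriously by the subtraction's borrow, so only the lowest set bit, which is all first_match consults, is meaningful.]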
diff --git a/sysdeps/tile/tilepro/memcpy.S b/sysdeps/tile/tilepro/memcpy.S
new file mode 100644
index 0000000000..1d496a4f2c
--- /dev/null
+++ b/sysdeps/tile/tilepro/memcpy.S
@@ -0,0 +1,397 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <arch/chip.h>
+#include <sysdep.h>
+
+        .text
+ENTRY (__memcpy)
+        FEEDBACK_ENTER(__memcpy)
+
+        /* r0 is the dest, r1 is the source, r2 is the size.  */
+
+        /* Save aside original dest so we can return it at the end.  */
+        { sw sp, lr; move r23, r0; or r4, r0, r1 }
+        cfi_offset (lr, 0)
+
+        /* Check for an empty size.  */
+        { bz r2, .Ldone; andi r4, r4, 3 }
+
+        /* Check for an unaligned source or dest.  */
+        { bnz r4, .Lcopy_unaligned_maybe_many; addli r4, r2, -256 }
+
+.Lcheck_aligned_copy_size:
+        /* If we are copying < 256 bytes, branch to simple case.  */
+        { blzt r4, .Lcopy_8_check; slti_u r8, r2, 8 }
+
+        /* Copying >= 256 bytes, so jump to complex prefetching loop.  */
+        { andi r6, r1, 63; j .Lcopy_many }
+
+/* Aligned 4 byte at a time copy loop.  */
+
+.Lcopy_8_loop:
+        /* Copy two words at a time to hide load latency.  */
+        { lw r3, r1; addi r1, r1, 4; slti_u r8, r2, 16 }
+        { lw r4, r1; addi r1, r1, 4 }
+        { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
+        { sw r0, r4; addi r0, r0, 4; addi r2, r2, -4 }
+.Lcopy_8_check:
+        { bzt r8, .Lcopy_8_loop; slti_u r4, r2, 4 }
+
+        /* Copy odd leftover word, if any.  */
+        { bnzt r4, .Lcheck_odd_stragglers }
+        { lw r3, r1; addi r1, r1, 4 }
+        { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
+
+.Lcheck_odd_stragglers:
+        { bnz r2, .Lcopy_unaligned_few }
+
+.Ldone:
+        { move r0, r23; jrp lr }
+
+/* Prefetching multiple cache line copy handler (for large transfers).  */
+
+        /* Copy words until r1 is cache-line-aligned.  */
+.Lalign_loop:
+        { lw r3, r1; addi r1, r1, 4 }
+        { andi r6, r1, 63 }
+        { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
+.Lcopy_many:
+        { bnzt r6, .Lalign_loop; addi r9, r0, 63 }
+
+        { addi r3, r1, 60; andi r9, r9, -64 }
+
+        /* No need to prefetch dst, we'll just do the wh64
+           right before we copy a line.  */
+        { lw r5, r3; addi r3, r3, 64; movei r4, 1 }
+        /* Intentionally stall for a few cycles to leave L2 cache alone.  */
+        { bnzt zero, .; move r27, lr }
+        { lw r6, r3; addi r3, r3, 64 }
+        /* Intentionally stall for a few cycles to leave L2 cache alone.  */
+        { bnzt zero, . }
+        { lw r7, r3; addi r3, r3, 64 }
+        /* Intentionally stall for a few cycles to leave L2 cache alone.  */
+        { bz zero, .Lbig_loop2 }
+
+        /* On entry to this loop:
+           - r0 points to the start of dst line 0
+           - r1 points to start of src line 0
+           - r2 >= (256 - 60), only the first time the loop trips.
+           - r3 contains r1 + 128 + 60 [pointer to end of source line 2]
+             This is our prefetch address.  When we get near the end
+             rather than prefetching off the end this is changed to point
+             to some "safe" recently loaded address.
+           - r5 contains *(r1 + 60) [i.e. last word of source line 0]
+           - r6 contains *(r1 + 64 + 60) [i.e. last word of source line 1]
+           - r9 contains ((r0 + 63) & -64)
+             [start of next dst cache line.]  */
+
+.Lbig_loop:
+        { jal .Lcopy_line2; add r15, r1, r2 }
+
+.Lbig_loop2:
+        /* Copy line 0, first stalling until r5 is ready.  */
+        { move r12, r5; lw r16, r1 }
+        { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
+        /* Prefetch several lines ahead.  */
+        { lw r5, r3; addi r3, r3, 64 }
+        { jal .Lcopy_line }
+
+        /* Copy line 1, first stalling until r6 is ready.  */
+        { move r12, r6; lw r16, r1 }
+        { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
+        /* Prefetch several lines ahead.  */
+        { lw r6, r3; addi r3, r3, 64 }
+        { jal .Lcopy_line }
+
+        /* Copy line 2, first stalling until r7 is ready.  */
+        { move r12, r7; lw r16, r1 }
+        { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
+        /* Prefetch several lines ahead.  */
+        { lw r7, r3; addi r3, r3, 64 }
+        /* Use up a caches-busy cycle by jumping back to the top of the
+           loop.  Might as well get it out of the way now.  */
+        { j .Lbig_loop }
+
+
+        /* On entry:
+           - r0 points to the destination line.
+           - r1 points to the source line.
+           - r3 is the next prefetch address.
+           - r9 holds the last address used for wh64.
+           - r12 = WORD_15
+           - r16 = WORD_0.
+           - r17 == r1 + 16.
+           - r27 holds saved lr to restore.
+
+           On exit:
+           - r0 is incremented by 64.
+           - r1 is incremented by 64, unless that would point to a word
+             beyond the end of the source array, in which case it is
+             redirected to point to an arbitrary word already in the cache.
+           - r2 is decremented by 64.
+           - r3 is unchanged, unless it points to a word beyond the
+             end of the source array, in which case it is redirected
+             to point to an arbitrary word already in the cache.
+             Redirecting is OK since if we are that close to the end
+             of the array we will not come back to this subroutine
+             and use the contents of the prefetched address.
+           - r4 is nonzero iff r2 >= 64.
+           - r9 is incremented by 64, unless it points beyond the
+             end of the last full destination cache line, in which
+             case it is redirected to a "safe address" that can be
+             clobbered (sp - 64)
+           - lr contains the value in r27.  */
+
+/* r26 unused */
+
+.Lcopy_line:
+        /* TODO: when r3 goes past the end, we would like to redirect it
+           to prefetch the last partial cache line (if any) just once, for
+           the benefit of the final cleanup loop.  But we don't want to
+           prefetch that line more than once, or subsequent prefetches
+           will go into the RTF.  But then .Lbig_loop should
+           unconditionally branch to top of loop to execute final
+           prefetch, and its nop should become a conditional branch.  */
+
+        /* We need two non-memory cycles here to cover the resources
+           used by the loads initiated by the caller.  */
+        { add r15, r1, r2 }
+.Lcopy_line2:
+        { slt_u r13, r3, r15; addi r17, r1, 16 }
+
+        /* NOTE: this will stall for one cycle as L1 is busy.  */
+
+        /* Fill second L1D line.  */
+        { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */
+
+        /* Prepare destination line for writing.  */
+        { wh64 r9; addi r9, r9, 64 }
+
+        /* Load seven words that are L1D hits to cover wh64 L2 usage.  */
+
+        /* Load the three remaining words from the last L1D line, which
+           we know has already filled the L1D.  */
+        { lw r4, r1; addi r1, r1, 4; addi r20, r1, 16 }   /* r4 = WORD_12 */
+        { lw r8, r1; addi r1, r1, 4; slt_u r13, r20, r15 } /* r8 = WORD_13 */
+        { lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 } /* r11 = WORD_14 */
+
+        /* Load the three remaining words from the first L1D line, first
+           stalling until it has filled by "looking at" r16.  */
+        { lw r13, r1; addi r1, r1, 4; move zero, r16 }    /* r13 = WORD_1 */
+        { lw r14, r1; addi r1, r1, 4 }                    /* r14 = WORD_2 */
+        { lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 }  /* r15 = WORD_3 */
+
+        /* Load second word from the second L1D line, first
+           stalling until it has filled by "looking at" r17.  */
+        { lw r19, r1; addi r1, r1, 4; move zero, r17 }    /* r19 = WORD_5 */
+
+        /* Store last word to the destination line, potentially dirtying it
+           for the first time, which keeps the L2 busy for two cycles.  */
+        { sw r10, r12 }                                   /* store(WORD_15) */
+
+        /* Use two L1D hits to cover the sw L2 access above.  */
+        { lw r10, r1; addi r1, r1, 4 }                    /* r10 = WORD_6 */
+        { lw r12, r1; addi r1, r1, 4 }                    /* r12 = WORD_7 */
+
+        /* Fill third L1D line.  */
+        { lw r18, r1; addi r1, r1, 4 }                    /* r18 = WORD_8 */
+
+        /* Store first L1D line.  */
+        { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 }    /* store(WORD_0) */
+        { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
+        { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
+        { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 }  /* store(WORD_3) */
+
+        /* Store second L1D line.  */
+        { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }  /* store(WORD_4) */
+        { sw r0, r19; addi r0, r0, 4 }                    /* store(WORD_5) */
+        { sw r0, r10; addi r0, r0, 4 }                    /* store(WORD_6) */
+        { sw r0, r12; addi r0, r0, 4 }                    /* store(WORD_7) */
+
+        { lw r13, r1; addi r1, r1, 4; move zero, r18 }    /* r13 = WORD_9 */
+        { lw r14, r1; addi r1, r1, 4 }                    /* r14 = WORD_10 */
+        { lw r15, r1; move r1, r20 }                      /* r15 = WORD_11 */
+
+        /* Store third L1D line.  */
+        { sw r0, r18; addi r0, r0, 4 }                    /* store(WORD_8) */
+        { sw r0, r13; addi r0, r0, 4 }                    /* store(WORD_9) */
+        { sw r0, r14; addi r0, r0, 4 }                    /* store(WORD_10) */
+        { sw r0, r15; addi r0, r0, 4 }                    /* store(WORD_11) */
+
+        /* Store rest of fourth L1D line.  */
+        { sw r0, r4; addi r0, r0, 4 }                     /* store(WORD_12) */
+        {
+        sw r0, r8                                         /* store(WORD_13) */
+        addi r0, r0, 4
+        /* Will r2 be > 64 after we subtract 64 below?  */
+        shri r4, r2, 7
+        }
+        {
+        sw r0, r11                                        /* store(WORD_14) */
+        addi r0, r0, 8
+        /* Record 64 bytes successfully copied.  */
+        addi r2, r2, -64
+        }
+
+        { jrp lr; move lr, r27 }
+
+        /* Convey to the backtrace library that the stack frame is
+           size zero, and the real return address is on the stack
+           rather than in 'lr'.  */
+        { info 8 }
+
+        .align 64
+.Lcopy_unaligned_maybe_many:
+        /* Skip the setup overhead if we aren't copying many bytes.  */
+        { slti_u r8, r2, 20; sub r4, zero, r0 }
+        { bnzt r8, .Lcopy_unaligned_few; andi r4, r4, 3 }
+        { bz r4, .Ldest_is_word_aligned; add r18, r1, r2 }
+
+/* Unaligned 4 byte at a time copy handler.  */
+
+        /* Copy single bytes until r0 == 0 mod 4, so we can store words.  */
+.Lalign_dest_loop:
+        { lb_u r3, r1; addi r1, r1, 1; addi r4, r4, -1 }
+        { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
+        { bnzt r4, .Lalign_dest_loop; andi r3, r1, 3 }
+
+        /* If source and dest are now *both* aligned, do an aligned copy.  */
+        { bz r3, .Lcheck_aligned_copy_size; addli r4, r2, -256 }
+
+.Ldest_is_word_aligned:
+
+        { andi r8, r0, 63; lwadd_na r6, r1, 4 }
+        { slti_u r9, r2, 64; bz r8, .Ldest_is_L2_line_aligned }
+
+        /* This copies unaligned words until either there are fewer
+           than 4 bytes left to copy, or until the destination pointer
+           is cache-aligned, whichever comes first.
+
+           On entry:
+           - r0 is the next store address.
+           - r1 points 4 bytes past the load address corresponding to r0.
+           - r2 >= 4
+           - r6 is the next aligned word loaded.  */
+.Lcopy_unaligned_src_words:
+        { lwadd_na r7, r1, 4; slti_u r8, r2, 4 + 4 }
+        /* stall */
+        { dword_align r6, r7, r1; slti_u r9, r2, 64 + 4 }
+        { swadd r0, r6, 4; addi r2, r2, -4 }
+        { bnz r8, .Lcleanup_unaligned_words; andi r8, r0, 63 }
+        { bnzt r8, .Lcopy_unaligned_src_words; move r6, r7 }
+
+        /* On entry:
+           - r0 is the next store address.
+           - r1 points 4 bytes past the load address corresponding to r0.
+           - r2 >= 4 (# of bytes left to store).
+           - r6 is the next aligned src word value.
+           - r9 = (r2 < 64U).
+           - r18 points one byte past the end of source memory.  */
+.Ldest_is_L2_line_aligned:
+
+        {
+        /* Not a full cache line remains.  */
+        bnz r9, .Lcleanup_unaligned_words
+        move r7, r6
+        }
+
+        /* r2 >= 64 */
+
+        /* Kick off two prefetches, but don't go past the end.  */
+        { addi r3, r1, 63 - 4; addi r8, r1, 64 + 63 - 4 }
+        { prefetch r3; move r3, r8; slt_u r8, r8, r18 }
+        { mvz r3, r8, r1; addi r8, r3, 64 }
+        { prefetch r3; move r3, r8; slt_u r8, r8, r18 }
+        { mvz r3, r8, r1; movei r17, 0 }
+
+.Lcopy_unaligned_line:
+        /* Prefetch another line.  */
+        { prefetch r3; addi r15, r1, 60; addi r3, r3, 64 }
+        /* Fire off a load of the last word we are about to copy.  */
+        { lw_na r15, r15; slt_u r8, r3, r18 }
+
+        { mvz r3, r8, r1; wh64 r0 }
+
+        /* This loop runs twice.
+
+           On entry:
+           - r17 is even before the first iteration, and odd before
+             the second.  It is incremented inside the loop.  Encountering
+             an even value at the end of the loop makes it stop.  */
+.Lcopy_half_an_unaligned_line:
+        {
+        /* Stall until the last byte is ready.  In the steady state this
+           guarantees all words to load below will be in the L2 cache, which
+           avoids shunting the loads to the RTF.  */
+        move zero, r15
+        lwadd_na r7, r1, 16
+        }
+        { lwadd_na r11, r1, 12 }
+        { lwadd_na r14, r1, -24 }
+        { lwadd_na r8, r1, 4 }
+        { lwadd_na r9, r1, 4 }
+        {
+        lwadd_na r10, r1, 8
+        /* r16 = (r2 < 64), after we subtract 32 from r2 below.  */
+        slti_u r16, r2, 64 + 32
+        }
+        { lwadd_na r12, r1, 4; addi r17, r17, 1 }
+        { lwadd_na r13, r1, 8; dword_align r6, r7, r1 }
+        { swadd r0, r6, 4; dword_align r7, r8, r1 }
+        { swadd r0, r7, 4; dword_align r8, r9, r1 }
+        { swadd r0, r8, 4; dword_align r9, r10, r1 }
+        { swadd r0, r9, 4; dword_align r10, r11, r1 }
+        { swadd r0, r10, 4; dword_align r11, r12, r1 }
+        { swadd r0, r11, 4; dword_align r12, r13, r1 }
+        { swadd r0, r12, 4; dword_align r13, r14, r1 }
+        { swadd r0, r13, 4; addi r2, r2, -32 }
+        { move r6, r14; bbst r17, .Lcopy_half_an_unaligned_line }
+
+        { bzt r16, .Lcopy_unaligned_line; move r7, r6 }
+
+        /* On entry:
+           - r0 is the next store address.
+           - r1 points 4 bytes past the load address corresponding to r0.
+           - r2 >= 0 (# of bytes left to store).
+           - r7 is the next aligned src word value.  */
+.Lcleanup_unaligned_words:
+        /* Handle any trailing bytes.  */
+        { bz r2, .Lcopy_unaligned_done; slti_u r8, r2, 4 }
+        { bzt r8, .Lcopy_unaligned_src_words; move r6, r7 }
+
+        /* Move r1 back to the point where it corresponds to r0.  */
+        { addi r1, r1, -4 }
+
+        /* Fall through */
+
+/* 1 byte at a time copy handler.  */
+
+.Lcopy_unaligned_few:
+        { lb_u r3, r1; addi r1, r1, 1 }
+        { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
+        { bnzt r2, .Lcopy_unaligned_few }
+
+.Lcopy_unaligned_done:
+
+        { move r0, r23; jrp lr }
+
+END (__memcpy)
+
+weak_alias (__memcpy, memcpy)
+libc_hidden_builtin_def (memcpy)
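[Editor's note: the large aligned-copy path above keeps three source cache lines in flight through the explicit loads into r5/r6/r7, issues wh64 on each destination line so the cache allocates it without fetching its old contents, and moves one 64-byte line (16 words) per trip through .Lcopy_line. A loose C rendering of just that structure follows; it is illustrative only. __builtin_prefetch is the standard GCC builtin, while wh64 has no portable equivalent and is only marked by a comment. The real code also software-pipelines the sixteen loads and stores by hand and clamps its prefetch pointer near the end of the buffer; this sketch does neither.]

#include <stddef.h>
#include <stdint.h>

/* C-level sketch of the big-loop strategy in __memcpy above.
   DST and SRC must be word-aligned; LINES full 64-byte lines
   must be safe to read and write.  */
static void
copy_lines (uint32_t *dst, const uint32_t *src, size_t lines)
{
  enum { WORDS_PER_LINE = 64 / sizeof (uint32_t) };

  while (lines-- != 0)
    {
      size_t i;

      /* Stay a few lines ahead of the loads, as the r5/r6/r7
         pipeline above does (0 = prefetch for reading).  */
      __builtin_prefetch (src + 3 * WORDS_PER_LINE, 0);

      /* The tilepro code issues wh64 on the destination line here,
         so it is dirtied without first being fetched from memory.  */
      for (i = 0; i < WORDS_PER_LINE; i++)
        dst[i] = src[i];

      dst += WORDS_PER_LINE;
      src += WORDS_PER_LINE;
    }
}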
diff --git a/sysdeps/tile/tilepro/memset.c b/sysdeps/tile/tilepro/memset.c
new file mode 100644
index 0000000000..85d6b810ed
--- /dev/null
+++ b/sysdeps/tile/tilepro/memset.c
@@ -0,0 +1,151 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include <stdint.h>
+#include <arch/chip.h>
+
+void *
+__memset (void *s, int c, size_t n)
+{
+  uint32_t *out32;
+  int n32;
+  uint32_t v16, v32;
+  uint8_t *out8 = s;
+  int to_align32;
+
+  /* Experimentation shows that a trivial tight loop is a win up until
+     around a size of 20, where writing a word at a time starts to win.  */
+#define BYTE_CUTOFF 20
+
+#if BYTE_CUTOFF < 3
+  /* This must be at least this big, or some code later
+     on doesn't work.  */
+# error "BYTE_CUTOFF is too small."
+#endif
+
+  if (n < BYTE_CUTOFF)
+    {
+      /* Strangely, this turns out to be the tightest way to write
+         this loop.  */
+      if (n != 0)
+        {
+          do
+            {
+              /* Strangely, combining these into one line performs worse.  */
+              *out8 = c;
+              out8++;
+            }
+          while (--n != 0);
+        }
+
+      return s;
+    }
+
+  /* Align 'out8'.  We know n >= 3 so this won't write past the end.  */
+  while (((uintptr_t) out8 & 3) != 0)
+    {
+      *out8++ = c;
+      --n;
+    }
+
+  /* Align 'n'.  */
+  while (n & 3)
+    out8[--n] = c;
+
+  out32 = (uint32_t *) out8;
+  n32 = n >> 2;
+
+  /* Tile input byte out to 32 bits.  */
+  v16 = __insn_intlb (c, c);
+  v32 = __insn_intlh (v16, v16);
+
+  /* This must be at least 8 or the following loop doesn't work.  */
+#define CACHE_LINE_SIZE_IN_WORDS (CHIP_L2_LINE_SIZE() / 4)
+
+  /* Determine how many words we need to emit before the 'out32'
+     pointer becomes aligned modulo the cache line size.  */
+  to_align32 = (-((uintptr_t) out32 >> 2)) & (CACHE_LINE_SIZE_IN_WORDS - 1);
+
+  /* Only bother aligning and using wh64 if there is at least one full
+     cache line to process.  This check also prevents overrunning the
+     end of the buffer with alignment words.  */
+  if (to_align32 <= n32 - CACHE_LINE_SIZE_IN_WORDS)
+    {
+      int lines_left;
+
+      /* Align out32 mod the cache line size so we can use wh64.  */
+      n32 -= to_align32;
+      for (; to_align32 != 0; to_align32--)
+        {
+          *out32 = v32;
+          out32++;
+        }
+
+      /* Use unsigned divide to turn this into a right shift.  */
+      lines_left = (unsigned) n32 / CACHE_LINE_SIZE_IN_WORDS;
+
+      do
+        {
+          /* Only wh64 a few lines at a time, so we don't exceed the
+             maximum number of victim lines.  */
+          int x = ((lines_left < CHIP_MAX_OUTSTANDING_VICTIMS ())
+                   ? lines_left : CHIP_MAX_OUTSTANDING_VICTIMS ());
+          uint32_t *wh = out32;
+          int i = x;
+          int j;
+
+          lines_left -= x;
+
+          do
+            {
+              __insn_wh64 (wh);
+              wh += CACHE_LINE_SIZE_IN_WORDS;
+            }
+          while (--i);
+
+          for (j = x * (CACHE_LINE_SIZE_IN_WORDS / 4); j != 0; j--)
+            {
+              *out32++ = v32;
+              *out32++ = v32;
+              *out32++ = v32;
+              *out32++ = v32;
+            }
+        }
+      while (lines_left != 0);
+
+      /* We processed all full lines above, so only this many words
+         remain to be processed.  */
+      n32 &= CACHE_LINE_SIZE_IN_WORDS - 1;
+    }
+
+  /* Now handle any leftover values.  */
+  if (n32 != 0)
+    {
+      do
+        {
+          *out32 = v32;
+          out32++;
+        }
+      while (--n32 != 0);
+    }
+
+  return s;
+}
+weak_alias (__memset, memset)
+libc_hidden_builtin_def (memset)
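[Editor's note: two small pieces of arithmetic in __memset are worth unpacking: the count of words to emit before the next cache-line boundary, and the byte tiling that intlb/intlh perform, which is equivalent to multiplying the byte by 0x01010101. A self-checking sketch follows, assuming tilepro's 64-byte L2 line; the function name is hypothetical.]

#include <assert.h>
#include <stdint.h>

#define LINE_WORDS 16   /* 64-byte line / 4-byte words, as on tilepro */

/* The to_align32 computation from __memset above, spelled out.
   ADDR must already be 4-byte aligned.  */
static int
words_until_line_aligned (uintptr_t addr)
{
  /* -(addr >> 2) mod LINE_WORDS: how many words until the word
     index wraps to a multiple of LINE_WORDS.  */
  return (int) (-(addr >> 2) & (LINE_WORDS - 1));
}

int
main (void)
{
  /* Already line aligned: nothing to emit.  */
  assert (words_until_line_aligned (0x1000) == 0);
  /* One word past a line boundary: 15 words to the next line.  */
  assert (words_until_line_aligned (0x1004) == 15);
  /* Last word of a line: one word to go.  */
  assert (words_until_line_aligned (0x103c) == 1);

  /* Byte tiling: the portable equivalent of intlb/intlh.  */
  assert (0x01010101u * (uint8_t) 0xab == 0xabababab);
  return 0;
}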
diff --git a/sysdeps/tile/tilepro/memusage.h b/sysdeps/tile/tilepro/memusage.h
new file mode 100644
index 0000000000..1ce1a29fbe
--- /dev/null
+++ b/sysdeps/tile/tilepro/memusage.h
@@ -0,0 +1,29 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <arch/spr_def.h>
+
+#define GETSP() ({ register uintptr_t stack_ptr asm ("sp"); stack_ptr; })
+
+#define GETTIME(low,high) \
+  { \
+    low = __insn_mfspr (SPR_CYCLE_LOW); \
+    high = __insn_mfspr (SPR_CYCLE_HIGH); \
+  }
+
+#include <sysdeps/generic/memusage.h>
diff --git a/sysdeps/tile/tilepro/rawmemchr.c b/sysdeps/tile/tilepro/rawmemchr.c
new file mode 100644
index 0000000000..46d9593a7f
--- /dev/null
+++ b/sysdeps/tile/tilepro/rawmemchr.c
@@ -0,0 +1,45 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include <stdint.h>
+
+void *
+__rawmemchr (const void *s, int c)
+{
+  /* Get an aligned pointer.  */
+  const uintptr_t s_int = (uintptr_t) s;
+  const uint32_t *p = (const uint32_t *) (s_int & -4);
+
+  /* Create four copies of the byte for which we are looking.  */
+  const uint32_t goal = 0x01010101 * (uint8_t) c;
+
+  /* Read the first word, but munge it so that bytes before the array
+     will not match goal.  Note that this shift count expression works
+     because we know shift counts are taken mod 32.  */
+  const uint32_t before_mask = (1 << (s_int << 3)) - 1;
+  uint32_t v = (*p | before_mask) ^ (goal & before_mask);
+
+  uint32_t bits;
+  while ((bits = __insn_seqb (v, goal)) == 0)
+    v = *++p;
+
+  return ((char *) p) + (__insn_ctz (bits) >> 3);
+}
+libc_hidden_def (__rawmemchr)
+weak_alias (__rawmemchr, rawmemchr)
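[Editor's note: the before_mask expression in __rawmemchr, (1 << (s_int << 3)) - 1, deliberately relies on the hardware taking shift counts mod 32: shifting by 8 * s_int really shifts by 8 * (s_int % 4), yielding a mask of exactly the bytes of the aligned word that precede the string (little-endian byte order). Below is a portable demonstration with the mod made explicit, since C itself leaves oversized shift counts undefined; the function name is illustrative.]

#include <assert.h>
#include <stdint.h>

/* What '(1 << (s_int << 3)) - 1' evaluates to for each misalignment,
   with the mod-32 shift semantics the code relies on made explicit.  */
static uint32_t
before_mask (uintptr_t s_int)
{
  unsigned shift = (unsigned) (s_int << 3) % 32;   /* (s_int % 4) * 8 */
  return ((uint32_t) 1 << shift) - 1;
}

int
main (void)
{
  assert (before_mask (0x2000) == 0x00000000);  /* aligned: mask nothing */
  assert (before_mask (0x2001) == 0x000000ff);  /* 1 byte before start */
  assert (before_mask (0x2002) == 0x0000ffff);  /* 2 bytes before start */
  assert (before_mask (0x2003) == 0x00ffffff);  /* 3 bytes before start */
  return 0;
}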
diff --git a/sysdeps/tile/tilepro/strchr.c b/sysdeps/tile/tilepro/strchr.c
new file mode 100644
index 0000000000..ecdbcfe5d1
--- /dev/null
+++ b/sysdeps/tile/tilepro/strchr.c
@@ -0,0 +1,68 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include <stdint.h>
+
+#undef strchr
+
+char *
+strchr (const char *s, int c)
+{
+  int z, g;
+
+  /* Get an aligned pointer.  */
+  const uintptr_t s_int = (uintptr_t) s;
+  const uint32_t *p = (const uint32_t *) (s_int & -4);
+
+  /* Create four copies of the byte for which we are looking.  */
+  const uint32_t goal = 0x01010101 * (uint8_t) c;
+
+  /* Read the first aligned word, but force bytes before the string to
+     match neither zero nor goal (we make sure the high bit of each
+     byte is 1, and the low 7 bits are all the opposite of the goal
+     byte).  Note that this shift count expression works because we
+     know shift counts are taken mod 32.  */
+  const uint32_t before_mask = (1 << (s_int << 3)) - 1;
+  uint32_t v = (*p | before_mask) ^ (goal & __insn_shrib (before_mask, 1));
+
+  uint32_t zero_matches, goal_matches;
+  while (1)
+    {
+      /* Look for a terminating '\0'.  */
+      zero_matches = __insn_seqb (v, 0);
+
+      /* Look for the goal byte.  */
+      goal_matches = __insn_seqb (v, goal);
+
+      if (__builtin_expect ((zero_matches | goal_matches) != 0, 0))
+        break;
+
+      v = *++p;
+    }
+
+  z = __insn_ctz (zero_matches);
+  g = __insn_ctz (goal_matches);
+
+  /* If we found c before '\0' we got a match.  Note that if c == '\0'
+     then g == z, and we correctly return the address of the '\0'
+     rather than NULL.  */
+  return (g <= z) ? ((char *) p) + (g >> 3) : NULL;
+}
+weak_alias (strchr, index)
+libc_hidden_builtin_def (strchr)
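[Editor's note: strchr's prefix poisoning is subtler than rawmemchr's because the bytes before the string must match neither the goal nor '\0'. Assuming __insn_shrib (x, 1) shifts each byte of x right by one independently, a poisoned byte ends up as 0xff ^ (goal & 0x7f): its high bit is set, so it can never equal '\0', and its low seven bits are the complement of the goal's, so it can never equal the goal. A small self-check of that claim follows; this is model code, not the patch.]

#include <assert.h>
#include <stdint.h>

/* Model of the prefix poisoning in strchr above: the value a masked
   byte takes after (0xff ^ (goal & 0x7f)).  */
static uint8_t
poison_byte (uint8_t goal)
{
  return (uint8_t) (0xff ^ (goal & 0x7f));
}

int
main (void)
{
  unsigned c;
  for (c = 0; c < 256; c++)
    {
      uint8_t b = poison_byte ((uint8_t) c);
      /* A poisoned byte matches neither '\0' nor the goal byte.  */
      assert (b != 0 && b != (uint8_t) c);
    }
  return 0;
}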
diff --git a/sysdeps/tile/tilepro/strchrnul.c b/sysdeps/tile/tilepro/strchrnul.c
new file mode 100644
index 0000000000..2dccfe1908
--- /dev/null
+++ b/sysdeps/tile/tilepro/strchrnul.c
@@ -0,0 +1,65 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include <stdint.h>
+
+char *
+__strchrnul (const char *s, int c)
+{
+  int z, g;
+
+  /* Get an aligned pointer.  */
+  const uintptr_t s_int = (uintptr_t) s;
+  const uint32_t *p = (const uint32_t *) (s_int & -4);
+
+  /* Create four copies of the byte for which we are looking.  */
+  const uint32_t goal = 0x01010101 * (uint8_t) c;
+
+  /* Read the first aligned word, but force bytes before the string to
+     match neither zero nor goal (we make sure the high bit of each
+     byte is 1, and the low 7 bits are all the opposite of the goal
+     byte).  Note that this shift count expression works because we
+     know shift counts are taken mod 32.  */
+  const uint32_t before_mask = (1 << (s_int << 3)) - 1;
+  uint32_t v = (*p | before_mask) ^ (goal & __insn_shrib (before_mask, 1));
+
+  uint32_t zero_matches, goal_matches;
+  while (1)
+    {
+      /* Look for a terminating '\0'.  */
+      zero_matches = __insn_seqb (v, 0);
+
+      /* Look for the goal byte.  */
+      goal_matches = __insn_seqb (v, goal);
+
+      if (__builtin_expect ((zero_matches | goal_matches) != 0, 0))
+        break;
+
+      v = *++p;
+    }
+
+  z = __insn_ctz (zero_matches);
+  g = __insn_ctz (goal_matches);
+
+  /* Return a pointer to the NUL or goal, whichever is first.  */
+  if (z < g)
+    g = z;
+  return ((char *) p) + (g >> 3);
+}
+weak_alias (__strchrnul, strchrnul)
diff --git a/sysdeps/tile/tilepro/strlen.c b/sysdeps/tile/tilepro/strlen.c
new file mode 100644
index 0000000000..c40ee56c6f
--- /dev/null
+++ b/sysdeps/tile/tilepro/strlen.c
@@ -0,0 +1,39 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include <stdint.h>
+
+size_t
+strlen (const char *s)
+{
+  /* Get an aligned pointer.  */
+  const uintptr_t s_int = (uintptr_t) s;
+  const uint32_t *p = (const uint32_t *) (s_int & -4);
+
+  /* Read the first word, but force bytes before the string to be nonzero.
+     This expression works because we know shift counts are taken mod 32.  */
+  uint32_t v = *p | ((1 << (s_int << 3)) - 1);
+
+  uint32_t bits;
+  while ((bits = __insn_seqb (v, 0)) == 0)
+    v = *++p;
+
+  return ((const char *) p) + (__insn_ctz (bits) >> 3) - s;
+}
+libc_hidden_builtin_def (strlen)
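[Editor's note: for comparison with the intrinsic-based strlen above, here is a hedged portable rendering of the same word-at-a-time loop. The zero-byte trick stands in for __insn_seqb, the shift-count mod is made explicit, and the aligned word reads remain safe for the same reason as in the original: an aligned word never crosses a page boundary. The function name is hypothetical; little-endian is assumed, as on tilepro.]

#include <stddef.h>
#include <stdint.h>

size_t
strlen_swar (const char *s)
{
  const uintptr_t s_int = (uintptr_t) s;
  const uint32_t *p = (const uint32_t *) (s_int & -4);

  /* Force the bytes before the string to be nonzero, computing the
     shift with an explicit mask since C leaves oversized shift
     counts undefined.  */
  uint32_t v = *p | (((uint32_t) 1 << ((s_int & 3) << 3)) - 1);

  /* Scan a word at a time for a zero byte; only the lowest set flag
     in the result is guaranteed meaningful, which is all we use.  */
  uint32_t zeros;
  while ((zeros = (v - 0x01010101u) & ~v & 0x80808080u) == 0)
    v = *++p;

  return (size_t) (((const char *) p - s)
                   + ((unsigned) __builtin_ctz (zeros) >> 3));
}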
diff --git a/sysdeps/tile/tilepro/strrchr.c b/sysdeps/tile/tilepro/strrchr.c
new file mode 100644
index 0000000000..1c7f5e7014
--- /dev/null
+++ b/sysdeps/tile/tilepro/strrchr.c
@@ -0,0 +1,73 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <string.h>
+#include <stdint.h>
+
+char *
+strrchr (const char *s, int c)
+{
+  /* Get an aligned pointer.  */
+  const uintptr_t s_int = (uintptr_t) s;
+  const uint32_t *p = (const uint32_t *) (s_int & -4);
+
+  /* Create four copies of the byte for which we are looking.  */
+  const uint32_t goal = 0x01010101 * (uint8_t) c;
+
+  /* Read the first aligned word, but force bytes before the string to
+     match neither zero nor goal (we make sure the high bit of each
+     byte is 1, and the low 7 bits are all the opposite of the goal
+     byte).  Note that this shift count expression works because we
+     know shift counts are taken mod 32.  */
+  const uint32_t before_mask = (1 << (s_int << 3)) - 1;
+  uint32_t v = (*p | before_mask) ^ (goal & __insn_shrib (before_mask, 1));
+  const char *found = NULL;
+  uint32_t zero_matches, goal_matches;
+  while (1)
+    {
+      /* Look for a terminating '\0'.  */
+      zero_matches = __insn_seqb (v, 0);
+
+      /* Look for the goal byte.  */
+      goal_matches = __insn_seqb (v, goal);
+
+      /* If we found the goal, record the last offset.  */
+      if (__builtin_expect (goal_matches != 0, 0))
+        {
+          if (__builtin_expect (zero_matches != 0, 0))
+            {
+              /* Clear any goal after the first zero.  */
+              int first_nul = __insn_ctz (zero_matches);
+              /* The number of top bits we need to clear is
+                 32 - (first_nul + 8).  */
+              int shift_amt = (24 - first_nul);
+              goal_matches <<= shift_amt;
+              goal_matches >>= shift_amt;
+            }
+          if (__builtin_expect (goal_matches != 0, 1))
+            found = ((char *) p) + 3 - (__insn_clz (goal_matches) >> 3);
+        }
+
+      if (__builtin_expect (zero_matches != 0, 0))
+        return (char *) found;
+
+      v = *++p;
+    }
+}
+weak_alias (strrchr, rindex)
+libc_hidden_builtin_def (strrchr)
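[Editor's note: the flag-trimming step in strrchr deserves a worked example. __insn_seqb leaves a 0x01 flag in each matching byte, so the first NUL's flag sits at bit 8k for byte k; shifting goal_matches left and then right by 24 - 8k discards goal flags in bytes beyond the terminator while keeping byte k itself, which matters when c is '\0'. A self-checking model with illustrative names:]

#include <assert.h>
#include <stdint.h>

/* Given per-byte match flags and the bit position of the first NUL
   flag, drop goal flags that lie in bytes after the NUL.  */
static uint32_t
trim_after_nul (uint32_t goal_matches, int first_nul_bit)
{
  int shift_amt = 24 - first_nul_bit;   /* 32 - (first_nul_bit + 8) */
  goal_matches <<= shift_amt;
  goal_matches >>= shift_amt;
  return goal_matches;
}

int
main (void)
{
  /* NUL in byte 1 (bit 8); goal matches in bytes 0 and 3.  The
     byte-3 match lies past the terminator and must be dropped.  */
  assert (trim_after_nul (0x01000001u, 8) == 0x00000001u);

  /* NUL in byte 3 (bit 24): nothing to drop.  */
  assert (trim_after_nul (0x01010101u, 24) == 0x01010101u);
  return 0;
}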