/* Optimized 64-bit memset implementation for POWER6.
   Copyright (C) 1997-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>

/* void * [r3] memset (void *s [r3], int c [r4], size_t n [r5]);
   Returns 's'.

   The memset is done at several granularities: byte (8 bits), word
   (32 bits), doubleword (64 bits), and 32-byte sectors.  There is a
   special case for setting whole cache lines (128 bytes on POWER6) to 0,
   to take advantage of the dcbz instruction.  */
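/* The character is replicated across the doubleword in stages as wider
   alignment is established: byte -> halfword -> word -> doubleword,
   equivalent to multiplying the byte by 0x0101010101010101.  */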

#ifndef MEMSET
# define MEMSET memset
#endif
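/* A multiarch build may redefine MEMSET (e.g. to __memset_power6) before
   including this file, so the same body can be assembled under an
   alternate symbol name.  */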
	.machine power6
ENTRY_TOCLESS (MEMSET, 7)
	CALL_MCOUNT 3

#define rTMP	r0
#define rRTN	r3	/* Initial value of 1st argument.  */
#define rMEMP0	r3	/* Original value of 1st arg.  */
#define rCHR	r4	/* Char to set in each byte.  */
#define rLEN	r5	/* Length of region to set.  */
#define rMEMP	r6	/* Address at which we are storing.  */
#define rALIGN	r7	/* Number of bytes we are setting now (when aligning). */
#define rMEMP2	r8
#define rMEMP3	r9	/* Alt mem pointer.  */
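/* rRTN (r3) is never written after entry: all stores advance rMEMP (r6)
   or rMEMP3 (r9), so the original pointer is returned in r3 with no copy
   needed at exit.  */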
L(_memset):
/* Take care of the case of size <= 8.  */
	cmpldi	cr1, rLEN, 8
	andi.	rALIGN, rMEMP0, 7
	mr	rMEMP, rMEMP0
	ble	cr1, L(small)

/* Align to doubleword boundary.  */
	cmpldi	cr5, rLEN, 31
	insrdi	rCHR, rCHR, 8, 48	/* Replicate byte to halfword.  */
	beq+	L(aligned2)
	mtcrf	0x01, rMEMP0
	subfic	rALIGN, rALIGN, 8
	cror	28,30,31		/* Detect odd word aligned.  */
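/* mtcrf 0x01 above copied the low four address bits into CR7: bit 31 is
   address bit 0, bit 30 is address bit 1, bit 29 is address bit 2.  The
   cror sets bit 28 to (bit 30 | bit 31), i.e. "not word aligned".  */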
	add	rMEMP, rMEMP, rALIGN
	sub	rLEN, rLEN, rALIGN
	insrdi	rCHR, rCHR, 16, 32	/* Replicate halfword to word.  */
	bt	29, L(g4)
/* Process the even word of doubleword.  */
	bf+	31, L(g2)
	stb	rCHR, 0(rMEMP0)
	bt	30, L(g4x)
L(g2):
	sth	rCHR, -6(rMEMP)
L(g4x):
	stw	rCHR, -4(rMEMP)
	b	L(aligned)
/* Process the odd word of doubleword.  */
L(g4):
	bf	28, L(g4x) /* Word aligned within the odd word: store it.  */
	bf+	31, L(g0)
	stb	rCHR, 0(rMEMP0)
	bt	30, L(aligned)
L(g0):
	sth	rCHR, -2(rMEMP)

/* Handle the case of size <= 31.  */
L(aligned2):
	insrdi	rCHR, rCHR, 16, 32	/* Replicate halfword to word.  */
L(aligned):
	mtcrf	0x01, rLEN
	ble	cr5, L(medium)
/* Align to 32-byte boundary.  */
	andi.	rALIGN, rMEMP, 0x18
	subfic	rALIGN, rALIGN, 0x20
	insrdi	rCHR, rCHR, 32, 0	/* Replicate word to double word. */
	beq	L(caligned)
	mtcrf	0x01, rALIGN
	add	rMEMP, rMEMP, rALIGN
	sub	rLEN, rLEN, rALIGN
	cmplwi	cr1, rALIGN, 0x10
	mr	rMEMP2, rMEMP
	bf	28, L(a1)
	stdu	rCHR, -8(rMEMP2)
L(a1):	blt	cr1, L(a2)
	std	rCHR, -8(rMEMP2)
	stdu	rCHR, -16(rMEMP2)
L(a2):

/* Now aligned to a 32 byte boundary.  */
        .align 4
L(caligned):
	cmpldi	cr1, rCHR, 0
	clrrdi.	rALIGN, rLEN, 5
	mtcrf	0x01, rLEN
	beq	cr1, L(zloopstart) /* Special case for clearing memory using dcbz.  */
	beq	L(medium)	/* We may not actually get to do a full line.  */
	.align 4
/* Storing a non-zero "c" value.  We are aligned at a sector (32-byte)
   boundary but may not be at a cache line (128-byte) boundary.  */
L(nzloopstart):
/* memset in 32-byte chunks until we get to a cache line boundary.
   If rLEN is less than the distance to the next cache-line boundary use
   cacheAligned1 code to finish the tail.  */
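/* Being 32-byte aligned, the distance to the next 128-byte boundary is
   0, 32, 64, or 96 bytes, so at most three conditional 32-byte blocks
   are needed below.  */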
	cmpldi	cr1,rLEN,128

	andi.	rTMP,rMEMP,127
	blt	cr1,L(cacheAligned1)
	addi	rMEMP3,rMEMP,32
	beq	L(nzCacheAligned)
	addi	rLEN,rLEN,-32
	std	rCHR,0(rMEMP)
	std	rCHR,8(rMEMP)
	std	rCHR,16(rMEMP)
	addi	rMEMP,rMEMP,32
	andi.	rTMP,rMEMP3,127
	std	rCHR,-8(rMEMP3)

	beq	L(nzCacheAligned)
	addi	rLEN,rLEN,-32
	std	rCHR,0(rMEMP3)
	addi	rMEMP,rMEMP,32
	std	rCHR,8(rMEMP3)
	andi.	rTMP,rMEMP,127
	std	rCHR,16(rMEMP3)
	std	rCHR,24(rMEMP3)

	beq	L(nzCacheAligned)
	addi	rLEN,rLEN,-32
	std	rCHR,32(rMEMP3)
	addi	rMEMP,rMEMP,32
	cmpldi	cr1,rLEN,128
	std	rCHR,40(rMEMP3)
	cmpldi	cr6,rLEN,256
	li	rMEMP2,128
	std	rCHR,48(rMEMP3)
	std	rCHR,56(rMEMP3)
	blt	cr1,L(cacheAligned1)
	b	L(nzCacheAligned128)

/* Now we are aligned to the cache line and can use dcbtst.  */
        .align 4
L(nzCacheAligned):
	cmpldi	cr1,rLEN,128
	blt	cr1,L(cacheAligned1)
	b	L(nzCacheAligned128)
        .align 5
L(nzCacheAligned128):
	cmpldi	cr1,rLEN,256
	addi	rMEMP3,rMEMP,64
	std	rCHR,0(rMEMP)
	std	rCHR,8(rMEMP)
	std	rCHR,16(rMEMP)
	std	rCHR,24(rMEMP)
	std	rCHR,32(rMEMP)
	std	rCHR,40(rMEMP)
	std	rCHR,48(rMEMP)
	std	rCHR,56(rMEMP)
	addi	rMEMP,rMEMP3,64
	addi	rLEN,rLEN,-128
	std	rCHR,0(rMEMP3)
	std	rCHR,8(rMEMP3)
	std	rCHR,16(rMEMP3)
	std	rCHR,24(rMEMP3)
	std	rCHR,32(rMEMP3)
	std	rCHR,40(rMEMP3)
	std	rCHR,48(rMEMP3)
	std	rCHR,56(rMEMP3)
	bge	cr1,L(nzCacheAligned128)
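/* dcbtst is a touch-for-store hint: prime the cache line that the
   remaining (< 128 byte) tail will store into.  */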
	dcbtst	0,rMEMP
	b	L(cacheAligned1)
	.align 5
/* Storing a zero "c" value. We are aligned at a sector (32-byte)
   boundary but may not be at a cache line (128-byte) boundary.  If the
   remaining length spans a full cache line we can use the Data cache
   block zero instruction. */
L(zloopstart):
/* memset in 32-byte chunks until we get to a cache line boundary.
   If rLEN is less than the distance to the next cache-line boundary use
   cacheAligned1 code to finish the tail.  */
	cmpldi	cr1,rLEN,128
	beq	L(medium)
L(getCacheAligned):
	andi.	rTMP,rMEMP,127
	nop
	blt	cr1,L(cacheAligned1)
	addi	rMEMP3,rMEMP,32
	beq	L(cacheAligned)
	addi	rLEN,rLEN,-32
	std	rCHR,0(rMEMP)
	std	rCHR,8(rMEMP)
	std	rCHR,16(rMEMP)
	addi	rMEMP,rMEMP,32
	andi.	rTMP,rMEMP3,127
	std	rCHR,-8(rMEMP3)
L(getCacheAligned2):
	beq	L(cacheAligned)
	addi	rLEN,rLEN,-32
	std	rCHR,0(rMEMP3)
	std	rCHR,8(rMEMP3)
	addi	rMEMP,rMEMP,32
	andi.	rTMP,rMEMP,127
	std	rCHR,16(rMEMP3)
	std	rCHR,24(rMEMP3)
L(getCacheAligned3):
	beq	L(cacheAligned)
	addi	rLEN,rLEN,-32
	std	rCHR,32(rMEMP3)
	addi	rMEMP,rMEMP,32
	cmpldi	cr1,rLEN,128
	std	rCHR,40(rMEMP3)
	cmpldi	cr6,rLEN,256
	li	rMEMP2,128
	std	rCHR,48(rMEMP3)
	std	rCHR,56(rMEMP3)
	blt	cr1,L(cacheAligned1)
	blt	cr6,L(cacheAligned128)
	b	L(cacheAlignedx)

/* Now we are aligned to the cache line and can use dcbz.  */
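/* dcbz zeroes an entire 128-byte cache block in one instruction,
   avoiding the read-modify-write traffic of ordinary stores.  */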
        .align 5
L(cacheAligned):
	cmpldi	cr1,rLEN,128
	cmpldi	cr6,rLEN,256
	blt	cr1,L(cacheAligned1)
	li	rMEMP2,128
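/* rMEMP2 = 128 serves as the index operand of the second dcbz below, so
   each pass clears two 128-byte lines (256 bytes).  */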
L(cacheAlignedx):
	cmpldi	cr5,rLEN,640
	blt	cr6,L(cacheAligned128)
	bgt	cr5,L(cacheAligned512)
	cmpldi	cr6,rLEN,512
	dcbz	0,rMEMP
	cmpldi	cr1,rLEN,384
	dcbz	rMEMP2,rMEMP
	addi	rMEMP,rMEMP,256
	addi	rLEN,rLEN,-256
	blt	cr1,L(cacheAligned1)
	blt	cr6,L(cacheAligned128)
	b	L(cacheAligned256)
	.align 5
/* A simple loop for the longer (> 640 bytes) lengths.  This form limits
   the number of mispredicted branches to exactly one, at loop exit.  */
L(cacheAligned512):
	cmpldi	cr1,rLEN,128
	blt	cr1,L(cacheAligned1)
	dcbz	0,rMEMP
	addi	rLEN,rLEN,-128
	addi	rMEMP,rMEMP,128
	b	L(cacheAligned512)
        .align 5
L(cacheAligned256):

	cmpldi	cr6,rLEN,512

	dcbz	0,rMEMP
	cmpldi	cr1,rLEN,384
	dcbz	rMEMP2,rMEMP
	addi	rMEMP,rMEMP,256
	addi	rLEN,rLEN,-256

	bge	cr6,L(cacheAligned256)

	blt	cr1,L(cacheAligned1)
        .align 4
L(cacheAligned128):
	dcbz	0,rMEMP
	addi	rMEMP,rMEMP,128
	addi	rLEN,rLEN,-128
        nop
L(cacheAligned1):
	cmpldi	cr1,rLEN,32
	blt	cr1,L(handletail32)
	addi	rMEMP3,rMEMP,32
	addi	rLEN,rLEN,-32
	std	rCHR,0(rMEMP)
	std	rCHR,8(rMEMP)
	std	rCHR,16(rMEMP)
	addi	rMEMP,rMEMP,32
	cmpldi	cr1,rLEN,32
	std	rCHR,-8(rMEMP3)
L(cacheAligned2):
	blt	cr1,L(handletail32)
	addi	rLEN,rLEN,-32
	std	rCHR,0(rMEMP3)
	std	rCHR,8(rMEMP3)
	addi	rMEMP,rMEMP,32
	cmpldi	cr1,rLEN,32
	std	rCHR,16(rMEMP3)
	std	rCHR,24(rMEMP3)
	nop
L(cacheAligned3):
	blt	cr1,L(handletail32)
	addi	rMEMP,rMEMP,32
	addi	rLEN,rLEN,-32
	std	rCHR,32(rMEMP3)
	std	rCHR,40(rMEMP3)
	std	rCHR,48(rMEMP3)
	std	rCHR,56(rMEMP3)

/* We are here because the length or remainder (rLEN) is less than the
   cache line/sector size and does not justify aggressive loop unrolling.
   So set up the preconditions for L(medium) and go there.  */
        .align 3
L(handletail32):
	cmpldi	cr1,rLEN,0
	beqlr   cr1
	b	L(medium)

	.align 5
L(small):
/* Memset of 8 bytes or less.  */
	cmpldi	cr6, rLEN, 4
	cmpldi	cr5, rLEN, 1
	ble	cr6,L(le4)
	subi	rLEN, rLEN, 4
	stb	rCHR,0(rMEMP)
	stb	rCHR,1(rMEMP)
	stb	rCHR,2(rMEMP)
	stb	rCHR,3(rMEMP)
	addi	rMEMP,rMEMP, 4
	cmpldi	cr5, rLEN, 1
L(le4):
	cmpldi	cr1, rLEN, 3
	bltlr	cr5
	stb	rCHR, 0(rMEMP)
	beqlr	cr5
	stb	rCHR, 1(rMEMP)
	bltlr	cr1
	stb	rCHR, 2(rMEMP)
	beqlr	cr1
	stb	rCHR, 3(rMEMP)
	blr

/* Memset of 0-31 bytes.  */
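/* CR7 still holds the low four bits of rLEN (set by mtcrf 0x01 earlier;
   the 32-byte decrements do not disturb them): bit 31 selects a 1-byte
   store, bit 30 a 2-byte, bit 29 a 4-byte, and bit 28 an extra 8-byte
   store, while cr1 covers the 16-byte case.  The stores work backward
   from the end of the region.  */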
	.align 5
L(medium):
	insrdi	rCHR, rCHR, 32, 0	/* Replicate word to double word.  */
	cmpldi	cr1, rLEN, 16
L(medium_tail2):
	add	rMEMP, rMEMP, rLEN
L(medium_tail):
	bt-	31, L(medium_31t)
	bt-	30, L(medium_30t)
L(medium_30f):
	bt	29, L(medium_29t)
L(medium_29f):
	bge	cr1, L(medium_27t)
	bflr	28
	std	rCHR, -8(rMEMP)
	blr

L(medium_31t):
	stbu	rCHR, -1(rMEMP)
	bf-	30, L(medium_30f)
L(medium_30t):
	sthu	rCHR, -2(rMEMP)
	bf-	29, L(medium_29f)
L(medium_29t):
	stwu	rCHR, -4(rMEMP)
	blt	cr1, L(medium_27f)
L(medium_27t):
	std	rCHR, -8(rMEMP)
	stdu	rCHR, -16(rMEMP)
L(medium_27f):
	bflr	28
L(medium_28t):
	std	rCHR, -8(rMEMP)
	blr
END_GEN_TB (MEMSET,TB_TOCLESS)
libc_hidden_builtin_def (memset)

/* Copied from bzero.S to prevent the linker from inserting a stub
   between bzero and memset.  */
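/* __bzero (s [r3], n [r4]): shuffle the length into r5, load a zero
   character into r4, and fall into the memset body above.  */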
ENTRY_TOCLESS (__bzero)
	CALL_MCOUNT 3
	mr	r5,r4
	li	r4,0
	b	L(_memset)
END (__bzero)
#ifndef __bzero
weak_alias (__bzero, bzero)
#endif