path: root/sysdeps/x86_64/multiarch/memcmpeq-evex.S
/* __memcmpeq optimized with EVEX.
   Copyright (C) 2017-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <isa-level.h>

#if ISA_SHOULD_BUILD (4)

/* __memcmpeq is implemented as:
   1. Use ymm vector compares when possible. The only case where
      vector compares are not possible is when size < VEC_SIZE
      and loading from either s1 or s2 would cause a page cross.
   2. Use xmm vector compare when size >= 8 bytes.
   3. Optimistically compare up to the first 4 * VEC_SIZE bytes one
      vector at a time to check for early mismatches. Only do this
      if it is guaranteed the work is not wasted.
   4. If size is 8 * VEC_SIZE or less, unroll the loop.
   5. Compare 4 * VEC_SIZE at a time with the aligned first memory
      area.
   6. Use 2 vector compares when size is 2 * VEC_SIZE or less.
   7. Use 4 vector compares when size is 4 * VEC_SIZE or less.
   8. Use 8 vector compares when size is 8 * VEC_SIZE or less.  */
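
/* A rough C-level sketch of the size dispatch described above
   (illustrative note only, not part of the implementation; `len' is
   the length argument and the thresholds are the same VEC_SIZE
   multiples used below):

	if (len <= VEC_SIZE)           masked single-vector compare
	else if (len <= 2 * VEC_SIZE)  vec 0, then one overlapping last vec
	else if (len <= 4 * VEC_SIZE)  vecs 0-1, then 2 overlapping last vecs
	else if (len <= 8 * VEC_SIZE)  vecs 0-3, then a branchless tail
	else                           vecs 0-3, aligned 4-vec loop, tail  */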

# include <sysdep.h>

# ifndef MEMCMPEQ
#  define MEMCMPEQ	__memcmpeq_evex
# endif

# ifndef VEC_SIZE
#  include "x86-evex512-vecs.h"
# endif
# include "reg-macros.h"


# if VEC_SIZE == 32

#  define TEST_ZERO_VCMP(reg)	inc %VGPR(reg)
#  define TEST_ZERO(reg)	test %VGPR(reg), %VGPR(reg)

#  define TO_32BIT_P1(reg)	/* Do nothing. */
#  define TO_32BIT_P2(reg)	/* Do nothing. */
#  define TO_32BIT(reg)	/* Do nothing. */

#  define VEC_CMP	VPCMPEQ
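
	/* Note on the choices above (explanatory, inferred from their
	   use below): with VEC_SIZE == 32 the compare mask fits in a
	   32-bit GPR.  VPCMPEQ sets a mask bit for every *equal* byte,
	   so an all-equal vector reads back as 0xffffffff and `inc`
	   wraps it to zero; TEST_ZERO_VCMP therefore sets ZF exactly
	   when the two vectors match.  */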

# elif VEC_SIZE == 64

#  define TEST_ZERO_VCMP(reg)	TEST_ZERO(reg)
#  define TEST_ZERO(reg)	neg %VGPR(reg)


	/* VEC_SIZE == 64 needs to reduce the 64-bit mask to a 32-bit
	   int. We have two methods for this. If the mask was branched
	   on, we use `neg` for the branch then `sbb` to get the 32-bit
	   return. If the mask was not branched on, we just use
	   `popcntq`.  */
#  define TO_32BIT_P1(reg)	TEST_ZERO(reg)
#  define TO_32BIT_P2(reg)	sbb %VGPR_SZ(reg, 32), %VGPR_SZ(reg, 32)
#  define TO_32BIT(reg)	popcntq %reg, %reg
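
	/* For reference (explanatory note, not used by the code): both
	   reductions yield a nonzero 32-bit value iff the 64-bit
	   mismatch mask was nonzero.  The branched path is roughly
	       neg  mask          ; sets CF = (mask != 0)
	       sbb  r32, r32      ; r32 = -(mask != 0), i.e. 0 or -1
	   while the non-branched path simply takes popcntq of the
	   mask.  */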

#  define VEC_CMP	VPCMPNEQ

# else
#  error "Unsupported VEC_SIZE"
# endif


# define VMOVU_MASK	vmovdqu8
# define VPCMPNEQ	vpcmpneqb
# define VPCMPEQ	vpcmpeqb
# define VPTEST	vptestmb

# define PAGE_SIZE	4096

	.section SECTION(.text), "ax", @progbits
ENTRY_P2ALIGN (MEMCMPEQ, 6)
# ifdef __ILP32__
	/* Clear the upper 32 bits.  */
	movl	%edx, %edx
# endif
	cmp	$VEC_SIZE, %RDX_LP
	/* Fall through for [0, VEC_SIZE] as it's the hottest.  */
	ja	L(more_1x_vec)

	/* Create mask of bytes that are guaranteed to be valid because
	   of length (edx). Using masked movs allows us to skip checks
	   for page crosses/zero size.  */
	mov	$-1, %VRAX
	bzhi	%VRDX, %VRAX, %VRAX
	/* NB: A `jz` might be useful here. Page-faults that are
	   invalidated by predicated execution (the evex mask) can be
	   very slow.  The expectation is this is not the norm, and
	   "most" code will not regularly call 'memcmp' with length = 0
	   and memory that is not wired up.  */
	KMOV	%VRAX, %k2

	/* Use masked loads, as a full VEC_SIZE load could cross a page
	   boundary where a load of only length (edx) bytes would not.  */
	VMOVU_MASK (%rsi), %VMM(2){%k2}{z}
	VPCMPNEQ (%rdi), %VMM(2), %k1{%k2}
	KMOV	%k1, %VRAX
	TO_32BIT (VRAX)
	ret
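
	/* Illustrative sketch of the short path above (explanatory
	   assumption about intent, not extra code), for len <= VEC_SIZE:

		k2 = mask with the lowest `len' bits set      (bzhi of -1)
		v2 = s2 bytes loaded under k2, rest zeroed    (VMOVU_MASK {z})
		k1 = bytes where s1 != v2, evaluated under k2 (VPCMPNEQ {%k2})
		return nonzero iff k1 != 0

	   Because the load and the compare are both masked by k2, bytes
	   past `len' neither fault nor affect the result, so no explicit
	   page-cross or zero-size check is needed.  */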

	.p2align 4,, 3
L(last_1x_vec):
	VMOVU	-(VEC_SIZE * 1)(%rsi, %rdx), %VMM(1)
	VPCMPNEQ -(VEC_SIZE * 1)(%rdi, %rdx), %VMM(1), %k1
	KMOV	%k1, %VRAX
	TO_32BIT_P1 (rax)
L(return_neq0):
	TO_32BIT_P2 (rax)
	ret


	.p2align 4,, 12
L(more_1x_vec):
	/* From VEC + 1 to 2 * VEC.  */
	VMOVU	(%rsi), %VMM(1)
	/* Use compare not equals to directly check for mismatch.  */
	VPCMPNEQ (%rdi), %VMM(1), %k1
	KMOV	%k1, %VRAX
	TEST_ZERO (rax)
	jnz	L(return_neq0)

	cmpq	$(VEC_SIZE * 2), %rdx
	jbe	L(last_1x_vec)

	/* Check second VEC no matter what.  */
	VMOVU	VEC_SIZE(%rsi), %VMM(2)
	VPCMPNEQ VEC_SIZE(%rdi), %VMM(2), %k1
	KMOV	%k1, %VRAX
	TEST_ZERO (rax)
	jnz	L(return_neq0)

	/* Size is 4 * VEC or less.  */
	cmpq	$(VEC_SIZE * 4), %rdx
	jbe	L(last_2x_vec)

	/* Check third and fourth VEC no matter what.  */
	VMOVU	(VEC_SIZE * 2)(%rsi), %VMM(3)
	VEC_CMP	(VEC_SIZE * 2)(%rdi), %VMM(3), %k1
	KMOV	%k1, %VRAX
	TEST_ZERO_VCMP (rax)
	jnz	L(return_neq0)

	VMOVU	(VEC_SIZE * 3)(%rsi), %VMM(4)
	VEC_CMP	(VEC_SIZE * 3)(%rdi), %VMM(4), %k1
	KMOV	%k1, %VRAX
	TEST_ZERO_VCMP (rax)
	jnz	L(return_neq0)

	/* Go to 4x VEC loop.  */
	cmpq	$(VEC_SIZE * 8), %rdx
	ja	L(more_8x_vec)

	/* Handle remainder of size = 4 * VEC + 1 to 8 * VEC without any
	   branches.  */

	VMOVU	-(VEC_SIZE * 1)(%rsi, %rdx), %VMM(1)
	VMOVU	-(VEC_SIZE * 2)(%rsi, %rdx), %VMM(2)
	addq	%rdx, %rdi

	/* Wait to load from s1 until after the address adjustment, to
	   avoid micro-op unlamination from an indexed addressing mode.  */

	/* vpxor will be all 0s if s1 and s2 are equal. Otherwise it
	   will have some 1s.  */
	vpxorq	-(VEC_SIZE * 1)(%rdi), %VMM(1), %VMM(1)
	/* Ternary logic to xor -(VEC_SIZE * 2)(%rdi) with VEC(2) while
	   oring with VEC(1). Result is stored in VEC(2).  */
	vpternlogd $0xde, -(VEC_SIZE * 2)(%rdi), %VMM(1), %VMM(2)
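	/* (Reference note: imm8 0xde computes src2 | (dst ^ src3), so
	   the instruction above leaves VEC(2) = VEC(1) | (VEC(2) ^ mem).)  */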

	cmpl	$(VEC_SIZE * 6), %edx
	jbe	L(4x_last_2x_vec)

	VMOVU	-(VEC_SIZE * 3)(%rsi, %rdx), %VMM(3)
	vpxorq	-(VEC_SIZE * 3)(%rdi), %VMM(3), %VMM(3)
	/* Load the 4th VEC from the end of s2 and xor it with the
	   corresponding s1 bytes.  */
	VMOVU	-(VEC_SIZE * 4)(%rsi, %rdx), %VMM(4)
	vpxorq	-(VEC_SIZE * 4)(%rdi), %VMM(4), %VMM(4)

	/* Or together VEC(4), VEC(3), and VEC(2) into VEC(2).  */
	vpternlogd $0xfe, %VMM(4), %VMM(3), %VMM(2)

	/* Compare VEC(2) with 0. If any 1s s1 and s2 don't match.  */
L(4x_last_2x_vec):
	VPTEST	%VMM(2), %VMM(2), %k1
	KMOV	%k1, %VRAX
	TO_32BIT (VRAX)
	ret


	.p2align 4,, 10
L(more_8x_vec):
	/* Set rdx to the end of s1 minus 4 * VEC_SIZE (the loop bound).  */
	leaq	-(VEC_SIZE * 4)(%rdi, %rdx), %rdx
	/* rsi stores s2 - s1. This allows the loop to update only one
	   pointer.  */
	subq	%rdi, %rsi
	/* Align s1 pointer.  */
	andq	$-VEC_SIZE, %rdi
	/* Adjust because the first 4x VEC were already checked.  */
	subq	$-(VEC_SIZE * 4), %rdi
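
	/* Note on the loop addressing (explanatory assumption): s1 is
	   read via (%rdi) and s2 via (%rsi, %rdi) == (s2 - s1) + s1, so
	   advancing %rdi by 4 * VEC_SIZE steps both strings while the
	   loop keeps a single induction pointer.  */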
	.p2align 5,, 12
	.p2align 4,, 8
L(loop_4x_vec):
	VMOVU	(%rsi, %rdi), %VMM(1)
	vpxorq	(%rdi), %VMM(1), %VMM(1)

	VMOVU	VEC_SIZE(%rsi, %rdi), %VMM(2)
	vpternlogd $0xde, (VEC_SIZE)(%rdi), %VMM(1), %VMM(2)

	VMOVU	(VEC_SIZE * 2)(%rsi, %rdi), %VMM(3)
	vpxorq	(VEC_SIZE * 2)(%rdi), %VMM(3), %VMM(3)

	VMOVU	(VEC_SIZE * 3)(%rsi, %rdi), %VMM(4)
	vpxorq	(VEC_SIZE * 3)(%rdi), %VMM(4), %VMM(4)

	vpternlogd $0xfe, %VMM(2), %VMM(3), %VMM(4)
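	/* (Reference note: imm8 0xfe ORs all three sources, so VEC(4)
	   now accumulates the xor results of all four vectors.)  */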
	VPTEST	%VMM(4), %VMM(4), %k1
	KMOV	%k1, %VRAX
	TEST_ZERO (rax)
	jnz	L(return_neq2)
	subq	$-(VEC_SIZE * 4), %rdi
	cmpq	%rdx, %rdi
	jb	L(loop_4x_vec)

	subq	%rdx, %rdi

	VMOVU	(VEC_SIZE * 3)(%rsi, %rdx), %VMM(4)
	vpxorq	(VEC_SIZE * 3)(%rdx), %VMM(4), %VMM(4)
	/* rdi has 4 * VEC_SIZE - remaining length.  */

	/* Load regardless of branch.  */
	VMOVU	(VEC_SIZE * 2)(%rsi, %rdx), %VMM(3)
	/* Ternary logic to xor (VEC_SIZE * 2)(%rdx) with VEC(3) while
	   oring with VEC(4). Result is stored in VEC(4).  */
	vpternlogd $0xf6, (VEC_SIZE * 2)(%rdx), %VMM(3), %VMM(4)

	/* Separate logic as we can only use testb for VEC_SIZE == 64.
	 */
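	/* Reasoning note (assumption spelled out): at this point rdi is
	   4 * VEC_SIZE - remaining, which lies in [0, 4 * VEC_SIZE).  For
	   VEC_SIZE == 64 that range fits in one byte, so the sign bit of
	   %dil is set exactly when rdi >= 128 == 2 * VEC_SIZE, i.e. when
	   at most 2 * VEC_SIZE bytes remain.  */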
# if VEC_SIZE == 64
	testb	%dil, %dil
	js	L(8x_last_2x_vec)
# else
	cmpl	$(VEC_SIZE * 2), %edi
	jge	L(8x_last_2x_vec)
# endif

	VMOVU	VEC_SIZE(%rsi, %rdx), %VMM(2)
	vpxorq	VEC_SIZE(%rdx), %VMM(2), %VMM(2)

	VMOVU	(%rsi, %rdx), %VMM(1)
	vpxorq	(%rdx), %VMM(1), %VMM(1)

	vpternlogd $0xfe, %VMM(1), %VMM(2), %VMM(4)
L(8x_last_1x_vec):
L(8x_last_2x_vec):
	VPTEST	%VMM(4), %VMM(4), %k1
	KMOV	%k1, %VRAX
	TO_32BIT_P1 (rax)
L(return_neq2):
	TO_32BIT_P2 (rax)
	ret

	.p2align 4,, 4
L(last_2x_vec):
	VMOVU	-(VEC_SIZE * 2)(%rsi, %rdx), %VMM(1)
	vpxorq	-(VEC_SIZE * 2)(%rdi, %rdx), %VMM(1), %VMM(1)
	VMOVU	-(VEC_SIZE * 1)(%rsi, %rdx), %VMM(2)
	vpternlogd $0xde, -(VEC_SIZE * 1)(%rdi, %rdx), %VMM(1), %VMM(2)
	VPTEST	%VMM(2), %VMM(2), %k1
	KMOV	%k1, %VRAX
	TO_32BIT (VRAX)
	ret

	/* evex256: 1 byte from next cache line. evex512: 15 bytes from
	   next cache line.  */
END (MEMCMPEQ)
#endif