about summary refs log tree commit diff
path: root/sysdeps/x86_64/multiarch/ifunc-memmove.h
blob: 6da591cdeb56795897ea73743330c4af43a724c9 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
/* Common definition for memcpy, mempcpy and memmove implementation.
   All versions must be listed in ifunc-impl-list.c.
   Copyright (C) 2017-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <init-arch.h>

/* Prototypes for every candidate implementation the selector below may
   return.  REDIRECT_NAME and OPTIMIZE are defined by the including file
   (memcpy.c, mempcpy.c or memmove.c) so the same selector serves all
   three entry points.  Each must also be listed in ifunc-impl-list.c.  */

/* Plain `rep movsb' (Enhanced REP MOVSB) implementation.  */
extern __typeof (REDIRECT_NAME) OPTIMIZE (erms) attribute_hidden;

/* AVX-512 (64-byte vector) variants; the _erms flavors switch to
   `rep movsb' above a size threshold.  */
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
  attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
  attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
  attribute_hidden;

/* EVEX-encoded (256-bit vectors, AVX512VL) variants.  */
extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
  attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
  attribute_hidden;

/* AVX (VEX-encoded 256-bit) variants; the _rtm flavors are safe to use
   inside RTM transactions.  */
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned) attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_erms)
  attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_rtm)
  attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_erms_rtm)
  attribute_hidden;

/* SSSE3 variant, tuned for avoiding unaligned loads.  */
extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden;

/* SSE2 baseline variants (always usable on x86-64).  */
extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
  attribute_hidden;
extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
  attribute_hidden;

/* Pick the best implementation for the running CPU.  Checks are ordered
   from most to least preferred; the first match wins, so the ordering
   itself is part of the selection policy.  The X86_ISA_* macro forms
   fold checks away at compile time when the build's minimum ISA level
   already guarantees (or rules out) the feature; their trailing `,'
   argument is the macro's negation slot (empty = positive test,
   `!' = negated test).  */
static inline void *
inhibit_stack_protector
IFUNC_SELECTOR (void)
{
  const struct cpu_features *cpu_features = __get_cpu_features ();

  /* Tunable/CPU override: if ERMS or FSRM is explicitly preferred,
     use `rep movsb' unconditionally.  */
  if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_ERMS)
      || CPU_FEATURES_ARCH_P (cpu_features, Prefer_FSRM))
    return OPTIMIZE (erms);

  /* AVX-512 path, unless the CPU/tunable asks to avoid AVX-512.  */
  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
      && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
    {
      /* AVX512VL implies the newer microarchitectures where VZEROUPPER
	 is cheap; without it fall back to the no-vzeroupper variant.  */
      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
	{
	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
	    return OPTIMIZE (avx512_unaligned_erms);

	  return OPTIMIZE (avx512_unaligned);
	}

      return OPTIMIZE (avx512_no_vzeroupper);
    }

  /* 256-bit vector path: requires fast unaligned AVX loads.  */
  if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
				   AVX_Fast_Unaligned_Load, ))
    {
      /* Prefer the EVEX encoding when AVX512VL is usable (no SSE/AVX
	 transition penalties, masked operations).  */
      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
	{
	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
	    return OPTIMIZE (evex_unaligned_erms);

	  return OPTIMIZE (evex_unaligned);
	}

      /* Inside RTM transactions VZEROUPPER aborts, so use the RTM-safe
	 variants on RTM-capable CPUs.  */
      if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
	{
	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
	    return OPTIMIZE (avx_unaligned_erms_rtm);

	  return OPTIMIZE (avx_unaligned_rtm);
	}

      /* Plain AVX variants only when VZEROUPPER is not to be avoided
	 (note the negated `!' test).  */
      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
				       Prefer_No_VZEROUPPER, !))
	{
	  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
	    return OPTIMIZE (avx_unaligned_erms);

	  return OPTIMIZE (avx_unaligned);
	}
    }

  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, SSSE3)
      /* Leave this as runtime check.  The SSSE3 is optimized almost
         exclusively for avoiding unaligned memory access during the
         copy and by and large is not better than the sse2
         implementation as a general purpose memmove.  */
      && !CPU_FEATURES_ARCH_P (cpu_features, Fast_Unaligned_Copy))
    {
      return OPTIMIZE (ssse3);
    }

  /* SSE2 baseline: always usable on x86-64.  */
  if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
    return OPTIMIZE (sse2_unaligned_erms);

  return OPTIMIZE (sse2_unaligned);
}