author     Florian Weimer <fweimer@redhat.com>    2018-01-11 16:54:40 +0100
committer  Florian Weimer <fweimer@redhat.com>    2018-01-11 16:54:40 +0100
commit     fbd72f14904b8a81816528e0cc5bb3315fc70a47 (patch)
tree       42cb021fc9c161755e38b07b97c6433d54410597
parent     26d289bb92b6d1125536644f607c73617463477d (diff)
download   glibc-fbd72f14904b8a81816528e0cc5bb3315fc70a47.tar.gz
           glibc-fbd72f14904b8a81816528e0cc5bb3315fc70a47.tar.xz
           glibc-fbd72f14904b8a81816528e0cc5bb3315fc70a47.zip
x86: Fix mis-merge of XSAVE ld.so trampoline selection [BZ #22641]
The change is best viewed with “diff -w”:

```diff
@@ -226,6 +226,7 @@ init_cpu_features (struct cpu_features *cpu_features)
 	      /* Determine if FMA4 is usable.  */
 	      if (HAS_CPU_FEATURE (FMA4))
 		cpu_features->feature[index_FMA4_Usable] |= bit_FMA4_Usable;
+	}
 
 	  /* For _dl_runtime_resolve, set xsave_state_size to xsave area
 	     size + integer register save size and align it to 64 bytes.  */
@@ -292,7 +293,6 @@ init_cpu_features (struct cpu_features *cpu_features)
 		}
 	    }
 	}
-	}
 
 #if !HAS_CPUID
 no_cpuid:
```

Without this change, XSAVE support will never be selected unless the CPU also supports AVX, which is not what we want. For example, if AVX is disabled, but MPX is supported, the BND registers are not preserved if we use FXSAVE instead of XSAVE.

This fixes commit 26d289bb92b6d1125536644f607c73617463477d (x86-64: Use fxsave/xsave/xsavec in _dl_runtime_resolve).
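For context, the probe that this commit hoists out of the AVX branch boils down to two queries on CPUID leaf 0xd. A minimal standalone sketch of that probe follows (GCC/Clang on x86; `ALIGN_UP` and the `STATE_SAVE_OFFSET` value are stand-ins for glibc's internal definitions, not the exact ones):

```c
/* Sketch of the CPUID leaf 0xd probe performed by init_cpu_features,
   independent of AVX support.  ALIGN_UP and STATE_SAVE_OFFSET are
   stand-ins for glibc internals; the offset value is illustrative.  */
#include <cpuid.h>
#include <stdio.h>

#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))
#define STATE_SAVE_OFFSET 64  /* Illustrative: trampoline's integer-register save size.  */

int
main (void)
{
  unsigned int eax, ebx, ecx, edx;

  if (__get_cpuid_max (0, 0) >= 0xd)
    {
      /* Sub-leaf 0: EBX is the size of the XSAVE area covering the
	 state components currently enabled in XCR0.  */
      __cpuid_count (0xd, 0, eax, ebx, ecx, edx);
      if (ebx != 0)
	printf ("xsave_state_size = %u\n",
		ALIGN_UP (ebx + STATE_SAVE_OFFSET, 64));

      /* Sub-leaf 1: EAX bit 1 advertises the XSAVEC instruction.  */
      __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
      printf ("XSAVEC %savailable\n", (eax & (1 << 1)) ? "" : "not ");
    }
  return 0;
}
```

The point of the fix is exactly that this probe depends only on `max_cpuid >= 0xd`, never on AVX usability.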
-rw-r--r--  ChangeLog                     5
-rw-r--r--  NEWS                          1
-rw-r--r--  sysdeps/x86/cpu-features.c  100

3 files changed, 56 insertions, 50 deletions
```diff
diff --git a/ChangeLog b/ChangeLog
index e7db122597..95be6382d9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2018-01-11  Florian Weimer  <fweimer@redhat.com>
+
+	* sysdeps/x86/cpu-features.c (init_cpu_features): Move check for
+	XSAVE out of the AVX check.
+
 2017-10-22  H.J. Lu  <hongjiu.lu@intel.com>
 
 	[BZ #21265]
diff --git a/NEWS b/NEWS
index eb7cb338e0..7dd7193225 100644
--- a/NEWS
+++ b/NEWS
@@ -47,6 +47,7 @@ The following bugs are resolved with this release:
   [21289] Fix symbol redirect for fts_set
   [21624] Unsafe alloca allows local attackers to alias stack and heap
     (CVE-2017-1000366)
   [21666] Avoid .symver on common symbols
+  [22641] x86: Fix mis-merge of XSAVE ld.so trampoline selection
 
 Version 2.23
diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index 2060fa38e6..316a1180d1 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -226,69 +226,69 @@ init_cpu_features (struct cpu_features *cpu_features)
 	      /* Determine if FMA4 is usable.  */
 	      if (HAS_CPU_FEATURE (FMA4))
 		cpu_features->feature[index_FMA4_Usable] |= bit_FMA4_Usable;
+	}
 
-	  /* For _dl_runtime_resolve, set xsave_state_size to xsave area
-	     size + integer register save size and align it to 64 bytes.  */
-	  if (cpu_features->max_cpuid >= 0xd)
-	    {
-	      unsigned int eax, ebx, ecx, edx;
+      /* For _dl_runtime_resolve, set xsave_state_size to xsave area
+	 size + integer register save size and align it to 64 bytes.  */
+      if (cpu_features->max_cpuid >= 0xd)
+	{
+	  unsigned int eax, ebx, ecx, edx;
 
-	      __cpuid_count (0xd, 0, eax, ebx, ecx, edx);
-	      if (ebx != 0)
-		{
-		  cpu_features->xsave_state_size
+	  __cpuid_count (0xd, 0, eax, ebx, ecx, edx);
+	  if (ebx != 0)
+	    {
+	      cpu_features->xsave_state_size
 		= ALIGN_UP (ebx + STATE_SAVE_OFFSET, 64);
-		  __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
+	      __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
 
-		  /* Check if XSAVEC is available.  */
-		  if ((eax & (1 << 1)) != 0)
-		    {
-		      unsigned int xstate_comp_offsets[32];
-		      unsigned int xstate_comp_sizes[32];
-		      unsigned int i;
+	      /* Check if XSAVEC is available.  */
+	      if ((eax & (1 << 1)) != 0)
+		{
+		  unsigned int xstate_comp_offsets[32];
+		  unsigned int xstate_comp_sizes[32];
+		  unsigned int i;
 
-		      xstate_comp_offsets[0] = 0;
-		      xstate_comp_offsets[1] = 160;
-		      xstate_comp_offsets[2] = 576;
-		      xstate_comp_sizes[0] = 160;
-		      xstate_comp_sizes[1] = 256;
+		  xstate_comp_offsets[0] = 0;
+		  xstate_comp_offsets[1] = 160;
+		  xstate_comp_offsets[2] = 576;
+		  xstate_comp_sizes[0] = 160;
+		  xstate_comp_sizes[1] = 256;
 
-		      for (i = 2; i < 32; i++)
+		  for (i = 2; i < 32; i++)
+		    {
+		      if ((STATE_SAVE_MASK & (1 << i)) != 0)
 			{
-			  if ((STATE_SAVE_MASK & (1 << i)) != 0)
-			    {
-			      __cpuid_count (0xd, i, eax, ebx, ecx, edx);
-			      xstate_comp_sizes[i] = eax;
-			    }
-			  else
-			    {
-			      ecx = 0;
-			      xstate_comp_sizes[i] = 0;
-			    }
-
-			  if (i > 2)
-			    {
-			      xstate_comp_offsets[i]
-				= (xstate_comp_offsets[i - 1]
-				   + xstate_comp_sizes[i -1]);
-			      if ((ecx & (1 << 1)) != 0)
-				xstate_comp_offsets[i]
-				  = ALIGN_UP (xstate_comp_offsets[i], 64);
-			    }
+			  __cpuid_count (0xd, i, eax, ebx, ecx, edx);
+			  xstate_comp_sizes[i] = eax;
+			}
+		      else
+			{
+			  ecx = 0;
+			  xstate_comp_sizes[i] = 0;
 			}
 
-		      /* Use XSAVEC.  */
-		      unsigned int size
-			= xstate_comp_offsets[31] + xstate_comp_sizes[31];
-		      if (size)
+		      if (i > 2)
 			{
-			  cpu_features->xsave_state_size
-			    = ALIGN_UP (size + STATE_SAVE_OFFSET, 64);
-			  cpu_features->feature[index_XSAVEC_Usable]
-			    |= bit_XSAVEC_Usable;
+			  xstate_comp_offsets[i]
+			    = (xstate_comp_offsets[i - 1]
+			       + xstate_comp_sizes[i -1]);
+			  if ((ecx & (1 << 1)) != 0)
+			    xstate_comp_offsets[i]
+			      = ALIGN_UP (xstate_comp_offsets[i], 64);
 			}
 		    }
+
+		  /* Use XSAVEC.  */
+		  unsigned int size
+		    = xstate_comp_offsets[31] + xstate_comp_sizes[31];
+		  if (size)
+		    {
+		      cpu_features->xsave_state_size
+			= ALIGN_UP (size + STATE_SAVE_OFFSET, 64);
+		      cpu_features->feature[index_XSAVEC_Usable]
+			|= bit_XSAVEC_Usable;
+		    }
 		}
 	    }
 	}
```
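The XSAVEC branch in the hunk above computes the compacted save-area size by hand: each component's size comes from CPUID leaf 0xd sub-leaf i (EAX), and a component whose sub-leaf sets ECX bit 1 must start on a 64-byte boundary in the compacted format. A standalone sketch of that accumulation follows; the `STATE_SAVE_MASK` value here is illustrative (glibc defines its own mask of the components the trampoline must preserve):

```c
/* Sketch of the compacted-format (XSAVEC) size computation from the
   patch above.  STATE_SAVE_MASK is illustrative (x87, SSE, AVX, and
   the MPX BNDREGS/BNDCSR components); glibc defines its own mask.  */
#include <cpuid.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define STATE_SAVE_MASK \
  ((1u << 0) | (1u << 1) | (1u << 2) | (1u << 3) | (1u << 4))

static unsigned int
compacted_xsave_size (void)
{
  /* Components 0 (x87) and 1 (SSE) live in the fixed 512-byte legacy
     region, followed by the 64-byte XSAVE header; compacted extended
     components therefore start at offset 576.  */
  unsigned int offset = 576;
  unsigned int eax, ebx, ecx, edx;

  for (unsigned int i = 2; i < 32; i++)
    if ((STATE_SAVE_MASK & (1u << i)) != 0)
      {
	__cpuid_count (0xd, i, eax, ebx, ecx, edx);
	/* ECX bit 1: component i is 64-byte aligned when compacted.  */
	if ((ecx & (1 << 1)) != 0)
	  offset = ALIGN_UP (offset, 64);
	offset += eax;  /* EAX: size in bytes of component i.  */
      }
  return offset;
}
```

In the patch itself, the resulting size then has STATE_SAVE_OFFSET added and is rounded up to 64 bytes before being stored in cpu_features->xsave_state_size, so the trampoline's integer-register save area and the XSAVE area share one aligned allocation.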