author      Florian Weimer <fweimer@redhat.com>   2015-11-24 17:21:01 +0100
committer   Florian Weimer <fweimer@redhat.com>   2015-11-24 17:21:01 +0100
commit      2359035ac52db69eb427c3b4b9d9297c98d2b225 (patch)
tree        61cd8b4054f8f708b4d41b9427a989f547cf3a0c /sysdeps/unix/sysv/linux/tst-skeleton-thread-affinity.c
parent      c100dca32a6859a47789ddcbb5679e74f8d92a41 (diff)
Remove CPU set size checking from affinity functions [BZ #19143]
With current kernel versions, the check does not reliably detect that unavailable CPUs are requested, for these reasons:

(1) The kernel will silently ignore non-allowed CPUs, that is, CPUs which are physically present but disallowed for the thread based on system configuration.

(2) Similarly, CPU bits which lack an online CPU (possible but not online CPUs) are ignored.

(3) The existing probing code assumes that the CPU mask size is a power of two and at least 1024.  It does not have to be a power of two, nor is 1024 the minimum possible value, so the determined size is often too large.  This means that the CPU set size check in glibc accepts CPU bits beyond the actual hard system limit.

(4) Future kernel versions may not even have a fixed CPU set size.

After the removal of the probing code, the kernel still returns EINVAL if no CPU in the requested set remains which can run the thread after the affinity change.  Applications which care about the exact affinity mask will still have to query it using sched_getaffinity after setting it; because of the kernel behavior described above, this commit does not change that requirement.

The new tests supersede tst-getcpu, which is removed.  This addresses bug 19164 because the new tests allocate CPU sets dynamically.

* nptl/check-cpuset.h: Remove.
* nptl/pthread_attr_setaffinity.c (__pthread_attr_setaffinity_new):
  Remove CPU set size check.
* nptl/pthread_setattr_default_np.c (pthread_setattr_default_np):
  Likewise.
* sysdeps/unix/sysv/linux/check-cpuset.h: Remove.
* sysdeps/unix/sysv/linux/pthread_setaffinity.c
  (__kernel_cpumask_size, __determine_cpumask_size): Remove.
  (__pthread_setaffinity_new): Remove CPU set size check.
* sysdeps/unix/sysv/linux/sched_setaffinity.c
  (__kernel_cpumask_size): Remove.
  (__sched_setaffinity_new): Remove CPU set size check.
* manual/threads.texi (Default Thread Attributes): Remove stale
  reference to check_cpuset_attr, determine_cpumask_size in comment.
* sysdeps/unix/sysv/linux/Makefile [$(subdir) == posix] (tests):
  Remove tst-getcpu.  Add tst-affinity, tst-affinity-pid.
  [$(subdir) == nptl] (tests): Add tst-thread-affinity-pthread,
  tst-thread-affinity-pthread2, tst-thread-affinity-sched.
* sysdeps/unix/sysv/linux/tst-affinity.c: New file.
* sysdeps/unix/sysv/linux/tst-affinity-pid.c: New file.
* sysdeps/unix/sysv/linux/tst-skeleton-affinity.c: New skeleton test file.
* sysdeps/unix/sysv/linux/tst-thread-affinity-sched.c: New file.
* sysdeps/unix/sysv/linux/tst-thread-affinity-pthread.c: New file.
* sysdeps/unix/sysv/linux/tst-thread-affinity-pthread2.c: New file.
* sysdeps/unix/sysv/linux/tst-skeleton-thread-affinity.c: New skeleton test file.
* sysdeps/unix/sysv/linux/tst-getcpu.c: Remove.  Superseded by tst-affinity-pid.
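As an illustration (not part of the commit), here is a minimal sketch of the set-then-query pattern the message recommends, using a dynamically allocated CPU set in the same way the new tests do.  The CPU count of 1024 passed to CPU_ALLOC is an arbitrary choice for the example, not a kernel limit.

/* Sketch: request an affinity mask, then read back the mask the kernel
   actually applied.  Per the commit message above, EINVAL now only
   indicates that no CPU in the requested set can run the thread;
   non-allowed or offline CPUs are silently dropped, so the effective
   mask has to be queried afterwards.  */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int
main (void)
{
  const int count = 1024;       /* Arbitrary set size for this example.  */
  cpu_set_t *set = CPU_ALLOC (count);
  size_t size = CPU_ALLOC_SIZE (count);
  if (set == NULL)
    return 1;

  CPU_ZERO_S (size, set);
  CPU_SET_S (0, size, set);     /* Ask to run on CPU 0 only.  */
  if (sched_setaffinity (0, size, set) != 0)
    {
      perror ("sched_setaffinity");
      return 1;
    }

  /* Read back the mask that is actually in effect.  */
  CPU_ZERO_S (size, set);
  if (sched_getaffinity (0, size, set) != 0)
    {
      perror ("sched_getaffinity");
      return 1;
    }
  printf ("effective affinity covers %d CPU(s)\n", CPU_COUNT_S (size, set));

  CPU_FREE (set);
  return 0;
}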
Diffstat (limited to 'sysdeps/unix/sysv/linux/tst-skeleton-thread-affinity.c')
-rw-r--r--  sysdeps/unix/sysv/linux/tst-skeleton-thread-affinity.c  280
1 file changed, 280 insertions, 0 deletions
diff --git a/sysdeps/unix/sysv/linux/tst-skeleton-thread-affinity.c b/sysdeps/unix/sysv/linux/tst-skeleton-thread-affinity.c
new file mode 100644
index 0000000000..69e09bb5cd
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/tst-skeleton-thread-affinity.c
@@ -0,0 +1,280 @@

/* Generic test for CPU affinity functions, multi-threaded variant.
   Copyright (C) 2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* Before including this file, a test has to declare the helper
   getaffinity and setaffinity functions described in
   tst-skeleton-affinity.c, which is included below.  */

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/time.h>

struct conf;
static bool early_test (struct conf *);

/* Arbitrary run time for each pass.  */
#define PASS_TIMEOUT 2

/* There are two passes (one with sched_yield, one without), and we
   double the timeout to be on the safe side.  */
#define TIMEOUT (2 * PASS_TIMEOUT * 2)

#include "tst-skeleton-affinity.c"

/* 0 if still running, 1 if stopping requested.  */
static int still_running;

/* 0 if no scheduling failures, 1 if failures are encountered.  */
static int failed;

static void *
thread_burn_one_cpu (void *closure)
{
  int cpu = (uintptr_t) closure;
  while (__atomic_load_n (&still_running, __ATOMIC_RELAXED) == 0)
    {
      int current = sched_getcpu ();
      if (current != cpu)
        {
          printf ("error: Pinned thread %d ran on impossible cpu %d\n",
                  cpu, current);
          __atomic_store_n (&failed, 1, __ATOMIC_RELAXED);
          /* Terminate early.  */
          __atomic_store_n (&still_running, 1, __ATOMIC_RELAXED);
        }
    }
  return NULL;
}

struct burn_thread
{
  pthread_t self;
  struct conf *conf;
  cpu_set_t *initial_set;
  cpu_set_t *seen_set;
  int thread;
};

static void *
thread_burn_any_cpu (void *closure)
{
  struct burn_thread *param = closure;

  /* Schedule this thread around a bit to see if it lands on another
     CPU.  Run this for 2 seconds, once with sched_yield, once
     without.  */
  for (int pass = 1; pass <= 2; ++pass)
    {
      time_t start = time (NULL);
      while (time (NULL) - start <= PASS_TIMEOUT)
        {
          int cpu = sched_getcpu ();
          if (cpu > param->conf->last_cpu
              || !CPU_ISSET_S (cpu, CPU_ALLOC_SIZE (param->conf->set_size),
                               param->initial_set))
            {
              printf ("error: Unpinned thread %d ran on impossible CPU %d\n",
                      param->thread, cpu);
              __atomic_store_n (&failed, 1, __ATOMIC_RELAXED);
              return NULL;
            }
          CPU_SET_S (cpu, CPU_ALLOC_SIZE (param->conf->set_size),
                     param->seen_set);
          if (pass == 1)
            sched_yield ();
        }
    }
  return NULL;
}

static void
stop_and_join_threads (struct conf *conf, cpu_set_t *set,
                       pthread_t *pinned_first, pthread_t *pinned_last,
                       struct burn_thread *other_first,
                       struct burn_thread *other_last)
{
  __atomic_store_n (&still_running, 1, __ATOMIC_RELAXED);
  for (pthread_t *p = pinned_first; p < pinned_last; ++p)
    {
      int cpu = p - pinned_first;
      if (!CPU_ISSET_S (cpu, CPU_ALLOC_SIZE (conf->set_size), set))
        continue;

      int ret = pthread_join (*p, NULL);
      if (ret != 0)
        {
          printf ("error: Failed to join thread %d: %s\n", cpu, strerror (ret));
          fflush (stdout);
          /* Cannot shut down cleanly with threads still running.  */
          abort ();
        }
    }

  for (struct burn_thread *p = other_first; p < other_last; ++p)
    {
      int cpu = p - other_first;
      if (!CPU_ISSET_S (cpu, CPU_ALLOC_SIZE (conf->set_size), set))
        continue;

      int ret = pthread_join (p->self, NULL);
      if (ret != 0)
        {
          printf ("error: Failed to join thread %d: %s\n", cpu, strerror (ret));
          fflush (stdout);
          /* Cannot shut down cleanly with threads still running.  */
          abort ();
        }
    }
}

/* Tries to check that the initial set of CPUs is complete and that
   the main thread will not run on any other CPUs.  */
static bool
early_test (struct conf *conf)
{
  pthread_t *pinned_threads
    = calloc (conf->last_cpu + 1, sizeof (*pinned_threads));
  struct burn_thread *other_threads
    = calloc (conf->last_cpu + 1, sizeof (*other_threads));
  cpu_set_t *initial_set = CPU_ALLOC (conf->set_size);
  cpu_set_t *scratch_set = CPU_ALLOC (conf->set_size);

  if (pinned_threads == NULL || other_threads == NULL
      || initial_set == NULL || scratch_set == NULL)
    {
      puts ("error: Memory allocation failure");
      return false;
    }
  if (getaffinity (CPU_ALLOC_SIZE (conf->set_size), initial_set) < 0)
    {
      printf ("error: pthread_getaffinity_np failed: %m\n");
      return false;
    }
  for (int cpu = 0; cpu <= conf->last_cpu; ++cpu)
    {
      if (!CPU_ISSET_S (cpu, CPU_ALLOC_SIZE (conf->set_size), initial_set))
        continue;
      other_threads[cpu].conf = conf;
      other_threads[cpu].initial_set = initial_set;
      other_threads[cpu].thread = cpu;
      other_threads[cpu].seen_set = CPU_ALLOC (conf->set_size);
      if (other_threads[cpu].seen_set == NULL)
        {
          puts ("error: Memory allocation failure");
          return false;
        }
      CPU_ZERO_S (CPU_ALLOC_SIZE (conf->set_size),
                  other_threads[cpu].seen_set);
    }

  pthread_attr_t attr;
  int ret = pthread_attr_init (&attr);
  if (ret != 0)
    {
      printf ("error: pthread_attr_init failed: %s\n", strerror (ret));
      return false;
    }

  /* Spawn a thread pinned to each available CPU.  */
  for (int cpu = 0; cpu <= conf->last_cpu; ++cpu)
    {
      if (!CPU_ISSET_S (cpu, CPU_ALLOC_SIZE (conf->set_size), initial_set))
        continue;
      CPU_ZERO_S (CPU_ALLOC_SIZE (conf->set_size), scratch_set);
      CPU_SET_S (cpu, CPU_ALLOC_SIZE (conf->set_size), scratch_set);
      ret = pthread_attr_setaffinity_np
        (&attr, CPU_ALLOC_SIZE (conf->set_size), scratch_set);
      if (ret != 0)
        {
          printf ("error: pthread_attr_setaffinity_np for CPU %d failed: %s\n",
                  cpu, strerror (ret));
          stop_and_join_threads (conf, initial_set,
                                 pinned_threads, pinned_threads + cpu,
                                 NULL, NULL);
          return false;
        }
      ret = pthread_create (pinned_threads + cpu, &attr,
                            thread_burn_one_cpu, (void *) (uintptr_t) cpu);
      if (ret != 0)
        {
          printf ("error: pthread_create for CPU %d failed: %s\n",
                  cpu, strerror (ret));
          stop_and_join_threads (conf, initial_set,
                                 pinned_threads, pinned_threads + cpu,
                                 NULL, NULL);
          return false;
        }
    }

  /* Spawn another set of threads running on all CPUs.  */
  for (int cpu = 0; cpu <= conf->last_cpu; ++cpu)
    {
      if (!CPU_ISSET_S (cpu, CPU_ALLOC_SIZE (conf->set_size), initial_set))
        continue;
      ret = pthread_create (&other_threads[cpu].self, NULL,
                            thread_burn_any_cpu, other_threads + cpu);
      if (ret != 0)
        {
          printf ("error: pthread_create for thread %d failed: %s\n",
                  cpu, strerror (ret));
          stop_and_join_threads (conf, initial_set,
                                 pinned_threads,
                                 pinned_threads + conf->last_cpu + 1,
                                 other_threads, other_threads + cpu);
          return false;
        }
    }

  /* Main thread.  */
  struct burn_thread main_thread;
  main_thread.conf = conf;
  main_thread.initial_set = initial_set;
  main_thread.seen_set = scratch_set;
  main_thread.thread = -1;
  CPU_ZERO_S (CPU_ALLOC_SIZE (conf->set_size), main_thread.seen_set);
  thread_burn_any_cpu (&main_thread);
  stop_and_join_threads (conf, initial_set,
                         pinned_threads,
                         pinned_threads + conf->last_cpu + 1,
                         other_threads, other_threads + conf->last_cpu + 1);

  printf ("info: Main thread ran on %d CPU(s) of %d available CPU(s)\n",
          CPU_COUNT_S (CPU_ALLOC_SIZE (conf->set_size), scratch_set),
          CPU_COUNT_S (CPU_ALLOC_SIZE (conf->set_size), initial_set));

  CPU_ZERO_S (CPU_ALLOC_SIZE (conf->set_size), scratch_set);
  for (int cpu = 0; cpu <= conf->last_cpu; ++cpu)
    {
      if (!CPU_ISSET_S (cpu, CPU_ALLOC_SIZE (conf->set_size), initial_set))
        continue;
      CPU_OR_S (CPU_ALLOC_SIZE (conf->set_size),
                scratch_set, scratch_set, other_threads[cpu].seen_set);
      CPU_FREE (other_threads[cpu].seen_set);
    }
  printf ("info: Other threads ran on %d CPU(s)\n",
          CPU_COUNT_S (CPU_ALLOC_SIZE (conf->set_size), scratch_set));

  pthread_attr_destroy (&attr);
  CPU_FREE (scratch_set);
  CPU_FREE (initial_set);
  free (pinned_threads);
  free (other_threads);
  return failed == 0;
}
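For context, the skeleton above expects the including test to define getaffinity and setaffinity helpers before the final #include.  Below is a minimal, hypothetical sketch of such a wrapper in the spirit of tst-thread-affinity-pthread.c (added by this commit but not shown in this diff); the helper signatures are assumptions inferred from the calls the skeleton makes (a byte size plus a cpu_set_t pointer, returning a negative value on error with errno set), and _GNU_SOURCE (or the glibc test build environment) is assumed for the pthread_*affinity_np declarations.

/* Hypothetical wrapper: map the skeleton's getaffinity/setaffinity
   helpers onto the pthread affinity functions for the current thread.
   Returning -1 with errno set matches the skeleton's "< 0" checks and
   its use of %m in error messages.  */
#include <errno.h>
#include <pthread.h>

static int
getaffinity (size_t size, cpu_set_t *set)
{
  int ret = pthread_getaffinity_np (pthread_self (), size, set);
  if (ret != 0)
    {
      errno = ret;
      return -1;
    }
  return 0;
}

static int
setaffinity (size_t size, const cpu_set_t *set)
{
  int ret = pthread_setaffinity_np (pthread_self (), size, set);
  if (ret != 0)
    {
      errno = ret;
      return -1;
    }
  return 0;
}

#include "tst-skeleton-thread-affinity.c"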