path: root/sys/kern/kern_sched.c
author     kettenis <kettenis@openbsd.org>  2018-06-19 19:29:52 +0000
committer  kettenis <kettenis@openbsd.org>  2018-06-19 19:29:52 +0000
commit     96c11352863a7f6240b4e5e388052f414b75f95b (patch)
tree       2cc9d15a1be01db410a8dc3477c12b027d4e0315 /sys/kern/kern_sched.c
parent     Update update-patches description, document EDIT_PATCHES (diff)
SMT (Simultaneous Multi-Threading) implementations typically share
TLBs and L1 caches between threads. This can make cache timing attacks a lot
easier, and we strongly suspect that it will make several Spectre-class bugs
exploitable, especially on Intel's SMT implementation, which is better known
as Hyper-Threading. We really should not run different security domains on
different processor threads of the same core. Unfortunately, changing our
scheduler to take this into account is far from trivial.

Since many modern machines no longer provide the ability to disable
Hyper-Threading in the BIOS setup, provide a way to disable the use of
additional processor threads in our scheduler. And since we suspect there are
serious risks, we disable them by default. This can be controlled through a
new hw.smt sysctl. For now this only works on Intel CPUs when running
OpenBSD/amd64, but we plan to extend the feature to CPUs from other vendors
and other hardware architectures.

Note that SMT doesn't necessarily have a positive effect on performance; it
depends highly on the workload. In all likelihood it will actually slow down
most workloads if you have a CPU with more than two cores.

ok deraadt@
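From the command line the new knob is reachable as the hw.smt sysctl
(sysctl hw.smt to query and, as root, sysctl hw.smt=1 to enable the extra
threads). The userland sketch below is not part of this commit; it shows the
equivalent sysctl(2) calls from C and assumes an HW_SMT MIB entry under
CTL_HW, expected to come from the companion <sys/sysctl.h> change.

/*
 * Userland sketch (not part of this commit): query and optionally set
 * hw.smt via sysctl(2).  HW_SMT is assumed to be provided by the
 * accompanying <sys/sysctl.h> change.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(int argc, char *argv[])
{
        int mib[2] = { CTL_HW, HW_SMT };
        int cur, val;
        size_t len = sizeof(cur);

        if (sysctl(mib, 2, &cur, &len, NULL, 0) == -1)
                err(1, "sysctl hw.smt");
        printf("hw.smt=%d\n", cur);

        if (argc > 1) {
                val = atoi(argv[1]);    /* 0 = siblings off, 1 = siblings on */
                if (sysctl(mib, 2, NULL, NULL, &val, sizeof(val)) == -1)
                        err(1, "sysctl hw.smt=%d", val);
        }
        return 0;
}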
Diffstat (limited to 'sys/kern/kern_sched.c')
-rw-r--r--  sys/kern/kern_sched.c | 54
1 file changed, 52 insertions, 2 deletions
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index cf45011b853..f566795dc60 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/*      $OpenBSD: kern_sched.c,v 1.47 2017/12/14 23:21:04 dlg Exp $    */
+/*      $OpenBSD: kern_sched.c,v 1.48 2018/06/19 19:29:52 kettenis Exp $       */
 /*
  * Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
  *
@@ -56,6 +56,8 @@ uint64_t sched_wasidle;        /* Times we came out of idle */
 struct taskq *sbartq;
 #endif
 
+int sched_smt;
+
 /*
  * A few notes about cpu_switchto that is implemented in MD code.
  *
@@ -97,6 +99,11 @@ sched_init_cpu(struct cpu_info *ci)
          * structures.
          */
         cpuset_init_cpu(ci);
+
+#ifdef __HAVE_CPU_TOPOLOGY
+       if (!sched_smt && ci->ci_smt_id > 0)
+               return;
+#endif
         cpuset_add(&sched_all_cpus, ci);
 }
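The hunk above keeps secondary hardware threads (ci_smt_id > 0) out of
sched_all_cpus when sched_smt is off, so only the first thread of each core
is ever eligible to run work. A standalone illustration of that eligibility
rule follows; it uses made-up data, not real struct cpu_info contents.

/*
 * Standalone illustration (not kernel code) of the rule used above:
 * with sched_smt == 0, only the first hardware thread of each core
 * (smt_id == 0) is added to the set of schedulable CPUs.
 */
#include <stdio.h>

struct fake_cpu {
        int num;        /* logical CPU number */
        int smt_id;     /* thread index within its core */
};

int
main(void)
{
        struct fake_cpu cpus[] = {
                { 0, 0 }, { 1, 1 },    /* core 0: two SMT siblings */
                { 2, 0 }, { 3, 1 },    /* core 1: two SMT siblings */
        };
        int sched_smt = 0;             /* the new knob, off by default */
        int i;

        for (i = 0; i < 4; i++) {
                if (!sched_smt && cpus[i].smt_id > 0)
                        continue;      /* skip secondary threads */
                printf("cpu%d is schedulable\n", cpus[i].num);
        }
        return 0;
}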
@@ -615,9 +622,13 @@ sched_start_secondary_cpus(void)
                if (CPU_IS_PRIMARY(ci))
                        continue;
-               cpuset_add(&sched_all_cpus, ci);
                atomic_clearbits_int(&spc->spc_schedflags,
                    SPCF_SHOULDHALT | SPCF_HALTED);
+#ifdef __HAVE_CPU_TOPOLOGY
+               if (!sched_smt && ci->ci_smt_id > 0)
+                       continue;
+#endif
+               cpuset_add(&sched_all_cpus, ci);
        }
 }
@@ -793,3 +804,42 @@ cpuset_complement(struct cpuset *to, struct cpuset *a, struct cpuset *b)
        for (i = 0; i < CPUSET_ASIZE(ncpus); i++)
                to->cs_set[i] = b->cs_set[i] & ~a->cs_set[i];
 }
+
+#ifdef __HAVE_CPU_TOPOLOGY
+
+#include <sys/sysctl.h>
+
+int
+sysctl_hwsmt(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+       CPU_INFO_ITERATOR cii;
+       struct cpu_info *ci;
+       int err, newsmt;
+
+       newsmt = sched_smt;
+       err = sysctl_int(oldp, oldlenp, newp, newlen, &newsmt);
+       if (err)
+               return err;
+       if (newsmt > 1)
+               newsmt = 1;
+       if (newsmt < 0)
+               newsmt = 0;
+       if (newsmt == sched_smt)
+               return 0;
+
+       sched_smt = newsmt;
+       CPU_INFO_FOREACH(cii, ci) {
+               if (CPU_IS_PRIMARY(ci))
+                       continue;
+               if (ci->ci_smt_id == 0)
+                       continue;
+               if (sched_smt)
+                       cpuset_add(&sched_all_cpus, ci);
+               else
+                       cpuset_del(&sched_all_cpus, ci);
+       }
+
+       return 0;
+}
+
+#endif
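sysctl_hwsmt() still has to be reachable from the hw sysctl tree; that wiring
is not part of this file. The fragment below sketches what the dispatch could
look like, assuming an HW_SMT MIB value and a case in the MI hw sysctl switch;
both are assumptions inferred from the hw.smt name in the commit message.

/*
 * Hypothetical excerpt of the hw sysctl dispatch (not part of this diff):
 * hand hw.smt requests to the handler added above.
 */
#ifdef __HAVE_CPU_TOPOLOGY
        case HW_SMT:
                return (sysctl_hwsmt(oldp, oldlenp, newp, newlen));
#endif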