summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_sched.c
diff options
context:
space:
mode:
author    dlg <dlg@openbsd.org>  2017-12-14 23:21:04 +0000
committer dlg <dlg@openbsd.org>  2017-12-14 23:21:04 +0000
commit    0e4e575207c5ab24aa65ccf970655a99237396c1 (patch)
tree      fb48f92075dece640fd03c18ec8a71338f2eeff4 /sys/kern/kern_sched.c
parent    set Location header for 307 and 308 status codes (diff)
download  wireguard-openbsd-0e4e575207c5ab24aa65ccf970655a99237396c1.tar.xz
download  wireguard-openbsd-0e4e575207c5ab24aa65ccf970655a99237396c1.zip
make sched_barrier use cond_wait/cond_signal.
previously the code was using a percpu flag to manage the sleeps/wakeups, which means multiple threads waiting for a barrier on a cpu could race. moving to a cond struct on the stack fixes this. while here, get rid of the sbar taskq and just use systqmp instead. the barrier tasks are short, so there's no real downside. ok mpi@
Diffstat (limited to 'sys/kern/kern_sched.c')
-rw-r--r--  sys/kern/kern_sched.c | 36
1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 54705ee5019..cf45011b853 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.46 2017/11/28 16:22:27 visa Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.47 2017/12/14 23:21:04 dlg Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -227,12 +227,6 @@ sched_exit(struct proc *p)
void
sched_init_runqueues(void)
{
-#ifdef MULTIPROCESSOR
- sbartq = taskq_create("sbar", 1, IPL_VM,
- TASKQ_MPSAFE | TASKQ_CANTSLEEP);
- if (sbartq == NULL)
- panic("unable to create sbar taskq");
-#endif
}
void
@@ -658,24 +652,28 @@ sched_stop_secondary_cpus(void)
}
}
+struct sched_barrier_state {
+ struct cpu_info *ci;
+ struct cond cond;
+};
+
void
sched_barrier_task(void *arg)
{
- struct cpu_info *ci = arg;
+ struct sched_barrier_state *sb = arg;
+ struct cpu_info *ci = sb->ci;
sched_peg_curproc(ci);
- ci->ci_schedstate.spc_barrier = 1;
- wakeup(&ci->ci_schedstate.spc_barrier);
+ cond_signal(&sb->cond);
atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
}
void
sched_barrier(struct cpu_info *ci)
{
- struct sleep_state sls;
+ struct sched_barrier_state sb;
struct task task;
CPU_INFO_ITERATOR cii;
- struct schedstate_percpu *spc;
if (ci == NULL) {
CPU_INFO_FOREACH(cii, ci) {
@@ -688,14 +686,12 @@ sched_barrier(struct cpu_info *ci)
if (ci == curcpu())
return;
- task_set(&task, sched_barrier_task, ci);
- spc = &ci->ci_schedstate;
- spc->spc_barrier = 0;
- task_add(sbartq, &task);
- while (!spc->spc_barrier) {
- sleep_setup(&sls, &spc->spc_barrier, PWAIT, "sbar");
- sleep_finish(&sls, !spc->spc_barrier);
- }
+ sb.ci = ci;
+ cond_init(&sb.cond);
+ task_set(&task, sched_barrier_task, &sb);
+
+ task_add(systqmp, &task);
+ cond_wait(&sb.cond, "sbar");
}
#else