summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_task.c
diff options
context:
space:
mode:
authordlg <dlg@openbsd.org>2019-04-28 04:20:40 +0000
committerdlg <dlg@openbsd.org>2019-04-28 04:20:40 +0000
commit60aa962e73cae02e1e20bd0158dfeeaca6adff42 (patch)
treec18830273d8bd643957273e71f58fb9d7358166e /sys/kern/kern_task.c
parentTweak; work in progress. (diff)
downloadwireguard-openbsd-60aa962e73cae02e1e20bd0158dfeeaca6adff42.tar.xz
wireguard-openbsd-60aa962e73cae02e1e20bd0158dfeeaca6adff42.zip
add WITNESS support to barriers modelled on the timeout stuff visa did.
if a taskq takes a lock, and something holding that lock calls taskq_barrier, there's a potential deadlock. detect this as a lock order problem when witness is enabled. task_del conditionally followed by taskq_barrier is a common pattern, so add a taskq_del_barrier wrapper for it that unconditionally checks for the deadlock, like timeout_del_barrier. ok visa@
Diffstat (limited to 'sys/kern/kern_task.c')
-rw-r--r--sys/kern/kern_task.c70
1 file changed, 65 insertions, 5 deletions
diff --git a/sys/kern/kern_task.c b/sys/kern/kern_task.c
index c5e824636c9..74f3ea4ab18 100644
--- a/sys/kern/kern_task.c
+++ b/sys/kern/kern_task.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_task.c,v 1.24 2019/04/01 03:23:45 dlg Exp $ */
+/* $OpenBSD: kern_task.c,v 1.25 2019/04/28 04:20:40 dlg Exp $ */
/*
* Copyright (c) 2013 David Gwynne <dlg@openbsd.org>
@@ -23,6 +23,18 @@
#include <sys/kthread.h>
#include <sys/task.h>
#include <sys/proc.h>
+#include <sys/witness.h>
+
+#ifdef WITNESS
+
+static struct lock_type taskq_lock_type = {
+ .lt_name = "taskq"
+};
+
+#define TASKQ_LOCK_FLAGS LO_WITNESS | LO_INITIALIZED | LO_SLEEPABLE | \
+ (LO_CLASS_RWLOCK << LO_CLASSSHIFT)
+
+#endif /* WITNESS */
struct taskq {
enum {
@@ -37,26 +49,45 @@ struct taskq {
struct mutex tq_mtx;
struct task_list tq_worklist;
+#ifdef WITNESS
+ struct lock_object tq_lock_object;
+#endif
};
+static const char taskq_sys_name[] = "systq";
+
struct taskq taskq_sys = {
TQ_S_CREATED,
0,
1,
0,
- "systq",
+ taskq_sys_name,
MUTEX_INITIALIZER(IPL_HIGH),
- TAILQ_HEAD_INITIALIZER(taskq_sys.tq_worklist)
+ TAILQ_HEAD_INITIALIZER(taskq_sys.tq_worklist),
+#ifdef WITNESS
+ {
+ .lo_name = taskq_sys_name,
+ .lo_flags = TASKQ_LOCK_FLAGS,
+ },
+#endif
};
+static const char taskq_sys_mp_name[] = "systqmp";
+
struct taskq taskq_sys_mp = {
TQ_S_CREATED,
0,
1,
TASKQ_MPSAFE,
- "systqmp",
+ taskq_sys_mp_name,
MUTEX_INITIALIZER(IPL_HIGH),
- TAILQ_HEAD_INITIALIZER(taskq_sys_mp.tq_worklist)
+ TAILQ_HEAD_INITIALIZER(taskq_sys_mp.tq_worklist),
+#ifdef WITNESS
+ {
+ .lo_name = taskq_sys_mp_name,
+ .lo_flags = TASKQ_LOCK_FLAGS,
+ },
+#endif
};
struct taskq *const systq = &taskq_sys;
@@ -73,7 +104,9 @@ void taskq_thread(void *);
void
taskq_init(void)
{
+ WITNESS_INIT(&systq->tq_lock_object, &taskq_lock_type);
kthread_create_deferred(taskq_create_thread, systq);
+ WITNESS_INIT(&systqmp->tq_lock_object, &taskq_lock_type);
kthread_create_deferred(taskq_create_thread, systqmp);
}
@@ -96,6 +129,13 @@ taskq_create(const char *name, unsigned int nthreads, int ipl,
mtx_init_flags(&tq->tq_mtx, ipl, name, 0);
TAILQ_INIT(&tq->tq_worklist);
+#ifdef WITNESS
+ memset(&tq->tq_lock_object, 0, sizeof(tq->tq_lock_object));
+ tq->tq_lock_object.lo_name = name;
+ tq->tq_lock_object.lo_flags = TASKQ_LOCK_FLAGS;
+ witness_init(&tq->tq_lock_object, &taskq_lock_type);
+#endif
+
/* try to create a thread to guarantee that tasks will be serviced */
kthread_create_deferred(taskq_create_thread, tq);
@@ -181,8 +221,24 @@ taskq_barrier(struct taskq *tq)
struct cond c = COND_INITIALIZER();
struct task t = TASK_INITIALIZER(taskq_barrier_task, &c);
+ WITNESS_CHECKORDER(&tq->tq_lock_object, LOP_NEWORDER, NULL);
+
task_add(tq, &t);
+ cond_wait(&c, "tqbar");
+}
+void
+taskq_del_barrier(struct taskq *tq, struct task *del)
+{
+ struct cond c = COND_INITIALIZER();
+ struct task t = TASK_INITIALIZER(taskq_barrier_task, &c);
+
+ WITNESS_CHECKORDER(&tq->tq_lock_object, LOP_NEWORDER, NULL);
+
+ if (task_del(tq, del))
+ return;
+
+ task_add(tq, &t);
cond_wait(&c, "tqbar");
}
@@ -281,8 +337,12 @@ taskq_thread(void *xtq)
if (ISSET(tq->tq_flags, TASKQ_MPSAFE))
KERNEL_UNLOCK();
+ WITNESS_CHECKORDER(&tq->tq_lock_object, LOP_NEWORDER, NULL);
+
while (taskq_next_work(tq, &work)) {
+ WITNESS_LOCK(&tq->tq_lock_object, 0);
(*work.t_func)(work.t_arg);
+ WITNESS_UNLOCK(&tq->tq_lock_object, 0);
sched_pause(yield);
}