diff options
author | anton <anton@openbsd.org> | 2020-08-29 08:41:11 +0000 |
---|---|---|
committer | anton <anton@openbsd.org> | 2020-08-29 08:41:11 +0000 |
commit | ece33e2f6ca275f2d073ee51b56bb933b5c2b064 (patch) | |
tree | aa393ca15d742b908de382fdf2d827f5b981d05a /sys/dev/kcov.c | |
parent | Prepare to extend the scope of the kcov remote mutex by renaming it to (diff) | |
download | wireguard-openbsd-ece33e2f6ca275f2d073ee51b56bb933b5c2b064.tar.xz wireguard-openbsd-ece33e2f6ca275f2d073ee51b56bb933b5c2b064.zip |
Before clearing the kcov descriptor associated with a thread make sure
no other thread is currently within a remote section. Otherwise, the
remote subsystem could end up in a broken state where it doesn't reset
the necessary bits upon leaving the remote section.
Therefore, introduce the kr_barrier() routine, which waits until all
ongoing remote sections have been left. In addition, extend the scope of
the mutex to cover the fields of struct kcov_dev; this is necessary to
ensure correctness.
Reported-by: syzbot+64122a5f01be1b1abb96@syzkaller.appspotmail.com
Diffstat (limited to 'sys/dev/kcov.c')
-rw-r--r-- | sys/dev/kcov.c | 143 |
1 files changed, 102 insertions, 41 deletions
diff --git a/sys/dev/kcov.c b/sys/dev/kcov.c index 6f1bfb1c892..372a5870bc4 100644 --- a/sys/dev/kcov.c +++ b/sys/dev/kcov.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kcov.c,v 1.25 2020/08/29 08:24:33 anton Exp $ */ +/* $OpenBSD: kcov.c,v 1.26 2020/08/29 08:41:11 anton Exp $ */ /* * Copyright (c) 2018 Anton Lindqvist <anton@openbsd.org> @@ -38,17 +38,25 @@ #define KCOV_STATE_TRACE 2 #define KCOV_STATE_DYING 3 +/* + * Coverage structure. + * + * Locking: + * I immutable after creation + * M kcov_mtx + * a atomic operations + */ struct kcov_dev { - int kd_state; - int kd_mode; - int kd_unit; /* device minor */ - uintptr_t *kd_buf; /* traced coverage */ - size_t kd_nmemb; - size_t kd_size; + int kd_state; /* [M] */ + int kd_mode; /* [M] */ + int kd_unit; /* [I] device minor */ + uintptr_t *kd_buf; /* [a] traced coverage */ + size_t kd_nmemb; /* [I] */ + size_t kd_size; /* [I] */ - struct kcov_remote *kd_kr; + struct kcov_remote *kd_kr; /* [M] */ - TAILQ_ENTRY(kcov_dev) kd_entry; + TAILQ_ENTRY(kcov_dev) kd_entry; /* [M] */ }; /* @@ -78,6 +86,7 @@ struct kcov_remote *kcov_remote_register_locked(int, void *); int kcov_remote_attach(struct kcov_dev *, struct kio_remote_attach *); void kcov_remote_detach(struct kcov_dev *, struct kcov_remote *); void kr_free(struct kcov_remote *); +void kr_barrier(struct kcov_remote *); struct kcov_remote *kr_lookup(int, void *); static struct kcov_dev *kd_curproc(int); @@ -244,15 +253,22 @@ kcovopen(dev_t dev, int flag, int mode, struct proc *p) { struct kcov_dev *kd; - if (kd_lookup(minor(dev)) != NULL) + mtx_enter(&kcov_mtx); + + if (kd_lookup(minor(dev)) != NULL) { + mtx_leave(&kcov_mtx); return (EBUSY); + } if (kcov_cold) kcov_cold = 0; + mtx_leave(&kcov_mtx); kd = malloc(sizeof(*kd), M_SUBPROC, M_WAITOK | M_ZERO); kd->kd_unit = minor(dev); + mtx_enter(&kcov_mtx); TAILQ_INSERT_TAIL(&kd_list, kd, kd_entry); + mtx_leave(&kcov_mtx); return (0); } @@ -261,9 +277,13 @@ kcovclose(dev_t dev, int flag, int mode, struct proc *p) { struct kcov_dev *kd; + 
mtx_enter(&kcov_mtx); + kd = kd_lookup(minor(dev)); - if (kd == NULL) + if (kd == NULL) { + mtx_leave(&kcov_mtx); return (EINVAL); + } if (kd->kd_state == KCOV_STATE_TRACE && kd->kd_kr == NULL) { /* @@ -276,6 +296,7 @@ kcovclose(dev_t dev, int flag, int mode, struct proc *p) kd_free(kd); } + mtx_leave(&kcov_mtx); return (0); } @@ -286,9 +307,13 @@ kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) int mode; int error = 0; + mtx_enter(&kcov_mtx); + kd = kd_lookup(minor(dev)); - if (kd == NULL) + if (kd == NULL) { + mtx_leave(&kcov_mtx); return (ENXIO); + } switch (cmd) { case KIOSETBUFSIZE: @@ -320,6 +345,8 @@ kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) } kd->kd_state = KCOV_STATE_READY; kd->kd_mode = KCOV_MODE_NONE; + if (kd->kd_kr != NULL) + kr_barrier(kd->kd_kr); p->p_kd = NULL; break; case KIOREMOTEATTACH: @@ -329,6 +356,7 @@ kcovioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) default: error = ENOTTY; } + mtx_leave(&kcov_mtx); return (error); } @@ -337,19 +365,24 @@ paddr_t kcovmmap(dev_t dev, off_t offset, int prot) { struct kcov_dev *kd; - paddr_t pa; + paddr_t pa = -1; vaddr_t va; + mtx_enter(&kcov_mtx); + kd = kd_lookup(minor(dev)); if (kd == NULL) - return (paddr_t)(-1); + goto out; if (offset < 0 || offset >= kd->kd_nmemb * KCOV_BUF_MEMB_SIZE) - return (paddr_t)(-1); + goto out; va = (vaddr_t)kd->kd_buf + offset; if (pmap_extract(pmap_kernel(), va, &pa) == FALSE) - return (paddr_t)(-1); + pa = -1; + +out: + mtx_leave(&kcov_mtx); return (pa); } @@ -358,17 +391,25 @@ kcov_exit(struct proc *p) { struct kcov_dev *kd; + mtx_enter(&kcov_mtx); + kd = p->p_kd; - if (kd == NULL) + if (kd == NULL) { + mtx_leave(&kcov_mtx); return; + } if (kd->kd_state == KCOV_STATE_DYING) { kd_free(kd); } else { kd->kd_state = KCOV_STATE_READY; kd->kd_mode = KCOV_MODE_NONE; + if (kd->kd_kr != NULL) + kr_barrier(kd->kd_kr); } p->p_kd = NULL; + + mtx_leave(&kcov_mtx); } struct kcov_dev * @@ -376,6 +417,8 @@ 
kd_lookup(int unit) { struct kcov_dev *kd; + MUTEX_ASSERT_LOCKED(&kcov_mtx); + TAILQ_FOREACH(kd, &kd_list, kd_entry) { if (kd->kd_unit == unit) return (kd); @@ -388,6 +431,7 @@ kd_init(struct kcov_dev *kd, unsigned long nmemb) { void *buf; size_t size; + int error; KASSERT(kd->kd_buf == NULL); @@ -398,20 +442,30 @@ kd_init(struct kcov_dev *kd, unsigned long nmemb) return (EINVAL); size = roundup(nmemb * KCOV_BUF_MEMB_SIZE, PAGE_SIZE); + mtx_leave(&kcov_mtx); buf = km_alloc(size, &kv_any, &kp_zero, &kd_waitok); - if (buf == NULL) - return (ENOMEM); + if (buf == NULL) { + error = ENOMEM; + goto err; + } /* km_malloc() can sleep, ensure the race was won. */ if (kd->kd_state != KCOV_STATE_NONE) { - km_free(buf, size, &kv_any, &kp_zero); - return (EBUSY); + error = EBUSY; + goto err; } + mtx_enter(&kcov_mtx); kd->kd_buf = buf; /* The first element is reserved to hold the number of used elements. */ kd->kd_nmemb = nmemb - 1; kd->kd_size = size; kd->kd_state = KCOV_STATE_READY; return (0); + +err: + if (buf != NULL) + km_free(buf, size, &kv_any, &kp_zero); + mtx_enter(&kcov_mtx); + return (error); } void @@ -419,14 +473,19 @@ kd_free(struct kcov_dev *kd) { struct kcov_remote *kr; + MUTEX_ASSERT_LOCKED(&kcov_mtx); + TAILQ_REMOVE(&kd_list, kd, kd_entry); kr = kd->kd_kr; if (kr != NULL) kcov_remote_detach(kd, kr); - if (kd->kd_buf != NULL) + if (kd->kd_buf != NULL) { + mtx_leave(&kcov_mtx); km_free(kd->kd_buf, kd->kd_size, &kv_any, &kp_zero); + mtx_enter(&kcov_mtx); + } free(kd, M_SUBPROC, sizeof(*kd)); } @@ -543,7 +602,7 @@ kcov_remote_leave(int subsystem, void *id) if (kr == NULL || curproc->p_kd == NULL || curproc->p_kd != kr->kr_kd) goto out; curproc->p_kd = NULL; - if (--kr->kr_nsections == 0 && kr->kr_state == KCOV_STATE_DYING) + if (--kr->kr_nsections == 0) wakeup(kr); out: mtx_leave(&kcov_mtx); @@ -611,46 +670,40 @@ int kcov_remote_attach(struct kcov_dev *kd, struct kio_remote_attach *arg) { struct kcov_remote *kr = NULL; - int error = 0; + + 
MUTEX_ASSERT_LOCKED(&kcov_mtx); if (kd->kd_state != KCOV_STATE_READY) return (EBUSY); - mtx_enter(&kcov_mtx); if (arg->subsystem == KCOV_REMOTE_COMMON) kr = kcov_remote_register_locked(KCOV_REMOTE_COMMON, curproc->p_p); - if (kr == NULL) { - error = EINVAL; - goto out; - } - if (kr->kr_state != KCOV_STATE_NONE) { - error = EBUSY; - goto out; - } + if (kr == NULL) + return (EINVAL); + if (kr->kr_state != KCOV_STATE_NONE) + return (EBUSY); kr->kr_state = KCOV_STATE_READY; kr->kr_kd = kd; kd->kd_kr = kr; - -out: - mtx_leave(&kcov_mtx); - return (error); + return (0); } void kcov_remote_detach(struct kcov_dev *kd, struct kcov_remote *kr) { - mtx_enter(&kcov_mtx); + MUTEX_ASSERT_LOCKED(&kcov_mtx); + KASSERT(kd == kr->kr_kd); if (kr->kr_subsystem == KCOV_REMOTE_COMMON) { kr_free(kr); } else { kr->kr_state = KCOV_STATE_NONE; + kr_barrier(kr); kd->kd_kr = NULL; kr->kr_kd = NULL; } - mtx_leave(&kcov_mtx); } void @@ -659,15 +712,23 @@ kr_free(struct kcov_remote *kr) MUTEX_ASSERT_LOCKED(&kcov_mtx); kr->kr_state = KCOV_STATE_DYING; + kr_barrier(kr); if (kr->kr_kd != NULL) kr->kr_kd->kd_kr = NULL; kr->kr_kd = NULL; - if (kr->kr_nsections > 0) - msleep_nsec(kr, &kcov_mtx, PWAIT, "kcov", INFSLP); TAILQ_REMOVE(&kr_list, kr, kr_entry); pool_put(&kr_pool, kr); } +void +kr_barrier(struct kcov_remote *kr) +{ + MUTEX_ASSERT_LOCKED(&kcov_mtx); + + while (kr->kr_nsections > 0) + msleep_nsec(kr, &kcov_mtx, PWAIT, "kcov", INFSLP); +} + struct kcov_remote * kr_lookup(int subsystem, void *id) { |