summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorkettenis <kettenis@openbsd.org>2017-03-13 23:20:12 +0000
committerkettenis <kettenis@openbsd.org>2017-03-13 23:20:12 +0000
commit51a294dfb3432429fd38a2c33fa8b5a917ba62ef (patch)
tree86660afccc02130f47c67b2dc593736914d217ae
parent- no KERN_RND: from schwarze (diff)
downloadwireguard-openbsd-51a294dfb3432429fd38a2c33fa8b5a917ba62ef.tar.xz
wireguard-openbsd-51a294dfb3432429fd38a2c33fa8b5a917ba62ef.zip
When we do an ASID rollover, we unassign all ASIDs and do a complete
TLB flush to remove any cached translations. The problem is that we do this while we're still running with the page tables of the old process. Even if we don't actually reference any userland pages, the CPU can speculatively load translations into the TLB. And those might survive until we reassign the ASID of the old process to a new process. This new process will then see the wrong physical page, which inevitably leads to corruption of some sort. Fix this issue by delaying the TLB flush until after we switch to the page tables and ASID of the new process. ok patrick@, drahn@
-rw-r--r--sys/arch/arm64/arm64/pmap.c18
1 file changed, 13 insertions, 5 deletions
diff --git a/sys/arch/arm64/arm64/pmap.c b/sys/arch/arm64/arm64/pmap.c
index 02e79a34e15..90f98deb985 100644
--- a/sys/arch/arm64/arm64/pmap.c
+++ b/sys/arch/arm64/arm64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.24 2017/03/12 16:35:09 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.25 2017/03/13 23:20:12 kettenis Exp $ */
/*
* Copyright (c) 2008-2009,2014-2016 Dale Rahn <drahn@dalerahn.com>
*
@@ -2269,6 +2269,7 @@ pmap_show_mapping(uint64_t va)
#define MAX_ASID 256
struct pmap *pmap_asid[MAX_ASID];
int pmap_asid_id_next = 1;
+int pmap_asid_id_flush = 0;
// stupid quick allocator, flush all asid when we run out
// XXX never searches, just flushes all on rollover (or out)
@@ -2282,17 +2283,20 @@ pmap_allocate_asid(pmap_t pm)
int i, new_asid;
if (pmap_asid_id_next == MAX_ASID) {
- // out of asid, flush all
- cpu_tlb_flush();
+ /*
+ * Out of ASIDs. Reclaim them all and schedule a full
+ * TLB flush. We can't flush here as the CPU could
+ * (and would) speculatively load TLB entries for the
+ * ASID of the current pmap.
+ */
for (i = 0;i < MAX_ASID; i++) {
if (pmap_asid[i] != NULL) {
- // printf("reclaiming asid %d from %p\n", i,
- // pmap_asid[i] );
pmap_asid[i]->pm_asid = -1;
pmap_asid[i] = NULL;
}
}
pmap_asid_id_next = 1;
+ pmap_asid_id_flush = 1;
}
// locks?
@@ -2334,6 +2338,10 @@ pmap_setttb(struct proc *p, paddr_t pagedir, struct pcb *pcb)
// pm, pmap_kernel(), oasid, pm->pm_asid);
cpu_setttb(pagedir);
+ if (pmap_asid_id_flush) {
+ pmap_asid_id_flush = 0;
+ cpu_tlb_flush();
+ }
} else {
// XXX what to do if switching to kernel pmap !?!?
}