author | 2018-08-18 15:42:19 +0000
---|---
committer | 2018-08-18 15:42:19 +0000
commit | d5feffbf859e1937fc1f92248eda4ff4859f8a9a (patch)
tree | 358fcb42e759406a8681b8a264b9ceb46991e47e
parent | After calling getaddrinfo(3) both on rfc868 and ntp cases we can drop the "dns" (diff)
Add support for flushing the instruction cache of other processes. This is
needed for inserting and removing breakpoints through ptrace(2).
The approach here only works for CPUs that have a PIPT instruction cache
as we use aliased mappings to invalidate the instruction cache. That doesn't
work on CPUs that have a virtually indexed instruction cache.
ok deraadt@, visa@
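For context, a hedged sketch of the consumer side: a debugger that plants a breakpoint in a traced, stopped process with ptrace(2) rewrites that process's text, so the kernel must make sure the target's instruction cache no longer holds the old instruction. The helper name `set_breakpoint` and the AArch64 `BRK #0` encoding below are illustrative assumptions, not part of this commit.

```c
#include <sys/types.h>
#include <sys/ptrace.h>

#include <err.h>

/* AArch64 "BRK #0" breakpoint instruction; assumed encoding for illustration. */
#define BRK_INSN	((int)0xd4200000U)

/* Hypothetical helper: plant a breakpoint in an already-traced, stopped child. */
int
set_breakpoint(pid_t pid, caddr_t addr, int *saved)
{
	/* Remember the original instruction so it can be restored later. */
	*saved = ptrace(PT_READ_I, pid, addr, 0);

	/*
	 * Overwrite it with a breakpoint.  Writing another process's
	 * text is what requires the kernel to flush that process's
	 * instruction cache for the modified page.
	 */
	if (ptrace(PT_WRITE_I, pid, addr, BRK_INSN) == -1) {
		warn("PT_WRITE_I");
		return -1;
	}
	return 0;
}
```

On the kernel side, that write path is what ends up needing the pmap_proc_iflush() change in the diff below when the tracer and the traced process are not the same.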
-rw-r--r-- | sys/arch/arm64/arm64/pmap.c | 40
1 file changed, 37 insertions, 3 deletions
diff --git a/sys/arch/arm64/arm64/pmap.c b/sys/arch/arm64/arm64/pmap.c
index df734710e98..9beb6047c0d 100644
--- a/sys/arch/arm64/arm64/pmap.c
+++ b/sys/arch/arm64/arm64/pmap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.c,v 1.56 2018/08/16 15:36:04 patrick Exp $	*/
+/*	$OpenBSD: pmap.c,v 1.57 2018/08/18 15:42:19 kettenis Exp $	*/
 /*
  * Copyright (c) 2008-2009,2014-2016 Dale Rahn <drahn@dalerahn.com>
  *
@@ -1537,9 +1537,43 @@ pmap_init(void)
 void
 pmap_proc_iflush(struct process *pr, vaddr_t va, vsize_t len)
 {
-	/* We only need to do anything if it is the current process. */
-	if (pr == curproc->p_p)
+	struct pmap *pm = vm_map_pmap(&pr->ps_vmspace->vm_map);
+	vaddr_t kva = zero_page + cpu_number() * PAGE_SIZE;
+	paddr_t pa;
+	vsize_t clen;
+	vsize_t off;
+
+	/*
+	 * If we're called for the current process, we can simply
+	 * flush the data cache to the point of unification and
+	 * invalidate the instruction cache.
+	 */
+	if (pr == curproc->p_p) {
 		cpu_icache_sync_range(va, len);
+		return;
+	}
+
+	/*
+	 * Flush and invalidate through an aliased mapping.  This
+	 * assumes the instruction cache is PIPT.  That is only true
+	 * for some of the hardware we run on.
+	 */
+	while (len > 0) {
+		/* add one to always round up to the next page */
+		clen = round_page(va + 1) - va;
+		if (clen > len)
+			clen = len;
+
+		off = va - trunc_page(va);
+		if (pmap_extract(pm, trunc_page(va), &pa)) {
+			pmap_kenter_pa(kva, pa, PROT_READ|PROT_WRITE);
+			cpu_icache_sync_range(kva + off, clen);
+			pmap_kremove_pg(kva);
+		}
+
+		len -= clen;
+		va += clen;
+	}
 }
 
 void
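The loop in the new code walks the range one page at a time so each chunk can be mapped and flushed through a single per-CPU alias page. Below is a minimal userland sketch of just the chunking arithmetic, assuming 4 KiB pages and simplified round_page()/trunc_page() macros standing in for the kernel's machine-dependent ones.

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define trunc_page(x)	((x) & ~(PAGE_SIZE - 1))

int
main(void)
{
	/* An unaligned start address and a length spanning several pages. */
	unsigned long va = 0x10ffc, len = 0x2100, clen;

	while (len > 0) {
		/* "va + 1" rounds up to the next boundary even when va is aligned. */
		clen = round_page(va + 1) - va;
		if (clen > len)
			clen = len;
		printf("flush page 0x%05lx offset 0x%03lx length 0x%04lx\n",
		    trunc_page(va), va - trunc_page(va), clen);
		va += clen;
		len -= clen;
	}
	return 0;
}
```

Because no chunk ever crosses a page boundary, pmap_proc_iflush() can map exactly one physical page at kva per iteration and sync only the [off, off + clen) window of it.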