author		art <art@openbsd.org>	2010-07-01 21:27:39 +0000
committer	art <art@openbsd.org>	2010-07-01 21:27:39 +0000
commit		9b40e6e1d4a2183f03fb341bc3972233e67fb400 (patch)
tree		f3d8b271e3f984d14dbcc9b4d1635dd34b6042f8
parent		Fix memory leak by adding a missing free(lsa). (diff)
Implement vs{,un}lock_device and use it for physio.
Just like normal vs{,un}lock, but if the pages we get are not DMA accessible, we bounce them; if they are DMA accessible, the functions behave exactly like normal vslock. The plan for the future is to have fault_wire allocate DMA-accessible pages so that we don't need to bounce (especially in cases where the same buffer is reused for physio over and over again), but for now, keep it as simple as possible.
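
To make the new contract concrete, here is a minimal caller-side sketch (not part of the commit) modeled on the physio() changes below. The function name example_physio_once, and the base/len parameters, are hypothetical stand-ins for the iovec bookkeeping that physio() does per segment; the strategy call and biowait are elided.

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <uvm/uvm_extern.h>

int
example_physio_once(struct proc *p, struct buf *bp, void *base, size_t len)
{
	void *map = NULL;
	int error;

	/*
	 * Wire the user pages. If any of them are not DMA reachable,
	 * map comes back non-NULL, pointing at a kernel bounce copy;
	 * otherwise it stays NULL and this behaves like uvm_vslock().
	 */
	error = uvm_vslock_device(p, base, len,
	    VM_PROT_READ | VM_PROT_WRITE, &map);
	if (error)
		return (error);

	if (map) {
		/* Bounced: the transfer runs on the DMA-reachable copy. */
		bp->b_data = map;
	} else {
		/* Directly reachable: map the user buffer as before. */
		bp->b_data = base;
		vmapbuf(bp, len);
	}

	/* ... (*strategy)(bp) and wait for completion, as in physio() ... */

	if (map == NULL)
		vunmapbuf(bp, len);
	/*
	 * For a bounced transfer this copies the data back out to the
	 * user buffer, then unwires and frees the bounce pages.
	 */
	uvm_vsunlock_device(p, base, len, map);
	return (0);
}

Note that the caller must pass the same map pointer it got from uvm_vslock_device() back to uvm_vsunlock_device(), since that is how the unlock side knows whether bounce pages need to be copied out and freed.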
-rw-r--r--	sys/kern/kern_physio.c	21
-rw-r--r--	sys/uvm/uvm_extern.h	7
-rw-r--r--	sys/uvm/uvm_glue.c	99
3 files changed, 115 insertions(+), 12 deletions(-)
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index 0dae0225874..546b18fd27e 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_physio.c,v 1.30 2010/06/23 14:18:32 thib Exp $ */
+/* $OpenBSD: kern_physio.c,v 1.31 2010/07/01 21:27:39 art Exp $ */
/* $NetBSD: kern_physio.c,v 1.28 1997/05/19 10:43:28 pk Exp $ */
/*-
@@ -113,6 +113,8 @@ physio(void (*strategy)(struct buf *), struct buf *bp, dev_t dev, int flags,
for (i = 0; i < uio->uio_iovcnt; i++) {
iovp = &uio->uio_iov[i];
while (iovp->iov_len > 0) {
+ void *map = NULL;
+
/*
* [mark the buffer busy for physical I/O]
* (i.e. set B_PHYS (because it's an I/O to user
@@ -124,7 +126,6 @@ physio(void (*strategy)(struct buf *), struct buf *bp, dev_t dev, int flags,
/* [set up the buffer for a maximum-sized transfer] */
bp->b_blkno = btodb(uio->uio_offset);
- bp->b_data = iovp->iov_base;
/*
* Because iov_len is unsigned but b_bcount is signed,
@@ -157,15 +158,20 @@ physio(void (*strategy)(struct buf *), struct buf *bp, dev_t dev, int flags,
* saves it in b_saveaddr. However, vunmapbuf()
* restores it.
*/
- error = uvm_vslock(p, bp->b_data, todo,
+ error = uvm_vslock_device(p, iovp->iov_base, todo,
(flags & B_READ) ?
- VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ);
+ VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ, &map);
if (error) {
bp->b_flags |= B_ERROR;
bp->b_error = error;
goto after_unlock;
}
- vmapbuf(bp, todo);
+ if (map) {
+ bp->b_data = map;
+ } else {
+ bp->b_data = iovp->iov_base;
+ vmapbuf(bp, todo);
+ }
/* [call strategy to start the transfer] */
(*strategy)(bp);
@@ -194,8 +200,9 @@ physio(void (*strategy)(struct buf *), struct buf *bp, dev_t dev, int flags,
* [unlock the part of the address space previously
* locked]
*/
- vunmapbuf(bp, todo);
- uvm_vsunlock(p, bp->b_data, todo);
+ if (!map)
+ vunmapbuf(bp, todo);
+ uvm_vsunlock_device(p, iovp->iov_base, todo, map);
after_unlock:
/* remember error value (save a splbio/splx pair) */
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index e9ea11298e5..b70dd6a903d 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.87 2010/06/27 03:03:49 thib Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.88 2010/07/01 21:27:39 art Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -503,6 +503,11 @@ int uvm_vslock(struct proc *, caddr_t, size_t,
vm_prot_t);
void uvm_vsunlock(struct proc *, caddr_t, size_t);
+int uvm_vslock_device(struct proc *, void *, size_t,
+ vm_prot_t, void **);
+void uvm_vsunlock_device(struct proc *, void *, size_t,
+ void *);
+
/* uvm_init.c */
void uvm_init(void);
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 453e524f797..3783e418b0a 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_glue.c,v 1.51 2010/06/30 20:20:18 thib Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.52 2010/07/01 21:27:39 art Exp $ */
/* $NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $ */
/*
@@ -157,13 +157,12 @@ uvm_chgkprot(caddr_t addr, size_t len, int rw)
* uvm_vslock: wire user memory for I/O
*
* - called from physio and sys___sysctl
- * - XXXCDC: consider nuking this (or making it a macro?)
*/
int
uvm_vslock(struct proc *p, caddr_t addr, size_t len, vm_prot_t access_type)
{
- vm_map_t map;
+ struct vm_map *map;
vaddr_t start, end;
int rv;
@@ -182,7 +181,6 @@ uvm_vslock(struct proc *p, caddr_t addr, size_t len, vm_prot_t access_type)
* uvm_vsunlock: unwire user memory wired by uvm_vslock()
*
* - called from physio and sys___sysctl
- * - XXXCDC: consider nuking this (or making it a macro?)
*/
void
@@ -199,6 +197,99 @@ uvm_vsunlock(struct proc *p, caddr_t addr, size_t len)
}
/*
+ * uvm_vslock_device: wire user memory, make sure it's device reachable
+ * and bounce if necessary.
+ * Always bounces for now.
+ */
+int
+uvm_vslock_device(struct proc *p, void *addr, size_t len,
+ vm_prot_t access_type, void **retp)
+{
+ struct vm_page *pg;
+ struct pglist pgl;
+ int npages;
+ vaddr_t start, end, off;
+ vaddr_t sva, va;
+ vsize_t sz;
+ int error, i;
+
+ start = trunc_page((vaddr_t)addr);
+ end = round_page((vaddr_t)addr + len);
+ sz = end - start;
+ off = (vaddr_t)addr - start;
+ if (end <= start)
+ return (EINVAL);
+
+ if ((error = uvm_fault_wire(&p->p_vmspace->vm_map, start, end,
+ access_type))) {
+ return (error);
+ }
+
+ npages = atop(sz);
+ for (i = 0; i < npages; i++) {
+ paddr_t pa;
+
+ if (!pmap_extract(p->p_vmspace->vm_map.pmap,
+ start + ptoa(i), &pa))
+ return (EFAULT);
+ if (!PADDR_IS_DMA_REACHABLE(pa))
+ break;
+ }
+ if (i == npages) {
+ *retp = NULL;
+ return (0);
+ }
+
+ if ((va = uvm_km_valloc(kernel_map, sz)) == 0) {
+ return (ENOMEM);
+ }
+ TAILQ_INIT(&pgl);
+ if (uvm_pglistalloc(npages * PAGE_SIZE, dma_constraint.ucr_low,
+ dma_constraint.ucr_high, 0, 0, &pgl, npages, UVM_PLA_WAITOK)) {
+ uvm_km_free(kernel_map, va, sz);
+ return (ENOMEM);
+ }
+
+ sva = va;
+ while ((pg = TAILQ_FIRST(&pgl)) != NULL) {
+ TAILQ_REMOVE(&pgl, pg, pageq);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
+ va += PAGE_SIZE;
+ }
+ KASSERT(va == sva + sz);
+ *retp = (void *)(sva + off);
+
+ error = copyin(addr, *retp, len);
+ return (error);
+}
+
+void
+uvm_vsunlock_device(struct proc *p, void *addr, size_t len, void *map)
+{
+ vaddr_t start, end;
+ vaddr_t kva;
+ vsize_t sz;
+
+ start = trunc_page((vaddr_t)addr);
+ end = round_page((vaddr_t)addr + len);
+ sz = end - start;
+ if (end <= start)
+ return;
+
+ if (map)
+ copyout(map, addr, len);
+ uvm_fault_unwire(&p->p_vmspace->vm_map, start, end);
+
+ if (!map)
+ return;
+
+ kva = trunc_page((vaddr_t)map);
+ pmap_kremove(kva, sz);
+ uvm_km_pgremove_intrsafe(kva, kva + sz);
+ uvm_km_free(kernel_map, kva, sz);
+}
+
+/*
* uvm_fork: fork a virtual address space
*
* - the address space is copied as per parent map's inherit values