summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
authorart <art@openbsd.org>2001-12-10 04:45:31 +0000
committerart <art@openbsd.org>2001-12-10 04:45:31 +0000
commit871a420e01ed1fbe4f8fc32c1039bdd7b291ea8d (patch)
tree161c609a05c12521f8584cd9f38910cdab1cd2f0 /sys/kern
parentAdd example on how to create FQDN certificates suitable for use with isakmpd. Requires an FQDN addition to /etc/ssl/x509v3.cnf (diff)
downloadwireguard-openbsd-871a420e01ed1fbe4f8fc32c1039bdd7b291ea8d.tar.xz
wireguard-openbsd-871a420e01ed1fbe4f8fc32c1039bdd7b291ea8d.zip
Big cleanup inspired by NetBSD with some parts of the code from NetBSD.
- get rid of VOP_BALLOCN and VOP_SIZE - move the generic getpages and putpages into miscfs/genfs - create a genfs_node which must be added to the top of the private portion of each vnode for filesystems that want to use genfs_{get,put}pages - rename genfs_mmap to vop_generic_mmap
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/vfs_default.c686
-rw-r--r--sys/kern/vfs_subr.c3
-rw-r--r--sys/kern/vnode_if.c64
-rw-r--r--sys/kern/vnode_if.src22
4 files changed, 4 insertions, 771 deletions
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index df32acf1323..73414b79b42 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_default.c,v 1.14 2001/12/10 02:19:34 art Exp $ */
+/* $OpenBSD: vfs_default.c,v 1.15 2001/12/10 04:45:31 art Exp $ */
/*
* Portions of this code are:
@@ -312,690 +312,8 @@ lease_check(void *v)
return (0);
}
-/*
- * generic VM getpages routine.
- * Return PG_BUSY pages for the given range,
- * reading from backing store if necessary.
- */
-
-int
-genfs_getpages(v)
- void *v;
-{
- struct vop_getpages_args /* {
- struct vnode *a_vp;
- voff_t a_offset;
- vm_page_t *a_m;
- int *a_count;
- int a_centeridx;
- vm_prot_t a_access_type;
- int a_advice;
- int a_flags;
- } */ *ap = v;
-
- off_t newsize, diskeof, memeof;
- off_t offset, origoffset, startoffset, endoffset, raoffset;
- daddr_t lbn, blkno;
- int s, i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
- int fs_bshift, fs_bsize, dev_bshift, dev_bsize;
- int flags = ap->a_flags;
- size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
- vaddr_t kva;
- struct buf *bp, *mbp;
- struct vnode *vp = ap->a_vp;
- struct uvm_object *uobj = &vp->v_uobj;
- struct vm_page *pgs[16]; /* XXXUBC 16 */
- struct ucred *cred = curproc->p_ucred; /* XXXUBC curproc */
- boolean_t async = (flags & PGO_SYNCIO) == 0;
- boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
- boolean_t sawhole = FALSE;
- struct proc *p = curproc;
- UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
-
- UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
- vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
-
- /* XXXUBC temp limit */
- if (*ap->a_count > 16) {
- return EINVAL;
- }
-
- error = 0;
- origoffset = ap->a_offset;
- orignpages = *ap->a_count;
- error = VOP_SIZE(vp, vp->v_size, &diskeof);
- if (error) {
- return error;
- }
- if (flags & PGO_PASTEOF) {
- newsize = MAX(vp->v_size,
- origoffset + (orignpages << PAGE_SHIFT));
- error = VOP_SIZE(vp, newsize, &memeof);
- if (error) {
- return error;
- }
- } else {
- memeof = diskeof;
- }
- KASSERT(ap->a_centeridx >= 0 || ap->a_centeridx <= orignpages);
- KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
- KASSERT(orignpages > 0);
-
- /*
- * Bounds-check the request.
- */
-
- if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
- if ((flags & PGO_LOCKED) == 0) {
- simple_unlock(&uobj->vmobjlock);
- }
- UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
- origoffset, *ap->a_count, memeof,0);
- return EINVAL;
- }
-
- /*
- * For PGO_LOCKED requests, just return whatever's in memory.
- */
-
- if (flags & PGO_LOCKED) {
- uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
- UFP_NOWAIT|UFP_NOALLOC|UFP_NORDONLY);
-
- return ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
- }
-
- /* vnode is VOP_LOCKed, uobj is locked */
-
- if (write && (vp->v_bioflag & VBIOONSYNCLIST) == 0) {
- vn_syncer_add_to_worklist(vp, syncdelay);
- }
-
- /*
- * find the requested pages and make some simple checks.
- * leave space in the page array for a whole block.
- */
-
- fs_bshift = vp->v_mount->mnt_fs_bshift;
- fs_bsize = 1 << fs_bshift;
- dev_bshift = vp->v_mount->mnt_dev_bshift;
- dev_bsize = 1 << dev_bshift;
- KASSERT((diskeof & (dev_bsize - 1)) == 0);
- KASSERT((memeof & (dev_bsize - 1)) == 0);
-
- orignpages = MIN(orignpages,
- round_page(memeof - origoffset) >> PAGE_SHIFT);
- npages = orignpages;
- startoffset = origoffset & ~(fs_bsize - 1);
- endoffset = round_page((origoffset + (npages << PAGE_SHIFT)
- + fs_bsize - 1) & ~(fs_bsize - 1));
- endoffset = MIN(endoffset, round_page(memeof));
- ridx = (origoffset - startoffset) >> PAGE_SHIFT;
-
- memset(pgs, 0, sizeof(pgs));
- uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], UFP_ALL);
-
- /*
- * if PGO_OVERWRITE is set, don't bother reading the pages.
- * PGO_OVERWRITE also means that the caller guarantees
- * that the pages already have backing store allocated.
- */
-
- if (flags & PGO_OVERWRITE) {
- UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
-
- for (i = 0; i < npages; i++) {
- struct vm_page *pg = pgs[ridx + i];
-
- if (pg->flags & PG_FAKE) {
- uvm_pagezero(pg);
- pg->flags &= ~(PG_FAKE);
- }
- pg->flags &= ~(PG_RDONLY);
- }
- npages += ridx;
- goto out;
- }
-
- /*
- * if the pages are already resident, just return them.
- */
-
- for (i = 0; i < npages; i++) {
- struct vm_page *pg = pgs[ridx + i];
-
- if ((pg->flags & PG_FAKE) ||
- (write && (pg->flags & PG_RDONLY))) {
- break;
- }
- }
- if (i == npages) {
- UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
- raoffset = origoffset + (orignpages << PAGE_SHIFT);
- npages += ridx;
- goto raout;
- }
-
- /*
- * the page wasn't resident and we're not overwriting,
- * so we're going to have to do some i/o.
- * find any additional pages needed to cover the expanded range.
- */
-
- npages = (endoffset - startoffset) >> PAGE_SHIFT;
- if (startoffset != origoffset || npages != orignpages) {
-
- /*
- * XXXUBC we need to avoid deadlocks caused by locking
- * additional pages at lower offsets than pages we
- * already have locked. for now, unlock them all and
- * start over.
- */
-
- for (i = 0; i < orignpages; i++) {
- struct vm_page *pg = pgs[ridx + i];
-
- if (pg->flags & PG_FAKE) {
- pg->flags |= PG_RELEASED;
- }
- }
- uvm_page_unbusy(&pgs[ridx], orignpages);
- memset(pgs, 0, sizeof(pgs));
-
- UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
- startoffset, endoffset, 0,0);
- npgs = npages;
- uvn_findpages(uobj, startoffset, &npgs, pgs, UFP_ALL);
- }
- simple_unlock(&uobj->vmobjlock);
-
- /*
- * read the desired page(s).
- */
-
- totalbytes = npages << PAGE_SHIFT;
- bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
- tailbytes = totalbytes - bytes;
- skipbytes = 0;
-
- kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK |
- UVMPAGER_MAPIN_READ);
-
- s = splbio();
- mbp = pool_get(&bufpool, PR_WAITOK);
- splx(s);
- mbp->b_bufsize = totalbytes;
- mbp->b_data = (void *)kva;
- mbp->b_resid = mbp->b_bcount = bytes;
- mbp->b_flags = B_BUSY|B_READ| (async ? B_CALL : 0);
- mbp->b_iodone = uvm_aio_biodone;
- mbp->b_vp = NULL;
- LIST_INIT(&mbp->b_dep);
- bgetvp(vp, mbp);
-
- /*
- * if EOF is in the middle of the range, zero the part past EOF.
- */
-
- if (tailbytes > 0) {
- memset((void *)(kva + bytes), 0, tailbytes);
- }
-
- /*
- * now loop over the pages, reading as needed.
- */
-
- if (write) {
- lockmgr(&vp->v_glock, LK_EXCLUSIVE, NULL, p);
- } else {
- lockmgr(&vp->v_glock, LK_SHARED, NULL, p);
- }
-
- bp = NULL;
- for (offset = startoffset;
- bytes > 0;
- offset += iobytes, bytes -= iobytes) {
-
- /*
- * skip pages which don't need to be read.
- */
-
- pidx = (offset - startoffset) >> PAGE_SHIFT;
- while ((pgs[pidx]->flags & (PG_FAKE|PG_RDONLY)) == 0) {
- size_t b;
-
- KASSERT((offset & (PAGE_SIZE - 1)) == 0);
- b = MIN(PAGE_SIZE, bytes);
- offset += b;
- bytes -= b;
- skipbytes += b;
- pidx++;
- UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
- offset, 0,0,0);
- if (bytes == 0) {
- goto loopdone;
- }
- }
-
- /*
- * bmap the file to find out the blkno to read from and
- * how much we can read in one i/o. if bmap returns an error,
- * skip the rest of the top-level i/o.
- */
-
- lbn = offset >> fs_bshift;
- error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
- if (error) {
- UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
- lbn, error,0,0);
- skipbytes += bytes;
- goto loopdone;
- }
-
- /*
- * see how many pages can be read with this i/o.
- * reduce the i/o size if necessary to avoid
- * overwriting pages with valid data.
- */
-
- iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
- bytes);
- if (offset + iobytes > round_page(offset)) {
- pcount = 1;
- while (pidx + pcount < npages &&
- pgs[pidx + pcount]->flags & PG_FAKE) {
- pcount++;
- }
- iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
- (offset - trunc_page(offset)));
- }
-
- /*
- * if this block isn't allocated, zero it instead of reading it.
- * if this is a read access, mark the pages we zeroed PG_RDONLY.
- */
-
- if (blkno < 0) {
- int holepages = (round_page(offset + iobytes) -
- trunc_page(offset)) >> PAGE_SHIFT;
- UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
-
- sawhole = TRUE;
- memset((char *)kva + (offset - startoffset), 0,
- iobytes);
- skipbytes += iobytes;
-
- for (i = 0; i < holepages; i++) {
- if (write) {
- pgs[pidx + i]->flags &= ~PG_CLEAN;
- } else {
- pgs[pidx + i]->flags |= PG_RDONLY;
- }
- }
- continue;
- }
-
- /*
- * allocate a sub-buf for this piece of the i/o
- * (or just use mbp if there's only 1 piece),
- * and start it going.
- */
-
- if (offset == startoffset && iobytes == bytes) {
- bp = mbp;
- } else {
- s = splbio();
- bp = pool_get(&bufpool, PR_WAITOK);
- splx(s);
- bp->b_data = (char *)kva + offset - startoffset;
- bp->b_resid = bp->b_bcount = iobytes;
- bp->b_flags = B_BUSY|B_READ|B_CALL;
- bp->b_iodone = uvm_aio_biodone1;
- bp->b_vp = vp;
- LIST_INIT(&bp->b_dep);
- }
- bp->b_lblkno = 0;
- bp->b_private = mbp;
-
- /* adjust physical blkno for partial blocks */
- bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
- dev_bshift);
-
- UVMHIST_LOG(ubchist, "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
- bp, offset, iobytes, bp->b_blkno);
-
- VOP_STRATEGY(bp);
- }
-
-loopdone:
- if (skipbytes) {
- s = splbio();
- if (error) {
- mbp->b_flags |= B_ERROR;
- mbp->b_error = error;
- }
- mbp->b_resid -= skipbytes;
- if (mbp->b_resid == 0) {
- biodone(mbp);
- }
- splx(s);
- }
-
- if (async) {
- UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
- lockmgr(&vp->v_glock, LK_RELEASE, NULL, p);
- return 0;
- }
- if (bp != NULL) {
- error = biowait(mbp);
- }
- s = splbio();
- (void) buf_cleanout(mbp);
- pool_put(&bufpool, mbp);
- splx(s);
- uvm_pagermapout(kva, npages);
- raoffset = startoffset + totalbytes;
-
- /*
- * if this we encountered a hole then we have to do a little more work.
- * for read faults, we marked the page PG_RDONLY so that future
- * write accesses to the page will fault again.
- * for write faults, we must make sure that the backing store for
- * the page is completely allocated while the pages are locked.
- */
-
- if (error == 0 && sawhole && write) {
- error = VOP_BALLOCN(vp, startoffset, npages << PAGE_SHIFT,
- cred, 0);
- if (error) {
- UVMHIST_LOG(ubchist, "balloc lbn 0x%x -> %d",
- lbn, error,0,0);
- lockmgr(&vp->v_glock, LK_RELEASE, NULL, p);
- simple_lock(&uobj->vmobjlock);
- goto out;
- }
- }
- lockmgr(&vp->v_glock, LK_RELEASE, NULL, p);
- simple_lock(&uobj->vmobjlock);
-
- /*
- * see if we want to start any readahead.
- * XXXUBC for now, just read the next 128k on 64k boundaries.
- * this is pretty nonsensical, but it is 50% faster than reading
- * just the next 64k.
- */
-
-raout:
- if (!error && !async && !write && ((int)raoffset & 0xffff) == 0 &&
- PAGE_SHIFT <= 16) {
- int racount;
-
- racount = 1 << (16 - PAGE_SHIFT);
- (void) VOP_GETPAGES(vp, raoffset, NULL, &racount, 0,
- VM_PROT_READ, 0, 0);
- simple_lock(&uobj->vmobjlock);
-
- racount = 1 << (16 - PAGE_SHIFT);
- (void) VOP_GETPAGES(vp, raoffset + 0x10000, NULL, &racount, 0,
- VM_PROT_READ, 0, 0);
- simple_lock(&uobj->vmobjlock);
- }
-
- /*
- * we're almost done! release the pages...
- * for errors, we free the pages.
- * otherwise we activate them and mark them as valid and clean.
- * also, unbusy pages that were not actually requested.
- */
-
-out:
- if (error) {
- uvm_lock_pageq();
- for (i = 0; i < npages; i++) {
- if (pgs[i] == NULL) {
- continue;
- }
- UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
- pgs[i], pgs[i]->flags, 0,0);
- if (pgs[i]->flags & PG_WANTED) {
- wakeup(pgs[i]);
- }
- if (pgs[i]->flags & PG_RELEASED) {
- uvm_unlock_pageq();
- (uobj->pgops->pgo_releasepg)(pgs[i], NULL);
- uvm_lock_pageq();
- continue;
- }
- if (pgs[i]->flags & PG_FAKE) {
- uvm_pagefree(pgs[i]);
- continue;
- }
- uvm_pageactivate(pgs[i]);
- pgs[i]->flags &= ~(PG_WANTED|PG_BUSY);
- UVM_PAGE_OWN(pgs[i], NULL);
- }
- uvm_unlock_pageq();
- simple_unlock(&uobj->vmobjlock);
- UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
- return error;
- }
-
- UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
- uvm_lock_pageq();
- for (i = 0; i < npages; i++) {
- if (pgs[i] == NULL) {
- continue;
- }
- UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
- pgs[i], pgs[i]->flags, 0,0);
- if (pgs[i]->flags & PG_FAKE) {
- UVMHIST_LOG(ubchist, "unfaking pg %p offset 0x%x",
- pgs[i], pgs[i]->offset,0,0);
- pgs[i]->flags &= ~(PG_FAKE);
- pmap_clear_modify(pgs[i]);
- pmap_clear_reference(pgs[i]);
- }
- if (write) {
- pgs[i]->flags &= ~(PG_RDONLY);
- }
- if (i < ridx || i >= ridx + orignpages || async) {
- UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
- pgs[i], pgs[i]->offset,0,0);
- if (pgs[i]->flags & PG_WANTED) {
- wakeup(pgs[i]);
- }
- if (pgs[i]->flags & PG_RELEASED) {
- uvm_unlock_pageq();
- (uobj->pgops->pgo_releasepg)(pgs[i], NULL);
- uvm_lock_pageq();
- continue;
- }
- uvm_pageactivate(pgs[i]);
- pgs[i]->flags &= ~(PG_WANTED|PG_BUSY);
- UVM_PAGE_OWN(pgs[i], NULL);
- }
- }
- uvm_unlock_pageq();
- simple_unlock(&uobj->vmobjlock);
- if (ap->a_m != NULL) {
- memcpy(ap->a_m, &pgs[ridx],
- orignpages * sizeof(struct vm_page *));
- }
- return 0;
-}
-
-/*
- * generic VM putpages routine.
- * Write the given range of pages to backing store.
- */
-
-int
-genfs_putpages(v)
- void *v;
-{
- struct vop_putpages_args /* {
- struct vnode *a_vp;
- struct vm_page **a_m;
- int a_count;
- int a_flags;
- int *a_rtvals;
- } */ *ap = v;
-
- int s, error, npages, run;
- int fs_bshift, dev_bshift, dev_bsize;
- vaddr_t kva;
- off_t eof, offset, startoffset;
- size_t bytes, iobytes, skipbytes;
- daddr_t lbn, blkno;
- struct vm_page *pg;
- struct buf *mbp, *bp;
- struct vnode *vp = ap->a_vp;
- boolean_t async = (ap->a_flags & PGO_SYNCIO) == 0;
- UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
- UVMHIST_LOG(ubchist, "vp %p offset 0x%x count %d",
- vp, ap->a_m[0]->offset, ap->a_count, 0);
-
- simple_unlock(&vp->v_uobj.vmobjlock);
-
- error = VOP_SIZE(vp, vp->v_size, &eof);
- if (error) {
- return error;
- }
-
- error = 0;
- npages = ap->a_count;
- fs_bshift = vp->v_mount->mnt_fs_bshift;
- dev_bshift = vp->v_mount->mnt_dev_bshift;
- dev_bsize = 1 << dev_bshift;
- KASSERT((eof & (dev_bsize - 1)) == 0);
-
- pg = ap->a_m[0];
- startoffset = pg->offset;
- bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
- skipbytes = 0;
- KASSERT(bytes != 0);
-
- kva = uvm_pagermapin(ap->a_m, npages, UVMPAGER_MAPIN_WAITOK);
-
- s = splbio();
- vp->v_numoutput += 2;
- mbp = pool_get(&bufpool, PR_WAITOK);
- UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
- vp, mbp, vp->v_numoutput, bytes);
- splx(s);
- mbp->b_bufsize = npages << PAGE_SHIFT;
- mbp->b_data = (void *)kva;
- mbp->b_resid = mbp->b_bcount = bytes;
- mbp->b_flags = B_BUSY|B_WRITE|B_AGE |
- (async ? B_CALL : 0) |
- (curproc == uvm.pagedaemon_proc ? B_PDAEMON : 0);
- mbp->b_iodone = uvm_aio_biodone;
- mbp->b_vp = NULL;
- LIST_INIT(&mbp->b_dep);
- bgetvp(vp, mbp);
-
- bp = NULL;
- for (offset = startoffset;
- bytes > 0;
- offset += iobytes, bytes -= iobytes) {
- lbn = offset >> fs_bshift;
- error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
- if (error) {
- UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
- skipbytes += bytes;
- bytes = 0;
- break;
- }
-
- iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
- bytes);
- if (blkno == (daddr_t)-1) {
- skipbytes += iobytes;
- continue;
- }
-
- /* if it's really one i/o, don't make a second buf */
- if (offset == startoffset && iobytes == bytes) {
- bp = mbp;
- } else {
- s = splbio();
- vp->v_numoutput++;
- bp = pool_get(&bufpool, PR_WAITOK);
- UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
- vp, bp, vp->v_numoutput, 0);
- splx(s);
- bp->b_data = (char *)kva +
- (vaddr_t)(offset - pg->offset);
- bp->b_resid = bp->b_bcount = iobytes;
- bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
- bp->b_iodone = uvm_aio_biodone1;
- bp->b_vp = vp;
- LIST_INIT(&bp->b_dep);
- }
- bp->b_lblkno = 0;
- bp->b_private = mbp;
-
- /* adjust physical blkno for partial blocks */
- bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
- dev_bshift);
- UVMHIST_LOG(ubchist, "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
- vp, offset, bp->b_bcount, bp->b_blkno);
- VOP_STRATEGY(bp);
- }
- if (skipbytes) {
- UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
- s = splbio();
- mbp->b_resid -= skipbytes;
- if (error) {
- mbp->b_flags |= B_ERROR;
- mbp->b_error = error;
- }
- if (mbp->b_resid == 0) {
- biodone(mbp);
- }
- splx(s);
- }
- if (async) {
- UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
- return 0;
- }
- if (bp != NULL) {
- UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
- error = biowait(mbp);
- }
- if (bioops.io_pageiodone) {
- (*bioops.io_pageiodone)(mbp);
- }
- s = splbio();
- if (mbp->b_vp) {
- vwakeup(mbp->b_vp);
- }
- buf_cleanout(mbp);
- pool_put(&bufpool, mbp);
- splx(s);
- uvm_pagermapout(kva, npages);
- UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
- return error;
-}
-
-int
-genfs_size(v)
- void *v;
-{
- struct vop_size_args /* {
- struct vnode *a_vp;
- off_t a_size;
- off_t *a_eobp;
- } */ *ap = v;
- int bsize;
-
- bsize = 1 << ap->a_vp->v_mount->mnt_fs_bshift;
- *ap->a_eobp = (ap->a_size + bsize - 1) & ~(bsize - 1);
- return 0;
-}
-
int
-genfs_mmap(v)
+vop_generic_mmap(v)
void *v;
{
return 0;
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 723d837c606..6fbb27dbecb 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_subr.c,v 1.77 2001/12/10 02:19:34 art Exp $ */
+/* $OpenBSD: vfs_subr.c,v 1.78 2001/12/10 04:45:31 art Exp $ */
/* $NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $ */
/*
@@ -460,7 +460,6 @@ getnewvnode(tag, mp, vops, vpp)
vp->v_socket = 0;
}
vp->v_type = VNON;
- lockinit(&vp->v_glock, PVFS, "glock", 0, 0);
cache_purge(vp);
vp->v_tag = tag;
vp->v_op = vops;
diff --git a/sys/kern/vnode_if.c b/sys/kern/vnode_if.c
index 8459a5486a7..9819bea72e4 100644
--- a/sys/kern/vnode_if.c
+++ b/sys/kern/vnode_if.c
@@ -1230,39 +1230,6 @@ int VOP_WHITEOUT(dvp, cnp, flags)
return (VCALL(dvp, VOFFSET(vop_whiteout), &a));
}
-int vop_ballocn_vp_offsets[] = {
- VOPARG_OFFSETOF(struct vop_ballocn_args,a_vp),
- VDESC_NO_OFFSET
-};
-struct vnodeop_desc vop_ballocn_desc = {
- 0,
- "vop_ballocn",
- 0,
- vop_ballocn_vp_offsets,
- VDESC_NO_OFFSET,
- VOPARG_OFFSETOF(struct vop_ballocn_args, a_cred),
- VDESC_NO_OFFSET,
- VDESC_NO_OFFSET,
- NULL,
-};
-
-int VOP_BALLOCN(vp, offset, length, cred, flags)
- struct vnode *vp;
- off_t offset;
- off_t length;
- struct ucred *cred;
- int flags;
-{
- struct vop_ballocn_args a;
- a.a_desc = VDESC(vop_ballocn);
- a.a_vp = vp;
- a.a_offset = offset;
- a.a_length = length;
- a.a_cred = cred;
- a.a_flags = flags;
- return (VCALL(vp, VOFFSET(vop_ballocn), &a));
-}
-
int vop_getpages_vp_offsets[] = {
VOPARG_OFFSETOF(struct vop_getpages_args,a_vp),
VDESC_NO_OFFSET
@@ -1335,35 +1302,6 @@ int VOP_PUTPAGES(vp, m, count, flags, rtvals)
return (VCALL(vp, VOFFSET(vop_putpages), &a));
}
-int vop_size_vp_offsets[] = {
- VOPARG_OFFSETOF(struct vop_size_args,a_vp),
- VDESC_NO_OFFSET
-};
-struct vnodeop_desc vop_size_desc = {
- 0,
- "vop_size",
- 0,
- vop_size_vp_offsets,
- VDESC_NO_OFFSET,
- VDESC_NO_OFFSET,
- VDESC_NO_OFFSET,
- VDESC_NO_OFFSET,
- NULL,
-};
-
-int VOP_SIZE(vp, size, eobp)
- struct vnode *vp;
- off_t size;
- off_t *eobp;
-{
- struct vop_size_args a;
- a.a_desc = VDESC(vop_size);
- a.a_vp = vp;
- a.a_size = size;
- a.a_eobp = eobp;
- return (VCALL(vp, VOFFSET(vop_size), &a));
-}
-
int vop_mmap_vp_offsets[] = {
VOPARG_OFFSETOF(struct vop_mmap_args,a_vp),
VDESC_NO_OFFSET
@@ -1488,10 +1426,8 @@ struct vnodeop_desc *vfs_op_descs[] = {
&vop_advlock_desc,
&vop_reallocblks_desc,
&vop_whiteout_desc,
- &vop_ballocn_desc,
&vop_getpages_desc,
&vop_putpages_desc,
- &vop_size_desc,
&vop_mmap_desc,
NULL
};
diff --git a/sys/kern/vnode_if.src b/sys/kern/vnode_if.src
index fc1a45351cd..0efb5afdd4f 100644
--- a/sys/kern/vnode_if.src
+++ b/sys/kern/vnode_if.src
@@ -1,4 +1,4 @@
-# $OpenBSD: vnode_if.src,v 1.16 2001/12/04 22:44:31 art Exp $
+# $OpenBSD: vnode_if.src,v 1.17 2001/12/10 04:45:31 art Exp $
# $NetBSD: vnode_if.src,v 1.10 1996/05/11 18:26:27 mycroft Exp $
#
# Copyright (c) 1992, 1993
@@ -469,17 +469,6 @@ vop_whiteout {
#};
#
-#% ballocn vp L L L
-#
-vop_ballocn {
- IN struct vnode *vp;
- IN off_t offset;
- IN off_t length;
- IN struct ucred *cred;
- IN int flags;
-};
-
-#
#% getpages vp L L L
#
vop_getpages {
@@ -505,15 +494,6 @@ vop_putpages {
};
#
-#% size vp = = =
-#
-vop_size {
- IN struct vnode *vp;
- IN off_t size;
- OUT off_t *eobp;
-};
-
-#
#% mmap vp = = =
#
vop_mmap {