author     csapuntz <csapuntz@openbsd.org>	1998-01-10 23:44:28 +0000
committer  csapuntz <csapuntz@openbsd.org>	1998-01-10 23:44:28 +0000
commit     7c2031d54ea47d5d5635c59bd3d6d63afea5d34d (patch)
tree       539d4654c69c4a34ad439bff46927e2534315774
parent     Broke up vfs_subr.c which was getting a bit huge. We now have separate files (diff)
download   wireguard-openbsd-7c2031d54ea47d5d5635c59bd3d6d63afea5d34d.tar.xz
           wireguard-openbsd-7c2031d54ea47d5d5635c59bd3d6d63afea5d34d.zip
A couple more splbio()s in vfs_bio plus moving around a couple functions.
-rw-r--r--  sys/kern/vfs_bio.c       17
-rw-r--r--  sys/kern/vfs_default.c  235
-rw-r--r--  sys/kern/vfs_sync.c     342
3 files changed, 585 insertions(+), 9 deletions(-)
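The vfs_bio.c hunks below bracket the reassignbuf() calls in bdwrite() and bdirty() with an splbio()/splx() pair. As a minimal sketch of the idiom being applied (assuming the usual rationale: a vnode's clean/dirty buffer lists can also be manipulated from block-I/O interrupt context, so list surgery must run at splbio):

	int s;

	s = splbio();			/* mask block-I/O interrupts */
	reassignbuf(bp, bp->b_vp);	/* move bp between the vnode's buffer lists */
	splx(s);			/* restore the previous priority level */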
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index e094901f075..e8abd767445 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_bio.c,v 1.19 1997/11/07 23:01:36 csapuntz Exp $ */
+/* $OpenBSD: vfs_bio.c,v 1.20 1998/01/10 23:44:28 csapuntz Exp $ */
/* $NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $ */
/*-
@@ -333,14 +333,6 @@ bwrite(bp)
return (rv);
}
-int
-vn_bwrite(v)
- void *v;
-{
- struct vop_bwrite_args *ap = v;
-
- return (bwrite(ap->a_bp));
-}
/*
* Delayed write.
@@ -359,6 +351,8 @@ void
bdwrite(bp)
struct buf *bp;
{
+ int s;
+
/*
* If the block hasn't been seen before:
* (1) Mark it as having been seen,
@@ -368,7 +362,9 @@ bdwrite(bp)
*/
if (!ISSET(bp->b_flags, B_DELWRI)) {
SET(bp->b_flags, B_DELWRI);
+ s = splbio();
reassignbuf(bp, bp->b_vp);
+ splx(s);
curproc->p_stats->p_ru.ru_oublock++; /* XXX */
}
@@ -402,10 +398,13 @@ bdirty(bp)
struct buf *bp;
{
struct proc *p = curproc; /* XXX */
+ int s;
if (ISSET(bp->b_flags, B_DELWRI) == 0) {
SET(bp->b_flags, B_DELWRI);
+ s = splbio();
reassignbuf(bp, bp->b_vp);
+ splx(s);
if (p)
p->p_stats->p_ru.ru_oublock++;
}
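vn_bwrite() is removed from vfs_bio.c here but not dropped from the tree; it reappears verbatim in the new vfs_default.c below. For context, a filesystem wanting the default behavior would point its vnodeop table at it, in the same style as the sync_vnodeop_entries table in vfs_sync.c (hypothetical entry, for illustration only):

	{ &vop_bwrite_desc, vn_bwrite },	/* bwrite */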
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
new file mode 100644
index 00000000000..c524e6af201
--- /dev/null
+++ b/sys/kern/vfs_default.c
@@ -0,0 +1,235 @@
+/* $OpenBSD: vfs_default.c,v 1.1 1998/01/10 23:44:29 csapuntz Exp $ */
+
+
+/*
+ * Portions of this code are:
+ *
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/mount.h>
+#include <sys/vnode.h>
+
+#include <miscfs/specfs/specdev.h>
+
+
+/*
+ * Eliminate all activity associated with the requested vnode
+ * and with all vnodes aliased to the requested vnode.
+ */
+int
+vop_revoke(v)
+ void *v;
+{
+ struct vop_revoke_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ } */ *ap = v;
+ struct vnode *vp, *vq;
+ struct proc *p = curproc;
+
+#ifdef DIAGNOSTIC
+ if ((ap->a_flags & REVOKEALL) == 0)
+ panic("vop_revoke");
+#endif
+
+ vp = ap->a_vp;
+ simple_lock(&vp->v_interlock);
+
+ if (vp->v_flag & VALIASED) {
+ /*
+ * If a vgone (or vclean) is already in progress,
+ * wait until it is done and return.
+ */
+ if (vp->v_flag & VXLOCK) {
+ vp->v_flag |= VXWANT;
+ simple_unlock(&vp->v_interlock);
+ tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
+ return(0);
+ }
+ /*
+ * Ensure that vp will not be vgone'd while we
+ * are eliminating its aliases.
+ */
+ vp->v_flag |= VXLOCK;
+ simple_unlock(&vp->v_interlock);
+ while (vp->v_flag & VALIASED) {
+ simple_lock(&spechash_slock);
+ for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
+ if (vq->v_rdev != vp->v_rdev ||
+ vq->v_type != vp->v_type || vp == vq)
+ continue;
+ simple_unlock(&spechash_slock);
+ vgone(vq);
+ break;
+ }
+ }
+ /*
+ * Remove the lock so that vgone below will
+ * really eliminate the vnode after which time
+ * vgone will awaken any sleepers.
+ */
+ simple_lock(&vp->v_interlock);
+ vp->v_flag &= ~VXLOCK;
+ }
+ vgonel(vp, p);
+ return (0);
+}
+
+
+int
+vn_bwrite(v)
+ void *v;
+{
+ struct vop_bwrite_args *ap = v;
+
+ return (bwrite(ap->a_bp));
+}
+
+/*
+ * Stubs to use when there is no locking to be done on the underlying object.
+ * A minimal shared lock is necessary to ensure that the underlying object
+ * is not revoked while an operation is in progress. So, an active shared
+ * count is maintained in an auxiliary vnode lock structure.
+ */
+int
+vop_nolock(v)
+ void *v;
+{
+ struct vop_lock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap = v;
+
+#ifdef notyet
+ /*
+ * This code cannot be used until all the non-locking filesystems
+ * (notably NFS) are converted to properly lock and release nodes.
+ * Also, certain vnode operations change the locking state within
+ * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
+ * and symlink). Ideally these operations should not change the
+ * lock state, but should be changed to let the caller of the
+ * function unlock them. Otherwise all intermediate vnode layers
+ * (such as union, umapfs, etc) must catch these functions to do
+ * the necessary locking at their layer. Note that the inactive
+ * and lookup operations also change their lock state, but this
+ * cannot be avoided, so these two operations will always need
+ * to be handled in intermediate layers.
+ */
+ struct vnode *vp = ap->a_vp;
+ int vnflags, flags = ap->a_flags;
+
+ if (vp->v_vnlock == NULL) {
+ if ((flags & LK_TYPE_MASK) == LK_DRAIN)
+ return (0);
+ MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
+ M_VNODE, M_WAITOK);
+ lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+ }
+ switch (flags & LK_TYPE_MASK) {
+ case LK_DRAIN:
+ vnflags = LK_DRAIN;
+ break;
+ case LK_EXCLUSIVE:
+ case LK_SHARED:
+ vnflags = LK_SHARED;
+ break;
+ case LK_UPGRADE:
+ case LK_EXCLUPGRADE:
+ case LK_DOWNGRADE:
+ return (0);
+ case LK_RELEASE:
+ default:
+ panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
+ }
+ if (flags & LK_INTERLOCK)
+ vnflags |= LK_INTERLOCK;
+ return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
+#else /* for now */
+ /*
+ * Since we are not using the lock manager, we must clear
+ * the interlock here.
+ */
+ if (ap->a_flags & LK_INTERLOCK)
+ simple_unlock(&ap->a_vp->v_interlock);
+ return (0);
+#endif
+}
+
+/*
+ * Decrement the active use count.
+ */
+
+int
+vop_nounlock(v)
+ void *v;
+{
+ struct vop_unlock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap = v;
+
+ struct vnode *vp = ap->a_vp;
+
+ if (vp->v_vnlock == NULL)
+ return (0);
+ return (lockmgr(vp->v_vnlock, LK_RELEASE, NULL, ap->a_p));
+}
+
+/*
+ * Return whether or not the node is in use.
+ */
+int
+vop_noislocked(v)
+ void *v;
+{
+ struct vop_islocked_args /* {
+ struct vnode *a_vp;
+ } */ *ap = v;
+
+ struct vnode *vp = ap->a_vp;
+
+ if (vp->v_vnlock == NULL)
+ return (0);
+ return (lockstatus(vp->v_vnlock));
+}
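With the #ifdef notyet branch compiled out, vop_nolock() reduces to honoring the LK_INTERLOCK contract: a caller that enters holding vp->v_interlock and passes LK_INTERLOCK expects the lock routine to release it. A caller-side sketch (illustrative only; argument names follow the vop_lock_args shown above):

	simple_lock(&vp->v_interlock);
	/* ... inspect vnode state under the interlock ... */
	VOP_LOCK(vp, LK_SHARED | LK_INTERLOCK, p);	/* stub drops v_interlock, takes no real lock */
	/* ... use the vnode ... */
	VOP_UNLOCK(vp, 0, p);				/* no-op while v_vnlock == NULL */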
diff --git a/sys/kern/vfs_sync.c b/sys/kern/vfs_sync.c
new file mode 100644
index 00000000000..af0d5a73e44
--- /dev/null
+++ b/sys/kern/vfs_sync.c
@@ -0,0 +1,342 @@
+/* $OpenBSD: vfs_sync.c,v 1.1 1998/01/10 23:44:29 csapuntz Exp $ */
+
+
+/*
+ * Portions of this code are:
+ *
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Syncer daemon
+ */
+
+#include <sys/queue.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/mount.h>
+#include <sys/vnode.h>
+#include <sys/buf.h>
+#include <sys/malloc.h>
+
+#include <sys/kernel.h>
+
+/*
+ * The workitem queue.
+ */
+#define SYNCER_MAXDELAY 32
+int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
+time_t syncdelay = 30; /* time to delay syncing vnodes */
+
+static int syncer_delayno = 0;
+static long syncer_mask;
+LIST_HEAD(synclist, vnode);
+static struct synclist *syncer_workitem_pending;
+
+/*
+ * The workitem queue.
+ *
+ * It is useful to delay writes of file data and filesystem metadata
+ * for tens of seconds so that quickly created and deleted files need
+ * not waste disk bandwidth being created and removed. To realize this,
+ * we append vnodes to a "workitem" queue. When running with a soft
+ * updates implementation, most pending metadata dependencies should
+ * not wait for more than a few seconds. Thus, block devices holding mounted
+ * filesystems are delayed only about half the time that file data is delayed.
+ * Similarly, directory updates are more critical, so are only delayed
+ * about a third the time that file data is delayed. Thus, there are
+ * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
+ * one each second (driven off the filesystem syncer process). The
+ * syncer_delayno variable indicates the next queue that is to be processed.
+ * Items that need to be processed soon are placed in this queue:
+ *
+ * syncer_workitem_pending[syncer_delayno]
+ *
+ * A delay of fifteen seconds is done by placing the request fifteen
+ * entries later in the queue:
+ *
+ * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
+ *
+ */
+
+void
+vn_initialize_syncerd()
+
+{
+ syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
+ &syncer_mask);
+ syncer_maxdelay = syncer_mask + 1;
+}
+
+/*
+ * Add an item to the syncer work queue.
+ */
+void
+vn_syncer_add_to_worklist(vp, delay)
+ struct vnode *vp;
+ int delay;
+{
+ int s, slot;
+
+ s = splbio();
+ if (delay > syncer_maxdelay - 2)
+ delay = syncer_maxdelay - 2;
+ slot = (syncer_delayno + delay) & syncer_mask;
+ LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
+ splx(s);
+}
+
+/*
+ * System filesystem synchronizer daemon.
+ */
+
+
+extern int lbolt;
+
+void
+sched_sync(p)
+ struct proc *p;
+{
+ struct synclist *slp;
+ struct vnode *vp;
+ long starttime;
+ int s;
+
+ for (;;) {
+ starttime = time.tv_sec;
+
+ /*
+ * Push files whose dirty time has expired.
+ */
+ s = splbio();
+ slp = &syncer_workitem_pending[syncer_delayno];
+ syncer_delayno += 1;
+ if (syncer_delayno == syncer_maxdelay)
+ syncer_delayno = 0;
+ splx(s);
+ while ((vp = LIST_FIRST(slp)) != NULL) {
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ (void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
+ VOP_UNLOCK(vp, 0, p);
+ if (LIST_FIRST(slp) == vp) {
+ if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
+ panic("sched_sync: fsync failed");
+ /*
+ * Move ourselves to the back of the sync list.
+ */
+ LIST_REMOVE(vp, v_synclist);
+ vn_syncer_add_to_worklist(vp, syncdelay);
+ }
+ }
+
+ /*
+ * Do soft update processing.
+ */
+ if (bioops.io_sync)
+ (*bioops.io_sync)(NULL);
+
+ /*
+ * If it has taken us less than a second to process the
+ * current work, then wait. Otherwise start right over
+ * again. We can still lose time if any single round
+ * takes more than two seconds, but it does not really
+ * matter as we are just trying to generally pace the
+ * filesystem activity.
+ */
+ if (time.tv_sec == starttime)
+ tsleep(&lbolt, PPAUSE, "syncer", 0);
+ }
+}
+
+
+/*
+ * Routine to create and manage a filesystem syncer vnode.
+ */
+#define sync_close nullop
+int sync_fsync __P((void *));
+int sync_inactive __P((void *));
+#define sync_reclaim nullop
+#define sync_lock vop_nolock
+#define sync_unlock vop_nounlock
+int sync_print __P((void *));
+#define sync_islocked vop_noislocked
+
+int (**sync_vnodeop_p) __P((void *));
+struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
+ { &vop_default_desc, vn_default_error },
+ { &vop_close_desc, sync_close }, /* close */
+ { &vop_fsync_desc, sync_fsync }, /* fsync */
+ { &vop_inactive_desc, sync_inactive }, /* inactive */
+ { &vop_reclaim_desc, sync_reclaim }, /* reclaim */
+ { &vop_lock_desc, sync_lock }, /* lock */
+ { &vop_unlock_desc, sync_unlock }, /* unlock */
+ { &vop_print_desc, sync_print }, /* print */
+ { &vop_islocked_desc, sync_islocked }, /* islocked */
+ { (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL }
+};
+struct vnodeopv_desc sync_vnodeop_opv_desc =
+ { &sync_vnodeop_p, sync_vnodeop_entries };
+
+/*
+ * Create a new filesystem syncer vnode for the specified mount point.
+ */
+int
+vfs_allocate_syncvnode(mp)
+ struct mount *mp;
+{
+ struct vnode *vp;
+ static long start, incr, next;
+ int error;
+
+ /* Allocate a new vnode */
+ if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
+ mp->mnt_syncer = NULL;
+ return (error);
+ }
+ vp->v_writecount = 1;
+ vp->v_type = VNON;
+ /*
+ * Place the vnode onto the syncer worklist. We attempt to
+ * scatter them about on the list so that they will go off
+ * at evenly distributed times even if all the filesystems
+ * are mounted at once.
+ */
+ next += incr;
+ if (next == 0 || next > syncer_maxdelay) {
+ start /= 2;
+ incr /= 2;
+ if (start == 0) {
+ start = syncer_maxdelay / 2;
+ incr = syncer_maxdelay;
+ }
+ next = start;
+ }
+ vn_syncer_add_to_worklist(vp, next);
+ mp->mnt_syncer = vp;
+ return (0);
+}
+
+/*
+ * Do a lazy sync of the filesystem.
+ */
+int
+sync_fsync(v)
+ void *v;
+{
+ struct vop_fsync_args /* {
+ struct vnode *a_vp;
+ struct ucred *a_cred;
+ int a_waitfor;
+ struct proc *a_p;
+ } */ *ap = v;
+
+ struct vnode *syncvp = ap->a_vp;
+ struct mount *mp = syncvp->v_mount;
+ int asyncflag;
+
+ /*
+ * We only need to do something if this is a lazy evaluation.
+ */
+ if (ap->a_waitfor != MNT_LAZY)
+ return (0);
+
+ /*
+ * Move ourselves to the back of the sync list.
+ */
+ LIST_REMOVE(syncvp, v_synclist);
+ vn_syncer_add_to_worklist(syncvp, syncdelay);
+
+ /*
+ * Walk the list of vnodes pushing all that are dirty and
+ * not already on the sync list.
+ */
+ simple_lock(&mountlist_slock);
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, ap->a_p) == 0) {
+ asyncflag = mp->mnt_flag & MNT_ASYNC;
+ mp->mnt_flag &= ~MNT_ASYNC;
+ VFS_SYNC(mp, MNT_LAZY, ap->a_cred, ap->a_p);
+ if (asyncflag)
+ mp->mnt_flag |= MNT_ASYNC;
+ vfs_unbusy(mp, ap->a_p);
+ }
+ return (0);
+}
+
+/*
+ * The syncer vnode is no longer needed and is being decommissioned.
+ */
+int
+sync_inactive(v)
+ void *v;
+
+{
+ struct vop_inactive_args /* {
+ struct vnode *a_vp;
+ struct proc *a_p;
+ } */ *ap = v;
+
+ struct vnode *vp = ap->a_vp;
+
+ if (vp->v_usecount == 0)
+ return (0);
+ vp->v_mount->mnt_syncer = NULL;
+ LIST_REMOVE(vp, v_synclist);
+ vp->v_writecount = 0;
+ vput(vp);
+ return (0);
+}
+
+/*
+ * Print out a syncer vnode.
+ */
+int
+sync_print(v)
+ void *v;
+
+{
+ struct vop_print_args /* {
+ struct vnode *a_vp;
+ } */ *ap = v;
+ struct vnode *vp = ap->a_vp;
+
+ printf("syncer vnode");
+ if (vp->v_vnlock != NULL)
+ lockmgr_printinfo(vp->v_vnlock);
+ printf("\n");
+ return (0);
+}
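For reference, the syncer's ring arithmetic works because hashinit() sizes its table to a power of two and returns size - 1 through the mask argument, which is why vn_initialize_syncerd() can recompute syncer_maxdelay as syncer_mask + 1. A standalone sketch of the slot selection done in vn_syncer_add_to_worklist() (illustrative values, not kernel code):

	/* SYNCER_MAXDELAY is 32, so hashinit() yields syncer_mask == 31. */
	long syncer_mask = 31;
	int syncer_delayno = 28;	/* queue the syncer will process next */
	int delay = 15;			/* push this vnode out ~15 seconds from now */

	/* Wraps around the ring: (28 + 15) & 31 == 11. */
	int slot = (syncer_delayno + delay) & syncer_mask;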