path: root/sys/kern/vfs_subr.c
author    csapuntz <csapuntz@openbsd.org>  1997-10-06 15:05:18 +0000
committer csapuntz <csapuntz@openbsd.org>  1997-10-06 15:05:18 +0000
commit    6e175d504b4d2c3634337aa6a71d85b74b81c113 (patch)
tree      69347f4cbda33eca8b28929f0e401694bb32f5f7  /sys/kern/vfs_subr.c
parent    VFS Lite 2 Changes (diff)
VFS Lite2 Changes
Diffstat (limited to 'sys/kern/vfs_subr.c')
-rw-r--r--  sys/kern/vfs_subr.c  1230
1 file changed, 1018 insertions(+), 212 deletions(-)
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 10ed04c26ca..f265b15051d 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_subr.c,v 1.10 1997/04/25 09:33:24 deraadt Exp $ */
+/* $OpenBSD: vfs_subr.c,v 1.11 1997/10/06 15:12:42 csapuntz Exp $ */
/* $NetBSD: vfs_subr.c,v 1.53 1996/04/22 01:39:13 christos Exp $ */
/*
@@ -51,6 +51,7 @@
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/fcntl.h>
+#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
@@ -88,8 +89,28 @@ int suid_clear = 1; /* 1 => clear SUID / SGID on owner change */
LIST_REMOVE(bp, b_vnbufs); \
(bp)->b_vnbufs.le_next = NOLIST; \
}
-TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
+
+struct freelst vnode_hold_list; /* list of vnodes referencing buffers */
+struct freelst vnode_free_list; /* vnode free list */
+
struct mntlist mountlist; /* mounted filesystem list */
+struct simplelock mountlist_slock;
+static struct simplelock mntid_slock;
+struct simplelock mntvnode_slock;
+struct simplelock vnode_free_list_slock;
+static struct simplelock spechash_slock;
+
+/*
+ * The workitem queue.
+ */
+#define SYNCER_MAXDELAY 32
+int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
+time_t syncdelay = 30; /* time to delay syncing vnodes */
+
+static int syncer_delayno = 0;
+static long syncer_mask;
+LIST_HEAD(synclist, vnode);
+static struct synclist *syncer_workitem_pending;
int vfs_lock __P((struct mount *));
void vfs_unlock __P((struct mount *));
@@ -107,15 +128,16 @@ int bdevvp __P((dev_t, struct vnode **));
int cdevvp __P((dev_t, struct vnode **));
int getdevvp __P((dev_t, struct vnode **, enum vtype));
struct vnode *checkalias __P((struct vnode *, dev_t, struct mount *));
-int vget __P((struct vnode *, int));
void vref __P((struct vnode *));
void vput __P((struct vnode *));
void vrele __P((struct vnode *));
+int vunref __P((struct vnode *));
void vhold __P((struct vnode *));
void holdrele __P((struct vnode *));
int vflush __P((struct mount *, struct vnode *, int));
void vgoneall __P((struct vnode *));
void vgone __P((struct vnode *));
+void vgonel __P((struct vnode *, struct proc *));
int vcount __P((struct vnode *));
void vprint __P((char *, struct vnode *));
int vfs_mountedon __P((struct vnode *));
@@ -126,10 +148,10 @@ int vaccess __P((mode_t, uid_t, gid_t, mode_t, struct ucred *));
void vfs_unmountall __P((void));
void vfs_shutdown __P((void));
-static int vfs_hang_addrlist __P((struct mount *, struct netexport *,
+int vfs_hang_addrlist __P((struct mount *, struct netexport *,
struct export_args *));
-static int vfs_free_netcred __P((struct radix_node *, void *));
-static void vfs_free_addrlist __P((struct netexport *));
+int vfs_free_netcred __P((struct radix_node *, void *));
+void vfs_free_addrlist __P((struct netexport *));
#ifdef DEBUG
void printlockedvnodes __P((void));
@@ -142,124 +164,191 @@ void
vntblinit()
{
+ simple_lock_init(&mntvnode_slock);
+ simple_lock_init(&mntid_slock);
+ simple_lock_init(&spechash_slock);
+ TAILQ_INIT(&vnode_hold_list);
TAILQ_INIT(&vnode_free_list);
+ simple_lock_init(&vnode_free_list_slock);
CIRCLEQ_INIT(&mountlist);
+ /*
+ * Initialize the filesystem syncer.
+ */
+ syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
+ &syncer_mask);
+ syncer_maxdelay = syncer_mask + 1;
+
}
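
[For illustration: a minimal userland model of the hashinit() sizing used in
vntblinit() above. The real hashinit() sizes the table to a power of two and
returns size - 1 through its mask pointer; since SYNCER_MAXDELAY is 32, the
mask is 31 and syncer_maxdelay becomes 32 again. toy_hashinit() is a
hypothetical stand-in, not the kernel routine.]

#include <stdio.h>
#include <stdlib.h>

struct toyhead { void *first; };

static struct toyhead *
toy_hashinit(int elements, unsigned long *maskp)
{
	unsigned long size;

	for (size = 1; size <= (unsigned long)elements; size <<= 1)
		continue;
	size >>= 1;		/* largest power of two <= elements */
	*maskp = size - 1;	/* usable as "slot & mask" */
	return (calloc(size, sizeof(struct toyhead)));
}

int
main(void)
{
	unsigned long mask;

	toy_hashinit(32, &mask);	/* SYNCER_MAXDELAY == 32 */
	printf("mask %lu, maxdelay %lu\n", mask, mask + 1);	/* 31, 32 */
	return (0);
}
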
+
/*
- * Lock a filesystem.
- * Used to prevent access to it while mounting and unmounting.
+ * Mark a mount point as busy. Used to synchronize access and to delay
+ * unmounting. The interlock is not released on failure.
*/
+
int
-vfs_lock(mp)
- register struct mount *mp;
+vfs_busy(mp, flags, interlkp, p)
+ struct mount *mp;
+ int flags;
+ struct simplelock *interlkp;
+ struct proc *p;
{
+ int lkflags;
- while (mp->mnt_flag & MNT_MLOCK) {
+ if (mp->mnt_flag & MNT_UNMOUNT) {
+ if (flags & LK_NOWAIT)
+ return (ENOENT);
mp->mnt_flag |= MNT_MWAIT;
- tsleep((caddr_t)mp, PVFS, "vfslock", 0);
+ if (interlkp)
+ simple_unlock(interlkp);
+ /*
+ * Since all busy locks are shared except the exclusive
+ * lock granted when unmounting, the only place that a
+ * wakeup needs to be done is at the release of the
+ * exclusive lock at the end of dounmount.
+ */
+ sleep((caddr_t)mp, PVFS);
+ if (interlkp)
+ simple_lock(interlkp);
+ return (ENOENT);
}
- mp->mnt_flag |= MNT_MLOCK;
- return (0);
+ lkflags = LK_SHARED;
+ if (interlkp)
+ lkflags |= LK_INTERLOCK;
+ if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
+ panic("vfs_busy: unexpected lock failure");
+ return (0);
}
+
/*
- * Unlock a locked filesystem.
- * Panic if filesystem is not locked.
+ * Free a busy file system
*/
void
-vfs_unlock(mp)
- register struct mount *mp;
+vfs_unbusy(mp, p)
+ struct mount *mp;
+ struct proc *p;
{
-
- if ((mp->mnt_flag & MNT_MLOCK) == 0)
- panic("vfs_unlock: not locked");
- mp->mnt_flag &= ~MNT_MLOCK;
- if (mp->mnt_flag & MNT_MWAIT) {
- mp->mnt_flag &= ~MNT_MWAIT;
- wakeup((caddr_t)mp);
- }
+ lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
}
/*
- * Mark a mount point as busy.
- * Used to synchronize access and to delay unmounting.
+ * Lookup a filesystem type, and if found allocate and initialize
+ * a mount structure for it.
+ *
+ * Devname is usually updated by mount(8) after booting.
*/
-int
-vfs_busy(mp)
- register struct mount *mp;
-{
- while(mp->mnt_flag & MNT_MPBUSY) {
- mp->mnt_flag |= MNT_MPWANT;
- tsleep((caddr_t)&mp->mnt_flag, PVFS, "vfsbusy", 0);
- }
- if (mp->mnt_flag & MNT_UNMOUNT)
- return (1);
- mp->mnt_flag |= MNT_MPBUSY;
- return (0);
-}
+int
+vfs_rootmountalloc(fstypename, devname, mpp)
+ char *fstypename;
+ char *devname;
+ struct mount **mpp;
+ {
+ struct proc *p = curproc; /* XXX */
+ struct vfsconf *vfsp;
+ struct mount *mp;
+
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+ if (!strcmp(vfsp->vfc_name, fstypename))
+ break;
+ if (vfsp == NULL)
+ return (ENODEV);
+ mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
+ bzero((char *)mp, (u_long)sizeof(struct mount));
+ lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
+ (void)vfs_busy(mp, LK_NOWAIT, 0, p);
+ LIST_INIT(&mp->mnt_vnodelist);
+ mp->mnt_vfc = vfsp;
+ mp->mnt_op = vfsp->vfc_vfsops;
+ mp->mnt_flag = MNT_RDONLY;
+ mp->mnt_vnodecovered = NULLVP;
+ vfsp->vfc_refcount++;
+ mp->mnt_stat.f_type = vfsp->vfc_typenum;
+ mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
+ strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
+ mp->mnt_stat.f_mntonname[0] = '/';
+ (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
+ *mpp = mp;
+ return (0);
+ }
/*
- * Free a busy filesystem.
- * Panic if filesystem is not busy.
- */
-void
-vfs_unbusy(mp)
- register struct mount *mp;
+ * Find an appropriate filesystem to use for the root. If a filesystem
+ * has not been preselected, walk through the list of known filesystems
+ * trying those that have mountroot routines, and try them until one
+ * works or we have tried them all.
+ */
+int
+vfs_mountroot()
{
-
- if ((mp->mnt_flag & MNT_MPBUSY) == 0)
- panic("vfs_unbusy: not busy");
- mp->mnt_flag &= ~MNT_MPBUSY;
- if (mp->mnt_flag & MNT_MPWANT) {
- mp->mnt_flag &= ~MNT_MPWANT;
- wakeup((caddr_t)&mp->mnt_flag);
- }
+ struct vfsconf *vfsp;
+ extern int (*mountroot)(void);
+ int error;
+
+ if (mountroot != NULL)
+ return ((*mountroot)());
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
+ if (vfsp->vfc_mountroot == NULL)
+ continue;
+ if ((error = (*vfsp->vfc_mountroot)()) == 0)
+ return (0);
+ printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
+ }
+ return (ENODEV);
}
-
+
/*
* Lookup a mount point by filesystem identifier.
*/
struct mount *
-getvfs(fsid)
+vfs_getvfs(fsid)
fsid_t *fsid;
{
register struct mount *mp;
+ simple_lock(&mountlist_slock);
for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
- mp = mp->mnt_list.cqe_next)
+ mp = mp->mnt_list.cqe_next) {
if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
- mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
+ mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
+ simple_unlock(&mountlist_slock);
return (mp);
+ }
+ }
+ simple_unlock(&mountlist_slock);
return ((struct mount *)0);
}
+
/*
* Get a new unique fsid
*/
void
-getnewfsid(mp, mtype)
+vfs_getnewfsid(mp)
struct mount *mp;
- int mtype;
{
static u_short xxxfs_mntid;
fsid_t tfsid;
+ int mtype;
- mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + 11, 0); /* XXX */
+ simple_lock(&mntid_slock);
+ mtype = mp->mnt_vfc->vfc_typenum;
+ mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
mp->mnt_stat.f_fsid.val[1] = mtype;
if (xxxfs_mntid == 0)
++xxxfs_mntid;
- tfsid.val[0] = makedev((nblkdev + mtype) & 0xff, xxxfs_mntid);
+ tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
tfsid.val[1] = mtype;
if (mountlist.cqh_first != (void *)&mountlist) {
- while (getvfs(&tfsid)) {
+ while (vfs_getvfs(&tfsid)) {
tfsid.val[0]++;
xxxfs_mntid++;
}
}
mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
+ simple_unlock(&mntid_slock);
}
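
[The uniqueness loop in vfs_getnewfsid() above is plain linear probing over
candidate fsids. A hedged userland sketch of the same idea; all names here
are hypothetical, with id_in_use() standing in for vfs_getvfs().]

#include <stdbool.h>
#include <stddef.h>

/* Toy stand-in for vfs_getvfs(): is this fsid already mounted? */
static bool
id_in_use(long id)
{
	static const long mounted[] = { 3, 4, 5 };
	size_t i;

	for (i = 0; i < sizeof(mounted) / sizeof(mounted[0]); i++)
		if (mounted[i] == id)
			return (true);
	return (false);
}

static long
pick_unique_id(long candidate)
{
	while (id_in_use(candidate))
		candidate++;	/* linear probe, as vfs_getnewfsid() does */
	return (candidate);
}
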
/*
@@ -318,20 +407,56 @@ getnewvnode(tag, mp, vops, vpp)
int (**vops) __P((void *));
struct vnode **vpp;
{
- register struct vnode *vp;
+ struct proc *p = curproc; /* XXX */
+ struct freelst *listhd;
+ static int toggle;
+ struct vnode *vp;
#ifdef DIAGNOSTIC
int s;
#endif
- if ((vnode_free_list.tqh_first == NULL &&
- numvnodes < 2 * desiredvnodes) ||
- numvnodes < desiredvnodes) {
+ /*
+ * We must choose whether to allocate a new vnode or recycle an
+ * existing one. The criterion for allocating a new one is that
+ * the total number of vnodes is less than the number desired or
+ * there are no vnodes on either free list. Generally we only
+ * want to recycle vnodes that have no buffers associated with
+ * them, so we look first on the vnode_free_list. If it is empty,
+ * we next consider vnodes with referencing buffers on the
+ * vnode_hold_list. The toggle ensures that half the time we
+ * will use a vnode from the vnode_hold_list, and half the time
+ * we will allocate a new one unless the list has grown to twice
+ * the desired size. We are reluctant to recycle vnodes from the
+ * vnode_hold_list because we would lose the identity of all their
+ * referencing buffers.
+ */
+ toggle ^= 1;
+ if (numvnodes > 2 * desiredvnodes)
+ toggle = 0;
+
+
+ simple_lock(&vnode_free_list_slock);
+ if ((numvnodes < desiredvnodes) ||
+ ((TAILQ_FIRST(listhd = &vnode_free_list) == NULL) &&
+ ((TAILQ_FIRST(listhd = &vnode_hold_list) == NULL) || toggle))) {
+ simple_unlock(&vnode_free_list_slock);
vp = (struct vnode *)malloc((u_long)sizeof *vp,
M_VNODE, M_WAITOK);
bzero((char *)vp, sizeof *vp);
numvnodes++;
} else {
- if ((vp = vnode_free_list.tqh_first) == NULL) {
+ for (vp = TAILQ_FIRST(listhd); vp != NULLVP;
+ vp = TAILQ_NEXT(vp, v_freelist)) {
+ if (simple_lock_try(&vp->v_interlock))
+ break;
+ }
+ /*
+ * Unless this is a bad time of the month, at most
+ * the first NCPUS items on the free list are
+ * locked, so this is close enough to being empty.
+ */
+ if (vp == NULLVP) {
+ simple_unlock(&vnode_free_list_slock);
tablefull("vnode");
*vpp = 0;
return (ENFILE);
@@ -340,12 +465,15 @@ getnewvnode(tag, mp, vops, vpp)
vprint("free vnode", vp);
panic("free vnode isn't");
}
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ TAILQ_REMOVE(listhd, vp, v_freelist);
/* see comment on why 0xdeadb is set at end of vgone (below) */
- vp->v_freelist.tqe_prev = (struct vnode **)0xdeadb;
+ vp->v_flag |= VGONEHACK;
+ simple_unlock(&vnode_free_list_slock);
vp->v_lease = NULL;
if (vp->v_type != VBAD)
- vgone(vp);
+ vgonel(vp, p);
+ else
+ simple_unlock(&vp->v_interlock);
#ifdef DIAGNOSTIC
if (vp->v_data) {
vprint("cleaned vnode", vp);
@@ -385,18 +513,19 @@ insmntque(vp, mp)
register struct vnode *vp;
register struct mount *mp;
{
-
+ simple_lock(&mntvnode_slock);
/*
* Delete from old mount point vnode list, if on one.
*/
+
if (vp->v_mount != NULL)
LIST_REMOVE(vp, v_mntvnodes);
/*
* Insert into list of vnodes for the new mount point, if available.
*/
- if ((vp->v_mount = mp) == NULL)
- return;
- LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
+ if ((vp->v_mount = mp) != NULL)
+ LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
+ simple_unlock(&mntvnode_slock);
}
/*
@@ -435,14 +564,15 @@ vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
struct buf *nbp, *blist;
int s, error;
- if (flags & V_SAVE) {
+ if ((flags & V_SAVE) && vp->v_dirtyblkhd.lh_first != NULL) {
if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
return (error);
if (vp->v_dirtyblkhd.lh_first != NULL)
panic("vinvalbuf: dirty bufs");
}
for (;;) {
- if ((blist = vp->v_cleanblkhd.lh_first) && flags & V_SAVEMETA)
+ if ((blist = vp->v_cleanblkhd.lh_first) &&
+ (flags & V_SAVEMETA))
while (blist && blist->b_lblkno < 0)
blist = blist->b_vnbufs.le_next;
if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
@@ -562,30 +692,140 @@ brelvp(bp)
register struct buf *bp;
{
struct vnode *vp;
+ struct buf *wasdirty;
- if (bp->b_vp == (struct vnode *) 0)
+ if ((vp = bp->b_vp) == (struct vnode *) 0)
panic("brelvp: NULL");
/*
* Delete from old vnode list, if on one.
*/
+ wasdirty = vp->v_dirtyblkhd.lh_first;
if (bp->b_vnbufs.le_next != NOLIST)
bufremvn(bp);
- vp = bp->b_vp;
+ if (wasdirty && LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
+ LIST_REMOVE(vp, v_synclist);
bp->b_vp = (struct vnode *) 0;
HOLDRELE(vp);
}
/*
- * Reassign a buffer from one vnode to another.
- * Used to assign file specific control information
- * (indirect blocks) to the vnode to which they belong.
+ * The workitem queue.
+ *
+ * It is useful to delay writes of file data and filesystem metadata
+ * for tens of seconds so that quickly created and deleted files need
+ * not waste disk bandwidth being created and removed. To realize this,
+ * we append vnodes to a "workitem" queue. When running with a soft
+ * updates implementation, most pending metadata dependencies should
+ * not wait for more than a few seconds. Thus, mounted on block devices
+ * are delayed only about a half the time that file data is delayed.
+ * Similarly, directory updates are more critical, so are only delayed
+ * about a third the time that file data is delayed. Thus, there are
+ * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
+ * one each second (driven off the filesystem syncer process). The
+ * syncer_delayno variable indicates the next queue that is to be processed.
+ * Items that need to be processed soon are placed in this queue:
+ *
+ * syncer_workitem_pending[syncer_delayno]
+ *
+ * A delay of fifteen seconds is done by placing the request fifteen
+ * entries later in the queue:
+ *
+ * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
+ *
+ */
+
+/*
+ * Add an item to the syncer work queue.
+ */
+void
+vn_syncer_add_to_worklist(vp, delay)
+ struct vnode *vp;
+ int delay;
+{
+ int s, slot;
+
+ s = splbio();
+ if (delay > syncer_maxdelay - 2)
+ delay = syncer_maxdelay - 2;
+ slot = (syncer_delayno + delay) & syncer_mask;
+ LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
+ splx(s);
+}
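
[For illustration: a self-contained model of the delay wheel that
vn_syncer_add_to_worklist() indexes above. Buckets are swept round-robin
once per second, and an item due in "delay" seconds lands at
(delayno + delay) masked into the ring. Because the wheel size is a power
of two, both insert and sweep wrap without a modulo. All names here are
hypothetical.]

#include <stdio.h>

#define WHEEL_SIZE	32		/* must be a power of two */
#define WHEEL_MASK	(WHEEL_SIZE - 1)

static int wheel[WHEEL_SIZE];		/* pending items per bucket */
static int delayno;			/* next bucket to sweep */

static void
add_to_worklist(int delay)
{
	if (delay > WHEEL_SIZE - 2)
		delay = WHEEL_SIZE - 2;	/* clamp, as the kernel does */
	wheel[(delayno + delay) & WHEEL_MASK]++;
}

/* One pass of the once-per-second sweep done by sched_sync(). */
static void
one_second_tick(void)
{
	int slot = delayno;

	delayno = (delayno + 1) & WHEEL_MASK;
	printf("sweeping %d items in slot %d\n", wheel[slot], slot);
	wheel[slot] = 0;
}

int
main(void)
{
	add_to_worklist(15);		/* due in ~15 seconds */
	one_second_tick();
	return (0);
}
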
+
+/*
+ * System filesystem synchronizer daemon.
+ */
+
+extern int lbolt;
+
+void
+sched_sync(p)
+ struct proc *p;
+{
+ struct synclist *slp;
+ struct vnode *vp;
+ long starttime;
+ int s;
+
+ for (;;) {
+ starttime = time.tv_sec;
+
+ /*
+ * Push files whose dirty time has expired.
+ */
+ s = splbio();
+ slp = &syncer_workitem_pending[syncer_delayno];
+ syncer_delayno += 1;
+ if (syncer_delayno == syncer_maxdelay)
+ syncer_delayno = 0;
+ splx(s);
+ while ((vp = LIST_FIRST(slp)) != NULL) {
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ (void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
+ VOP_UNLOCK(vp, 0, p);
+ if (LIST_FIRST(slp) == vp) {
+ if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
+ panic("sched_sync: fsync failed");
+ /*
+ * Move ourselves to the back of the sync list.
+ */
+ LIST_REMOVE(vp, v_synclist);
+ vn_syncer_add_to_worklist(vp, syncdelay);
+ }
+ }
+
+ /*
+ * Do soft update processing.
+ */
+ if (bioops.io_sync)
+ (*bioops.io_sync)(NULL);
+
+ /*
+ * If it has taken us less than a second to process the
+ * current work, then wait. Otherwise start right over
+ * again. We can still lose time if any single round
+ * takes more than two seconds, but it does not really
+ * matter as we are just trying to generally pace the
+ * filesystem activity.
+ */
+ if (time.tv_sec == starttime)
+ tsleep(&lbolt, PPAUSE, "syncer", 0);
+ }
+}
+
+/*
+ * Reassign a buffer from one vnode to another. Used to assign buffers
+ * to the appropriate clean or dirty list and to add newly dirty vnodes
+ * to the appropriate filesystem syncer list.
*/
void
reassignbuf(bp, newvp)
register struct buf *bp;
register struct vnode *newvp;
{
- register struct buflists *listheadp;
+ struct buflists *listheadp;
+ struct buf *wasdirty;
+ int delay;
if (newvp == NULL) {
printf("reassignbuf: NULL");
@@ -594,16 +834,36 @@ reassignbuf(bp, newvp)
/*
* Delete from old vnode list, if on one.
*/
+ wasdirty = newvp->v_dirtyblkhd.lh_first;
if (bp->b_vnbufs.le_next != NOLIST)
bufremvn(bp);
/*
* If dirty, put on list of dirty buffers;
* otherwise insert onto list of clean buffers.
*/
- if (bp->b_flags & B_DELWRI)
- listheadp = &newvp->v_dirtyblkhd;
- else
+ if ((bp->b_flags & B_DELWRI) == 0) {
listheadp = &newvp->v_cleanblkhd;
+ if (wasdirty && LIST_FIRST(&newvp->v_dirtyblkhd) == NULL)
+ LIST_REMOVE(newvp, v_synclist);
+ } else {
+ listheadp = &newvp->v_dirtyblkhd;
+ if (LIST_FIRST(listheadp) == NULL) {
+ switch (newvp->v_type) {
+ case VDIR:
+ delay = syncdelay / 3;
+ break;
+ case VBLK:
+ if (newvp->v_specmountpoint != NULL) {
+ delay = syncdelay / 2;
+ break;
+ }
+ /* fall through */
+ default:
+ delay = syncdelay;
+ }
+ vn_syncer_add_to_worklist(newvp, delay);
+ }
+ }
bufinsvn(bp, listheadp);
}
@@ -649,8 +909,10 @@ getdevvp(dev, vpp, type)
struct vnode *nvp;
int error;
- if (dev == NODEV)
+ if (dev == NODEV) {
+ *vpp = NULLVP;
return (0);
+ }
error = getnewvnode(VT_NON, NULL, spec_vnodeop_p, &nvp);
if (error) {
*vpp = NULLVP;
@@ -680,6 +942,7 @@ checkalias(nvp, nvp_rdev, mp)
dev_t nvp_rdev;
struct mount *mp;
{
+ struct proc *p = curproc;
register struct vnode *vp;
struct vnode **vpp;
@@ -688,18 +951,23 @@ checkalias(nvp, nvp_rdev, mp)
vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
+ simple_lock(&spechash_slock);
for (vp = *vpp; vp; vp = vp->v_specnext) {
+ simple_lock(&vp->v_interlock);
if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
continue;
/*
* Alias, but not in use, so flush it out.
*/
if (vp->v_usecount == 0) {
- vgone(vp);
+ simple_unlock(&spechash_slock);
+ vgonel(vp, p);
goto loop;
}
- if (vget(vp, 1))
+ if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
+ simple_unlock(&spechash_slock);
goto loop;
+ }
break;
}
if (vp == NULL || vp->v_tag != VT_NON || vp->v_type != VBLK) {
@@ -708,18 +976,21 @@ loop:
nvp->v_rdev = nvp_rdev;
nvp->v_hashchain = vpp;
nvp->v_specnext = *vpp;
- nvp->v_specflags = 0;
+ nvp->v_specmountpoint = NULL;
nvp->v_speclockf = NULL;
+ simple_unlock(&spechash_slock);
*vpp = nvp;
- if (vp != NULL) {
+ if (vp != NULLVP) {
nvp->v_flag |= VALIASED;
vp->v_flag |= VALIASED;
vput(vp);
}
return (NULLVP);
}
- VOP_UNLOCK(vp);
- vclean(vp, 0);
+ simple_unlock(&spechash_slock);
+ VOP_UNLOCK(vp, 0, p);
+ simple_lock(&vp->v_interlock);
+ vclean(vp, 0, p);
vp->v_op = nvp->v_op;
vp->v_tag = nvp->v_tag;
nvp->v_type = VNON;
@@ -736,91 +1007,260 @@ loop:
* been changed to a new file system type).
*/
int
-vget(vp, lockflag)
- register struct vnode *vp;
- int lockflag;
+vget(vp, flags, p)
+ struct vnode *vp;
+ int flags;
+ struct proc *p;
{
-
+ int error;
/*
* If the vnode is in the process of being cleaned out for
* another use, we wait for the cleaning to finish and then
- * return failure. Cleaning is determined either by checking
- * that the VXLOCK flag is set, or that the use count is
- * zero with the back pointer set to show that it has been
- * removed from the free list by getnewvnode. The VXLOCK
- * flag may not have been set yet because vclean is blocked in
- * the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
+ * return failure. Cleaning is determined by checking that
+ * the VXLOCK flag is set.
*/
- if ((vp->v_flag & VXLOCK) ||
- (vp->v_usecount == 0 &&
- vp->v_freelist.tqe_prev == (struct vnode **)0xdeadb)) {
- vp->v_flag |= VXWANT;
+ if ((flags & LK_INTERLOCK) == 0)
+ simple_lock(&vp->v_interlock);
+ if (vp->v_flag & VXLOCK) {
+ vp->v_flag |= VXWANT;
+ simple_unlock(&vp->v_interlock);
tsleep((caddr_t)vp, PINOD, "vget", 0);
- return (1);
+ return (ENOENT);
+ }
+ if (vp->v_usecount == 0) {
+ simple_lock(&vnode_free_list_slock);
+ if (vp->v_holdcnt > 0)
+ TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
+ else
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ simple_unlock(&vnode_free_list_slock);
}
- if (vp->v_usecount == 0)
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- vp->v_usecount++;
- if (lockflag)
- VOP_LOCK(vp);
+ vp->v_usecount++;
+ if (flags & LK_TYPE_MASK) {
+ if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) {
+ vunref(vp);
+ simple_unlock(&vp->v_interlock);
+ }
+ return (error);
+ }
+ simple_unlock(&vp->v_interlock);
return (0);
}
/*
- * Vnode reference, just increment the count
+ * Stubs to use when there is no locking to be done on the underlying object.
+ * A minimal shared lock is necessary to ensure that the underlying object
+ * is not revoked while an operation is in progress. So, an active shared
+ * count is maintained in an auxiliary vnode lock structure.
+ */
+int
+vop_nolock(v)
+ void *v;
+{
+ struct vop_lock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap = v;
+
+#ifdef notyet
+ /*
+ * This code cannot be used until all the non-locking filesystems
+ * (notably NFS) are converted to properly lock and release nodes.
+ * Also, certain vnode operations change the locking state within
+ * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
+ * and symlink). Ideally these operations should not change the
+ * lock state, but should be changed to let the caller of the
+ * function unlock them. Otherwise all intermediate vnode layers
+ * (such as union, umapfs, etc) must catch these functions to do
+ * the necessary locking at their layer. Note that the inactive
+ * and lookup operations also change their lock state, but this
+ * cannot be avoided, so these two operations will always need
+ * to be handled in intermediate layers.
+ */
+ struct vnode *vp = ap->a_vp;
+ int vnflags, flags = ap->a_flags;
+
+ if (vp->v_vnlock == NULL) {
+ if ((flags & LK_TYPE_MASK) == LK_DRAIN)
+ return (0);
+ MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
+ M_VNODE, M_WAITOK);
+ lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
+ }
+ switch (flags & LK_TYPE_MASK) {
+ case LK_DRAIN:
+ vnflags = LK_DRAIN;
+ break;
+ case LK_EXCLUSIVE:
+ case LK_SHARED:
+ vnflags = LK_SHARED;
+ break;
+ case LK_UPGRADE:
+ case LK_EXCLUPGRADE:
+ case LK_DOWNGRADE:
+ return (0);
+ case LK_RELEASE:
+ default:
+ panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
+ }
+ if (flags & LK_INTERLOCK)
+ vnflags |= LK_INTERLOCK;
+ return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
+#else /* for now */
+ /*
+ * Since we are not using the lock manager, we must clear
+ * the interlock here.
+ */
+ if (ap->a_flags & LK_INTERLOCK)
+ simple_unlock(&ap->a_vp->v_interlock);
+ return (0);
+#endif
+}
+
+/*
+ * Decrement the active use count.
+ */
+
+int
+vop_nounlock(v)
+ void *v;
+{
+ struct vop_unlock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct proc *a_p;
+ } */ *ap = v;
+
+ struct vnode *vp = ap->a_vp;
+
+ if (vp->v_vnlock == NULL)
+ return (0);
+ return (lockmgr(vp->v_vnlock, LK_RELEASE, NULL, ap->a_p));
+}
+
+/*
+ * Return whether or not the node is in use.
+ */
+int
+vop_noislocked(v)
+ void *v;
+{
+ struct vop_islocked_args /* {
+ struct vnode *a_vp;
+ } */ *ap = v;
+
+ struct vnode *vp = ap->a_vp;
+
+ if (vp->v_vnlock == NULL)
+ return (0);
+ return (lockstatus(vp->v_vnlock));
+}
+
+/*
+ * Vnode reference.
*/
void
vref(vp)
struct vnode *vp;
{
-
+ simple_lock(&vp->v_interlock);
if (vp->v_usecount <= 0)
panic("vref used where vget required");
vp->v_usecount++;
+ simple_unlock(&vp->v_interlock);
}
-/*
- * vput(), just unlock and vrele()
- */
-void
-vput(vp)
- register struct vnode *vp;
+
+int
+vunref(vp)
+ struct vnode *vp;
{
+#ifdef DIAGNOSTIC
+ if (vp == NULL)
+ panic("vrele: null vp");
+#endif
+ simple_lock(&vp->v_interlock);
+ vp->v_usecount--;
+ if (vp->v_usecount > 0) {
+ simple_unlock(&vp->v_interlock);
+ return (vp->v_usecount);
+ }
+#ifdef DIAGNOSTIC
+ if (vp->v_usecount < 0 || vp->v_writecount != 0) {
+ vprint("vrele: bad ref count", vp);
+ panic("vrele: ref cnt");
+ }
+#endif
+ /*
+ * insert at tail of LRU list
+ */
+ simple_lock(&vnode_free_list_slock);
+ if (vp->v_holdcnt > 0)
+ TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
+ else
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+ simple_unlock(&vnode_free_list_slock);
- VOP_UNLOCK(vp);
- vrele(vp);
+ return (0);
}
/*
- * Vnode release.
- * If count drops to zero, call inactive routine and return to freelist.
+ * vput(), just unlock and vrele()
*/
void
-vrele(vp)
+vput(vp)
register struct vnode *vp;
{
+ struct proc *p = curproc; /* XXX */
-#ifdef DIAGNOSTIC
+#ifdef DIAGNOSTIC
if (vp == NULL)
- panic("vrele: null vp");
+ panic("vput: null vp");
#endif
+ simple_lock(&vp->v_interlock);
vp->v_usecount--;
- if (vp->v_usecount > 0)
+ if (vp->v_usecount > 0) {
+ simple_unlock(&vp->v_interlock);
+ VOP_UNLOCK(vp, 0, p);
return;
+ }
#ifdef DIAGNOSTIC
- if (vp->v_usecount != 0 || vp->v_writecount != 0) {
- vprint("vrele: bad ref count", vp);
- panic("vrele: ref cnt");
+ if (vp->v_usecount < 0 || vp->v_writecount != 0) {
+ vprint("vput: bad ref count", vp);
+ panic("vput: ref cnt");
}
#endif
/*
* insert at tail of LRU list
*/
- TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
- VOP_INACTIVE(vp);
+ simple_lock(&vnode_free_list_slock);
+ if (vp->v_holdcnt > 0)
+ TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
+ else
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+ simple_unlock(&vnode_free_list_slock);
+ simple_unlock(&vp->v_interlock);
+ VOP_INACTIVE(vp, p);
}
/*
+ * Vnode release - use for active VNODES.
+ * If count drops to zero, call inactive routine and return to freelist.
+ */
+void
+vrele(vp)
+ register struct vnode *vp;
+{
+ struct proc *p = curproc;
+
+ if (vunref(vp) == 0 &&
+ vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0)
+ VOP_INACTIVE(vp, p);
+}
+
+#ifdef DIAGNOSTIC
+/*
* Page or buffer structure gets a reference.
*/
void
@@ -828,7 +1268,27 @@ vhold(vp)
register struct vnode *vp;
{
+ /*
+ * If it is on the freelist and the hold count is currently
+ * zero, move it to the hold list.
+ *
+ * The VGONEHACK flag reflects a call from getnewvnode,
+ * which will remove the vnode from the free list, but
+ * will not increment the ref count until after it calls vgone
+ * If the ref count were incremented first, vgone would
+ * (incorrectly) try to close the previous instance of the
+ * underlying object.
+ */
+ simple_lock(&vp->v_interlock);
+ if (!(vp->v_flag & VGONEHACK) &&
+ vp->v_holdcnt == 0 && vp->v_usecount == 0) {
+ simple_lock(&vnode_free_list_slock);
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
+ simple_unlock(&vnode_free_list_slock);
+ }
vp->v_holdcnt++;
+ simple_unlock(&vp->v_interlock);
}
/*
@@ -839,10 +1299,26 @@ holdrele(vp)
register struct vnode *vp;
{
+ simple_lock(&vp->v_interlock);
if (vp->v_holdcnt <= 0)
panic("holdrele: holdcnt");
vp->v_holdcnt--;
+ /*
+ * If it is on the holdlist and the hold count drops to
+ * zero, move it to the free list.
+ *
+ * See above for VGONEHACK
+ */
+ if (!(vp->v_flag & VGONEHACK) &&
+ vp->v_holdcnt == 0 && vp->v_usecount == 0) {
+ simple_lock(&vnode_free_list_slock);
+ TAILQ_REMOVE(&vnode_hold_list, vp, v_freelist);
+ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
+ simple_unlock(&vnode_free_list_slock);
+ }
+ simple_unlock(&vp->v_interlock);
}
+#endif /* DIAGNOSTIC */
/*
* Remove any vnodes in the vnode table belonging to mount point mp.
@@ -863,11 +1339,11 @@ vflush(mp, skipvp, flags)
struct vnode *skipvp;
int flags;
{
+ struct proc *p = curproc;
register struct vnode *vp, *nvp;
int busy = 0;
- if ((mp->mnt_flag & MNT_MPBUSY) == 0)
- panic("vflush: not busy");
+ simple_lock(&mntvnode_slock);
loop:
for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
if (vp->v_mount != mp)
@@ -878,24 +1354,32 @@ loop:
*/
if (vp == skipvp)
continue;
+
+ simple_lock(&vp->v_interlock);
/*
* Skip over vnodes marked VSYSTEM.
*/
- if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
+ if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
+ simple_unlock(&vp->v_interlock);
continue;
+ }
/*
* If WRITECLOSE is set, only flush out regular file
* vnodes open for writing.
*/
if ((flags & WRITECLOSE) &&
- (vp->v_writecount == 0 || vp->v_type != VREG))
+ (vp->v_writecount == 0 || vp->v_type != VREG)) {
+ simple_unlock(&vp->v_interlock);
continue;
+ }
/*
* With v_usecount == 0, all we need to do is clear
* out the vnode data structures and we are done.
*/
if (vp->v_usecount == 0) {
- vgone(vp);
+ simple_unlock(&mntvnode_slock);
+ vgonel(vp, p);
+ simple_lock(&mntvnode_slock);
continue;
}
/*
@@ -904,21 +1388,25 @@ loop:
* anonymous device. For all other files, just kill them.
*/
if (flags & FORCECLOSE) {
+ simple_unlock(&mntvnode_slock);
if (vp->v_type != VBLK && vp->v_type != VCHR) {
- vgone(vp);
+ vgonel(vp, p);
} else {
- vclean(vp, 0);
+ vclean(vp, 0, p);
vp->v_op = spec_vnodeop_p;
insmntque(vp, (struct mount *)0);
}
+ simple_lock(&mntvnode_slock);
continue;
}
#ifdef DEBUG
if (busyprt)
vprint("vflush: busy vnode", vp);
#endif
+ simple_unlock(&vp->v_interlock);
busy++;
}
+ simple_unlock(&mntvnode_slock);
if (busy)
return (EBUSY);
return (0);
@@ -926,11 +1414,13 @@ loop:
/*
* Disassociate the underlying file system from a vnode.
+ * The vnode interlock is held on entry.
*/
void
-vclean(vp, flags)
+vclean(vp, flags, p)
register struct vnode *vp;
int flags;
+ struct proc *p;
{
int active;
@@ -941,15 +1431,8 @@ vclean(vp, flags)
* race against ourselves to recycle it.
*/
if ((active = vp->v_usecount) != 0)
- VREF(vp);
- /*
- * Even if the count is zero, the VOP_INACTIVE routine may still
- * have the object locked while it cleans it out. The VOP_LOCK
- * ensures that the VOP_INACTIVE routine is done with its work.
- * For active vnodes, it ensures that no other activity can
- * occur while the underlying object is being cleaned out.
- */
- VOP_LOCK(vp);
+ vp->v_usecount++;
+
/*
* Prevent the vnode from being recycled or
* brought into use while we clean it out.
@@ -957,32 +1440,57 @@ vclean(vp, flags)
if (vp->v_flag & VXLOCK)
panic("vclean: deadlock");
vp->v_flag |= VXLOCK;
+
+
/*
- * Clean out any buffers associated with the vnode.
+ * Even if the count is zero, the VOP_INACTIVE routine may still
+ * have the object locked while it cleans it out. The VOP_LOCK
+ * ensures that the VOP_INACTIVE routine is done with its work.
+ * For active vnodes, it ensures that no other activity can
+ * occur while the underlying object is being cleaned out.
*/
- if (flags & DOCLOSE)
- vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
+ VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
+
/*
- * Any other processes trying to obtain this lock must first
- * wait for VXLOCK to clear, then call the new lock operation.
+ * Clean out any buffers associated with the vnode.
*/
- VOP_UNLOCK(vp);
+ if (flags & DOCLOSE)
+ vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
/*
* If purging an active vnode, it must be closed and
- * deactivated before being reclaimed.
+ * deactivated before being reclaimed. Note that
+ * VOP_INACTIVE will unlock the vnode.
*/
if (active) {
if (flags & DOCLOSE)
- VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
- VOP_INACTIVE(vp);
+ VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
+ VOP_INACTIVE(vp, p);
+ } else {
+ /*
+ * Any other processes trying to obtain this lock must first
+ * wait for VXLOCK to clear, then call the new lock operation.
+ */
+ VOP_UNLOCK(vp, 0, p);
}
+
/*
* Reclaim the vnode.
*/
- if (VOP_RECLAIM(vp))
+ if (VOP_RECLAIM(vp, p))
panic("vclean: cannot reclaim");
- if (active)
- vrele(vp);
+ if (active) {
+ if (vunref(vp) == 0 &&
+ vp->v_holdcnt > 0)
+ panic("vclean: not clean");
+ simple_unlock(&vp->v_interlock);
+ }
+ cache_purge(vp);
+ if (vp->v_vnlock) {
+ if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
+ vprint("vclean: lock not drained", vp);
+ FREE(vp->v_vnlock, M_VNODE);
+ vp->v_vnlock = NULL;
+ }
/*
* Done with purge, notify sleepers of the grim news.
@@ -1000,12 +1508,25 @@ vclean(vp, flags)
* Eliminate all activity associated with the requested vnode
* and with all vnodes aliased to the requested vnode.
*/
-void
-vgoneall(vp)
- register struct vnode *vp;
+int
+vop_revoke(v)
+ void *v;
{
- register struct vnode *vq;
+ struct vop_revoke_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ } */ *ap = v;
+ struct vnode *vp, *vq;
+ struct proc *p = curproc;
+
+#ifdef DIAGNOSTIC
+ if ((ap->a_flags & REVOKEALL) == 0)
+ panic("vop_revoke");
+#endif
+ vp = ap->a_vp;
+ simple_lock(&vp->v_interlock);
+
if (vp->v_flag & VALIASED) {
/*
* If a vgone (or vclean) is already in progress,
@@ -1013,19 +1534,23 @@ vgoneall(vp)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
- tsleep((caddr_t)vp, PINOD, "vgoneall", 0);
- return;
+ simple_unlock(&vp->v_interlock);
+ tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
+ return(0);
}
/*
* Ensure that vp will not be vgone'd while we
* are eliminating its aliases.
*/
vp->v_flag |= VXLOCK;
+ simple_unlock(&vp->v_interlock);
while (vp->v_flag & VALIASED) {
+ simple_lock(&spechash_slock);
for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
if (vq->v_rdev != vp->v_rdev ||
vq->v_type != vp->v_type || vp == vq)
continue;
+ simple_unlock(&spechash_slock);
vgone(vq);
break;
}
@@ -1035,9 +1560,34 @@ vgoneall(vp)
* really eliminate the vnode after which time
* vgone will awaken any sleepers.
*/
+ simple_lock(&vp->v_interlock);
vp->v_flag &= ~VXLOCK;
}
- vgone(vp);
+ vgonel(vp, p);
+ return (0);
+}
+
+
+/*
+ * Recycle an unused vnode to the front of the free list.
+ * Release the passed interlock if the vnode will be recycled.
+ */
+int
+vrecycle(vp, inter_lkp, p)
+ struct vnode *vp;
+ struct simplelock *inter_lkp;
+ struct proc *p;
+{
+
+ simple_lock(&vp->v_interlock);
+ if (vp->v_usecount == 0) {
+ if (inter_lkp)
+ simple_unlock(inter_lkp);
+ vgonel(vp, p);
+ return (1);
+ }
+ simple_unlock(&vp->v_interlock);
+ return (0);
}
/*
@@ -1048,6 +1598,20 @@ void
vgone(vp)
register struct vnode *vp;
{
+ struct proc *p = curproc;
+
+ simple_lock(&vp->v_interlock);
+ vgonel(vp, p);
+}
+
+/*
+ * vgone, with the vp interlock held.
+ */
+void
+vgonel(vp, p)
+ struct vnode *vp;
+ struct proc *p;
+{
register struct vnode *vq;
struct vnode *vx;
@@ -1057,21 +1621,25 @@ vgone(vp)
*/
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
+ simple_unlock(&vp->v_interlock);
tsleep((caddr_t)vp, PINOD, "vgone", 0);
return;
}
/*
* Clean out the filesystem specific data.
*/
- vclean(vp, DOCLOSE);
+ vclean(vp, DOCLOSE, p);
/*
* Delete from old mount point vnode list, if on one.
*/
- insmntque(vp, (struct mount *)0);
+ if (vp->v_mount != NULL)
+ insmntque(vp, (struct mount *)0);
/*
- * If special device, remove it from special device alias list.
+ * If special device, remove it from special device alias list
+ * if it is on one.
*/
- if (vp->v_type == VBLK || vp->v_type == VCHR) {
+ if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
+ simple_lock(&spechash_slock);
if (*vp->v_hashchain == vp) {
*vp->v_hashchain = vp->v_specnext;
} else {
@@ -1100,27 +1668,26 @@ vgone(vp)
vx->v_flag &= ~VALIASED;
vp->v_flag &= ~VALIASED;
}
+ simple_unlock(&spechash_slock);
FREE(vp->v_specinfo, M_VNODE);
vp->v_specinfo = NULL;
}
/*
* If it is on the freelist and not already at the head,
- * move it to the head of the list. The test of the back
- * pointer and the reference count of zero is because
- * it will be removed from the free list by getnewvnode,
- * but will not have its reference count incremented until
- * after calling vgone. If the reference count were
- * incremented first, vgone would (incorrectly) try to
- * close the previous instance of the underlying object.
- * So, the back pointer is explicitly set to `0xdeadb' in
- * getnewvnode after removing it from the freelist to ensure
- * that we do not try to move it here.
+ * move it to the head of the list.
+ *
+ * See above about the VGONEHACK
*/
- if (vp->v_usecount == 0 &&
- vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb &&
- vnode_free_list.tqh_first != vp) {
- TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
- TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
+ if (vp->v_usecount == 0) {
+ simple_lock(&vnode_free_list_slock);
+ if (vp->v_holdcnt > 0)
+ panic("vgonel: not clean");
+ if (!(vp->v_flag & VGONEHACK) &&
+ TAILQ_FIRST(&vnode_free_list) != vp) {
+ TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
+ TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
+ }
+ simple_unlock(&vnode_free_list_slock);
}
vp->v_type = VBAD;
}
@@ -1135,14 +1702,18 @@ vfinddev(dev, type, vpp)
struct vnode **vpp;
{
register struct vnode *vp;
+ int rc = 0;
+ simple_lock(&spechash_slock);
for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
if (dev != vp->v_rdev || type != vp->v_type)
continue;
*vpp = vp;
- return (1);
+ rc = 1;
+ break;
}
- return (0);
+ simple_unlock(&spechash_slock);
+ return (rc);
}
/*
@@ -1150,14 +1721,15 @@ vfinddev(dev, type, vpp)
*/
int
vcount(vp)
- register struct vnode *vp;
+ struct vnode *vp;
{
- register struct vnode *vq, *vnext;
+ struct vnode *vq, *vnext;
int count;
loop:
if ((vp->v_flag & VALIASED) == 0)
return (vp->v_usecount);
+ simple_lock(&spechash_slock);
for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
vnext = vq->v_specnext;
if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
@@ -1166,11 +1738,13 @@ loop:
* Alias, but not in use, so flush it out.
*/
if (vq->v_usecount == 0 && vq != vp) {
+ simple_unlock(&spechash_slock);
vgone(vq);
goto loop;
}
count += vq->v_usecount;
}
+ simple_unlock(&spechash_slock);
return (count);
}
@@ -1225,21 +1799,77 @@ vprint(label, vp)
void
printlockedvnodes()
{
- register struct mount *mp;
+ struct proc *p = curproc;
+ register struct mount *mp, *nmp;
register struct vnode *vp;
printf("Locked vnodes\n");
+ simple_lock(&mountlist_slock);
for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
- mp = mp->mnt_list.cqe_next) {
+ mp = nmp) {
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ nmp = mp->mnt_list.cqe_next;
+ continue;
+ }
for (vp = mp->mnt_vnodelist.lh_first;
vp != NULL;
- vp = vp->v_mntvnodes.le_next)
+ vp = vp->v_mntvnodes.le_next) {
if (VOP_ISLOCKED(vp))
vprint((char *)0, vp);
- }
+ simple_lock(&mountlist_slock);
+ nmp = mp->mnt_list.cqe_next;
+ vfs_unbusy(mp, p);
+ }
+ simple_unlock(&mountlist_slock);
+
}
#endif
+/*
+ * Top level filesystem related information gathering.
+ */
+int
+vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+ int *name;
+ u_int namelen;
+ void *oldp;
+ size_t *oldlenp;
+ void *newp;
+ size_t newlen;
+ struct proc *p;
+{
+ struct vfsconf *vfsp;
+
+ /* all sysctl names at this level are at least name and field */
+ if (namelen < 2)
+ return (ENOTDIR); /* overloaded */
+ if (name[0] != VFS_GENERIC) {
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+ if (vfsp->vfc_typenum == name[0])
+ break;
+ if (vfsp == NULL)
+ return (EOPNOTSUPP);
+ return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
+ oldp, oldlenp, newp, newlen, p));
+ }
+ switch (name[1]) {
+ case VFS_MAXTYPENUM:
+ return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf));
+ case VFS_CONF:
+ if (namelen < 3)
+ return (ENOTDIR); /* overloaded */
+ for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
+ if (vfsp->vfc_typenum == name[2])
+ break;
+ if (vfsp == NULL)
+ return (EOPNOTSUPP);
+ return (sysctl_rdstruct(oldp, oldlenp, newp, vfsp,
+ sizeof(struct vfsconf)));
+ }
+ return (EOPNOTSUPP);
+}
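
[A hedged example of how userland would address the handler above through
sysctl(3), assuming the CTL_VFS, VFS_GENERIC, and VFS_MAXTYPENUM constants
from this era's <sys/sysctl.h> and <sys/mount.h>. The kernel vfs_sysctl()
sees the name vector with CTL_VFS already stripped, so name[0] there is the
filesystem type number or VFS_GENERIC.]

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	int name[3], maxtype;
	size_t len = sizeof(maxtype);

	name[0] = CTL_VFS;		/* top level: filesystem info */
	name[1] = VFS_GENERIC;		/* not tied to one fs type */
	name[2] = VFS_MAXTYPENUM;	/* field: highest fs type number */
	if (sysctl(name, 3, &maxtype, &len, NULL, 0) == -1) {
		perror("sysctl");
		return (1);
	}
	printf("max fs type number: %d\n", maxtype);
	return (0);
}
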
+
+
int kinfo_vdebug = 1;
int kinfo_vgetfailed;
#define KINFO_VNODESLOP 10
@@ -1249,12 +1879,13 @@ int kinfo_vgetfailed;
*/
/* ARGSUSED */
int
-sysctl_vnode(where, sizep)
+sysctl_vnode(where, sizep, p)
char *where;
size_t *sizep;
+ struct proc *p;
{
register struct mount *mp, *nmp;
- struct vnode *vp;
+ struct vnode *vp, *nvp;
register char *bp = where, *savebp;
char *ewhere;
int error;
@@ -1267,27 +1898,32 @@ sysctl_vnode(where, sizep)
}
ewhere = where + *sizep;
+ simple_lock(&mountlist_slock);
for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
- nmp = mp->mnt_list.cqe_next;
- if (vfs_busy(mp))
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+ nmp = mp->mnt_list.cqe_next;
continue;
+ }
savebp = bp;
again:
for (vp = mp->mnt_vnodelist.lh_first;
vp != NULL;
- vp = vp->v_mntvnodes.le_next) {
+ vp = nvp) {
/*
* Check that the vp is still associated with
* this filesystem. RACE: could have been
* recycled onto the same filesystem.
*/
if (vp->v_mount != mp) {
+ simple_unlock(&mntvnode_slock);
if (kinfo_vdebug)
printf("kinfo: vp changed\n");
bp = savebp;
goto again;
}
+ nvp = vp->v_mntvnodes.le_next;
if (bp + VPTRSZ + VNODESZ > ewhere) {
+ simple_unlock(&mntvnode_slock);
*sizep = bp - where;
return (ENOMEM);
}
@@ -1295,10 +1931,17 @@ again:
(error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ)))
return (error);
bp += VPTRSZ + VNODESZ;
+ simple_lock(&mntvnode_slock);
}
- vfs_unbusy(mp);
+
+ simple_unlock(&mntvnode_slock);
+ simple_lock(&mountlist_slock);
+ nmp = mp->mnt_list.cqe_next;
+ vfs_unbusy(mp, p);
}
+ simple_unlock(&mountlist_slock);
+
*sizep = bp - where;
return (0);
}
@@ -1311,26 +1954,31 @@ vfs_mountedon(vp)
register struct vnode *vp;
{
register struct vnode *vq;
+ int error = 0;
- if (vp->v_specflags & SI_MOUNTEDON)
+ if (vp->v_specmountpoint != NULL)
return (EBUSY);
if (vp->v_flag & VALIASED) {
+ simple_lock(&spechash_slock);
for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
if (vq->v_rdev != vp->v_rdev ||
vq->v_type != vp->v_type)
continue;
- if (vq->v_specflags & SI_MOUNTEDON)
- return (EBUSY);
- }
+ if (vq->v_specmountpoint != NULL) {
+ error = EBUSY;
+ break;
+ }
+ }
+ simple_unlock(&spechash_slock);
}
- return (0);
+ return (error);
}
/*
* Build hash lists of net addresses and hang them off the mount point.
* Called by ufs_mount() to set up the lists of export addresses.
*/
-static int
+int
vfs_hang_addrlist(mp, nep, argp)
struct mount *mp;
struct netexport *nep;
@@ -1404,7 +2052,7 @@ out:
}
/* ARGSUSED */
-static int
+int
vfs_free_netcred(rn, w)
struct radix_node *rn;
void *w;
@@ -1419,7 +2067,7 @@ vfs_free_netcred(rn, w)
/*
* Free the net address hash lists that are hanging off the mount points.
*/
-static void
+void
vfs_free_addrlist(nep)
struct netexport *nep;
{
@@ -1666,3 +2314,161 @@ fs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
}
return (*fn)(name + 1, namelen - 1, oldp, oldlenp, newp, newlen, p);
}
+
+/*
+ * Routine to create and manage a filesystem syncer vnode.
+ */
+#define sync_close nullop
+int sync_fsync __P((void *));
+int sync_inactive __P((void *));
+#define sync_reclaim nullop
+#define sync_lock vop_nolock
+#define sync_unlock vop_nounlock
+int sync_print __P((void *));
+#define sync_islocked vop_noislocked
+
+int (**sync_vnodeop_p) __P((void *));
+struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
+ { &vop_default_desc, vn_default_error },
+ { &vop_close_desc, sync_close }, /* close */
+ { &vop_fsync_desc, sync_fsync }, /* fsync */
+ { &vop_inactive_desc, sync_inactive }, /* inactive */
+ { &vop_reclaim_desc, sync_reclaim }, /* reclaim */
+ { &vop_lock_desc, sync_lock }, /* lock */
+ { &vop_unlock_desc, sync_unlock }, /* unlock */
+ { &vop_print_desc, sync_print }, /* print */
+ { &vop_islocked_desc, sync_islocked }, /* islocked */
+ { (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL }
+};
+struct vnodeopv_desc sync_vnodeop_opv_desc =
+ { &sync_vnodeop_p, sync_vnodeop_entries };
+
+/*
+ * Create a new filesystem syncer vnode for the specified mount point.
+ */
+int
+vfs_allocate_syncvnode(mp)
+ struct mount *mp;
+{
+ struct vnode *vp;
+ static long start, incr, next;
+ int error;
+
+ /* Allocate a new vnode */
+ if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
+ mp->mnt_syncer = NULL;
+ return (error);
+ }
+ vp->v_writecount = 1;
+ vp->v_type = VNON;
+ /*
+ * Place the vnode onto the syncer worklist. We attempt to
+ * scatter them about on the list so that they will go off
+ * at evenly distributed times even if all the filesystems
+ * are mounted at once.
+ */
+ next += incr;
+ if (next == 0 || next > syncer_maxdelay) {
+ start /= 2;
+ incr /= 2;
+ if (start == 0) {
+ start = syncer_maxdelay / 2;
+ incr = syncer_maxdelay;
+ }
+ next = start;
+ }
+ vn_syncer_add_to_worklist(vp, next);
+ mp->mnt_syncer = vp;
+ return (0);
+}
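
[The static start/incr/next scheme above hands each new mount a different
offset into the delay wheel. A userland sketch with a hypothetical name:
for a 32-slot wheel the successive returns are 16, 8, 24, 4, 12, 20, 28,
2, ..., spreading per-mount syncs across distinct seconds.]

static long
next_sync_offset(long maxdelay)
{
	static long start, incr, next;

	next += incr;
	if (next == 0 || next > maxdelay) {
		start /= 2;		/* halve the stride when we wrap */
		incr /= 2;
		if (start == 0) {
			start = maxdelay / 2;
			incr = maxdelay;
		}
		next = start;
	}
	return (next);
}
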
+
+/*
+ * Do a lazy sync of the filesystem.
+ */
+int
+sync_fsync(v)
+ void *v;
+{
+ struct vop_fsync_args /* {
+ struct vnode *a_vp;
+ struct ucred *a_cred;
+ int a_waitfor;
+ struct proc *a_p;
+ } */ *ap = v;
+
+ struct vnode *syncvp = ap->a_vp;
+ struct mount *mp = syncvp->v_mount;
+ int asyncflag;
+
+ /*
+ * We only need to do something if this is a lazy evaluation.
+ */
+ if (ap->a_waitfor != MNT_LAZY)
+ return (0);
+
+ /*
+ * Move ourselves to the back of the sync list.
+ */
+ LIST_REMOVE(syncvp, v_synclist);
+ vn_syncer_add_to_worklist(syncvp, syncdelay);
+
+ /*
+ * Walk the list of vnodes pushing all that are dirty and
+ * not already on the sync list.
+ */
+ simple_lock(&mountlist_slock);
+ if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, ap->a_p) == 0) {
+ asyncflag = mp->mnt_flag & MNT_ASYNC;
+ mp->mnt_flag &= ~MNT_ASYNC;
+ VFS_SYNC(mp, MNT_LAZY, ap->a_cred, ap->a_p);
+ if (asyncflag)
+ mp->mnt_flag |= MNT_ASYNC;
+ vfs_unbusy(mp, ap->a_p);
+ }
+ return (0);
+}
+
+/*
+ * The syncer vnode is no longer needed and is being decommissioned.
+ */
+int
+sync_inactive(v)
+ void *v;
+
+{
+ struct vop_inactive_args /* {
+ struct vnode *a_vp;
+ struct proc *a_p;
+ } */ *ap = v;
+
+ struct vnode *vp = ap->a_vp;
+
+ if (vp->v_usecount == 0)
+ return (0);
+ vp->v_mount->mnt_syncer = NULL;
+ LIST_REMOVE(vp, v_synclist);
+ vp->v_writecount = 0;
+ vput(vp);
+ return (0);
+}
+
+/*
+ * Print out a syncer vnode.
+ */
+int
+sync_print(v)
+ void *v;
+
+{
+ struct vop_print_args /* {
+ struct vnode *a_vp;
+ } */ *ap = v;
+ struct vnode *vp = ap->a_vp;
+
+ printf("syncer vnode");
+ if (vp->v_vnlock != NULL)
+ lockmgr_printinfo(vp->v_vnlock);
+ printf("\n");
+ return (0);
+}
+