Mirror of https://github.com/freebsd/freebsd-src.git (synced 2024-11-30 00:02:44 +00:00)
Lock reporting and assertion changes.
* lockstatus() and VOP_ISLOCKED() get a new process argument and a new
  return value: LK_EXCLOTHER, when the lock is held exclusively by another
  process.
* The ASSERT_VOP_(UN)LOCKED family is extended to use what this gives them.
* Extend the vnode_if.src format to allow more exact specification than
  locked/unlocked.

This commit should not do any semantic changes unless you are using
DEBUG_VFS_LOCKS.

Discussed with:	grog, mch, peter, phk
Reviewed by:	peter
parent 47e98476fe
commit 6bdfe06ad9
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=54444
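For orientation, here is a minimal sketch (not part of this commit) of how a caller can use the extended interface. The helper function and its printf() reporting are purely illustrative; the VOP_ISLOCKED() signature and the LK_EXCLUSIVE/LK_EXCLOTHER/LK_SHARED return values are the ones introduced or used by this change.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/proc.h>
    #include <sys/lock.h>
    #include <sys/vnode.h>

    /*
     * Hypothetical helper: report the lock state of a vnode as seen by the
     * current process.  Passing curproc lets lockstatus()/VOP_ISLOCKED()
     * distinguish "exclusively locked by us" (LK_EXCLUSIVE) from
     * "exclusively locked by someone else" (LK_EXCLOTHER); passing NULL
     * keeps the old behaviour, where any exclusive holder reports
     * LK_EXCLUSIVE.
     */
    static void
    report_vnode_lock(struct vnode *vp)
    {

        switch (VOP_ISLOCKED(vp, curproc)) {
        case LK_EXCLUSIVE:
            printf("%p: exclusively locked by this process\n", vp);
            break;
        case LK_EXCLOTHER:
            printf("%p: exclusively locked by another process\n", vp);
            break;
        case LK_SHARED:
            printf("%p: share locked\n", vp);
            break;
        default:
            printf("%p: not locked\n", vp);
            break;
        }
    }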
@@ -1865,7 +1865,7 @@ coda_islocked(v)
     struct cnode *cp = VTOC(ap->a_vp);
     ENTRY;
 
-    return (lockstatus(&cp->c_lock));
+    return (lockstatus(&cp->c_lock, ap->a_p));
 }
 
 /* How one looks up a vnode given a device/inode pair: */

@@ -3978,7 +3978,7 @@ loop:
      * way to accomplish this is to sync the entire filesystem (luckily
      * this happens rarely).
      */
-    if (vn_isdisk(vp) && vp->v_specmountpoint && !VOP_ISLOCKED(vp) &&
+    if (vn_isdisk(vp) && vp->v_specmountpoint && !VOP_ISLOCKED(vp, NULL) &&
         (error = VFS_SYNC(vp->v_specmountpoint, MNT_WAIT, ap->a_cred,
          ap->a_p)) != 0)
             return (error);

@@ -362,7 +362,7 @@ vnstrategy(struct buf *bp)
         auio.uio_rw = UIO_WRITE;
         auio.uio_resid = bp->b_bcount;
         auio.uio_procp = curproc;
-        if (!VOP_ISLOCKED(vn->sc_vp)) {
+        if (!VOP_ISLOCKED(vn->sc_vp, NULL)) {
             isvplocked = 1;
             vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY, curproc);
         }

@@ -1865,7 +1865,7 @@ coda_islocked(v)
     struct cnode *cp = VTOC(ap->a_vp);
     ENTRY;
 
-    return (lockstatus(&cp->c_lock));
+    return (lockstatus(&cp->c_lock, ap->a_p));
 }
 
 /* How one looks up a vnode given a device/inode pair: */
@@ -119,7 +119,7 @@ nullfs_mount(mp, path, data, ndp, p)
      * (XXX) VOP_ISLOCKED is needed?
      */
     if ((mp->mnt_vnodecovered->v_op == null_vnodeop_p) &&
-        VOP_ISLOCKED(mp->mnt_vnodecovered)) {
+        VOP_ISLOCKED(mp->mnt_vnodecovered, NULL)) {
         VOP_UNLOCK(mp->mnt_vnodecovered, 0, p);
         isvnunlocked = 1;
     }

@@ -132,7 +132,7 @@ nullfs_mount(mp, path, data, ndp, p)
     /*
      * Re-lock vnode.
      */
-    if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered))
+    if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered, NULL))
         vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY, p);
 
     if (error)

@@ -296,7 +296,7 @@ nullfs_root(mp, vpp)
      */
     vp = MOUNTTONULLMOUNT(mp)->nullm_rootvp;
     VREF(vp);
-    if (VOP_ISLOCKED(vp)) {
+    if (VOP_ISLOCKED(vp, NULL)) {
         /*
          * XXX
          * Should we check type of node?

@@ -372,13 +372,13 @@ null_lookup(ap)
     vp = *ap->a_vpp;
     if (dvp == vp)
         return (error);
-    if (!VOP_ISLOCKED(dvp)) {
+    if (!VOP_ISLOCKED(dvp, NULL)) {
         unlockargs.a_vp = dvp;
         unlockargs.a_flags = 0;
         unlockargs.a_p = p;
         vop_nounlock(&unlockargs);
     }
-    if (vp != NULLVP && VOP_ISLOCKED(vp)) {
+    if (vp != NULLVP && VOP_ISLOCKED(vp, NULL)) {
         lockargs.a_vp = vp;
         lockargs.a_flags = LK_SHARED;
         lockargs.a_p = p;

@@ -498,7 +498,7 @@ loop:
      */
     if (vp->v_mount != mp)
         goto loop;
-    if (VOP_ISLOCKED(vp) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
+    if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
         waitfor == MNT_LAZY)
         continue;
     if (vget(vp, LK_EXCLUSIVE, p))
@@ -148,7 +148,8 @@ union_mount(mp, path, data, ndp, p)
     vrele(ndp->ni_dvp);
     ndp->ni_dvp = NULL;
 
-    UDEBUG(("mount_root UPPERVP %p locked = %d\n", upperrootvp, VOP_ISLOCKED(upperrootvp)));
+    UDEBUG(("mount_root UPPERVP %p locked = %d\n", upperrootvp,
+        VOP_ISLOCKED(upperrootvp, NULL)));
 
     /*
      * Check multi union mount to avoid `lock myself again' panic.

@@ -396,7 +397,8 @@ union_root(mp, vpp)
      * root union_node being locked. We let union_allocvp() deal with
      * it.
      */
-    UDEBUG(("union_root UPPERVP %p locked = %d\n", um->um_uppervp, VOP_ISLOCKED(um->um_uppervp)));
+    UDEBUG(("union_root UPPERVP %p locked = %d\n", um->um_uppervp,
+        VOP_ISLOCKED(um->um_uppervp, NULL)));
 
     VREF(um->um_uppervp);
     if (um->um_lowervp)

@@ -405,7 +407,8 @@ union_root(mp, vpp)
     error = union_allocvp(vpp, mp, NULLVP, NULLVP, NULL,
         um->um_uppervp, um->um_lowervp, 1);
     UDEBUG(("error %d\n", error));
-    UDEBUG(("union_root2 UPPERVP %p locked = %d\n", um->um_uppervp, VOP_ISLOCKED(um->um_uppervp)));
+    UDEBUG(("union_root2 UPPERVP %p locked = %d\n", um->um_uppervp,
+        VOP_ISLOCKED(um->um_uppervp, NULL)));
 
     return (error);
 }

@@ -363,10 +363,10 @@ union_lookup(ap)
         uerror,
         upperdvp,
         upperdvp->v_usecount,
-        VOP_ISLOCKED(upperdvp),
+        VOP_ISLOCKED(upperdvp, NULL),
         uppervp,
         (uppervp ? uppervp->v_usecount : -99),
-        (uppervp ? VOP_ISLOCKED(uppervp) : -99)
+        (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
     ));
 
     /*

@@ -1698,7 +1698,7 @@ union_abortop(ap)
     struct componentname *cnp = ap->a_cnp;
     struct proc *p = cnp->cn_proc;
     struct union_node *un = VTOUNION(ap->a_dvp);
-    int islocked = VOP_ISLOCKED(ap->a_dvp);
+    int islocked = VOP_ISLOCKED(ap->a_dvp, NULL);
     struct vnode *vp;
     int error;
 

@@ -1850,7 +1850,7 @@ union_unlock(ap)
      */
 
     if ((un->un_flags & UN_ULOCK) &&
-        lockstatus(&un->un_lock) != LK_EXCLUSIVE) {
+        lockstatus(&un->un_lock, NULL) != LK_EXCLUSIVE) {
         un->un_flags &= ~UN_ULOCK;
         VOP_UNLOCK(un->un_uppervp, LK_EXCLUSIVE, p);
     }
@@ -511,15 +511,19 @@ lockinit(lkp, prio, wmesg, timo, flags)
  * Determine the status of a lock.
  */
 int
-lockstatus(lkp)
+lockstatus(lkp, p)
     struct lock *lkp;
+    struct proc *p;
 {
     int lock_type = 0;
 
     simple_lock(&lkp->lk_interlock);
-    if (lkp->lk_exclusivecount != 0)
-        lock_type = LK_EXCLUSIVE;
-    else if (lkp->lk_sharecount != 0)
+    if (lkp->lk_exclusivecount != 0) {
+        if (p == NULL || lkp->lk_lockholder == p->p_pid)
+            lock_type = LK_EXCLUSIVE;
+        else
+            lock_type = LK_EXCLOTHER;
+    } else if (lkp->lk_sharecount != 0)
         lock_type = LK_SHARED;
     simple_unlock(&lkp->lk_interlock);
     return (lock_type);
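A note on the new argument (the snippet below is illustrative only; lkp stands for any initialized lockmgr lock): when the caller passes a NULL process pointer, lockstatus() behaves as before and reports LK_EXCLUSIVE for any exclusive holder; only a non-NULL process is compared against lk_lockholder, which is what makes the LK_EXCLOTHER case possible.

    struct lock *lkp;       /* some lockmgr lock, already initialized */
    int old_view, new_view;

    old_view = lockstatus(lkp, NULL);     /* LK_EXCLUSIVE for any exclusive holder */
    new_view = lockstatus(lkp, curproc);  /* LK_EXCLUSIVE only if we hold it, else LK_EXCLOTHER */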
@@ -255,6 +255,7 @@ int
 vop_stdislocked(ap)
     struct vop_islocked_args /* {
         struct vnode *a_vp;
+        struct proc *a_p;
     } */ *ap;
 {
     struct lock *l;

@@ -262,7 +263,7 @@ vop_stdislocked(ap)
     if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
         return 0;
 
-    return (lockstatus(l));
+    return (lockstatus(l, ap->a_p));
 }
 
 /*

@@ -484,13 +485,14 @@ int
 vop_noislocked(ap)
     struct vop_islocked_args /* {
         struct vnode *a_vp;
+        struct proc *a_p;
     } */ *ap;
 {
     struct vnode *vp = ap->a_vp;
 
     if (vp->v_vnlock == NULL)
         return (0);
-    return (lockstatus(vp->v_vnlock));
+    return (lockstatus(vp->v_vnlock, ap->a_p));
 }
 
 /*
@@ -990,7 +990,7 @@ sched_sync(void)
         splx(s);
 
         while ((vp = LIST_FIRST(slp)) != NULL) {
-            if (VOP_ISLOCKED(vp) == 0) {
+            if (VOP_ISLOCKED(vp, NULL) == 0) {
                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
                 (void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
                 VOP_UNLOCK(vp, 0, p);

@@ -1962,7 +1962,7 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
             continue;
         }
         LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
-            if (VOP_ISLOCKED(vp))
+            if (VOP_ISLOCKED(vp, NULL))
                 vprint((char *)0, vp);
         }
         simple_lock(&mountlist_slock);

@@ -2450,7 +2450,7 @@ loop:
         obj = vp->v_object;
         if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
             continue;
-        if (VOP_ISLOCKED(vp))
+        if (VOP_ISLOCKED(vp, NULL))
             continue;
     }
 

@@ -990,7 +990,7 @@ sched_sync(void)
         splx(s);
 
         while ((vp = LIST_FIRST(slp)) != NULL) {
-            if (VOP_ISLOCKED(vp) == 0) {
+            if (VOP_ISLOCKED(vp, NULL) == 0) {
                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
                 (void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
                 VOP_UNLOCK(vp, 0, p);

@@ -1962,7 +1962,7 @@ DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
             continue;
         }
         LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
-            if (VOP_ISLOCKED(vp))
+            if (VOP_ISLOCKED(vp, NULL))
                 vprint((char *)0, vp);
         }
         simple_lock(&mountlist_slock);

@@ -2450,7 +2450,7 @@ loop:
         obj = vp->v_object;
         if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
             continue;
-        if (VOP_ISLOCKED(vp))
+        if (VOP_ISLOCKED(vp, NULL))
             continue;
     }
 
@@ -43,7 +43,11 @@
 # "error" column defines the locking state on error exit.
 #
 # The locking value can take the following values:
-# L: locked.
+# L: locked; not converted to type of lock.
+# A: any lock type.
+# S: locked with shared lock.
+# E: locked with exclusive lock for this process.
+# O: locked with exclusive lock for other process.
 # U: unlocked.
 # -: not applicable. vnode does not yet (or no longer) exists.
 # =: the same on input and output, may be either L or U.

@@ -55,6 +59,7 @@
 #
 vop_islocked {
     IN struct vnode *vp;
+    IN struct proc *p;
 };
 
 #
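The new letters give vnode_if.src a richer vocabulary for the in/out/error lock-state columns than the old locked/unlocked distinction. A purely hypothetical entry using them might look like the following (vop_frobnicate does not exist; it only illustrates the notation):

    #
    #% frobnicate   vp      E E E
    #
    vop_frobnicate {
        IN struct vnode *vp;
        IN struct proc *p;
    };

Read column-wise: vp must be exclusively locked by the calling process on entry and stays that way on both successful and error return.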
@@ -264,7 +264,7 @@ loop:
         if (vp->v_mount != mp)
             goto loop;
         nvp = vp->v_mntvnodes.le_next;
-        if (VOP_ISLOCKED(vp))
+        if (VOP_ISLOCKED(vp, NULL))
             continue;
         if (TAILQ_EMPTY(&vp->v_dirtyblkhd))
             continue;
@@ -119,7 +119,7 @@ nullfs_mount(mp, path, data, ndp, p)
      * (XXX) VOP_ISLOCKED is needed?
      */
     if ((mp->mnt_vnodecovered->v_op == null_vnodeop_p) &&
-        VOP_ISLOCKED(mp->mnt_vnodecovered)) {
+        VOP_ISLOCKED(mp->mnt_vnodecovered, NULL)) {
         VOP_UNLOCK(mp->mnt_vnodecovered, 0, p);
         isvnunlocked = 1;
     }

@@ -132,7 +132,7 @@ nullfs_mount(mp, path, data, ndp, p)
     /*
      * Re-lock vnode.
      */
-    if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered))
+    if (isvnunlocked && !VOP_ISLOCKED(mp->mnt_vnodecovered, NULL))
         vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY, p);
 
     if (error)

@@ -296,7 +296,7 @@ nullfs_root(mp, vpp)
      */
     vp = MOUNTTONULLMOUNT(mp)->nullm_rootvp;
     VREF(vp);
-    if (VOP_ISLOCKED(vp)) {
+    if (VOP_ISLOCKED(vp, NULL)) {
         /*
          * XXX
          * Should we check type of node?

@@ -372,13 +372,13 @@ null_lookup(ap)
     vp = *ap->a_vpp;
     if (dvp == vp)
         return (error);
-    if (!VOP_ISLOCKED(dvp)) {
+    if (!VOP_ISLOCKED(dvp, NULL)) {
         unlockargs.a_vp = dvp;
         unlockargs.a_flags = 0;
         unlockargs.a_p = p;
         vop_nounlock(&unlockargs);
     }
-    if (vp != NULLVP && VOP_ISLOCKED(vp)) {
+    if (vp != NULLVP && VOP_ISLOCKED(vp, NULL)) {
         lockargs.a_vp = vp;
         lockargs.a_flags = LK_SHARED;
         lockargs.a_p = p;
@@ -148,7 +148,8 @@ union_mount(mp, path, data, ndp, p)
     vrele(ndp->ni_dvp);
     ndp->ni_dvp = NULL;
 
-    UDEBUG(("mount_root UPPERVP %p locked = %d\n", upperrootvp, VOP_ISLOCKED(upperrootvp)));
+    UDEBUG(("mount_root UPPERVP %p locked = %d\n", upperrootvp,
+        VOP_ISLOCKED(upperrootvp, NULL)));
 
     /*
      * Check multi union mount to avoid `lock myself again' panic.

@@ -396,7 +397,8 @@ union_root(mp, vpp)
      * root union_node being locked. We let union_allocvp() deal with
      * it.
      */
-    UDEBUG(("union_root UPPERVP %p locked = %d\n", um->um_uppervp, VOP_ISLOCKED(um->um_uppervp)));
+    UDEBUG(("union_root UPPERVP %p locked = %d\n", um->um_uppervp,
+        VOP_ISLOCKED(um->um_uppervp, NULL)));
 
     VREF(um->um_uppervp);
     if (um->um_lowervp)

@@ -405,7 +407,8 @@ union_root(mp, vpp)
     error = union_allocvp(vpp, mp, NULLVP, NULLVP, NULL,
         um->um_uppervp, um->um_lowervp, 1);
     UDEBUG(("error %d\n", error));
-    UDEBUG(("union_root2 UPPERVP %p locked = %d\n", um->um_uppervp, VOP_ISLOCKED(um->um_uppervp)));
+    UDEBUG(("union_root2 UPPERVP %p locked = %d\n", um->um_uppervp,
+        VOP_ISLOCKED(um->um_uppervp, NULL)));
 
     return (error);
 }

@@ -363,10 +363,10 @@ union_lookup(ap)
         uerror,
         upperdvp,
         upperdvp->v_usecount,
-        VOP_ISLOCKED(upperdvp),
+        VOP_ISLOCKED(upperdvp, NULL),
         uppervp,
         (uppervp ? uppervp->v_usecount : -99),
-        (uppervp ? VOP_ISLOCKED(uppervp) : -99)
+        (uppervp ? VOP_ISLOCKED(uppervp, NULL) : -99)
     ));
 
     /*

@@ -1698,7 +1698,7 @@ union_abortop(ap)
     struct componentname *cnp = ap->a_cnp;
     struct proc *p = cnp->cn_proc;
     struct union_node *un = VTOUNION(ap->a_dvp);
-    int islocked = VOP_ISLOCKED(ap->a_dvp);
+    int islocked = VOP_ISLOCKED(ap->a_dvp, NULL);
     struct vnode *vp;
     int error;
 

@@ -1850,7 +1850,7 @@ union_unlock(ap)
      */
 
     if ((un->un_flags & UN_ULOCK) &&
-        lockstatus(&un->un_lock) != LK_EXCLUSIVE) {
+        lockstatus(&un->un_lock, NULL) != LK_EXCLUSIVE) {
         un->un_flags &= ~UN_ULOCK;
         VOP_UNLOCK(un->un_uppervp, LK_EXCLUSIVE, p);
     }
@@ -380,6 +380,7 @@ int
 nfs_islocked(ap)
     struct vop_islocked_args /* {
         struct vnode *a_vp;
+        struct proc *a_p;
     } */ *ap;
 {
     return VTONFS(ap->a_vp)->n_flag & NLOCKED ? 1 : 0;

@@ -1062,7 +1062,7 @@ loop:
      */
     if (vp->v_mount != mp)
         goto loop;
-    if (VOP_ISLOCKED(vp) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
+    if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
         waitfor == MNT_LAZY)
         continue;
     if (vget(vp, LK_EXCLUSIVE, p))

@@ -380,6 +380,7 @@ int
 nfs_islocked(ap)
     struct vop_islocked_args /* {
         struct vnode *a_vp;
+        struct proc *a_p;
     } */ *ap;
 {
     return VTONFS(ap->a_vp)->n_flag & NLOCKED ? 1 : 0;

@@ -1062,7 +1062,7 @@ loop:
      */
     if (vp->v_mount != mp)
         goto loop;
-    if (VOP_ISLOCKED(vp) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
+    if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
         waitfor == MNT_LAZY)
         continue;
     if (vget(vp, LK_EXCLUSIVE, p))

@@ -498,7 +498,7 @@ loop:
      */
     if (vp->v_mount != mp)
         goto loop;
-    if (VOP_ISLOCKED(vp) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
+    if (VOP_ISLOCKED(vp, NULL) || TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
         waitfor == MNT_LAZY)
         continue;
     if (vget(vp, LK_EXCLUSIVE, p))
@@ -95,6 +95,8 @@ struct lock {
  * LK_DRAIN - wait for all activity on the lock to end, then mark it
  *      decommissioned. This feature is used before freeing a lock that
  *      is part of a piece of memory that is about to be freed.
+ * LK_EXCLOTHER - return for lockstatus(). Used when another process
+ *      holds the lock exclusively.
  *
  * These are flags that are passed to the lockmgr routine.
  */

@@ -106,6 +108,7 @@ struct lock {
 #define LK_DOWNGRADE    0x00000005  /* exclusive-to-shared downgrade */
 #define LK_RELEASE      0x00000006  /* release any type of lock */
 #define LK_DRAIN        0x00000007  /* wait for all lock activity to end */
+#define LK_EXCLOTHER    0x00000008  /* other process holds lock */
 /*
  * External lock flags.
  *

@@ -187,7 +190,7 @@ int lockmgr __P((struct lock *, u_int flags,
             struct simplelock *, struct proc *p));
 #endif
 void    lockmgr_printinfo __P((struct lock *));
-int     lockstatus __P((struct lock *));
+int     lockstatus __P((struct lock *, struct proc *));
 int     lockcount __P((struct lock *));
 
 #ifdef SIMPLELOCK_DEBUG

@@ -95,6 +95,8 @@ struct lock {
  * LK_DRAIN - wait for all activity on the lock to end, then mark it
  *      decommissioned. This feature is used before freeing a lock that
  *      is part of a piece of memory that is about to be freed.
+ * LK_EXCLOTHER - return for lockstatus(). Used when another process
+ *      holds the lock exclusively.
  *
  * These are flags that are passed to the lockmgr routine.
  */

@@ -106,6 +108,7 @@ struct lock {
 #define LK_DOWNGRADE    0x00000005  /* exclusive-to-shared downgrade */
 #define LK_RELEASE      0x00000006  /* release any type of lock */
 #define LK_DRAIN        0x00000007  /* wait for all lock activity to end */
+#define LK_EXCLOTHER    0x00000008  /* other process holds lock */
 /*
  * External lock flags.
  *

@@ -187,7 +190,7 @@ int lockmgr __P((struct lock *, u_int flags,
             struct simplelock *, struct proc *p));
 #endif
 void    lockmgr_printinfo __P((struct lock *));
-int     lockstatus __P((struct lock *));
+int     lockstatus __P((struct lock *, struct proc *));
 int     lockcount __P((struct lock *));
 
 #ifdef SIMPLELOCK_DEBUG
@@ -421,15 +421,56 @@ struct vop_generic_args {
      || (vp)->v_tag == VT_MSDOSFS \
      || (vp)->v_tag == VT_DEVFS)
 
-#define ASSERT_VOP_LOCKED(vp, str) \
-if ((vp) && IS_LOCKING_VFS(vp) && !VOP_ISLOCKED(vp)) { \
-    panic("%s: %p is not locked but should be", str, vp); \
-}
+#define ASSERT_VOP_LOCKED(vp, str) \
+do { \
+    struct vnode *_vp = (vp); \
+ \
+    if (_vp && IS_LOCKING_VFS(_vp) && !VOP_ISLOCKED(_vp, NULL)) \
+        panic("%s: %p is not locked but should be", str, _vp); \
+} while (0)
 
-#define ASSERT_VOP_UNLOCKED(vp, str) \
-if ((vp) && IS_LOCKING_VFS(vp) && VOP_ISLOCKED(vp)) { \
-    panic("%s: %p is locked but shouldn't be", str, vp); \
-}
+#define ASSERT_VOP_UNLOCKED(vp, str) \
+do { \
+    struct vnode *_vp = (vp); \
+    int lockstate; \
+ \
+    if (_vp && IS_LOCKING_VFS(_vp)) { \
+        lockstate = VOP_ISLOCKED(_vp, curproc); \
+        if (lockstate == LK_EXCLUSIVE) \
+            panic("%s: %p is locked but should not be", \
+                str, _vp); \
+    } \
+} while (0)
+
+#define ASSERT_VOP_ELOCKED(vp, str) \
+do { \
+    struct vnode *_vp = (vp); \
+ \
+    if (_vp && IS_LOCKING_VFS(_vp) && \
+        VOP_ISLOCKED(_vp, curproc) != LK_EXCLUSIVE) \
+        panic("%s: %p is not exclusive locked but should be", \
+            str, _vp); \
+} while (0)
+
+#define ASSERT_VOP_ELOCKED_OTHER(vp, str) \
+do { \
+    struct vnode *_vp = (vp); \
+ \
+    if (_vp && IS_LOCKING_VFS(_vp) && \
+        VOP_ISLOCKED(_vp, curproc) != LK_EXCLOTHER) \
+        panic("%s: %p is not exclusive locked by another proc", \
+            str, _vp); \
+} while (0)
+
+#define ASSERT_VOP_SLOCKED(vp, str) \
+do { \
+    struct vnode *_vp = (vp); \
+ \
+    if (_vp && IS_LOCKING_VFS(_vp) && \
+        VOP_ISLOCKED(_vp, NULL) != LK_SHARED) \
+        panic("%s: %p is not locked shared but should be", \
+            str, _vp); \
+} while (0)
 
 #else
 
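With the process argument available, the assertion macros can tell the interesting ownership states apart. Below is a minimal sketch (assuming a kernel configured with DEBUG_VFS_LOCKS; the filesystem, function, and locking protocol are hypothetical) of how a routine might state its locking expectations:

    #include <sys/param.h>
    #include <sys/proc.h>
    #include <sys/lock.h>
    #include <sys/vnode.h>

    /*
     * Hypothetical operation: the caller must hand us dvp exclusively
     * locked by the current process; vp must not be locked by us.  If
     * another process holds vp exclusively, VOP_ISLOCKED(vp, curproc)
     * returns LK_EXCLOTHER rather than LK_EXCLUSIVE, so the unlocked
     * assertion does not fire.
     */
    static int
    examplefs_frobnicate(struct vnode *dvp, struct vnode *vp, struct proc *p)
    {

        ASSERT_VOP_ELOCKED(dvp, "examplefs_frobnicate");
        ASSERT_VOP_UNLOCKED(vp, "examplefs_frobnicate");

        /* ... the real work would go here ... */
        return (0);
    }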
@@ -3978,7 +3978,7 @@ loop:
      * way to accomplish this is to sync the entire filesystem (luckily
      * this happens rarely).
      */
-    if (vn_isdisk(vp) && vp->v_specmountpoint && !VOP_ISLOCKED(vp) &&
+    if (vn_isdisk(vp) && vp->v_specmountpoint && !VOP_ISLOCKED(vp, NULL) &&
         (error = VFS_SYNC(vp->v_specmountpoint, MNT_WAIT, ap->a_cred,
          ap->a_p)) != 0)
             return (error);

@@ -852,7 +852,7 @@ rescan0:
         if (object->type == OBJT_VNODE) {
             vp = object->handle;
 
-            if (VOP_ISLOCKED(vp) ||
+            if (VOP_ISLOCKED(vp, NULL) ||
                 vp->v_data == NULL ||
                 vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
                 if ((m->queue == PQ_INACTIVE) &&

@@ -324,7 +324,7 @@ _zget(vm_zone_t z)
      * We can wait, so just do normal map allocation in the appropriate
      * map.
      */
-    if (lockstatus(&kernel_map->lock)) {
+    if (lockstatus(&kernel_map->lock, NULL)) {
         int s;
         s = splvm();
 #ifdef SMP