diff --git a/src/afs/FBSD/osi_vcache.c b/src/afs/FBSD/osi_vcache.c
index 52d7ae36f4..c2060c74f0 100644
--- a/src/afs/FBSD/osi_vcache.c
+++ b/src/afs/FBSD/osi_vcache.c
@@ -36,15 +36,15 @@ osi_TryEvictVCache(struct vcache *avc, int *slept) {
     *slept = 1;
 #if defined(AFS_FBSD80_ENV)
-    /* vgone() is correct, but v_usecount is assumed not
-     * to be 0, and I suspect that currently our usage ensures that
-     * in fact it will */
-    if (vrefcnt(vp) < 1) {
+    /* vgone() is correct, but vgonel() panics if v_usecount is 0--
+     * this is particularly confusing since vgonel() will trigger
+     * vop_reclaim, in the call path of which we'll check v_usecount
+     * and decide that the vnode is busy.  Splat. */
+    if (vrefcnt(vp) < 1)
 	vref(vp);
-    }
+    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
 #endif
-
     vgone(vp);
 #if defined(AFS_FBSD80_ENV)
     VOP_UNLOCK(vp, 0);
diff --git a/src/afs/FBSD/osi_vm.c b/src/afs/FBSD/osi_vm.c
index 0a813e812b..6f49d7d232 100644
--- a/src/afs/FBSD/osi_vm.c
+++ b/src/afs/FBSD/osi_vm.c
@@ -89,15 +89,22 @@ osi_VM_FlushVCache(struct vcache *avc, int *slept) {
     struct vm_object *obj;
     struct vnode *vp;
 
-    if (VREFCOUNT(avc) > 1)
+    if (VREFCOUNT(avc) > 1) {
 	return EBUSY;
+    }
 
-    if (avc->opens)
+    /* XXX
+     * At some point, the value of avc->opens here was typically -1.
+     * This was caused by incorrectly performing afs_close
+     * processing on vnodes being recycled */
+    if (avc->opens) {
 	return EBUSY;
+    }
 
     /* if a lock is held, give up */
-    if (CheckLock(&avc->lock))
+    if (CheckLock(&avc->lock)) {
 	return EBUSY;
+    }
 
     return(0);
diff --git a/src/afs/FBSD/osi_vnodeops.c b/src/afs/FBSD/osi_vnodeops.c
index 12cacb0036..ea3aa34418 100644
--- a/src/afs/FBSD/osi_vnodeops.c
+++ b/src/afs/FBSD/osi_vnodeops.c
@@ -666,8 +666,24 @@ afs_vop_close(ap)
 		     * struct thread *a_td;
 		     * } */ *ap;
 {
-    int code;
-    struct vcache *avc = VTOAFS(ap->a_vp);
+    int code, iflag;
+    struct vnode *vp = ap->a_vp;
+    struct vcache *avc = VTOAFS(vp);
+
+#if defined(AFS_FBSD80_ENV)
+    VI_LOCK(vp);
+    iflag = vp->v_iflag & VI_DOOMED;
+    VI_UNLOCK(vp);
+    if (iflag & VI_DOOMED) {
+	/* osi_FlushVCache (correctly) calls vgone() on recycled vnodes, so
+	 * we don't have an afs_close to process in that case */
+	if (avc->opens != 0)
+	    panic("afs_vop_close: doomed vnode %p has vcache %p with non-zero opens %d\n",
+		  vp, avc, avc->opens);
+	return 0;
+    }
+#endif
+
     AFS_GLOCK();
     if (ap->a_cred)
 	code = afs_close(avc, ap->a_fflag, ap->a_cred);
@@ -1473,12 +1489,8 @@ afs_vop_reclaim(struct vop_reclaim_args *ap)
     if (!haveGlock)
 	AFS_GUNLOCK();
 
-    /*
-     * XXX Pretend it worked, to prevent panic on shutdown
-     * Garrett, please fix - Jim Rees
-     */
     if (code) {
-	printf("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
+	afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
 	VOP_PRINT(vp);
     }
 
@@ -1539,7 +1551,7 @@ afs_vop_print(ap)
     struct vcache *vc = VTOAFS(ap->a_vp);
     int s = vc->f.states;
 
-    printf("tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vp->v_tag,
+    printf("vc %p vp %p tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vc, vp, vp->v_tag,
 	   (int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
 	   (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
 	   vc->execsOrWriters);
diff --git a/src/afs/VNOPS/afs_vnop_write.c b/src/afs/VNOPS/afs_vnop_write.c
index 49367b37b6..ac0894e088 100644
--- a/src/afs/VNOPS/afs_vnop_write.c
+++ b/src/afs/VNOPS/afs_vnop_write.c
@@ -815,6 +815,22 @@ afs_close(OSI_VC_DECL(avc), afs_int32 aflags, afs_ucred_t *acred)
 	    code = avc->vc_error;
 	    avc->vc_error = 0;
 	}
+#if defined(AFS_FBSD80_ENV)
+	/* XXX */
+	if (!avc->opens) {
+	    afs_int32 opens, is_free, is_gone, is_doomed, iflag;
+	    struct vnode *vp = AFSTOV(avc);
+	    VI_LOCK(vp);
+	    is_doomed = vp->v_iflag & VI_DOOMED;
+	    is_free = vp->v_iflag & VI_FREE;
+	    is_gone = vp->v_iflag & VI_DOINGINACT;
+	    iflag = vp->v_iflag;
+	    VI_UNLOCK(vp);
+	    opens = avc->opens;
+	    afs_warn("afs_close avc %p vp %p opens %d free %d doinginact %d doomed %d iflag %d\n",
+		     avc, vp, opens, is_free, is_gone, is_doomed, iflag);
+	}
+#endif
 	avc->opens--;
 	ReleaseWriteLock(&avc->lock);
     }
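
A minimal standalone sketch of the eviction sequence the first hunk arrives
at, assuming the FreeBSD 8-era vnode KPI used above (vrefcnt(), vref(),
vn_lock(), vgone()); the helper name is hypothetical and not part of the
patch:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/vnode.h>

    /* Hypothetical helper: doom a vnode without tripping the
     * v_usecount == 0 panic in vgonel(). */
    static void
    evict_vnode_sketch(struct vnode *vp)
    {
	/* vgonel() panics if v_usecount is 0, so guarantee at least
	 * one reference before dooming the vnode. */
	if (vrefcnt(vp) < 1)
	    vref(vp);

	/* vgone() expects the caller to hold the vnode lock. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vgone(vp);
	VOP_UNLOCK(vp, 0);
    }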
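
In the same spirit, a sketch of the doomed-vnode early return that
afs_vop_close gains, again assuming the FreeBSD 8-era interlock interfaces
(VI_LOCK(), VI_UNLOCK(), VI_DOOMED); the function is hypothetical and stands
in for the real close path:

    static int
    close_if_not_doomed_sketch(struct vnode *vp)
    {
	int doomed;

	/* v_iflag may change concurrently, so sample it under the
	 * vnode interlock rather than reading it bare. */
	VI_LOCK(vp);
	doomed = vp->v_iflag & VI_DOOMED;
	VI_UNLOCK(vp);

	/* A doomed vnode is already being recycled via vgone();
	 * there is no AFS open left to balance, so skip the normal
	 * close processing entirely. */
	if (doomed)
	    return (0);

	/* ... afs_close() bookkeeping would run here ... */
	return (0);
    }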