afs: Convert afs_vhashT to use struct afs_q

Because the afs_vhashT hash table uses singly-linked lists in its hash
buckets, to remove an entry we have to:

1. Find the bucket that should contain the entry to be removed.
2. Go through each element of this bucket until we find this entry.
3. Remove it.

Due to step number 2, the time required to remove an entry from a bucket
increases as the linked list associated with this bucket gets longer.
This can get particularly bad when evicting vcaches (ShakeLooseVCaches),
as each entry from the least recently used queue (VLRU) has to be found
in our afs_vhashT hash table. This problem is exacerbated when files are
repeatedly deleted and recreated in the same volume, resulting in many
vcaches with the same volume id and vnode number (but different unique
ids) in the same bucket.

To avoid this problem, build afs_vhashT using doubly-linked lists with
'struct afs_q', like we did for afs_vhashTV in commit 4fc48af8
(vc-hashing-be-less-expensive-20050728). Now, removing a vcache from
afs_vhashT is an O(1) operation (QRemove).

Change-Id: I5a1a9a090f9aa3d8884e2bb12aca1f1acc3b902d
Reviewed-on: https://gerrit.openafs.org/14949
Tested-by: BuildBot <buildbot@rampaginggeek.com>
Reviewed-by: Cheyenne Wills <cwills@sinenomine.net>
Reviewed-by: Michael Meffie <mmeffie@sinenomine.net>
Reviewed-by: Marcio Brito Barbosa <mbarbosa@sinenomine.net>
Reviewed-by: Andrew Deason <adeason@sinenomine.net>
This commit is contained in:
Marcio Barbosa 2022-05-24 13:03:51 +00:00 committed by Andrew Deason
parent e1bb50849e
commit f98e84458c
9 changed files with 130 additions and 71 deletions

View File

@ -316,7 +316,7 @@ afs_evict_inode(struct inode *ip)
if (vcp->vlruq.prev || vcp->vlruq.next)
osi_Panic("inode freed while on LRU");
if (vcp->hnext)
if (vcp->hashq.prev || vcp->hashq.next)
osi_Panic("inode freed while still hashed");
truncate_inode_pages(&ip->i_data, 0);
@ -338,7 +338,7 @@ afs_clear_inode(struct inode *ip)
if (vcp->vlruq.prev || vcp->vlruq.next)
osi_Panic("inode freed while on LRU");
if (vcp->hnext)
if (vcp->hashq.prev || vcp->hashq.next)
osi_Panic("inode freed while still hashed");
#if !defined(STRUCT_SUPER_OPERATIONS_HAS_ALLOC_INODE)

View File

@ -258,6 +258,7 @@ struct afs_q {
#define QTOV(e) QEntry(e, struct vcache, vlruq)
#define QTOC(e) QEntry(e, struct cell, lruq)
#define QTOVC(e) QEntry(e, struct vcache, hashq)
#define QTOVH(e) QEntry(e, struct vcache, vhashq)
/*!
@ -873,7 +874,7 @@ struct vcache {
#if !defined(AFS_LINUX_ENV)
struct vcache *nextfree; /* next on free list (if free) */
#endif
struct vcache *hnext; /* Hash next */
struct afs_q hashq; /* Hashed per-{volume,vnode} list */
struct afs_q vhashq; /* Hashed per-volume list */
/*! Queue of dirty vcaches. Lock with afs_disconDirtyLock */
struct afs_q dirtyq;
@ -1333,7 +1334,7 @@ extern afs_int32 afs_cacheStats; /*Stat entries in cache */
extern afs_int32 afs_freeDCCount; /*Count of elts in freeDCList */
extern afs_uint32 afs_CacheTooFullCount;
extern afs_uint32 afs_WaitForCacheDrainCount;
extern struct vcache *afs_vhashT[VCSIZE]; /*Stat cache hash table */
extern struct afs_q afs_vhashT[VCSIZE]; /*Stat cache hash table */
extern struct afs_q afs_vhashTV[VCSIZE]; /* cache hash table on volume */
extern afs_int32 afs_initState; /*Initialization state */
extern afs_int32 afs_termState; /* Termination state */

View File

@ -106,6 +106,7 @@ SRXAFSCB_GetCE(struct rx_call *a_call, afs_int32 a_index,
struct vcache *tvc; /*Ptr to current cache entry */
int code; /*Return code */
XSTATS_DECLS;
struct afs_q *tq, *uq;
RX_AFS_GLOCK();
@ -113,13 +114,18 @@ SRXAFSCB_GetCE(struct rx_call *a_call, afs_int32 a_index,
AFS_STATCNT(SRXAFSCB_GetCE);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (a_index == 0)
goto searchDone;
a_index--;
} /*Zip through current hash chain */
} /*Zip through hash chains */
tvc = NULL;
searchDone:
if (tvc == NULL) {
/*Past EOF */
@ -185,6 +191,7 @@ SRXAFSCB_GetCE64(struct rx_call *a_call, afs_int32 a_index,
struct vcache *tvc; /*Ptr to current cache entry */
int code; /*Return code */
XSTATS_DECLS;
struct afs_q *tq, *uq;
RX_AFS_GLOCK();
@ -192,13 +199,18 @@ SRXAFSCB_GetCE64(struct rx_call *a_call, afs_int32 a_index,
AFS_STATCNT(SRXAFSCB_GetCE64);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (a_index == 0)
goto searchDone;
a_index--;
} /*Zip through current hash chain */
} /*Zip through hash chains */
tvc = NULL;
searchDone:
if (tvc == NULL) {
/*Past EOF */
@ -484,7 +496,7 @@ loop1:
/*
* Clear callbacks just for the one file.
*/
struct vcache *uvc;
struct afs_q *tq, *uq;
afs_allCBs++;
if (a_fid->Vnode & 1)
afs_oddCBs++; /*Could do this on volume basis, too */
@ -493,8 +505,10 @@ loop1:
loop2:
ObtainReadLock(&afs_xvcache);
i = VCHash(&localFid);
for (tvc = afs_vhashT[i]; tvc; tvc = uvc) {
uvc = tvc->hnext;
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (tvc->f.fid.Fid.Vnode == a_fid->Vnode
&& tvc->f.fid.Fid.Volume == a_fid->Volume
&& tvc->f.fid.Fid.Unique == a_fid->Unique) {
@ -539,7 +553,7 @@ loop2:
vnode_put(AFSTOV(tvc));
#endif
ObtainReadLock(&afs_xvcache);
uvc = tvc->hnext;
uq = QNext(tq);
AFS_FAST_RELE(tvc);
}
} /*Walk through hash table */
@ -684,6 +698,7 @@ SRXAFSCB_InitCallBackState(struct rx_call *a_call)
struct server *ts;
int code = 0;
XSTATS_DECLS;
struct afs_q *tq, *uq;
RX_AFS_GLOCK();
@ -703,7 +718,10 @@ SRXAFSCB_InitCallBackState(struct rx_call *a_call)
0);
if (ts) {
for (i = 0; i < VCSIZE; i++)
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (tvc->callback == ts) {
afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC |
AFS_STALEVC_CLEARCB,

View File

@ -295,11 +295,15 @@ afs_FlushCBs(void)
{
int i;
struct vcache *tvc;
struct afs_q *tq, *uq;
ObtainWriteLock(&afs_xcbhash, 86); /* pretty likely I'm going to remove something */
for (i = 0; i < VCSIZE; i++) /* reset all the vnodes */
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
afs_StaleVCacheFlags(tvc, AFS_STALEVC_CBLOCKED |
AFS_STALEVC_CLEARCB |
AFS_STALEVC_SKIP_DNLC_FOR_INIT_FLUSHED, 0);
@ -321,11 +325,15 @@ afs_FlushServerCBs(struct server *srvp)
{
int i;
struct vcache *tvc;
struct afs_q *tq, *uq;
ObtainWriteLock(&afs_xcbhash, 86); /* pretty likely I'm going to remove something */
for (i = 0; i < VCSIZE; i++) { /* reset all the vnodes */
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (tvc->callback == srvp) {
afs_StaleVCacheFlags(tvc, AFS_STALEVC_CBLOCKED |
AFS_STALEVC_CLEARCB |

View File

@ -661,7 +661,7 @@ afs_ProcessOpCreate(struct vcache *avc, struct vrequest *areq,
struct VenusFid pdir_fid, newFid;
struct AFSCallBack CallBack;
struct AFSVolSync tsync;
struct vcache *tdp = NULL, *tvc = NULL;
struct vcache *tdp = NULL;
struct dcache *tdc = NULL;
struct afs_conn *tc;
struct rx_connection *rxconn;
@ -830,32 +830,12 @@ afs_ProcessOpCreate(struct vcache *avc, struct vrequest *areq,
/* The vcache goes first. */
ObtainWriteLock(&afs_xvcache, 735);
/* Old fid hash. */
hash = VCHash(&avc->f.fid);
/* New fid hash. */
new_hash = VCHash(&newFid);
/* Remove hash from old position. */
/* XXX: not checking array element contents. It shouldn't be empty.
* If it oopses, then something else might be wrong.
*/
if (afs_vhashT[hash] == avc) {
/* First in hash chain (might be the only one). */
afs_vhashT[hash] = avc->hnext;
} else {
/* More elements in hash chain. */
for (tvc = afs_vhashT[hash]; tvc; tvc = tvc->hnext) {
if (tvc->hnext == avc) {
tvc->hnext = avc->hnext;
break;
}
}
} /* if (!afs_vhashT[i]->hnext) */
QRemove(&avc->hashq);
QRemove(&avc->vhashq);
/* Insert hash in new position. */
avc->hnext = afs_vhashT[new_hash];
afs_vhashT[new_hash] = avc;
QAdd(&afs_vhashT[VCHash(&newFid)], &avc->hashq);
QAdd(&afs_vhashTV[VCHashV(&newFid)], &avc->vhashq);
ReleaseWriteLock(&afs_xvcache);
@ -1396,6 +1376,7 @@ afs_GenFakeFid(struct VenusFid *afid, afs_uint32 avtype, int lock)
{
struct vcache *tvc;
afs_uint32 max_unique = 0, i;
struct afs_q *tq, *uq;
switch (avtype) {
case VDIR:
@ -1410,7 +1391,10 @@ afs_GenFakeFid(struct VenusFid *afid, afs_uint32 avtype, int lock)
if (lock)
ObtainWriteLock(&afs_xvcache, 736);
i = VCHash(afid);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (tvc->f.fid.Fid.Unique > max_unique)
max_unique = tvc->f.fid.Fid.Unique;
}

View File

@ -260,11 +260,14 @@ afs_HaveCallBacksFrom(struct server *aserver)
afs_int32 now;
int i;
struct vcache *tvc;
struct afs_q *tq, *uq;
AFS_STATCNT(HaveCallBacksFrom);
now = osi_Time(); /* for checking for expired callbacks */
for (i = 0; i < VCSIZE; i++) { /* for all guys in the hash table */
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
/*
* Check to see if this entry has an unexpired callback promise
* from the required host

View File

@ -147,6 +147,7 @@ afs_CheckTokenCache(void)
struct vcache *tvc;
struct axscache *tofreelist;
int do_scan = 0;
struct afs_q *tq, *uq;
AFS_STATCNT(afs_CheckCacheResets);
ObtainReadLock(&afs_xvcache);
@ -177,7 +178,9 @@ afs_CheckTokenCache(void)
tofreelist = NULL;
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
/* really should do this under cache write lock, but that.
* is hard to under locking hierarchy */
if (tvc->Access) {
@ -225,12 +228,16 @@ afs_ResetAccessCache(afs_int32 uid, afs_int32 cell, int alock)
int i;
struct vcache *tvc;
struct axscache *ac;
struct afs_q *tq, *uq;
AFS_STATCNT(afs_ResetAccessCache);
if (alock)
ObtainReadLock(&afs_xvcache);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
/* really should do this under cache write lock, but that.
* is hard to under locking hierarchy */
if (tvc->Access && (cell == -1 || tvc->f.fid.Cell == cell)) {

View File

@ -69,7 +69,7 @@ static struct vcache *Initial_freeVCList; /*Initial list for above */
struct afs_q VLRU; /*vcache LRU */
afs_int32 vcachegen = 0;
unsigned int afs_paniconwarn = 0;
struct vcache *afs_vhashT[VCSIZE];
struct afs_q afs_vhashT[VCSIZE];
struct afs_q afs_vhashTV[VCSIZE];
static struct afs_cbr *afs_cbrHashT[CBRSIZE];
afs_int32 afs_bulkStatsLost;
@ -140,7 +140,7 @@ afs_InsertHashCBR(struct afs_cbr *cbr)
* Environment:
* afs_xvcache lock must be held for writing upon entry to
* prevent people from changing the vrefCount field, and to
* protect the lruq and hnext fields.
* protect the lruq and hashq fields.
* LOCK: afs_FlushVCache afs_xvcache W
* REFCNT: vcache ref count must be zero on entry except for osf1
* RACE: lock is dropped and reobtained, permitting race in caller
@ -153,8 +153,7 @@ int
afs_FlushVCache(struct vcache *avc, int *slept)
{ /*afs_FlushVCache */
afs_int32 i, code;
struct vcache **uvc, *wvc;
afs_int32 code;
/* NOTE: We must have nothing drop afs_xvcache until we have removed all
* possible references to this vcache. This means all hash tables, queues,
@ -189,16 +188,7 @@ afs_FlushVCache(struct vcache *avc, int *slept)
afs_bulkStatsLost++;
vcachegen++;
/* remove entry from the hash chain */
i = VCHash(&avc->f.fid);
uvc = &afs_vhashT[i];
for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
if (avc == wvc) {
*uvc = avc->hnext;
avc->hnext = NULL;
break;
}
}
QRemove(&avc->hashq);
/* remove entry from the volume hash table */
QRemove(&avc->vhashq);
@ -1004,16 +994,19 @@ void
afs_FlushAllVCaches(void)
{
int i;
struct vcache *tvc, *nvc;
struct vcache *tvc;
struct afs_q *tq, *uq;
ObtainWriteLock(&afs_xvcache, 867);
retry:
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = nvc) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
int slept;
nvc = tvc->hnext;
tvc = QTOVC(tq);
uq = QNext(tq);
if (afs_FlushVCache(tvc, &slept)) {
afs_warn("Failed to flush vcache 0x%lx\n", (unsigned long)(uintptrsz)tvc);
}
@ -1096,8 +1089,7 @@ afs_NewVCache_int(struct VenusFid *afid, struct server *serverp, int seq)
i = VCHash(afid);
j = VCHashV(afid);
tvc->hnext = afs_vhashT[i];
afs_vhashT[i] = tvc;
QAdd(&afs_vhashT[i], &tvc->hashq);
QAdd(&afs_vhashTV[j], &tvc->vhashq);
if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
@ -1170,6 +1162,7 @@ afs_FlushActiveVcaches(afs_int32 doflocks)
struct AFSVolSync tsync;
int didCore;
XSTATS_DECLS;
struct afs_q *tq, *uq;
AFS_STATCNT(afs_FlushActiveVcaches);
code = afs_CreateReq(&treq, afs_osi_credp);
@ -1180,7 +1173,10 @@ afs_FlushActiveVcaches(afs_int32 doflocks)
ObtainReadLock(&afs_xvcache);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (tvc->f.states & CVInit) continue;
#ifdef AFS_DARWIN80_ENV
if (tvc->f.states & CDeadVnode &&
@ -2124,6 +2120,7 @@ afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
#ifdef AFS_DARWIN80_ENV
vnode_t tvp;
#endif
struct afs_q *tq, *uq;
start = osi_Time();
@ -2154,8 +2151,12 @@ afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
rootvc_loop:
ObtainSharedLock(&afs_xvcache, 7);
tvc = NULL;
i = VCHash(afid);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (!FidCmp(&(tvc->f.fid), afid)) {
if (tvc->f.states & CVInit) {
ReleaseSharedLock(&afs_xvcache);
@ -2187,6 +2188,11 @@ afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
}
}
if (tq == &afs_vhashT[i]) {
/* vcache not found */
tvc = NULL;
}
if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
/* Mount point no longer stat'd or unknown. FID may have changed. */
getNewFid = 1;
@ -2637,12 +2643,17 @@ afs_FindVCache(struct VenusFid *afid, afs_int32 flag)
struct vcache *deadvc = NULL, *livevc = NULL;
vnode_t tvp;
#endif
struct afs_q *tq, *uq;
AFS_STATCNT(afs_FindVCache);
findloop:
tvc = NULL;
i = VCHash(afid);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (FidMatches(afid, tvc)) {
if (tvc->f.states & CVInit) {
findvc_sleep(tvc, flag);
@ -2658,6 +2669,11 @@ afs_FindVCache(struct VenusFid *afid, afs_int32 flag)
}
}
if (tq == &afs_vhashT[i]) {
/* vcache not found */
tvc = NULL;
}
/* should I have a read lock on the vnode here? */
if (tvc) {
#if defined(AFS_DARWIN80_ENV)
@ -2762,15 +2778,19 @@ afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
#ifdef AFS_DARWIN80_ENV
vnode_t tvp;
#endif
struct afs_q *tq, *uq;
AFS_STATCNT(afs_FindVCache);
loop:
ObtainSharedLock(&afs_xvcache, 331);
tvc = NULL;
i = VCHash(afid);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
/* Match only on what we have.... */
if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
&& (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
@ -2926,8 +2946,10 @@ afs_vcacheInit(int astatSize)
}
#endif
QInit(&VLRU);
for(i = 0; i < VCSIZE; ++i)
for (i = 0; i < VCSIZE; i++) {
QInit(&afs_vhashT[i]);
QInit(&afs_vhashTV[i]);
}
}
/*!
@ -2970,7 +2992,10 @@ shutdown_vcache(void)
* Also free the remaining ones in the Cache
*/
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
if (tvc->mvid.target_root) {
osi_FreeSmallSpace(tvc->mvid.target_root);
tvc->mvid.target_root = NULL;
@ -3007,7 +3032,6 @@ shutdown_vcache(void)
if (tvc->Access)
afs_FreeAllAxs(&(tvc->Access));
}
afs_vhashT[i] = 0;
}
}
@ -3047,8 +3071,10 @@ shutdown_vcache(void)
AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
LOCK_INIT(&afs_xvcb, "afs_xvcb");
QInit(&VLRU);
for(i = 0; i < VCSIZE; ++i)
for (i = 0; i < VCSIZE; i++) {
QInit(&afs_vhashT[i]);
QInit(&afs_vhashTV[i]);
}
}
void
@ -3056,14 +3082,19 @@ afs_DisconGiveUpCallbacks(void)
{
int i;
struct vcache *tvc;
struct afs_q *tq, *uq;
ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
retry:
/* Somehow, walk the set of vcaches, with each one coming out as tvc */
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
int slept = 0;
tvc = QTOVC(tq);
uq = QNext(tq);
if (afs_QueueVCB(tvc, &slept)) {
tvc->callback = NULL;
}
@ -3091,11 +3122,15 @@ afs_ClearAllStatdFlag(void)
{
int i;
struct vcache *tvc;
struct afs_q *tq, *uq;
ObtainWriteLock(&afs_xvcache, 715);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC | AFS_STALEVC_NOCB,
CUnique);
}

View File

@ -457,6 +457,7 @@ afs_CheckVolumeNames(int flags)
#ifdef AFS_DARWIN80_ENV
vnode_t tvp;
#endif
struct afs_q *tq, *uq;
AFS_STATCNT(afs_CheckVolumeNames);
nvols = 0;
@ -506,7 +507,9 @@ afs_CheckVolumeNames(int flags)
loop:
ObtainReadLock(&afs_xvcache);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
for (tq = afs_vhashT[i].next; tq != &afs_vhashT[i]; tq = uq) {
tvc = QTOVC(tq);
uq = QNext(tq);
/* if the volume of "mvid.target_root" of the vcache entry is
* among the ones we found earlier, then we re-evaluate it.