bkg-daemon-dont-break-64bit-pointers-and-handle-requests-in-order-20011102

This patch fixes a problem with 64-bit pointers being munged by the
background daemons (by separating sizes and pointers into separate
variables -- this bug was apparently introduced by the 64-bit file
support patch), and makes the background daemons handle requests in
the order they came in.  The latter is mostly useful for some
prefetching and fine-grained dcache-locking patches.
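
As background for the diff below, here is a small, self-contained C sketch of the two ideas (illustrative only, not the OpenAFS code: the simplified types and the helper names bqueue/next_request are hypothetical).  Sizes and pointers travel in separate size_parm/ptr_parm arrays, so a 64-bit pointer is never forced through a size-typed slot, and every queued request gets a ts counter so a daemon services the oldest unstarted request rather than the first busy table slot it finds.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins; the real code uses afs_size_t, struct vcache,
 * struct AFS_UCRED, and the afs_brs[] table protected by the afs_xbrs lock. */
typedef long long afs_size_t;

#define NBRS     4      /* number of request slots (illustrative) */
#define BPARMS   4
#define BSTARTED 1

struct brequest {
    int refCount;                   /* slot is in use while > 0 */
    int flags;                      /* BSTARTED once a daemon claims it */
    char opcode;                    /* BOP_FETCH, BOP_STORE, BOP_PATH, ... */
    afs_size_t size_parm[BPARMS];   /* numeric parameters (offsets, uids, booleans) */
    void *ptr_parm[BPARMS];         /* pointer parameters (dcache entries, path buffers) */
    int ts;                         /* queue-order "timestamp" */
};

static struct brequest brs[NBRS];
static int brs_count = 0;           /* monotonically increasing request counter */

/* Queue a request: sizes and pointers are stored in separate arrays, so a
 * 64-bit pointer never gets truncated by passing through a size parameter. */
static struct brequest *bqueue(char opcode, afs_size_t sparm0, afs_size_t sparm1,
                               void *pparm0)
{
    int i;
    for (i = 0; i < NBRS; i++) {
        struct brequest *tb = &brs[i];
        if (tb->refCount == 0) {
            tb->refCount = 1;
            tb->flags = 0;
            tb->opcode = opcode;
            tb->size_parm[0] = sparm0;
            tb->size_parm[1] = sparm1;
            tb->ptr_parm[0] = pparm0;
            tb->ts = brs_count++;   /* remember arrival order */
            return tb;
        }
    }
    return NULL;                    /* table full */
}

/* Claim the oldest unstarted request: scan for the smallest ts instead of
 * taking the first busy slot, which is what the daemon loop now does. */
static struct brequest *next_request(void)
{
    int i, min_ts = 0;
    struct brequest *min_tb = NULL;

    for (i = 0; i < NBRS; i++) {
        struct brequest *tb = &brs[i];
        if (tb->refCount > 0 && !(tb->flags & BSTARTED)) {
            if (!min_tb || min_ts - tb->ts > 0) {
                min_tb = tb;
                min_ts = tb->ts;
            }
        }
    }
    if (min_tb)
        min_tb->flags |= BSTARTED;
    return min_tb;
}

int main(void)
{
    struct brequest *tb;

    bqueue('A', 0, 0, NULL);
    bqueue('B', 0, 0, NULL);

    /* a daemon services A, freeing slot 0 ... */
    tb = next_request();
    printf("servicing %c (ts=%d)\n", tb->opcode, tb->ts);
    tb->refCount = 0;

    /* ... so C lands in slot 0 even though it arrived after B */
    bqueue('C', 0, 0, NULL);

    /* a first-busy-slot scan would pick C next; the ts scan picks B first */
    while ((tb = next_request()) != NULL) {
        printf("servicing %c (ts=%d)\n", tb->opcode, tb->ts);
        tb->refCount = 0;
    }
    return 0;
}

Run standalone, the sketch services requests in arrival order (A, B, C) even though the later request C reuses the lower-numbered slot freed by A.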
Nickolai Zeldovich 2001-11-05 19:34:25 +00:00 committed by Derrick Brashear
parent 68196d85db
commit 4b156c3800
7 changed files with 73 additions and 58 deletions

View File

@@ -967,8 +967,8 @@ static int afs_delmap(OSI_VC_ARG(avc), off, prp, addr, len, prot, maxprot,
     AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
     /* at least one daemon is idle, so ask it to do the store.
      * Also, note that we don't lock it any more... */
-    tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred, (long)acred->cr_uid,
-                    0L, 0L, 0L);
+    tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
+                    (afs_size_t) acred->cr_uid, 0L, (void *) 0);
     /* sleep waiting for the store to start, then retrieve error code */
     while ((tb->flags & BUVALID) == 0) {
         tb->flags |= BUWAIT;

View File

@@ -165,8 +165,8 @@ tagain:
             /* start the daemon (may already be running, however) */
             tdc->flags |= DFFetchReq;
             bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
-                            (afs_size_t)filePos, (afs_size_t) tdc,
-                            (afs_size_t) 0, (afs_size_t) 0);
+                            (afs_size_t)filePos, (afs_size_t) 0,
+                            tdc);
             if (!bp) {
                 tdc->flags &= ~DFFetchReq;
                 trybusy = 0;    /* Avoid bkg daemon since they're too busy */
@@ -318,8 +318,7 @@ void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
     mutex_exit(&tdc->lock);
 #endif
     bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
-                    (afs_size_t) offset, (afs_size_t) tdc,
-                    (afs_size_t) 1, (afs_size_t) 0);
+                    (afs_size_t) offset, (afs_size_t) 1, tdc);
     if (!bp) {
         /* Bkg table full; just abort non-important prefetching to avoid deadlocks */
         tdc->flags &= ~(DFNextStarted | DFFetchReq);
@@ -614,8 +613,8 @@ tagain:
             munlocked = 1;
 #endif
             bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
-                            (afs_size_t)filePos, (afs_size_t) tdc,
-                            (afs_size_t) 0, (afs_size_t) 0);
+                            (afs_size_t)filePos, (afs_size_t) 0,
+                            tdc);
             if (!bp) {
                 /* Bkg table full; retry deadlocks */
                 tdc->flags &= ~DFFetchReq;

View File

@@ -844,7 +844,7 @@ afs_close(OSI_VC_ARG(avc), aflags, acred)
                Also, note that we don't lock it any more... */
            tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
                            (afs_size_t) acred->cr_uid, (afs_size_t) 0,
-                           (afs_size_t) 0, (afs_size_t) 0);
+                           (void *) 0);
            /* sleep waiting for the store to start, then retrieve error code */
            while ((tb->flags & BUVALID) == 0) {
                tb->flags |= BUWAIT;

View File

@@ -116,11 +116,13 @@ struct sysname_info {
 struct brequest {
     struct vcache *vnode;           /* vnode to use, with vrefcount bumped */
     struct AFS_UCRED *cred;         /* credentials to use for operation */
-    afs_size_t parm[BPARMS];        /* random parameters */
+    afs_size_t size_parm[BPARMS];   /* random parameters */
+    void *ptr_parm[BPARMS];         /* pointer parameters */
     afs_int32 code;                 /* return code */
     short refCount;                 /* use counter for this structure */
     char opcode;                    /* what to do (store, fetch, etc) */
     char flags;                     /* free, etc */
+    afs_int32 ts;                   /* counter "timestamp" */
 };

 struct SecretToken {

View File

@@ -28,6 +28,7 @@ short afs_brsWaiters = 0;   /* number of users waiting for brs buffers */
 short afs_brsDaemons = 0;   /* number of daemons waiting for brs requests */
 struct brequest afs_brs[NBRS];      /* request structures */
 struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler;
+static int afs_brs_count = 0;       /* request counter, to service reqs in order */
 static int rxepoch_checked=0;
 #define afs_CheckRXEpoch() {if (rxepoch_checked == 0 && rxkad_EpochWasSet) { \
@@ -346,7 +347,7 @@ afs_CheckRootVolume () {
     else return ENOENT;
 }

-/* parm 0 is the pathname, parm 1 to the fetch is the chunk number */
+/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
 void BPath(ab)
     register struct brequest *ab; {
     register struct dcache *tdc;
@@ -363,14 +364,14 @@ void BPath(ab)
     if (code = afs_InitReq(&treq, ab->cred)) return;
     AFS_GUNLOCK();
 #ifdef AFS_LINUX22_ENV
-    code = gop_lookupname((char *)ab->parm[0], AFS_UIOSYS, 1, (struct vnode **) 0, &dp);
+    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, (struct vnode **) 0, &dp);
     if (dp)
         tvn = (struct vnode*)dp->d_inode;
 #else
-    code = gop_lookupname((char *)ab->parm[0], AFS_UIOSYS, 1, (struct vnode **) 0, (struct vnode **)&tvn);
+    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, (struct vnode **) 0, (struct vnode **)&tvn);
 #endif
     AFS_GLOCK();
-    osi_FreeLargeSpace((char *)ab->parm[0]);    /* free path name buffer here */
+    osi_FreeLargeSpace((char *)ab->ptr_parm[0]);    /* free path name buffer here */
     if (code) return;
     /* now path may not have been in afs, so check that before calling our cache manager */
     if (!tvn || !IsAfsVnode((struct vnode *) tvn)) {
@@ -394,7 +395,7 @@ void BPath(ab)
     tvc = (struct vcache *) tvn;
 #endif
     /* here we know its an afs vnode, so we can get the data for the chunk */
-    tdc = afs_GetDCache(tvc, (afs_size_t) ab->parm[1], &treq, &offset, &len, 1);
+    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
     if (tdc) {
         afs_PutDCache(tdc);
     }
@@ -409,8 +410,9 @@ void BPath(ab)
 #endif
 }

-/* parm 0 to the fetch is the chunk number; parm 1 is the dcache entry to wakeup,
- * parm 2 is true iff we should release the dcache entry here.
+/* size_parm 0 to the fetch is the chunk number,
+ * ptr_parm 0 is the dcache entry to wakeup,
+ * size_parm 1 is true iff we should release the dcache entry here.
  */
 void BPrefetch(ab)
     register struct brequest *ab; {
@@ -422,7 +424,7 @@ void BPrefetch(ab)
     AFS_STATCNT(BPrefetch);
     if (len = afs_InitReq(&treq, ab->cred)) return;
     tvc = ab->vnode;
-    tdc = afs_GetDCache(tvc, (afs_size_t)ab->parm[0], &treq, &offset, &len, 1);
+    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
     if (tdc) {
         afs_PutDCache(tdc);
     }
@@ -430,10 +432,10 @@ void BPrefetch(ab)
      * use tdc from GetDCache since afs_GetDCache may fail, but someone may
      * be waiting for our wakeup anyway.
      */
-    tdc = (struct dcache *) (ab->parm[1]);
+    tdc = (struct dcache *) (ab->ptr_parm[0]);
     tdc->flags &= ~DFFetchReq;
     afs_osi_Wakeup(&tdc->validPos);
-    if ((afs_size_t)ab->parm[2]) {
+    if (ab->size_parm[1]) {
 #ifdef AFS_SUN5_ENVX
         mutex_enter(&tdc->lock);
         tdc->refCount--;
@@ -512,13 +514,13 @@ int afs_BBusy() {
     return 1;
 }

-struct brequest *afs_BQueue(aopcode, avc, dontwait, ause, acred, aparm0, aparm1, aparm2, aparm3)
+struct brequest *afs_BQueue(aopcode, avc, dontwait, ause, acred, asparm0, asparm1, apparm0)
     register short aopcode;
     afs_int32 ause, dontwait;
     register struct vcache *avc;
     struct AFS_UCRED *acred;
-    /* On 64 bit platforms, "long" does the right thing. */
-    afs_size_t aparm0, aparm1, aparm2, aparm3;
+    afs_size_t asparm0, asparm1;
+    void *apparm0;
 {
     register int i;
     register struct brequest *tb;
@@ -544,12 +546,12 @@ struct brequest *afs_BQueue(aopcode, avc, dontwait, ause, acred, aparm0, aparm1,
 #endif
     }
     tb->refCount = ause+1;
-    tb->parm[0] = aparm0;
-    tb->parm[1] = aparm1;
-    tb->parm[2] = aparm2;
-    tb->parm[3] = aparm3;
+    tb->size_parm[0] = asparm0;
+    tb->size_parm[1] = asparm1;
+    tb->ptr_parm[0] = apparm0;
     tb->flags = 0;
     tb->code = 0;
+    tb->ts = afs_brs_count++;
     /* if daemons are waiting for work, wake them up */
     if (afs_brsDaemons > 0) {
         afs_osi_Wakeup(&afs_brsDaemons);
@@ -1231,6 +1233,9 @@ void afs_BackgroundDaemon() {
     MObtainWriteLock(&afs_xbrs,302);
     while (1) {
+        int min_ts;
+        struct brequest *min_tb;
+
         if (afs_termState == AFSOP_STOP_BKG) {
             if (--afs_nbrs <= 0)
                 afs_termState = AFSOP_STOP_TRUNCDAEMON;
@@ -1238,42 +1243,50 @@ void afs_BackgroundDaemon() {
             afs_osi_Wakeup(&afs_termState);
             return;
         }

         /* find a request */
         tb = afs_brs;
         foundAny = 0;
-        for(i=0;i<NBRS;i++,tb++) {
-            /* look for request */
-            if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
-                /* new request, not yet picked up */
-                tb->flags |= BSTARTED;
-                MReleaseWriteLock(&afs_xbrs);
-                foundAny = 1;
-                afs_Trace1(afs_iclSetp, CM_TRACE_BKG1,
-                           ICL_TYPE_INT32, tb->opcode);
-                if (tb->opcode == BOP_FETCH)
-                    BPrefetch(tb);
-                else if (tb->opcode == BOP_STORE)
-                    BStore(tb);
-                else if (tb->opcode == BOP_PATH)
-                    BPath(tb);
-                else panic("background bop");
-                if (tb->vnode) {
-#ifdef AFS_DEC_ENV
-                    tb->vnode->vrefCount--;     /* fix up reference count */
-#else
-                    AFS_RELE((struct vnode *)(tb->vnode));  /* MUST call vnode layer or could lose vnodes */
-#endif
-                    tb->vnode = (struct vcache *) 0;
-                }
-                if (tb->cred) {
-                    crfree(tb->cred);
-                    tb->cred = (struct AFS_UCRED *) 0;
-                }
-                afs_BRelease(tb);  /* this grabs and releases afs_xbrs lock */
-                MObtainWriteLock(&afs_xbrs,305);
+        min_tb = NULL;
+        for(i=0; i<NBRS; i++, tb++) {
+            /* look for request with smallest ts */
+            if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
+                /* new request, not yet picked up */
+                if ((min_tb && (min_ts - tb->ts > 0)) || !min_tb) {
+                    min_tb = tb;
+                    min_ts = tb->ts;
+                }
             }
         }
+        if (tb = min_tb) {
+            /* claim and process this request */
+            tb->flags |= BSTARTED;
+            MReleaseWriteLock(&afs_xbrs);
+            foundAny = 1;
+            afs_Trace1(afs_iclSetp, CM_TRACE_BKG1,
+                       ICL_TYPE_INT32, tb->opcode);
+            if (tb->opcode == BOP_FETCH)
+                BPrefetch(tb);
+            else if (tb->opcode == BOP_STORE)
+                BStore(tb);
+            else if (tb->opcode == BOP_PATH)
+                BPath(tb);
+            else panic("background bop");
+            if (tb->vnode) {
+#ifdef AFS_DEC_ENV
+                tb->vnode->vrefCount--;     /* fix up reference count */
+#else
+                AFS_RELE((struct vnode *)(tb->vnode));  /* MUST call vnode layer or could lose vnodes */
+#endif
+                tb->vnode = (struct vcache *) 0;
+            }
+            if (tb->cred) {
+                crfree(tb->cred);
+                tb->cred = (struct AFS_UCRED *) 0;
+            }
+            afs_BRelease(tb);   /* this grabs and releases afs_xbrs lock */
+            MObtainWriteLock(&afs_xbrs,305);
+        }
         if (!foundAny) {
             /* wait for new request */
             afs_brsDaemons++;

View File

@@ -2079,7 +2079,8 @@ struct AFS_UCRED *acred;
         osi_FreeLargeSpace(tp);
         return EWOULDBLOCK;     /* pretty close */
     }
-    afs_BQueue(BOP_PATH, (struct vcache*)0, 0, 0, acred, (long)tp, 0L, 0L, 0L);
+    afs_BQueue(BOP_PATH, (struct vcache*)0, 0, 0, acred,
+               (afs_size_t) 0, (afs_size_t) 0, tp);
     return 0;
 }

View File

@@ -2682,7 +2682,7 @@ void print_bkg(kmem)
         if (uentry->refCount == 0) break;
         printf("[%d] vcache=0x%lx, cred=0x%lx, code=%d, refCount=%d, opcode=%d, flags=%x [%lx, %lx, %lx, %lx]\n",
                i, uentry->vnode, uentry->cred, uentry->code, uentry->refCount, uentry->opcode, uentry->flags,
-               uentry->parm[0], uentry->parm[1], uentry->parm[2], uentry->parm[3]);
+               uentry->size_parm[0], uentry->size_parm[1], uentry->ptr_parm[0], uentry->ptr_parm[1]);
     }
     printf("... found %d active 'afs_brs' entries\n", j);