afs_int32 afs_bulkStatsDone;
static int bulkStatCounter = 0; /* counter for bulk stat seq. numbers */
+int afs_fakestat_enable = 0; /* 1: fakestat-all, 2: fakestat-crosscell */
/* this would be faster if it did comparison as int32word, but would be
 * dependent on byte-order and alignment, and I haven't figured out
 * what "@sys" is in binary... */
#define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
/*
 * afs_strcat
 *
 * Append the NUL-terminated string s2 to the end of s1 and return s1.
 * The caller must guarantee that the buffer at s1 is large enough to
 * hold strlen(s1) + strlen(s2) + 1 bytes; no bounds checking is done.
 */
char *afs_strcat(register char *s1, register char *s2)
{
    register char *os1 = s1;	/* remember start of destination; this was
				 * previously returned uninitialized */

    /* advance s1 to its terminating NUL */
    while (*s1++)
	;
    --s1;
    /* copy s2, including its terminating NUL */
    while ((*s1++ = *s2++))
	;
    return (os1);
}
-char *afs_index(a, c)
- register char *a, c; {
+char *afs_index(register char *a, register char c)
+{
register char tc;
AFS_STATCNT(afs_index);
- while (tc = *a) {
+ while ((tc = *a)) {
if (tc == c) return a;
else a++;
}
- return (char *) 0;
+ return NULL;
}
/* call under write lock, evaluate mvid field from a mt pt.
- * avc is the vnode of the mount point object.
- * advc is the vnode of the containing directory
+ * avc is the vnode of the mount point object; must be write-locked.
+ * advc is the vnode of the containing directory (optional; if NULL and
+ * EvalMountPoint succeeds, caller must initialize *avolpp->dotdot)
* avolpp is where we return a pointer to the volume named by the mount pt, if success
* areq is the identity of the caller.
*
* NOTE: this function returns a held volume structure in *volpp if it returns 0!
*/
-EvalMountPoint(avc, advc, avolpp, areq)
- register struct vcache *avc;
- struct volume **avolpp;
- struct vcache *advc; /* the containing dir */
- register struct vrequest *areq;
+int EvalMountPoint(register struct vcache *avc, struct vcache *advc,
+ struct volume **avolpp, register struct vrequest *areq)
{
afs_int32 code;
struct volume *tvp = 0;
struct VenusFid tfid;
struct cell *tcell;
char *cpos, *volnamep;
- char type, buf[128];
+ char type, *buf;
afs_int32 prefetchRO; /* 1=>No 2=>Yes */
afs_int32 mtptCell, assocCell, hac=0;
afs_int32 samecell, roname, len;
#ifdef notdef
if (avc->mvid && (avc->states & CMValid)) return 0; /* done while racing */
#endif
- *avolpp = (struct volume *)0;
+ *avolpp = NULL;
code = afs_HandleLink(avc, areq);
if (code) return code;
volnamep = &avc->linkData[1];
tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
}
- if (!tcell) return ENOENT;
+ if (!tcell) return ENODEV;
- mtptCell = tcell->cell; /* The cell for the mountpoint */
+ mtptCell = tcell->cellNum; /* The cell for the mountpoint */
if (tcell->lcellp) {
- hac = 1; /* has associated cell */
- assocCell = tcell->lcellp->cell; /* The associated cell */
+ hac = 1; /* has associated cell */
+ assocCell = tcell->lcellp->cellNum; /* The associated cell */
}
afs_PutCell(tcell, READ_LOCK);
* Don't know why we do this. Would have still found it in above call - jpm.
*/
if (!tvp && (prefetchRO == 2)) {
- strcpy(buf, volnamep);
- afs_strcat(buf, ".readonly");
+ buf = (char *)osi_AllocSmallSpace(strlen(volnamep)+10);
- tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
+ strcpy(buf, volnamep);
+ afs_strcat(buf, ".readonly");
- /* Try the associated linked cell if failed */
- if (!tvp && hac && areq->volumeError) {
- tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
- }
+ tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
+
+ /* Try the associated linked cell if failed */
+ if (!tvp && hac && areq->volumeError) {
+ tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
+ }
+ osi_FreeSmallSpace(buf);
}
if (!tvp) return ENODEV; /* Couldn't find the volume */
/* Don't cross mountpoint from a BK to a BK volume */
if ((avc->states & CBackup) && (tvp->states & VBackup)) {
afs_PutVolume(tvp, WRITE_LOCK);
- return ELOOP;
+ return ENODEV;
}
/* If we want (prefetched) the RO and it exists, then drop the
* to the new path.
*/
tvp->mtpoint = avc->fid; /* setup back pointer to mtpoint */
- tvp->dotdot = advc->fid;
+ if (advc) tvp->dotdot = advc->fid;
*avolpp = tvp;
return 0;
}
+
+/*
+ * afs_InitFakeStat
+ *
+ * Must be called on an afs_fakestat_state object before calling
+ * afs_EvalFakeStat or afs_PutFakeStat. Calling afs_PutFakeStat
+ * without calling afs_EvalFakeStat is legal, as long as this
+ * function is called.
+ */
+void afs_InitFakeStat(struct afs_fakestat_state *state)
+{
+ if (!afs_fakestat_enable)
+ return;
+
+ state->valid = 1;
+ state->did_eval = 0;
+ state->need_release = 0;
+}
+
+/*
+ * afs_EvalFakeStat_int
+ *
+ * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
+ * which is called by those wrapper functions.
+ *
+ * Only issues RPCs if canblock is non-zero.
+ */
/*
 * On success through a mount point, *avcp is replaced with a held
 * reference to the volume-root vcache and state->need_release is set,
 * so afs_PutFakeStat() can drop that reference later.
 * Returns 0 on success (including "nothing to do"), else an error code.
 */
+int afs_EvalFakeStat_int(struct vcache **avcp, struct afs_fakestat_state *state,
+    struct vrequest *areq, int canblock)
+{
+    struct vcache *tvc, *root_vp;
+    struct volume *tvolp = NULL;
+    int code = 0;
+
+    if (!afs_fakestat_enable)
+	return 0;
+
    /* afs_InitFakeStat must have run, and each state is evaluated at most once */
+    osi_Assert(state->valid == 1);
+    osi_Assert(state->did_eval == 0);
+    state->did_eval = 1;
+
+    tvc = *avcp;
    /* mvstat != 1 => not a mount point: nothing to fake up */
+    if (tvc->mvstat != 1)
+	return 0;
+
+    /* Is the call to VerifyVCache really necessary? */
+    code = afs_VerifyVCache(tvc, areq);
+    if (code)
+	goto done;
+    if (canblock) {
	/* blocking path: actually resolve the mount point (may issue RPCs) */
+	ObtainWriteLock(&tvc->lock, 599);
+	code = EvalMountPoint(tvc, NULL, &tvolp, areq);
+	ReleaseWriteLock(&tvc->lock);
+	if (code)
+	    goto done;
+	if (tvolp) {
	    /* make ".." in the volume root resolve to the mount point's parent */
+	    tvolp->dotdot = tvc->fid;
+	    tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
+	    tvolp->dotdot.Fid.Unique = tvc->parentUnique;
+	}
+    }
+    if (tvc->mvid && (tvc->states & CMValid)) {
+	if (!canblock) {
+	    afs_int32 retry;
+
	    /* cache-only lookup of the root vcache; loop again while
	     * afs_FindVCache reports the entry is being recycled */
+	    do {
+		retry = 0;
+		ObtainWriteLock(&afs_xvcache, 597);
+		root_vp = afs_FindVCache(tvc->mvid, &retry, 0);
+		if (root_vp && retry) {
+		    ReleaseWriteLock(&afs_xvcache);
+		    afs_PutVCache(root_vp);
+		}
+	    } while (root_vp && retry);
+	    ReleaseWriteLock(&afs_xvcache);
+	} else {
+	    root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL);
+	}
+	if (!root_vp) {
	    /* non-blocking callers quietly keep the unevaluated mount point */
+	    code = canblock ? ENOENT : 0;
+	    goto done;
+	}
+	if (tvolp) {
+	    /* Is this always kosher? Perhaps we should instead use
+	     * NBObtainWriteLock to avoid potential deadlock.
+	     */
+	    ObtainWriteLock(&root_vp->lock, 598);
+	    if (!root_vp->mvid)
+		root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
	    /* stash the fixed-up ".." fid on the root vcache */
+	    *root_vp->mvid = tvolp->dotdot;
+	    ReleaseWriteLock(&root_vp->lock);
+	}
	/* hand the held root vcache to the caller; afs_PutFakeStat releases it */
+	state->need_release = 1;
+	state->root_vp = root_vp;
+	*avcp = root_vp;
+	code = 0;
+    } else {
+	code = canblock ? ENOENT : 0;
+    }
+
+done:
+    if (tvolp)
	/* EvalMountPoint returns tvolp held (see its header comment) */
+	afs_PutVolume(tvolp, WRITE_LOCK);
+    return code;
+}
+
+/*
+ * afs_EvalFakeStat
+ *
+ * Automatically does the equivalent of EvalMountPoint for vcache entries
+ * which are mount points. Remembers enough state to properly release
+ * the volume root vcache when afs_PutFakeStat() is called.
+ *
+ * State variable must be initialized by afs_InitFakeStat() beforehand.
+ *
+ * Returns 0 when everything succeeds and *avcp points to the vcache entry
+ * that should be used for the real vnode operation. Returns non-zero if
+ * something goes wrong and the error code should be returned to the user.
+ */
/* Blocking variant: passes canblock=1, so network RPCs may be issued. */
+int
+afs_EvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
+		 struct vrequest *areq)
+{
+    return afs_EvalFakeStat_int(avcp, state, areq, 1);
+}
+
+/*
+ * afs_TryEvalFakeStat
+ *
+ * Same as afs_EvalFakeStat, but tries not to talk to remote servers
+ * and only evaluate the mount point if all the data is already in
+ * local caches.
+ *
+ * Returns 0 if everything succeeds and *avcp points to a valid
+ * vcache entry (possibly evaluated).
+ */
/* Non-blocking variant: passes canblock=0; an unevaluated mount point is
 * left in place and reported as success rather than as an error. */
+int afs_TryEvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
+    struct vrequest *areq)
+{
+    return afs_EvalFakeStat_int(avcp, state, areq, 0);
+}
+
+/*
+ * afs_PutFakeStat
+ *
+ * Perform any necessary cleanup at the end of a vnode op, given that
+ * afs_InitFakeStat was previously called with this state.
+ */
/*
 * NOTE(review): if afs_fakestat_enable is cleared between afs_InitFakeStat
 * and this call, the early return below skips releasing state->root_vp even
 * when need_release is set — an apparent vcache reference leak.  Consider
 * performing the cleanup unconditionally; TODO confirm against how the
 * tunable may change at runtime.
 */
+void afs_PutFakeStat(struct afs_fakestat_state *state)
+{
+    if (!afs_fakestat_enable)
+	return;
+
+    osi_Assert(state->valid == 1);
    /* drop the volume-root vcache reference taken by afs_EvalFakeStat */
+    if (state->need_release)
+	afs_PutVCache(state->root_vp);
+    state->valid = 0;
+}
-afs_ENameOK(aname)
- register char *aname; {
- register char tc;
+int afs_ENameOK(register char *aname)
+{
register int tlen;
AFS_STATCNT(ENameOK);
return 1;
}
-afs_getsysname(areq, adp, bufp)
- register struct vrequest *areq;
- register struct vcache *adp;
- register char *bufp;
+int afs_getsysname(register struct vrequest *areq, register struct vcache *adp,
+ register char *bufp)
{
static char sysname[MAXSYSNAME];
register struct unixuser *au;
au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
afs_PutUser(au, 0);
if (au->exporter) {
- error = EXP_SYSNAME(au->exporter, (char *)0, bufp);
+ error = EXP_SYSNAME(au->exporter, NULL, bufp);
if (error)
strcpy(bufp, "@sys");
return -1;
}
}
-Check_AtSys(avc, aname, state, areq)
- register struct vcache *avc;
- char *aname;
- struct sysname_info *state;
- struct vrequest *areq;
+int Check_AtSys(register struct vcache *avc, const char *aname,
+ struct sysname_info *state, struct vrequest *areq)
{
if (AFS_EQ_ATSYS(aname)) {
state->offset = 0;
}
}
-Next_AtSys(avc, areq, state)
- register struct vcache *avc;
- struct vrequest *areq;
- struct sysname_info *state;
+int Next_AtSys(register struct vcache *avc, struct vrequest *areq,
+ struct sysname_info *state)
{
if (state->index == -1)
return 0; /* No list */
} else
return 0; /* .*@sys doesn't match either */
} else if (++(state->index) >= afs_sysnamecount
- || !afs_sysnamelist[state->index])
+ || !afs_sysnamelist[(int)state->index])
return 0; /* end of list */
- strcpy(state->name+state->offset, afs_sysnamelist[state->index]);
+ strcpy(state->name+state->offset, afs_sysnamelist[(int)state->index]);
return 1;
}
* ensure that vcaches created for failed RPC's to older servers have the
* CForeign bit set.
*/
-struct vcache * BStvc = (struct vcache *) 0;
-void afs_DoBulkStat(adp, dirCookie, areqp)
- struct vcache *adp;
- long dirCookie;
- struct vrequest *areqp;
+static struct vcache *BStvc = NULL;
+
+int afs_DoBulkStat(struct vcache *adp, long dirCookie, struct vrequest *areqp)
{
int nentries; /* # of entries to prefetch */
int nskip; /* # of slots in the LRU queue to skip */
struct dcache *dcp; /* chunk containing the dir block */
char *statMemp; /* status memory block */
char *cbfMemp; /* callback and fid memory block */
- long temp; /* temp for holding chunk length, &c. */
+ afs_size_t temp; /* temp for holding chunk length, &c. */
struct AFSFid *fidsp; /* file IDs were collecting */
struct AFSCallBack *cbsp; /* call back pointers */
struct AFSCallBack *tcbp; /* temp callback ptr */
struct conn *tcp; /* conn for call */
AFSCBs cbParm; /* callback parm for bulk stat */
struct server *hostp = 0; /* host we got callback from */
- long origEvenCBs; /* original # of callbacks for even-fid files */
- long origOddCBs; /* original # of callbacks for odd-fid files */
- long origEvenZaps; /* original # of recycles for even-fid files */
- long origOddZaps; /* original # of recycles for odd-fid files */
long startTime; /* time we started the call,
* for callback expiration base
*/
- int statSeqNo; /* Valued of file size to detect races */
+ afs_size_t statSeqNo; /* Valued of file size to detect races */
int code; /* error code */
long newIndex; /* new index in the dir */
struct DirEntry *dirEntryp; /* dir entry we are examining */
struct volume *volp=0; /* volume ptr */
struct VenusFid dotdot;
int flagIndex; /* First file with bulk fetch flag set */
+ int inlinebulk=0; /* Did we use InlineBulk RPC or not? */
XSTATS_DECLS
/* first compute some basic parameters. We dont want to prefetch more
code = afs_VerifyVCache(adp, areqp);
if (code) goto done;
- dcp = afs_GetDCache(adp, 0, areqp, &temp, &temp, 1);
+ dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
if (!dcp) {
code = ENOENT;
goto done;
/* lock the directory cache entry */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
/*
* Make sure that the data in the cache is current. There are two
* 2. The cache data is no longer valid
*/
while ((adp->states & CStatd)
- && (dcp->flags & DFFetching)
+ && (dcp->dflags & DFFetching)
&& hsame(adp->m.DataVersion, dcp->f.versionNo)) {
- dcp->flags |= DFWaiting;
+ afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
+ ICL_TYPE_STRING, __FILE__,
+ ICL_TYPE_INT32, __LINE__,
+ ICL_TYPE_POINTER, dcp,
+ ICL_TYPE_INT32, dcp->dflags);
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&dcp->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(dcp);
goto tagain;
do {
retry = 0;
ObtainWriteLock(&afs_xvcache, 130);
- tvcp = afs_FindVCache(&tfid, 0, 0, &retry, 0 /* no stats | LRU */);
+ tvcp = afs_FindVCache(&tfid, &retry, 0 /* no stats | LRU */);
if (tvcp && retry) {
ReleaseWriteLock(&afs_xvcache);
afs_PutVCache(tvcp);
}
} while (tvcp && retry);
if (!tvcp) { /* otherwise, create manually */
- tvcp = afs_NewVCache(&tfid, hostp, 0, 0);
+ tvcp = afs_NewVCache(&tfid, hostp);
ObtainWriteLock(&tvcp->lock, 505);
ReleaseWriteLock(&afs_xvcache);
afs_RemoveVCB(&tfid);
* preserve the value of the file size. We could
* flush the pages, but it wouldn't be worthwhile.
*/
- bcopy((char *) &tfid.Fid, (char *)(fidsp+fidIndex),
- sizeof(*fidsp));
+ memcpy((char *)(fidsp+fidIndex), (char *) &tfid.Fid, sizeof(*fidsp));
tvcp->states |= CBulkFetching;
tvcp->m.Length = statSeqNo;
fidIndex++;
} /* while loop over all dir entries */
/* now release the dir lock and prepare to make the bulk RPC */
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
/* release the chunk */
if (tcp) {
hostp = tcp->srvr->server;
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
- code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
- &volSync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
+
+ if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
+ code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
+ &cbParm, &volSync);
+ if (code == RXGEN_OPCODE) {
+ tcp->srvr->server->flags |= SNO_INLINEBULK;
+ inlinebulk = 0;
+ code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
+ &cbParm, &volSync);
+ } else
+ inlinebulk=1;
+ } else {
+ inlinebulk=0;
+ code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
+ &volSync);
+ }
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
} while (afs_Analyze(tcp, code, &adp->fid, areqp,
- AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL));
/* now, if we didnt get the info, bail out. */
if (code) goto done;
* We also have to take into account racing token revocations.
*/
for(i=0; i<fidIndex; i++) {
+ if ((&statsp[i])->errorCode)
+ continue;
afid.Cell = adp->fid.Cell;
afid.Fid.Volume = adp->fid.Fid.Volume;
afid.Fid.Vnode = fidsp[i].Vnode;
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0/* !stats&!lru*/);
+ tvcp = afs_FindVCache(&afid, &retry, 0/* !stats&!lru*/);
ReleaseReadLock(&afs_xvcache);
} while (tvcp && retry);
}
if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
|| (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent4");
+ { refpanic ("Bulkstat VLRU inconsistent4"); }
if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
|| (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent5");
+ { refpanic ("Bulkstat VLRU inconsistent5"); }
if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
QRemove(&tvcp->vlruq);
}
if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
|| (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent5");
+ { refpanic ("Bulkstat VLRU inconsistent5"); }
if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
|| (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent6");
+ { refpanic ("Bulkstat VLRU inconsistent6"); }
ReleaseWriteLock(&afs_xvcache);
ObtainWriteLock(&afs_xcbhash, 494);
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0/* !stats&!lru*/);
+ tvcp = afs_FindVCache(&afid, &retry, 0/* !stats&!lru*/);
ReleaseReadLock(&afs_xvcache);
} while (tvcp && retry);
if (tvcp != NULL
if ( volp )
afs_PutVolume(volp, READ_LOCK);
+ /* If we did the InlineBulk RPC pull out the return code */
+ if (inlinebulk) {
+ if ((&statsp[0])->errorCode) {
+ afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
+ AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
+ NULL);
+ code = (&statsp[0])->errorCode;
+ }
+ } else {
+ code = 0;
+ }
osi_FreeLargeSpace(statMemp);
osi_FreeLargeSpace(cbfMemp);
+ return code;
}
/* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
-int AFSDOBULK = 1;
+static int AFSDOBULK = 1;
#ifdef AFS_OSF_ENV
afs_lookup(adp, ndp)
struct AFS_UCRED *acred; {
#endif
struct vrequest treq;
- char *tname = (char *)0;
+ char *tname = NULL;
register struct vcache *tvc=0;
register afs_int32 code;
+ register afs_int32 bulkcode = 0;
int pass = 0, hit = 0;
long dirCookie;
extern afs_int32 afs_mariner; /*Writing activity to log?*/
afs_hyper_t versionNo;
int no_read_access = 0;
struct sysname_info sysState; /* used only for @sys checking */
+ int dynrootRetry = 1;
+ struct afs_fakestat_state fakestate;
+ int tryEvalOnly = 0;
AFS_STATCNT(afs_lookup);
+ afs_InitFakeStat(&fakestate);
+
+ if ((code = afs_InitReq(&treq, acred)))
+ goto done;
+
#ifdef AFS_OSF_ENV
- ndp->ni_dvp = (struct vnode *)adp;
- bcopy(ndp->ni_ptr, aname, ndp->ni_namelen);
+ ndp->ni_dvp = AFSTOV(adp);
+ memcpy(aname, ndp->ni_ptr, ndp->ni_namelen);
aname[ndp->ni_namelen] = '\0';
#endif /* AFS_OSF_ENV */
- *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
-
- if (code = afs_InitReq(&treq, acred)) {
- goto done;
+#if defined(AFS_DARWIN_ENV)
+ /* Workaround for MacOSX Finder, which tries to look for
+ * .DS_Store and Contents under every directory.
+ */
+ if (afs_fakestat_enable && adp->mvstat == 1) {
+ if (strcmp(aname, ".DS_Store") == 0)
+ tryEvalOnly = 1;
+ if (strcmp(aname, "Contents") == 0)
+ tryEvalOnly = 1;
}
+#endif
+
+ if (tryEvalOnly)
+ code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
+ else
+ code = afs_EvalFakeStat(&adp, &fakestate, &treq);
+ if (tryEvalOnly && adp->mvstat == 1)
+ code = ENOENT;
+ if (code)
+ goto done;
+
+ *avcp = NULL; /* Since some callers don't initialize it */
/* come back to here if we encounter a non-existent object in a read-only
volume's directory */
redo:
- *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
+ *avcp = NULL; /* Since some callers don't initialize it */
+ bulkcode = 0;
if (!(adp->states & CStatd)) {
- if (code = afs_VerifyVCache2(adp, &treq))
- goto done;
+ if ((code = afs_VerifyVCache2(adp, &treq))) {
+ goto done;
+ }
}
else code = 0;
#ifdef AFS_OSF_ENV
extern struct vcache *afs_globalVp;
if (adp == afs_globalVp) {
- struct vnode *rvp = (struct vnode *)adp;
+ struct vnode *rvp = AFSTOV(adp);
/*
ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
ndp->ni_dvp = ndp->ni_vp;
goto done;
}
/* otherwise we have the fid here, so we use it */
- tvc = afs_GetVCache(adp->mvid, &treq, (afs_int32 *)0,
- (struct vcache*)0, 0);
+ tvc = afs_GetVCache(adp->mvid, &treq, NULL, NULL);
afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT,
ICL_TYPE_FID, adp->mvid, ICL_TYPE_POINTER, tvc,
ICL_TYPE_INT32, code);
*avcp = tvc;
code = (tvc ? 0 : ENOENT);
hit = 1;
- if (tvc && !tvc->vrefCount) {
+ if (tvc && !VREFCOUNT(tvc)) {
osi_Panic("TT1");
}
if (code) {
/* now check the access */
if (treq.uid != adp->last_looker) {
if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
- *avcp = (struct vcache *)0;
+ *avcp = NULL;
code = EACCES;
goto done;
}
/* Check for read access as well. We need read access in order to
stat files, but not to stat subdirectories. */
- if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
+ if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
no_read_access = 1;
/* special case lookup of ".". Can we check for it sooner in this code,
ObtainReadLock(&afs_xvcache);
osi_vnhold(adp, 0);
ReleaseReadLock(&afs_xvcache);
- code = 0;
- *avcp = tvc = adp;
- hit = 1;
- if (adp && !adp->vrefCount) {
+ code = 0;
+ *avcp = tvc = adp;
+ hit = 1;
+ if (adp && !VREFCOUNT(adp)) {
osi_Panic("TT2");
}
- goto done;
+ goto done;
}
Check_AtSys(adp, aname, &sysState, &treq);
if (tvc) {
if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
/* need read access on dir to stat non-directory / non-link */
- afs_PutVCache(tvc, WRITE_LOCK);
- *avcp = (struct vcache *)0;
+ afs_PutVCache(tvc);
+ *avcp = NULL;
code = EACCES;
goto done;
}
{
register struct dcache *tdc;
- afs_int32 dirOffset, dirLen;
+ afs_size_t dirOffset, dirLen;
ino_t theDir;
struct VenusFid tfid;
/* now we have to lookup the next fid */
- tdc = afs_GetDCache(adp, 0, &treq, &dirOffset, &dirLen, 1);
+ tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
if (!tdc) {
- *avcp = (struct vcache *)0; /* redundant, but harmless */
+ *avcp = NULL; /* redundant, but harmless */
code = EIO;
goto done;
}
/* now we will just call dir package with appropriate inode.
Dirs are always fetched in their entirety for now */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
/*
* Make sure that the data in the cache is current. There are two
* 2. The cache data is no longer valid
*/
while ((adp->states & CStatd)
- && (tdc->flags & DFFetching)
+ && (tdc->dflags & DFFetching)
&& hsame(adp->m.DataVersion, tdc->f.versionNo)) {
- tdc->flags |= DFWaiting;
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(tdc);
goto redo;
}
tname = sysState.name;
- ReleaseReadLock(&adp->lock);
+ ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
+ if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
+ ReleaseReadLock(&adp->lock);
+ dynrootRetry = 0;
+ if (tname[0] == '.')
+ afs_LookupAFSDB(tname + 1);
+ else
+ afs_LookupAFSDB(tname);
+ if (tname && tname != aname) osi_FreeLargeSpace(tname);
+ goto redo;
+ } else {
+ ReleaseReadLock(&adp->lock);
+ }
+
/* new fid has same cell and volume */
tfid.Cell = adp->fid.Cell;
tfid.Fid.Volume = adp->fid.Fid.Volume;
/* prefetch some entries, if the dir is currently open. The variable
* dirCookie tells us where to start prefetching from.
*/
- if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign)) {
+ if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign) && !afs_IsDynroot(adp)) {
afs_int32 retry;
/* if the entry is not in the cache, or is in the cache,
* but hasn't been statd, then do a bulk stat operation.
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvc = afs_FindVCache(&tfid, 1, 0, &retry, 0/* !stats,!lru */);
+ tvc = afs_FindVCache(&tfid, &retry, 0/* !stats,!lru */);
ReleaseReadLock(&afs_xvcache);
} while (tvc && retry);
- if (!tvc || !(tvc->states & CStatd)) {
- afs_DoBulkStat(adp, dirCookie, &treq);
- }
+ if (!tvc || !(tvc->states & CStatd))
+ bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
+ else
+ bulkcode = 0;
/* if the vcache isn't usable, release it */
if (tvc && !(tvc->states & CStatd)) {
afs_PutVCache(tvc);
- tvc = (struct vcache *) 0;
+ tvc = NULL;
}
+ } else {
+ tvc = NULL;
+ bulkcode = 0;
}
- else tvc = (struct vcache *) 0;
-
+
/* now get the status info, if we don't already have it */
/* This is kind of weird, but we might wind up accidentally calling
* RXAFS_Lookup because we happened upon a file which legitimately
* has a 0 uniquifier. That is the result of allowing unique to wrap
- * to 0. This was fixed in AFS 3.4. For CForeigh, Unique == 0 means that
+ * to 0. This was fixed in AFS 3.4. For CForeign, Unique == 0 means that
* the file has not yet been looked up.
*/
if (!tvc) {
afs_int32 cached = 0;
if (!tfid.Fid.Unique && (adp->states & CForeign)) {
- tvc = afs_LookupVCache(&tfid, &treq, &cached, WRITE_LOCK,
- adp, tname);
+ tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
+ }
+ if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
+ tvc = afs_GetVCache(&tfid, &treq, &cached, NULL);
}
- if (!tvc) { /* lookup failed or wasn't called */
- tvc = afs_GetVCache(&tfid, &treq, &cached, (struct vcache*)0,
- WRITE_LOCK);
- }
} /* if !tvc */
} /* sub-block just to reduce stack usage */
if (tvc) {
- if (adp->states & CForeign)
+ int force_eval = afs_fakestat_enable ? 0 : 1;
+
+ if (adp->states & CForeign)
tvc->states |= CForeign;
tvc->parentVnode = adp->fid.Fid.Vnode;
tvc->parentUnique = adp->fid.Fid.Unique;
tvc->states &= ~CBulkStat;
+ if (afs_fakestat_enable == 2 && tvc->mvstat == 1) {
+ ObtainSharedLock(&tvc->lock, 680);
+ if (!tvc->linkData) {
+ UpgradeSToWLock(&tvc->lock, 681);
+ code = afs_HandleLink(tvc, &treq);
+ ConvertWToRLock(&tvc->lock);
+ } else {
+ ConvertSToRLock(&tvc->lock);
+ code = 0;
+ }
+ if (!code && !strchr(tvc->linkData, ':'))
+ force_eval = 1;
+ ReleaseReadLock(&tvc->lock);
+ }
+
#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
if (!(flags & AFS_LOOKUP_NOEVAL))
/* don't eval mount points */
#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
- if (tvc->mvstat == 1) {
- /* a mt point, possibly unevaluated */
- struct volume *tvolp;
+ if (tvc->mvstat == 1 && force_eval) {
+ /* a mt point, possibly unevaluated */
+ struct volume *tvolp;
ObtainWriteLock(&tvc->lock,133);
code = EvalMountPoint(tvc, adp, &tvolp, &treq);
ReleaseWriteLock(&tvc->lock);
if (code) {
+ afs_PutVCache(tvc);
if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
goto done;
}
if (tvolp && (tvolp->states & VForeign)) {
/* XXXX tvolp has ref cnt on but not locked! XXX */
- tvc = afs_GetRootVCache(tvc->mvid, &treq, (afs_int32 *)0, tvolp, WRITE_LOCK);
+ tvc = afs_GetRootVCache(tvc->mvid, &treq, NULL, tvolp);
} else {
- tvc = afs_GetVCache(tvc->mvid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvc = afs_GetVCache(tvc->mvid, &treq, NULL, NULL);
}
- afs_PutVCache(uvc, WRITE_LOCK); /* we're done with it */
+ afs_PutVCache(uvc); /* we're done with it */
if (!tvc) {
code = ENOENT;
* ptr to point back to the appropriate place */
if (tvolp) {
ObtainWriteLock(&tvc->lock,134);
- if (tvc->mvid == (struct VenusFid *) 0) {
+ if (tvc->mvid == NULL) {
tvc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
}
/* setup backpointer */
}
}
else {
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
code = ENOENT;
if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
goto done;
}
}
*avcp = tvc;
- if (tvc && !tvc->vrefCount) {
+ if (tvc && !VREFCOUNT(tvc)) {
osi_Panic("TT3");
}
code = 0;
/* Handle RENAME; only need to check rename "." */
if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
if (!FidCmp(&(tvc->fid), &(adp->fid))) {
- afs_PutVCache(*avcp, WRITE_LOCK);
+ afs_PutVCache(*avcp);
*avcp = NULL;
+ afs_PutFakeStat(&fakestate);
return afs_CheckCode(EISDIR, &treq, 18);
}
}
/* So Linux inode cache is up to date. */
code = afs_VerifyVCache(tvc, &treq);
#else
+ afs_PutFakeStat(&fakestate);
return 0; /* can't have been any errors if hit and !code */
#endif
}
}
+ if (bulkcode) code = bulkcode; else
code = afs_CheckCode(code, &treq, 19);
if (code) {
/* If there is an error, make sure *avcp is null.
* Alphas panic otherwise - defect 10719.
*/
- *avcp = (struct vcache *)0;
+ *avcp = NULL;
}
+ afs_PutFakeStat(&fakestate);
return code;
}