* afs_index
*/
-#include "../afs/param.h" /* Should be always first */
+#include <afsconfig.h>
+#include "../afs/param.h"
+
+RCSID("$Header$");
+
#include "../afs/sysincludes.h" /* Standard vendor system headers */
#include "../afs/afsincludes.h" /* Afs-based standard headers */
#include "../afs/afs_stats.h" /* statistics */
extern afs_rwlock_t afs_xcbhash;
extern struct afs_exporter *afs_nfsexporter;
extern char *afs_sysname;
+extern char *afs_sysnamelist[];
+extern int afs_sysnamecount;
extern struct afs_q VLRU; /*vcache LRU*/
#ifdef AFS_LINUX22_ENV
extern struct inode_operations afs_symlink_iops, afs_dir_iops;
afs_int32 afs_bulkStatsDone;
static int bulkStatCounter = 0; /* counter for bulk stat seq. numbers */
+int afs_fakestat_enable = 0;
/* this would be faster if it did comparison as int32word, but would be
}
/* call under write lock, evaluate mvid field from a mt pt.
- * avc is the vnode of the mount point object.
- * advc is the vnode of the containing directory
+ * avc is the vnode of the mount point object; must be write-locked.
+ * advc is the vnode of the containing directory (optional; if NULL and
+ * EvalMountPoint succeeds, the caller must initialize (*avolpp)->dotdot)
* avolpp is where we return a pointer to the volume named by the mount pt, if success
* areq is the identity of the caller.
*
volnamep = &avc->linkData[1];
tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
}
- if (!tcell) return ENOENT;
+ if (!tcell) return ENODEV;
mtptCell = tcell->cell; /* The cell for the mountpoint */
if (tcell->lcellp) {
* to the new path.
*/
tvp->mtpoint = avc->fid; /* setup back pointer to mtpoint */
- tvp->dotdot = advc->fid;
+ if (advc) tvp->dotdot = advc->fid;
*avolpp = tvp;
return 0;
}
+
+/*
+ * afs_InitFakeStat
+ *
+ * Must be called on an afs_fakestat_state object before calling
+ * afs_EvalFakeStat or afs_PutFakeStat. Calling afs_PutFakeStat
+ * without calling afs_EvalFakeStat is legal, as long as this
+ * function is called.
+ */
+
+void
+afs_InitFakeStat(state)
+ struct afs_fakestat_state *state;
+{
+ state->valid = 1;
+ state->did_eval = 0;
+ state->need_release = 0;
+}
+
+/*
+ * afs_EvalFakeStat_int
+ *
+ * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
+ * which is called by those wrapper functions.
+ *
+ * Only issues RPCs if canblock is non-zero.
+ */
+static int
+afs_EvalFakeStat_int(avcp, state, areq, canblock)
+ struct vcache **avcp;
+ struct afs_fakestat_state *state;
+ struct vrequest *areq;
+ int canblock;
+{
+ struct vcache *tvc, *root_vp;
+ struct volume *tvolp = NULL;
+ int code = 0;
+
+ osi_Assert(state->valid == 1);
+ osi_Assert(state->did_eval == 0);
+ state->did_eval = 1;
+ if (!afs_fakestat_enable)
+ return 0;
+ tvc = *avcp;
+ if (tvc->mvstat != 1)
+ return 0;
+
+ /* Is the call to VerifyVCache really necessary? */
+ code = afs_VerifyVCache(tvc, areq);
+ if (code)
+ goto done;
+ if (canblock) {
+ ObtainWriteLock(&tvc->lock, 599);
+ code = EvalMountPoint(tvc, NULL, &tvolp, areq);
+ ReleaseWriteLock(&tvc->lock);
+ if (code)
+ goto done;
+ if (tvolp) {
+ tvolp->dotdot = tvc->fid;
+ tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
+ tvolp->dotdot.Fid.Unique = tvc->parentUnique;
+ }
+ }
+ if (tvc->mvid && (tvc->states & CMValid)) {
+ if (!canblock) {
+ afs_int32 retry;
+
+ do {
+ retry = 0;
+ ObtainWriteLock(&afs_xvcache, 597);
+ root_vp = afs_FindVCache(tvc->mvid, 0, 0, &retry, 0);
+ if (root_vp && retry) {
+ ReleaseWriteLock(&afs_xvcache);
+ afs_PutVCache(root_vp, 0);
+ }
+ } while (root_vp && retry);
+ ReleaseWriteLock(&afs_xvcache);
+ } else {
+ root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL, WRITE_LOCK);
+ }
+ if (!root_vp) {
+ code = canblock ? ENOENT : 0;
+ goto done;
+ }
+ if (tvolp) {
+ /* Is this always kosher? Perhaps we should instead use
+ * NBObtainWriteLock to avoid potential deadlock.
+ */
+ ObtainWriteLock(&root_vp->lock, 598);
+ if (!root_vp->mvid)
+ root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
+ *root_vp->mvid = tvolp->dotdot;
+ ReleaseWriteLock(&root_vp->lock);
+ }
+ state->need_release = 1;
+ state->root_vp = root_vp;
+ *avcp = root_vp;
+ code = 0;
+ } else {
+ code = canblock ? ENOENT : 0;
+ }
+
+done:
+ if (tvolp)
+ afs_PutVolume(tvolp, WRITE_LOCK);
+ return code;
+}
+
+/*
+ * afs_EvalFakeStat
+ *
+ * Automatically does the equivalent of EvalMountPoint for vcache entries
+ * which are mount points. Remembers enough state to properly release
+ * the volume root vcache when afs_PutFakeStat() is called.
+ *
+ * State variable must be initialized by afs_InitFakeStat() beforehand.
+ *
+ * Returns 0 when everything succeeds and *avcp points to the vcache entry
+ * that should be used for the real vnode operation. Returns non-zero if
+ * something goes wrong and the error code should be returned to the user.
+ */
+int
+afs_EvalFakeStat(avcp, state, areq)
+ struct vcache **avcp;
+ struct afs_fakestat_state *state;
+ struct vrequest *areq;
+{
+ return afs_EvalFakeStat_int(avcp, state, areq, 1);
+}
+
+/*
+ * afs_TryEvalFakeStat
+ *
+ * Same as afs_EvalFakeStat, but tries not to talk to remote servers
+ * and only evaluate the mount point if all the data is already in
+ * local caches.
+ *
+ * Returns 0 if everything succeeds and *avcp points to a valid
+ * vcache entry (possibly evaluated).
+ */
+int
+afs_TryEvalFakeStat(avcp, state, areq)
+ struct vcache **avcp;
+ struct afs_fakestat_state *state;
+ struct vrequest *areq;
+{
+ return afs_EvalFakeStat_int(avcp, state, areq, 0);
+}
+
+/*
+ * afs_PutFakeStat
+ *
+ * Perform any necessary cleanup at the end of a vnode op, given that
+ * afs_InitFakeStat was previously called with this state.
+ */
+void
+afs_PutFakeStat(state)
+ struct afs_fakestat_state *state;
+{
+ osi_Assert(state->valid == 1);
+ if (state->need_release)
+ afs_PutVCache(state->root_vp, 0);
+ state->valid = 0;
+}
afs_ENameOK(aname)
register char *aname; {
return 1;
}
-Check_AtSys(avc, aname, outb, areq)
- register struct vcache *avc;
- char *aname, **outb;
- struct vrequest *areq;
-{
- register char *tname;
- register int error = 0, offset = -1;
-
- for (tname=aname; *tname; tname++) /*Move to the end of the string*/;
-
- /*
- * If the current string is 4 chars long or more, check to see if the
- * tail end is "@sys".
- */
- if ((tname >= aname + 4) && (AFS_EQ_ATSYS(tname-4)))
- offset = (tname - 4) - aname;
- if (offset < 0) {
- tname = aname;
- } else {
- tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
- if (offset)
- strncpy(tname, aname, offset);
- if (!afs_nfsexporter)
- strcpy(tname+offset, (afs_sysname ? afs_sysname : SYS_NAME ));
- else {
- register struct unixuser *au;
- register afs_int32 error;
- au = afs_GetUser(areq->uid, avc->fid.Cell, 0); afs_PutUser(au, 0);
- if (au->exporter) {
- error = EXP_SYSNAME(au->exporter, (char *)0, tname+offset);
- if (error)
- strcpy(tname+offset, "@sys");
- } else {
- strcpy(tname+offset, (afs_sysname ? afs_sysname : SYS_NAME ));
- }
- }
- error = 1;
- }
- *outb = tname;
- return error;
-}
-
-
-char *afs_getsysname(areq, adp)
+afs_getsysname(areq, adp, bufp)
register struct vrequest *areq;
- register struct vcache *adp; {
+ register struct vcache *adp;
+ register char *bufp;
+{
static char sysname[MAXSYSNAME];
register struct unixuser *au;
register afs_int32 error;
+ if (!afs_nfsexporter) {
+ strcpy(bufp, afs_sysname);
+ return 0;
+ }
AFS_STATCNT(getsysname);
- /* this whole interface is wrong, it should take a buffer ptr and copy
- * the data out.
- */
au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
afs_PutUser(au, 0);
if (au->exporter) {
- error = EXP_SYSNAME(au->exporter, (char *)0, sysname);
- if (error) return "@sys";
- else return sysname;
+ error = EXP_SYSNAME(au->exporter, (char *)0, bufp);
+ if (error)
+ strcpy(bufp, "@sys");
+ return -1;
} else {
- return (afs_sysname == 0? SYS_NAME : afs_sysname);
+ strcpy(bufp, afs_sysname);
+ return 0;
}
}
-void afs_HandleAtName(aname, aresult, areq, adp)
- register char *aname;
- register char *aresult;
- register struct vrequest *areq;
- register struct vcache *adp; {
- register int tlen;
- AFS_STATCNT(HandleAtName);
- tlen = strlen(aname);
- if (tlen >= 4 && strcmp(aname+tlen-4, "@sys")==0) {
- strncpy(aresult, aname, tlen-4);
- strcpy(aresult+tlen-4, afs_getsysname(areq, adp));
- }
- else strcpy(aresult, aname);
+Check_AtSys(avc, aname, state, areq)
+ register struct vcache *avc;
+ char *aname;
+ struct sysname_info *state;
+ struct vrequest *areq;
+{
+ if (AFS_EQ_ATSYS(aname)) {
+ state->offset = 0;
+ state->name = (char *) osi_AllocLargeSpace(AFS_SMALLOCSIZ);
+ state->allocked = 1;
+ state->index = afs_getsysname(areq, avc, state->name);
+ } else {
+ state->offset = -1;
+ state->allocked = 0;
+ state->index = 0;
+ state->name = aname;
}
+}
+
+Next_AtSys(avc, areq, state)
+ register struct vcache *avc;
+ struct vrequest *areq;
+ struct sysname_info *state;
+{
+ if (state->index == -1)
+ return 0; /* No list */
+
+  /* Check for the initial state of aname != "@sys" in Check_AtSys */
+ if (state->offset == -1 && state->allocked == 0) {
+ register char *tname;
+ /* Check for .*@sys */
+ for (tname=state->name; *tname; tname++)
+ /*Move to the end of the string*/;
+ if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname-4))) {
+ state->offset = (tname - 4) - state->name;
+ tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
+ strncpy(tname, state->name, state->offset);
+ state->name = tname;
+ state->allocked = 1;
+ state->index = afs_getsysname(areq, avc, state->name+state->offset);
+ return 1;
+ } else
+ return 0; /* .*@sys doesn't match either */
+ } else if (++(state->index) >= afs_sysnamecount
+ || !afs_sysnamelist[state->index])
+ return 0; /* end of list */
+ strcpy(state->name+state->offset, afs_sysnamelist[state->index]);
+ return 1;
+}
#if (defined(AFS_SGI62_ENV) || defined(AFS_SUN57_64BIT_ENV))
extern int BlobScan(ino64_t *afile, afs_int32 ablob);
* CForeign bit set.
*/
struct vcache * BStvc = (struct vcache *) 0;
-void afs_DoBulkStat(adp, dirCookie, areqp)
+int afs_DoBulkStat(adp, dirCookie, areqp)
struct vcache *adp;
long dirCookie;
struct vrequest *areqp;
struct dcache *dcp; /* chunk containing the dir block */
char *statMemp; /* status memory block */
char *cbfMemp; /* callback and fid memory block */
- long temp; /* temp for holding chunk length, &c. */
+ afs_size_t temp; /* temp for holding chunk length, &c. */
struct AFSFid *fidsp; /* file IDs were collecting */
struct AFSCallBack *cbsp; /* call back pointers */
struct AFSCallBack *tcbp; /* temp callback ptr */
long startTime; /* time we started the call,
* for callback expiration base
*/
- int statSeqNo; /* Valued of file size to detect races */
+    afs_size_t statSeqNo;	/* Value of file size to detect races */
int code; /* error code */
long newIndex; /* new index in the dir */
struct DirEntry *dirEntryp; /* dir entry we are examining */
struct volume *volp=0; /* volume ptr */
struct VenusFid dotdot;
int flagIndex; /* First file with bulk fetch flag set */
+ int inlinebulk=0; /* Did we use InlineBulk RPC or not? */
XSTATS_DECLS
/* first compute some basic parameters. We dont want to prefetch more
code = afs_VerifyVCache(adp, areqp);
if (code) goto done;
- dcp = afs_GetDCache(adp, 0, areqp, &temp, &temp, 1);
+ dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
if (!dcp) {
code = ENOENT;
goto done;
/* lock the directory cache entry */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
/*
* Make sure that the data in the cache is current. There are two
* 2. The cache data is no longer valid
*/
while ((adp->states & CStatd)
- && (dcp->flags & DFFetching)
+ && (dcp->dflags & DFFetching)
&& hsame(adp->m.DataVersion, dcp->f.versionNo)) {
- dcp->flags |= DFWaiting;
+ afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
+ ICL_TYPE_STRING, __FILE__,
+ ICL_TYPE_INT32, __LINE__,
+ ICL_TYPE_POINTER, dcp,
+ ICL_TYPE_INT32, dcp->dflags);
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&dcp->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(dcp);
goto tagain;
tvcp = afs_FindVCache(&tfid, 0, 0, &retry, 0 /* no stats | LRU */);
if (tvcp && retry) {
ReleaseWriteLock(&afs_xvcache);
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
}
} while (tvcp && retry);
if (!tvcp) { /* otherwise, create manually */
* preserve the value of the file size. We could
* flush the pages, but it wouldn't be worthwhile.
*/
- bcopy((char *) &tfid.Fid, (char *)(fidsp+fidIndex),
- sizeof(*fidsp));
+ memcpy((char *)(fidsp+fidIndex), (char *) &tfid.Fid, sizeof(*fidsp));
tvcp->states |= CBulkFetching;
tvcp->m.Length = statSeqNo;
fidIndex++;
}
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
} /* if dir vnode has non-zero entry */
/* move to the next dir entry by adding in the # of entries
} /* while loop over all dir entries */
/* now release the dir lock and prepare to make the bulk RPC */
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
/* release the chunk */
#ifdef RX_ENABLE_LOCKS
AFS_GUNLOCK();
#endif /* RX_ENABLE_LOCKS */
- code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
- &volSync);
+
+ if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
+ code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
+ &cbParm, &volSync);
+ if (code == RXGEN_OPCODE) {
+ tcp->srvr->server->flags |= SNO_INLINEBULK;
+ inlinebulk = 0;
+ code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
+ &cbParm, &volSync);
+ } else
+ inlinebulk=1;
+ } else {
+ inlinebulk=0;
+ code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
+ &volSync);
+ }
#ifdef RX_ENABLE_LOCKS
AFS_GLOCK();
#endif /* RX_ENABLE_LOCKS */
* We also have to take into account racing token revocations.
*/
for(i=0; i<fidIndex; i++) {
+ if ((&statsp[i])->errorCode)
+ continue;
afid.Cell = adp->fid.Cell;
afid.Fid.Volume = adp->fid.Fid.Volume;
afid.Fid.Vnode = fidsp[i].Vnode;
if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
continue;
}
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
ReleaseWriteLock(&afs_xcbhash);
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
continue;
}
ReleaseWriteLock(&tvcp->lock);
/* finally, we're done with the entry */
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
} /* for all files we got back */
/* finally return the pointer into the LRU queue */
- afs_PutVCache(lruvcp);
+ afs_PutVCache(lruvcp, 0);
done:
/* Be sure to turn off the CBulkFetching flags */
tvcp->states &= ~CBulkFetching;
}
if (tvcp != NULL) {
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
}
}
if ( volp )
afs_PutVolume(volp, READ_LOCK);
+    /* If we did the InlineBulk RPC, pull out the return code */
+ if (inlinebulk) {
+ if ((&statsp[0])->errorCode) {
+ afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
+ AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
+ (struct cell *)0);
+ code = (&statsp[0])->errorCode;
+ }
+ } else {
+ code = 0;
+ }
osi_FreeLargeSpace(statMemp);
osi_FreeLargeSpace(cbfMemp);
+ return code;
}
/* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
int flags;
struct vnode *rdir;
#else
+#if defined(UKERNEL)
+afs_lookup(adp, aname, avcp, acred, flags)
+ int flags;
+#else
afs_lookup(adp, aname, avcp, acred)
-#endif
+#endif /* UKERNEL */
+#endif /* SUN5 || SGI */
OSI_VC_DECL(adp);
struct vcache **avcp;
char *aname;
char *tname = (char *)0;
register struct vcache *tvc=0;
register afs_int32 code;
+ register afs_int32 bulkcode = 0;
int pass = 0, hit = 0;
long dirCookie;
extern afs_int32 afs_mariner; /*Writing activity to log?*/
OSI_VC_CONVERT(adp)
afs_hyper_t versionNo;
int no_read_access = 0;
+ struct sysname_info sysState; /* used only for @sys checking */
+ int dynrootRetry = 1;
+ struct afs_fakestat_state fakestate;
+ int tryEvalOnly = 0;
AFS_STATCNT(afs_lookup);
+ afs_InitFakeStat(&fakestate);
+
+ if (code = afs_InitReq(&treq, acred))
+ goto done;
+
#ifdef AFS_OSF_ENV
- ndp->ni_dvp = (struct vnode *)adp;
- bcopy(ndp->ni_ptr, aname, ndp->ni_namelen);
+ ndp->ni_dvp = AFSTOV(adp);
+ memcpy(aname, ndp->ni_ptr, ndp->ni_namelen);
aname[ndp->ni_namelen] = '\0';
#endif /* AFS_OSF_ENV */
- *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
-
- if (code = afs_InitReq(&treq, acred)) {
- goto done;
+#if defined(AFS_DARWIN_ENV)
+ /* Workaround for MacOSX Finder, which tries to look for
+ * .DS_Store and Contents under every directory.
+ */
+ if (afs_fakestat_enable && adp->mvstat == 1) {
+ if (strcmp(aname, ".DS_Store") == 0)
+ tryEvalOnly = 1;
+ if (strcmp(aname, "Contents") == 0)
+ tryEvalOnly = 1;
}
+#endif
- /* lookup the name aname in the appropriate dir, and return a cache entry
- on the resulting fid */
+ if (tryEvalOnly)
+ code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
+ else
+ code = afs_EvalFakeStat(&adp, &fakestate, &treq);
+ if (tryEvalOnly && adp->mvstat == 1)
+ code = ENOENT;
+ if (code)
+ goto done;
- /*
- * check for, and handle "@sys" if it's there. We should be able
- * to avoid the alloc and the strcpy with a little work, but it's
- * not pressing. If there aren't any remote users (ie, via the
- * NFS translator), we have a slightly easier job.
- * the faster way to do this is to check for *aname == '@' and if
- * it's there, check for @sys, otherwise, assume there's no @sys
- * then, if the lookup fails, check for .*@sys...
- */
- if (!AFS_EQ_ATSYS(aname)) {
- tname = aname;
- }
- else {
- tname = (char *) osi_AllocLargeSpace(AFS_SMALLOCSIZ);
- if (!afs_nfsexporter)
- strcpy(tname, (afs_sysname ? afs_sysname : SYS_NAME ));
- else {
- register struct unixuser *au;
- register afs_int32 error;
- au = afs_GetUser(treq.uid, adp->fid.Cell, 0); afs_PutUser(au, 0);
- if (au->exporter) {
- error = EXP_SYSNAME(au->exporter, (char *)0, tname);
- if (error)
- strcpy(tname, "@sys");
- } else {
- strcpy(tname, (afs_sysname ? afs_sysname : SYS_NAME ));
- }
- }
- }
+ *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
/* come back to here if we encounter a non-existent object in a read-only
volume's directory */
redo:
*avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
+ bulkcode = 0;
if (!(adp->states & CStatd)) {
- if (code = afs_VerifyVCache2(adp, &treq))
- goto done;
+ if (code = afs_VerifyVCache2(adp, &treq)) {
+ goto done;
+ }
}
else code = 0;
/* watch for ".." in a volume root */
- if (adp->mvstat == 2 && tname[0] == '.' && tname[1] == '.' && !tname[2]) {
+ if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
/* looking up ".." in root via special hacks */
if (adp->mvid == (struct VenusFid *) 0 || adp->mvid->Fid.Volume == 0) {
#ifdef AFS_OSF_ENV
extern struct vcache *afs_globalVp;
if (adp == afs_globalVp) {
- struct vnode *rvp = (struct vnode *)adp;
+ struct vnode *rvp = AFSTOV(adp);
/*
ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
ndp->ni_dvp = ndp->ni_vp;
*avcp = tvc;
code = (tvc ? 0 : ENOENT);
hit = 1;
- if (tvc && !tvc->vrefCount) {
+ if (tvc && !VREFCOUNT(tvc)) {
osi_Panic("TT1");
}
if (code) {
/* Check for read access as well. We need read access in order to
stat files, but not to stat subdirectories. */
- if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
+ if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
no_read_access = 1;
/* special case lookup of ".". Can we check for it sooner in this code,
* I'm not fiddling with the LRUQ here, either, perhaps I should, or else
* invent a lightweight version of GetVCache.
*/
- if (tname[0] == '.' && !tname[1]) { /* special case */
+ if (aname[0] == '.' && !aname[1]) { /* special case */
ObtainReadLock(&afs_xvcache);
osi_vnhold(adp, 0);
ReleaseReadLock(&afs_xvcache);
- code = 0;
- *avcp = tvc = adp;
- hit = 1;
- if (adp && !adp->vrefCount) {
+ code = 0;
+ *avcp = tvc = adp;
+ hit = 1;
+ if (adp && !VREFCOUNT(adp)) {
osi_Panic("TT2");
}
- goto done;
+ goto done;
}
+ Check_AtSys(adp, aname, &sysState, &treq);
+ tname = sysState.name;
+
+ /* 1st Check_AtSys and lookup by tname is required here, for now,
+ because the dnlc is *not* told to remove entries for the parent
+ dir of file/dir op that afs_LocalHero likes, but dnlc is informed
+ if the cached entry for the parent dir is invalidated for a
+ non-local change.
+ Otherwise, we'd be able to do a dnlc lookup on an entry ending
+ w/@sys and know the dnlc was consistent with reality. */
tvc = osi_dnlc_lookup (adp, tname, WRITE_LOCK);
*avcp = tvc; /* maybe wasn't initialized, but it is now */
if (tvc) {
- if (no_read_access && vType(tvc) != VDIR) {
- /* need read access on dir to stat non-directory */
+ if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
+ /* need read access on dir to stat non-directory / non-link */
afs_PutVCache(tvc, WRITE_LOCK);
*avcp = (struct vcache *)0;
code = EACCES;
{
register struct dcache *tdc;
- afs_int32 dirOffset, dirLen;
+ afs_size_t dirOffset, dirLen;
ino_t theDir;
struct VenusFid tfid;
/* now we have to lookup the next fid */
- tdc = afs_GetDCache(adp, 0, &treq, &dirOffset, &dirLen, 1);
+ tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
if (!tdc) {
*avcp = (struct vcache *)0; /* redundant, but harmless */
code = EIO;
/* now we will just call dir package with appropriate inode.
Dirs are always fetched in their entirety for now */
- /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
/*
* Make sure that the data in the cache is current. There are two
* 2. The cache data is no longer valid
*/
while ((adp->states & CStatd)
- && (tdc->flags & DFFetching)
+ && (tdc->dflags & DFFetching)
&& hsame(adp->m.DataVersion, tdc->f.versionNo)) {
- tdc->flags |= DFWaiting;
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(tdc);
goto redo;
/* Save the version number for when we call osi_dnlc_enter */
hset(versionNo, tdc->f.versionNo);
+ /*
+ * check for, and handle "@sys" if it's there. We should be able
+ * to avoid the alloc and the strcpy with a little work, but it's
+ * not pressing. If there aren't any remote users (ie, via the
+ * NFS translator), we have a slightly easier job.
+ * the faster way to do this is to check for *aname == '@' and if
+ * it's there, check for @sys, otherwise, assume there's no @sys
+ * then, if the lookup fails, check for .*@sys...
+ */
+ /* above now implemented by Check_AtSys and Next_AtSys */
+
+ /* lookup the name in the appropriate dir, and return a cache entry
+ on the resulting fid */
theDir = tdc->f.inode;
- code = afs_dir_LookupOffset(&theDir, tname, &tfid.Fid, &dirCookie);
- if (code == ENOENT && tname == aname) {
- int len;
- len = strlen(aname);
- if (len >= 4 && AFS_EQ_ATSYS(aname+len-4)) {
- tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
- afs_HandleAtName(aname, tname, &treq, adp);
- code = afs_dir_LookupOffset(&theDir, tname, &tfid.Fid, &dirCookie);
- }
+ code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
+
+ /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
+ while (code == ENOENT && Next_AtSys(adp, &treq, &sysState)) {
+ code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
}
- ReleaseReadLock(&adp->lock);
+ tname = sysState.name;
+
+ ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
+ if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
+ struct cell *tcell;
+
+ ReleaseReadLock(&adp->lock);
+ dynrootRetry = 0;
+ if (*tname == '.')
+ tcell = afs_GetCellByName(tname + 1, READ_LOCK);
+ else
+ tcell = afs_GetCellByName(tname, READ_LOCK);
+ if (tcell) {
+ afs_PutCell(tcell, READ_LOCK);
+ afs_RefreshDynroot();
+ if (tname != aname && tname) osi_FreeLargeSpace(tname);
+ goto redo;
+ }
+ } else {
+ ReleaseReadLock(&adp->lock);
+ }
+
/* new fid has same cell and volume */
tfid.Cell = adp->fid.Cell;
tfid.Fid.Volume = adp->fid.Fid.Volume;
/* prefetch some entries, if the dir is currently open. The variable
* dirCookie tells us where to start prefetching from.
*/
- if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign)) {
+ if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign) && !afs_IsDynroot(adp)) {
afs_int32 retry;
/* if the entry is not in the cache, or is in the cache,
* but hasn't been statd, then do a bulk stat operation.
ReleaseReadLock(&afs_xvcache);
} while (tvc && retry);
- if (!tvc || !(tvc->states & CStatd)) {
- afs_DoBulkStat(adp, dirCookie, &treq);
- }
+ if (!tvc || !(tvc->states & CStatd))
+ bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
+ else
+ bulkcode = 0;
/* if the vcache isn't usable, release it */
if (tvc && !(tvc->states & CStatd)) {
- afs_PutVCache(tvc);
+ afs_PutVCache(tvc, 0);
tvc = (struct vcache *) 0;
}
+ } else {
+ tvc = (struct vcache *) 0;
+ bulkcode = 0;
}
- else tvc = (struct vcache *) 0;
-
+
/* now get the status info, if we don't already have it */
/* This is kind of weird, but we might wind up accidentally calling
* RXAFS_Lookup because we happened upon a file which legitimately
* has a 0 uniquifier. That is the result of allowing unique to wrap
- * to 0. This was fixed in AFS 3.4. For CForeigh, Unique == 0 means that
+ * to 0. This was fixed in AFS 3.4. For CForeign, Unique == 0 means that
* the file has not yet been looked up.
*/
if (!tvc) {
tvc = afs_LookupVCache(&tfid, &treq, &cached, WRITE_LOCK,
adp, tname);
}
- if (!tvc) { /* lookup failed or wasn't called */
- tvc = afs_GetVCache(&tfid, &treq, &cached, (struct vcache*)0,
- WRITE_LOCK);
- }
+ if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
+ tvc = afs_GetVCache(&tfid, &treq, &cached, (struct vcache*)0,
+ WRITE_LOCK);
+ }
} /* if !tvc */
} /* sub-block just to reduce stack usage */
tvc->parentVnode = adp->fid.Fid.Vnode;
tvc->parentUnique = adp->fid.Fid.Unique;
tvc->states &= ~CBulkStat;
- if (tvc->mvstat == 1) {
- /* a mt point, possibly unevaluated */
- struct volume *tvolp;
+
+#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
+ if (!(flags & AFS_LOOKUP_NOEVAL))
+ /* don't eval mount points */
+#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
+ if (!afs_fakestat_enable && tvc->mvstat == 1) {
+ /* a mt point, possibly unevaluated */
+ struct volume *tvolp;
ObtainWriteLock(&tvc->lock,133);
code = EvalMountPoint(tvc, adp, &tvolp, &treq);
ReleaseWriteLock(&tvc->lock);
if (code) {
+ afs_PutVCache(tvc, WRITE_LOCK);
if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
goto done;
}
}
}
*avcp = tvc;
- if (tvc && !tvc->vrefCount) {
+ if (tvc && !VREFCOUNT(tvc)) {
osi_Panic("TT3");
}
code = 0;
if (!FidCmp(&(tvc->fid), &(adp->fid))) {
afs_PutVCache(*avcp, WRITE_LOCK);
*avcp = NULL;
+ afs_PutFakeStat(&fakestate);
return afs_CheckCode(EISDIR, &treq, 18);
}
}
if (afs_mariner)
afs_AddMarinerName(aname, tvc);
+
+#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
+ if (!(flags & AFS_LOOKUP_NOEVAL))
+ /* Here we don't enter the name into the DNLC because we want the
+ evaluated mount dir to be there (the vcache for the mounted volume)
+ rather than the vc of the mount point itself. we can still find the
+ mount point's vc in the vcache by its fid. */
+#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
if (!hit) {
osi_dnlc_enter (adp, aname, tvc, &versionNo);
}
/* So Linux inode cache is up to date. */
code = afs_VerifyVCache(tvc, &treq);
#else
+ afs_PutFakeStat(&fakestate);
return 0; /* can't have been any errors if hit and !code */
#endif
}
}
+ if (bulkcode) code = bulkcode; else
code = afs_CheckCode(code, &treq, 19);
if (code) {
/* If there is an error, make sure *avcp is null.
*avcp = (struct vcache *)0;
}
+ afs_PutFakeStat(&fakestate);
return code;
}