* afs_index
*/
-#include "../afs/param.h" /* Should be always first */
+#include <afsconfig.h>
+#include "../afs/param.h"
+
+RCSID("$Header$");
+
#include "../afs/sysincludes.h" /* Standard vendor system headers */
#include "../afs/afsincludes.h" /* Afs-based standard headers */
#include "../afs/afs_stats.h" /* statistics */
extern afs_rwlock_t afs_xcbhash;
extern struct afs_exporter *afs_nfsexporter;
extern char *afs_sysname;
+extern char *afs_sysnamelist[];
+extern int afs_sysnamecount;
extern struct afs_q VLRU; /*vcache LRU*/
#ifdef AFS_LINUX22_ENV
extern struct inode_operations afs_symlink_iops, afs_dir_iops;
afs_int32 afs_bulkStatsDone;
static int bulkStatCounter = 0; /* counter for bulk stat seq. numbers */
+int afs_fakestat_enable = 0; /* 1: fakestat-all, 2: fakestat-crosscell */
/* this would be faster if it did comparison as int32word, but would be
* what "@sys" is in binary... */
#define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
-char *
-afs_strcat(s1, s2)
- register char *s1, *s2;
+char *afs_strcat(register char *s1, register char *s2)
{
register char *os1;
while (*s1++)
;
--s1;
- while (*s1++ = *s2++)
+ while ((*s1++ = *s2++))
;
return (os1);
}
-char *afs_index(a, c)
-    register char *a, c; {
+/* Kernel-local strchr(): return a pointer to the first occurrence of
+ * character c in string a, or NULL if c does not occur.  Unlike strchr,
+ * searching for '\0' yields NULL, not a pointer to the terminator. */
+char *afs_index(register char *a, register char c)
+{
    register char tc;
    AFS_STATCNT(afs_index);
-    while (tc = *a) {
+    while ((tc = *a)) {
	if (tc == c) return a;
	else a++;
    }
-    return (char *) 0;
+    return NULL;
}
/* call under write lock, evaluate mvid field from a mt pt.
- * avc is the vnode of the mount point object.
- * advc is the vnode of the containing directory
+ * avc is the vnode of the mount point object; must be write-locked.
+ * advc is the vnode of the containing directory (optional; if NULL and
+ * EvalMountPoint succeeds, the caller must initialize (*avolpp)->dotdot)
* avolpp is where we return a pointer to the volume named by the mount pt, if success
* areq is the identity of the caller.
*
* NOTE: this function returns a held volume structure in *volpp if it returns 0!
*/
-EvalMountPoint(avc, advc, avolpp, areq)
- register struct vcache *avc;
- struct volume **avolpp;
- struct vcache *advc; /* the containing dir */
- register struct vrequest *areq;
+int EvalMountPoint(register struct vcache *avc, struct vcache *advc,
+ struct volume **avolpp, register struct vrequest *areq)
{
afs_int32 code;
struct volume *tvp = 0;
struct VenusFid tfid;
struct cell *tcell;
char *cpos, *volnamep;
- char type, buf[128];
+ char type, *buf;
afs_int32 prefetchRO; /* 1=>No 2=>Yes */
afs_int32 mtptCell, assocCell, hac=0;
afs_int32 samecell, roname, len;
#ifdef notdef
if (avc->mvid && (avc->states & CMValid)) return 0; /* done while racing */
#endif
- *avolpp = (struct volume *)0;
+ *avolpp = NULL;
code = afs_HandleLink(avc, areq);
if (code) return code;
}
if (!tcell) return ENODEV;
- mtptCell = tcell->cell; /* The cell for the mountpoint */
+ mtptCell = tcell->cellNum; /* The cell for the mountpoint */
if (tcell->lcellp) {
- hac = 1; /* has associated cell */
- assocCell = tcell->lcellp->cell; /* The associated cell */
+ hac = 1; /* has associated cell */
+ assocCell = tcell->lcellp->cellNum; /* The associated cell */
}
afs_PutCell(tcell, READ_LOCK);
* Don't know why we do this. Would have still found it in above call - jpm.
*/
if (!tvp && (prefetchRO == 2)) {
- strcpy(buf, volnamep);
- afs_strcat(buf, ".readonly");
+ buf = (char *)osi_AllocSmallSpace(strlen(volnamep)+10);
- tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
+ strcpy(buf, volnamep);
+ afs_strcat(buf, ".readonly");
- /* Try the associated linked cell if failed */
- if (!tvp && hac && areq->volumeError) {
- tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
- }
+ tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
+
+ /* Try the associated linked cell if failed */
+ if (!tvp && hac && areq->volumeError) {
+ tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
+ }
+ osi_FreeSmallSpace(buf);
}
- if (!tvp) return ENOENT; /* Couldn't find the volume */
+ if (!tvp) return ENODEV; /* Couldn't find the volume */
/* Don't cross mountpoint from a BK to a BK volume */
if ((avc->states & CBackup) && (tvp->states & VBackup)) {
afs_PutVolume(tvp, WRITE_LOCK);
- return ELOOP;
+ return ENODEV;
}
/* If we want (prefetched) the RO and it exists, then drop the
tfid.Cell = tvp->cell;
afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
- if (!tvp) return ENOENT; /* oops, can't do it */
+ if (!tvp) return ENODEV; /* oops, can't do it */
}
if (avc->mvid == 0)
* to the new path.
*/
tvp->mtpoint = avc->fid; /* setup back pointer to mtpoint */
- tvp->dotdot = advc->fid;
+ if (advc) tvp->dotdot = advc->fid;
*avolpp = tvp;
return 0;
}
-
-afs_ENameOK(aname)
- register char *aname; {
- register char tc;
- register int tlen;
- AFS_STATCNT(ENameOK);
- tlen = strlen(aname);
- if (tlen >= 4 && strcmp(aname+tlen-4, "@sys") == 0) return 0;
- return 1;
+/*
+ * afs_InitFakeStat
+ *
+ * Must be called on an afs_fakestat_state object before calling
+ * afs_EvalFakeStat or afs_PutFakeStat. Calling afs_PutFakeStat
+ * without calling afs_EvalFakeStat is legal, as long as this
+ * function is called.
+ */
+void afs_InitFakeStat(struct afs_fakestat_state *state)
+{
+    /* NOTE(review): if afs_fakestat_enable is switched on between this
+     * call and afs_PutFakeStat, the osi_Assert there reads state->valid
+     * uninitialized -- confirm the flag cannot change mid-operation. */
+    if (!afs_fakestat_enable)
+	return;
+
+    state->valid = 1;		/* state object is initialized */
+    state->did_eval = 0;	/* afs_EvalFakeStat not yet run on it */
+    state->need_release = 0;	/* no volume-root vcache reference held */
+}
-Check_AtSys(avc, aname, outb, areq)
- register struct vcache *avc;
- char *aname, **outb;
- struct vrequest *areq;
+/*
+ * afs_EvalFakeStat_int
+ *
+ * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
+ * which is called by those wrapper functions.
+ *
+ * Only issues RPCs if canblock is non-zero.
+ */
+int afs_EvalFakeStat_int(struct vcache **avcp, struct afs_fakestat_state *state,
+	struct vrequest *areq, int canblock)
{
-    register char *tname;
-    register int error = 0, offset = -1;
+    struct vcache *tvc, *root_vp;
+    struct volume *tvolp = NULL;
+    int code = 0;
-    for (tname=aname; *tname; tname++) /*Move to the end of the string*/;
+    if (!afs_fakestat_enable)
+	return 0;
-    /*
-     * If the current string is 4 chars long or more, check to see if the
-     * tail end is "@sys".
-     */
-    if ((tname >= aname + 4) && (AFS_EQ_ATSYS(tname-4)))
-	offset = (tname - 4) - aname;
-    if (offset < 0) {
-	tname = aname;
-    } else {
-	tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
-	if (offset)
-	    strncpy(tname, aname, offset);
-	if (!afs_nfsexporter)
-	    strcpy(tname+offset, (afs_sysname ? afs_sysname : SYS_NAME ));
-	else {
-	    register struct unixuser *au;
-	    register afs_int32 error;
-	    au = afs_GetUser(areq->uid, avc->fid.Cell, 0); afs_PutUser(au, 0);
-	    if (au->exporter) {
-		error = EXP_SYSNAME(au->exporter, (char *)0, tname+offset);
-		if (error)
-		    strcpy(tname+offset, "@sys");
-	    } else {
-		strcpy(tname+offset, (afs_sysname ? afs_sysname : SYS_NAME ));
-	    }
+    osi_Assert(state->valid == 1);
+    osi_Assert(state->did_eval == 0);
+    state->did_eval = 1;
+
+    tvc = *avcp;
+    /* mvstat == 1: this vcache entry is a mount point (see afs_EvalFakeStat
+     * header comment); anything else needs no evaluation */
+    if (tvc->mvstat != 1)
+	return 0;
+
+    /* Is the call to VerifyVCache really necessary? */
+    /* NOTE(review): afs_VerifyVCache may contact the server even when
+     * canblock is 0 -- confirm that is acceptable for TryEvalFakeStat. */
+    code = afs_VerifyVCache(tvc, areq);
+    if (code)
+	goto done;
+    if (canblock) {
+	ObtainWriteLock(&tvc->lock, 599);
+	code = EvalMountPoint(tvc, NULL, &tvolp, areq);
+	ReleaseWriteLock(&tvc->lock);
+	if (code)
+	    goto done;
+	if (tvolp) {
+	    /* advc was NULL above, so fill in dotdot from the parent here */
+	    tvolp->dotdot = tvc->fid;
+	    tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
+	    tvolp->dotdot.Fid.Unique = tvc->parentUnique;
	}
-	error = 1;
    }
-    *outb = tname;
-    return error;
+    if (tvc->mvid && (tvc->states & CMValid)) {
+	if (!canblock) {
+	    afs_int32 retry;
+
+	    do {
+		retry = 0;
+		ObtainWriteLock(&afs_xvcache, 597);
+		root_vp = afs_FindVCache(tvc->mvid, &retry, 0);
+		if (root_vp && retry) {
+		    ReleaseWriteLock(&afs_xvcache);
+		    afs_PutVCache(root_vp);
+		}
+	    } while (root_vp && retry);
+	    /* loop exits with afs_xvcache still held; drop it here */
+	    ReleaseWriteLock(&afs_xvcache);
+	} else {
+	    root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL);
+	}
+	if (!root_vp) {
+	    code = canblock ? ENOENT : 0;
+	    goto done;
+	}
+	if (tvolp) {
+	    /* Is this always kosher? Perhaps we should instead use
+	     * NBObtainWriteLock to avoid potential deadlock.
+	     */
+	    ObtainWriteLock(&root_vp->lock, 598);
+	    if (!root_vp->mvid)
+		root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
+	    *root_vp->mvid = tvolp->dotdot;
+	    ReleaseWriteLock(&root_vp->lock);
+	}
+	state->need_release = 1;
+	state->root_vp = root_vp;
+	*avcp = root_vp;
+	code = 0;
+    } else {
+	code = canblock ? ENOENT : 0;
+    }
+
+done:
+    if (tvolp)
+	afs_PutVolume(tvolp, WRITE_LOCK);
+    return code;
+}
+
+/*
+ * afs_EvalFakeStat
+ *
+ * Automatically does the equivalent of EvalMountPoint for vcache entries
+ * which are mount points.  Remembers enough state to properly release
+ * the volume root vcache when afs_PutFakeStat() is called.
+ *
+ * State variable must be initialized by afs_InitFakeStat() beforehand.
+ *
+ * This variant may block and issue RPCs (canblock == 1); see
+ * afs_TryEvalFakeStat for the non-blocking variant.
+ *
+ * Returns 0 when everything succeeds and *avcp points to the vcache entry
+ * that should be used for the real vnode operation.  Returns non-zero if
+ * something goes wrong and the error code should be returned to the user.
+ */
+int
+afs_EvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
+	struct vrequest *areq)
+{
+    return afs_EvalFakeStat_int(avcp, state, areq, 1);
+}
+
+/*
+ * afs_TryEvalFakeStat
+ *
+ * Same as afs_EvalFakeStat, but tries not to talk to remote servers
+ * and only evaluate the mount point if all the data is already in
+ * local caches.
+ *
+ * Returns 0 if everything succeeds and *avcp points to a valid
+ * vcache entry (possibly evaluated).
+ */
+int afs_TryEvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
+	struct vrequest *areq)
+{
+    return afs_EvalFakeStat_int(avcp, state, areq, 0);	/* canblock == 0 */
+}
+
+/*
+ * afs_PutFakeStat
+ *
+ * Perform any necessary cleanup at the end of a vnode op, given that
+ * afs_InitFakeStat was previously called with this state.
+ */
+void afs_PutFakeStat(struct afs_fakestat_state *state)
+{
+    if (!afs_fakestat_enable)
+	return;
+
+    osi_Assert(state->valid == 1);
+    /* drop the volume-root vcache reference taken by afs_EvalFakeStat_int */
+    if (state->need_release)
+	afs_PutVCache(state->root_vp);
+    state->valid = 0;	/* guards against a second afs_PutFakeStat */
}
+
+/* Return 0 if aname ends in "@sys", 1 otherwise. */
+int afs_ENameOK(register char *aname)
+{
+    register int tlen;
+    AFS_STATCNT(ENameOK);
+    tlen = strlen(aname);
+    if (tlen >= 4 && strcmp(aname+tlen-4, "@sys") == 0) return 0;
+    return 1;
+}
-char *afs_getsysname(areq, adp)
-    register struct vrequest *areq;
-    register struct vcache *adp; {
+/*
+ * afs_getsysname
+ *
+ * Copy the sysname to substitute for "@sys" into bufp (caller supplies
+ * the buffer; it must hold at least MAXSYSNAME bytes).
+ * Returns 0 when the local sysname list applies (caller may iterate it
+ * via Next_AtSys), -1 when the name came from an NFS exporter (no list).
+ */
+int afs_getsysname(register struct vrequest *areq, register struct vcache *adp,
+	register char *bufp)
+{
-    static char sysname[MAXSYSNAME];
    register struct unixuser *au;
    register afs_int32 error;
+    /* NOTE(review): assumes afs_sysname is always non-NULL by now
+     * (old code fell back to SYS_NAME) -- confirm initialization. */
+    if (!afs_nfsexporter) {
+	strcpy(bufp, afs_sysname);
+	return 0;
+    }
    AFS_STATCNT(getsysname);
-    /* this whole interface is wrong, it should take a buffer ptr and copy
-     * the data out.
-     */
+    /* NOTE(review): au is dereferenced after afs_PutUser(); confirm the
+     * unixuser stays valid (refcounting), as the old code did the same. */
    au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
    afs_PutUser(au, 0);
    if (au->exporter) {
-	error = EXP_SYSNAME(au->exporter, (char *)0, sysname);
-	if (error) return "@sys";
-	else return sysname;
+	error = EXP_SYSNAME(au->exporter, NULL, bufp);
+	if (error)
+	    strcpy(bufp, "@sys");
+	return -1;
    } else {
-	return (afs_sysname == 0? SYS_NAME : afs_sysname);
+	strcpy(bufp, afs_sysname);
+	return 0;
    }
}
-void afs_HandleAtName(aname, aresult, areq, adp)
- register char *aname;
- register char *aresult;
- register struct vrequest *areq;
- register struct vcache *adp; {
- register int tlen;
- AFS_STATCNT(HandleAtName);
- tlen = strlen(aname);
- if (tlen >= 4 && strcmp(aname+tlen-4, "@sys")==0) {
- strncpy(aresult, aname, tlen-4);
- strcpy(aresult+tlen-4, afs_getsysname(areq, adp));
- }
- else strcpy(aresult, aname);
+/*
+ * Check_AtSys
+ *
+ * Initialize a sysname_info for name 'aname' being looked up in
+ * directory 'avc'.  If the name is exactly "@sys", allocate a buffer
+ * and expand it to the first sysname via afs_getsysname(); otherwise
+ * record the name unchanged so Next_AtSys() can later try ".*@sys".
+ */
+int Check_AtSys(register struct vcache *avc, const char *aname,
+	struct sysname_info *state, struct vrequest *areq)
+{
+    if (AFS_EQ_ATSYS(aname)) {
+	state->offset = 0;
+	/* NOTE(review): allocates LargeSpace but sizes the request by
+	 * AFS_SMALLOCSIZ -- confirm osi_AllocLargeSpace semantics. */
+	state->name = (char *) osi_AllocLargeSpace(AFS_SMALLOCSIZ);
+	state->allocked = 1;
+	state->index = afs_getsysname(areq, avc, state->name);
+    } else {
+	state->offset = -1;
+	state->allocked = 0;
+	state->index = 0;
+	state->name = (char *)aname;	/* not modified; cast drops const */
    }
+    return 0;	/* was missing: function is declared int, so falling off
+		 * the end is undefined if a caller ever reads the value */
+}
+
+/*
+ * Next_AtSys
+ *
+ * Advance a sysname_info (set up by Check_AtSys) to the next candidate
+ * expansion of a name containing "@sys".  Returns 1 if state->name now
+ * holds a new candidate to try, 0 when no (further) expansion applies.
+ */
+int Next_AtSys(register struct vcache *avc, struct vrequest *areq,
+	struct sysname_info *state)
+{
+    if (state->index == -1)
+	return 0;	/* No list */
+
+    /* Check for the initial state of aname != "@sys" in Check_AtSys */
+    if (state->offset == -1 && state->allocked == 0) {
+	register char *tname;
+	/* Check for .*@sys */
+	for (tname=state->name; *tname; tname++)
+	    /*Move to the end of the string*/;
+	if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname-4))) {
+	    state->offset = (tname - 4) - state->name;
+	    tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
+	    /* strncpy does not NUL-terminate here; afs_getsysname below
+	     * writes the terminated sysname starting at tname+offset */
+	    strncpy(tname, state->name, state->offset);
+	    state->name = tname;
+	    state->allocked = 1;
+	    state->index = afs_getsysname(areq, avc, state->name+state->offset);
+	    return 1;
+	} else
+	    return 0;	/* .*@sys doesn't match either */
+    } else if (++(state->index) >= afs_sysnamecount
+	       || !afs_sysnamelist[(int)state->index])
+	return 0;	/* end of list */
+    strcpy(state->name+state->offset, afs_sysnamelist[(int)state->index]);
+    return 1;
+}
#if (defined(AFS_SGI62_ENV) || defined(AFS_SUN57_64BIT_ENV))
extern int BlobScan(ino64_t *afile, afs_int32 ablob);
#else
+#if defined AFS_LINUX_64BIT_KERNEL
+extern int BlobScan(long *afile, afs_int32 ablob);
+#else
extern int BlobScan(afs_int32 *afile, afs_int32 ablob);
#endif
+#endif
/* called with an unlocked directory and directory cookie. Areqp
* ensure that vcaches created for failed RPC's to older servers have the
* CForeign bit set.
*/
-struct vcache * BStvc = (struct vcache *) 0;
-void afs_DoBulkStat(adp, dirCookie, areqp)
- struct vcache *adp;
- long dirCookie;
- struct vrequest *areqp;
+static struct vcache *BStvc = NULL;
+
+int afs_DoBulkStat(struct vcache *adp, long dirCookie, struct vrequest *areqp)
{
int nentries; /* # of entries to prefetch */
int nskip; /* # of slots in the LRU queue to skip */
struct dcache *dcp; /* chunk containing the dir block */
char *statMemp; /* status memory block */
char *cbfMemp; /* callback and fid memory block */
- long temp; /* temp for holding chunk length, &c. */
+ afs_size_t temp; /* temp for holding chunk length, &c. */
struct AFSFid *fidsp; /* file IDs were collecting */
struct AFSCallBack *cbsp; /* call back pointers */
struct AFSCallBack *tcbp; /* temp callback ptr */
struct conn *tcp; /* conn for call */
AFSCBs cbParm; /* callback parm for bulk stat */
struct server *hostp = 0; /* host we got callback from */
- long origEvenCBs; /* original # of callbacks for even-fid files */
- long origOddCBs; /* original # of callbacks for odd-fid files */
- long origEvenZaps; /* original # of recycles for even-fid files */
- long origOddZaps; /* original # of recycles for odd-fid files */
long startTime; /* time we started the call,
* for callback expiration base
*/
- int statSeqNo; /* Valued of file size to detect races */
+    afs_size_t statSeqNo;	/* Value of file size used to detect races */
int code; /* error code */
long newIndex; /* new index in the dir */
struct DirEntry *dirEntryp; /* dir entry we are examining */
struct volume *volp=0; /* volume ptr */
struct VenusFid dotdot;
int flagIndex; /* First file with bulk fetch flag set */
+ int inlinebulk=0; /* Did we use InlineBulk RPC or not? */
XSTATS_DECLS
/* first compute some basic parameters. We dont want to prefetch more
code = afs_VerifyVCache(adp, areqp);
if (code) goto done;
- dcp = afs_GetDCache(adp, 0, areqp, &temp, &temp, 1);
+ dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
if (!dcp) {
code = ENOENT;
goto done;
/* lock the directory cache entry */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
/*
* Make sure that the data in the cache is current. There are two
* 2. The cache data is no longer valid
*/
while ((adp->states & CStatd)
- && (dcp->flags & DFFetching)
+ && (dcp->dflags & DFFetching)
&& hsame(adp->m.DataVersion, dcp->f.versionNo)) {
- dcp->flags |= DFWaiting;
+ afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
+ ICL_TYPE_STRING, __FILE__,
+ ICL_TYPE_INT32, __LINE__,
+ ICL_TYPE_POINTER, dcp,
+ ICL_TYPE_INT32, dcp->dflags);
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&dcp->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(dcp);
goto tagain;
do {
retry = 0;
ObtainWriteLock(&afs_xvcache, 130);
- tvcp = afs_FindVCache(&tfid, 0, 0, &retry, 0 /* no stats | LRU */);
+ tvcp = afs_FindVCache(&tfid, &retry, 0 /* no stats | LRU */);
if (tvcp && retry) {
ReleaseWriteLock(&afs_xvcache);
afs_PutVCache(tvcp);
}
} while (tvcp && retry);
if (!tvcp) { /* otherwise, create manually */
- tvcp = afs_NewVCache(&tfid, hostp, 0, 0);
+ tvcp = afs_NewVCache(&tfid, hostp);
ObtainWriteLock(&tvcp->lock, 505);
ReleaseWriteLock(&afs_xvcache);
afs_RemoveVCB(&tfid);
* preserve the value of the file size. We could
* flush the pages, but it wouldn't be worthwhile.
*/
- bcopy((char *) &tfid.Fid, (char *)(fidsp+fidIndex),
- sizeof(*fidsp));
+ memcpy((char *)(fidsp+fidIndex), (char *) &tfid.Fid, sizeof(*fidsp));
tvcp->states |= CBulkFetching;
tvcp->m.Length = statSeqNo;
fidIndex++;
} /* while loop over all dir entries */
/* now release the dir lock and prepare to make the bulk RPC */
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
/* release the chunk */
if (tcp) {
hostp = tcp->srvr->server;
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
- code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
- &volSync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
+
+ if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
+ code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
+ &cbParm, &volSync);
+ if (code == RXGEN_OPCODE) {
+ tcp->srvr->server->flags |= SNO_INLINEBULK;
+ inlinebulk = 0;
+ code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
+ &cbParm, &volSync);
+ } else
+ inlinebulk=1;
+ } else {
+ inlinebulk=0;
+ code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
+ &volSync);
+ }
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
} while (afs_Analyze(tcp, code, &adp->fid, areqp,
- AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL));
/* now, if we didnt get the info, bail out. */
if (code) goto done;
/* actually a serious error, probably should panic. Probably will
* panic soon, oh well. */
ReleaseReadLock(&afs_xvcache);
+ afs_warnuser("afs_DoBulkStat: VLRU empty!");
goto done;
}
if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
* We also have to take into account racing token revocations.
*/
for(i=0; i<fidIndex; i++) {
+ if ((&statsp[i])->errorCode)
+ continue;
afid.Cell = adp->fid.Cell;
afid.Fid.Volume = adp->fid.Fid.Volume;
afid.Fid.Vnode = fidsp[i].Vnode;
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0/* !stats&!lru*/);
+ tvcp = afs_FindVCache(&afid, &retry, 0/* !stats&!lru*/);
ReleaseReadLock(&afs_xvcache);
} while (tvcp && retry);
}
if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
|| (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent4");
+ { refpanic ("Bulkstat VLRU inconsistent4"); }
if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
|| (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent5");
+ { refpanic ("Bulkstat VLRU inconsistent5"); }
if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
QRemove(&tvcp->vlruq);
}
if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
|| (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent5");
+ { refpanic ("Bulkstat VLRU inconsistent5"); }
if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
|| (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent6");
+ { refpanic ("Bulkstat VLRU inconsistent6"); }
ReleaseWriteLock(&afs_xvcache);
+ ObtainWriteLock(&afs_xcbhash, 494);
+
/* We need to check the flags again. We may have missed
* something while we were waiting for a lock.
*/
if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
+ ReleaseWriteLock(&afs_xcbhash);
afs_PutVCache(tvcp);
continue;
}
tvcp->v.v_op = &afs_symlink_iops;
#endif
- ObtainWriteLock(&afs_xcbhash, 494);
-
- /* We need to check the flags once more. We may have missed
- * something while we were waiting for a lock.
- */
- if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
- flagIndex++;
- ReleaseWriteLock(&afs_xcbhash);
- ReleaseWriteLock(&tvcp->lock);
- afs_PutVCache(tvcp);
- continue;
- }
-
/* do some accounting for bulk stats: mark this entry as
* loaded, so we can tell if we use it before it gets
* recycled.
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0/* !stats&!lru*/);
+ tvcp = afs_FindVCache(&afid, &retry, 0/* !stats&!lru*/);
ReleaseReadLock(&afs_xvcache);
} while (tvcp && retry);
if (tvcp != NULL
if ( volp )
afs_PutVolume(volp, READ_LOCK);
+ /* If we did the InlineBulk RPC pull out the return code */
+ if (inlinebulk) {
+ if ((&statsp[0])->errorCode) {
+ afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
+ AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
+ NULL);
+ code = (&statsp[0])->errorCode;
+ }
+ } else {
+ code = 0;
+ }
osi_FreeLargeSpace(statMemp);
osi_FreeLargeSpace(cbfMemp);
+ return code;
}
/* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
-int AFSDOBULK = 1;
+static int AFSDOBULK = 1;
#ifdef AFS_OSF_ENV
afs_lookup(adp, ndp)
int flags;
struct vnode *rdir;
#else
+#if defined(UKERNEL)
+afs_lookup(adp, aname, avcp, acred, flags)
+ int flags;
+#else
afs_lookup(adp, aname, avcp, acred)
-#endif
+#endif /* UKERNEL */
+#endif /* SUN5 || SGI */
OSI_VC_DECL(adp);
struct vcache **avcp;
char *aname;
struct AFS_UCRED *acred; {
#endif
struct vrequest treq;
- char *tname = (char *)0;
+ char *tname = NULL;
register struct vcache *tvc=0;
register afs_int32 code;
+ register afs_int32 bulkcode = 0;
int pass = 0, hit = 0;
long dirCookie;
extern afs_int32 afs_mariner; /*Writing activity to log?*/
OSI_VC_CONVERT(adp)
afs_hyper_t versionNo;
+ int no_read_access = 0;
+ struct sysname_info sysState; /* used only for @sys checking */
+ int dynrootRetry = 1;
+ struct afs_fakestat_state fakestate;
+ int tryEvalOnly = 0;
AFS_STATCNT(afs_lookup);
+ afs_InitFakeStat(&fakestate);
+
+ if ((code = afs_InitReq(&treq, acred)))
+ goto done;
+
#ifdef AFS_OSF_ENV
- ndp->ni_dvp = (struct vnode *)adp;
- bcopy(ndp->ni_ptr, aname, ndp->ni_namelen);
+ ndp->ni_dvp = AFSTOV(adp);
+ memcpy(aname, ndp->ni_ptr, ndp->ni_namelen);
aname[ndp->ni_namelen] = '\0';
#endif /* AFS_OSF_ENV */
- *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
-
- if (code = afs_InitReq(&treq, acred)) {
- goto done;
+#if defined(AFS_DARWIN_ENV)
+ /* Workaround for MacOSX Finder, which tries to look for
+ * .DS_Store and Contents under every directory.
+ */
+ if (afs_fakestat_enable && adp->mvstat == 1) {
+ if (strcmp(aname, ".DS_Store") == 0)
+ tryEvalOnly = 1;
+ if (strcmp(aname, "Contents") == 0)
+ tryEvalOnly = 1;
}
+#endif
- /* lookup the name aname in the appropriate dir, and return a cache entry
- on the resulting fid */
+ if (tryEvalOnly)
+ code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
+ else
+ code = afs_EvalFakeStat(&adp, &fakestate, &treq);
+ if (tryEvalOnly && adp->mvstat == 1)
+ code = ENOENT;
+ if (code)
+ goto done;
- /*
- * check for, and handle "@sys" if it's there. We should be able
- * to avoid the alloc and the strcpy with a little work, but it's
- * not pressing. If there aren't any remote users (ie, via the
- * NFS translator), we have a slightly easier job.
- * the faster way to do this is to check for *aname == '@' and if
- * it's there, check for @sys, otherwise, assume there's no @sys
- * then, if the lookup fails, check for .*@sys...
- */
- if (!AFS_EQ_ATSYS(aname)) {
- tname = aname;
- }
- else {
- tname = (char *) osi_AllocLargeSpace(AFS_SMALLOCSIZ);
- if (!afs_nfsexporter)
- strcpy(tname, (afs_sysname ? afs_sysname : SYS_NAME ));
- else {
- register struct unixuser *au;
- register afs_int32 error;
- au = afs_GetUser(treq.uid, adp->fid.Cell, 0); afs_PutUser(au, 0);
- if (au->exporter) {
- error = EXP_SYSNAME(au->exporter, (char *)0, tname);
- if (error)
- strcpy(tname, "@sys");
- } else {
- strcpy(tname, (afs_sysname ? afs_sysname : SYS_NAME ));
- }
- }
- }
+ *avcp = NULL; /* Since some callers don't initialize it */
/* come back to here if we encounter a non-existent object in a read-only
volume's directory */
redo:
- *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
+ *avcp = NULL; /* Since some callers don't initialize it */
+ bulkcode = 0;
if (!(adp->states & CStatd)) {
- if (code = afs_VerifyVCache2(adp, &treq))
- goto done;
+ if ((code = afs_VerifyVCache2(adp, &treq))) {
+ goto done;
+ }
}
else code = 0;
/* watch for ".." in a volume root */
- if (adp->mvstat == 2 && tname[0] == '.' && tname[1] == '.' && !tname[2]) {
+ if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
/* looking up ".." in root via special hacks */
if (adp->mvid == (struct VenusFid *) 0 || adp->mvid->Fid.Volume == 0) {
#ifdef AFS_OSF_ENV
extern struct vcache *afs_globalVp;
if (adp == afs_globalVp) {
- struct vnode *rvp = (struct vnode *)adp;
+ struct vnode *rvp = AFSTOV(adp);
/*
ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
ndp->ni_dvp = ndp->ni_vp;
goto done;
}
/* otherwise we have the fid here, so we use it */
- tvc = afs_GetVCache(adp->mvid, &treq, (afs_int32 *)0,
- (struct vcache*)0, 0);
+ tvc = afs_GetVCache(adp->mvid, &treq, NULL, NULL);
afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT,
ICL_TYPE_FID, adp->mvid, ICL_TYPE_POINTER, tvc,
ICL_TYPE_INT32, code);
*avcp = tvc;
code = (tvc ? 0 : ENOENT);
hit = 1;
- if (tvc && !tvc->vrefCount) {
+ if (tvc && !VREFCOUNT(tvc)) {
osi_Panic("TT1");
}
if (code) {
/* now check the access */
if (treq.uid != adp->last_looker) {
if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
- *avcp = (struct vcache *)0;
+ *avcp = NULL;
code = EACCES;
goto done;
}
else adp->last_looker = treq.uid;
}
+ /* Check for read access as well. We need read access in order to
+ stat files, but not to stat subdirectories. */
+ if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
+ no_read_access = 1;
/* special case lookup of ".". Can we check for it sooner in this code,
* for instance, way up before "redo:" ??
* I'm not fiddling with the LRUQ here, either, perhaps I should, or else
* invent a lightweight version of GetVCache.
*/
- if (tname[0] == '.' && !tname[1]) { /* special case */
+ if (aname[0] == '.' && !aname[1]) { /* special case */
ObtainReadLock(&afs_xvcache);
osi_vnhold(adp, 0);
ReleaseReadLock(&afs_xvcache);
- code = 0;
- *avcp = tvc = adp;
- hit = 1;
- if (adp && !adp->vrefCount) {
+ code = 0;
+ *avcp = tvc = adp;
+ hit = 1;
+ if (adp && !VREFCOUNT(adp)) {
osi_Panic("TT2");
}
- goto done;
+ goto done;
}
+ Check_AtSys(adp, aname, &sysState, &treq);
+ tname = sysState.name;
+
+ /* 1st Check_AtSys and lookup by tname is required here, for now,
+ because the dnlc is *not* told to remove entries for the parent
+ dir of file/dir op that afs_LocalHero likes, but dnlc is informed
+ if the cached entry for the parent dir is invalidated for a
+ non-local change.
+ Otherwise, we'd be able to do a dnlc lookup on an entry ending
+ w/@sys and know the dnlc was consistent with reality. */
tvc = osi_dnlc_lookup (adp, tname, WRITE_LOCK);
*avcp = tvc; /* maybe wasn't initialized, but it is now */
-#ifdef AFS_LINUX22_ENV
if (tvc) {
- if (tvc->mvstat == 2) { /* we don't trust the dnlc for root vcaches */
- AFS_RELE(tvc);
- *avcp = 0;
- }
- else {
+ if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
+ /* need read access on dir to stat non-directory / non-link */
+ afs_PutVCache(tvc);
+ *avcp = NULL;
+ code = EACCES;
+ goto done;
+ }
+#ifdef AFS_LINUX22_ENV
+ if (tvc->mvstat == 2) { /* we don't trust the dnlc for root vcaches */
+ AFS_RELE(tvc);
+ *avcp = 0;
+ }
+ else {
+ code = 0;
+ hit = 1;
+ goto done;
+ }
+#else /* non - LINUX */
code = 0;
hit = 1;
goto done;
- }
- }
-#else /* non - LINUX */
- if (tvc) {
- code = 0;
- hit = 1;
- goto done;
- }
#endif /* linux22 */
+ }
{
register struct dcache *tdc;
- afs_int32 dirOffset, dirLen;
+ afs_size_t dirOffset, dirLen;
ino_t theDir;
struct VenusFid tfid;
/* now we have to lookup the next fid */
- tdc = afs_GetDCache(adp, 0, &treq, &dirOffset, &dirLen, 1);
+ tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
if (!tdc) {
- *avcp = (struct vcache *)0; /* redundant, but harmless */
+ *avcp = NULL; /* redundant, but harmless */
code = EIO;
goto done;
}
/* now we will just call dir package with appropriate inode.
Dirs are always fetched in their entirety for now */
- /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
/*
* Make sure that the data in the cache is current. There are two
* 2. The cache data is no longer valid
*/
while ((adp->states & CStatd)
- && (tdc->flags & DFFetching)
+ && (tdc->dflags & DFFetching)
&& hsame(adp->m.DataVersion, tdc->f.versionNo)) {
- tdc->flags |= DFWaiting;
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(tdc);
goto redo;
/* Save the version number for when we call osi_dnlc_enter */
hset(versionNo, tdc->f.versionNo);
+ /*
+ * check for, and handle "@sys" if it's there. We should be able
+ * to avoid the alloc and the strcpy with a little work, but it's
+ * not pressing. If there aren't any remote users (ie, via the
+ * NFS translator), we have a slightly easier job.
+ * the faster way to do this is to check for *aname == '@' and if
+ * it's there, check for @sys, otherwise, assume there's no @sys
+ * then, if the lookup fails, check for .*@sys...
+ */
+ /* above now implemented by Check_AtSys and Next_AtSys */
+
+ /* lookup the name in the appropriate dir, and return a cache entry
+ on the resulting fid */
theDir = tdc->f.inode;
- code = afs_dir_LookupOffset(&theDir, tname, &tfid.Fid, &dirCookie);
- if (code == ENOENT && tname == aname) {
- int len;
- len = strlen(aname);
- if (len >= 4 && AFS_EQ_ATSYS(aname+len-4)) {
- tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
- afs_HandleAtName(aname, tname, &treq, adp);
- code = afs_dir_LookupOffset(&theDir, tname, &tfid.Fid, &dirCookie);
- }
+ code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
+
+ /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
+ while (code == ENOENT && Next_AtSys(adp, &treq, &sysState)) {
+ code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
}
- ReleaseReadLock(&adp->lock);
+ tname = sysState.name;
+
+ ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
+ if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
+ ReleaseReadLock(&adp->lock);
+ dynrootRetry = 0;
+ if (tname[0] == '.')
+ afs_LookupAFSDB(tname + 1);
+ else
+ afs_LookupAFSDB(tname);
+ if (tname && tname != aname) osi_FreeLargeSpace(tname);
+ goto redo;
+ } else {
+ ReleaseReadLock(&adp->lock);
+ }
+
/* new fid has same cell and volume */
tfid.Cell = adp->fid.Cell;
tfid.Fid.Volume = adp->fid.Fid.Volume;
/* prefetch some entries, if the dir is currently open. The variable
* dirCookie tells us where to start prefetching from.
*/
- if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign)) {
+ if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign) && !afs_IsDynroot(adp)) {
afs_int32 retry;
/* if the entry is not in the cache, or is in the cache,
* but hasn't been statd, then do a bulk stat operation.
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvc = afs_FindVCache(&tfid, 1, 0, &retry, 0/* !stats,!lru */);
+ tvc = afs_FindVCache(&tfid, &retry, 0/* !stats,!lru */);
ReleaseReadLock(&afs_xvcache);
} while (tvc && retry);
- if (!tvc || !(tvc->states & CStatd)) {
- afs_DoBulkStat(adp, dirCookie, &treq);
- }
+ if (!tvc || !(tvc->states & CStatd))
+ bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
+ else
+ bulkcode = 0;
/* if the vcache isn't usable, release it */
if (tvc && !(tvc->states & CStatd)) {
afs_PutVCache(tvc);
- tvc = (struct vcache *) 0;
+ tvc = NULL;
}
+ } else {
+ tvc = NULL;
+ bulkcode = 0;
}
- else tvc = (struct vcache *) 0;
-
+
/* now get the status info, if we don't already have it */
/* This is kind of weird, but we might wind up accidentally calling
* RXAFS_Lookup because we happened upon a file which legitimately
* has a 0 uniquifier. That is the result of allowing unique to wrap
- * to 0. This was fixed in AFS 3.4. For CForeigh, Unique == 0 means that
+ * to 0. This was fixed in AFS 3.4. For CForeign, Unique == 0 means that
* the file has not yet been looked up.
*/
if (!tvc) {
afs_int32 cached = 0;
if (!tfid.Fid.Unique && (adp->states & CForeign)) {
- tvc = afs_LookupVCache(&tfid, &treq, &cached, WRITE_LOCK,
- adp, tname);
+ tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
+ }
+ if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
+ tvc = afs_GetVCache(&tfid, &treq, &cached, NULL);
}
- if (!tvc) { /* lookup failed or wasn't called */
- tvc = afs_GetVCache(&tfid, &treq, &cached, (struct vcache*)0,
- WRITE_LOCK);
- }
} /* if !tvc */
} /* sub-block just to reduce stack usage */
if (tvc) {
- if (adp->states & CForeign)
+ int force_eval = afs_fakestat_enable ? 0 : 1;
+
+ if (adp->states & CForeign)
tvc->states |= CForeign;
tvc->parentVnode = adp->fid.Fid.Vnode;
tvc->parentUnique = adp->fid.Fid.Unique;
tvc->states &= ~CBulkStat;
- if (tvc->mvstat == 1) {
- /* a mt point, possibly unevaluated */
- struct volume *tvolp;
+
+ if (afs_fakestat_enable == 2 && tvc->mvstat == 1) {
+ ObtainSharedLock(&tvc->lock, 680);
+ if (!tvc->linkData) {
+ UpgradeSToWLock(&tvc->lock, 681);
+ code = afs_HandleLink(tvc, &treq);
+ ConvertWToRLock(&tvc->lock);
+ } else {
+ ConvertSToRLock(&tvc->lock);
+ code = 0;
+ }
+ if (!code && !strchr(tvc->linkData, ':'))
+ force_eval = 1;
+ ReleaseReadLock(&tvc->lock);
+ }
+
+#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
+ if (!(flags & AFS_LOOKUP_NOEVAL))
+ /* don't eval mount points */
+#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
+ if (tvc->mvstat == 1 && force_eval) {
+ /* a mt point, possibly unevaluated */
+ struct volume *tvolp;
ObtainWriteLock(&tvc->lock,133);
code = EvalMountPoint(tvc, adp, &tvolp, &treq);
ReleaseWriteLock(&tvc->lock);
+
+ if (code) {
+ afs_PutVCache(tvc);
+ if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
+ goto done;
+ }
+
/* next, we want to continue using the target of the mt point */
if (tvc->mvid && (tvc->states & CMValid)) {
struct vcache *uvc;
if (tvolp && (tvolp->states & VForeign)) {
/* XXXX tvolp has ref cnt on but not locked! XXX */
- tvc = afs_GetRootVCache(tvc->mvid, &treq, (afs_int32 *)0, tvolp, WRITE_LOCK);
+ tvc = afs_GetRootVCache(tvc->mvid, &treq, NULL, tvolp);
} else {
- tvc = afs_GetVCache(tvc->mvid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvc = afs_GetVCache(tvc->mvid, &treq, NULL, NULL);
}
- afs_PutVCache(uvc, WRITE_LOCK); /* we're done with it */
+ afs_PutVCache(uvc); /* we're done with it */
if (!tvc) {
code = ENOENT;
* ptr to point back to the appropriate place */
if (tvolp) {
ObtainWriteLock(&tvc->lock,134);
- if (tvc->mvid == (struct VenusFid *) 0) {
+ if (tvc->mvid == NULL) {
tvc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
}
/* setup backpointer */
}
}
else {
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
code = ENOENT;
if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
goto done;
}
}
*avcp = tvc;
- if (tvc && !tvc->vrefCount) {
+ if (tvc && !VREFCOUNT(tvc)) {
osi_Panic("TT3");
}
code = 0;
/* Handle RENAME; only need to check rename "." */
if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
if (!FidCmp(&(tvc->fid), &(adp->fid))) {
- afs_PutVCache(*avcp, WRITE_LOCK);
+ afs_PutVCache(*avcp);
*avcp = NULL;
+ afs_PutFakeStat(&fakestate);
return afs_CheckCode(EISDIR, &treq, 18);
}
}
if (afs_mariner)
afs_AddMarinerName(aname, tvc);
+
+#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
+ if (!(flags & AFS_LOOKUP_NOEVAL))
+ /* Here we don't enter the name into the DNLC because we want the
+ evaluated mount dir to be there (the vcache for the mounted volume)
+ rather than the vc of the mount point itself. we can still find the
+ mount point's vc in the vcache by its fid. */
+#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
if (!hit) {
osi_dnlc_enter (adp, aname, tvc, &versionNo);
}
/* So Linux inode cache is up to date. */
code = afs_VerifyVCache(tvc, &treq);
#else
+ afs_PutFakeStat(&fakestate);
return 0; /* can't have been any errors if hit and !code */
#endif
}
}
+ if (bulkcode) code = bulkcode; else
code = afs_CheckCode(code, &treq, 19);
if (code) {
/* If there is an error, make sure *avcp is null.
* Alphas panic otherwise - defect 10719.
*/
- *avcp = (struct vcache *)0;
+ *avcp = NULL;
}
+ afs_PutFakeStat(&fakestate);
return code;
}