* what "@sys" is in binary... */
#define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
-/* call under write lock, evaluate mvid field from a mt pt.
+/* call under write lock, evaluate mvid.target_root field from a mt pt.
* avc is the vnode of the mount point object; must be write-locked.
* advc is the vnode of the containing directory (optional; if NULL and
* EvalMountPoint succeeds, caller must initialize *avolpp->dotdot)
*/
static int
EvalMountData(char type, char *data, afs_uint32 states, afs_uint32 cellnum,
- struct volume **avolpp, register struct vrequest *areq,
+ struct volume **avolpp, struct vrequest *areq,
afs_uint32 *acellidxp, afs_uint32 *avolnump,
afs_uint32 *avnoidp, afs_uint32 *auniqp)
{
struct volume *tvp = 0;
struct VenusFid tfid;
struct cell *tcell;
- char *cpos, *volnamep;
- char *buf, *endptr;
+ char *cpos, *volnamep = NULL;
+ char *endptr;
afs_int32 prefetch; /* 1=>None 2=>RO 3=>BK */
afs_int32 mtptCell, assocCell = 0, hac = 0;
afs_int32 samecell, roname, len;
/* Start by figuring out and finding the cell */
cpos = afs_strchr(data, ':'); /* if cell name present */
if (cpos) {
+ afs_uint32 mtptCellnum;
volnamep = cpos + 1;
*cpos = 0;
- if ((afs_strtoi_r(data, &endptr, &cellnum) == 0) &&
- (endptr == cpos))
- tcell = afs_GetCell(cellnum, READ_LOCK);
- else {
+ if ((afs_strtoi_r(data, &endptr, &mtptCellnum) == 0) &&
+ (endptr == cpos)) {
+ tcell = afs_GetCell(mtptCellnum, READ_LOCK);
+ } else {
tcell = afs_GetCellByName(data, READ_LOCK);
- cellnum = 0;
}
*cpos = ':';
} else if (cellnum) {
* in the dynamic mount directory.
*/
if (volid && !avolpp) {
- if (*cpos)
+ if (cpos)
*cpos = ':';
goto done;
}
WRITE_LOCK);
}
- /* Still not found. If we are looking for the RO, then perhaps the RW
- * doesn't exist? Try adding ".readonly" to volname and look for that.
- * Don't know why we do this. Would have still found it in above call - jpm.
- */
- if (!tvp && (prefetch == 2) && len < AFS_SMALLOCSIZ - 10) {
- buf = (char *)osi_AllocSmallSpace(len + 10);
-
- strcpy(buf, volnamep);
- afs_strcat(buf, ".readonly");
-
- tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
-
- /* Try the associated linked cell if failed */
- if (!tvp && hac && areq->volumeError) {
- tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
- }
- osi_FreeSmallSpace(buf);
- }
/* done with volname */
if (cpos)
*cpos = ':';
}
int
-EvalMountPoint(register struct vcache *avc, struct vcache *advc,
- struct volume **avolpp, register struct vrequest *areq)
+EvalMountPoint(struct vcache *avc, struct vcache *advc,
+ struct volume **avolpp, struct vrequest *areq)
{
afs_int32 code;
afs_uint32 avnoid, auniq;
AFS_STATCNT(EvalMountPoint);
-#ifdef notdef
- if (avc->mvid && (avc->f.states & CMValid))
- return 0; /* done while racing */
-#endif
*avolpp = NULL;
code = afs_HandleLink(avc, areq);
if (code)
if (!auniq)
auniq = 1;
- if (avc->mvid == 0)
- avc->mvid =
- (struct VenusFid *)osi_AllocSmallSpace(sizeof(struct VenusFid));
- avc->mvid->Cell = (*avolpp)->cell;
- avc->mvid->Fid.Volume = (*avolpp)->volume;
- avc->mvid->Fid.Vnode = avnoid;
- avc->mvid->Fid.Unique = auniq;
+ if (avc->mvid.target_root == NULL)
+ avc->mvid.target_root = osi_AllocSmallSpace(sizeof(struct VenusFid));
+ avc->mvid.target_root->Cell = (*avolpp)->cell;
+ avc->mvid.target_root->Fid.Volume = (*avolpp)->volume;
+ avc->mvid.target_root->Fid.Vnode = avnoid;
+ avc->mvid.target_root->Fid.Unique = auniq;
avc->f.states |= CMValid;
/* Used to: if the mount point is stored within a backup volume,
state->did_eval = 1;
tvc = *avcp;
- if (tvc->mvstat != 1)
+ if (tvc->mvstat != AFS_MVSTAT_MTPT)
return 0;
if (canblock) {
tvolp->dotdot.Fid.Unique = tvc->f.parent.unique;
}
}
- if (tvc->mvid && (tvc->f.states & CMValid)) {
+ if (tvc->mvid.target_root && (tvc->f.states & CMValid)) {
if (!canblock) {
afs_int32 retry;
do {
retry = 0;
- ObtainWriteLock(&afs_xvcache, 597);
- root_vp = afs_FindVCache(tvc->mvid, &retry, IS_WLOCK);
+ ObtainReadLock(&afs_xvcache);
+ root_vp = afs_FindVCache(tvc->mvid.target_root, &retry, 0);
if (root_vp && retry) {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseReadLock(&afs_xvcache);
afs_PutVCache(root_vp);
}
} while (root_vp && retry);
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseReadLock(&afs_xvcache);
} else {
- root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL);
+ root_vp = afs_GetVCache(tvc->mvid.target_root, areq);
}
if (!root_vp) {
- code = canblock ? ENOENT : 0;
+ code = canblock ? EIO : 0;
goto done;
}
#ifdef AFS_DARWIN80_ENV
* NBObtainWriteLock to avoid potential deadlock.
*/
ObtainWriteLock(&root_vp->lock, 598);
- if (!root_vp->mvid)
- root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
- *root_vp->mvid = tvolp->dotdot;
+ if (!root_vp->mvid.parent)
+ root_vp->mvid.parent = osi_AllocSmallSpace(sizeof(struct VenusFid));
+ *root_vp->mvid.parent = tvolp->dotdot;
ReleaseWriteLock(&root_vp->lock);
}
state->need_release = 1;
*avcp = root_vp;
code = 0;
} else {
- code = canblock ? ENOENT : 0;
+ code = canblock ? EIO : 0;
}
done:
}
int
-afs_ENameOK(register char *aname)
+afs_ENameOK(char *aname)
{
- register int tlen;
+ int tlen;
AFS_STATCNT(ENameOK);
tlen = strlen(aname);
}
static int
-afs_getsysname(register struct vrequest *areq, register struct vcache *adp,
- register char *bufp, int *num, char **sysnamelist[])
+afs_getsysname(struct vrequest *areq, struct vcache *adp,
+ char *bufp, int *num, char **sysnamelist[])
{
- register struct unixuser *au;
- register afs_int32 error;
+ struct unixuser *au;
+ afs_int32 error;
AFS_STATCNT(getsysname);
if (!afs_nfsexporter)
strcpy(bufp, (*sysnamelist)[0]);
else {
- au = afs_GetUser(areq->uid, adp->f.fid.Cell, 0);
+ au = afs_GetUser(areq->uid, adp->f.fid.Cell, READ_LOCK);
if (au->exporter) {
error = EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, num, 0);
if (error) {
strcpy(bufp, "@sys");
- afs_PutUser(au, 0);
+ afs_PutUser(au, READ_LOCK);
return -1;
} else {
strcpy(bufp, (*sysnamelist)[0]);
}
} else
strcpy(bufp, afs_sysname);
- afs_PutUser(au, 0);
+ afs_PutUser(au, READ_LOCK);
}
return 0;
}
void
-Check_AtSys(register struct vcache *avc, const char *aname,
+Check_AtSys(struct vcache *avc, const char *aname,
struct sysname_info *state, struct vrequest *areq)
{
int num = 0;
if (AFS_EQ_ATSYS(aname)) {
state->offset = 0;
- state->name = (char *)osi_AllocLargeSpace(MAXSYSNAME);
+ state->name = osi_AllocLargeSpace(MAXSYSNAME);
state->allocked = 1;
state->index =
afs_getsysname(areq, avc, state->name, &num, sysnamelist);
}
int
-Next_AtSys(register struct vcache *avc, struct vrequest *areq,
+Next_AtSys(struct vcache *avc, struct vrequest *areq,
struct sysname_info *state)
{
int num = afs_sysnamecount;
/* Check for the initial state of aname != "@sys" in Check_AtSys */
if (state->offset == -1 && state->allocked == 0) {
- register char *tname;
+ char *tname;
/* Check for .*@sys */
for (tname = state->name; *tname; tname++)
if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname - 4))) {
state->offset = (tname - 4) - state->name;
- tname = (char *)osi_AllocLargeSpace(AFS_LRALLOCSIZ);
+ tname = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
strncpy(tname, state->name, state->offset);
state->name = tname;
state->allocked = 1;
} else
return 0; /* .*@sys doesn't match either */
} else {
- register struct unixuser *au;
- register afs_int32 error;
+ struct unixuser *au;
+ afs_int32 error;
*sysnamelist = afs_sysnamelist;
if (afs_nfsexporter) {
- au = afs_GetUser(areq->uid, avc->f.fid.Cell, 0);
+ au = afs_GetUser(areq->uid, avc->f.fid.Cell, READ_LOCK);
if (au->exporter) {
error =
EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, &num, 0);
if (error) {
- afs_PutUser(au, 0);
+ afs_PutUser(au, READ_LOCK);
return 0;
}
}
- afs_PutUser(au, 0);
+ afs_PutUser(au, READ_LOCK);
}
if (++(state->index) >= num || !(*sysnamelist)[(unsigned int)state->index])
return 0; /* end of list */
return 1;
}
-extern int BlobScan(struct dcache * afile, afs_int32 ablob);
+/*
+ * Sanity-check the output arrays of a BulkStatus/InlineBulkStatus reply.
+ *
+ * tc       - connection the RPC was made over; used to mark the server
+ *            as misbehaving (afs_BadFetchStatus) on a malformed reply
+ * nFids    - number of fids we sent; both reply arrays must be this long
+ * statParm - returned per-fid status array
+ * cbParm   - returned per-fid callback array
+ *
+ * Returns 0 if the reply looks sane, VBUSY if the reply array lengths do
+ * not match the request (so the caller treats it as a retryable server
+ * failure), or the nonzero result of afs_CheckFetchStatus for a bad
+ * individual status entry.
+ */
+static int
+afs_CheckBulkStatus(struct afs_conn *tc, int nFids, AFSBulkStats *statParm,
+                    AFSCBs *cbParm)
+{
+    int i;
+    int code;
+
+    /* A server returning the wrong number of entries is broken; log it,
+     * flag the server, and have the caller retry elsewhere. */
+    if (statParm->AFSBulkStats_len != nFids || cbParm->AFSCBs_len != nFids) {
+        afs_warn("afs: BulkFetchStatus length %u/%u, expected %u\n",
+                 (unsigned)statParm->AFSBulkStats_len,
+                 (unsigned)cbParm->AFSCBs_len, nFids);
+        afs_BadFetchStatus(tc);
+        return VBUSY;
+    }
+    for (i = 0; i < nFids; i++) {
+        if (statParm->AFSBulkStats_val[i].errorCode) {
+            /* Per-fid errors (InlineBulk) are the caller's business; only
+             * validate entries the server claims succeeded. */
+            continue;
+        }
+        code = afs_CheckFetchStatus(tc, &statParm->AFSBulkStats_val[i]);
+        if (code) {
+            return code;
+        }
+    }
+
+    return 0;
+}
+
+extern int BlobScan(struct dcache * afile, afs_int32 ablob, afs_int32 *ablobOut);
/* called with an unlocked directory and directory cookie. Areqp
* describes who is making the call.
{
int nentries; /* # of entries to prefetch */
int nskip; /* # of slots in the LRU queue to skip */
- int novlru = 0; /* Currently Darwin-only but can be used
- globally if needed */
#ifdef AFS_DARWIN80_ENV
int npasses = 0;
struct vnode *lruvp;
long startTime; /* time we started the call,
* for callback expiration base
*/
+#if defined(AFS_DARWIN_ENV)
+ int ftype[4] = {VNON, VREG, VDIR, VLNK}; /* verify type is as expected */
+#endif
afs_size_t statSeqNo = 0; /* Valued of file size to detect races */
int code; /* error code */
- long newIndex; /* new index in the dir */
+ afs_int32 newIndex; /* new index in the dir */
+ struct DirBuffer entry; /* Buffer for dir manipulation */
struct DirEntry *dirEntryp; /* dir entry we are examining */
int i;
struct VenusFid afid; /* file ID we are using now */
struct volume *volp = 0; /* volume ptr */
struct VenusFid dotdot = {0, {0, 0, 0}};
int flagIndex = 0; /* First file with bulk fetch flag set */
- int inlinebulk = 0; /* Did we use InlineBulk RPC or not? */
+ struct rx_connection *rxconn;
+ int attempt_i;
XSTATS_DECLS;
dotdot.Cell = 0;
dotdot.Fid.Unique = 0;
* one for fids and callbacks, and one for stat info. Well set
* up our pointers to the memory from there, too.
*/
- statsp = (AFSFetchStatus *)
- osi_Alloc(AFSCBMAX * sizeof(AFSFetchStatus));
- fidsp = (AFSFid *) osi_AllocLargeSpace(nentries * sizeof(AFSFid));
- cbsp = (AFSCallBack *)
- osi_Alloc(AFSCBMAX * sizeof(AFSCallBack));
+ statsp = osi_Alloc(AFSCBMAX * sizeof(AFSFetchStatus));
+ fidsp = osi_AllocLargeSpace(nentries * sizeof(AFSFid));
+ cbsp = osi_Alloc(AFSCBMAX * sizeof(AFSCallBack));
/* next, we must iterate over the directory, starting from the specified
* cookie offset (dirCookie), and counting out nentries file entries.
dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
if (!dcp) {
- code = ENOENT;
+ code = EIO;
goto done2;
}
*/
while ((adp->f.states & CStatd)
&& (dcp->dflags & DFFetching)
- && hsame(adp->f.m.DataVersion, dcp->f.versionNo)) {
+ && afs_IsDCacheFresh(dcp, adp)) {
afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
__FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER, dcp,
ICL_TYPE_INT32, dcp->dflags);
ObtainReadLock(&dcp->lock);
}
if (!(adp->f.states & CStatd)
- || !hsame(adp->f.m.DataVersion, dcp->f.versionNo)) {
+ || !afs_IsDCacheFresh(dcp, adp)) {
ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(dcp);
/* now we have dir data in the cache, so scan the dir page */
fidIndex = 0;
flagIndex = 0;
- while (1) { /* Should probably have some constant bound */
+
+ /*
+ * Only examine at most the next 'nentries*4' entries to find dir entries
+ * to stat. This is an arbitrary limit that we set so we don't waste time
+ * scanning an entire dir that contains stat'd entries. For example, if a
+ * dir contains 10k entries, and all or almost all of them are stat'd, then
+ * we'll examine 10k entries for no benefit. For each entry, we run
+ * afs_FindVCache, and grab and release afs_xvcache; doing this e.g. 10k
+ * times can have significant impact if the client is under a lot of load.
+ */
+ for (attempt_i = 0; attempt_i < nentries * 4; attempt_i++) {
+
/* look for first safe entry to examine in the directory. BlobScan
* looks for a the 1st allocated dir after the dirCookie slot.
*/
- newIndex = BlobScan(dcp, (dirCookie >> 5));
- if (newIndex == 0)
+ code = BlobScan(dcp, (dirCookie >> 5), &newIndex);
+ if (code || newIndex == 0)
break;
/* remember the updated directory cookie */
dirCookie = newIndex << 5;
/* get a ptr to the dir entry */
- dirEntryp =
- (struct DirEntry *)afs_dir_GetBlob(dcp, newIndex);
- if (!dirEntryp)
+ code = afs_dir_GetBlob(dcp, newIndex, &entry);
+ if (code)
break;
+ dirEntryp = (struct DirEntry *)entry.data;
/* dont copy more than we have room for */
if (fidIndex >= nentries) {
- DRelease(dirEntryp, 0);
+ DRelease(&entry, 0);
break;
}
tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
do {
retry = 0;
- ObtainWriteLock(&afs_xvcache, 130);
- tvcp = afs_FindVCache(&tfid, &retry, IS_WLOCK /* no stats | LRU */ );
+ ObtainSharedLock(&afs_xvcache, 130);
+ tvcp = afs_FindVCache(&tfid, &retry, IS_SLOCK /* no stats | LRU */ );
if (tvcp && retry) {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseSharedLock(&afs_xvcache);
afs_PutVCache(tvcp);
}
} while (tvcp && retry);
if (!tvcp) { /* otherwise, create manually */
+ UpgradeSToWLock(&afs_xvcache, 129);
tvcp = afs_NewBulkVCache(&tfid, hostp, statSeqNo);
if (tvcp)
{
- ObtainWriteLock(&tvcp->lock, 505);
- ReleaseWriteLock(&afs_xvcache);
- afs_RemoveVCB(&tfid);
- ReleaseWriteLock(&tvcp->lock);
+ ObtainWriteLock(&tvcp->lock, 505);
+#ifdef AFS_DARWIN80_ENV
+ /* use even/odd hack to guess file versus dir.
+ let links be reaped. oh well. */
+ if (dirEntryp->fid.vnode & 1)
+ tvcp->f.m.Type = VDIR;
+ else
+ tvcp->f.m.Type = VREG;
+ /* finalize to a best guess */
+ afs_darwin_finalizevnode(tvcp, AFSTOV(adp), NULL, 0, 1);
+ /* re-acquire usecount that finalizevnode disposed of */
+ vnode_ref(AFSTOV(tvcp));
+#endif
+ ReleaseWriteLock(&afs_xvcache);
+ afs_RemoveVCB(&tfid);
+ ReleaseWriteLock(&tvcp->lock);
} else {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseWriteLock(&afs_xvcache);
}
} else {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseSharedLock(&afs_xvcache);
}
if (!tvcp)
{
- DRelease(dirEntryp, 0);
+ DRelease(&entry, 0);
ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(dcp);
* CBulkFetching state bit and the value in the file size.
* It is safe to set the status only if the CBulkFetching
* flag is still set and the value in the file size does
- * not change. NewBulkVCache sets us up.
+ * not change. NewBulkVCache sets us up for the new ones.
+ * Set up the rest here.
*
* Don't fetch status for dirty files. We need to
* preserve the value of the file size. We could
* flush the pages, but it wouldn't be worthwhile.
*/
+ if (!(tvcp->f.states & CBulkFetching)) {
+ tvcp->f.states |= CBulkFetching;
+ tvcp->f.m.Length = statSeqNo;
+ }
memcpy((char *)(fidsp + fidIndex), (char *)&tfid.Fid,
sizeof(*fidsp));
fidIndex++;
* used by this dir entry.
*/
temp = afs_dir_NameBlobs(dirEntryp->name) << 5;
- DRelease(dirEntryp, 0);
+ DRelease(&entry, 0);
if (temp <= 0)
break;
dirCookie += temp;
- } /* while loop over all dir entries */
+ } /* for loop over dir entries */
/* now release the dir lock and prepare to make the bulk RPC */
ReleaseReadLock(&dcp->lock);
/* start the timer; callback expirations are relative to this */
startTime = osi_Time();
- tcp = afs_Conn(&adp->f.fid, areqp, SHARED_LOCK);
+ tcp = afs_Conn(&adp->f.fid, areqp, SHARED_LOCK, &rxconn);
if (tcp) {
- hostp = tcp->srvr->server;
+ hostp = tcp->parent->srvr->server;
+
+ for (i = 0; i < fidIndex; i++) {
+ /* we must set tvcp->callback before the BulkStatus call, so
+ * we can detect concurrent InitCallBackState's */
+
+ afid.Cell = adp->f.fid.Cell;
+ afid.Fid.Volume = adp->f.fid.Fid.Volume;
+ afid.Fid.Vnode = fidsp[i].Vnode;
+ afid.Fid.Unique = fidsp[i].Unique;
+
+ do {
+ retry = 0;
+ ObtainReadLock(&afs_xvcache);
+ tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */);
+ ReleaseReadLock(&afs_xvcache);
+ } while (tvcp && retry);
+
+ if (!tvcp) {
+ continue;
+ }
+
+ if ((tvcp->f.states & CBulkFetching) &&
+ (tvcp->f.m.Length == statSeqNo)) {
+ tvcp->callback = hostp;
+ }
+
+ afs_PutVCache(tvcp);
+ tvcp = NULL;
+ }
+
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
- RX_AFS_GUNLOCK();
- if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
+ if (!(tcp->parent->srvr->server->flags & SNO_INLINEBULK)) {
+ RX_AFS_GUNLOCK();
code =
- RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
+ RXAFS_InlineBulkStatus(rxconn, &fidParm, &statParm,
&cbParm, &volSync);
+ RX_AFS_GLOCK();
if (code == RXGEN_OPCODE) {
- tcp->srvr->server->flags |= SNO_INLINEBULK;
- inlinebulk = 0;
+ tcp->parent->srvr->server->flags |= SNO_INLINEBULK;
+ RX_AFS_GUNLOCK();
code =
- RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
+ RXAFS_BulkStatus(rxconn, &fidParm, &statParm,
&cbParm, &volSync);
- } else
- inlinebulk = 1;
+ RX_AFS_GLOCK();
+ }
} else {
- inlinebulk = 0;
+ RX_AFS_GUNLOCK();
code =
- RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
+ RXAFS_BulkStatus(rxconn, &fidParm, &statParm, &cbParm,
&volSync);
+ RX_AFS_GLOCK();
}
- RX_AFS_GLOCK();
XSTATS_END_TIME;
+
+ if (code == 0) {
+ code = afs_CheckBulkStatus(tcp, fidIndex, &statParm, &cbParm);
+ }
} else
code = -1;
+ /* make sure we give afs_Analyze a chance to retry,
+ * but if the RPC succeeded we may have entries to merge.
+ * if we wipe code with one entry's status we get bogus failures.
+ */
} while (afs_Analyze
- (tcp, code, &adp->f.fid, areqp, AFS_STATS_FS_RPCIDX_BULKSTATUS,
+ (tcp, rxconn, code ? code : (&statsp[0])->errorCode,
+ &adp->f.fid, areqp, AFS_STATS_FS_RPCIDX_BULKSTATUS,
SHARED_LOCK, NULL));
/* now, if we didnt get the info, bail out. */
reskip:
nskip = afs_cacheStats / 2; /* preserved fraction of the cache */
ObtainReadLock(&afs_xvcache);
+#ifdef AFS_DARWIN80_ENV
reskip2:
+#endif
if (QEmpty(&VLRU)) {
/* actually a serious error, probably should panic. Probably will
* panic soon, oh well. */
for (tq = VLRU.next; tq != &VLRU; tq = QNext(tq)) {
if (--nskip <= 0) {
#ifdef AFS_DARWIN80_ENV
- if (!(QTOV(tq)->f.states & CDeadVnode))
+ if ((!(QTOV(tq)->f.states & CDeadVnode)&&!(QTOV(tq)->f.states & CVInit)))
#endif
break;
}
*/
retry = 0;
#ifdef AFS_DARWIN80_ENV
- if ((lruvcp->f.states & CDeadVnode)) {
+ if (((lruvcp->f.states & CDeadVnode)||(lruvcp->f.states & CVInit))) {
if (npasses == 0) {
nskip = 1;
npasses++;
goto reskip2;
- } else {
- afs_warn("Can't find non-dead vnode in VLRU\n");
- novlru = 1;
- }
+ } else
+ panic("Can't find non-dead vnode in VLRU\n");
}
- if (!novlru) {
- lruvp = AFSTOV(lruvcp);
- if (vnode_get(lruvp)) /* this bumps ref count */
- retry = 1;
- else if (vnode_ref(lruvp)) {
- AFS_GUNLOCK();
- /* AFSTOV(lruvcp) may be NULL */
- vnode_put(lruvp);
- AFS_GLOCK();
- retry = 1;
- }
+ lruvp = AFSTOV(lruvcp);
+ if (vnode_get(lruvp)) /* this bumps ref count */
+ retry = 1;
+ else if (vnode_ref(lruvp)) {
+ AFS_GUNLOCK();
+ /* AFSTOV(lruvcp) may be NULL */
+ vnode_put(lruvp);
+ AFS_GLOCK();
+ retry = 1;
}
#else
- if (novlru)
- osi_vnhold(lruvcp, &retry);
+ if (osi_vnhold(lruvcp) != 0) {
+ retry = 1;
+ }
#endif
ReleaseReadLock(&afs_xvcache); /* could be read lock */
if (retry)
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */ );
+ tvcp = afs_FindVCache(&afid, &retry, 0/* !stats&!lru */);
ReleaseReadLock(&afs_xvcache);
} while (tvcp && retry);
* matches the value we placed there when we set the CBulkFetching
* flag, then someone else has done something with this node,
* and we may not have the latest status information for this
- * file. Leave the entry alone.
+ * file. Leave the entry alone. There's also a file type
+ * change here, for OSX bulkstat support.
*/
- if (!(tvcp->f.states & CBulkFetching) || (tvcp->f.m.Length != statSeqNo)) {
-#ifdef AFS_DARWIN80_ENV
- int isdead = (tvcp->f.states & CDeadVnode);
+ if (!(tvcp->f.states & CBulkFetching)
+ || (tvcp->f.m.Length != statSeqNo)
+#if defined(AFS_DARWIN_ENV)
+ || (ftype[(&statsp[i])->FileType] != vType(tvcp))
#endif
+ ) {
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
-#ifdef AFS_DARWIN80_ENV
- if (!isdead)
- /* re-acquire the usecount that the other finalizevnode disposed of */
- vnode_ref(AFSTOV(tvcp));
-#endif
afs_PutVCache(tvcp);
continue;
}
/* now copy ".." entry back out of volume structure, if necessary */
- if (tvcp->mvstat == 2 && (dotdot.Fid.Volume != 0)) {
- if (!tvcp->mvid)
- tvcp->mvid = (struct VenusFid *)
- osi_AllocSmallSpace(sizeof(struct VenusFid));
- *tvcp->mvid = dotdot;
+ if (tvcp->mvstat == AFS_MVSTAT_ROOT && (dotdot.Fid.Volume != 0)) {
+ if (!tvcp->mvid.parent)
+ tvcp->mvid.parent = osi_AllocSmallSpace(sizeof(struct VenusFid));
+ *tvcp->mvid.parent = dotdot;
}
- if (!novlru) {
- ObtainWriteLock(&afs_xvcache, 132);
- if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
- refpanic("Bulkstat VLRU inconsistent2");
- }
- if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
- || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
- refpanic("Bulkstat VLRU inconsistent4");
- }
- if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
- || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
- refpanic("Bulkstat VLRU inconsistent5");
- }
-
- if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
- QRemove(&tvcp->vlruq);
- QAdd(&lruvcp->vlruq, &tvcp->vlruq);
- }
-
- if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
- refpanic("Bulkstat VLRU inconsistent3");
- }
- if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
- || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
- refpanic("Bulkstat VLRU inconsistent5");
- }
- if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
- || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
- refpanic("Bulkstat VLRU inconsistent6");
- }
- ReleaseWriteLock(&afs_xvcache);
+#ifdef AFS_DARWIN80_ENV
+ if (((lruvcp->f.states & CDeadVnode)||(lruvcp->f.states & CVInit)))
+ panic("vlru control point went dead\n");
+#endif
+
+ ObtainWriteLock(&afs_xvcache, 132);
+ if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
+ refpanic("Bulkstat VLRU inconsistent2");
+ }
+ if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
+ || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
+ refpanic("Bulkstat VLRU inconsistent4");
+ }
+ if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
+ || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
+ refpanic("Bulkstat VLRU inconsistent5");
+ }
+
+ if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
+ QRemove(&tvcp->vlruq);
+ QAdd(&lruvcp->vlruq, &tvcp->vlruq);
+ }
+
+ if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
+ refpanic("Bulkstat VLRU inconsistent3");
}
+ if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
+ || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
+ refpanic("Bulkstat VLRU inconsistent5");
+ }
+ if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
+ || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
+ refpanic("Bulkstat VLRU inconsistent6");
+ }
+ ReleaseWriteLock(&afs_xvcache);
ObtainWriteLock(&afs_xcbhash, 494);
*/
if (!(tvcp->f.states & CBulkFetching) || (tvcp->f.m.Length != statSeqNo)) {
flagIndex++;
-#ifdef AFS_DARWIN80_ENV
- if ((tvcp->f.states & CDeadVnode) == 0)
- /* re-acquire the usecount that the other finalizevnode disposed of */
- vnode_ref(AFSTOV(tvcp));
-#endif
ReleaseWriteLock(&tvcp->lock);
ReleaseWriteLock(&afs_xcbhash);
afs_PutVCache(tvcp);
tvcp->f.states |= CStatd;
afs_QueueCallback(tvcp, CBHash(3600), volp);
} else {
- tvcp->callback = 0;
- tvcp->f.states &= ~(CStatd | CUnique);
- afs_DequeueCallback(tvcp);
- if ((tvcp->f.states & CForeign) || (vType(tvcp) == VDIR))
- osi_dnlc_purgedp(tvcp); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvcp,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
- ReleaseWriteLock(&afs_xcbhash);
#ifdef AFS_DARWIN80_ENV
/* reclaim->FlushVCache will need xcbhash */
- if (tvcp->f.states & CDeadVnode) {
+ if (((tvcp->f.states & CDeadVnode)||(tvcp->f.states & CVInit))) {
+ ReleaseWriteLock(&afs_xcbhash);
/* passing in a parent hangs getting the vnode lock */
code = afs_darwin_finalizevnode(tvcp, NULL, NULL, 0, 1);
if (code) {
/* It's gonna get recycled - shouldn't happen */
- tvcp->callback = 0;
- tvcp->f.states &= ~(CStatd | CUnique);
- afs_DequeueCallback(tvcp);
- if ((tvcp->f.states & CForeign) || (vType(tvcp) == VDIR))
- osi_dnlc_purgedp(tvcp); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvcp,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
} else
/* re-acquire the usecount that finalizevnode disposed of */
vnode_ref(AFSTOV(tvcp));
- }
+ } else
#endif
+ ReleaseWriteLock(&afs_xcbhash);
ReleaseWriteLock(&tvcp->lock);
/* finally, we're done with the entry */
} /* for all files we got back */
/* finally return the pointer into the LRU queue */
- if (!novlru) {
#ifdef AFS_DARWIN80_ENV
- AFS_GUNLOCK();
- vnode_put(lruvp);
- vnode_rele(lruvp);
- AFS_GLOCK();
+ if (((lruvcp->f.states & CDeadVnode)||(lruvcp->f.states & CVInit)))
+ panic("vlru control point went dead before put\n");
+ AFS_GUNLOCK();
+ vnode_put(lruvp);
+ vnode_rele(lruvp);
+ AFS_GLOCK();
#else
- afs_PutVCache(lruvcp);
+ afs_PutVCache(lruvcp);
#endif
- }
done:
/* Be sure to turn off the CBulkFetching flags */
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */ );
+ tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */);
ReleaseReadLock(&afs_xvcache);
} while (tvcp && retry);
- if (tvcp != NULL && (tvcp->f.states & CBulkFetching)
- && (tvcp->f.m.Length == statSeqNo)) {
- tvcp->f.states &= ~CBulkFetching;
- }
if (tvcp != NULL) {
-#ifdef AFS_DARWIN80_ENV
- if ((tvcp->f.states & CDeadVnode) == 0)
- /* re-acquire the usecount that the other finalizevnode disposed of */
- vnode_ref(AFSTOV(tvcp));
-#endif
+ if ((tvcp->f.states & CBulkFetching)
+ && (tvcp->f.m.Length == statSeqNo)) {
+ tvcp->f.states &= ~CBulkFetching;
+ }
afs_PutVCache(tvcp);
}
}
if (volp)
afs_PutVolume(volp, READ_LOCK);
- /* If we did the InlineBulk RPC pull out the return code */
- if (inlinebulk && code == 0) {
- if ((&statsp[0])->errorCode) {
- afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->f.fid, areqp,
- AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL);
- code = (&statsp[0])->errorCode;
- }
- } else {
- code = 0;
- }
done2:
osi_FreeLargeSpace((char *)fidsp);
osi_Free((char *)statsp, AFSCBMAX * sizeof(AFSFetchStatus));
return code;
}
-/* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
-static int AFSDOBULK = 1;
+#ifdef AFS_DARWIN80_ENV
+int AFSDOBULK = 0;
+#endif
+
+/*
+ * Decide whether a lookup in directory 'adp' should attempt a bulk-stat
+ * prefetch of that directory's entries.
+ *
+ * adp - vcache of the directory being searched
+ *
+ * Returns 1 if bulk-stat is worth attempting, 0 if any disqualifying
+ * condition holds (Darwin bulkstat disabled, disconnected mode, the dir
+ * not held open, vcache pressure, foreign/dynroot dirs, or an
+ * in-progress readdir in this process).
+ */
+static int
+afs_ShouldTryBulkStat(struct vcache *adp)
+{
+#ifdef AFS_DARWIN80_ENV
+    if (!AFSDOBULK) {
+        /* Bulkstat is globally disabled on this platform. */
+        return 0;
+    }
+#endif
+    if (AFS_IS_DISCONNECTED) {
+        /* We can't prefetch entries if we're offline. */
+        return 0;
+    }
+    if (adp->opens < 1) {
+        /* Don't bother prefetching entries if nobody is holding the dir open
+         * while we're doing a lookup. */
+        return 0;
+    }
+    if (afs_VCacheStressed()) {
+        /* If we already have too many vcaches, don't create more vcaches we
+         * may not even use. */
+        return 0;
+    }
+    if ((adp->f.states & CForeign)) {
+        /* Don't bulkstat for dfs xlator dirs. */
+        return 0;
+    }
+    if (afs_IsDynroot(adp)) {
+        /* Don't prefetch dynroot entries; that's pointless, since we generate
+         * those locally. */
+        return 0;
+    }
+    if (afs_InReadDir(adp)) {
+        /* Don't bulkstat if we're in the middle of servicing a readdir() in
+         * the same process. */
+        return 0;
+    }
+    return 1;
+}
+
+/*
+ * Test whether a path component refers to the current directory.
+ *
+ * aname - NUL-terminated path component
+ *
+ * Returns 1 if 'aname' means "." (including, on Solaris, the empty
+ * string that can be passed for the root directory after chroot),
+ * 0 otherwise.
+ */
+static_inline int
+osi_lookup_isdot(const char *aname)
+{
+#ifdef AFS_SUN5_ENV
+    if (!aname[0]) {
+        /* in Solaris, we can get passed "" as a path component if we are the
+         * root directory, e.g. after a call to chroot. It is equivalent to
+         * looking up "." */
+        return 1;
+    }
+#endif /* AFS_SUN5_ENV */
+    if (aname[0] == '.' && !aname[1]) {
+        return 1;
+    }
+    return 0;
+}
int
#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, afs_ucred_t *acred)
#endif
{
- struct vrequest treq;
+ struct vrequest *treq = NULL;
char *tname = NULL;
- register struct vcache *tvc = 0;
- register afs_int32 code;
- register afs_int32 bulkcode = 0;
+ struct vcache *tvc = 0;
+ afs_int32 code;
+ afs_int32 bulkcode = 0;
int pass = 0, hit = 0;
int force_eval = afs_fakestat_enable ? 0 : 1;
long dirCookie;
- extern afs_int32 afs_mariner; /*Writing activity to log? */
afs_hyper_t versionNo;
int no_read_access = 0;
struct sysname_info sysState; /* used only for @sys checking */
int dynrootRetry = 1;
struct afs_fakestat_state fakestate;
int tryEvalOnly = 0;
+
+ /* Don't allow ENOENT errors, except for a specific code path where
+ * 'enoent_prohibited' is cleared below. */
+ int enoent_prohibited = 1;
+
OSI_VC_CONVERT(adp);
AFS_STATCNT(afs_lookup);
afs_InitFakeStat(&fakestate);
AFS_DISCON_LOCK();
-
- if ((code = afs_InitReq(&treq, acred)))
+
+ if ((code = afs_CreateReq(&treq, acred)))
goto done;
- if (afs_fakestat_enable && adp->mvstat == 1) {
+ if (afs_fakestat_enable && adp->mvstat == AFS_MVSTAT_MTPT) {
if (strcmp(aname, ".directory") == 0)
tryEvalOnly = 1;
}
/* Workaround for MacOSX Finder, which tries to look for
* .DS_Store and Contents under every directory.
*/
- if (afs_fakestat_enable && adp->mvstat == 1) {
+ if (afs_fakestat_enable && adp->mvstat == AFS_MVSTAT_MTPT) {
if (strcmp(aname, ".DS_Store") == 0)
tryEvalOnly = 1;
if (strcmp(aname, "Contents") == 0)
tryEvalOnly = 1;
}
- if (afs_fakestat_enable && adp->mvstat == 2) {
+ if (afs_fakestat_enable && adp->mvstat == AFS_MVSTAT_ROOT) {
if (strncmp(aname, "._", 2) == 0)
tryEvalOnly = 1;
}
#endif
if (tryEvalOnly)
- code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
+ code = afs_TryEvalFakeStat(&adp, &fakestate, treq);
else
- code = afs_EvalFakeStat(&adp, &fakestate, &treq);
+ code = afs_EvalFakeStat(&adp, &fakestate, treq);
/*printf("Code is %d\n", code);*/
- if (tryEvalOnly && adp->mvstat == 1)
- code = ENOENT;
+ if (tryEvalOnly && adp->mvstat == AFS_MVSTAT_MTPT)
+ code = ENODEV;
if (code)
goto done;
- *avcp = NULL; /* Since some callers don't initialize it */
-
/* come back to here if we encounter a non-existent object in a read-only
* volume's directory */
-
redo:
*avcp = NULL; /* Since some callers don't initialize it */
bulkcode = 0;
if (!(adp->f.states & CStatd) && !afs_InReadDir(adp)) {
- if ((code = afs_VerifyVCache2(adp, &treq))) {
+ if ((code = afs_VerifyVCache2(adp, treq))) {
goto done;
}
} else
code = 0;
/* watch for ".." in a volume root */
- if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
+ if (adp->mvstat == AFS_MVSTAT_ROOT && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
/* looking up ".." in root via special hacks */
- if (adp->mvid == (struct VenusFid *)0 || adp->mvid->Fid.Volume == 0) {
+ if (adp->mvid.parent == (struct VenusFid *)0 || adp->mvid.parent->Fid.Volume == 0) {
code = ENODEV;
goto done;
}
/* otherwise we have the fid here, so we use it */
/*printf("Getting vcache\n");*/
- tvc = afs_GetVCache(adp->mvid, &treq, NULL, NULL);
- afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT, ICL_TYPE_FID, adp->mvid,
+ tvc = afs_GetVCache(adp->mvid.parent, treq);
+ afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT, ICL_TYPE_FID, adp->mvid.parent,
ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, code);
*avcp = tvc;
- code = (tvc ? 0 : ENOENT);
+ code = (tvc ? 0 : EIO);
hit = 1;
if (tvc && !VREFCOUNT_GT(tvc, 0)) {
osi_Panic("TT1");
}
/* now check the access */
- if (treq.uid != adp->last_looker) {
- if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
+ if (treq->uid != adp->last_looker) {
+ if (!afs_AccessOK(adp, PRSFS_LOOKUP, treq, CHECK_MODE_BITS)) {
*avcp = NULL;
code = EACCES;
goto done;
} else
- adp->last_looker = treq.uid;
+ adp->last_looker = treq->uid;
}
/* Check for read access as well. We need read access in order to
* stat files, but not to stat subdirectories. */
- if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
+ if (!afs_AccessOK(adp, PRSFS_READ, treq, CHECK_MODE_BITS))
no_read_access = 1;
/* special case lookup of ".". Can we check for it sooner in this code,
* I'm not fiddling with the LRUQ here, either, perhaps I should, or else
* invent a lightweight version of GetVCache.
*/
- if (aname[0] == '.' && !aname[1]) { /* special case */
+ if (osi_lookup_isdot(aname)) { /* special case */
ObtainReadLock(&afs_xvcache);
- osi_vnhold(adp, 0);
+ if (osi_vnhold(adp) != 0) {
+ ReleaseReadLock(&afs_xvcache);
+ code = EIO;
+ goto done;
+ }
ReleaseReadLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
vnode_get(AFSTOV(adp));
aname[0] == '.' && aname[1] == '.' && !aname[2]) {
ObtainReadLock(&afs_xvcache);
- osi_vnhold(afs_globalVp, 0);
+ if (osi_vnhold(afs_globalVp) != 0) {
+ ReleaseReadLock(&afs_xvcache);
+ code = EIO;
+ goto done;
+ }
ReleaseReadLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
vnode_get(AFSTOV(afs_globalVp));
struct VenusFid tfid;
afs_uint32 cellidx, volid, vnoid, uniq;
- code = EvalMountData('%', aname, 0, 0, NULL, &treq, &cellidx, &volid, &vnoid, &uniq);
+ code = EvalMountData('%', aname, 0, 0, NULL, treq, &cellidx, &volid, &vnoid, &uniq);
if (code)
goto done;
/* If a vnode was returned, it's not a real mount point */
tfid.Fid.Vnode = VNUM_FROM_TYPEID(VN_TYPE_MOUNT, cellidx << 2);
tfid.Fid.Unique = volid;
}
- *avcp = tvc = afs_GetVCache(&tfid, &treq, NULL, NULL);
- code = (tvc ? 0 : ENOENT);
+ *avcp = tvc = afs_GetVCache(&tfid, treq);
+ code = (tvc ? 0 : EIO);
hit = 1;
goto done;
}
struct VenusFid tfid;
afs_GetDynrootMountFid(&tfid);
- *avcp = tvc = afs_GetVCache(&tfid, &treq, NULL, NULL);
+ *avcp = tvc = afs_GetVCache(&tfid, treq);
code = 0;
hit = 1;
goto done;
}
#endif
- Check_AtSys(adp, aname, &sysState, &treq);
+ Check_AtSys(adp, aname, &sysState, treq);
tname = sysState.name;
/* 1st Check_AtSys and lookup by tname is required here, for now,
if (tvc) {
if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
/* need read access on dir to stat non-directory / non-link */
-#ifndef AFS_FBSD80_ENV
afs_PutVCache(tvc);
-#endif
*avcp = NULL;
code = EACCES;
goto done;
}
#ifdef AFS_LINUX22_ENV
- if (tvc->mvstat == 2) { /* we don't trust the dnlc for root vcaches */
+ if (tvc->mvstat == AFS_MVSTAT_ROOT) { /* we don't trust the dnlc for root vcaches */
AFS_RELE(AFSTOV(tvc));
*avcp = 0;
} else {
}
{ /* sub-block just to reduce stack usage */
- register struct dcache *tdc;
+ struct dcache *tdc;
afs_size_t dirOffset, dirLen;
struct VenusFid tfid;
if (afs_InReadDir(adp))
tdc = adp->dcreaddir;
else
- tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq,
+ tdc = afs_GetDCache(adp, (afs_size_t) 0, treq,
&dirOffset, &dirLen, 1);
if (!tdc) {
*avcp = NULL; /* redundant, but harmless */
if (!afs_InReadDir(adp)) {
while ((adp->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(adp->f.m.DataVersion, tdc->f.versionNo)) {
+ && afs_IsDCacheFresh(tdc, adp)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&tdc->lock);
}
if (!(adp->f.states & CStatd)
- || !hsame(adp->f.m.DataVersion, tdc->f.versionNo)) {
+ || !afs_IsDCacheFresh(tdc, adp)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(tdc);
&dirCookie);
/* If the first lookup doesn't succeed, maybe it's got @sys in the name */
- while (code == ENOENT && Next_AtSys(adp, &treq, &sysState))
+ while (code == ENOENT && Next_AtSys(adp, treq, &sysState))
code =
afs_dir_LookupOffset(tdc, sysState.name, &tfid.Fid,
&dirCookie);
if (!afs_InReadDir(adp))
afs_PutDCache(tdc);
if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry && !tryEvalOnly) {
+ struct cell *tc;
+ char *cn = (tname[0] == '.') ? tname + 1 : tname;
ReleaseReadLock(&adp->lock);
+ /* confirm it's not just hushed */
+ tc = afs_GetCellByName(cn, WRITE_LOCK);
+ if (tc) {
+ if (tc->states & CHush) {
+ tc->states &= ~CHush;
+ ReleaseWriteLock(&tc->lock);
+ afs_DynrootInvalidate();
+ goto redo;
+ }
+ ReleaseWriteLock(&tc->lock);
+ }
+ /* Allow a second dynroot retry if the cell was hushed before */
dynrootRetry = 0;
if (tname[0] == '.')
afs_LookupAFSDB(tname + 1);
ICL_TYPE_INT32, code);
if (code) {
- if (code != ENOENT) {
- /*printf("LOOKUP dirLookupOff -> %d\n", code);*/
+ if (code == ENOENT) {
+ /* The target name really doesn't exist (according to
+ * afs_dir_LookupOffset, anyway). */
+ enoent_prohibited = 0;
}
goto done;
}
/* prefetch some entries, if the dir is currently open. The variable
* dirCookie tells us where to start prefetching from.
*/
- if (!AFS_IS_DISCONNECTED &&
- AFSDOBULK && adp->opens > 0 && !(adp->f.states & CForeign)
- && !afs_IsDynroot(adp) && !afs_InReadDir(adp)) {
+ if (afs_ShouldTryBulkStat(adp)) {
afs_int32 retry;
/* if the entry is not in the cache, or is in the cache,
* but hasn't been statd, then do a bulk stat operation.
} while (tvc && retry);
if (!tvc || !(tvc->f.states & CStatd))
- bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
+ bulkcode = afs_DoBulkStat(adp, dirCookie, treq);
else
bulkcode = 0;
/* if the vcache isn't usable, release it */
if (tvc && !(tvc->f.states & CStatd)) {
-#ifndef AFS_FBSD80_ENV
afs_PutVCache(tvc);
-#endif
tvc = NULL;
}
} else {
* the file has not yet been looked up.
*/
if (!tvc) {
- afs_int32 cached = 0;
if (!tfid.Fid.Unique && (adp->f.states & CForeign)) {
- tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
+ tvc = afs_LookupVCache(&tfid, treq, adp, tname);
}
if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
- tvc = afs_GetVCache(&tfid, &treq, &cached, NULL);
+ tvc = afs_GetVCache(&tfid, treq);
}
} /* if !tvc */
} /* sub-block just to reduce stack usage */
tvc->f.parent.unique = adp->f.fid.Fid.Unique;
tvc->f.states &= ~CBulkStat;
- if (afs_fakestat_enable == 2 && tvc->mvstat == 1) {
+ if (afs_fakestat_enable == 2 && tvc->mvstat == AFS_MVSTAT_MTPT) {
ObtainSharedLock(&tvc->lock, 680);
if (!tvc->linkData) {
UpgradeSToWLock(&tvc->lock, 681);
- code = afs_HandleLink(tvc, &treq);
+ code = afs_HandleLink(tvc, treq);
ConvertWToRLock(&tvc->lock);
} else {
ConvertSToRLock(&tvc->lock);
force_eval = 1;
ReleaseReadLock(&tvc->lock);
}
- if (tvc->mvstat == 1 && (tvc->f.states & CMValid) && tvc->mvid != NULL)
+ if (tvc->mvstat == AFS_MVSTAT_MTPT && (tvc->f.states & CMValid) && tvc->mvid.target_root != NULL)
force_eval = 1; /* This is now almost for free, get it correct */
-#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
+#if defined(UKERNEL)
if (!(flags & AFS_LOOKUP_NOEVAL))
/* don't eval mount points */
-#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
- if (tvc->mvstat == 1 && force_eval) {
+#endif /* UKERNEL */
+ if (tvc->mvstat == AFS_MVSTAT_MTPT && force_eval) {
/* a mt point, possibly unevaluated */
struct volume *tvolp;
ObtainWriteLock(&tvc->lock, 133);
- code = EvalMountPoint(tvc, adp, &tvolp, &treq);
+ code = EvalMountPoint(tvc, adp, &tvolp, treq);
ReleaseWriteLock(&tvc->lock);
if (code) {
-#ifndef AFS_FBSD80_ENV
afs_PutVCache(tvc);
-#endif
if (tvolp)
afs_PutVolume(tvolp, WRITE_LOCK);
goto done;
}
/* next, we want to continue using the target of the mt point */
- if (tvc->mvid && (tvc->f.states & CMValid)) {
+ if (tvc->mvid.target_root && (tvc->f.states & CMValid)) {
struct vcache *uvc;
/* now lookup target, to set .. pointer */
afs_Trace2(afs_iclSetp, CM_TRACE_LOOKUP1,
if (tvolp && (tvolp->states & VForeign)) {
/* XXXX tvolp has ref cnt on but not locked! XXX */
tvc =
- afs_GetRootVCache(tvc->mvid, &treq, NULL, tvolp);
+ afs_GetRootVCache(tvc->mvid.target_root, treq, tvolp);
} else {
- tvc = afs_GetVCache(tvc->mvid, &treq, NULL, NULL);
+ tvc = afs_GetVCache(tvc->mvid.target_root, treq);
}
-#ifndef AFS_FBSD80_ENV
afs_PutVCache(uvc); /* we're done with it */
-#endif
if (!tvc) {
- code = ENOENT;
+ code = EIO;
if (tvolp) {
afs_PutVolume(tvolp, WRITE_LOCK);
}
* ptr to point back to the appropriate place */
if (tvolp) {
ObtainWriteLock(&tvc->lock, 134);
- if (tvc->mvid == NULL) {
- tvc->mvid = (struct VenusFid *)
+ if (tvc->mvid.parent == NULL) {
+ tvc->mvid.parent =
osi_AllocSmallSpace(sizeof(struct VenusFid));
}
/* setup backpointer */
- *tvc->mvid = tvolp->dotdot;
+ *tvc->mvid.parent = tvolp->dotdot;
ReleaseWriteLock(&tvc->lock);
afs_PutVolume(tvolp, WRITE_LOCK);
}
} else {
-#ifndef AFS_FBSD80_ENV
afs_PutVCache(tvc);
-#endif
- code = ENOENT;
+ code = ENODEV;
if (tvolp)
afs_PutVolume(tvolp, WRITE_LOCK);
goto done;
if (!AFS_IS_DISCONNECTED) {
if (pass == 0) {
struct volume *tv;
- tv = afs_GetVolume(&adp->f.fid, &treq, READ_LOCK);
+ tv = afs_GetVolume(&adp->f.fid, treq, READ_LOCK);
if (tv) {
if (tv->states & VRO) {
pass = 1; /* try this *once* */
- ObtainWriteLock(&afs_xcbhash, 495);
- afs_DequeueCallback(adp);
- /* re-stat to get later version */
- adp->f.states &= ~CStatd;
- ReleaseWriteLock(&afs_xcbhash);
- osi_dnlc_purgedp(adp);
+ /* re-stat to get later version */
+ afs_StaleVCache(adp);
afs_PutVolume(tv, READ_LOCK);
goto redo;
}
afs_PutVolume(tv, READ_LOCK);
}
}
- code = ENOENT;
+ code = EIO;
} else {
code = ENETDOWN;
}
if (afs_mariner)
afs_AddMarinerName(aname, tvc);
-#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
+#if defined(UKERNEL)
if (!(flags & AFS_LOOKUP_NOEVAL)) {
/* Here we don't enter the name into the DNLC because we want the
* evaluated mount dir to be there (the vcache for the mounted
* volume) rather than the vc of the mount point itself. We can
* still find the mount point's vc in the vcache by its fid. */
-#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
- if (!hit && force_eval) {
+#endif /* UKERNEL */
+ if (!hit && (force_eval || tvc->mvstat != AFS_MVSTAT_MTPT)) {
osi_dnlc_enter(adp, aname, tvc, &versionNo);
} else {
#ifdef AFS_LINUX20_ENV
/* So Linux inode cache is up to date. */
- code = afs_VerifyVCache(tvc, &treq);
+ code = afs_VerifyVCache(tvc, treq);
#else
afs_PutFakeStat(&fakestate);
+ afs_DestroyReq(treq);
AFS_DISCON_UNLOCK();
return 0; /* can't have been any errors if hit and !code */
#endif
}
-#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
+#if defined(UKERNEL)
}
#endif
}
if (bulkcode)
code = bulkcode;
- code = afs_CheckCode(code, &treq, 19);
+ code = afs_CheckCode(code, treq, 19);
if (code) {
/* If there is an error, make sure *avcp is null.
* Alphas panic otherwise - defect 10719.
*/
*avcp = NULL;
}
+ if (code == ENOENT && enoent_prohibited) {
+ /*
+ * We got an ENOENT error, but we didn't get it while looking up the
+ * dir entry in the relevant dir blob. That means we likely hit some
+ * other internal error; don't allow us to return ENOENT in this case,
+ * since some platforms cache ENOENT errors, and the target path name
+ * may actually exist.
+ */
+ code = EIO;
+ }
afs_PutFakeStat(&fakestate);
+ afs_DestroyReq(treq);
AFS_DISCON_UNLOCK();
return code;
}