2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
17 #include <afsconfig.h>
18 #include "afs/param.h"
23 #include "afs/sysincludes.h" /* Standard vendor system headers */
24 #include "afsincludes.h" /* Afs-based standard headers */
25 #include "afs/afs_stats.h" /* statistics */
26 #include "afs/afs_cbqueue.h"
27 #include "afs/nfsclient.h"
28 #include "afs/exporter.h"
29 #include "afs/afs_osidnlc.h"
32 extern struct DirEntry *afs_dir_GetBlob();

/* Module-level state for mount-point evaluation and bulk stat. */
35 afs_int32 afs_bkvolpref = 0;	/* nonzero => prefer the BK volume when crossing a mount point within the same cell (see EvalMountPoint) */
36 afs_int32 afs_bulkStatsDone;	/* running count of entries successfully merged by afs_DoBulkStat */
37 static int bulkStatCounter = 0;	/* counter for bulk stat seq. numbers */
38 int afs_fakestat_enable = 0;	/* 1: fakestat-all, 2: fakestat-crosscell */
41 /* this would be faster if it did comparison as int32word, but would be
42 * dependent on byte-order and alignment, and I haven't figured out
43 * what "@sys" is in binary... */
/* True iff name is exactly the four-character string "@sys" (the !(name)[4]
 * term checks the terminating NUL).  Function-like macro: evaluates its
 * argument up to five times, so do not pass expressions with side effects. */
44 #define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
46 /* call under write lock, evaluate mvid field from a mt pt.
47 * avc is the vnode of the mount point object; must be write-locked.
48 * advc is the vnode of the containing directory (optional; if NULL and
49 * EvalMountPoint succeeds, caller must initialize *avolpp->dotdot)
50 * avolpp is where we return a pointer to the volume named by the mount pt, if success
51 * areq is the identity of the caller.
53 * NOTE: this function returns a held volume structure in *volpp if it returns 0!
56 EvalMountPoint(register struct vcache *avc, struct vcache *advc,
57 struct volume **avolpp, register struct vrequest *areq)
60 struct volume *tvp = 0;
63 char *cpos, *volnamep;
65 afs_int32 prefetch; /* 1=>None 2=>RO 3=>BK */
66 afs_int32 mtptCell, assocCell = 0, hac = 0;
67 afs_int32 samecell, roname, len;
69 AFS_STATCNT(EvalMountPoint);
71 if (avc->mvid && (avc->states & CMValid))
72 return 0; /* done while racing */
/* Resolve the mount point's link contents into avc->linkData for parsing below. */
75 code = afs_HandleLink(avc, areq);
79 /* Determine which cell and volume the mountpoint goes to */
80 type = avc->linkData[0]; /* '#'=>Regular '%'=>RW */
81 cpos = afs_strchr(&avc->linkData[1], ':'); /* if cell name present */
85 tcell = afs_GetCellByName(&avc->linkData[1], READ_LOCK);
/* no cell name: volume name starts right after the type character,
 * and the cell defaults to the mount point's own cell */
88 volnamep = &avc->linkData[1];
89 tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
94 mtptCell = tcell->cellNum; /* The cell for the mountpoint */
96 hac = 1; /* has associated cell */
97 assocCell = tcell->lcellp->cellNum; /* The associated cell */
99 afs_PutCell(tcell, READ_LOCK);
101 /* Is volume name a "<n>.backup" or "<n>.readonly" name */
102 len = strlen(volnamep);
103 roname = ((len > 9) && (strcmp(&volnamep[len - 9], ".readonly") == 0))
104 || ((len > 7) && (strcmp(&volnamep[len - 7], ".backup") == 0));
106 /* When we cross mountpoint, do we stay in the same cell */
107 samecell = (avc->fid.Cell == mtptCell) || (hac
111 /* Decide whether to prefetch the BK, or RO. Also means we want the BK or
113 * If this is a regular mountpoint with a RW volume name
114 * - If BK preference is enabled AND we remain within the same cell AND
115 * start from a BK volume, then we will want to prefetch the BK volume.
116 * - If we cross a cell boundary OR start from a RO volume, then we will
117 * want to prefetch the RO volume.
119 if ((type == '#') && !roname) {
120 if (afs_bkvolpref && samecell && (avc->states & CBackup))
121 prefetch = 3; /* Prefetch the BK */
122 else if (!samecell || (avc->states & CRO))
123 prefetch = 2; /* Prefetch the RO */
125 prefetch = 1; /* Do not prefetch */
127 prefetch = 1; /* Do not prefetch */
130 /* Get the volume struct. Unless this volume name has ".readonly" or
131 * ".backup" in it, this will get the volume struct for the RW volume.
132 * The RO volume will be prefetched if requested (but not returned).
134 tvp = afs_GetVolumeByName(volnamep, mtptCell, prefetch, areq, WRITE_LOCK);
136 /* If no volume was found in this cell, try the associated linked cell */
137 if (!tvp && hac && areq->volumeError) {
139 afs_GetVolumeByName(volnamep, assocCell, prefetch, areq,
143 /* Still not found. If we are looking for the RO, then perhaps the RW
144 * doesn't exist? Try adding ".readonly" to volname and look for that.
145 * Don't know why we do this. Would have still found it in above call - jpm.
147 if (!tvp && (prefetch == 2) && len < AFS_SMALLOCSIZ - 10) {
148 buf = (char *)osi_AllocSmallSpace(len + 10);
150 strcpy(buf, volnamep);
151 afs_strcat(buf, ".readonly");
153 tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
155 /* Try the associated linked cell if failed */
156 if (!tvp && hac && areq->volumeError) {
157 tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
159 osi_FreeSmallSpace(buf);
163 return ENODEV; /* Couldn't find the volume */
165 /* Don't cross mountpoint from a BK to a BK volume */
166 if ((avc->states & CBackup) && (tvp->states & VBackup)) {
167 afs_PutVolume(tvp, WRITE_LOCK);
171 /* If we want (prefetched) the BK and it exists, then drop the RW volume
173 * Otherwise, if we want (prefetched) the RO and it exists, then drop the
174 * RW volume and get the RO.
175 * Otherwise, go with the RW.
177 if ((prefetch == 3) && tvp->backVol) {
178 tfid.Fid.Volume = tvp->backVol; /* remember BK volume */
179 tfid.Cell = tvp->cell;
180 afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
181 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
183 return ENODEV; /* oops, can't do it */
184 } else if ((prefetch >= 2) && tvp->roVol) {
185 tfid.Fid.Volume = tvp->roVol; /* remember RO volume */
186 tfid.Cell = tvp->cell;
187 afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
188 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
190 return ENODEV; /* oops, can't do it */
/* Record the root directory fid (vnode 1, unique 1) of the target volume
 * in avc->mvid, and mark the mount point evaluated so racing callers can
 * take the early-out at the top of this function. */
195 (struct VenusFid *)osi_AllocSmallSpace(sizeof(struct VenusFid));
196 avc->mvid->Cell = tvp->cell;
197 avc->mvid->Fid.Volume = tvp->volume;
198 avc->mvid->Fid.Vnode = 1;
199 avc->mvid->Fid.Unique = 1;
200 avc->states |= CMValid;
202 /* Used to: if the mount point is stored within a backup volume,
203 * then we should only update the parent pointer information if
204 * there's none already set, so as to avoid updating a volume's ..
205 * info with something in an OldFiles directory.
207 * Next two lines used to be under this if:
209 * if (!(avc->states & CBackup) || tvp->dotdot.Fid.Volume == 0)
211 * Now: update mount point back pointer on every call, so that we handle
212 * multiple mount points better. This way, when du tries to go back
213 * via chdir(".."), it will end up exactly where it started, yet
214 * cd'ing via a new path to a volume will reset the ".." pointer
217 tvp->mtpoint = avc->fid; /* setup back pointer to mtpoint */
219 tvp->dotdot = advc->fid;
228 * Must be called on an afs_fakestat_state object before calling
229 * afs_EvalFakeStat or afs_PutFakeStat. Calling afs_PutFakeStat
230 * without calling afs_EvalFakeStat is legal, as long as this
231 * function is called.
234 afs_InitFakeStat(struct afs_fakestat_state *state)
236 if (!afs_fakestat_enable)
/* Record that no volume-root vcache reference is held yet;
 * afs_PutFakeStat only releases state->root_vp when
 * afs_EvalFakeStat_int later sets this flag. */
241 state->need_release = 0;
245 * afs_EvalFakeStat_int
247 * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
248 * which is called by those wrapper functions.
250 * Only issues RPCs if canblock is non-zero.
 *
 * On success with a mount point, replaces *avcp with the volume-root
 * vcache and records the held reference in state->root_vp /
 * state->need_release so afs_PutFakeStat can release it.
253 afs_EvalFakeStat_int(struct vcache **avcp, struct afs_fakestat_state *state,
254 struct vrequest *areq, int canblock)
256 struct vcache *tvc, *root_vp;
257 struct volume *tvolp = NULL;
260 if (!afs_fakestat_enable)
263 osi_Assert(state->valid == 1);
264 osi_Assert(state->did_eval == 0);
/* mvstat != 1: not a mount point, so there is nothing to evaluate
 * (NOTE(review): mvstat encoding inferred from usage here and in
 * afs_lookup -- confirm against the vcache definition) */
268 if (tvc->mvstat != 1)
271 /* Is the call to VerifyVCache really necessary? */
272 code = afs_VerifyVCache(tvc, areq);
276 ObtainWriteLock(&tvc->lock, 599);
277 code = EvalMountPoint(tvc, NULL, &tvolp, areq);
278 ReleaseWriteLock(&tvc->lock);
/* advc was NULL above, so per EvalMountPoint's contract we must fill in
 * the target volume's ".." fid ourselves from the mount point's parent */
282 tvolp->dotdot = tvc->fid;
283 tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
284 tvolp->dotdot.Fid.Unique = tvc->parentUnique;
287 if (tvc->mvid && (tvc->states & CMValid)) {
/* non-blocking path: only use a vcache already in the cache, retrying
 * around low-level races flagged by afs_FindVCache */
293 ObtainWriteLock(&afs_xvcache, 597);
294 root_vp = afs_FindVCache(tvc->mvid, &retry, IS_WLOCK);
295 if (root_vp && retry) {
296 ReleaseWriteLock(&afs_xvcache);
297 afs_PutVCache(root_vp);
299 } while (root_vp && retry);
300 ReleaseWriteLock(&afs_xvcache);
/* blocking path: may go to the fileserver to fetch the volume root */
302 root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL);
305 code = canblock ? ENOENT : 0;
308 #ifdef AFS_DARWIN80_ENV
309 root_vp->m.Type = VDIR;
311 code = afs_darwin_finalizevnode(root_vp, NULL, NULL, 0);
314 vnode_ref(AFSTOV(root_vp));
317 /* Is this always kosher? Perhaps we should instead use
318 * NBObtainWriteLock to avoid potential deadlock.
320 ObtainWriteLock(&root_vp->lock, 598);
322 root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
323 *root_vp->mvid = tvolp->dotdot;
324 ReleaseWriteLock(&root_vp->lock);
326 state->need_release = 1;
327 state->root_vp = root_vp;
331 code = canblock ? ENOENT : 0;
336 afs_PutVolume(tvolp, WRITE_LOCK);
343 * Automatically does the equivalent of EvalMountPoint for vcache entries
344 * which are mount points. Remembers enough state to properly release
345 * the volume root vcache when afs_PutFakeStat() is called.
347 * State variable must be initialized by afs_InitFakeStat() beforehand.
349 * Returns 0 when everything succeeds and *avcp points to the vcache entry
350 * that should be used for the real vnode operation. Returns non-zero if
351 * something goes wrong and the error code should be returned to the user.
354 afs_EvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
355 struct vrequest *areq)
/* Blocking variant: canblock=1, so the helper is allowed to issue RPCs. */
357 return afs_EvalFakeStat_int(avcp, state, areq, 1);
361 * afs_TryEvalFakeStat
363 * Same as afs_EvalFakeStat, but tries not to talk to remote servers
364 * and only evaluate the mount point if all the data is already in
367 * Returns 0 if everything succeeds and *avcp points to a valid
368 * vcache entry (possibly evaluated).
371 afs_TryEvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
372 struct vrequest *areq)
/* Non-blocking variant: canblock=0, so no RPCs are issued. */
374 return afs_EvalFakeStat_int(avcp, state, areq, 0);
380 * Perform any necessary cleanup at the end of a vnode op, given that
381 * afs_InitFakeStat was previously called with this state.
384 afs_PutFakeStat(struct afs_fakestat_state *state)
386 if (!afs_fakestat_enable)
389 osi_Assert(state->valid == 1);
390 if (state->need_release)
/* drop the volume-root reference taken by afs_EvalFakeStat_int */
391 afs_PutVCache(state->root_vp);
/* Reject names that would be expanded by @sys substitution: checks whether
 * aname ends in the literal suffix "@sys".  (Return statements are elided
 * in this view; presumably returns 0 for "@sys" names, nonzero otherwise
 * -- confirm against the full source.) */
396 afs_ENameOK(register char *aname)
400 AFS_STATCNT(ENameOK);
401 tlen = strlen(aname);
402 if (tlen >= 4 && strcmp(aname + tlen - 4, "@sys") == 0)
/* Copy the caller's preferred sysname expansion of "@sys" into bufp.
 * By default this is the first entry of the global afs_sysnamelist; when
 * the request comes in via an NFS exporter, the exporter's own sysname
 * list (EXP_SYSNAME) is consulted instead, and *sysnamelist / *num are
 * updated to describe that list for the caller. */
408 afs_getsysname(register struct vrequest *areq, register struct vcache *adp,
409 register char *bufp, int *num, char **sysnamelist[])
411 register struct unixuser *au;
412 register afs_int32 error;
414 AFS_STATCNT(getsysname);
416 *sysnamelist = afs_sysnamelist;
418 if (!afs_nfsexporter)
419 strcpy(bufp, (*sysnamelist)[0]);
421 au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
423 error = EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, num);
/* exporter could not supply a sysname: leave the literal "@sys" so the
 * lookup fails rather than silently using the wrong platform name */
425 strcpy(bufp, "@sys");
429 strcpy(bufp, (*sysnamelist)[0]);
432 strcpy(bufp, afs_sysname);
/* Initialize a sysname_info state for looking up aname in directory avc.
 * If aname is exactly "@sys", allocate a buffer and fill it with the first
 * sysname expansion via afs_getsysname; otherwise state->name simply
 * aliases the caller's string (no allocation).  Next_AtSys continues the
 * iteration over remaining sysname candidates. */
439 Check_AtSys(register struct vcache *avc, const char *aname,
440 struct sysname_info *state, struct vrequest *areq)
443 char **sysnamelist[MAXNUMSYSNAMES];
445 if (AFS_EQ_ATSYS(aname)) {
447 state->name = (char *)osi_AllocLargeSpace(AFS_SMALLOCSIZ);
450 afs_getsysname(areq, avc, state->name, &num, sysnamelist);
455 state->name = (char *)aname;
/* Advance the sysname_info iterator set up by Check_AtSys to the next
 * candidate expansion.  Handles both a bare "@sys" and a "prefix@sys"
 * suffix; returns 0 when there are no further candidates (the nonzero
 * continue path is elided in this view). */
460 Next_AtSys(register struct vcache *avc, struct vrequest *areq,
461 struct sysname_info *state)
463 int num = afs_sysnamecount;
464 char **sysnamelist[MAXNUMSYSNAMES];
466 if (state->index == -1)
467 return 0; /* No list */
469 /* Check for the initial state of aname != "@sys" in Check_AtSys */
470 if (state->offset == -1 && state->allocked == 0) {
471 register char *tname;
473 /* Check for .*@sys */
474 for (tname = state->name; *tname; tname++)
475 /*Move to the end of the string */ ;
477 if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname - 4))) {
/* name ends in "@sys": copy the prefix into a fresh buffer and append
 * the first sysname expansion at state->offset */
478 state->offset = (tname - 4) - state->name;
479 tname = (char *)osi_AllocLargeSpace(AFS_LRALLOCSIZ);
480 strncpy(tname, state->name, state->offset);
485 afs_getsysname(areq, avc, state->name + state->offset, &num,
489 return 0; /* .*@sys doesn't match either */
491 register struct unixuser *au;
492 register afs_int32 error;
494 *sysnamelist = afs_sysnamelist;
496 if (afs_nfsexporter) {
497 au = afs_GetUser(areq->uid, avc->fid.Cell, 0);
500 EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, num);
507 if (++(state->index) >= num || !(*sysnamelist)[(unsigned int)state->index])
508 return 0; /* end of list */
/* overwrite the "@sys" suffix region with the next candidate sysname */
510 strcpy(state->name + state->offset, (*sysnamelist)[(unsigned int)state->index]);
514 extern int BlobScan(struct dcache * afile, afs_int32 ablob);

516 /* called with an unlocked directory and directory cookie. Areqp
517 * describes who is making the call.
518 * Scans the next N (about 30, typically) directory entries, and does
519 * a bulk stat call to stat them all.
521 * Must be very careful when merging in RPC responses, since we dont
522 * want to overwrite newer info that was added by a file system mutating
523 * call that ran concurrently with our bulk stat call.
525 * We do that, as described below, by not merging in our info (always
526 * safe to skip the merge) if the status info is valid in the vcache entry.
528 * If adapt ever implements the bulk stat RPC, then this code will need to
529 * ensure that vcaches created for failed RPC's to older servers have the
/* NOTE(review): BStvc is not referenced anywhere in this view; presumably
 * a debugging hook for bulk stat -- confirm before removing. */
532 static struct vcache *BStvc = NULL;
/* Prefetch status information for up to ~AFSCBMAX entries of directory adp,
 * starting at dirCookie, via one (Inline)BulkStatus RPC, merging the results
 * into the stat cache only for entries this call itself created (see the
 * statSeqNo race-detection scheme below). */
535 afs_DoBulkStat(struct vcache *adp, long dirCookie, struct vrequest *areqp)
537 int nentries; /* # of entries to prefetch */
538 int nskip; /* # of slots in the LRU queue to skip */
539 struct vcache *lruvcp; /* vcache ptr of our goal pos in LRU queue */
540 struct dcache *dcp; /* chunk containing the dir block */
541 char *statMemp; /* status memory block */
542 char *cbfMemp; /* callback and fid memory block */
543 afs_size_t temp; /* temp for holding chunk length, &c. */
544 struct AFSFid *fidsp; /* file IDs we're collecting */
545 struct AFSCallBack *cbsp; /* call back pointers */
546 struct AFSCallBack *tcbp; /* temp callback ptr */
547 struct AFSFetchStatus *statsp; /* file status info */
548 struct AFSVolSync volSync; /* vol sync return info */
549 struct vcache *tvcp; /* temp vcp */
550 struct afs_q *tq; /* temp queue variable */
551 AFSCBFids fidParm; /* file ID parm for bulk stat */
552 AFSBulkStats statParm; /* stat info parm for bulk stat */
553 int fidIndex = 0; /* which file we're stating */
554 struct conn *tcp = 0; /* conn for call */
555 AFSCBs cbParm; /* callback parm for bulk stat */
556 struct server *hostp = 0; /* host we got callback from */
557 long startTime; /* time we started the call,
558 * for callback expiration base
560 afs_size_t statSeqNo = 0; /* Value of file size to detect races */
561 int code; /* error code */
562 long newIndex; /* new index in the dir */
563 struct DirEntry *dirEntryp; /* dir entry we are examining */
565 struct VenusFid afid; /* file ID we are using now */
566 struct VenusFid tfid; /* another temp. file ID */
567 afs_int32 retry; /* handle low-level SGI MP race conditions */
568 long volStates; /* flags from vol structure */
569 struct volume *volp = 0; /* volume ptr */
570 struct VenusFid dotdot;
571 int flagIndex = 0; /* First file with bulk fetch flag set */
572 int inlinebulk = 0; /* Did we use InlineBulk RPC or not? */
574 #ifdef AFS_DARWIN80_ENV
575 panic("bulkstatus doesn't work on AFS_DARWIN80_ENV. don't call it");
577 /* first compute some basic parameters. We dont want to prefetch more
578 * than a fraction of the cache in any given call, and we want to preserve
579 * a portion of the LRU queue in any event, so as to avoid thrashing
580 * the entire stat cache (we will at least leave some of it alone).
581 * presently dont stat more than 1/8 the cache in any one call. */
582 nentries = afs_cacheStats / 8;
584 /* dont bother prefetching more than one calls worth of info */
585 if (nentries > AFSCBMAX)
588 /* heuristic to make sure that things fit in 4K. This means that
589 * we shouldnt make it any bigger than 47 entries. I am typically
590 * going to keep it a little lower, since we don't want to load
591 * too much of the stat cache.
596 /* now, to reduce the stack size, well allocate two 4K blocks,
597 * one for fids and callbacks, and one for stat info. Well set
598 * up our pointers to the memory from there, too.
600 statMemp = osi_AllocLargeSpace(nentries * sizeof(AFSFetchStatus));
601 statsp = (struct AFSFetchStatus *)statMemp;
603 osi_AllocLargeSpace(nentries *
604 (sizeof(AFSCallBack) + sizeof(AFSFid)));
605 fidsp = (AFSFid *) cbfMemp;
606 cbsp = (AFSCallBack *) (cbfMemp + nentries * sizeof(AFSFid));
608 /* next, we must iterate over the directory, starting from the specified
609 * cookie offset (dirCookie), and counting out nentries file entries.
610 * We skip files that already have stat cache entries, since we
611 * dont want to bulk stat files that are already in the cache.
614 code = afs_VerifyVCache(adp, areqp);
618 dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
624 /* lock the directory cache entry */
625 ObtainReadLock(&adp->lock);
626 ObtainReadLock(&dcp->lock);
629 * Make sure that the data in the cache is current. There are two
630 * cases we need to worry about:
631 * 1. The cache data is being fetched by another process.
632 * 2. The cache data is no longer valid
634 while ((adp->states & CStatd)
635 && (dcp->dflags & DFFetching)
636 && hsame(adp->m.DataVersion, dcp->f.versionNo)) {
637 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
638 __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER, dcp,
639 ICL_TYPE_INT32, dcp->dflags);
640 ReleaseReadLock(&dcp->lock);
641 ReleaseReadLock(&adp->lock);
642 afs_osi_Sleep(&dcp->validPos);
643 ObtainReadLock(&adp->lock);
644 ObtainReadLock(&dcp->lock);
646 if (!(adp->states & CStatd)
647 || !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
648 ReleaseReadLock(&dcp->lock);
649 ReleaseReadLock(&adp->lock);
654 /* Generate a sequence number so we can tell whether we should
655 * store the attributes when processing the response. This number is
656 * stored in the file size when we set the CBulkFetching bit. If the
657 * CBulkFetching is still set and this value hasn't changed, then
658 * we know we were the last to set CBulkFetching bit for this file,
659 * and it is safe to set the status information for this file.
661 statSeqNo = bulkStatCounter++;
663 /* now we have dir data in the cache, so scan the dir page */
666 while (1) { /* Should probably have some constant bound */
667 /* look for first safe entry to examine in the directory. BlobScan
668 * looks for the 1st allocated dir after the dirCookie slot.
670 newIndex = BlobScan(dcp, (dirCookie >> 5));
674 /* remember the updated directory cookie */
675 dirCookie = newIndex << 5;
677 /* get a ptr to the dir entry */
679 (struct DirEntry *)afs_dir_GetBlob(dcp, newIndex);
683 /* dont copy more than we have room for */
684 if (fidIndex >= nentries) {
685 DRelease((struct buffer *)dirEntryp, 0);
689 /* now, if the dir entry looks good, copy it out to our list. Vnode
690 * 0 means deleted, although it should also be free were it deleted.
692 if (dirEntryp->fid.vnode != 0) {
693 /* dont copy entries we have in our cache. This check will
694 * also make us skip "." and probably "..", unless it has
695 * disappeared from the cache since we did our namei call.
697 tfid.Cell = adp->fid.Cell;
698 tfid.Fid.Volume = adp->fid.Fid.Volume;
699 tfid.Fid.Vnode = ntohl(dirEntryp->fid.vnode);
700 tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
703 ObtainWriteLock(&afs_xvcache, 130);
704 tvcp = afs_FindVCache(&tfid, &retry, IS_WLOCK /* no stats | LRU */ );
706 ReleaseWriteLock(&afs_xvcache);
709 } while (tvcp && retry);
710 if (!tvcp) { /* otherwise, create manually */
711 tvcp = afs_NewVCache(&tfid, hostp);
714 ObtainWriteLock(&tvcp->lock, 505);
715 ReleaseWriteLock(&afs_xvcache);
716 afs_RemoveVCB(&tfid);
717 ReleaseWriteLock(&tvcp->lock);
719 ReleaseWriteLock(&afs_xvcache);
722 ReleaseWriteLock(&afs_xvcache);
726 DRelease((struct buffer *)dirEntryp, 0);
727 ReleaseReadLock(&dcp->lock);
728 ReleaseReadLock(&adp->lock);
730 goto done; /* can happen if afs_NewVCache fails */
733 #ifdef AFS_DARWIN80_ENV
734 if (tvcp->states & CVInit) {
735 /* XXX don't have status yet, so creating the vnode is
736 not yet useful. we would get CDeadVnode set, and the
737 upcoming PutVCache will cause the vcache to be flushed &
738 freed, which in turn means the bulkstatus results won't
742 /* WARNING: afs_DoBulkStat uses the Length field to store a
743 * sequence number for each bulk status request. Under no
744 * circumstances should afs_DoBulkStat store a sequence number
745 * if the new length will be ignored when afs_ProcessFS is
746 * called with new stats. */
748 if (!(tvcp->states & (CStatd | CBulkFetching))
749 && (tvcp->execsOrWriters <= 0)
750 && !afs_DirtyPages(tvcp)
751 && !AFS_VN_MAPPED((vnode_t *) tvcp))
753 if (!(tvcp->states & (CStatd | CBulkFetching))
754 && (tvcp->execsOrWriters <= 0)
755 && !afs_DirtyPages(tvcp))
759 /* this entry doesnt exist in the cache, and is not
760 * already being fetched by someone else, so add it to the
761 * list of file IDs to obtain.
763 * We detect a callback breaking race condition by checking the
764 * CBulkFetching state bit and the value in the file size.
765 * It is safe to set the status only if the CBulkFetching
766 * flag is still set and the value in the file size does
769 * Don't fetch status for dirty files. We need to
770 * preserve the value of the file size. We could
771 * flush the pages, but it wouldn't be worthwhile.
773 memcpy((char *)(fidsp + fidIndex), (char *)&tfid.Fid,
775 tvcp->states |= CBulkFetching;
776 tvcp->m.Length = statSeqNo;
782 /* if dir vnode has non-zero entry */
783 /* move to the next dir entry by adding in the # of entries
784 * used by this dir entry.
786 temp = afs_dir_NameBlobs(dirEntryp->name) << 5;
787 DRelease((struct buffer *)dirEntryp, 0);
791 } /* while loop over all dir entries */
793 /* now release the dir lock and prepare to make the bulk RPC */
794 ReleaseReadLock(&dcp->lock);
795 ReleaseReadLock(&adp->lock);
797 /* release the chunk */
800 /* dont make a null call */
805 /* setup the RPC parm structures */
806 fidParm.AFSCBFids_len = fidIndex;
807 fidParm.AFSCBFids_val = fidsp;
808 statParm.AFSBulkStats_len = fidIndex;
809 statParm.AFSBulkStats_val = statsp;
810 cbParm.AFSCBs_len = fidIndex;
811 cbParm.AFSCBs_val = cbsp;
813 /* start the timer; callback expirations are relative to this */
814 startTime = osi_Time();
816 tcp = afs_Conn(&adp->fid, areqp, SHARED_LOCK);
818 hostp = tcp->srvr->server;
819 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
/* Prefer the InlineBulkStatus RPC; fall back (and remember, via
 * SNO_INLINEBULK) when the server does not implement it. */
822 if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
824 RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
826 if (code == RXGEN_OPCODE) {
827 tcp->srvr->server->flags |= SNO_INLINEBULK;
830 RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
837 RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
845 (tcp, code, &adp->fid, areqp, AFS_STATS_FS_RPCIDX_BULKSTATUS,
848 /* now, if we didnt get the info, bail out. */
852 /* we need vol flags to create the entries properly */
853 dotdot.Fid.Volume = 0;
854 volp = afs_GetVolume(&adp->fid, areqp, READ_LOCK);
856 volStates = volp->states;
857 if (volp->dotdot.Fid.Volume != 0)
858 dotdot = volp->dotdot;
862 /* find the place to merge the info into We do this by skipping
863 * nskip entries in the LRU queue. The more we skip, the more
864 * we preserve, since the head of the VLRU queue is the most recently
868 nskip = afs_cacheStats / 2; /* preserved fraction of the cache */
869 ObtainReadLock(&afs_xvcache);
871 /* actually a serious error, probably should panic. Probably will
872 * panic soon, oh well. */
873 ReleaseReadLock(&afs_xvcache);
874 afs_warnuser("afs_DoBulkStat: VLRU empty!");
877 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
878 refpanic("Bulkstat VLRU inconsistent");
880 for (tq = VLRU.next; tq != &VLRU; tq = QNext(tq)) {
883 else if (QNext(QPrev(tq)) != tq) {
885 refpanic("BulkStat VLRU inconsistent");
891 lruvcp = QTOV(VLRU.next);
893 /* now we have to hold this entry, so that it does not get moved
894 * into the free list while we're running. It could still get
895 * moved within the lru queue, but hopefully that will be rare; it
896 * doesn't hurt nearly as much.
899 osi_vnhold(lruvcp, &retry);
900 ReleaseReadLock(&afs_xvcache); /* could be read lock */
903 #ifdef AFS_DARWIN80_ENV
904 vnode_get(AFSTOV(lruvcp));
907 /* otherwise, merge in the info. We have to be quite careful here,
908 * since we need to ensure that we don't merge old info over newer
909 * stuff in a stat cache entry. We're very conservative here: we don't
910 * do the merge at all unless we ourselves create the stat cache
911 * entry. That's pretty safe, and should work pretty well, since we
912 * typically expect to do the stat cache creation ourselves.
914 * We also have to take into account racing token revocations.
916 for (i = 0; i < fidIndex; i++) {
917 if ((&statsp[i])->errorCode)
919 afid.Cell = adp->fid.Cell;
920 afid.Fid.Volume = adp->fid.Fid.Volume;
921 afid.Fid.Vnode = fidsp[i].Vnode;
922 afid.Fid.Unique = fidsp[i].Unique;
925 ObtainReadLock(&afs_xvcache);
926 tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */ );
927 ReleaseReadLock(&afs_xvcache);
928 } while (tvcp && retry);
930 /* The entry may no longer exist */
935 /* now we have the entry held, but we need to fill it in */
936 ObtainWriteLock(&tvcp->lock, 131);
938 /* if CBulkFetching is not set, or if the file size no longer
939 * matches the value we placed there when we set the CBulkFetching
940 * flag, then someone else has done something with this node,
941 * and we may not have the latest status information for this
942 * file. Leave the entry alone.
944 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
946 ReleaseWriteLock(&tvcp->lock);
951 /* now copy ".." entry back out of volume structure, if necessary */
952 if (tvcp->mvstat == 2 && (dotdot.Fid.Volume != 0)) {
954 tvcp->mvid = (struct VenusFid *)
955 osi_AllocSmallSpace(sizeof(struct VenusFid));
956 *tvcp->mvid = dotdot;
/* move the entry to just behind lruvcp, preserving the head of the
 * VLRU; the surrounding checks are consistency paranoia on the queue */
959 ObtainWriteLock(&afs_xvcache, 132);
960 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
961 refpanic("Bulkstat VLRU inconsistent2");
963 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
964 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
965 refpanic("Bulkstat VLRU inconsistent4");
967 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
968 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
969 refpanic("Bulkstat VLRU inconsistent5");
972 if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
973 QRemove(&tvcp->vlruq);
974 QAdd(&lruvcp->vlruq, &tvcp->vlruq);
977 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
978 refpanic("Bulkstat VLRU inconsistent3");
980 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
981 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
982 refpanic("Bulkstat VLRU inconsistent5");
984 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
985 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
986 refpanic("Bulkstat VLRU inconsistent6");
988 ReleaseWriteLock(&afs_xvcache);
990 ObtainWriteLock(&afs_xcbhash, 494);
992 /* We need to check the flags again. We may have missed
993 * something while we were waiting for a lock.
995 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
997 ReleaseWriteLock(&tvcp->lock);
998 ReleaseWriteLock(&afs_xcbhash);
1003 /* now merge in the resulting status back into the vnode.
1004 * We only do this if the entry looks clear.
1006 afs_ProcessFS(tvcp, &statsp[i], areqp);
1007 #if defined(AFS_LINUX22_ENV)
1008 afs_fill_inode(AFSTOV(tvcp), NULL); /* reset inode operations */
1011 /* do some accounting for bulk stats: mark this entry as
1012 * loaded, so we can tell if we use it before it gets
1015 tvcp->states |= CBulkStat;
1016 tvcp->states &= ~CBulkFetching;
1018 afs_bulkStatsDone++;
1020 /* merge in vol info */
1021 if (volStates & VRO)
1022 tvcp->states |= CRO;
1023 if (volStates & VBackup)
1024 tvcp->states |= CBackup;
1025 if (volStates & VForeign)
1026 tvcp->states |= CForeign;
1028 /* merge in the callback info */
1029 tvcp->states |= CTruth;
1031 /* get ptr to the callback we are interested in */
1034 if (tcbp->ExpirationTime != 0) {
1035 tvcp->cbExpires = tcbp->ExpirationTime + startTime;
1036 tvcp->callback = hostp;
1037 tvcp->states |= CStatd;
1038 afs_QueueCallback(tvcp, CBHash(tcbp->ExpirationTime), volp);
1039 } else if (tvcp->states & CRO) {
1040 /* ordinary callback on a read-only volume -- AFS 3.2 style */
1041 tvcp->cbExpires = 3600 + startTime;
1042 tvcp->callback = hostp;
1043 tvcp->states |= CStatd;
1044 afs_QueueCallback(tvcp, CBHash(3600), volp);
1047 tvcp->states &= ~(CStatd | CUnique);
1048 afs_DequeueCallback(tvcp);
1049 if ((tvcp->states & CForeign) || (vType(tvcp) == VDIR))
1050 osi_dnlc_purgedp(tvcp); /* if it (could be) a directory */
1052 ReleaseWriteLock(&afs_xcbhash);
1054 ReleaseWriteLock(&tvcp->lock);
1055 /* finally, we're done with the entry */
1056 afs_PutVCache(tvcp);
1057 } /* for all files we got back */
1059 /* finally return the pointer into the LRU queue */
1060 afs_PutVCache(lruvcp);
1063 /* Be sure to turn off the CBulkFetching flags */
1064 for (i = flagIndex; i < fidIndex; i++) {
1065 afid.Cell = adp->fid.Cell;
1066 afid.Fid.Volume = adp->fid.Fid.Volume;
1067 afid.Fid.Vnode = fidsp[i].Vnode;
1068 afid.Fid.Unique = fidsp[i].Unique;
1071 ObtainReadLock(&afs_xvcache);
1072 tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */ );
1073 ReleaseReadLock(&afs_xvcache);
1074 } while (tvcp && retry);
1075 if (tvcp != NULL && (tvcp->states & CBulkFetching)
1076 && (tvcp->m.Length == statSeqNo)) {
1077 tvcp->states &= ~CBulkFetching;
1080 afs_PutVCache(tvcp);
1084 afs_PutVolume(volp, READ_LOCK);
1086 /* If we did the InlineBulk RPC pull out the return code */
1088 if ((&statsp[0])->errorCode) {
1089 afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
1090 AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL);
1091 code = (&statsp[0])->errorCode;
1097 osi_FreeLargeSpace(statMemp);
1098 osi_FreeLargeSpace(cbfMemp);
1102 /* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
1103 #ifdef AFS_DARWIN80_ENV
/* nonzero => the lookup path may use afs_DoBulkStat for prefetching.
 * NOTE(review): compile-time constant here; no runtime toggle visible
 * in this view. */
1106 static int AFSDOBULK = 1;
1111 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct AFS_UCRED *acred, int opflag, int wantparent)
1112 #elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
1113 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct pathname *pnp, int flags, struct vnode *rdir, struct AFS_UCRED *acred)
1114 #elif defined(UKERNEL)
1115 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct AFS_UCRED *acred, int flags)
1117 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct AFS_UCRED *acred)
1120 struct vrequest treq;
1122 register struct vcache *tvc = 0;
1123 register afs_int32 code;
1124 register afs_int32 bulkcode = 0;
1125 int pass = 0, hit = 0;
1127 extern afs_int32 afs_mariner; /*Writing activity to log? */
1128 afs_hyper_t versionNo;
1129 int no_read_access = 0;
1130 struct sysname_info sysState; /* used only for @sys checking */
1131 int dynrootRetry = 1;
1132 struct afs_fakestat_state fakestate;
1133 int tryEvalOnly = 0;
1134 OSI_VC_CONVERT(adp);
1136 AFS_STATCNT(afs_lookup);
1137 afs_InitFakeStat(&fakestate);
1139 if ((code = afs_InitReq(&treq, acred)))
1143 ndp->ni_dvp = AFSTOV(adp);
1144 #endif /* AFS_OSF_ENV */
1146 #if defined(AFS_DARWIN_ENV)
1147 /* Workaround for MacOSX Finder, which tries to look for
1148 * .DS_Store and Contents under every directory.
1150 if (afs_fakestat_enable && adp->mvstat == 1) {
1151 if (strcmp(aname, ".DS_Store") == 0)
1153 if (strcmp(aname, "Contents") == 0)
1159 code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
1161 code = afs_EvalFakeStat(&adp, &fakestate, &treq);
1162 if (tryEvalOnly && adp->mvstat == 1)
1167 *avcp = NULL; /* Since some callers don't initialize it */
1169 /* come back to here if we encounter a non-existent object in a read-only
1170 * volume's directory */
1173 *avcp = NULL; /* Since some callers don't initialize it */
1176 if (!(adp->states & CStatd)) {
1177 if ((code = afs_VerifyVCache2(adp, &treq))) {
1183 /* watch for ".." in a volume root */
1184 if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
1185 /* looking up ".." in root via special hacks */
1186 if (adp->mvid == (struct VenusFid *)0 || adp->mvid->Fid.Volume == 0) {
1188 extern struct vcache *afs_globalVp;
1189 if (adp == afs_globalVp) {
1190 struct vnode *rvp = AFSTOV(adp);
1192 ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
1193 ndp->ni_dvp = ndp->ni_vp;
1203 /* otherwise we have the fid here, so we use it */
1204 tvc = afs_GetVCache(adp->mvid, &treq, NULL, NULL);
1205 afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT, ICL_TYPE_FID, adp->mvid,
1206 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, code);
1208 code = (tvc ? 0 : ENOENT);
1210 if (tvc && !VREFCOUNT_GT(tvc, 0)) {
1214 /*printf("LOOKUP GETVCDOTDOT -> %d\n", code); */
1219 /* now check the access */
1220 if (treq.uid != adp->last_looker) {
1221 if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
1226 adp->last_looker = treq.uid;
1229 /* Check for read access as well. We need read access in order to
1230 * stat files, but not to stat subdirectories. */
1231 if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
1234 /* special case lookup of ".". Can we check for it sooner in this code,
1235 * for instance, way up before "redo:" ??
1236 * I'm not fiddling with the LRUQ here, either, perhaps I should, or else
1237 * invent a lightweight version of GetVCache.
1239 if (aname[0] == '.' && !aname[1]) { /* special case */
1240 ObtainReadLock(&afs_xvcache);
1242 ReleaseReadLock(&afs_xvcache);
1243 #ifdef AFS_DARWIN80_ENV
1244 vnode_get(AFSTOV(adp));
1249 if (adp && !VREFCOUNT_GT(adp, 0)) {
1255 Check_AtSys(adp, aname, &sysState, &treq);
1256 tname = sysState.name;
1258 /* 1st Check_AtSys and lookup by tname is required here, for now,
1259 * because the dnlc is *not* told to remove entries for the parent
1260 * dir of file/dir op that afs_LocalHero likes, but dnlc is informed
1261 * if the cached entry for the parent dir is invalidated for a
1263 * Otherwise, we'd be able to do a dnlc lookup on an entry ending
1264 * w/@sys and know the dnlc was consistent with reality. */
1265 tvc = osi_dnlc_lookup(adp, tname, WRITE_LOCK);
1266 *avcp = tvc; /* maybe wasn't initialized, but it is now */
1268 if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
1269 /* need read access on dir to stat non-directory / non-link */
1275 #ifdef AFS_LINUX22_ENV
1276 if (tvc->mvstat == 2) { /* we don't trust the dnlc for root vcaches */
1277 AFS_RELE(AFSTOV(tvc));
1284 #else /* non - LINUX */
1288 #endif /* linux22 */
1291 { /* sub-block just to reduce stack usage */
1292 register struct dcache *tdc;
1293 afs_size_t dirOffset, dirLen;
1294 struct VenusFid tfid;
1296 /* now we have to lookup the next fid */
1298 afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
1300 *avcp = NULL; /* redundant, but harmless */
1305 /* now we will just call dir package with appropriate inode.
1306 * Dirs are always fetched in their entirety for now */
1307 ObtainReadLock(&adp->lock);
1308 ObtainReadLock(&tdc->lock);
1311 * Make sure that the data in the cache is current. There are two
1312 * cases we need to worry about:
1313 * 1. The cache data is being fetched by another process.
1314 * 2. The cache data is no longer valid
1316 while ((adp->states & CStatd)
1317 && (tdc->dflags & DFFetching)
1318 && hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1319 ReleaseReadLock(&tdc->lock);
1320 ReleaseReadLock(&adp->lock);
1321 afs_osi_Sleep(&tdc->validPos);
1322 ObtainReadLock(&adp->lock);
1323 ObtainReadLock(&tdc->lock);
1325 if (!(adp->states & CStatd)
1326 || !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1327 ReleaseReadLock(&tdc->lock);
1328 ReleaseReadLock(&adp->lock);
1330 if (tname && tname != aname)
1331 osi_FreeLargeSpace(tname);
1335 /* Save the version number for when we call osi_dnlc_enter */
1336 hset(versionNo, tdc->f.versionNo);
1339 * check for, and handle "@sys" if it's there. We should be able
1340 * to avoid the alloc and the strcpy with a little work, but it's
1341 * not pressing. If there aren't any remote users (ie, via the
1342 * NFS translator), we have a slightly easier job.
1343 * the faster way to do this is to check for *aname == '@' and if
1344 * it's there, check for @sys, otherwise, assume there's no @sys
1345 * then, if the lookup fails, check for .*@sys...
1347 /* above now implemented by Check_AtSys and Next_AtSys */
1349 /* lookup the name in the appropriate dir, and return a cache entry
1350 * on the resulting fid */
1352 afs_dir_LookupOffset(tdc, sysState.name, &tfid.Fid,
1355 /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
1356 while (code == ENOENT && Next_AtSys(adp, &treq, &sysState))
1358 afs_dir_LookupOffset(tdc, sysState.name, &tfid.Fid,
1360 tname = sysState.name;
1362 ReleaseReadLock(&tdc->lock);
1365 if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
1366 ReleaseReadLock(&adp->lock);
1368 if (tname[0] == '.')
1369 afs_LookupAFSDB(tname + 1);
1371 afs_LookupAFSDB(tname);
1372 if (tname && tname != aname)
1373 osi_FreeLargeSpace(tname);
1376 ReleaseReadLock(&adp->lock);
1379 /* new fid has same cell and volume */
1380 tfid.Cell = adp->fid.Cell;
1381 tfid.Fid.Volume = adp->fid.Fid.Volume;
1382 afs_Trace4(afs_iclSetp, CM_TRACE_LOOKUP, ICL_TYPE_POINTER, adp,
1383 ICL_TYPE_STRING, tname, ICL_TYPE_FID, &tfid,
1384 ICL_TYPE_INT32, code);
1387 if (code != ENOENT) {
1388 printf("LOOKUP dirLookupOff -> %d\n", code);
1393 /* prefetch some entries, if the dir is currently open. The variable
1394 * dirCookie tells us where to start prefetching from.
1396 if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign)
1397 && !afs_IsDynroot(adp)) {
1399 /* if the entry is not in the cache, or is in the cache,
1400 * but hasn't been statd, then do a bulk stat operation.
1404 ObtainReadLock(&afs_xvcache);
1405 tvc = afs_FindVCache(&tfid, &retry, 0 /* !stats,!lru */ );
1406 ReleaseReadLock(&afs_xvcache);
1407 } while (tvc && retry);
1409 if (!tvc || !(tvc->states & CStatd))
1410 bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
1414 /* if the vcache isn't usable, release it */
1415 if (tvc && !(tvc->states & CStatd)) {
1424 /* now get the status info, if we don't already have it */
1425 /* This is kind of weird, but we might wind up accidentally calling
1426 * RXAFS_Lookup because we happened upon a file which legitimately
1427 * has a 0 uniquifier. That is the result of allowing unique to wrap
1428 * to 0. This was fixed in AFS 3.4. For CForeign, Unique == 0 means that
1429 * the file has not yet been looked up.
1432 afs_int32 cached = 0;
1433 if (!tfid.Fid.Unique && (adp->states & CForeign)) {
1434 tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
1436 if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
1437 tvc = afs_GetVCache(&tfid, &treq, &cached, NULL);
1440 } /* sub-block just to reduce stack usage */
1443 int force_eval = afs_fakestat_enable ? 0 : 1;
1445 if (adp->states & CForeign)
1446 tvc->states |= CForeign;
1447 tvc->parentVnode = adp->fid.Fid.Vnode;
1448 tvc->parentUnique = adp->fid.Fid.Unique;
1449 tvc->states &= ~CBulkStat;
1451 if (afs_fakestat_enable == 2 && tvc->mvstat == 1) {
1452 ObtainSharedLock(&tvc->lock, 680);
1453 if (!tvc->linkData) {
1454 UpgradeSToWLock(&tvc->lock, 681);
1455 code = afs_HandleLink(tvc, &treq);
1456 ConvertWToRLock(&tvc->lock);
1458 ConvertSToRLock(&tvc->lock);
1461 if (!code && !afs_strchr(tvc->linkData, ':'))
1463 ReleaseReadLock(&tvc->lock);
1465 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1466 if (!(flags & AFS_LOOKUP_NOEVAL))
1467 /* don't eval mount points */
1468 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1469 if (tvc->mvstat == 1 && force_eval) {
1470 /* a mt point, possibly unevaluated */
1471 struct volume *tvolp;
1473 ObtainWriteLock(&tvc->lock, 133);
1474 code = EvalMountPoint(tvc, adp, &tvolp, &treq);
1475 ReleaseWriteLock(&tvc->lock);
1480 afs_PutVolume(tvolp, WRITE_LOCK);
1484 /* next, we want to continue using the target of the mt point */
1485 if (tvc->mvid && (tvc->states & CMValid)) {
1487 /* now lookup target, to set .. pointer */
1488 afs_Trace2(afs_iclSetp, CM_TRACE_LOOKUP1,
1489 ICL_TYPE_POINTER, tvc, ICL_TYPE_FID,
1491 uvc = tvc; /* remember for later */
1493 if (tvolp && (tvolp->states & VForeign)) {
1494 /* XXXX tvolp has ref cnt on but not locked! XXX */
1496 afs_GetRootVCache(tvc->mvid, &treq, NULL, tvolp);
1498 tvc = afs_GetVCache(tvc->mvid, &treq, NULL, NULL);
1500 afs_PutVCache(uvc); /* we're done with it */
1505 afs_PutVolume(tvolp, WRITE_LOCK);
1510 /* now, if we came via a new mt pt (say because of a new
1511 * release of a R/O volume), we must reevaluate the ..
1512 * ptr to point back to the appropriate place */
1514 ObtainWriteLock(&tvc->lock, 134);
1515 if (tvc->mvid == NULL) {
1516 tvc->mvid = (struct VenusFid *)
1517 osi_AllocSmallSpace(sizeof(struct VenusFid));
1519 /* setup backpointer */
1520 *tvc->mvid = tvolp->dotdot;
1521 ReleaseWriteLock(&tvc->lock);
1522 afs_PutVolume(tvolp, WRITE_LOCK);
1528 afs_PutVolume(tvolp, WRITE_LOCK);
1533 if (tvc && !VREFCOUNT_GT(tvc, 0)) {
1538 /* if we get here, we found something in a directory that couldn't
1539 * be located (a Multics "connection failure"). If the volume is
1540 * read-only, we try flushing this entry from the cache and trying
1544 tv = afs_GetVolume(&adp->fid, &treq, READ_LOCK);
1546 if (tv->states & VRO) {
1547 pass = 1; /* try this *once* */
1548 ObtainWriteLock(&afs_xcbhash, 495);
1549 afs_DequeueCallback(adp);
1550 /* re-stat to get later version */
1551 adp->states &= ~CStatd;
1552 ReleaseWriteLock(&afs_xcbhash);
1553 osi_dnlc_purgedp(adp);
1554 afs_PutVolume(tv, READ_LOCK);
1557 afs_PutVolume(tv, READ_LOCK);
1564 /* put the network buffer back, if need be */
1565 if (tname != aname && tname)
1566 osi_FreeLargeSpace(tname);
1569 /* Handle RENAME; only need to check rename "." */
1570 if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
1571 if (!FidCmp(&(tvc->fid), &(adp->fid))) {
1572 afs_PutVCache(*avcp);
1574 afs_PutFakeStat(&fakestate);
1575 return afs_CheckCode(EISDIR, &treq, 18);
1578 #endif /* AFS_OSF_ENV */
1581 afs_AddMarinerName(aname, tvc);
1583 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1584 if (!(flags & AFS_LOOKUP_NOEVAL))
1585 /* Here we don't enter the name into the DNLC because we want the
1586 * evaluated mount dir to be there (the vcache for the mounted volume)
1587 * rather than the vc of the mount point itself. we can still find the
1588 * mount point's vc in the vcache by its fid. */
1589 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1591 osi_dnlc_enter(adp, aname, tvc, &versionNo);
1593 #ifdef AFS_LINUX20_ENV
1594 /* So Linux inode cache is up to date. */
1595 code = afs_VerifyVCache(tvc, &treq);
1597 afs_PutFakeStat(&fakestate);
1598 return 0; /* can't have been any errors if hit and !code */
1605 code = afs_CheckCode(code, &treq, 19);
1607 /* If there is an error, make sure *avcp is null.
1608 * Alphas panic otherwise - defect 10719.
1613 afs_PutFakeStat(&fakestate);