2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
18 * AFS_EQ_ATSYS (macro)
22 #include <afsconfig.h>
23 #include "../afs/param.h"
27 #include "../afs/sysincludes.h" /* Standard vendor system headers */
28 #include "../afs/afsincludes.h" /* Afs-based standard headers */
29 #include "../afs/afs_stats.h" /* statistics */
30 #include "../afs/afs_cbqueue.h"
31 #include "../afs/nfsclient.h"
32 #include "../afs/exporter.h"
33 #include "../afs/afs_osidnlc.h"
37 * A few definitions. This is until we have a proper header file
38 * which has prototypes for all functions
41 extern struct DirEntry * afs_dir_GetBlob();
43 extern afs_rwlock_t afs_xvcache;
44 extern afs_rwlock_t afs_xcbhash;
45 extern struct afs_exporter *afs_nfsexporter;
46 extern char *afs_sysname;
47 extern char *afs_sysnamelist[];
48 extern int afs_sysnamecount;
49 extern struct afs_q VLRU; /*vcache LRU*/
50 #ifdef AFS_LINUX22_ENV
51 extern struct inode_operations afs_symlink_iops, afs_dir_iops;
55 afs_int32 afs_bulkStatsDone;
56 static int bulkStatCounter = 0; /* counter for bulk stat seq. numbers */
57 int afs_fakestat_enable = 0;
/* AFS_EQ_ATSYS(name): nonzero iff name is exactly the 4-character string "@sys"
 * (checks the four characters and the terminating NUL). */
60 /* this would be faster if it did comparison as int32word, but would be
61 * dependent on byte-order and alignment, and I haven't figured out
62 * what "@sys" is in binary... */
63 #define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
/* NOTE(review): this listing is elided (original line numbers jump); the lines
 * below are fragments of two helpers whose signatures/bodies are mostly missing. */
/* Fragment: K&R parameter declarations of a string-concatenation helper
 * (presumably afs_strcat(s1, s2) — TODO confirm against the full source). */
67 register char *s1, *s2;
/* Fragment of afs_index(a, c): a strchr-style scan that returns a pointer to
 * the first occurrence of character c in string a (AFS_STATCNT names it). */
83 register char *a, c; {
85 AFS_STATCNT(afs_index);
/* tc is presumably the character currently being scanned — the loop that
 * advances it is elided here. */
87 if (tc == c) return a;
93 /* call under write lock, evaluate mvid field from a mt pt.
94 * avc is the vnode of the mount point object; must be write-locked.
95 * advc is the vnode of the containing directory (optional; if NULL and
96 * EvalMountPoint succeeds, caller must initialize *avolpp->dotdot)
97 * avolpp is where we return a pointer to the volume named by the mount pt, if success
98 * areq is the identity of the caller.
100 * NOTE: this function returns a held volume structure in *volpp if it returns 0!
/* NOTE(review): the listing below is elided (original line numbers skip).
 * Missing lines include the declarations of code, type, tcell and buf, the
 * ':'-splitting of the cell name out of linkData, and several braces. */
102 EvalMountPoint(avc, advc, avolpp, areq)
103 register struct vcache *avc;
104 struct volume **avolpp;
105 struct vcache *advc; /* the containing dir */
106 register struct vrequest *areq;
109 struct volume *tvp = 0;
110 struct VenusFid tfid;
112 char *cpos, *volnamep;
114 afs_int32 prefetchRO; /* 1=>No 2=>Yes */
115 afs_int32 mtptCell, assocCell, hac=0;
116 afs_int32 samecell, roname, len;
118 AFS_STATCNT(EvalMountPoint);
/* Another thread may have finished evaluating this mount point already. */
120 if (avc->mvid && (avc->states & CMValid)) return 0; /* done while racing */
122 *avolpp = (struct volume *)0;
/* afs_HandleLink fills in avc->linkData (the mount point target string). */
123 code = afs_HandleLink(avc, areq);
124 if (code) return code;
126 /* Determine which cell and volume the mountpoint goes to */
127 type = avc->linkData[0]; /* '#'=>Regular '%'=>RW */
128 cpos = afs_index(&avc->linkData[1], ':'); /* if cell name present */
/* If a cell name was present, look it up by name; otherwise (elided else
 * branch) inherit the cell of the mount point itself. */
132 tcell = afs_GetCellByName(&avc->linkData[1], READ_LOCK);
135 volnamep = &avc->linkData[1];
136 tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
138 if (!tcell) return ENODEV;
140 mtptCell = tcell->cell; /* The cell for the mountpoint */
142 hac = 1; /* has associated cell */
143 assocCell = tcell->lcellp->cell; /* The associated cell */
145 afs_PutCell(tcell, READ_LOCK);
147 /* Is volume name a "<n>.backup" or "<n>.readonly" name */
148 len = strlen(volnamep);
149 roname = ((len > 9) && (strcmp(&volnamep[len - 9],".readonly") == 0)) ||
150 ((len > 7) && (strcmp(&volnamep[len - 7],".backup") == 0));
152 /* When we cross mountpoint, do we stay in the same cell */
153 samecell = (avc->fid.Cell == mtptCell) || (hac && (avc->fid.Cell == assocCell));
155 /* Decide whether to prefetch the RO. Also means we want the RO.
156 * If this is a regular mountpoint with a RW volume name and
157 * we cross a cell boundary -or- start from a RO volume, then we will
158 * want to prefetch the RO volume when we get the RW below.
160 if ( (type == '#') && !roname && (!samecell || (avc->states & CRO)) ) {
161 prefetchRO = 2; /* Yes, prefetch the RO */
163 prefetchRO = 1; /* No prefetch of the RO */
166 /* Get the volume struct. Unless this volume name has ".readonly" or
167 * ".backup" in it, this will get the volume struct for the RW volume.
168 * The RO volume will be prefetched if requested (but not returned).
170 tvp = afs_GetVolumeByName(volnamep, mtptCell, prefetchRO, areq, WRITE_LOCK);
172 /* If no volume was found in this cell, try the associated linked cell */
173 if (!tvp && hac && areq->volumeError) {
174 tvp = afs_GetVolumeByName(volnamep, assocCell, prefetchRO, areq, WRITE_LOCK);
177 /* Still not found. If we are looking for the RO, then perhaps the RW
178 * doesn't exist? Try adding ".readonly" to volname and look for that.
179 * Don't know why we do this. Would have still found it in above call - jpm.
181 if (!tvp && (prefetchRO == 2)) {
/* NOTE(review): buf's declaration (and its size) is elided from this listing;
 * confirm it can hold volnamep plus ".readonly" before trusting this strcpy. */
182 strcpy(buf, volnamep);
183 afs_strcat(buf, ".readonly");
185 tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
187 /* Try the associated linked cell if failed */
188 if (!tvp && hac && areq->volumeError) {
189 tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
193 if (!tvp) return ENODEV; /* Couldn't find the volume */
195 /* Don't cross mountpoint from a BK to a BK volume */
196 if ((avc->states & CBackup) && (tvp->states & VBackup)) {
197 afs_PutVolume(tvp, WRITE_LOCK);
201 /* If we want (prefetched) the RO and it exists, then drop the
202 * RW volume and get the RO. Otherwise, go with the RW.
204 if ((prefetchRO == 2) && tvp->roVol) {
205 tfid.Fid.Volume = tvp->roVol; /* remember RO volume */
206 tfid.Cell = tvp->cell;
207 afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
208 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
209 if (!tvp) return ENODEV; /* oops, can't do it */
/* Record the target volume's root directory (vnode 1, uniquifier 1 by AFS
 * convention) in mvid, and mark the mount point evaluated. */
213 avc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
214 avc->mvid->Cell = tvp->cell;
215 avc->mvid->Fid.Volume = tvp->volume;
216 avc->mvid->Fid.Vnode = 1;
217 avc->mvid->Fid.Unique = 1;
218 avc->states |= CMValid;
220 /* Used to: if the mount point is stored within a backup volume,
221 * then we should only update the parent pointer information if
222 * there's none already set, so as to avoid updating a volume's ..
223 * info with something in an OldFiles directory.
225 * Next two lines used to be under this if:
227 * if (!(avc->states & CBackup) || tvp->dotdot.Fid.Volume == 0)
229 * Now: update mount point back pointer on every call, so that we handle
230 * multiple mount points better. This way, when du tries to go back
231 * via chdir(".."), it will end up exactly where it started, yet
232 * cd'ing via a new path to a volume will reset the ".." pointer
235 tvp->mtpoint = avc->fid; /* setup back pointer to mtpoint */
236 if (advc) tvp->dotdot = advc->fid;
/* afs_InitFakeStat: reset an afs_fakestat_state object to its initial state. */
245 * Must be called on an afs_fakestat_state object before calling
246 * afs_EvalFakeStat or afs_PutFakeStat. Calling afs_PutFakeStat
247 * without calling afs_EvalFakeStat is legal, as long as this
248 * function is called.
252 afs_InitFakeStat(state)
253 struct afs_fakestat_state *state;
/* NOTE(review): elided lines presumably also initialize state->valid and
 * state->did_eval (both asserted in afs_EvalFakeStat/afs_PutFakeStat) —
 * confirm against the full source. */
257 state->need_release = 0;
264 * Automatically does the equivalent of EvalMountPoint for vcache entries
265 * which are mount points. Remembers enough state to properly release
266 * the volume root vcache when afs_PutFakeStat() is called.
268 * State variable must be initialized by afs_InitFakeStat() beforehand.
270 * Returns 0 when everything succeeds and *avcp points to the vcache entry
271 * that should be used for the real vnode operation. Returns non-zero if
272 * something goes wrong and the error code should be returned to the user.
/* NOTE(review): listing is elided; tvc's initialization from *avcp, several
 * early-return branches, and the final substitution of *avcp with the volume
 * root vcache are among the missing lines. */
275 afs_EvalFakeStat(avcp, state, areq)
276 struct vcache **avcp;
277 struct afs_fakestat_state *state;
278 struct vrequest *areq;
280 struct vcache *tvc, *root_vp;
281 struct volume *tvolp = NULL;
/* Sanity checks: state must come from afs_InitFakeStat and be unused. */
284 osi_Assert(state->valid == 1);
285 osi_Assert(state->did_eval == 0);
/* Fake-stat of mount points is an optional behavior (afs_fakestat_enable). */
287 if (!afs_fakestat_enable)
/* mvstat == 1 marks a mount point vcache; anything else needs no work. */
290 if (tvc->mvstat != 1)
293 /* Is the call to VerifyVCache really necessary? */
294 code = afs_VerifyVCache(tvc, areq);
297 if (!state->nonblock) {
298 ObtainWriteLock(&tvc->lock, 599);
299 code = EvalMountPoint(tvc, NULL, &tvolp, areq);
300 ReleaseWriteLock(&tvc->lock);
/* Fill in the volume's ".." with the mount point's parent directory, since
 * we passed advc == NULL to EvalMountPoint above. */
304 tvolp->dotdot = tvc->fid;
305 tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
306 tvolp->dotdot.Fid.Unique = tvc->parentUnique;
309 if (tvc->mvid && (tvc->states & CMValid)) {
310 if (state->nonblock) {
/* Nonblocking path: look the root vcache up without fetching; retry loop
 * handles low-level races reported via 'retry'. */
315 ObtainWriteLock(&afs_xvcache, 597);
316 root_vp = afs_FindVCache(tvc->mvid, 0, 0, &retry, 0);
317 if (root_vp && retry) {
318 ReleaseWriteLock(&afs_xvcache);
319 afs_PutVCache(root_vp, 0);
321 } while (root_vp && retry);
322 ReleaseWriteLock(&afs_xvcache);
/* Blocking path: may go to the server to fetch the volume root. */
324 root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL, WRITE_LOCK);
327 code = state->nonblock ? 0 : ENOENT;
331 /* Is this always kosher? Perhaps we should instead use
332 * NBObtainWriteLock to avoid potential deadlock.
334 ObtainWriteLock(&root_vp->lock, 598);
336 root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
337 *root_vp->mvid = tvolp->dotdot;
338 ReleaseWriteLock(&root_vp->lock);
/* Remember the held root vcache so afs_PutFakeStat can release it. */
340 state->need_release = 1;
341 state->root_vp = root_vp;
345 code = state->nonblock ? 0 : ENOENT;
350 afs_PutVolume(tvolp, WRITE_LOCK);
355 * afs_TryEvalFakeStat
357 * Same as afs_EvalFakeStat, but tries not to talk to remote servers
358 * and only evaluate the mount point if all the data is already in
361 * Returns 0 if everything succeeds and *avcp points to a valid
362 * vcache entry (possibly evaluated).
365 afs_TryEvalFakeStat(avcp, state, areq)
366 struct vcache **avcp;
367 struct afs_fakestat_state *state;
368 struct vrequest *areq;
/* NOTE(review): an elided line here presumably sets state->nonblock = 1,
 * which is what makes the delegated call below non-blocking — confirm. */
371 return afs_EvalFakeStat(avcp, state, areq);
377 * Perform any necessary cleanup at the end of a vnode op, given that
378 * afs_InitFakeStat was previously called with this state.
/* Releases the volume-root vcache held by afs_EvalFakeStat, if any. */
381 afs_PutFakeStat(state)
382 struct afs_fakestat_state *state;
384 osi_Assert(state->valid == 1);
385 if (state->need_release)
386 afs_PutVCache(state->root_vp, 0);
/* ENameOK fragment (signature/return elided): returns 0 ("not OK") when the
 * name ends in "@sys", i.e. names needing sysname substitution are rejected. */
391 register char *aname; {
395 AFS_STATCNT(ENameOK);
/* NOTE(review): tlen's declaration is elided from this listing. */
396 tlen = strlen(aname);
397 if (tlen >= 4 && strcmp(aname+tlen-4, "@sys") == 0) return 0;
/* afs_getsysname: copy the caller's @sys expansion into bufp.
 * For local users this is simply afs_sysname; for NFS-translator users the
 * exporter supplies a per-user sysname via EXP_SYSNAME.
 * NOTE(review): the listing is elided — the return statements (the function
 * appears to return an index used by Check_AtSys/Next_AtSys) and the bufp
 * parameter declaration are missing; confirm against the full source. */
401 afs_getsysname(areq, adp, bufp)
402 register struct vrequest *areq;
403 register struct vcache *adp;
/* NOTE(review): 'sysname' is not referenced in the visible lines. */
406 static char sysname[MAXSYSNAME];
407 register struct unixuser *au;
408 register afs_int32 error;
410 if (!afs_nfsexporter) {
411 strcpy(bufp, afs_sysname);
414 AFS_STATCNT(getsysname);
415 au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
/* Ask the exporter for this user's sysname; on failure (elided branch) fall
 * back to the literal "@sys" or the global afs_sysname. */
418 error = EXP_SYSNAME(au->exporter, (char *)0, bufp);
420 strcpy(bufp, "@sys");
423 strcpy(bufp, afs_sysname);
/* Check_AtSys: initialize a sysname_info state for name lookup.
 * If aname is exactly "@sys", allocate a buffer and fill it with the first
 * sysname expansion; otherwise (elided branch) record aname untranslated. */
428 Check_AtSys(avc, aname, state, areq)
429 register struct vcache *avc;
431 struct sysname_info *state;
432 struct vrequest *areq;
434 if (AFS_EQ_ATSYS(aname)) {
/* NOTE(review): allocates via osi_AllocLargeSpace but sizes the request with
 * AFS_SMALLOCSIZ — verify this mismatch against the allocator's contract. */
436 state->name = (char *) osi_AllocLargeSpace(AFS_SMALLOCSIZ);
438 state->index = afs_getsysname(areq, avc, state->name);
/* Next_AtSys: advance state to the next @sys expansion from afs_sysnamelist.
 * Returns 0 when there are no (more) expansions to try; otherwise (elided)
 * rewrites state->name in place with the next sysname. */
447 Next_AtSys(avc, areq, state)
448 register struct vcache *avc;
449 struct vrequest *areq;
450 struct sysname_info *state;
/* index == -1 means the name had no @sys component at all. */
452 if (state->index == -1)
453 return 0; /* No list */
455 /* Check for the initial state of aname != "@sys" in Check_AtSys*/
456 if (state->offset == -1 && state->allocked == 0) {
457 register char *tname;
458 /* Check for .*@sys */
459 for (tname=state->name; *tname; tname++)
460 /*Move to the end of the string*/;
/* Name ends in "@sys" with a non-empty prefix: remember where the suffix
 * starts and build a mutable copy of the prefix. */
461 if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname-4))) {
462 state->offset = (tname - 4) - state->name;
463 tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
/* NOTE(review): strncpy copies exactly 'offset' bytes and does not NUL-
 * terminate; the elided following lines presumably install the sysname
 * suffix (and termination) at tname+offset — confirm. */
464 strncpy(tname, state->name, state->offset);
467 state->index = afs_getsysname(areq, avc, state->name+state->offset);
470 return 0; /* .*@sys doesn't match either */
471 } else if (++(state->index) >= afs_sysnamecount
472 || !afs_sysnamelist[state->index])
473 return 0; /* end of list */
/* Overwrite the @sys suffix with the next sysname from the global list. */
474 strcpy(state->name+state->offset, afs_sysnamelist[state->index]);
/* Platform-dependent prototypes for BlobScan: the directory "inode" handle
 * type varies by kernel (ino64_t, long, or afs_int32). The matching #else /
 * #endif lines are elided from this listing. */
478 #if (defined(AFS_SGI62_ENV) || defined(AFS_SUN57_64BIT_ENV))
479 extern int BlobScan(ino64_t *afile, afs_int32 ablob);
481 #if defined AFS_LINUX_64BIT_KERNEL
482 extern int BlobScan(long *afile, afs_int32 ablob);
484 extern int BlobScan(afs_int32 *afile, afs_int32 ablob);
489 /* called with an unlocked directory and directory cookie. Areqp
490 * describes who is making the call.
491 * Scans the next N (about 30, typically) directory entries, and does
492 * a bulk stat call to stat them all.
494 * Must be very careful when merging in RPC responses, since we dont
495 * want to overwrite newer info that was added by a file system mutating
496 * call that ran concurrently with our bulk stat call.
498 * We do that, as described below, by not merging in our info (always
499 * safe to skip the merge) if the status info is valid in the vcache entry.
501 * If adapt ever implements the bulk stat RPC, then this code will need to
502 * ensure that vcaches created for failed RPC's to older servers have the
/* NOTE(review): BStvc appears to be a debugging hook (a globally visible
 * vcache pointer); it is not referenced in the visible lines — confirm. */
505 struct vcache * BStvc = (struct vcache *) 0;
/* NOTE(review): this listing is elided (original line numbers skip): the adp
 * parameter declarations, loop variable i, fidIndex/flagIndex initialization,
 * several closing braces, the 'done:' label, and the final 'return code;' are
 * among the missing lines. Code below is reproduced as-is. */
506 int afs_DoBulkStat(adp, dirCookie, areqp)
509 struct vrequest *areqp;
511 int nentries; /* # of entries to prefetch */
512 int nskip; /* # of slots in the LRU queue to skip */
513 struct vcache *lruvcp; /* vcache ptr of our goal pos in LRU queue */
514 struct dcache *dcp; /* chunk containing the dir block */
515 char *statMemp; /* status memory block */
516 char *cbfMemp; /* callback and fid memory block */
517 afs_size_t temp; /* temp for holding chunk length, &c. */
518 struct AFSFid *fidsp; /* file IDs were collecting */
519 struct AFSCallBack *cbsp; /* call back pointers */
520 struct AFSCallBack *tcbp; /* temp callback ptr */
521 struct AFSFetchStatus *statsp; /* file status info */
522 struct AFSVolSync volSync; /* vol sync return info */
523 struct vcache *tvcp; /* temp vcp */
524 struct afs_q *tq; /* temp queue variable */
525 AFSCBFids fidParm; /* file ID parm for bulk stat */
526 AFSBulkStats statParm; /* stat info parm for bulk stat */
527 int fidIndex; /* which file were stating */
528 struct conn *tcp; /* conn for call */
529 AFSCBs cbParm; /* callback parm for bulk stat */
530 struct server *hostp = 0; /* host we got callback from */
531 long origEvenCBs; /* original # of callbacks for even-fid files */
532 long origOddCBs; /* original # of callbacks for odd-fid files */
533 long origEvenZaps; /* original # of recycles for even-fid files */
534 long origOddZaps; /* original # of recycles for odd-fid files */
535 long startTime; /* time we started the call,
536 * for callback expiration base
538 afs_size_t statSeqNo; /* Value stored in file size to detect races */
539 int code; /* error code */
540 long newIndex; /* new index in the dir */
541 struct DirEntry *dirEntryp; /* dir entry we are examining */
543 struct VenusFid afid; /* file ID we are using now */
544 struct VenusFid tfid; /* another temp. file ID */
545 afs_int32 retry; /* handle low-level SGI MP race conditions */
546 long volStates; /* flags from vol structure */
547 struct volume *volp=0; /* volume ptr */
548 struct VenusFid dotdot;
549 int flagIndex; /* First file with bulk fetch flag set */
550 int inlinebulk=0; /* Did we use InlineBulk RPC or not? */
553 /* first compute some basic parameters. We dont want to prefetch more
554 * than a fraction of the cache in any given call, and we want to preserve
555 * a portion of the LRU queue in any event, so as to avoid thrashing
556 * the entire stat cache (we will at least leave some of it alone).
557 * presently dont stat more than 1/8 the cache in any one call. */
558 nentries = afs_cacheStats / 8;
560 /* dont bother prefetching more than one calls worth of info */
561 if (nentries > AFSCBMAX) nentries = AFSCBMAX;
563 /* heuristic to make sure that things fit in 4K. This means that
564 * we shouldnt make it any bigger than 47 entries. I am typically
565 * going to keep it a little lower, since we don't want to load
566 * too much of the stat cache.
568 if (nentries > 30) nentries = 30;
570 /* now, to reduce the stack size, well allocate two 4K blocks,
571 * one for fids and callbacks, and one for stat info. Well set
572 * up our pointers to the memory from there, too.
574 statMemp = osi_AllocLargeSpace(nentries * sizeof(AFSFetchStatus));
575 statsp = (struct AFSFetchStatus *) statMemp;
576 cbfMemp = osi_AllocLargeSpace(nentries *
577 (sizeof(AFSCallBack) + sizeof(AFSFid)));
578 fidsp = (AFSFid *) cbfMemp;
579 cbsp = (AFSCallBack *) (cbfMemp + nentries * sizeof(AFSFid));
581 /* next, we must iterate over the directory, starting from the specified
582 * cookie offset (dirCookie), and counting out nentries file entries.
583 * We skip files that already have stat cache entries, since we
584 * dont want to bulk stat files that are already in the cache.
587 code = afs_VerifyVCache(adp, areqp);
590 dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
596 /* lock the directory cache entry */
597 ObtainReadLock(&adp->lock);
598 ObtainReadLock(&dcp->lock);
601 * Make sure that the data in the cache is current. There are two
602 * cases we need to worry about:
603 * 1. The cache data is being fetched by another process.
604 * 2. The cache data is no longer valid
606 while ((adp->states & CStatd)
607 && (dcp->dflags & DFFetching)
608 && hsame(adp->m.DataVersion, dcp->f.versionNo)) {
609 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
610 ICL_TYPE_STRING, __FILE__,
611 ICL_TYPE_INT32, __LINE__,
612 ICL_TYPE_POINTER, dcp,
613 ICL_TYPE_INT32, dcp->dflags);
/* Drop both locks while sleeping so the fetcher can make progress. */
614 ReleaseReadLock(&dcp->lock);
615 ReleaseReadLock(&adp->lock);
616 afs_osi_Sleep(&dcp->validPos);
617 ObtainReadLock(&adp->lock);
618 ObtainReadLock(&dcp->lock);
620 if (!(adp->states & CStatd)
621 || !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
622 ReleaseReadLock(&dcp->lock);
623 ReleaseReadLock(&adp->lock);
628 /* Generate a sequence number so we can tell whether we should
629 * store the attributes when processing the response. This number is
630 * stored in the file size when we set the CBulkFetching bit. If the
631 * CBulkFetching is still set and this value hasn't changed, then
632 * we know we were the last to set CBulkFetching bit for this file,
633 * and it is safe to set the status information for this file.
635 statSeqNo = bulkStatCounter++;
637 /* now we have dir data in the cache, so scan the dir page */
640 while (1) { /* Should probably have some constant bound */
641 /* look for first safe entry to examine in the directory. BlobScan
642 * looks for a the 1st allocated dir after the dirCookie slot.
/* Directory blobs are 32 bytes each; >>5 / <<5 convert between byte
 * cookies and blob indices. */
644 newIndex = BlobScan(&dcp->f.inode, (dirCookie>>5));
645 if (newIndex == 0) break;
647 /* remember the updated directory cookie */
648 dirCookie = newIndex << 5;
650 /* get a ptr to the dir entry */
651 dirEntryp =(struct DirEntry *)afs_dir_GetBlob(&dcp->f.inode, newIndex);
652 if (!dirEntryp) break;
654 /* dont copy more than we have room for */
655 if (fidIndex >= nentries) {
656 DRelease((char *) dirEntryp, 0);
660 /* now, if the dir entry looks good, copy it out to our list. Vnode
661 * 0 means deleted, although it should also be free were it deleted.
663 if (dirEntryp->fid.vnode != 0) {
664 /* dont copy entries we have in our cache. This check will
665 * also make us skip "." and probably "..", unless it has
666 * disappeared from the cache since we did our namei call.
668 tfid.Cell = adp->fid.Cell;
669 tfid.Fid.Volume = adp->fid.Fid.Volume;
670 tfid.Fid.Vnode = ntohl(dirEntryp->fid.vnode);
671 tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
674 ObtainWriteLock(&afs_xvcache, 130);
675 tvcp = afs_FindVCache(&tfid, 0, 0, &retry, 0 /* no stats | LRU */);
677 ReleaseWriteLock(&afs_xvcache);
678 afs_PutVCache(tvcp, 0);
680 } while (tvcp && retry);
681 if (!tvcp) { /* otherwise, create manually */
682 tvcp = afs_NewVCache(&tfid, hostp, 0, 0);
683 ObtainWriteLock(&tvcp->lock, 505);
684 ReleaseWriteLock(&afs_xvcache);
685 afs_RemoveVCB(&tfid);
686 ReleaseWriteLock(&tvcp->lock);
688 ReleaseWriteLock(&afs_xvcache);
691 goto done; /* can't happen at present, more's the pity */
693 /* WARNING: afs_DoBulkStat uses the Length field to store a
694 * sequence number for each bulk status request. Under no
695 * circumstances should afs_DoBulkStat store a sequence number
696 * if the new length will be ignored when afs_ProcessFS is
697 * called with new stats. */
699 if (!(tvcp->states & (CStatd|CBulkFetching))
700 && (tvcp->execsOrWriters <= 0)
701 && !afs_DirtyPages(tvcp)
702 && !AFS_VN_MAPPED((vnode_t*)tvcp))
704 if (!(tvcp->states & (CStatd|CBulkFetching))
705 && (tvcp->execsOrWriters <= 0)
706 && !afs_DirtyPages(tvcp))
710 /* this entry doesnt exist in the cache, and is not
711 * already being fetched by someone else, so add it to the
712 * list of file IDs to obtain.
714 * We detect a callback breaking race condition by checking the
715 * CBulkFetching state bit and the value in the file size.
716 * It is safe to set the status only if the CBulkFetching
717 * flag is still set and the value in the file size does
720 * Don't fetch status for dirty files. We need to
721 * preserve the value of the file size. We could
722 * flush the pages, but it wouldn't be worthwhile.
724 memcpy((char *)(fidsp+fidIndex), (char *) &tfid.Fid, sizeof(*fidsp));
725 tvcp->states |= CBulkFetching;
726 tvcp->m.Length = statSeqNo;
729 afs_PutVCache(tvcp, 0);
730 } /* if dir vnode has non-zero entry */
732 /* move to the next dir entry by adding in the # of entries
733 * used by this dir entry.
735 temp = afs_dir_NameBlobs(dirEntryp->name) << 5;
736 DRelease((char *) dirEntryp, 0);
737 if (temp <= 0) break;
739 } /* while loop over all dir entries */
741 /* now release the dir lock and prepare to make the bulk RPC */
742 ReleaseReadLock(&dcp->lock);
743 ReleaseReadLock(&adp->lock);
745 /* release the chunk */
748 /* dont make a null call */
749 if (fidIndex == 0) goto done;
752 /* setup the RPC parm structures */
753 fidParm.AFSCBFids_len = fidIndex;
754 fidParm.AFSCBFids_val = fidsp;
755 statParm.AFSBulkStats_len = fidIndex;
756 statParm.AFSBulkStats_val = statsp;
757 cbParm.AFSCBs_len = fidIndex;
758 cbParm.AFSCBs_val = cbsp;
760 /* start the timer; callback expirations are relative to this */
761 startTime = osi_Time();
763 tcp = afs_Conn(&adp->fid, areqp, SHARED_LOCK);
765 hostp = tcp->srvr->server;
766 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
767 #ifdef RX_ENABLE_LOCKS
769 #endif /* RX_ENABLE_LOCKS */
/* Prefer the InlineBulkStatus RPC; fall back (and remember, via
 * SNO_INLINEBULK) when the server is too old to implement it. */
771 if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
772 code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
774 if (code == RXGEN_OPCODE) {
775 tcp->srvr->server->flags |= SNO_INLINEBULK;
777 code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
783 code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
786 #ifdef RX_ENABLE_LOCKS
788 #endif /* RX_ENABLE_LOCKS */
792 } while (afs_Analyze(tcp, code, &adp->fid, areqp,
793 AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, (struct cell *)0));
795 /* now, if we didnt get the info, bail out. */
798 /* we need vol flags to create the entries properly */
799 dotdot.Fid.Volume = 0;
800 volp = afs_GetVolume(&adp->fid, areqp, READ_LOCK);
802 volStates = volp->states;
803 if (volp->dotdot.Fid.Volume != 0)
804 dotdot = volp->dotdot;
808 /* find the place to merge the info into We do this by skipping
809 * nskip entries in the LRU queue. The more we skip, the more
810 * we preserve, since the head of the VLRU queue is the most recently
814 nskip = afs_cacheStats / 2; /* preserved fraction of the cache */
815 ObtainReadLock(&afs_xvcache);
817 /* actually a serious error, probably should panic. Probably will
818 * panic soon, oh well. */
819 ReleaseReadLock(&afs_xvcache);
820 afs_warnuser("afs_DoBulkStat: VLRU empty!");
823 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
824 refpanic ("Bulkstat VLRU inconsistent");
826 for(tq = VLRU.next; tq != &VLRU; tq = QNext(tq)) {
827 if (--nskip <= 0) break;
828 else if (QNext(QPrev(tq)) != tq) {
830 refpanic ("BulkStat VLRU inconsistent");
833 if (tq != &VLRU) lruvcp = QTOV(tq);
834 else lruvcp = QTOV(VLRU.next);
836 /* now we have to hold this entry, so that it does not get moved
837 * into the free list while we're running. It could still get
838 * moved within the lru queue, but hopefully that will be rare; it
839 * doesn't hurt nearly as much.
842 osi_vnhold(lruvcp, &retry);
843 ReleaseReadLock(&afs_xvcache); /* could be read lock */
847 /* otherwise, merge in the info. We have to be quite careful here,
848 * since we need to ensure that we don't merge old info over newer
849 * stuff in a stat cache entry. We're very conservative here: we don't
850 * do the merge at all unless we ourselves create the stat cache
851 * entry. That's pretty safe, and should work pretty well, since we
852 * typically expect to do the stat cache creation ourselves.
854 * We also have to take into account racing token revocations.
856 for(i=0; i<fidIndex; i++) {
/* Per-file errors from InlineBulkStatus are skipped (elided continue). */
857 if ((&statsp[i])->errorCode)
859 afid.Cell = adp->fid.Cell;
860 afid.Fid.Volume = adp->fid.Fid.Volume;
861 afid.Fid.Vnode = fidsp[i].Vnode;
862 afid.Fid.Unique = fidsp[i].Unique;
865 ObtainReadLock(&afs_xvcache);
866 tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0/* !stats&!lru*/);
867 ReleaseReadLock(&afs_xvcache);
868 } while (tvcp && retry);
870 /* The entry may no longer exist */
875 /* now we have the entry held, but we need to fill it in */
876 ObtainWriteLock(&tvcp->lock,131);
878 /* if CBulkFetching is not set, or if the file size no longer
879 * matches the value we placed there when we set the CBulkFetching
880 * flag, then someone else has done something with this node,
881 * and we may not have the latest status information for this
882 * file. Leave the entry alone.
884 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
886 ReleaseWriteLock(&tvcp->lock);
887 afs_PutVCache(tvcp, 0);
891 /* now copy ".." entry back out of volume structure, if necessary */
892 if (tvcp->mvstat == 2 && (dotdot.Fid.Volume != 0)) {
894 tvcp->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
895 *tvcp->mvid = dotdot;
/* Move this entry to the preserved position in the VLRU, with heavy
 * consistency checking before and after the splice. */
898 ObtainWriteLock(&afs_xvcache,132);
899 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
900 refpanic ("Bulkstat VLRU inconsistent2");
902 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
903 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
904 refpanic ("Bulkstat VLRU inconsistent4");
905 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
906 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
907 refpanic ("Bulkstat VLRU inconsistent5");
909 if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
910 QRemove(&tvcp->vlruq);
911 QAdd(&lruvcp->vlruq, &tvcp->vlruq);
914 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
915 refpanic ("Bulkstat VLRU inconsistent3");
917 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
918 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
919 refpanic ("Bulkstat VLRU inconsistent5");
920 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
921 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
922 refpanic ("Bulkstat VLRU inconsistent6");
923 ReleaseWriteLock(&afs_xvcache);
925 ObtainWriteLock(&afs_xcbhash, 494);
927 /* We need to check the flags again. We may have missed
928 * something while we were waiting for a lock.
930 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
932 ReleaseWriteLock(&tvcp->lock);
933 ReleaseWriteLock(&afs_xcbhash);
934 afs_PutVCache(tvcp, 0);
938 /* now merge in the resulting status back into the vnode.
939 * We only do this if the entry looks clear.
941 afs_ProcessFS(tvcp, &statsp[i], areqp);
942 #ifdef AFS_LINUX22_ENV
943 /* overwrite the ops if it's a directory or symlink. */
944 if (vType(tvcp) == VDIR)
945 tvcp->v.v_op = &afs_dir_iops;
946 else if (vType(tvcp) == VLNK)
947 tvcp->v.v_op = &afs_symlink_iops;
950 /* do some accounting for bulk stats: mark this entry as
951 * loaded, so we can tell if we use it before it gets
954 tvcp->states |= CBulkStat;
955 tvcp->states &= ~CBulkFetching;
959 /* merge in vol info */
960 if (volStates & VRO) tvcp->states |= CRO;
961 if (volStates & VBackup) tvcp->states |= CBackup;
962 if (volStates & VForeign) tvcp->states |= CForeign;
964 /* merge in the callback info */
965 tvcp->states |= CTruth;
967 /* get ptr to the callback we are interested in */
/* NOTE(review): the assignment of tcbp (= &cbsp[i], presumably) is elided. */
970 if (tcbp->ExpirationTime != 0) {
971 tvcp->cbExpires = tcbp->ExpirationTime+startTime;
972 tvcp->callback = hostp;
973 tvcp->states |= CStatd;
974 afs_QueueCallback(tvcp, CBHash(tcbp->ExpirationTime), volp);
976 else if (tvcp->states & CRO) {
977 /* ordinary callback on a read-only volume -- AFS 3.2 style */
978 tvcp->cbExpires = 3600+startTime;
979 tvcp->callback = hostp;
980 tvcp->states |= CStatd;
981 afs_QueueCallback(tvcp, CBHash(3600), volp);
/* No callback at all: invalidate the entry (elided else branch). */
985 tvcp->states &= ~(CStatd|CUnique);
986 afs_DequeueCallback(tvcp);
987 if ((tvcp->states & CForeign) || (vType(tvcp) == VDIR))
988 osi_dnlc_purgedp (tvcp); /* if it (could be) a directory */
990 ReleaseWriteLock(&afs_xcbhash);
992 ReleaseWriteLock(&tvcp->lock);
993 /* finally, we're done with the entry */
994 afs_PutVCache(tvcp, 0);
995 } /* for all files we got back */
997 /* finally return the pointer into the LRU queue */
998 afs_PutVCache(lruvcp, 0);
1001 /* Be sure to turn off the CBulkFetching flags */
1002 for(i=flagIndex; i<fidIndex; i++) {
1003 afid.Cell = adp->fid.Cell;
1004 afid.Fid.Volume = adp->fid.Fid.Volume;
1005 afid.Fid.Vnode = fidsp[i].Vnode;
1006 afid.Fid.Unique = fidsp[i].Unique;
1009 ObtainReadLock(&afs_xvcache);
1010 tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0/* !stats&!lru*/);
1011 ReleaseReadLock(&afs_xvcache);
1012 } while (tvcp && retry);
/* Clear the flag only if we still own it (seq number still in Length). */
1014 && (tvcp->states & CBulkFetching)
1015 && (tvcp->m.Length == statSeqNo)) {
1016 tvcp->states &= ~CBulkFetching;
1019 afs_PutVCache(tvcp, 0);
1023 afs_PutVolume(volp, READ_LOCK);
1025 /* If we did the InlineBulk RPC pull out the return code */
1027 if ((&statsp[0])->errorCode) {
1028 afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
1029 AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
1031 code = (&statsp[0])->errorCode;
/* Free the two scratch blocks allocated at entry. */
1036 osi_FreeLargeSpace(statMemp);
1037 osi_FreeLargeSpace(cbfMemp);
1041 /* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
1045 afs_lookup(adp, ndp)
1047 struct nameidata *ndp; {
1048 char aname[MAXNAMLEN+1]; /* XXX */
1049 struct vcache **avcp = (struct vcache **)&(ndp->ni_vp);
1050 struct ucred *acred = ndp->ni_cred;
1051 int wantparent = ndp->ni_nameiop & WANTPARENT;
1052 int opflag = ndp->ni_nameiop & OPFLAG;
1053 #else /* AFS_OSF_ENV */
1054 #if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
1055 afs_lookup(OSI_VC_ARG(adp), aname, avcp, pnp, flags, rdir, acred)
1056 struct pathname *pnp;
1060 #if defined(UKERNEL)
1061 afs_lookup(adp, aname, avcp, acred, flags)
1064 afs_lookup(adp, aname, avcp, acred)
1065 #endif /* UKERNEL */
1066 #endif /* SUN5 || SGI */
1068 struct vcache **avcp;
1070 struct AFS_UCRED *acred; {
1072 struct vrequest treq;
1073 char *tname = (char *)0;
1074 register struct vcache *tvc=0;
1075 register afs_int32 code;
1076 register afs_int32 bulkcode = 0;
1077 int pass = 0, hit = 0;
1079 extern afs_int32 afs_mariner; /*Writing activity to log?*/
1081 afs_hyper_t versionNo;
1082 int no_read_access = 0;
1083 struct sysname_info sysState; /* used only for @sys checking */
1084 int dynrootRetry = 1;
1085 struct afs_fakestat_state fakestate;
1087 AFS_STATCNT(afs_lookup);
1088 afs_InitFakeStat(&fakestate);
1090 if (code = afs_InitReq(&treq, acred))
1093 code = afs_EvalFakeStat(&adp, &fakestate, &treq);
1097 ndp->ni_dvp = AFSTOV(adp);
1098 memcpy(aname, ndp->ni_ptr, ndp->ni_namelen);
1099 aname[ndp->ni_namelen] = '\0';
1100 #endif /* AFS_OSF_ENV */
1102 *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
1104 /* come back to here if we encounter a non-existent object in a read-only
1105 volume's directory */
1108 *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
1111 if (!(adp->states & CStatd)) {
1112 if (code = afs_VerifyVCache2(adp, &treq)) {
1118 /* watch for ".." in a volume root */
1119 if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
1120 /* looking up ".." in root via special hacks */
1121 if (adp->mvid == (struct VenusFid *) 0 || adp->mvid->Fid.Volume == 0) {
1123 extern struct vcache *afs_globalVp;
1124 if (adp == afs_globalVp) {
1125 struct vnode *rvp = AFSTOV(adp);
1127 ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
1128 ndp->ni_dvp = ndp->ni_vp;
1138 /* otherwise we have the fid here, so we use it */
1139 tvc = afs_GetVCache(adp->mvid, &treq, (afs_int32 *)0,
1140 (struct vcache*)0, 0);
1141 afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT,
1142 ICL_TYPE_FID, adp->mvid, ICL_TYPE_POINTER, tvc,
1143 ICL_TYPE_INT32, code);
1145 code = (tvc ? 0 : ENOENT);
1147 if (tvc && !VREFCOUNT(tvc)) {
1151 /*printf("LOOKUP GETVCDOTDOT -> %d\n", code);*/
1156 /* now check the access */
1157 if (treq.uid != adp->last_looker) {
1158 if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
1159 *avcp = (struct vcache *)0;
1163 else adp->last_looker = treq.uid;
1166 /* Check for read access as well. We need read access in order to
1167 stat files, but not to stat subdirectories. */
1168 if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
1171 /* special case lookup of ".". Can we check for it sooner in this code,
1172 * for instance, way up before "redo:" ??
1173 * I'm not fiddling with the LRUQ here, either, perhaps I should, or else
1174 * invent a lightweight version of GetVCache.
1176 if (aname[0] == '.' && !aname[1]) { /* special case */
1177 ObtainReadLock(&afs_xvcache);
1179 ReleaseReadLock(&afs_xvcache);
1183 if (adp && !VREFCOUNT(adp)) {
1189 Check_AtSys(adp, aname, &sysState, &treq);
1190 tname = sysState.name;
1192 /* 1st Check_AtSys and lookup by tname is required here, for now,
1193 because the dnlc is *not* told to remove entries for the parent
1194 dir of file/dir op that afs_LocalHero likes, but dnlc is informed
1195 if the cached entry for the parent dir is invalidated for a
1197 Otherwise, we'd be able to do a dnlc lookup on an entry ending
1198 w/@sys and know the dnlc was consistent with reality. */
1199 tvc = osi_dnlc_lookup (adp, tname, WRITE_LOCK);
1200 *avcp = tvc; /* maybe wasn't initialized, but it is now */
1202 if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
1203 /* need read access on dir to stat non-directory / non-link */
1204 afs_PutVCache(tvc, WRITE_LOCK);
1205 *avcp = (struct vcache *)0;
1209 #ifdef AFS_LINUX22_ENV
1210 if (tvc->mvstat == 2) { /* we don't trust the dnlc for root vcaches */
1219 #else /* non - LINUX */
1223 #endif /* linux22 */
1227 register struct dcache *tdc;
1228 afs_size_t dirOffset, dirLen;
1230 struct VenusFid tfid;
1232 /* now we have to lookup the next fid */
1233 tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
1235 *avcp = (struct vcache *)0; /* redundant, but harmless */
1240 /* now we will just call dir package with appropriate inode.
1241 Dirs are always fetched in their entirety for now */
1242 ObtainReadLock(&adp->lock);
1243 ObtainReadLock(&tdc->lock);
1246 * Make sure that the data in the cache is current. There are two
1247 * cases we need to worry about:
1248 * 1. The cache data is being fetched by another process.
1249 * 2. The cache data is no longer valid
1251 while ((adp->states & CStatd)
1252 && (tdc->dflags & DFFetching)
1253 && hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1254 ReleaseReadLock(&tdc->lock);
1255 ReleaseReadLock(&adp->lock);
1256 afs_osi_Sleep(&tdc->validPos);
1257 ObtainReadLock(&adp->lock);
1258 ObtainReadLock(&tdc->lock);
1260 if (!(adp->states & CStatd)
1261 || !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1262 ReleaseReadLock(&tdc->lock);
1263 ReleaseReadLock(&adp->lock);
1268 /* Save the version number for when we call osi_dnlc_enter */
1269 hset(versionNo, tdc->f.versionNo);
1272 * check for, and handle "@sys" if it's there. We should be able
1273 * to avoid the alloc and the strcpy with a little work, but it's
1274 * not pressing. If there aren't any remote users (ie, via the
1275 * NFS translator), we have a slightly easier job.
1276 * the faster way to do this is to check for *aname == '@' and if
1277 * it's there, check for @sys, otherwise, assume there's no @sys
1278 * then, if the lookup fails, check for .*@sys...
1280 /* above now implemented by Check_AtSys and Next_AtSys */
1282 /* lookup the name in the appropriate dir, and return a cache entry
1283 on the resulting fid */
1284 theDir = tdc->f.inode;
1285 code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
1287 /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
1288 while (code == ENOENT && Next_AtSys(adp, &treq, &sysState)) {
1289 code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
1291 tname = sysState.name;
1293 ReleaseReadLock(&tdc->lock);
1296 if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
1299 ReleaseReadLock(&adp->lock);
1302 tcell = afs_GetCellByName(tname + 1, READ_LOCK);
1304 tcell = afs_GetCellByName(tname, READ_LOCK);
1306 afs_PutCell(tcell, READ_LOCK);
1307 afs_RefreshDynroot();
1308 if (tname != aname && tname) osi_FreeLargeSpace(tname);
1312 ReleaseReadLock(&adp->lock);
1315 /* new fid has same cell and volume */
1316 tfid.Cell = adp->fid.Cell;
1317 tfid.Fid.Volume = adp->fid.Fid.Volume;
1318 afs_Trace4(afs_iclSetp, CM_TRACE_LOOKUP, ICL_TYPE_POINTER, adp,
1319 ICL_TYPE_STRING, tname,
1320 ICL_TYPE_FID, &tfid, ICL_TYPE_INT32, code);
1323 if (code != ENOENT) {
1324 printf("LOOKUP dirLookupOff -> %d\n", code);
1329 /* prefetch some entries, if the dir is currently open. The variable
1330 * dirCookie tells us where to start prefetching from.
1332 if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign) && !afs_IsDynroot(adp)) {
1334 /* if the entry is not in the cache, or is in the cache,
1335 * but hasn't been statd, then do a bulk stat operation.
1339 ObtainReadLock(&afs_xvcache);
1340 tvc = afs_FindVCache(&tfid, 1, 0, &retry, 0/* !stats,!lru */);
1341 ReleaseReadLock(&afs_xvcache);
1342 } while (tvc && retry);
1344 if (!tvc || !(tvc->states & CStatd))
1345 bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
1349 /* if the vcache isn't usable, release it */
1350 if (tvc && !(tvc->states & CStatd)) {
1351 afs_PutVCache(tvc, 0);
1352 tvc = (struct vcache *) 0;
1355 tvc = (struct vcache *) 0;
1359 /* now get the status info, if we don't already have it */
1360 /* This is kind of weird, but we might wind up accidentally calling
1361 * RXAFS_Lookup because we happened upon a file which legitimately
1362 * has a 0 uniquifier. That is the result of allowing unique to wrap
1363 * to 0. This was fixed in AFS 3.4. For CForeign, Unique == 0 means that
1364 * the file has not yet been looked up.
1367 afs_int32 cached = 0;
1368 if (!tfid.Fid.Unique && (adp->states & CForeign)) {
1369 tvc = afs_LookupVCache(&tfid, &treq, &cached, WRITE_LOCK,
1372 if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
1373 tvc = afs_GetVCache(&tfid, &treq, &cached, (struct vcache*)0,
1377 } /* sub-block just to reduce stack usage */
1380 if (adp->states & CForeign)
1381 tvc->states |= CForeign;
1382 tvc->parentVnode = adp->fid.Fid.Vnode;
1383 tvc->parentUnique = adp->fid.Fid.Unique;
1384 tvc->states &= ~CBulkStat;
1386 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1387 if (!(flags & AFS_LOOKUP_NOEVAL))
1388 /* don't eval mount points */
1389 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1390 if (!afs_fakestat_enable && tvc->mvstat == 1) {
1391 /* a mt point, possibly unevaluated */
1392 struct volume *tvolp;
1394 ObtainWriteLock(&tvc->lock,133);
1395 code = EvalMountPoint(tvc, adp, &tvolp, &treq);
1396 ReleaseWriteLock(&tvc->lock);
1399 afs_PutVCache(tvc, WRITE_LOCK);
1400 if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
1404 /* next, we want to continue using the target of the mt point */
1405 if (tvc->mvid && (tvc->states & CMValid)) {
1407 /* now lookup target, to set .. pointer */
1408 afs_Trace2(afs_iclSetp, CM_TRACE_LOOKUP1,
1409 ICL_TYPE_POINTER, tvc, ICL_TYPE_FID, &tvc->fid);
1410 uvc = tvc; /* remember for later */
1412 if (tvolp && (tvolp->states & VForeign)) {
1413 /* XXXX tvolp has ref cnt on but not locked! XXX */
1414 tvc = afs_GetRootVCache(tvc->mvid, &treq, (afs_int32 *)0, tvolp, WRITE_LOCK);
1416 tvc = afs_GetVCache(tvc->mvid, &treq, (afs_int32 *)0,
1417 (struct vcache*)0, WRITE_LOCK);
1419 afs_PutVCache(uvc, WRITE_LOCK); /* we're done with it */
1424 afs_PutVolume(tvolp, WRITE_LOCK);
1429 /* now, if we came via a new mt pt (say because of a new
1430 * release of a R/O volume), we must reevaluate the ..
1431 * ptr to point back to the appropriate place */
1433 ObtainWriteLock(&tvc->lock,134);
1434 if (tvc->mvid == (struct VenusFid *) 0) {
1435 tvc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
1437 /* setup backpointer */
1438 *tvc->mvid = tvolp->dotdot;
1439 ReleaseWriteLock(&tvc->lock);
1440 afs_PutVolume(tvolp, WRITE_LOCK);
1444 afs_PutVCache(tvc, WRITE_LOCK);
1446 if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
1451 if (tvc && !VREFCOUNT(tvc)) {
1457 /* if we get here, we found something in a directory that couldn't
1458 be located (a Multics "connection failure"). If the volume is
1459 read-only, we try flushing this entry from the cache and trying
1463 tv = afs_GetVolume(&adp->fid, &treq, READ_LOCK);
1465 if (tv->states & VRO) {
1466 pass = 1; /* try this *once* */
1467 ObtainWriteLock(&afs_xcbhash, 495);
1468 afs_DequeueCallback(adp);
1469 /* re-stat to get later version */
1470 adp->states &= ~CStatd;
1471 ReleaseWriteLock(&afs_xcbhash);
1472 osi_dnlc_purgedp(adp);
1473 afs_PutVolume(tv, READ_LOCK);
1476 afs_PutVolume(tv, READ_LOCK);
1483 /* put the network buffer back, if need be */
1484 if (tname != aname && tname) osi_FreeLargeSpace(tname);
1487 /* Handle RENAME; only need to check rename "." */
1488 if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
1489 if (!FidCmp(&(tvc->fid), &(adp->fid))) {
1490 afs_PutVCache(*avcp, WRITE_LOCK);
1492 afs_PutFakeStat(&fakestate);
1493 return afs_CheckCode(EISDIR, &treq, 18);
1496 #endif /* AFS_OSF_ENV */
1499 afs_AddMarinerName(aname, tvc);
1501 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1502 if (!(flags & AFS_LOOKUP_NOEVAL))
1503 /* Here we don't enter the name into the DNLC because we want the
1504 evaluated mount dir to be there (the vcache for the mounted volume)
1505 rather than the vc of the mount point itself. we can still find the
1506 mount point's vc in the vcache by its fid. */
1507 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1509 osi_dnlc_enter (adp, aname, tvc, &versionNo);
1512 #ifdef AFS_LINUX20_ENV
1513 /* So Linux inode cache is up to date. */
1514 code = afs_VerifyVCache(tvc, &treq);
1516 afs_PutFakeStat(&fakestate);
1517 return 0; /* can't have been any errors if hit and !code */
1521 if (bulkcode) code = bulkcode; else
1522 code = afs_CheckCode(code, &treq, 19);
1524 /* If there is an error, make sure *avcp is null.
1525 * Alphas panic otherwise - defect 10719.
1527 *avcp = (struct vcache *)0;
1530 afs_PutFakeStat(&fakestate);