/*
 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 * AFS_EQ_ATSYS (macro)
 */
#include <afsconfig.h>
#include "../afs/param.h"
#include "../afs/sysincludes.h"    /* Standard vendor system headers */
#include "../afs/afsincludes.h"    /* Afs-based standard headers */
#include "../afs/afs_stats.h"      /* statistics */
#include "../afs/afs_cbqueue.h"
#include "../afs/nfsclient.h"
#include "../afs/exporter.h"
#include "../afs/afs_osidnlc.h"
/*
 * A few definitions.  These will remain until we have a proper header
 * file with prototypes for all functions.
 */
extern struct DirEntry *afs_dir_GetBlob();
extern afs_rwlock_t afs_xvcache;
extern afs_rwlock_t afs_xcbhash;
extern struct afs_exporter *afs_nfsexporter;
extern char *afs_sysname;
extern char *afs_sysnamelist[];
extern int afs_sysnamecount;
extern struct afs_q VLRU;          /* vcache LRU */
#ifdef AFS_LINUX22_ENV
extern struct inode_operations afs_symlink_iops, afs_dir_iops;
afs_int32 afs_bulkStatsDone;
static int bulkStatCounter = 0;    /* counter for bulk stat seq. numbers */
int afs_fakestat_enable = 0;
/* this would be faster if it did the comparison as an int32 word, but that
 * would be dependent on byte order and alignment, and I haven't figured out
 * what "@sys" is in binary... */
#define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
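/*
 * Semantics sketch (illustrative only, not built): the macro matches the
 * exact string "@sys" and nothing else; suffix forms such as "foo@sys"
 * are handled separately by Next_AtSys below.
 */
#if 0
    osi_Assert(AFS_EQ_ATSYS("@sys"));      /* exact match */
    osi_Assert(!AFS_EQ_ATSYS("@sysname")); /* trailing characters fail */
    osi_Assert(!AFS_EQ_ATSYS("foo@sys"));  /* suffix form; see Next_AtSys */
#endif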
    register char *s1, *s2;
    register char *a, c; {
    AFS_STATCNT(afs_index);
        if (tc == c) return a;
/* call under write lock, evaluate mvid field from a mount point.
 * avc is the vnode of the mount point object; must be write-locked.
 * advc is the vnode of the containing directory (optional; if NULL and
 * EvalMountPoint succeeds, the caller must initialize (*avolpp)->dotdot)
 * avolpp is where we return a pointer to the volume named by the mount pt, on success
 * areq is the identity of the caller.
 * NOTE: this function returns a held volume structure in *avolpp if it returns 0!
 */
EvalMountPoint(avc, advc, avolpp, areq)
    register struct vcache *avc;
    struct volume **avolpp;
    struct vcache *advc;           /* the containing dir */
    register struct vrequest *areq;
    struct volume *tvp = 0;
    struct VenusFid tfid;
    char *cpos, *volnamep;
    afs_int32 prefetchRO;          /* 1=>No 2=>Yes */
    afs_int32 mtptCell, assocCell, hac = 0;
    afs_int32 samecell, roname, len;
    AFS_STATCNT(EvalMountPoint);
    if (avc->mvid && (avc->states & CMValid)) return 0; /* done while racing */
    *avolpp = (struct volume *)0;
    code = afs_HandleLink(avc, areq);
    if (code) return code;
    /* Determine which cell and volume the mount point goes to */
    type = avc->linkData[0];       /* '#'=>Regular '%'=>RW */
    cpos = afs_index(&avc->linkData[1], ':');  /* if cell name present */
        tcell = afs_GetCellByName(&avc->linkData[1], READ_LOCK);
        volnamep = &avc->linkData[1];
        tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
    if (!tcell) return ENODEV;
    mtptCell = tcell->cell;        /* The cell for the mount point */
        hac = 1;                   /* has associated cell */
        assocCell = tcell->lcellp->cell;   /* The associated cell */
    afs_PutCell(tcell, READ_LOCK);
    /* Is the volume name a "<n>.backup" or "<n>.readonly" name? */
    len = strlen(volnamep);
    roname = ((len > 9) && (strcmp(&volnamep[len - 9], ".readonly") == 0)) ||
             ((len > 7) && (strcmp(&volnamep[len - 7], ".backup") == 0));
    /* When we cross the mount point, do we stay in the same cell? */
    samecell = (avc->fid.Cell == mtptCell) || (hac && (avc->fid.Cell == assocCell));
    /* Decide whether to prefetch the RO.  This also means we want the RO.
     * If this is a regular mount point with an RW volume name and
     * we cross a cell boundary -or- start from an RO volume, then we will
     * want to prefetch the RO volume when we get the RW below.
     */
    if ((type == '#') && !roname && (!samecell || (avc->states & CRO))) {
        prefetchRO = 2;    /* Yes, prefetch the RO */
        prefetchRO = 1;    /* No prefetch of the RO */
    /* Get the volume struct.  Unless this volume name has ".readonly" or
     * ".backup" in it, this will get the volume struct for the RW volume.
     * The RO volume will be prefetched if requested (but not returned).
     */
    tvp = afs_GetVolumeByName(volnamep, mtptCell, prefetchRO, areq, WRITE_LOCK);
    /* If no volume was found in this cell, try the associated linked cell */
    if (!tvp && hac && areq->volumeError) {
        tvp = afs_GetVolumeByName(volnamep, assocCell, prefetchRO, areq, WRITE_LOCK);
    /* Still not found.  If we are looking for the RO, then perhaps the RW
     * doesn't exist?  Try adding ".readonly" to volname and look for that.
     * Don't know why we do this; it would have been found by the call above. - jpm
     */
    if (!tvp && (prefetchRO == 2)) {
        strcpy(buf, volnamep);
        afs_strcat(buf, ".readonly");
        tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
        /* Try the associated linked cell if that failed */
        if (!tvp && hac && areq->volumeError) {
            tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
    if (!tvp) return ENODEV;       /* Couldn't find the volume */
    /* Don't cross a mount point from a BK to a BK volume */
    if ((avc->states & CBackup) && (tvp->states & VBackup)) {
        afs_PutVolume(tvp, WRITE_LOCK);
    /* If we want the RO (prefetched above) and it exists, then drop the
     * RW volume and get the RO.  Otherwise, go with the RW.
     */
    if ((prefetchRO == 2) && tvp->roVol) {
        tfid.Fid.Volume = tvp->roVol;   /* remember RO volume */
        tfid.Cell = tvp->cell;
        afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
        tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
        if (!tvp) return ENODEV;        /* oops, can't do it */
    avc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
    avc->mvid->Cell = tvp->cell;
    avc->mvid->Fid.Volume = tvp->volume;
    avc->mvid->Fid.Vnode = 1;
    avc->mvid->Fid.Unique = 1;
    avc->states |= CMValid;
    /* Used to: if the mount point is stored within a backup volume,
     * then we should only update the parent pointer information if
     * there's none already set, so as to avoid updating a volume's ..
     * info with something in an OldFiles directory.
     * Next two lines used to be under this if:
     *  if (!(avc->states & CBackup) || tvp->dotdot.Fid.Volume == 0)
     * Now: update the mount point back pointer on every call, so that we handle
     * multiple mount points better.  This way, when du tries to go back
     * via chdir(".."), it will end up exactly where it started, yet
     * cd'ing via a new path to a volume will reset the ".." pointer to the new path.
     */
    tvp->mtpoint = avc->fid;       /* set up back pointer to mtpoint */
    if (advc) tvp->dotdot = advc->fid;
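/*
 * Caller sketch (a minimal illustration, assuming avc is a held mount
 * point vcache, advc its containing directory, areq an initialized
 * vrequest, and code an afs_int32; mirrors the use in
 * afs_EvalFakeStat_int and afs_lookup below).  On success the volume
 * returned in *avolpp is held and must be released by the caller.
 */
#if 0
    struct volume *tvolp = NULL;

    ObtainWriteLock(&avc->lock, 599);
    code = EvalMountPoint(avc, advc, &tvolp, areq);
    ReleaseWriteLock(&avc->lock);
    if (code == 0 && tvolp) {
        /* ... use tvolp (e.g. tvolp->dotdot) ... */
        afs_PutVolume(tvolp, WRITE_LOCK);
    }
#endif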
/*
 * Must be called on an afs_fakestat_state object before calling
 * afs_EvalFakeStat or afs_PutFakeStat.  Calling afs_PutFakeStat
 * without calling afs_EvalFakeStat is legal, as long as this
 * function is called.
 */
afs_InitFakeStat(state)
    struct afs_fakestat_state *state;
    state->need_release = 0;
/*
 * afs_EvalFakeStat_int
 *
 * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
 * which is called by those wrapper functions.
 *
 * Only issues RPCs if canblock is non-zero.
 */
afs_EvalFakeStat_int(avcp, state, areq, canblock)
    struct vcache **avcp;
    struct afs_fakestat_state *state;
    struct vrequest *areq;
    struct vcache *tvc, *root_vp;
    struct volume *tvolp = NULL;
    osi_Assert(state->valid == 1);
    osi_Assert(state->did_eval == 0);
    if (!afs_fakestat_enable)
    if (tvc->mvstat != 1)
    /* Is the call to VerifyVCache really necessary? */
    code = afs_VerifyVCache(tvc, areq);
        ObtainWriteLock(&tvc->lock, 599);
        code = EvalMountPoint(tvc, NULL, &tvolp, areq);
        ReleaseWriteLock(&tvc->lock);
            tvolp->dotdot = tvc->fid;
            tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
            tvolp->dotdot.Fid.Unique = tvc->parentUnique;
    if (tvc->mvid && (tvc->states & CMValid)) {
            ObtainWriteLock(&afs_xvcache, 597);
            root_vp = afs_FindVCache(tvc->mvid, 0, 0, &retry, 0);
            if (root_vp && retry) {
                ReleaseWriteLock(&afs_xvcache);
                afs_PutVCache(root_vp, 0);
        } while (root_vp && retry);
        ReleaseWriteLock(&afs_xvcache);
            root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL, WRITE_LOCK);
            code = canblock ? ENOENT : 0;
            /* Is this always kosher?  Perhaps we should instead use
             * NBObtainWriteLock to avoid potential deadlock.
             */
            ObtainWriteLock(&root_vp->lock, 598);
                root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
            *root_vp->mvid = tvolp->dotdot;
            ReleaseWriteLock(&root_vp->lock);
            state->need_release = 1;
            state->root_vp = root_vp;
        code = canblock ? ENOENT : 0;
        afs_PutVolume(tvolp, WRITE_LOCK);
/*
 * Automatically does the equivalent of EvalMountPoint for vcache entries
 * which are mount points.  Remembers enough state to properly release
 * the volume root vcache when afs_PutFakeStat() is called.
 *
 * The state variable must be initialized by afs_InitFakeStat() beforehand.
 *
 * Returns 0 when everything succeeds and *avcp points to the vcache entry
 * that should be used for the real vnode operation.  Returns non-zero if
 * something goes wrong and the error code should be returned to the user.
 */
afs_EvalFakeStat(avcp, state, areq)
    struct vcache **avcp;
    struct afs_fakestat_state *state;
    struct vrequest *areq;
    return afs_EvalFakeStat_int(avcp, state, areq, 1);
/*
 * afs_TryEvalFakeStat
 *
 * Same as afs_EvalFakeStat, but tries not to talk to remote servers
 * and only evaluates the mount point if all the required data is
 * already cached.
 *
 * Returns 0 if everything succeeds and *avcp points to a valid
 * vcache entry (possibly evaluated).
 */
afs_TryEvalFakeStat(avcp, state, areq)
    struct vcache **avcp;
    struct afs_fakestat_state *state;
    struct vrequest *areq;
    return afs_EvalFakeStat_int(avcp, state, areq, 0);
/*
 * Perform any necessary cleanup at the end of a vnode op, given that
 * afs_InitFakeStat was previously called with this state.
 */
afs_PutFakeStat(state)
    struct afs_fakestat_state *state;
    osi_Assert(state->valid == 1);
    if (state->need_release)
        afs_PutVCache(state->root_vp, 0);
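/*
 * Lifecycle sketch for the fakestat helpers (illustrative only; assumes
 * avc is a held vcache and treq an initialized vrequest): every path
 * that calls afs_InitFakeStat must eventually call afs_PutFakeStat,
 * whether or not one of the Eval wrappers ran in between.
 */
#if 0
    struct afs_fakestat_state fakestate;

    afs_InitFakeStat(&fakestate);
    code = afs_EvalFakeStat(&avc, &fakestate, &treq); /* may replace avc with the volume root */
    if (code == 0) {
        /* ... perform the vnode operation on avc ... */
    }
    afs_PutFakeStat(&fakestate);   /* releases the root vcache, if one was obtained */
#endif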
    register char *aname; {
    AFS_STATCNT(ENameOK);
    tlen = strlen(aname);
    if (tlen >= 4 && strcmp(aname + tlen - 4, "@sys") == 0) return 0;
afs_getsysname(areq, adp, bufp)
    register struct vrequest *areq;
    register struct vcache *adp;
    static char sysname[MAXSYSNAME];
    register struct unixuser *au;
    register afs_int32 error;
    if (!afs_nfsexporter) {
        strcpy(bufp, afs_sysname);
    AFS_STATCNT(getsysname);
    au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
        error = EXP_SYSNAME(au->exporter, (char *)0, bufp);
            strcpy(bufp, "@sys");
        strcpy(bufp, afs_sysname);
Check_AtSys(avc, aname, state, areq)
    register struct vcache *avc;
    struct sysname_info *state;
    struct vrequest *areq;
    if (AFS_EQ_ATSYS(aname)) {
        state->name = (char *) osi_AllocLargeSpace(AFS_SMALLOCSIZ);
        state->index = afs_getsysname(areq, avc, state->name);
Next_AtSys(avc, areq, state)
    register struct vcache *avc;
    struct vrequest *areq;
    struct sysname_info *state;
    if (state->index == -1)
        return 0;      /* No list */
    /* Check for the initial state of aname != "@sys" in Check_AtSys */
    if (state->offset == -1 && state->allocked == 0) {
        register char *tname;
        /* Check for .*@sys */
        for (tname = state->name; *tname; tname++)
            /* Move to the end of the string */;
        if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname - 4))) {
            state->offset = (tname - 4) - state->name;
            tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
            strncpy(tname, state->name, state->offset);
            state->index = afs_getsysname(areq, avc, state->name + state->offset);
        return 0;      /* .*@sys doesn't match either */
    } else if (++(state->index) >= afs_sysnamecount
               || !afs_sysnamelist[state->index])
        return 0;      /* end of list */
    strcpy(state->name + state->offset, afs_sysnamelist[state->index]);
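/*
 * Usage sketch for the @sys helpers (illustrative; mirrors the loop in
 * afs_lookup below): Check_AtSys primes sysState with the first
 * expansion of aname, and Next_AtSys steps through afs_sysnamelist
 * until a directory lookup succeeds.
 */
#if 0
    Check_AtSys(adp, aname, &sysState, &treq);
    code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
    while (code == ENOENT && Next_AtSys(adp, &treq, &sysState)) {
        code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
    }
#endif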
#if (defined(AFS_SGI62_ENV) || defined(AFS_SUN57_64BIT_ENV))
extern int BlobScan(ino64_t *afile, afs_int32 ablob);
#if defined AFS_LINUX_64BIT_KERNEL
extern int BlobScan(long *afile, afs_int32 ablob);
extern int BlobScan(afs_int32 *afile, afs_int32 ablob);
/* called with an unlocked directory and directory cookie.  Areqp
 * describes who is making the call.
 * Scans the next N (about 30, typically) directory entries, and does
 * a bulk stat call to stat them all.
 *
 * Must be very careful when merging in RPC responses, since we don't
 * want to overwrite newer info that was added by a file system mutating
 * call that ran concurrently with our bulk stat call.
 *
 * We do that, as described below, by not merging in our info (always
 * safe to skip the merge) if the status info is valid in the vcache entry.
 *
 * If adapt ever implements the bulk stat RPC, then this code will need to
 * ensure that vcaches created for failed RPCs to older servers have the
 * CForeign bit set.
 */
struct vcache *BStvc = (struct vcache *) 0;
int afs_DoBulkStat(adp, dirCookie, areqp)
    struct vrequest *areqp;
    int nentries;               /* # of entries to prefetch */
    int nskip;                  /* # of slots in the LRU queue to skip */
    struct vcache *lruvcp;      /* vcache ptr of our goal pos in LRU queue */
    struct dcache *dcp;         /* chunk containing the dir block */
    char *statMemp;             /* status memory block */
    char *cbfMemp;              /* callback and fid memory block */
    afs_size_t temp;            /* temp for holding chunk length, &c. */
    struct AFSFid *fidsp;       /* file IDs we're collecting */
    struct AFSCallBack *cbsp;   /* call back pointers */
    struct AFSCallBack *tcbp;   /* temp callback ptr */
    struct AFSFetchStatus *statsp;  /* file status info */
    struct AFSVolSync volSync;  /* vol sync return info */
    struct vcache *tvcp;        /* temp vcp */
    struct afs_q *tq;           /* temp queue variable */
    AFSCBFids fidParm;          /* file ID parm for bulk stat */
    AFSBulkStats statParm;      /* stat info parm for bulk stat */
    int fidIndex;               /* which file we're stating */
    struct conn *tcp;           /* conn for call */
    AFSCBs cbParm;              /* callback parm for bulk stat */
    struct server *hostp = 0;   /* host we got callback from */
    long origEvenCBs;           /* original # of callbacks for even-fid files */
    long origOddCBs;            /* original # of callbacks for odd-fid files */
    long origEvenZaps;          /* original # of recycles for even-fid files */
    long origOddZaps;           /* original # of recycles for odd-fid files */
    long startTime;             /* time we started the call, for callback expiration base */
    afs_size_t statSeqNo;       /* value of file size used to detect races */
    int code;                   /* error code */
    long newIndex;              /* new index in the dir */
    struct DirEntry *dirEntryp; /* dir entry we are examining */
    struct VenusFid afid;       /* file ID we are using now */
    struct VenusFid tfid;       /* another temp. file ID */
    afs_int32 retry;            /* handle low-level SGI MP race conditions */
    long volStates;             /* flags from vol structure */
    struct volume *volp = 0;    /* volume ptr */
    struct VenusFid dotdot;
    int flagIndex;              /* First file with bulk fetch flag set */
    int inlinebulk = 0;         /* Did we use InlineBulk RPC or not? */
    /* first compute some basic parameters.  We don't want to prefetch more
     * than a fraction of the cache in any given call, and we want to preserve
     * a portion of the LRU queue in any event, so as to avoid thrashing
     * the entire stat cache (we will at least leave some of it alone).
     * Presently we don't stat more than 1/8 the cache in any one call. */
    nentries = afs_cacheStats / 8;
    /* don't bother prefetching more than one call's worth of info */
    if (nentries > AFSCBMAX) nentries = AFSCBMAX;
    /* heuristic to make sure that things fit in 4K.  This means that
     * we shouldn't make it any bigger than 47 entries (an AFSFetchStatus
     * is on the order of 84 bytes, and 4096/84 is about 48).  I am
     * typically going to keep it a little lower, since we don't want to
     * load too much of the stat cache.
     */
    if (nentries > 30) nentries = 30;
    /* now, to reduce the stack size, we'll allocate two 4K blocks,
     * one for fids and callbacks, and one for stat info.  We'll set
     * up our pointers to the memory from there, too.
     */
    statMemp = osi_AllocLargeSpace(nentries * sizeof(AFSFetchStatus));
    statsp = (struct AFSFetchStatus *) statMemp;
    cbfMemp = osi_AllocLargeSpace(nentries *
                                  (sizeof(AFSCallBack) + sizeof(AFSFid)));
    fidsp = (AFSFid *) cbfMemp;
    cbsp = (AFSCallBack *) (cbfMemp + nentries * sizeof(AFSFid));
    /* next, we must iterate over the directory, starting from the specified
     * cookie offset (dirCookie), and counting out nentries file entries.
     * We skip files that already have stat cache entries, since we
     * don't want to bulk stat files that are already in the cache.
     */
    code = afs_VerifyVCache(adp, areqp);
    dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
    /* lock the directory cache entry */
    ObtainReadLock(&adp->lock);
    ObtainReadLock(&dcp->lock);
    /*
     * Make sure that the data in the cache is current.  There are two
     * cases we need to worry about:
     * 1. The cache data is being fetched by another process.
     * 2. The cache data is no longer valid
     */
    while ((adp->states & CStatd)
           && (dcp->dflags & DFFetching)
           && hsame(adp->m.DataVersion, dcp->f.versionNo)) {
        afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
                   ICL_TYPE_STRING, __FILE__,
                   ICL_TYPE_INT32, __LINE__,
                   ICL_TYPE_POINTER, dcp,
                   ICL_TYPE_INT32, dcp->dflags);
        ReleaseReadLock(&dcp->lock);
        ReleaseReadLock(&adp->lock);
        afs_osi_Sleep(&dcp->validPos);
        ObtainReadLock(&adp->lock);
        ObtainReadLock(&dcp->lock);
    if (!(adp->states & CStatd)
        || !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
        ReleaseReadLock(&dcp->lock);
        ReleaseReadLock(&adp->lock);
    /* Generate a sequence number so we can tell whether we should
     * store the attributes when processing the response.  This number is
     * stored in the file size when we set the CBulkFetching bit.  If the
     * CBulkFetching bit is still set and this value hasn't changed, then
     * we know we were the last to set the CBulkFetching bit for this file,
     * and it is safe to set the status information for this file.
     */
    statSeqNo = bulkStatCounter++;
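    /*
     * In outline: below we set CBulkFetching and stash statSeqNo in
     * m.Length for each candidate vcache; at merge time the status is
     * stored only if both are still intact, i.e. nobody else touched
     * the entry while the RPC was in flight.
     */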
    /* now we have dir data in the cache, so scan the dir page */
    while (1) {        /* Should probably have some constant bound */
        /* look for the first safe entry to examine in the directory.  BlobScan
         * looks for the first allocated dir entry after the dirCookie slot.
         */
        newIndex = BlobScan(&dcp->f.inode, (dirCookie >> 5));
        if (newIndex == 0) break;
        /* remember the updated directory cookie */
        dirCookie = newIndex << 5;
        /* get a ptr to the dir entry */
        dirEntryp = (struct DirEntry *) afs_dir_GetBlob(&dcp->f.inode, newIndex);
        if (!dirEntryp) break;
        /* don't copy more than we have room for */
        if (fidIndex >= nentries) {
            DRelease((char *) dirEntryp, 0);
        /* now, if the dir entry looks good, copy it out to our list.  Vnode
         * 0 means deleted, although the entry should also have been freed if
         * it was deleted.
         */
        if (dirEntryp->fid.vnode != 0) {
            /* don't copy entries we have in our cache.  This check will
             * also make us skip "." and probably "..", unless it has
             * disappeared from the cache since we did our namei call.
             */
            tfid.Cell = adp->fid.Cell;
            tfid.Fid.Volume = adp->fid.Fid.Volume;
            tfid.Fid.Vnode = ntohl(dirEntryp->fid.vnode);
            tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
                ObtainWriteLock(&afs_xvcache, 130);
                tvcp = afs_FindVCache(&tfid, 0, 0, &retry, 0 /* no stats | LRU */);
                    ReleaseWriteLock(&afs_xvcache);
                    afs_PutVCache(tvcp, 0);
            } while (tvcp && retry);
            if (!tvcp) {       /* otherwise, create manually */
                tvcp = afs_NewVCache(&tfid, hostp, 0, 0);
                ObtainWriteLock(&tvcp->lock, 505);
                ReleaseWriteLock(&afs_xvcache);
                afs_RemoveVCB(&tfid);
                ReleaseWriteLock(&tvcp->lock);
                ReleaseWriteLock(&afs_xvcache);
                goto done;     /* can't happen at present, more's the pity */
            /* WARNING: afs_DoBulkStat uses the Length field to store a
             * sequence number for each bulk status request.  Under no
             * circumstances should afs_DoBulkStat store a sequence number
             * if the new length will be ignored when afs_ProcessFS is
             * called with new stats. */
            if (!(tvcp->states & (CStatd|CBulkFetching))
                && (tvcp->execsOrWriters <= 0)
                && !afs_DirtyPages(tvcp)
                && !AFS_VN_MAPPED((vnode_t *) tvcp))
            if (!(tvcp->states & (CStatd|CBulkFetching))
                && (tvcp->execsOrWriters <= 0)
                && !afs_DirtyPages(tvcp))
                /* this entry doesn't exist in the cache, and is not
                 * already being fetched by someone else, so add it to the
                 * list of file IDs to obtain.
                 *
                 * We detect a callback breaking race condition by checking the
                 * CBulkFetching state bit and the value in the file size.
                 * It is safe to set the status only if the CBulkFetching
                 * flag is still set and the value in the file size does
                 * not change.
                 *
                 * Don't fetch status for dirty files.  We need to
                 * preserve the value of the file size.  We could
                 * flush the pages, but it wouldn't be worthwhile.
                 */
                memcpy((char *)(fidsp + fidIndex), (char *) &tfid.Fid, sizeof(*fidsp));
                tvcp->states |= CBulkFetching;
                tvcp->m.Length = statSeqNo;
            afs_PutVCache(tvcp, 0);
        }       /* if dir vnode has non-zero entry */
        /* move to the next dir entry by adding in the # of entries
         * used by this dir entry.
         */
        temp = afs_dir_NameBlobs(dirEntryp->name) << 5;
        DRelease((char *) dirEntryp, 0);
        if (temp <= 0) break;
    }   /* while loop over all dir entries */
    /* now release the dir lock and prepare to make the bulk RPC */
    ReleaseReadLock(&dcp->lock);
    ReleaseReadLock(&adp->lock);
    /* release the chunk */
    /* don't make a null call */
    if (fidIndex == 0) goto done;
    /* set up the RPC parm structures */
    fidParm.AFSCBFids_len = fidIndex;
    fidParm.AFSCBFids_val = fidsp;
    statParm.AFSBulkStats_len = fidIndex;
    statParm.AFSBulkStats_val = statsp;
    cbParm.AFSCBs_len = fidIndex;
    cbParm.AFSCBs_val = cbsp;
    /* start the timer; callback expirations are relative to this */
    startTime = osi_Time();
        tcp = afs_Conn(&adp->fid, areqp, SHARED_LOCK);
            hostp = tcp->srvr->server;
            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
            if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
                code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
                if (code == RXGEN_OPCODE) {
                    tcp->srvr->server->flags |= SNO_INLINEBULK;
                    code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
                code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
    } while (afs_Analyze(tcp, code, &adp->fid, areqp,
                         AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, (struct cell *)0));
    /* now, if we didn't get the info, bail out. */
    /* we need vol flags to create the entries properly */
    dotdot.Fid.Volume = 0;
    volp = afs_GetVolume(&adp->fid, areqp, READ_LOCK);
        volStates = volp->states;
        if (volp->dotdot.Fid.Volume != 0)
            dotdot = volp->dotdot;
    /* find the place to merge the info into.  We do this by skipping
     * nskip entries in the LRU queue.  The more we skip, the more
     * we preserve, since the head of the VLRU queue is the most recently
     * referenced.
     */
    nskip = afs_cacheStats / 2;     /* preserved fraction of the cache */
    ObtainReadLock(&afs_xvcache);
        /* actually a serious error, probably should panic.  Probably will
         * panic soon, oh well. */
        ReleaseReadLock(&afs_xvcache);
        afs_warnuser("afs_DoBulkStat: VLRU empty!");
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
        refpanic("Bulkstat VLRU inconsistent");
    for (tq = VLRU.next; tq != &VLRU; tq = QNext(tq)) {
        if (--nskip <= 0) break;
        else if (QNext(QPrev(tq)) != tq) {
            refpanic("BulkStat VLRU inconsistent");
    if (tq != &VLRU) lruvcp = QTOV(tq);
    else lruvcp = QTOV(VLRU.next);
    /* now we have to hold this entry, so that it does not get moved
     * into the free list while we're running.  It could still get
     * moved within the lru queue, but hopefully that will be rare; it
     * doesn't hurt nearly as much.
     */
    osi_vnhold(lruvcp, &retry);
    ReleaseReadLock(&afs_xvcache);  /* could be read lock */
    /* otherwise, merge in the info.  We have to be quite careful here,
     * since we need to ensure that we don't merge old info over newer
     * stuff in a stat cache entry.  We're very conservative here: we don't
     * do the merge at all unless we ourselves create the stat cache
     * entry.  That's pretty safe, and should work pretty well, since we
     * typically expect to do the stat cache creation ourselves.
     *
     * We also have to take into account racing token revocations.
     */
    for (i = 0; i < fidIndex; i++) {
        if ((&statsp[i])->errorCode)
        afid.Cell = adp->fid.Cell;
        afid.Fid.Volume = adp->fid.Fid.Volume;
        afid.Fid.Vnode = fidsp[i].Vnode;
        afid.Fid.Unique = fidsp[i].Unique;
            ObtainReadLock(&afs_xvcache);
            tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0 /* !stats&!lru */);
            ReleaseReadLock(&afs_xvcache);
        } while (tvcp && retry);
        /* The entry may no longer exist */
        /* now we have the entry held, but we need to fill it in */
        ObtainWriteLock(&tvcp->lock, 131);
        /* if CBulkFetching is not set, or if the file size no longer
         * matches the value we placed there when we set the CBulkFetching
         * flag, then someone else has done something with this node,
         * and we may not have the latest status information for this
         * file.  Leave the entry alone.
         */
        if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
            ReleaseWriteLock(&tvcp->lock);
            afs_PutVCache(tvcp, 0);
        /* now copy the ".." entry back out of the volume structure, if necessary */
        if (tvcp->mvstat == 2 && (dotdot.Fid.Volume != 0)) {
                tvcp->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
            *tvcp->mvid = dotdot;
        ObtainWriteLock(&afs_xvcache, 132);
        if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
            refpanic("Bulkstat VLRU inconsistent2");
        if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
            || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
            refpanic("Bulkstat VLRU inconsistent4");
        if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
            || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
            refpanic("Bulkstat VLRU inconsistent5");
        if (tvcp != lruvcp) {  /* if they are ==, don't move it; don't corrupt vlru */
            QRemove(&tvcp->vlruq);
            QAdd(&lruvcp->vlruq, &tvcp->vlruq);
        if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
            refpanic("Bulkstat VLRU inconsistent3");
        if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
            || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
            refpanic("Bulkstat VLRU inconsistent5");
        if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
            || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
            refpanic("Bulkstat VLRU inconsistent6");
        ReleaseWriteLock(&afs_xvcache);
        ObtainWriteLock(&afs_xcbhash, 494);
        /* We need to check the flags again.  We may have missed
         * something while we were waiting for a lock.
         */
        if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
            ReleaseWriteLock(&tvcp->lock);
            ReleaseWriteLock(&afs_xcbhash);
            afs_PutVCache(tvcp, 0);
        /* now merge in the resulting status back into the vnode.
         * We only do this if the entry looks clear.
         */
        afs_ProcessFS(tvcp, &statsp[i], areqp);
#ifdef AFS_LINUX22_ENV
        /* overwrite the ops if it's a directory or symlink. */
        if (vType(tvcp) == VDIR)
            tvcp->v.v_op = &afs_dir_iops;
        else if (vType(tvcp) == VLNK)
            tvcp->v.v_op = &afs_symlink_iops;
        /* do some accounting for bulk stats: mark this entry as
         * loaded, so we can tell if we use it before it gets
         * recycled.
         */
        tvcp->states |= CBulkStat;
        tvcp->states &= ~CBulkFetching;
        /* merge in vol info */
        if (volStates & VRO) tvcp->states |= CRO;
        if (volStates & VBackup) tvcp->states |= CBackup;
        if (volStates & VForeign) tvcp->states |= CForeign;
        /* merge in the callback info */
        tvcp->states |= CTruth;
        /* get a ptr to the callback we are interested in */
        if (tcbp->ExpirationTime != 0) {
            tvcp->cbExpires = tcbp->ExpirationTime + startTime;
            tvcp->callback = hostp;
            tvcp->states |= CStatd;
            afs_QueueCallback(tvcp, CBHash(tcbp->ExpirationTime), volp);
        else if (tvcp->states & CRO) {
            /* ordinary callback on a read-only volume -- AFS 3.2 style */
            tvcp->cbExpires = 3600 + startTime;
            tvcp->callback = hostp;
            tvcp->states |= CStatd;
            afs_QueueCallback(tvcp, CBHash(3600), volp);
            tvcp->states &= ~(CStatd|CUnique);
            afs_DequeueCallback(tvcp);
            if ((tvcp->states & CForeign) || (vType(tvcp) == VDIR))
                osi_dnlc_purgedp(tvcp);    /* if it (could be) a directory */
        ReleaseWriteLock(&afs_xcbhash);
        ReleaseWriteLock(&tvcp->lock);
        /* finally, we're done with the entry */
        afs_PutVCache(tvcp, 0);
    }   /* for all files we got back */
    /* finally return the pointer into the LRU queue */
    afs_PutVCache(lruvcp, 0);
    /* Be sure to turn off the CBulkFetching flags */
    for (i = flagIndex; i < fidIndex; i++) {
        afid.Cell = adp->fid.Cell;
        afid.Fid.Volume = adp->fid.Fid.Volume;
        afid.Fid.Vnode = fidsp[i].Vnode;
        afid.Fid.Unique = fidsp[i].Unique;
            ObtainReadLock(&afs_xvcache);
            tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0 /* !stats&!lru */);
            ReleaseReadLock(&afs_xvcache);
        } while (tvcp && retry);
            && (tvcp->states & CBulkFetching)
            && (tvcp->m.Length == statSeqNo)) {
            tvcp->states &= ~CBulkFetching;
            afs_PutVCache(tvcp, 0);
        afs_PutVolume(volp, READ_LOCK);
    /* If we did the InlineBulk RPC, pull out the return code */
        if ((&statsp[0])->errorCode) {
            afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
                        AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
            code = (&statsp[0])->errorCode;
    osi_FreeLargeSpace(statMemp);
    osi_FreeLargeSpace(cbfMemp);
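/*
 * Caller sketch (illustrative; mirrors the call made from afs_lookup
 * below).  Bulk stat is an optimization, so a non-zero return is
 * typically noted but need not abort the enclosing operation.
 */
#if 0
    if (adp->opens > 0 && !(adp->states & CForeign) && !afs_IsDynroot(adp))
        bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
#endif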
/* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
afs_lookup(adp, ndp)
    struct nameidata *ndp; {
    char aname[MAXNAMLEN + 1];     /* XXX */
    struct vcache **avcp = (struct vcache **) &(ndp->ni_vp);
    struct ucred *acred = ndp->ni_cred;
    int wantparent = ndp->ni_nameiop & WANTPARENT;
    int opflag = ndp->ni_nameiop & OPFLAG;
#else /* AFS_OSF_ENV */
#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
afs_lookup(OSI_VC_ARG(adp), aname, avcp, pnp, flags, rdir, acred)
    struct pathname *pnp;
#if defined(UKERNEL)
afs_lookup(adp, aname, avcp, acred, flags)
afs_lookup(adp, aname, avcp, acred)
#endif /* UKERNEL */
#endif /* SUN5 || SGI */
    struct vcache **avcp;
    struct AFS_UCRED *acred; {
    struct vrequest treq;
    char *tname = (char *)0;
    register struct vcache *tvc = 0;
    register afs_int32 code;
    register afs_int32 bulkcode = 0;
    int pass = 0, hit = 0;
    extern afs_int32 afs_mariner;  /* Writing activity to log? */
    afs_hyper_t versionNo;
    int no_read_access = 0;
    struct sysname_info sysState;  /* used only for @sys checking */
    int dynrootRetry = 1;
    struct afs_fakestat_state fakestate;
    int tryEvalOnly = 0;
    AFS_STATCNT(afs_lookup);
    afs_InitFakeStat(&fakestate);
    if (code = afs_InitReq(&treq, acred))
    ndp->ni_dvp = AFSTOV(adp);
    memcpy(aname, ndp->ni_ptr, ndp->ni_namelen);
    aname[ndp->ni_namelen] = '\0';
#endif /* AFS_OSF_ENV */
#if defined(AFS_DARWIN_ENV)
    /* Workaround for MacOSX Finder, which tries to look for
     * .DS_Store and Contents under every directory.
     */
    if (afs_fakestat_enable && adp->mvstat == 1) {
        if (strcmp(aname, ".DS_Store") == 0)
        if (strcmp(aname, "Contents") == 0)
        code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
        code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (tryEvalOnly && adp->mvstat == 1)
    *avcp = (struct vcache *) 0;   /* Since some callers don't initialize it */
    /* come back to here if we encounter a non-existent object in a read-only
       volume's directory */
    *avcp = (struct vcache *) 0;   /* Since some callers don't initialize it */
    if (!(adp->states & CStatd)) {
        if (code = afs_VerifyVCache2(adp, &treq)) {
    /* watch for ".." in a volume root */
    if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
        /* looking up ".." in root via special hacks */
        if (adp->mvid == (struct VenusFid *) 0 || adp->mvid->Fid.Volume == 0) {
            extern struct vcache *afs_globalVp;
            if (adp == afs_globalVp) {
                struct vnode *rvp = AFSTOV(adp);
                ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
                ndp->ni_dvp = ndp->ni_vp;
        /* otherwise we have the fid here, so we use it */
        tvc = afs_GetVCache(adp->mvid, &treq, (afs_int32 *)0,
                            (struct vcache *)0, 0);
        afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT,
                   ICL_TYPE_FID, adp->mvid, ICL_TYPE_POINTER, tvc,
                   ICL_TYPE_INT32, code);
        code = (tvc ? 0 : ENOENT);
        if (tvc && !VREFCOUNT(tvc)) {
        /* printf("LOOKUP GETVCDOTDOT -> %d\n", code); */
    /* now check the access */
    if (treq.uid != adp->last_looker) {
        if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
            *avcp = (struct vcache *)0;
    else adp->last_looker = treq.uid;
    /* Check for read access as well.  We need read access in order to
       stat files, but not to stat subdirectories. */
    if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
    /* special case lookup of ".".  Can we check for it sooner in this code,
     * for instance, way up before "redo:"??
     * I'm not fiddling with the LRUQ here, either; perhaps I should, or else
     * invent a lightweight version of GetVCache.
     */
    if (aname[0] == '.' && !aname[1]) {    /* special case */
        ObtainReadLock(&afs_xvcache);
        ReleaseReadLock(&afs_xvcache);
    if (adp && !VREFCOUNT(adp)) {
    Check_AtSys(adp, aname, &sysState, &treq);
    tname = sysState.name;
    /* The first Check_AtSys and lookup by tname is required here, for now,
       because the dnlc is *not* told to remove entries for the parent
       dir of file/dir op that afs_LocalHero likes, but dnlc is informed
       if the cached entry for the parent dir is invalidated for a
       non-local change.
       Otherwise, we'd be able to do a dnlc lookup on an entry ending
       w/@sys and know the dnlc was consistent with reality. */
    tvc = osi_dnlc_lookup(adp, tname, WRITE_LOCK);
    *avcp = tvc;       /* maybe wasn't initialized, but it is now */
        if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
            /* need read access on dir to stat non-directory / non-link */
            afs_PutVCache(tvc, WRITE_LOCK);
            *avcp = (struct vcache *)0;
#ifdef AFS_LINUX22_ENV
        if (tvc->mvstat == 2) {    /* we don't trust the dnlc for root vcaches */
#else /* non-LINUX */
#endif /* linux22 */
        register struct dcache *tdc;
        afs_size_t dirOffset, dirLen;
        struct VenusFid tfid;
        /* now we have to look up the next fid */
        tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
            *avcp = (struct vcache *)0;    /* redundant, but harmless */
        /* now we will just call the dir package with the appropriate inode.
           Dirs are always fetched in their entirety for now */
        ObtainReadLock(&adp->lock);
        ObtainReadLock(&tdc->lock);
        /*
         * Make sure that the data in the cache is current.  There are two
         * cases we need to worry about:
         * 1. The cache data is being fetched by another process.
         * 2. The cache data is no longer valid
         */
        while ((adp->states & CStatd)
               && (tdc->dflags & DFFetching)
               && hsame(adp->m.DataVersion, tdc->f.versionNo)) {
            ReleaseReadLock(&tdc->lock);
            ReleaseReadLock(&adp->lock);
            afs_osi_Sleep(&tdc->validPos);
            ObtainReadLock(&adp->lock);
            ObtainReadLock(&tdc->lock);
        if (!(adp->states & CStatd)
            || !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
            ReleaseReadLock(&tdc->lock);
            ReleaseReadLock(&adp->lock);
        /* Save the version number for when we call osi_dnlc_enter */
        hset(versionNo, tdc->f.versionNo);
        /*
         * check for, and handle, "@sys" if it's there.  We should be able
         * to avoid the alloc and the strcpy with a little work, but it's
         * not pressing.  If there aren't any remote users (i.e., via the
         * NFS translator), we have a slightly easier job.
         * The faster way to do this is to check for *aname == '@' and if
         * it's there, check for @sys; otherwise, assume there's no @sys.
         * Then, if the lookup fails, check for .*@sys...
         */
        /* above now implemented by Check_AtSys and Next_AtSys */
        /* look up the name in the appropriate dir, and return a cache entry
           on the resulting fid */
        theDir = tdc->f.inode;
        code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
        /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
        while (code == ENOENT && Next_AtSys(adp, &treq, &sysState)) {
            code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
        tname = sysState.name;
        ReleaseReadLock(&tdc->lock);
        if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
            ReleaseReadLock(&adp->lock);
                tcell = afs_GetCellByName(tname + 1, READ_LOCK);
                tcell = afs_GetCellByName(tname, READ_LOCK);
                afs_PutCell(tcell, READ_LOCK);
                afs_RefreshDynroot();
                if (tname != aname && tname) osi_FreeLargeSpace(tname);
            ReleaseReadLock(&adp->lock);
        /* new fid has same cell and volume */
        tfid.Cell = adp->fid.Cell;
        tfid.Fid.Volume = adp->fid.Fid.Volume;
        afs_Trace4(afs_iclSetp, CM_TRACE_LOOKUP, ICL_TYPE_POINTER, adp,
                   ICL_TYPE_STRING, tname,
                   ICL_TYPE_FID, &tfid, ICL_TYPE_INT32, code);
            if (code != ENOENT) {
                printf("LOOKUP dirLookupOff -> %d\n", code);
        /* prefetch some entries, if the dir is currently open.  The variable
         * dirCookie tells us where to start prefetching from.
         */
        if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign) && !afs_IsDynroot(adp)) {
            /* if the entry is not in the cache, or is in the cache,
             * but hasn't been statd, then do a bulk stat operation.
             */
                ObtainReadLock(&afs_xvcache);
                tvc = afs_FindVCache(&tfid, 1, 0, &retry, 0 /* !stats,!lru */);
                ReleaseReadLock(&afs_xvcache);
            } while (tvc && retry);
            if (!tvc || !(tvc->states & CStatd))
                bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
            /* if the vcache isn't usable, release it */
            if (tvc && !(tvc->states & CStatd)) {
                afs_PutVCache(tvc, 0);
                tvc = (struct vcache *) 0;
            tvc = (struct vcache *) 0;
        /* now get the status info, if we don't already have it */
        /* This is kind of weird, but we might wind up accidentally calling
         * RXAFS_Lookup because we happened upon a file which legitimately
         * has a 0 uniquifier.  That is the result of allowing unique to wrap
         * to 0.  This was fixed in AFS 3.4.  For CForeign, Unique == 0 means that
         * the file has not yet been looked up.
         */
            afs_int32 cached = 0;
            if (!tfid.Fid.Unique && (adp->states & CForeign)) {
                tvc = afs_LookupVCache(&tfid, &treq, &cached, WRITE_LOCK,
            if (!tvc && !bulkcode) {   /* lookup failed or wasn't called */
                tvc = afs_GetVCache(&tfid, &treq, &cached, (struct vcache *)0,
        }   /* sub-block just to reduce stack usage */
            if (adp->states & CForeign)
                tvc->states |= CForeign;
            tvc->parentVnode = adp->fid.Fid.Vnode;
            tvc->parentUnique = adp->fid.Fid.Unique;
            tvc->states &= ~CBulkStat;
#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
            if (!(flags & AFS_LOOKUP_NOEVAL))
                /* don't eval mount points */
#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
            if (!afs_fakestat_enable && tvc->mvstat == 1) {
                /* a mt point, possibly unevaluated */
                struct volume *tvolp;
                ObtainWriteLock(&tvc->lock, 133);
                code = EvalMountPoint(tvc, adp, &tvolp, &treq);
                ReleaseWriteLock(&tvc->lock);
                    afs_PutVCache(tvc, WRITE_LOCK);
                    if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
                /* next, we want to continue using the target of the mt point */
                if (tvc->mvid && (tvc->states & CMValid)) {
                    /* now look up the target, to set the .. pointer */
                    afs_Trace2(afs_iclSetp, CM_TRACE_LOOKUP1,
                               ICL_TYPE_POINTER, tvc, ICL_TYPE_FID, &tvc->fid);
                    uvc = tvc; /* remember for later */
                    if (tvolp && (tvolp->states & VForeign)) {
                        /* XXXX tvolp has ref cnt on but not locked! XXX */
                        tvc = afs_GetRootVCache(tvc->mvid, &treq, (afs_int32 *)0, tvolp, WRITE_LOCK);
                        tvc = afs_GetVCache(tvc->mvid, &treq, (afs_int32 *)0,
                                            (struct vcache *)0, WRITE_LOCK);
                    afs_PutVCache(uvc, WRITE_LOCK);    /* we're done with it */
                        afs_PutVolume(tvolp, WRITE_LOCK);
                    /* now, if we came via a new mt pt (say because of a new
                     * release of a R/O volume), we must reevaluate the ..
                     * ptr to point back to the appropriate place */
                        ObtainWriteLock(&tvc->lock, 134);
                        if (tvc->mvid == (struct VenusFid *) 0) {
                            tvc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
                        /* set up the backpointer */
                        *tvc->mvid = tvolp->dotdot;
                        ReleaseWriteLock(&tvc->lock);
                        afs_PutVolume(tvolp, WRITE_LOCK);
                    afs_PutVCache(tvc, WRITE_LOCK);
                    if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
        if (tvc && !VREFCOUNT(tvc)) {
        /* if we get here, we found something in a directory that couldn't
           be located (a Multics "connection failure").  If the volume is
           read-only, we try flushing this entry from the cache and trying
           again. */
            tv = afs_GetVolume(&adp->fid, &treq, READ_LOCK);
                if (tv->states & VRO) {
                    pass = 1;  /* try this *once* */
                    ObtainWriteLock(&afs_xcbhash, 495);
                    afs_DequeueCallback(adp);
                    /* re-stat to get later version */
                    adp->states &= ~CStatd;
                    ReleaseWriteLock(&afs_xcbhash);
                    osi_dnlc_purgedp(adp);
                    afs_PutVolume(tv, READ_LOCK);
                afs_PutVolume(tv, READ_LOCK);
    /* put the network buffer back, if need be */
    if (tname != aname && tname) osi_FreeLargeSpace(tname);
        /* Handle RENAME; only need to check rename "." */
        if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
            if (!FidCmp(&(tvc->fid), &(adp->fid))) {
                afs_PutVCache(*avcp, WRITE_LOCK);
                afs_PutFakeStat(&fakestate);
                return afs_CheckCode(EISDIR, &treq, 18);
#endif /* AFS_OSF_ENV */
        afs_AddMarinerName(aname, tvc);
#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
        if (!(flags & AFS_LOOKUP_NOEVAL))
        /* Here we don't enter the name into the DNLC because we want the
           evaluated mount dir to be there (the vcache for the mounted volume)
           rather than the vc of the mount point itself.  We can still find the
           mount point's vc in the vcache by its fid. */
#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
        osi_dnlc_enter(adp, aname, tvc, &versionNo);
#ifdef AFS_LINUX20_ENV
        /* So the Linux inode cache is up to date. */
        code = afs_VerifyVCache(tvc, &treq);
        afs_PutFakeStat(&fakestate);
        return 0;      /* can't have been any errors if hit and !code */
    if (bulkcode) code = bulkcode; else
        code = afs_CheckCode(code, &treq, 19);
        /* If there is an error, make sure *avcp is null.
         * Alphas panic otherwise - defect 10719.
         */
        *avcp = (struct vcache *)0;
    afs_PutFakeStat(&fakestate);