2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
17 #include <afsconfig.h>
18 #include "afs/param.h"
23 #include "afs/sysincludes.h" /* Standard vendor system headers */
24 #include "afsincludes.h" /* Afs-based standard headers */
25 #include "afs/afs_stats.h" /* statistics */
26 #include "afs/afs_cbqueue.h"
27 #include "afs/nfsclient.h"
28 #include "afs/exporter.h"
29 #include "afs/afs_osidnlc.h"
30 #include "afs/afs_dynroot.h"
33 extern struct DirEntry *afs_dir_GetBlob();
34 extern struct vcache *afs_globalVp;
37 afs_int32 afs_bkvolpref = 0;
38 afs_int32 afs_bulkStatsDone;
39 static int bulkStatCounter = 0; /* counter for bulk stat seq. numbers */
40 int afs_fakestat_enable = 0; /* 1: fakestat-all, 2: fakestat-crosscell */
43 /* this would be faster if it did comparison as int32word, but would be
44 * dependent on byte-order and alignment, and I haven't figured out
45 * what "@sys" is in binary... */
46 #define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
48 /* call under write lock, evaluate mvid field from a mt pt.
49 * avc is the vnode of the mount point object; must be write-locked.
50 * advc is the vnode of the containing directory (optional; if NULL and
51 * EvalMountPoint succeeds, caller must initialize *avolpp->dotdot)
52 * avolpp is where we return a pointer to the volume named by the mount pt, if success
53 * areq is the identity of the caller.
55 * NOTE: this function returns a held volume structure in *avolpp if it returns 0!
/*
 * Parse the body of a mount point and resolve it to a volume.
 *
 * type    - the mount point's leading character ('#' regular, '%' RW-forced;
 *           inferred from the '#'/'%' tests below — confirm against caller).
 * data    - mount point text after the type char: "[cell:]volume[.<suffix>][:vno]".
 * states  - CStatd-style state flags of the vcache we are crossing FROM
 *           (CBackup/CRO are consulted to pick a prefetch target).
 * cellnum - default cell number when no cell name is present in data.
 * avolpp  - out: held volume struct (WRITE_LOCK reference) on success; may be
 *           NULL, in which case an all-numeric volume ID short-circuits and
 *           only the numeric results are returned.
 * areq    - identity of the caller, for cell/volume lookups.
 * acellidxp/avolnump/avnoidp - out: cell index, volume number, vnode id.
 *
 * Returns 0 on success, ENODEV if the volume cannot be resolved.
 */
58 EvalMountData(char type, char *data, afs_uint32 states, afs_uint32 cellnum,
59 struct volume **avolpp, register struct vrequest *areq,
60 afs_uint32 *acellidxp, afs_uint32 *avolnump, afs_uint32 *avnoidp)
62 struct volume *tvp = 0;
65 char *cpos, *volnamep, *x;
67 afs_int32 prefetch; /* 1=>None 2=>RO 3=>BK */
68 afs_int32 mtptCell, assocCell = 0, hac = 0;
69 afs_int32 samecell, roname, len;
70 afs_uint32 volid, cellidx, vnoid = 0;
72 cpos = afs_strchr(data, ':'); /* if cell name present */
/* Accept an all-numeric cell specifier as a literal cell number. */
77 for (x = data; *x >= '0' && *x <= '9'; x++)
78 cellnum = (cellnum * 10) + (*x - '0');
80 tcell = afs_GetCell(cellnum, READ_LOCK);
82 tcell = afs_GetCellByName(data, READ_LOCK);
88 tcell = afs_GetCell(cellnum, READ_LOCK);
/* Capture what we need from the cell entry, then drop the reference. */
95 cellidx = tcell->cellIndex;
96 mtptCell = tcell->cellNum; /* The cell for the mountpoint */
98 hac = 1; /* has associated cell */
99 assocCell = tcell->lcellp->cellNum; /* The associated cell */
101 afs_PutCell(tcell, READ_LOCK);
103 cpos = afs_strrchr(volnamep, ':'); /* if vno present */
106 /* Look for an all-numeric volume ID */
108 for (x = volnamep; *x >= '0' && *x <= '9'; x++)
109 volid = (volid * 10) + (*x - '0');
113 if (!*x) /* allow vno with numeric volid only */
114 for (x = (cpos + 1); *x >= '0' && *x <= '9'; x++)
115 vnoid = (vnoid * 10) + (*x - '0');
121 * If the volume ID was all-numeric, and they didn't ask for a
122 * pointer to the volume structure, then just return the number
123 * as-is. This is currently only used for handling name lookups
124 * in the dynamic mount directory.
126 if (!*x && !avolpp) {
128 *acellidxp = cellidx;
137 * If the volume ID was all-numeric, and the type was '%', then
138 * assume whoever made the mount point knew what they were doing,
139 * and don't second-guess them by forcing use of a RW volume when
140 * they gave the ID of something else.
142 if (!*x && type == '%') {
143 tfid.Fid.Volume = volid; /* use the volume ID exactly as given */
144 tfid.Cell = mtptCell;
145 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
147 return ENODEV; /* oops, can't do it */
151 /* Is volume name a "<n>.backup" or "<n>.readonly" name */
152 len = strlen(volnamep);
153 roname = ((len > 9) && (strcmp(&volnamep[len - 9], ".readonly") == 0))
154 || ((len > 7) && (strcmp(&volnamep[len - 7], ".backup") == 0));
156 /* When we cross mountpoint, do we stay in the same cell */
157 samecell = (cellnum == mtptCell) || (hac && (cellnum == assocCell));
159 /* Decide whether to prefetch the BK, or RO. Also means we want the BK or
161 * If this is a regular mountpoint with a RW volume name
162 * - If BK preference is enabled AND we remain within the same cell AND
163 * start from a BK volume, then we will want to prefetch the BK volume.
164 * - If we cross a cell boundary OR start from a RO volume, then we will
165 * want to prefetch the RO volume.
167 if ((type == '#') && !roname) {
168 if (afs_bkvolpref && samecell && (states & CBackup))
169 prefetch = 3; /* Prefetch the BK */
170 else if (!samecell || (states & CRO))
171 prefetch = 2; /* Prefetch the RO */
173 prefetch = 1; /* Do not prefetch */
175 prefetch = 1; /* Do not prefetch */
178 /* Get the volume struct. Unless this volume name has ".readonly" or
179 * ".backup" in it, this will get the volume struct for the RW volume.
180 * The RO volume will be prefetched if requested (but not returned).
182 tvp = afs_GetVolumeByName(volnamep, mtptCell, prefetch, areq, WRITE_LOCK);
184 /* If no volume was found in this cell, try the associated linked cell */
185 if (!tvp && hac && areq->volumeError) {
187 afs_GetVolumeByName(volnamep, assocCell, prefetch, areq,
191 /* Still not found. If we are looking for the RO, then perhaps the RW
192 * doesn't exist? Try adding ".readonly" to volname and look for that.
193 * Don't know why we do this. Would have still found it in above call - jpm.
195 if (!tvp && (prefetch == 2) && len < AFS_SMALLOCSIZ - 10) {
196 buf = (char *)osi_AllocSmallSpace(len + 10);
198 strcpy(buf, volnamep);
199 afs_strcat(buf, ".readonly");
201 tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
203 /* Try the associated linked cell if failed */
204 if (!tvp && hac && areq->volumeError) {
205 tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
207 osi_FreeSmallSpace(buf);
211 return ENODEV; /* Couldn't find the volume */
213 /* Don't cross mountpoint from a BK to a BK volume */
214 if ((states & CBackup) && (tvp->states & VBackup)) {
215 afs_PutVolume(tvp, WRITE_LOCK);
219 /* If we want (prefetched) the BK and it exists, then drop the RW volume
221 * Otherwise, if we want (prefetched) the RO and it exists, then drop the
222 * RW volume and get the RO.
223 * Otherwise, go with the RW.
225 if ((prefetch == 3) && tvp->backVol) {
226 tfid.Fid.Volume = tvp->backVol; /* remember BK volume */
227 tfid.Cell = tvp->cell;
228 afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
229 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
231 return ENODEV; /* oops, can't do it */
232 } else if ((prefetch >= 2) && tvp->roVol) {
233 tfid.Fid.Volume = tvp->roVol; /* remember RO volume */
234 tfid.Cell = tvp->cell;
235 afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
236 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
238 return ENODEV; /* oops, can't do it */
/* Success: hand the numeric results back to the caller. The held tvp
 * reference is either returned through avolpp or released below. */
243 *acellidxp = cellidx;
245 *avolnump = tvp->volume;
251 afs_PutVolume(tvp, WRITE_LOCK);
/*
 * Evaluate a mount point vcache into the volume it names.
 *
 * avc  - vnode of the mount point object; caller must hold it write-locked
 *        (per the contract documented above EvalMountData's caller comment).
 * advc - vnode of the containing directory (may be NULL; then the caller
 *        initializes (*avolpp)->dotdot itself).
 * avolpp - out: held volume structure on success.
 * areq - identity of the caller.
 *
 * Returns 0 on success (including the done-while-racing fast path) or an
 * error code from afs_HandleLink/EvalMountData.
 */
256 EvalMountPoint(register struct vcache *avc, struct vcache *advc,
257 struct volume **avolpp, register struct vrequest *areq)
262 AFS_STATCNT(EvalMountPoint);
/* Another thread may have already evaluated this mount point. */
264 if (avc->mvid && (avc->states & CMValid))
265 return 0; /* done while racing */
/* Fetch the mount point's link contents into avc->linkData. */
268 code = afs_HandleLink(avc, areq);
272 /* Determine which cell and volume the mountpoint goes to */
273 code = EvalMountData(avc->linkData[0], avc->linkData + 1,
274 avc->states, avc->fid.Cell, avolpp, areq, 0, 0,
276 if (code) return code;
/* Record the resolved target fid in the mount point's mvid and mark it valid. */
283 (struct VenusFid *)osi_AllocSmallSpace(sizeof(struct VenusFid));
284 avc->mvid->Cell = (*avolpp)->cell;
285 avc->mvid->Fid.Volume = (*avolpp)->volume;
286 avc->mvid->Fid.Vnode = avnoid;
287 avc->mvid->Fid.Unique = 1;
288 avc->states |= CMValid;
290 /* Used to: if the mount point is stored within a backup volume,
291 * then we should only update the parent pointer information if
292 * there's none already set, so as to avoid updating a volume's ..
293 * info with something in an OldFiles directory.
295 * Next two lines used to be under this if:
297 * if (!(avc->states & CBackup) || tvp->dotdot.Fid.Volume == 0)
299 * Now: update mount point back pointer on every call, so that we handle
300 * multiple mount points better. This way, when du tries to go back
301 * via chdir(".."), it will end up exactly where it started, yet
302 * cd'ing via a new path to a volume will reset the ".." pointer
305 (*avolpp)->mtpoint = avc->fid; /* setup back pointer to mtpoint */
307 (*avolpp)->dotdot = advc->fid;
315 * Must be called on an afs_fakestat_state object before calling
316 * afs_EvalFakeStat or afs_PutFakeStat. Calling afs_PutFakeStat
317 * without calling afs_EvalFakeStat is legal, as long as this
318 * function is called.
/*
 * Initialize an afs_fakestat_state object. Must be called before
 * afs_EvalFakeStat or afs_PutFakeStat (see comment above); a no-op when
 * fakestat support is disabled.
 */
321 afs_InitFakeStat(struct afs_fakestat_state *state)
323 if (!afs_fakestat_enable)
328 state->need_release = 0;
332 * afs_EvalFakeStat_int
334 * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
335 * which is called by those wrapper functions.
337 * Only issues RPCs if canblock is non-zero.
/*
 * Shared implementation behind afs_EvalFakeStat and afs_TryEvalFakeStat.
 *
 * If *avcp is a mount point (mvstat == 1), evaluate it and swap *avcp to
 * the volume root vcache, recording enough in 'state' for afs_PutFakeStat
 * to release the reference later.
 *
 * canblock - when zero, avoid operations that would issue RPCs; failures
 *            that would require blocking return 0 and leave *avcp unchanged
 *            (see the "canblock ? ENOENT : 0" sites below).
 */
340 afs_EvalFakeStat_int(struct vcache **avcp, struct afs_fakestat_state *state,
341 struct vrequest *areq, int canblock)
343 struct vcache *tvc, *root_vp;
344 struct volume *tvolp = NULL;
347 if (!afs_fakestat_enable)
/* state must have been initialized and not yet evaluated. */
350 osi_Assert(state->valid == 1);
351 osi_Assert(state->did_eval == 0);
/* Only mount points (mvstat == 1) need fakestat treatment. */
355 if (tvc->mvstat != 1)
358 /* Is the call to VerifyVCache really necessary? */
359 code = afs_VerifyVCache(tvc, areq);
363 ObtainWriteLock(&tvc->lock, 599);
364 code = EvalMountPoint(tvc, NULL, &tvolp, areq);
365 ReleaseWriteLock(&tvc->lock);
/* Fill in the volume's ".." from the mount point's parent. */
369 tvolp->dotdot = tvc->fid;
370 tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
371 tvolp->dotdot.Fid.Unique = tvc->parentUnique;
374 if (tvc->mvid && (tvc->states & CMValid)) {
/* Look up the root vcache, retrying on SGI MP races flagged by retry. */
380 ObtainWriteLock(&afs_xvcache, 597);
381 root_vp = afs_FindVCache(tvc->mvid, &retry, IS_WLOCK);
382 if (root_vp && retry) {
383 ReleaseWriteLock(&afs_xvcache);
384 afs_PutVCache(root_vp);
386 } while (root_vp && retry);
387 ReleaseWriteLock(&afs_xvcache);
/* Not in the cache: fetch it (blocking path only). */
389 root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL);
392 code = canblock ? ENOENT : 0;
395 #ifdef AFS_DARWIN80_ENV
396 root_vp->m.Type = VDIR;
398 code = afs_darwin_finalizevnode(root_vp, NULL, NULL, 0);
401 vnode_ref(AFSTOV(root_vp));
403 if (tvolp && !afs_InReadDir(root_vp)) {
404 /* Is this always kosher? Perhaps we should instead use
405 * NBObtainWriteLock to avoid potential deadlock.
407 ObtainWriteLock(&root_vp->lock, 598);
409 root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
410 *root_vp->mvid = tvolp->dotdot;
411 ReleaseWriteLock(&root_vp->lock);
/* Remember the held root so afs_PutFakeStat can release it. */
413 state->need_release = 1;
414 state->root_vp = root_vp;
418 code = canblock ? ENOENT : 0;
423 afs_PutVolume(tvolp, WRITE_LOCK);
430 * Automatically does the equivalent of EvalMountPoint for vcache entries
431 * which are mount points. Remembers enough state to properly release
432 * the volume root vcache when afs_PutFakeStat() is called.
434 * State variable must be initialized by afs_InitFakeState() beforehand.
436 * Returns 0 when everything succeeds and *avcp points to the vcache entry
437 * that should be used for the real vnode operation. Returns non-zero if
438 * something goes wrong and the error code should be returned to the user.
/* Blocking wrapper: evaluate a mount point in *avcp, issuing RPCs if needed
 * (canblock = 1). See afs_EvalFakeStat_int for the full contract. */
441 afs_EvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
442 struct vrequest *areq)
444 return afs_EvalFakeStat_int(avcp, state, areq, 1);
448 * afs_TryEvalFakeStat
450 * Same as afs_EvalFakeStat, but tries not to talk to remote servers
451 * and only evaluate the mount point if all the data is already in
454 * Returns 0 if everything succeeds and *avcp points to a valid
455 * vcache entry (possibly evaluated).
/* Non-blocking wrapper: same as afs_EvalFakeStat but never issues RPCs
 * (canblock = 0); only evaluates if the data is already cached. */
458 afs_TryEvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
459 struct vrequest *areq)
461 return afs_EvalFakeStat_int(avcp, state, areq, 0);
467 * Perform any necessary cleanup at the end of a vnode op, given that
468 * afs_InitFakeStat was previously called with this state.
/*
 * Release any vcache reference acquired by afs_EvalFakeStat for this state.
 * Safe to call even if afs_EvalFakeStat was never called, as long as
 * afs_InitFakeStat was. No-op when fakestat support is disabled.
 */
471 afs_PutFakeStat(struct afs_fakestat_state *state)
473 if (!afs_fakestat_enable)
476 osi_Assert(state->valid == 1);
477 if (state->need_release)
478 afs_PutVCache(state->root_vp);
/*
 * Check whether a name is safe to cache: names ending in "@sys" are
 * rejected (they expand per-client and must not be cached literally).
 */
483 afs_ENameOK(register char *aname)
487 AFS_STATCNT(ENameOK);
488 tlen = strlen(aname);
489 if (tlen >= 4 && strcmp(aname + tlen - 4, "@sys") == 0)
/*
 * Copy the first @sys substitution name for this caller into bufp and hand
 * back the full list via *sysnamelist.
 *
 * For local callers the global afs_sysnamelist is used; for NFS-translated
 * callers (afs_nfsexporter set) the per-exporter list is consulted via
 * EXP_SYSNAME, falling back to the literal "@sys" on error.
 *
 * bufp must be large enough for a sysname (callers pass MAXSYSNAME-sized
 * buffers — confirm at call sites).
 */
495 afs_getsysname(register struct vrequest *areq, register struct vcache *adp,
496 register char *bufp, int *num, char **sysnamelist[])
498 register struct unixuser *au;
499 register afs_int32 error;
501 AFS_STATCNT(getsysname);
503 *sysnamelist = afs_sysnamelist;
505 if (!afs_nfsexporter)
506 strcpy(bufp, (*sysnamelist)[0]);
/* NFS-translator path: ask the exporter for the client's sysname list. */
508 au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
510 error = EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, num, 0);
512 strcpy(bufp, "@sys");
516 strcpy(bufp, (*sysnamelist)[0]);
519 strcpy(bufp, afs_sysname);
/*
 * Begin @sys expansion for a lookup name. If aname is exactly "@sys",
 * allocate a buffer and fill it with the first substitution from
 * afs_getsysname; otherwise point state->name at the caller's string
 * unchanged. state carries the iteration context for Next_AtSys.
 */
526 Check_AtSys(register struct vcache *avc, const char *aname,
527 struct sysname_info *state, struct vrequest *areq)
530 char **sysnamelist[MAXNUMSYSNAMES];
532 if (AFS_EQ_ATSYS(aname)) {
534 state->name = (char *)osi_AllocLargeSpace(MAXSYSNAME);
537 afs_getsysname(areq, avc, state->name, &num, sysnamelist);
/* Not "@sys": use the name as-is (cast drops const; buffer not modified
 * on this path — confirm against callers). */
542 state->name = (char *)aname;
/*
 * Advance to the next @sys substitution candidate in state->name.
 * Returns 0 when the candidate list is exhausted (or the name contains
 * no @sys suffix at all). Handles both exact "@sys" names begun by
 * Check_AtSys and names of the form "prefix@sys".
 */
547 Next_AtSys(register struct vcache *avc, struct vrequest *areq,
548 struct sysname_info *state)
550 int num = afs_sysnamecount;
551 char **sysnamelist[MAXNUMSYSNAMES];
553 if (state->index == -1)
554 return 0; /* No list */
556 /* Check for the initial state of aname != "@sys" in Check_AtSys */
557 if (state->offset == -1 && state->allocked == 0) {
558 register char *tname;
560 /* Check for .*@sys */
561 for (tname = state->name; *tname; tname++)
562 /*Move to the end of the string */ ;
/* Name ends in "@sys" with a non-empty prefix: copy the prefix into a
 * fresh buffer and append the first substitution after it. */
564 if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname - 4))) {
565 state->offset = (tname - 4) - state->name;
566 tname = (char *)osi_AllocLargeSpace(AFS_LRALLOCSIZ);
567 strncpy(tname, state->name, state->offset);
572 afs_getsysname(areq, avc, state->name + state->offset, &num,
576 return 0; /* .*@sys doesn't match either */
578 register struct unixuser *au;
579 register afs_int32 error;
581 *sysnamelist = afs_sysnamelist;
/* NFS-translated callers get their own sysname list from the exporter. */
583 if (afs_nfsexporter) {
584 au = afs_GetUser(areq->uid, avc->fid.Cell, 0);
587 EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, &num, 0);
/* Step to the next list entry; stop at the end of the list. */
595 if (++(state->index) >= num || !(*sysnamelist)[(unsigned int)state->index])
596 return 0; /* end of list */
598 strcpy(state->name + state->offset, (*sysnamelist)[(unsigned int)state->index]);
602 extern int BlobScan(struct dcache * afile, afs_int32 ablob);
604 /* called with an unlocked directory and directory cookie. Areqp
605 * describes who is making the call.
606 * Scans the next N (about 30, typically) directory entries, and does
607 * a bulk stat call to stat them all.
609 * Must be very careful when merging in RPC responses, since we dont
610 * want to overwrite newer info that was added by a file system mutating
611 * call that ran concurrently with our bulk stat call.
613 * We do that, as described below, by not merging in our info (always
614 * safe to skip the merge) if the status info is valid in the vcache entry.
616 * If adapt ever implements the bulk stat RPC, then this code will need to
617 * ensure that vcaches created for failed RPC's to older servers have the
620 static struct vcache *BStvc = NULL;
/*
 * Bulk-stat a window of directory entries starting at dirCookie in adp,
 * via the (Inline)BulkStatus RPC, merging results into the stat cache.
 *
 * Races with concurrent mutating calls are detected per file using the
 * CBulkFetching state bit plus a sequence number stashed in m.Length
 * (see the WARNING comment below); status is merged only when both still
 * match. The returned volume root vcaches are moved near lruvcp in the
 * VLRU rather than to its head, so bulk prefetch doesn't evict the
 * working set.
 */
623 afs_DoBulkStat(struct vcache *adp, long dirCookie, struct vrequest *areqp)
625 int nentries; /* # of entries to prefetch */
626 int nskip; /* # of slots in the LRU queue to skip */
627 struct vcache *lruvcp; /* vcache ptr of our goal pos in LRU queue */
628 struct dcache *dcp; /* chunk containing the dir block */
629 char *statMemp; /* status memory block */
630 char *cbfMemp; /* callback and fid memory block */
631 afs_size_t temp; /* temp for holding chunk length, &c. */
632 struct AFSFid *fidsp; /* file IDs were collecting */
633 struct AFSCallBack *cbsp; /* call back pointers */
634 struct AFSCallBack *tcbp; /* temp callback ptr */
635 struct AFSFetchStatus *statsp; /* file status info */
636 struct AFSVolSync volSync; /* vol sync return info */
637 struct vcache *tvcp; /* temp vcp */
638 struct afs_q *tq; /* temp queue variable */
639 AFSCBFids fidParm; /* file ID parm for bulk stat */
640 AFSBulkStats statParm; /* stat info parm for bulk stat */
641 int fidIndex = 0; /* which file were stating */
642 struct conn *tcp = 0; /* conn for call */
643 AFSCBs cbParm; /* callback parm for bulk stat */
644 struct server *hostp = 0; /* host we got callback from */
645 long startTime; /* time we started the call,
646 * for callback expiration base
648 afs_size_t statSeqNo = 0; /* value stored in file size to detect races */
649 int code; /* error code */
650 long newIndex; /* new index in the dir */
651 struct DirEntry *dirEntryp; /* dir entry we are examining */
653 struct VenusFid afid; /* file ID we are using now */
654 struct VenusFid tfid; /* another temp. file ID */
655 afs_int32 retry; /* handle low-level SGI MP race conditions */
656 long volStates; /* flags from vol structure */
657 struct volume *volp = 0; /* volume ptr */
658 struct VenusFid dotdot;
659 int flagIndex = 0; /* First file with bulk fetch flag set */
660 int inlinebulk = 0; /* Did we use InlineBulk RPC or not? */
662 #ifdef AFS_DARWIN80_ENV
663 panic("bulkstatus doesn't work on AFS_DARWIN80_ENV. don't call it");
665 /* first compute some basic parameters. We dont want to prefetch more
666 * than a fraction of the cache in any given call, and we want to preserve
667 * a portion of the LRU queue in any event, so as to avoid thrashing
668 * the entire stat cache (we will at least leave some of it alone).
669 * presently dont stat more than 1/8 the cache in any one call. */
670 nentries = afs_cacheStats / 8;
672 /* dont bother prefetching more than one calls worth of info */
673 if (nentries > AFSCBMAX)
676 /* heuristic to make sure that things fit in 4K. This means that
677 * we shouldnt make it any bigger than 47 entries. I am typically
678 * going to keep it a little lower, since we don't want to load
679 * too much of the stat cache.
684 /* now, to reduce the stack size, well allocate two 4K blocks,
685 * one for fids and callbacks, and one for stat info. Well set
686 * up our pointers to the memory from there, too.
688 statMemp = osi_AllocLargeSpace(nentries * sizeof(AFSFetchStatus));
689 statsp = (struct AFSFetchStatus *)statMemp;
691 osi_AllocLargeSpace(nentries *
692 (sizeof(AFSCallBack) + sizeof(AFSFid)));
693 fidsp = (AFSFid *) cbfMemp;
694 cbsp = (AFSCallBack *) (cbfMemp + nentries * sizeof(AFSFid));
696 /* next, we must iterate over the directory, starting from the specified
697 * cookie offset (dirCookie), and counting out nentries file entries.
698 * We skip files that already have stat cache entries, since we
699 * dont want to bulk stat files that are already in the cache.
702 code = afs_VerifyVCache(adp, areqp);
706 dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
712 /* lock the directory cache entry */
713 ObtainReadLock(&adp->lock);
714 ObtainReadLock(&dcp->lock);
717 * Make sure that the data in the cache is current. There are two
718 * cases we need to worry about:
719 * 1. The cache data is being fetched by another process.
720 * 2. The cache data is no longer valid
722 while ((adp->states & CStatd)
723 && (dcp->dflags & DFFetching)
724 && hsame(adp->m.DataVersion, dcp->f.versionNo)) {
725 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
726 __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER, dcp,
727 ICL_TYPE_INT32, dcp->dflags);
728 ReleaseReadLock(&dcp->lock);
729 ReleaseReadLock(&adp->lock);
730 afs_osi_Sleep(&dcp->validPos);
731 ObtainReadLock(&adp->lock);
732 ObtainReadLock(&dcp->lock);
734 if (!(adp->states & CStatd)
735 || !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
736 ReleaseReadLock(&dcp->lock);
737 ReleaseReadLock(&adp->lock);
742 /* Generate a sequence number so we can tell whether we should
743 * store the attributes when processing the response. This number is
744 * stored in the file size when we set the CBulkFetching bit. If the
745 * CBulkFetching is still set and this value hasn't changed, then
746 * we know we were the last to set CBulkFetching bit for this file,
747 * and it is safe to set the status information for this file.
749 statSeqNo = bulkStatCounter++;
751 /* now we have dir data in the cache, so scan the dir page */
754 while (1) { /* Should probably have some constant bound */
755 /* look for first safe entry to examine in the directory. BlobScan
756 * looks for the 1st allocated dir after the dirCookie slot.
758 newIndex = BlobScan(dcp, (dirCookie >> 5));
762 /* remember the updated directory cookie */
763 dirCookie = newIndex << 5;
765 /* get a ptr to the dir entry */
767 (struct DirEntry *)afs_dir_GetBlob(dcp, newIndex);
771 /* dont copy more than we have room for */
772 if (fidIndex >= nentries) {
773 DRelease((struct buffer *)dirEntryp, 0);
777 /* now, if the dir entry looks good, copy it out to our list. Vnode
778 * 0 means deleted, although it should also be free were it deleted.
780 if (dirEntryp->fid.vnode != 0) {
781 /* dont copy entries we have in our cache. This check will
782 * also make us skip "." and probably "..", unless it has
783 * disappeared from the cache since we did our namei call.
785 tfid.Cell = adp->fid.Cell;
786 tfid.Fid.Volume = adp->fid.Fid.Volume;
787 tfid.Fid.Vnode = ntohl(dirEntryp->fid.vnode);
788 tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
791 ObtainWriteLock(&afs_xvcache, 130);
792 tvcp = afs_FindVCache(&tfid, &retry, IS_WLOCK /* no stats | LRU */ );
794 ReleaseWriteLock(&afs_xvcache);
797 } while (tvcp && retry);
798 if (!tvcp) { /* otherwise, create manually */
799 tvcp = afs_NewVCache(&tfid, hostp);
802 ObtainWriteLock(&tvcp->lock, 505);
803 ReleaseWriteLock(&afs_xvcache);
804 afs_RemoveVCB(&tfid);
805 ReleaseWriteLock(&tvcp->lock);
807 ReleaseWriteLock(&afs_xvcache);
810 ReleaseWriteLock(&afs_xvcache);
814 DRelease((struct buffer *)dirEntryp, 0);
815 ReleaseReadLock(&dcp->lock);
816 ReleaseReadLock(&adp->lock);
818 goto done; /* can happen if afs_NewVCache fails */
821 #ifdef AFS_DARWIN80_ENV
822 if (tvcp->states & CVInit) {
823 /* XXX don't have status yet, so creating the vnode is
824 not yet useful. we would get CDeadVnode set, and the
825 upcoming PutVCache will cause the vcache to be flushed &
826 freed, which in turn means the bulkstatus results won't
830 /* WARNING: afs_DoBulkStat uses the Length field to store a
831 * sequence number for each bulk status request. Under no
832 * circumstances should afs_DoBulkStat store a sequence number
833 * if the new length will be ignored when afs_ProcessFS is
834 * called with new stats. */
836 if (!(tvcp->states & (CStatd | CBulkFetching))
837 && (tvcp->execsOrWriters <= 0)
838 && !afs_DirtyPages(tvcp)
839 && !AFS_VN_MAPPED((vnode_t *) tvcp))
841 if (!(tvcp->states & (CStatd | CBulkFetching))
842 && (tvcp->execsOrWriters <= 0)
843 && !afs_DirtyPages(tvcp))
847 /* this entry doesnt exist in the cache, and is not
848 * already being fetched by someone else, so add it to the
849 * list of file IDs to obtain.
851 * We detect a callback breaking race condition by checking the
852 * CBulkFetching state bit and the value in the file size.
853 * It is safe to set the status only if the CBulkFetching
854 * flag is still set and the value in the file size does
857 * Don't fetch status for dirty files. We need to
858 * preserve the value of the file size. We could
859 * flush the pages, but it wouldn't be worthwhile.
861 memcpy((char *)(fidsp + fidIndex), (char *)&tfid.Fid,
863 tvcp->states |= CBulkFetching;
864 tvcp->m.Length = statSeqNo;
870 /* if dir vnode has non-zero entry */
871 /* move to the next dir entry by adding in the # of entries
872 * used by this dir entry.
874 temp = afs_dir_NameBlobs(dirEntryp->name) << 5;
875 DRelease((struct buffer *)dirEntryp, 0);
879 } /* while loop over all dir entries */
881 /* now release the dir lock and prepare to make the bulk RPC */
882 ReleaseReadLock(&dcp->lock);
883 ReleaseReadLock(&adp->lock);
885 /* release the chunk */
888 /* dont make a null call */
893 /* setup the RPC parm structures */
894 fidParm.AFSCBFids_len = fidIndex;
895 fidParm.AFSCBFids_val = fidsp;
896 statParm.AFSBulkStats_len = fidIndex;
897 statParm.AFSBulkStats_val = statsp;
898 cbParm.AFSCBs_len = fidIndex;
899 cbParm.AFSCBs_val = cbsp;
901 /* start the timer; callback expirations are relative to this */
902 startTime = osi_Time();
904 tcp = afs_Conn(&adp->fid, areqp, SHARED_LOCK);
906 hostp = tcp->srvr->server;
907 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
/* Prefer InlineBulkStatus; fall back to BulkStatus (and remember the
 * server can't do inline) when the server rejects the opcode. */
910 if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
912 RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
914 if (code == RXGEN_OPCODE) {
915 tcp->srvr->server->flags |= SNO_INLINEBULK;
918 RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
925 RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
933 (tcp, code, &adp->fid, areqp, AFS_STATS_FS_RPCIDX_BULKSTATUS,
936 /* now, if we didnt get the info, bail out. */
940 /* we need vol flags to create the entries properly */
941 dotdot.Fid.Volume = 0;
942 volp = afs_GetVolume(&adp->fid, areqp, READ_LOCK);
944 volStates = volp->states;
945 if (volp->dotdot.Fid.Volume != 0)
946 dotdot = volp->dotdot;
950 /* find the place to merge the info into. We do this by skipping
951 * nskip entries in the LRU queue. The more we skip, the more
952 * we preserve, since the head of the VLRU queue is the most recently
956 nskip = afs_cacheStats / 2; /* preserved fraction of the cache */
957 ObtainReadLock(&afs_xvcache);
959 /* actually a serious error, probably should panic. Probably will
960 * panic soon, oh well. */
961 ReleaseReadLock(&afs_xvcache);
962 afs_warnuser("afs_DoBulkStat: VLRU empty!");
965 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
966 refpanic("Bulkstat VLRU inconsistent");
968 for (tq = VLRU.next; tq != &VLRU; tq = QNext(tq)) {
971 else if (QNext(QPrev(tq)) != tq) {
973 refpanic("BulkStat VLRU inconsistent");
979 lruvcp = QTOV(VLRU.next);
981 /* now we have to hold this entry, so that it does not get moved
982 * into the free list while we're running. It could still get
983 * moved within the lru queue, but hopefully that will be rare; it
984 * doesn't hurt nearly as much.
987 osi_vnhold(lruvcp, &retry);
988 ReleaseReadLock(&afs_xvcache); /* could be read lock */
992 /* otherwise, merge in the info. We have to be quite careful here,
993 * since we need to ensure that we don't merge old info over newer
994 * stuff in a stat cache entry. We're very conservative here: we don't
995 * do the merge at all unless we ourselves create the stat cache
996 * entry. That's pretty safe, and should work pretty well, since we
997 * typically expect to do the stat cache creation ourselves.
999 * We also have to take into account racing token revocations.
1001 for (i = 0; i < fidIndex; i++) {
1002 if ((&statsp[i])->errorCode)
1004 afid.Cell = adp->fid.Cell;
1005 afid.Fid.Volume = adp->fid.Fid.Volume;
1006 afid.Fid.Vnode = fidsp[i].Vnode;
1007 afid.Fid.Unique = fidsp[i].Unique;
1010 ObtainReadLock(&afs_xvcache);
1011 tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */ );
1012 ReleaseReadLock(&afs_xvcache);
1013 } while (tvcp && retry);
1015 /* The entry may no longer exist */
1020 /* now we have the entry held, but we need to fill it in */
1021 ObtainWriteLock(&tvcp->lock, 131);
1023 /* if CBulkFetching is not set, or if the file size no longer
1024 * matches the value we placed there when we set the CBulkFetching
1025 * flag, then someone else has done something with this node,
1026 * and we may not have the latest status information for this
1027 * file. Leave the entry alone.
1029 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
1031 ReleaseWriteLock(&tvcp->lock);
1032 afs_PutVCache(tvcp);
1036 /* now copy ".." entry back out of volume structure, if necessary */
1037 if (tvcp->mvstat == 2 && (dotdot.Fid.Volume != 0)) {
1039 tvcp->mvid = (struct VenusFid *)
1040 osi_AllocSmallSpace(sizeof(struct VenusFid));
1041 *tvcp->mvid = dotdot;
/* Reposition the entry next to lruvcp in the VLRU (not at the head),
 * consistency-checking the queue links before and after the move. */
1044 ObtainWriteLock(&afs_xvcache, 132);
1045 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1046 refpanic("Bulkstat VLRU inconsistent2");
1048 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
1049 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
1050 refpanic("Bulkstat VLRU inconsistent4");
1052 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
1053 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
1054 refpanic("Bulkstat VLRU inconsistent5");
1057 if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
1058 QRemove(&tvcp->vlruq);
1059 QAdd(&lruvcp->vlruq, &tvcp->vlruq);
1062 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1063 refpanic("Bulkstat VLRU inconsistent3");
1065 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
1066 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
1067 refpanic("Bulkstat VLRU inconsistent5");
1069 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
1070 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
1071 refpanic("Bulkstat VLRU inconsistent6");
1073 ReleaseWriteLock(&afs_xvcache);
1075 ObtainWriteLock(&afs_xcbhash, 494);
1077 /* We need to check the flags again. We may have missed
1078 * something while we were waiting for a lock.
1080 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
1082 ReleaseWriteLock(&tvcp->lock);
1083 ReleaseWriteLock(&afs_xcbhash);
1084 afs_PutVCache(tvcp);
1088 /* now merge in the resulting status back into the vnode.
1089 * We only do this if the entry looks clear.
1091 afs_ProcessFS(tvcp, &statsp[i], areqp);
1092 #if defined(AFS_LINUX22_ENV)
1093 afs_fill_inode(AFSTOV(tvcp), NULL); /* reset inode operations */
1096 /* do some accounting for bulk stats: mark this entry as
1097 * loaded, so we can tell if we use it before it gets
1100 tvcp->states |= CBulkStat;
1101 tvcp->states &= ~CBulkFetching;
1103 afs_bulkStatsDone++;
1105 /* merge in vol info */
1106 if (volStates & VRO)
1107 tvcp->states |= CRO;
1108 if (volStates & VBackup)
1109 tvcp->states |= CBackup;
1110 if (volStates & VForeign)
1111 tvcp->states |= CForeign;
1113 /* merge in the callback info */
1114 tvcp->states |= CTruth;
1116 /* get ptr to the callback we are interested in */
1119 if (tcbp->ExpirationTime != 0) {
1120 tvcp->cbExpires = tcbp->ExpirationTime + startTime;
1121 tvcp->callback = hostp;
1122 tvcp->states |= CStatd;
1123 afs_QueueCallback(tvcp, CBHash(tcbp->ExpirationTime), volp);
1124 } else if (tvcp->states & CRO) {
1125 /* ordinary callback on a read-only volume -- AFS 3.2 style */
1126 tvcp->cbExpires = 3600 + startTime;
1127 tvcp->callback = hostp;
1128 tvcp->states |= CStatd;
1129 afs_QueueCallback(tvcp, CBHash(3600), volp);
1132 tvcp->states &= ~(CStatd | CUnique);
1133 afs_DequeueCallback(tvcp);
1134 if ((tvcp->states & CForeign) || (vType(tvcp) == VDIR))
1135 osi_dnlc_purgedp(tvcp); /* if it (could be) a directory */
1137 ReleaseWriteLock(&afs_xcbhash);
1139 ReleaseWriteLock(&tvcp->lock);
1140 /* finally, we're done with the entry */
1141 afs_PutVCache(tvcp);
1142 } /* for all files we got back */
1144 /* finally return the pointer into the LRU queue */
1145 afs_PutVCache(lruvcp);
1148 /* Be sure to turn off the CBulkFetching flags */
1149 for (i = flagIndex; i < fidIndex; i++) {
1150 afid.Cell = adp->fid.Cell;
1151 afid.Fid.Volume = adp->fid.Fid.Volume;
1152 afid.Fid.Vnode = fidsp[i].Vnode;
1153 afid.Fid.Unique = fidsp[i].Unique;
1156 ObtainReadLock(&afs_xvcache);
1157 tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */ );
1158 ReleaseReadLock(&afs_xvcache);
1159 } while (tvcp && retry);
1160 if (tvcp != NULL && (tvcp->states & CBulkFetching)
1161 && (tvcp->m.Length == statSeqNo)) {
1162 tvcp->states &= ~CBulkFetching;
1165 afs_PutVCache(tvcp);
1169 afs_PutVolume(volp, READ_LOCK);
1171 /* If we did the InlineBulk RPC pull out the return code */
1173 if ((&statsp[0])->errorCode) {
1174 afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
1175 AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL);
1176 code = (&statsp[0])->errorCode;
/* Release the two scratch buffers allocated at entry. */
1182 osi_FreeLargeSpace(statMemp);
1183 osi_FreeLargeSpace(cbfMemp);
1187 /* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
1188 #ifdef AFS_DARWIN80_ENV
1191 static int AFSDOBULK = 1;	/* nonzero => afs_lookup may prefetch dir entries via afs_DoBulkStat() */
1196 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct AFS_UCRED *acred, int opflag, int wantparent)
1197 #elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
1198 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct pathname *pnp, int flags, struct vnode *rdir, struct AFS_UCRED *acred)
1199 #elif defined(UKERNEL)
1200 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct AFS_UCRED *acred, int flags)
1202 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct AFS_UCRED *acred)
1205 struct vrequest treq;
1207 register struct vcache *tvc = 0;
1208 register afs_int32 code;
1209 register afs_int32 bulkcode = 0;
1210 int pass = 0, hit = 0;
1212 extern afs_int32 afs_mariner; /*Writing activity to log? */
1213 afs_hyper_t versionNo;
1214 int no_read_access = 0;
1215 struct sysname_info sysState; /* used only for @sys checking */
1216 int dynrootRetry = 1;
1217 struct afs_fakestat_state fakestate;
1218 int tryEvalOnly = 0;
1219 OSI_VC_CONVERT(adp);
1221 AFS_STATCNT(afs_lookup);
1222 afs_InitFakeStat(&fakestate);
1224 if ((code = afs_InitReq(&treq, acred)))
1228 ndp->ni_dvp = AFSTOV(adp);
1229 #endif /* AFS_OSF_ENV */
1231 #if defined(AFS_DARWIN_ENV)
1232 /* Workaround for MacOSX Finder, which tries to look for
1233 * .DS_Store and Contents under every directory.
1235 if (afs_fakestat_enable && adp->mvstat == 1) {
1236 if (strcmp(aname, ".DS_Store") == 0)
1238 if (strcmp(aname, "Contents") == 0)
1244 code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
1246 code = afs_EvalFakeStat(&adp, &fakestate, &treq);
1247 if (tryEvalOnly && adp->mvstat == 1)
1252 *avcp = NULL; /* Since some callers don't initialize it */
1254 /* come back to here if we encounter a non-existent object in a read-only
1255 * volume's directory */
1258 *avcp = NULL; /* Since some callers don't initialize it */
1261 if (!(adp->states & CStatd) && !afs_InReadDir(adp)) {
1262 if ((code = afs_VerifyVCache2(adp, &treq))) {
1268 /* watch for ".." in a volume root */
1269 if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
1270 /* looking up ".." in root via special hacks */
1271 if (adp->mvid == (struct VenusFid *)0 || adp->mvid->Fid.Volume == 0) {
1273 if (adp == afs_globalVp) {
1274 struct vnode *rvp = AFSTOV(adp);
1276 ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
1277 ndp->ni_dvp = ndp->ni_vp;
1287 /* otherwise we have the fid here, so we use it */
1288 tvc = afs_GetVCache(adp->mvid, &treq, NULL, NULL);
1289 afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT, ICL_TYPE_FID, adp->mvid,
1290 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, code);
1292 code = (tvc ? 0 : ENOENT);
1294 if (tvc && !VREFCOUNT_GT(tvc, 0)) {
1298 /*printf("LOOKUP GETVCDOTDOT -> %d\n", code); */
1303 /* now check the access */
1304 if (treq.uid != adp->last_looker) {
1305 if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
1310 adp->last_looker = treq.uid;
1313 /* Check for read access as well. We need read access in order to
1314 * stat files, but not to stat subdirectories. */
1315 if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
1318 /* special case lookup of ".". Can we check for it sooner in this code,
1319 * for instance, way up before "redo:" ??
1320 * I'm not fiddling with the LRUQ here, either, perhaps I should, or else
1321 * invent a lightweight version of GetVCache.
1323 if (aname[0] == '.' && !aname[1]) { /* special case */
1324 ObtainReadLock(&afs_xvcache);
1326 ReleaseReadLock(&afs_xvcache);
1327 #ifdef AFS_DARWIN80_ENV
1328 vnode_get(AFSTOV(adp));
1333 if (adp && !VREFCOUNT_GT(adp, 0)) {
1340 * Special case lookup of ".." in the dynamic mount directory.
1341 * The parent of this directory is _always_ the AFS root volume.
1343 if (afs_IsDynrootMount(adp) &&
1344 aname[0] == '.' && aname[1] == '.' && !aname[2]) {
1346 ObtainReadLock(&afs_xvcache);
1347 osi_vnhold(afs_globalVp, 0);
1348 ReleaseReadLock(&afs_xvcache);
1349 #ifdef AFS_DARWIN80_ENV
1350 vnode_get(AFSTOV(afs_globalVp));
1353 *avcp = tvc = afs_globalVp;
1359 * Special case lookups in the dynamic mount directory.
1360 * The names here take the form cell:volume, similar to a mount point.
1361 * EvalMountData parses that and returns a cell and volume ID, which
1362 * we use to construct the appropriate dynroot Fid.
1364 if (afs_IsDynrootMount(adp)) {
1365 struct VenusFid tfid;
1366 afs_uint32 cellidx, volid, vnoid;
1368 code = EvalMountData('%', aname, 0, 0, NULL, &treq, &cellidx, &volid, &vnoid);
1371 afs_GetDynrootMountFid(&tfid);
1372 tfid.Fid.Vnode = VNUM_FROM_TYPEID(VN_TYPE_MOUNT, cellidx << 2);
1373 tfid.Fid.Unique = volid;
1374 *avcp = tvc = afs_GetVCache(&tfid, &treq, NULL, NULL);
1379 #ifdef AFS_LINUX26_ENV
1381 * Special case of the dynamic mount volume in a static root.
1382 * This is really unfortunate, but we need this for the translator.
1384 if (adp == afs_globalVp && !afs_GetDynrootEnable() &&
1385 !strcmp(aname, AFS_DYNROOT_MOUNTNAME)) {
1386 struct VenusFid tfid;
1388 afs_GetDynrootMountFid(&tfid);
1389 *avcp = tvc = afs_GetVCache(&tfid, &treq, NULL, NULL);
1396 Check_AtSys(adp, aname, &sysState, &treq);
1397 tname = sysState.name;
1399 /* 1st Check_AtSys and lookup by tname is required here, for now,
1400 * because the dnlc is *not* told to remove entries for the parent
1401 * dir of file/dir op that afs_LocalHero likes, but dnlc is informed
1402 * if the cached entry for the parent dir is invalidated for a
1404 * Otherwise, we'd be able to do a dnlc lookup on an entry ending
1405 * w/@sys and know the dnlc was consistent with reality. */
1406 tvc = osi_dnlc_lookup(adp, tname, WRITE_LOCK);
1407 *avcp = tvc; /* maybe wasn't initialized, but it is now */
1409 if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
1410 /* need read access on dir to stat non-directory / non-link */
1416 #ifdef AFS_LINUX22_ENV
1417 if (tvc->mvstat == 2) { /* we don't trust the dnlc for root vcaches */
1418 AFS_RELE(AFSTOV(tvc));
1425 #else /* non - LINUX */
1429 #endif /* linux22 */
1432 { /* sub-block just to reduce stack usage */
1433 register struct dcache *tdc;
1434 afs_size_t dirOffset, dirLen;
1435 struct VenusFid tfid;
1437 /* now we have to lookup the next fid */
1438 if (afs_InReadDir(adp))
1439 tdc = adp->dcreaddir;
1441 tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq,
1442 &dirOffset, &dirLen, 1);
1444 *avcp = NULL; /* redundant, but harmless */
1449 /* now we will just call dir package with appropriate inode.
1450 * Dirs are always fetched in their entirety for now */
1451 ObtainReadLock(&adp->lock);
1452 ObtainReadLock(&tdc->lock);
1455 * Make sure that the data in the cache is current. There are two
1456 * cases we need to worry about:
1457 * 1. The cache data is being fetched by another process.
1458 * 2. The cache data is no longer valid
1460 * If a readdir is in progress _in this thread_, it has a shared
1461 * lock on the vcache and has obtained current data, so we just
1462 * use that. This eliminates several possible deadlocks.
1464 if (!afs_InReadDir(adp)) {
1465 while ((adp->states & CStatd)
1466 && (tdc->dflags & DFFetching)
1467 && hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1468 ReleaseReadLock(&tdc->lock);
1469 ReleaseReadLock(&adp->lock);
1470 afs_osi_Sleep(&tdc->validPos);
1471 ObtainReadLock(&adp->lock);
1472 ObtainReadLock(&tdc->lock);
1474 if (!(adp->states & CStatd)
1475 || !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1476 ReleaseReadLock(&tdc->lock);
1477 ReleaseReadLock(&adp->lock);
1479 if (tname && tname != aname)
1480 osi_FreeLargeSpace(tname);
1485 /* Save the version number for when we call osi_dnlc_enter */
1486 hset(versionNo, tdc->f.versionNo);
1489 * check for, and handle "@sys" if it's there. We should be able
1490 * to avoid the alloc and the strcpy with a little work, but it's
1491 * not pressing. If there aren't any remote users (ie, via the
1492 * NFS translator), we have a slightly easier job.
1493 * the faster way to do this is to check for *aname == '@' and if
1494 * it's there, check for @sys, otherwise, assume there's no @sys
1495 * then, if the lookup fails, check for .*@sys...
1497 /* above now implemented by Check_AtSys and Next_AtSys */
1499 /* lookup the name in the appropriate dir, and return a cache entry
1500 * on the resulting fid */
1502 afs_dir_LookupOffset(tdc, sysState.name, &tfid.Fid,
1505 /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
1506 while (code == ENOENT && Next_AtSys(adp, &treq, &sysState))
1508 afs_dir_LookupOffset(tdc, sysState.name, &tfid.Fid,
1510 tname = sysState.name;
1512 ReleaseReadLock(&tdc->lock);
1513 if (!afs_InReadDir(adp))
1516 if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
1517 ReleaseReadLock(&adp->lock);
1519 if (tname[0] == '.')
1520 afs_LookupAFSDB(tname + 1);
1522 afs_LookupAFSDB(tname);
1523 if (tname && tname != aname)
1524 osi_FreeLargeSpace(tname);
1527 ReleaseReadLock(&adp->lock);
1530 /* new fid has same cell and volume */
1531 tfid.Cell = adp->fid.Cell;
1532 tfid.Fid.Volume = adp->fid.Fid.Volume;
1533 afs_Trace4(afs_iclSetp, CM_TRACE_LOOKUP, ICL_TYPE_POINTER, adp,
1534 ICL_TYPE_STRING, tname, ICL_TYPE_FID, &tfid,
1535 ICL_TYPE_INT32, code);
1538 if (code != ENOENT) {
1539 printf("LOOKUP dirLookupOff -> %d\n", code);
1544 /* prefetch some entries, if the dir is currently open. The variable
1545 * dirCookie tells us where to start prefetching from.
1547 if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign)
1548 && !afs_IsDynroot(adp) && !afs_InReadDir(adp)) {
1550 /* if the entry is not in the cache, or is in the cache,
1551 * but hasn't been statd, then do a bulk stat operation.
1555 ObtainReadLock(&afs_xvcache);
1556 tvc = afs_FindVCache(&tfid, &retry, 0 /* !stats,!lru */ );
1557 ReleaseReadLock(&afs_xvcache);
1558 } while (tvc && retry);
1560 if (!tvc || !(tvc->states & CStatd))
1561 bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
1565 /* if the vcache isn't usable, release it */
1566 if (tvc && !(tvc->states & CStatd)) {
1575 /* now get the status info, if we don't already have it */
1576 /* This is kind of weird, but we might wind up accidentally calling
1577 * RXAFS_Lookup because we happened upon a file which legitimately
1578 * has a 0 uniquifier. That is the result of allowing unique to wrap
1579 * to 0. This was fixed in AFS 3.4. For CForeign, Unique == 0 means that
1580 * the file has not yet been looked up.
1583 afs_int32 cached = 0;
1584 if (!tfid.Fid.Unique && (adp->states & CForeign)) {
1585 tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
1587 if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
1588 tvc = afs_GetVCache(&tfid, &treq, &cached, NULL);
1591 } /* sub-block just to reduce stack usage */
1594 int force_eval = afs_fakestat_enable ? 0 : 1;
1596 if (adp->states & CForeign)
1597 tvc->states |= CForeign;
1598 tvc->parentVnode = adp->fid.Fid.Vnode;
1599 tvc->parentUnique = adp->fid.Fid.Unique;
1600 tvc->states &= ~CBulkStat;
1602 if (afs_fakestat_enable == 2 && tvc->mvstat == 1) {
1603 ObtainSharedLock(&tvc->lock, 680);
1604 if (!tvc->linkData) {
1605 UpgradeSToWLock(&tvc->lock, 681);
1606 code = afs_HandleLink(tvc, &treq);
1607 ConvertWToRLock(&tvc->lock);
1609 ConvertSToRLock(&tvc->lock);
1612 if (!code && !afs_strchr(tvc->linkData, ':'))
1614 ReleaseReadLock(&tvc->lock);
1616 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1617 if (!(flags & AFS_LOOKUP_NOEVAL))
1618 /* don't eval mount points */
1619 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1620 if (tvc->mvstat == 1 && force_eval) {
1621 /* a mt point, possibly unevaluated */
1622 struct volume *tvolp;
1624 ObtainWriteLock(&tvc->lock, 133);
1625 code = EvalMountPoint(tvc, adp, &tvolp, &treq);
1626 ReleaseWriteLock(&tvc->lock);
1631 afs_PutVolume(tvolp, WRITE_LOCK);
1635 /* next, we want to continue using the target of the mt point */
1636 if (tvc->mvid && (tvc->states & CMValid)) {
1638 /* now lookup target, to set .. pointer */
1639 afs_Trace2(afs_iclSetp, CM_TRACE_LOOKUP1,
1640 ICL_TYPE_POINTER, tvc, ICL_TYPE_FID,
1642 uvc = tvc; /* remember for later */
1644 if (tvolp && (tvolp->states & VForeign)) {
1645 /* XXXX tvolp has ref cnt on but not locked! XXX */
1647 afs_GetRootVCache(tvc->mvid, &treq, NULL, tvolp);
1649 tvc = afs_GetVCache(tvc->mvid, &treq, NULL, NULL);
1651 afs_PutVCache(uvc); /* we're done with it */
1656 afs_PutVolume(tvolp, WRITE_LOCK);
1661 /* now, if we came via a new mt pt (say because of a new
1662 * release of a R/O volume), we must reevaluate the ..
1663 * ptr to point back to the appropriate place */
1665 ObtainWriteLock(&tvc->lock, 134);
1666 if (tvc->mvid == NULL) {
1667 tvc->mvid = (struct VenusFid *)
1668 osi_AllocSmallSpace(sizeof(struct VenusFid));
1670 /* setup backpointer */
1671 *tvc->mvid = tvolp->dotdot;
1672 ReleaseWriteLock(&tvc->lock);
1673 afs_PutVolume(tvolp, WRITE_LOCK);
1679 afs_PutVolume(tvolp, WRITE_LOCK);
1684 if (tvc && !VREFCOUNT_GT(tvc, 0)) {
1689 /* if we get here, we found something in a directory that couldn't
1690 * be located (a Multics "connection failure"). If the volume is
1691 * read-only, we try flushing this entry from the cache and trying
1695 tv = afs_GetVolume(&adp->fid, &treq, READ_LOCK);
1697 if (tv->states & VRO) {
1698 pass = 1; /* try this *once* */
1699 ObtainWriteLock(&afs_xcbhash, 495);
1700 afs_DequeueCallback(adp);
1701 /* re-stat to get later version */
1702 adp->states &= ~CStatd;
1703 ReleaseWriteLock(&afs_xcbhash);
1704 osi_dnlc_purgedp(adp);
1705 afs_PutVolume(tv, READ_LOCK);
1708 afs_PutVolume(tv, READ_LOCK);
1715 /* put the network buffer back, if need be */
1716 if (tname != aname && tname)
1717 osi_FreeLargeSpace(tname);
1720 /* Handle RENAME; only need to check rename "." */
1721 if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
1722 if (!FidCmp(&(tvc->fid), &(adp->fid))) {
1723 afs_PutVCache(*avcp);
1725 afs_PutFakeStat(&fakestate);
1726 return afs_CheckCode(EISDIR, &treq, 18);
1729 #endif /* AFS_OSF_ENV */
1732 afs_AddMarinerName(aname, tvc);
1734 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1735 if (!(flags & AFS_LOOKUP_NOEVAL))
1736 /* Here we don't enter the name into the DNLC because we want the
1737 * evaluated mount dir to be there (the vcache for the mounted volume)
1738 * rather than the vc of the mount point itself. we can still find the
1739 * mount point's vc in the vcache by its fid. */
1740 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1742 osi_dnlc_enter(adp, aname, tvc, &versionNo);
1744 #ifdef AFS_LINUX20_ENV
1745 /* So Linux inode cache is up to date. */
1746 code = afs_VerifyVCache(tvc, &treq);
1748 afs_PutFakeStat(&fakestate);
1749 return 0; /* can't have been any errors if hit and !code */
1756 code = afs_CheckCode(code, &treq, 19);
1758 /* If there is an error, make sure *avcp is null.
1759 * Alphas panic otherwise - defect 10719.
1764 afs_PutFakeStat(&fakestate);