2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
17 #include <afsconfig.h>
18 #include "afs/param.h"
23 #include "afs/sysincludes.h" /* Standard vendor system headers */
24 #include "afsincludes.h" /* Afs-based standard headers */
25 #include "afs/afs_stats.h" /* statistics */
26 #include "afs/afs_cbqueue.h"
27 #include "afs/nfsclient.h"
28 #include "afs/exporter.h"
29 #include "afs/afs_osidnlc.h"
30 #include "afs/afs_dynroot.h"
/* Forward declarations and file-scope state for the lookup / bulk-stat code.
 * NOTE(review): this file appears to be a partial extraction (interior lines
 * missing, original line numbers fused into the text); comments describe only
 * what the visible lines establish. */
33 extern struct DirEntry *afs_dir_GetBlob();
34 extern struct vcache *afs_globalVp;
/* If nonzero, prefer prefetching the backup (BK) volume when crossing a
 * mount point while staying within the same cell (see EvalMountData). */
37 afs_int32 afs_bkvolpref = 0;
/* Running count of entries successfully filled in by afs_DoBulkStat. */
38 afs_int32 afs_bulkStatsDone;
39 static int bulkStatCounter = 0;	/* counter for bulk stat seq. numbers */
40 int afs_fakestat_enable = 0;	/* 1: fakestat-all, 2: fakestat-crosscell */
/* True iff the NUL-terminated string `name` is exactly "@sys".
 * This would be faster if it did the comparison as an int32 word, but that
 * would be dependent on byte order and alignment, and "@sys" in binary is
 * not portable -- so compare byte by byte. */
#define AFS_EQ_ATSYS(name) \
    ((name)[0] == '@' && (name)[1] == 's' && (name)[2] == 'y' \
     && (name)[3] == 's' && (name)[4] == '\0')
/*
 * EvalMountData -- resolve a mount-point string (type '#' or '%', optional
 * "cell:" prefix, volume name or numeric volume ID) into a held volume.
 * NOTE(review): this region is a partial extraction; interior lines
 * (braces, some returns) are missing.  Comments below only state what the
 * visible lines establish.
 */
48 /* call under write lock, evaluate mvid field from a mt pt.
49 * avc is the vnode of the mount point object; must be write-locked.
50 * advc is the vnode of the containing directory (optional; if NULL and
51 * EvalMountPoint succeeds, caller must initialize *avolpp->dotdot)
52 * avolpp is where we return a pointer to the volume named by the mount pt, if success
53 * areq is the identity of the caller.
55 * NOTE: this function returns a held volume structure in *volpp if it returns 0!
58 EvalMountData(char type, char *data, afs_uint32 states, afs_uint32 cellnum,
59 struct volume **avolpp, register struct vrequest *areq,
60 afs_uint32 *acellidxp, afs_uint32 *avolnump)
62 struct volume *tvp = 0;
65 char *cpos, *volnamep, *x;
67 afs_int32 prefetch; /* 1=>None 2=>RO 3=>BK */
68 afs_int32 mtptCell, assocCell = 0, hac = 0;
69 afs_int32 samecell, roname, len;
70 afs_uint32 volid, cellidx;
/* Split off an optional "cell:" prefix; resolve the cell either by the
 * embedded name or by the caller-supplied cell number. */
72 cpos = afs_strchr(data, ':'); /* if cell name present */
76 tcell = afs_GetCellByName(data, READ_LOCK);
80 tcell = afs_GetCell(cellnum, READ_LOCK);
87 cellidx = tcell->cellIndex;
88 mtptCell = tcell->cellNum; /* The cell for the mountpoint */
90 hac = 1; /* has associated cell */
91 assocCell = tcell->lcellp->cellNum; /* The associated cell */
93 afs_PutCell(tcell, READ_LOCK);
95 /* Look for an all-numeric volume ID */
/* NOTE(review): volid is accumulated below but no `volid = 0` initialization
 * is visible in this extraction -- confirm against the full source. */
97 for (x = volnamep; *x >= '0' && *x <= '9'; x++)
98 volid = (volid * 10) + (*x - '0');
101 * If the volume ID was all-numeric, and they didn't ask for a
102 * pointer to the volume structure, then just return the number
103 * as-is. This is currently only used for handling name lookups
104 * in the dynamic mount directory.
106 if (!*x && !avolpp) {
108 *acellidxp = cellidx;
115 * If the volume ID was all-numeric, and the type was '%', then
116 * assume whoever made the mount point knew what they were doing,
117 * and don't second-guess them by forcing use of a RW volume when
118 * they gave the ID of something else.
120 if (!*x && type == '%') {
121 tfid.Fid.Volume = volid; /* remember BK volume */
122 tfid.Cell = mtptCell;
123 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
125 return ENODEV; /* oops, can't do it */
129 /* Is volume name a "<n>.backup" or "<n>.readonly" name */
130 len = strlen(volnamep);
131 roname = ((len > 9) && (strcmp(&volnamep[len - 9], ".readonly") == 0))
132 || ((len > 7) && (strcmp(&volnamep[len - 7], ".backup") == 0));
134 /* When we cross mountpoint, do we stay in the same cell */
135 samecell = (cellnum == mtptCell) || (hac && (cellnum == assocCell));
137 /* Decide whether to prefetch the BK, or RO. Also means we want the BK or
139 * If this is a regular mountpoint with a RW volume name
140 * - If BK preference is enabled AND we remain within the same cell AND
141 * start from a BK volume, then we will want to prefetch the BK volume.
142 * - If we cross a cell boundary OR start from a RO volume, then we will
143 * want to prefetch the RO volume.
145 if ((type == '#') && !roname) {
146 if (afs_bkvolpref && samecell && (states & CBackup))
147 prefetch = 3; /* Prefetch the BK */
148 else if (!samecell || (states & CRO))
149 prefetch = 2; /* Prefetch the RO */
151 prefetch = 1; /* Do not prefetch */
153 prefetch = 1; /* Do not prefetch */
156 /* Get the volume struct. Unless this volume name has ".readonly" or
157 * ".backup" in it, this will get the volume struct for the RW volume.
158 * The RO volume will be prefetched if requested (but not returned).
160 tvp = afs_GetVolumeByName(volnamep, mtptCell, prefetch, areq, WRITE_LOCK);
162 /* If no volume was found in this cell, try the associated linked cell */
163 if (!tvp && hac && areq->volumeError) {
165 afs_GetVolumeByName(volnamep, assocCell, prefetch, areq,
169 /* Still not found. If we are looking for the RO, then perhaps the RW
170 * doesn't exist? Try adding ".readonly" to volname and look for that.
171 * Don't know why we do this. Would have still found it in above call - jpm.
173 if (!tvp && (prefetch == 2) && len < AFS_SMALLOCSIZ - 10) {
174 buf = (char *)osi_AllocSmallSpace(len + 10);
176 strcpy(buf, volnamep);
177 afs_strcat(buf, ".readonly");
179 tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
181 /* Try the associated linked cell if failed */
182 if (!tvp && hac && areq->volumeError) {
183 tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
185 osi_FreeSmallSpace(buf);
189 return ENODEV; /* Couldn't find the volume */
191 /* Don't cross mountpoint from a BK to a BK volume */
192 if ((states & CBackup) && (tvp->states & VBackup)) {
193 afs_PutVolume(tvp, WRITE_LOCK);
197 /* If we want (prefetched) the BK and it exists, then drop the RW volume
199 * Otherwise, if we want (prefetched) the RO and it exists, then drop the
200 * RW volume and get the RO.
201 * Otherwise, go with the RW.
203 if ((prefetch == 3) && tvp->backVol) {
204 tfid.Fid.Volume = tvp->backVol; /* remember BK volume */
205 tfid.Cell = tvp->cell;
206 afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
207 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
209 return ENODEV; /* oops, can't do it */
210 } else if ((prefetch >= 2) && tvp->roVol) {
211 tfid.Fid.Volume = tvp->roVol; /* remember RO volume */
212 tfid.Cell = tvp->cell;
213 afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
214 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
216 return ENODEV; /* oops, can't do it */
/* Success: hand back the resolved cell index / volume number.  The held
 * volume in tvp is released here only on the non-*avolpp path (per the
 * NOTE above, a 0 return with avolpp set hands the held volume to the
 * caller). */
221 *acellidxp = cellidx;
223 *avolnump = tvp->volume;
227 afs_PutVolume(tvp, WRITE_LOCK);
/*
 * EvalMountPoint -- evaluate the mount point avc (write-locked by caller)
 * into a held volume via EvalMountData, then cache the result in avc->mvid
 * and set CMValid.  advc, if non-NULL, supplies the ".." back pointer.
 * NOTE(review): partial extraction; some interior lines are missing.
 */
232 EvalMountPoint(register struct vcache *avc, struct vcache *advc,
233 struct volume **avolpp, register struct vrequest *areq)
237 AFS_STATCNT(EvalMountPoint);
/* Fast path: a previous evaluation already populated mvid and it is
 * still valid -- nothing to do. */
239 if (avc->mvid && (avc->states & CMValid))
240 return 0; /* done while racing */
/* Fetch the mount point's link contents into avc->linkData. */
243 code = afs_HandleLink(avc, areq);
247 /* Determine which cell and volume the mountpoint goes to */
248 code = EvalMountData(avc->linkData[0], avc->linkData + 1,
249 avc->states, avc->fid.Cell, avolpp, areq, 0, 0);
250 if (code) return code;
/* Build the cached mount-target fid: volume root is vnode 1, unique 1. */
254 (struct VenusFid *)osi_AllocSmallSpace(sizeof(struct VenusFid));
255 avc->mvid->Cell = (*avolpp)->cell;
256 avc->mvid->Fid.Volume = (*avolpp)->volume;
257 avc->mvid->Fid.Vnode = 1;
258 avc->mvid->Fid.Unique = 1;
259 avc->states |= CMValid;
261 /* Used to: if the mount point is stored within a backup volume,
262 * then we should only update the parent pointer information if
263 * there's none already set, so as to avoid updating a volume's ..
264 * info with something in an OldFiles directory.
266 * Next two lines used to be under this if:
268 * if (!(avc->states & CBackup) || tvp->dotdot.Fid.Volume == 0)
270 * Now: update mount point back pointer on every call, so that we handle
271 * multiple mount points better. This way, when du tries to go back
272 * via chdir(".."), it will end up exactly where it started, yet
273 * cd'ing via a new path to a volume will reset the ".." pointer
276 (*avolpp)->mtpoint = avc->fid; /* setup back pointer to mtpoint */
278 (*avolpp)->dotdot = advc->fid;
/*
 * afs_InitFakeStat -- initialize an afs_fakestat_state before any use.
 */
286 * Must be called on an afs_fakestat_state object before calling
287 * afs_EvalFakeStat or afs_PutFakeStat. Calling afs_PutFakeStat
288 * without calling afs_EvalFakeStat is legal, as long as this
289 * function is called.
292 afs_InitFakeStat(struct afs_fakestat_state *state)
/* Fakestat disabled globally: skip (remaining init lines not visible in
 * this extraction). */
294 if (!afs_fakestat_enable)
299 state->need_release = 0;
303 * afs_EvalFakeStat_int
305 * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
306 * which is called by those wrapper functions.
308 * Only issues RPCs if canblock is non-zero.
/* NOTE(review): partial extraction -- branches and braces are missing;
 * comments describe only the visible lines. */
311 afs_EvalFakeStat_int(struct vcache **avcp, struct afs_fakestat_state *state,
312 struct vrequest *areq, int canblock)
314 struct vcache *tvc, *root_vp;
315 struct volume *tvolp = NULL;
318 if (!afs_fakestat_enable)
/* state must have been through afs_InitFakeStat and not yet evaluated. */
321 osi_Assert(state->valid == 1);
322 osi_Assert(state->did_eval == 0);
/* mvstat != 1 means *avcp is not a mount point: nothing to evaluate. */
326 if (tvc->mvstat != 1)
329 /* Is the call to VerifyVCache really necessary? */
330 code = afs_VerifyVCache(tvc, areq);
/* Evaluate the mount point under the vcache write lock. */
334 ObtainWriteLock(&tvc->lock, 599);
335 code = EvalMountPoint(tvc, NULL, &tvolp, areq);
336 ReleaseWriteLock(&tvc->lock);
/* Synthesize the target volume's ".." from the mount point's parent. */
340 tvolp->dotdot = tvc->fid;
341 tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
342 tvolp->dotdot.Fid.Unique = tvc->parentUnique;
345 if (tvc->mvid && (tvc->states & CMValid)) {
/* Look for the target volume's root vcache; retry the lookup while the
 * low-level find reports a race (SGI MP-style retry loop). */
351 ObtainWriteLock(&afs_xvcache, 597);
352 root_vp = afs_FindVCache(tvc->mvid, &retry, IS_WLOCK);
353 if (root_vp && retry) {
354 ReleaseWriteLock(&afs_xvcache);
355 afs_PutVCache(root_vp);
357 } while (root_vp && retry);
358 ReleaseWriteLock(&afs_xvcache);
/* Not in the cache: fetch it (may RPC; only reached when blocking is
 * permitted on this path -- see canblock handling below). */
360 root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL);
363 code = canblock ? ENOENT : 0;
366 #ifdef AFS_DARWIN80_ENV
367 root_vp->m.Type = VDIR;
369 code = afs_darwin_finalizevnode(root_vp, NULL, NULL, 0);
372 vnode_ref(AFSTOV(root_vp));
374 if (tvolp && !afs_InReadDir(root_vp)) {
375 /* Is this always kosher? Perhaps we should instead use
376 * NBObtainWriteLock to avoid potential deadlock.
378 ObtainWriteLock(&root_vp->lock, 598);
/* Propagate the synthesized ".." into the root vcache's mvid. */
380 root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
381 *root_vp->mvid = tvolp->dotdot;
382 ReleaseWriteLock(&root_vp->lock);
/* Record the held root so afs_PutFakeStat can release it later. */
384 state->need_release = 1;
385 state->root_vp = root_vp;
389 code = canblock ? ENOENT : 0;
394 afs_PutVolume(tvolp, WRITE_LOCK);
/*
 * afs_EvalFakeStat
 *
 * Automatically does the equivalent of EvalMountPoint for vcache entries
 * which are mount points.  Remembers enough state to properly release
 * the volume root vcache when afs_PutFakeStat() is called.
 *
 * The state variable must be initialized by afs_InitFakeState() beforehand.
 *
 * Returns 0 when everything succeeds and *avcp points to the vcache entry
 * that should be used for the real vnode operation.  Returns non-zero if
 * something goes wrong and the error code should be returned to the user.
 */
int
afs_EvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
                 struct vrequest *areq)
{
    /* Blocking variant: canblock = 1, so RPCs may be issued. */
    return afs_EvalFakeStat_int(avcp, state, areq, 1);
}
/*
 * afs_TryEvalFakeStat
 *
 * Same as afs_EvalFakeStat, but tries not to talk to remote servers
 * and only evaluates the mount point if all the data is already in
 * the cache.
 *
 * Returns 0 if everything succeeds and *avcp points to a valid
 * vcache entry (possibly evaluated).
 */
int
afs_TryEvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
                    struct vrequest *areq)
{
    /* Non-blocking variant: canblock = 0 suppresses RPC issuance. */
    return afs_EvalFakeStat_int(avcp, state, areq, 0);
}
/*
 * afs_PutFakeStat -- release any vcache held by a prior afs_EvalFakeStat.
 */
438 * Perform any necessary cleanup at the end of a vnode op, given that
439 * afs_InitFakeStat was previously called with this state.
442 afs_PutFakeStat(struct afs_fakestat_state *state)
444 if (!afs_fakestat_enable)
/* Must have been initialized; release the held volume-root vcache if
 * afs_EvalFakeStat_int recorded one. */
447 osi_Assert(state->valid == 1);
448 if (state->need_release)
449 afs_PutVCache(state->root_vp);
/*
 * afs_ENameOK -- predicate on a lookup name; rejects names ending in the
 * literal "@sys" suffix (return lines not visible in this extraction).
 */
454 afs_ENameOK(register char *aname)
458 AFS_STATCNT(ENameOK);
459 tlen = strlen(aname);
460 if (tlen >= 4 && strcmp(aname + tlen - 4, "@sys") == 0)
/*
 * afs_getsysname -- copy the first applicable @sys substitution name into
 * bufp and hand back the active sysname list via *sysnamelist.
 * For NFS-translator requests, the per-user exporter may supply its own
 * list (EXP_SYSNAME); on failure the literal "@sys" is used.
 * NOTE(review): partial extraction; branch structure is incomplete.
 */
466 afs_getsysname(register struct vrequest *areq, register struct vcache *adp,
467 register char *bufp, int *num, char **sysnamelist[])
469 register struct unixuser *au;
470 register afs_int32 error;
472 AFS_STATCNT(getsysname);
474 *sysnamelist = afs_sysnamelist;
/* No NFS exporter: just use the head of the global sysname list. */
476 if (!afs_nfsexporter)
477 strcpy(bufp, (*sysnamelist)[0]);
/* Exporter path: ask the per-user exporter for its sysname list. */
479 au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
481 error = EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, num, 0);
483 strcpy(bufp, "@sys");
487 strcpy(bufp, (*sysnamelist)[0]);
490 strcpy(bufp, afs_sysname);
/*
 * Check_AtSys -- initialize sysname_info state for a lookup name.  If the
 * name is exactly "@sys", allocate a buffer and substitute the first
 * sysname; otherwise point state->name at the caller's string unchanged.
 * NOTE(review): the declaration `char **sysnamelist[MAXNUMSYSNAMES]` is an
 * array of char** -- unusual but matches afs_getsysname's parameter type;
 * confirm against the full source.
 */
497 Check_AtSys(register struct vcache *avc, const char *aname,
498 struct sysname_info *state, struct vrequest *areq)
501 char **sysnamelist[MAXNUMSYSNAMES];
503 if (AFS_EQ_ATSYS(aname)) {
505 state->name = (char *)osi_AllocLargeSpace(MAXSYSNAME);
508 afs_getsysname(areq, avc, state->name, &num, sysnamelist);
513 state->name = (char *)aname;
/*
 * Next_AtSys -- advance sysname_info state to the next @sys substitution.
 * Handles both the whole-name "@sys" case and a trailing ".*@sys" suffix
 * detected lazily on the first call.  Returns 0 when there is no list or
 * the list is exhausted.
 * NOTE(review): partial extraction; some branches/returns are missing.
 */
518 Next_AtSys(register struct vcache *avc, struct vrequest *areq,
519 struct sysname_info *state)
521 int num = afs_sysnamecount;
522 char **sysnamelist[MAXNUMSYSNAMES];
524 if (state->index == -1)
525 return 0; /* No list */
527 /* Check for the initial state of aname != "@sys" in Check_AtSys */
528 if (state->offset == -1 && state->allocked == 0) {
529 register char *tname;
531 /* Check for .*@sys */
532 for (tname = state->name; *tname; tname++)
533 /*Move to the end of the string */ ;
/* Name ends in "@sys" (with at least one char before it): remember the
 * suffix offset and copy the prefix into a fresh buffer. */
535 if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname - 4))) {
536 state->offset = (tname - 4) - state->name;
537 tname = (char *)osi_AllocLargeSpace(AFS_LRALLOCSIZ);
538 strncpy(tname, state->name, state->offset);
543 afs_getsysname(areq, avc, state->name + state->offset, &num,
547 return 0; /* .*@sys doesn't match either */
549 register struct unixuser *au;
550 register afs_int32 error;
552 *sysnamelist = afs_sysnamelist;
/* NFS-translator requests may carry a per-user exporter sysname list. */
554 if (afs_nfsexporter) {
555 au = afs_GetUser(areq->uid, avc->fid.Cell, 0);
558 EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, &num, 0);
/* Step to the next sysname; splice it in at the remembered offset. */
566 if (++(state->index) >= num || !(*sysnamelist)[(unsigned int)state->index])
567 return 0; /* end of list */
569 strcpy(state->name + state->offset, (*sysnamelist)[(unsigned int)state->index]);
/* Declarations and commentary for the bulk-stat machinery below. */
573 extern int BlobScan(struct dcache * afile, afs_int32 ablob);
575 /* called with an unlocked directory and directory cookie. Areqp
576 * describes who is making the call.
577 * Scans the next N (about 30, typically) directory entries, and does
578 * a bulk stat call to stat them all.
580 * Must be very careful when merging in RPC responses, since we dont
581 * want to overwrite newer info that was added by a file system mutating
582 * call that ran concurrently with our bulk stat call.
584 * We do that, as described below, by not merging in our info (always
585 * safe to skip the merge) if the status info is valid in the vcache entry.
587 * If adapt ever implements the bulk stat RPC, then this code will need to
588 * ensure that vcaches created for failed RPC's to older servers have the
/* NOTE(review): BStvc appears unused in the visible lines -- presumably a
 * debugging hook; confirm against the full source. */
591 static struct vcache *BStvc = NULL;
/*
 * afs_DoBulkStat -- prefetch status for up to ~30 entries of directory adp,
 * starting at dirCookie, with a single (Inline)BulkStatus RPC, then merge
 * the results into the vcache LRU without clobbering concurrently-updated
 * entries.  Race detection uses the CBulkFetching state bit plus a sequence
 * number stashed in m.Length (see the WARNING comment below).
 * NOTE(review): this region is a partial extraction -- many interior lines
 * (braces, else arms, returns, the `done:` label) are missing.  Comments
 * describe only what the visible lines establish.  Lock order visible here:
 * adp->lock / dcp->lock (read) for the scan; afs_xvcache, per-vcache locks
 * and afs_xcbhash (write) for the merge.
 */
594 afs_DoBulkStat(struct vcache *adp, long dirCookie, struct vrequest *areqp)
596 int nentries; /* # of entries to prefetch */
597 int nskip; /* # of slots in the LRU queue to skip */
598 struct vcache *lruvcp; /* vcache ptr of our goal pos in LRU queue */
599 struct dcache *dcp; /* chunk containing the dir block */
600 char *statMemp; /* status memory block */
601 char *cbfMemp; /* callback and fid memory block */
602 afs_size_t temp; /* temp for holding chunk length, &c. */
603 struct AFSFid *fidsp; /* file IDs were collecting */
604 struct AFSCallBack *cbsp; /* call back pointers */
605 struct AFSCallBack *tcbp; /* temp callback ptr */
606 struct AFSFetchStatus *statsp; /* file status info */
607 struct AFSVolSync volSync; /* vol sync return info */
608 struct vcache *tvcp; /* temp vcp */
609 struct afs_q *tq; /* temp queue variable */
610 AFSCBFids fidParm; /* file ID parm for bulk stat */
611 AFSBulkStats statParm; /* stat info parm for bulk stat */
612 int fidIndex = 0; /* which file were stating */
613 struct conn *tcp = 0; /* conn for call */
614 AFSCBs cbParm; /* callback parm for bulk stat */
615 struct server *hostp = 0; /* host we got callback from */
616 long startTime; /* time we started the call,
617 * for callback expiration base
619 afs_size_t statSeqNo = 0; /* Valued of file size to detect races */
620 int code; /* error code */
621 long newIndex; /* new index in the dir */
622 struct DirEntry *dirEntryp; /* dir entry we are examining */
624 struct VenusFid afid; /* file ID we are using now */
625 struct VenusFid tfid; /* another temp. file ID */
626 afs_int32 retry; /* handle low-level SGI MP race conditions */
627 long volStates; /* flags from vol structure */
628 struct volume *volp = 0; /* volume ptr */
629 struct VenusFid dotdot;
630 int flagIndex = 0; /* First file with bulk fetch flag set */
631 int inlinebulk = 0; /* Did we use InlineBulk RPC or not? */
633 #ifdef AFS_DARWIN80_ENV
634 panic("bulkstatus doesn't work on AFS_DARWIN80_ENV. don't call it");
636 /* first compute some basic parameters. We dont want to prefetch more
637 * than a fraction of the cache in any given call, and we want to preserve
638 * a portion of the LRU queue in any event, so as to avoid thrashing
639 * the entire stat cache (we will at least leave some of it alone).
640 * presently dont stat more than 1/8 the cache in any one call. */
641 nentries = afs_cacheStats / 8;
643 /* dont bother prefetching more than one calls worth of info */
644 if (nentries > AFSCBMAX)
647 /* heuristic to make sure that things fit in 4K. This means that
648 * we shouldnt make it any bigger than 47 entries. I am typically
649 * going to keep it a little lower, since we don't want to load
650 * too much of the stat cache.
655 /* now, to reduce the stack size, well allocate two 4K blocks,
656 * one for fids and callbacks, and one for stat info. Well set
657 * up our pointers to the memory from there, too.
659 statMemp = osi_AllocLargeSpace(nentries * sizeof(AFSFetchStatus));
660 statsp = (struct AFSFetchStatus *)statMemp;
662 osi_AllocLargeSpace(nentries *
663 (sizeof(AFSCallBack) + sizeof(AFSFid)));
664 fidsp = (AFSFid *) cbfMemp;
665 cbsp = (AFSCallBack *) (cbfMemp + nentries * sizeof(AFSFid));
667 /* next, we must iterate over the directory, starting from the specified
668 * cookie offset (dirCookie), and counting out nentries file entries.
669 * We skip files that already have stat cache entries, since we
670 * dont want to bulk stat files that are already in the cache.
673 code = afs_VerifyVCache(adp, areqp);
677 dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
683 /* lock the directory cache entry */
684 ObtainReadLock(&adp->lock);
685 ObtainReadLock(&dcp->lock);
688 * Make sure that the data in the cache is current. There are two
689 * cases we need to worry about:
690 * 1. The cache data is being fetched by another process.
691 * 2. The cache data is no longer valid
/* Wait out a concurrent fetch: drop both read locks before sleeping and
 * re-take them afterwards, re-testing the condition each time. */
693 while ((adp->states & CStatd)
694 && (dcp->dflags & DFFetching)
695 && hsame(adp->m.DataVersion, dcp->f.versionNo)) {
696 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
697 __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER, dcp,
698 ICL_TYPE_INT32, dcp->dflags);
699 ReleaseReadLock(&dcp->lock);
700 ReleaseReadLock(&adp->lock);
701 afs_osi_Sleep(&dcp->validPos);
702 ObtainReadLock(&adp->lock);
703 ObtainReadLock(&dcp->lock);
705 if (!(adp->states & CStatd)
706 || !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
707 ReleaseReadLock(&dcp->lock);
708 ReleaseReadLock(&adp->lock);
713 /* Generate a sequence number so we can tell whether we should
714 * store the attributes when processing the response. This number is
715 * stored in the file size when we set the CBulkFetching bit. If the
716 * CBulkFetching is still set and this value hasn't changed, then
717 * we know we were the last to set CBulkFetching bit for this file,
718 * and it is safe to set the status information for this file.
720 statSeqNo = bulkStatCounter++;
722 /* now we have dir data in the cache, so scan the dir page */
725 while (1) { /* Should probably have some constant bound */
726 /* look for first safe entry to examine in the directory. BlobScan
727 * looks for a the 1st allocated dir after the dirCookie slot.
729 newIndex = BlobScan(dcp, (dirCookie >> 5));
733 /* remember the updated directory cookie */
734 dirCookie = newIndex << 5;
736 /* get a ptr to the dir entry */
738 (struct DirEntry *)afs_dir_GetBlob(dcp, newIndex);
742 /* dont copy more than we have room for */
743 if (fidIndex >= nentries) {
744 DRelease((struct buffer *)dirEntryp, 0);
748 /* now, if the dir entry looks good, copy it out to our list. Vnode
749 * 0 means deleted, although it should also be free were it deleted.
751 if (dirEntryp->fid.vnode != 0) {
752 /* dont copy entries we have in our cache. This check will
753 * also make us skip "." and probably "..", unless it has
754 * disappeared from the cache since we did our namei call.
756 tfid.Cell = adp->fid.Cell;
757 tfid.Fid.Volume = adp->fid.Fid.Volume;
758 tfid.Fid.Vnode = ntohl(dirEntryp->fid.vnode);
759 tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
762 ObtainWriteLock(&afs_xvcache, 130);
763 tvcp = afs_FindVCache(&tfid, &retry, IS_WLOCK /* no stats | LRU */ );
765 ReleaseWriteLock(&afs_xvcache);
768 } while (tvcp && retry);
769 if (!tvcp) { /* otherwise, create manually */
770 tvcp = afs_NewVCache(&tfid, hostp);
773 ObtainWriteLock(&tvcp->lock, 505);
774 ReleaseWriteLock(&afs_xvcache);
775 afs_RemoveVCB(&tfid);
776 ReleaseWriteLock(&tvcp->lock);
778 ReleaseWriteLock(&afs_xvcache);
781 ReleaseWriteLock(&afs_xvcache);
785 DRelease((struct buffer *)dirEntryp, 0);
786 ReleaseReadLock(&dcp->lock);
787 ReleaseReadLock(&adp->lock);
789 goto done; /* can happen if afs_NewVCache fails */
792 #ifdef AFS_DARWIN80_ENV
793 if (tvcp->states & CVInit) {
794 /* XXX don't have status yet, so creating the vnode is
795 not yet useful. we would get CDeadVnode set, and the
796 upcoming PutVCache will cause the vcache to be flushed &
797 freed, which in turn means the bulkstatus results won't
801 /* WARNING: afs_DoBulkStat uses the Length field to store a
802 * sequence number for each bulk status request. Under no
803 * circumstances should afs_DoBulkStat store a sequence number
804 * if the new length will be ignored when afs_ProcessFS is
805 * called with new stats. */
807 if (!(tvcp->states & (CStatd | CBulkFetching))
808 && (tvcp->execsOrWriters <= 0)
809 && !afs_DirtyPages(tvcp)
810 && !AFS_VN_MAPPED((vnode_t *) tvcp))
812 if (!(tvcp->states & (CStatd | CBulkFetching))
813 && (tvcp->execsOrWriters <= 0)
814 && !afs_DirtyPages(tvcp))
818 /* this entry doesnt exist in the cache, and is not
819 * already being fetched by someone else, so add it to the
820 * list of file IDs to obtain.
822 * We detect a callback breaking race condition by checking the
823 * CBulkFetching state bit and the value in the file size.
824 * It is safe to set the status only if the CBulkFetching
825 * flag is still set and the value in the file size does
828 * Don't fetch status for dirty files. We need to
829 * preserve the value of the file size. We could
830 * flush the pages, but it wouldn't be worthwhile.
832 memcpy((char *)(fidsp + fidIndex), (char *)&tfid.Fid,
834 tvcp->states |= CBulkFetching;
835 tvcp->m.Length = statSeqNo;
841 /* if dir vnode has non-zero entry */
842 /* move to the next dir entry by adding in the # of entries
843 * used by this dir entry.
845 temp = afs_dir_NameBlobs(dirEntryp->name) << 5;
846 DRelease((struct buffer *)dirEntryp, 0);
850 } /* while loop over all dir entries */
852 /* now release the dir lock and prepare to make the bulk RPC */
853 ReleaseReadLock(&dcp->lock);
854 ReleaseReadLock(&adp->lock);
856 /* release the chunk */
859 /* dont make a null call */
864 /* setup the RPC parm structures */
865 fidParm.AFSCBFids_len = fidIndex;
866 fidParm.AFSCBFids_val = fidsp;
867 statParm.AFSBulkStats_len = fidIndex;
868 statParm.AFSBulkStats_val = statsp;
869 cbParm.AFSCBs_len = fidIndex;
870 cbParm.AFSCBs_val = cbsp;
872 /* start the timer; callback expirations are relative to this */
873 startTime = osi_Time();
875 tcp = afs_Conn(&adp->fid, areqp, SHARED_LOCK);
877 hostp = tcp->srvr->server;
878 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
/* Prefer the InlineBulkStatus RPC; fall back to BulkStatus (and remember
 * the server can't do inline bulk) when the server rejects the opcode. */
881 if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
883 RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
885 if (code == RXGEN_OPCODE) {
886 tcp->srvr->server->flags |= SNO_INLINEBULK;
889 RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
896 RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
904 (tcp, code, &adp->fid, areqp, AFS_STATS_FS_RPCIDX_BULKSTATUS,
907 /* now, if we didnt get the info, bail out. */
911 /* we need vol flags to create the entries properly */
912 dotdot.Fid.Volume = 0;
913 volp = afs_GetVolume(&adp->fid, areqp, READ_LOCK);
915 volStates = volp->states;
916 if (volp->dotdot.Fid.Volume != 0)
917 dotdot = volp->dotdot;
921 /* find the place to merge the info into We do this by skipping
922 * nskip entries in the LRU queue. The more we skip, the more
923 * we preserve, since the head of the VLRU queue is the most recently
927 nskip = afs_cacheStats / 2; /* preserved fraction of the cache */
928 ObtainReadLock(&afs_xvcache);
930 /* actually a serious error, probably should panic. Probably will
931 * panic soon, oh well. */
932 ReleaseReadLock(&afs_xvcache);
933 afs_warnuser("afs_DoBulkStat: VLRU empty!");
/* Sanity-check VLRU linkage before walking it. */
936 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
937 refpanic("Bulkstat VLRU inconsistent");
939 for (tq = VLRU.next; tq != &VLRU; tq = QNext(tq)) {
942 else if (QNext(QPrev(tq)) != tq) {
944 refpanic("BulkStat VLRU inconsistent");
950 lruvcp = QTOV(VLRU.next);
952 /* now we have to hold this entry, so that it does not get moved
953 * into the free list while we're running. It could still get
954 * moved within the lru queue, but hopefully that will be rare; it
955 * doesn't hurt nearly as much.
958 osi_vnhold(lruvcp, &retry);
959 ReleaseReadLock(&afs_xvcache); /* could be read lock */
963 /* otherwise, merge in the info. We have to be quite careful here,
964 * since we need to ensure that we don't merge old info over newer
965 * stuff in a stat cache entry. We're very conservative here: we don't
966 * do the merge at all unless we ourselves create the stat cache
967 * entry. That's pretty safe, and should work pretty well, since we
968 * typically expect to do the stat cache creation ourselves.
970 * We also have to take into account racing token revocations.
972 for (i = 0; i < fidIndex; i++) {
/* Per-entry server error (InlineBulk): skip this entry. */
973 if ((&statsp[i])->errorCode)
975 afid.Cell = adp->fid.Cell;
976 afid.Fid.Volume = adp->fid.Fid.Volume;
977 afid.Fid.Vnode = fidsp[i].Vnode;
978 afid.Fid.Unique = fidsp[i].Unique;
981 ObtainReadLock(&afs_xvcache);
982 tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */ );
983 ReleaseReadLock(&afs_xvcache);
984 } while (tvcp && retry);
986 /* The entry may no longer exist */
991 /* now we have the entry held, but we need to fill it in */
992 ObtainWriteLock(&tvcp->lock, 131);
994 /* if CBulkFetching is not set, or if the file size no longer
995 * matches the value we placed there when we set the CBulkFetching
996 * flag, then someone else has done something with this node,
997 * and we may not have the latest status information for this
998 * file. Leave the entry alone.
1000 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
1002 ReleaseWriteLock(&tvcp->lock);
1003 afs_PutVCache(tvcp);
1007 /* now copy ".." entry back out of volume structure, if necessary */
1008 if (tvcp->mvstat == 2 && (dotdot.Fid.Volume != 0)) {
1010 tvcp->mvid = (struct VenusFid *)
1011 osi_AllocSmallSpace(sizeof(struct VenusFid));
1012 *tvcp->mvid = dotdot;
/* Re-position the entry in the VLRU next to lruvcp, with heavy
 * consistency checks on the queue linkage before and after. */
1015 ObtainWriteLock(&afs_xvcache, 132);
1016 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1017 refpanic("Bulkstat VLRU inconsistent2");
1019 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
1020 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
1021 refpanic("Bulkstat VLRU inconsistent4");
1023 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
1024 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
1025 refpanic("Bulkstat VLRU inconsistent5");
1028 if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
1029 QRemove(&tvcp->vlruq);
1030 QAdd(&lruvcp->vlruq, &tvcp->vlruq);
1033 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1034 refpanic("Bulkstat VLRU inconsistent3");
1036 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
1037 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
1038 refpanic("Bulkstat VLRU inconsistent5");
1040 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
1041 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
1042 refpanic("Bulkstat VLRU inconsistent6");
1044 ReleaseWriteLock(&afs_xvcache);
1046 ObtainWriteLock(&afs_xcbhash, 494);
1048 /* We need to check the flags again. We may have missed
1049 * something while we were waiting for a lock.
1051 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
1053 ReleaseWriteLock(&tvcp->lock);
1054 ReleaseWriteLock(&afs_xcbhash);
1055 afs_PutVCache(tvcp);
1059 /* now merge in the resulting status back into the vnode.
1060 * We only do this if the entry looks clear.
1062 afs_ProcessFS(tvcp, &statsp[i], areqp);
1063 #if defined(AFS_LINUX22_ENV)
1064 afs_fill_inode(AFSTOV(tvcp), NULL); /* reset inode operations */
1067 /* do some accounting for bulk stats: mark this entry as
1068 * loaded, so we can tell if we use it before it gets
1071 tvcp->states |= CBulkStat;
1072 tvcp->states &= ~CBulkFetching;
1074 afs_bulkStatsDone++;
1076 /* merge in vol info */
1077 if (volStates & VRO)
1078 tvcp->states |= CRO;
1079 if (volStates & VBackup)
1080 tvcp->states |= CBackup;
1081 if (volStates & VForeign)
1082 tvcp->states |= CForeign;
1084 /* merge in the callback info */
1085 tvcp->states |= CTruth;
1087 /* get ptr to the callback we are interested in */
1090 if (tcbp->ExpirationTime != 0) {
1091 tvcp->cbExpires = tcbp->ExpirationTime + startTime;
1092 tvcp->callback = hostp;
1093 tvcp->states |= CStatd;
1094 afs_QueueCallback(tvcp, CBHash(tcbp->ExpirationTime), volp);
1095 } else if (tvcp->states & CRO) {
1096 /* ordinary callback on a read-only volume -- AFS 3.2 style */
1097 tvcp->cbExpires = 3600 + startTime;
1098 tvcp->callback = hostp;
1099 tvcp->states |= CStatd;
1100 afs_QueueCallback(tvcp, CBHash(3600), volp);
/* No callback granted: invalidate and purge from the DNLC if it is (or
 * could be) a directory. */
1103 tvcp->states &= ~(CStatd | CUnique);
1104 afs_DequeueCallback(tvcp);
1105 if ((tvcp->states & CForeign) || (vType(tvcp) == VDIR))
1106 osi_dnlc_purgedp(tvcp); /* if it (could be) a directory */
1108 ReleaseWriteLock(&afs_xcbhash);
1110 ReleaseWriteLock(&tvcp->lock);
1111 /* finally, we're done with the entry */
1112 afs_PutVCache(tvcp);
1113 } /* for all files we got back */
1115 /* finally return the pointer into the LRU queue */
1116 afs_PutVCache(lruvcp);
1119 /* Be sure to turn off the CBulkFetching flags */
1120 for (i = flagIndex; i < fidIndex; i++) {
1121 afid.Cell = adp->fid.Cell;
1122 afid.Fid.Volume = adp->fid.Fid.Volume;
1123 afid.Fid.Vnode = fidsp[i].Vnode;
1124 afid.Fid.Unique = fidsp[i].Unique;
1127 ObtainReadLock(&afs_xvcache);
1128 tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */ );
1129 ReleaseReadLock(&afs_xvcache);
1130 } while (tvcp && retry);
/* Only clear the flag if our own sequence number is still in place. */
1131 if (tvcp != NULL && (tvcp->states & CBulkFetching)
1132 && (tvcp->m.Length == statSeqNo)) {
1133 tvcp->states &= ~CBulkFetching;
1136 afs_PutVCache(tvcp);
1140 afs_PutVolume(volp, READ_LOCK);
1142 /* If we did the InlineBulk RPC pull out the return code */
1144 if ((&statsp[0])->errorCode) {
1145 afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
1146 AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL);
1147 code = (&statsp[0])->errorCode;
/* Free the two large scratch blocks allocated at the top. */
1153 osi_FreeLargeSpace(statMemp);
1154 osi_FreeLargeSpace(cbfMemp);
1158 /* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
1159 #ifdef AFS_DARWIN80_ENV
/* NOTE(review): presumably a runtime switch gating use of afs_DoBulkStat
 * from afs_lookup (the consumer is not visible in this extraction --
 * confirm against the full source). */
1162 static int AFSDOBULK = 1;
1167 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct AFS_UCRED *acred, int opflag, int wantparent)
1168 #elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
1169 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct pathname *pnp, int flags, struct vnode *rdir, struct AFS_UCRED *acred)
1170 #elif defined(UKERNEL)
1171 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct AFS_UCRED *acred, int flags)
1173 afs_lookup(OSI_VC_DECL(adp), char *aname, struct vcache **avcp, struct AFS_UCRED *acred)
1176 struct vrequest treq;
1178 register struct vcache *tvc = 0;
1179 register afs_int32 code;
1180 register afs_int32 bulkcode = 0;
1181 int pass = 0, hit = 0;
1183 extern afs_int32 afs_mariner; /*Writing activity to log? */
1184 afs_hyper_t versionNo;
1185 int no_read_access = 0;
1186 struct sysname_info sysState; /* used only for @sys checking */
1187 int dynrootRetry = 1;
1188 struct afs_fakestat_state fakestate;
1189 int tryEvalOnly = 0;
1190 OSI_VC_CONVERT(adp);
1192 AFS_STATCNT(afs_lookup);
1193 afs_InitFakeStat(&fakestate);
1195 if ((code = afs_InitReq(&treq, acred)))
1199 ndp->ni_dvp = AFSTOV(adp);
1200 #endif /* AFS_OSF_ENV */
1202 #if defined(AFS_DARWIN_ENV)
1203 /* Workaround for MacOSX Finder, which tries to look for
1204 * .DS_Store and Contents under every directory.
1206 if (afs_fakestat_enable && adp->mvstat == 1) {
1207 if (strcmp(aname, ".DS_Store") == 0)
1209 if (strcmp(aname, "Contents") == 0)
1215 code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
1217 code = afs_EvalFakeStat(&adp, &fakestate, &treq);
1218 if (tryEvalOnly && adp->mvstat == 1)
1223 *avcp = NULL; /* Since some callers don't initialize it */
1225 /* come back to here if we encounter a non-existent object in a read-only
1226 * volume's directory */
1229 *avcp = NULL; /* Since some callers don't initialize it */
1232 if (!(adp->states & CStatd) && !afs_InReadDir(adp)) {
1233 if ((code = afs_VerifyVCache2(adp, &treq))) {
1239 /* watch for ".." in a volume root */
1240 if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
1241 /* looking up ".." in root via special hacks */
1242 if (adp->mvid == (struct VenusFid *)0 || adp->mvid->Fid.Volume == 0) {
1244 if (adp == afs_globalVp) {
1245 struct vnode *rvp = AFSTOV(adp);
1247 ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
1248 ndp->ni_dvp = ndp->ni_vp;
1258 /* otherwise we have the fid here, so we use it */
1259 tvc = afs_GetVCache(adp->mvid, &treq, NULL, NULL);
1260 afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT, ICL_TYPE_FID, adp->mvid,
1261 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, code);
1263 code = (tvc ? 0 : ENOENT);
1265 if (tvc && !VREFCOUNT_GT(tvc, 0)) {
1269 /*printf("LOOKUP GETVCDOTDOT -> %d\n", code); */
1274 /* now check the access */
1275 if (treq.uid != adp->last_looker) {
1276 if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
1281 adp->last_looker = treq.uid;
1284 /* Check for read access as well. We need read access in order to
1285 * stat files, but not to stat subdirectories. */
1286 if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
1289 /* special case lookup of ".". Can we check for it sooner in this code,
1290 * for instance, way up before "redo:" ??
1291 * I'm not fiddling with the LRUQ here, either, perhaps I should, or else
1292 * invent a lightweight version of GetVCache.
1294 if (aname[0] == '.' && !aname[1]) { /* special case */
1295 ObtainReadLock(&afs_xvcache);
1297 ReleaseReadLock(&afs_xvcache);
1298 #ifdef AFS_DARWIN80_ENV
1299 vnode_get(AFSTOV(adp));
1304 if (adp && !VREFCOUNT_GT(adp, 0)) {
1311 * Special case lookup of ".." in the dynamic mount directory.
1312 * The parent of this directory is _always_ the AFS root volume.
1314 if (afs_IsDynrootMount(adp) &&
1315 aname[0] == '.' && aname[1] == '.' && !aname[2]) {
1317 ObtainReadLock(&afs_xvcache);
1318 osi_vnhold(afs_globalVp, 0);
1319 ReleaseReadLock(&afs_xvcache);
1320 #ifdef AFS_DARWIN80_ENV
1321 vnode_get(AFSTOV(afs_globalVp));
1324 *avcp = tvc = afs_globalVp;
1330 * Special case lookups in the dynamic mount directory.
1331 * The names here take the form cell:volume, similar to a mount point.
1332 * EvalMountData parses that and returns a cell and volume ID, which
1333 * we use to construct the appropriate dynroot Fid.
1335 if (afs_IsDynrootMount(adp)) {
1336 struct VenusFid tfid;
1337 afs_uint32 cellidx, volid;
1339 code = EvalMountData('%', aname, 0, 0, NULL, &treq, &cellidx, &volid);
1342 afs_GetDynrootMountFid(&tfid);
1343 tfid.Fid.Vnode = VNUM_FROM_TYPEID(VN_TYPE_MOUNT, cellidx << 2);
1344 tfid.Fid.Unique = volid;
1345 *avcp = tvc = afs_GetVCache(&tfid, &treq, NULL, NULL);
1350 #ifdef AFS_LINUX26_ENV
1352 * Special case of the dynamic mount volume in a static root.
1353 * This is really unfortunate, but we need this for the translator.
1355 if (adp == afs_globalVp && !afs_GetDynrootEnable() &&
1356 !strcmp(aname, AFS_DYNROOT_MOUNTNAME)) {
1357 struct VenusFid tfid;
1359 afs_GetDynrootMountFid(&tfid);
1360 *avcp = tvc = afs_GetVCache(&tfid, &treq, NULL, NULL);
1367 Check_AtSys(adp, aname, &sysState, &treq);
1368 tname = sysState.name;
1370 /* 1st Check_AtSys and lookup by tname is required here, for now,
1371 * because the dnlc is *not* told to remove entries for the parent
1372 * dir of file/dir op that afs_LocalHero likes, but dnlc is informed
1373 * if the cached entry for the parent dir is invalidated for a
1375 * Otherwise, we'd be able to do a dnlc lookup on an entry ending
1376 * w/@sys and know the dnlc was consistent with reality. */
1377 tvc = osi_dnlc_lookup(adp, tname, WRITE_LOCK);
1378 *avcp = tvc; /* maybe wasn't initialized, but it is now */
1380 if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
1381 /* need read access on dir to stat non-directory / non-link */
1387 #ifdef AFS_LINUX22_ENV
1388 if (tvc->mvstat == 2) { /* we don't trust the dnlc for root vcaches */
1389 AFS_RELE(AFSTOV(tvc));
1396 #else /* non - LINUX */
1400 #endif /* linux22 */
1403 { /* sub-block just to reduce stack usage */
1404 register struct dcache *tdc;
1405 afs_size_t dirOffset, dirLen;
1406 struct VenusFid tfid;
1408 /* now we have to lookup the next fid */
1409 if (afs_InReadDir(adp))
1410 tdc = adp->dcreaddir;
1412 tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq,
1413 &dirOffset, &dirLen, 1);
1415 *avcp = NULL; /* redundant, but harmless */
1420 /* now we will just call dir package with appropriate inode.
1421 * Dirs are always fetched in their entirety for now */
1422 ObtainReadLock(&adp->lock);
1423 ObtainReadLock(&tdc->lock);
1426 * Make sure that the data in the cache is current. There are two
1427 * cases we need to worry about:
1428 * 1. The cache data is being fetched by another process.
1429 * 2. The cache data is no longer valid
1431 * If a readdir is in progress _in this thread_, it has a shared
1432 * lock on the vcache and has obtained current data, so we just
1433 * use that. This eliminates several possible deadlocks.
1435 if (!afs_InReadDir(adp)) {
1436 while ((adp->states & CStatd)
1437 && (tdc->dflags & DFFetching)
1438 && hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1439 ReleaseReadLock(&tdc->lock);
1440 ReleaseReadLock(&adp->lock);
1441 afs_osi_Sleep(&tdc->validPos);
1442 ObtainReadLock(&adp->lock);
1443 ObtainReadLock(&tdc->lock);
1445 if (!(adp->states & CStatd)
1446 || !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1447 ReleaseReadLock(&tdc->lock);
1448 ReleaseReadLock(&adp->lock);
1450 if (tname && tname != aname)
1451 osi_FreeLargeSpace(tname);
1456 /* Save the version number for when we call osi_dnlc_enter */
1457 hset(versionNo, tdc->f.versionNo);
1460 * check for, and handle "@sys" if it's there. We should be able
1461 * to avoid the alloc and the strcpy with a little work, but it's
1462 * not pressing. If there aren't any remote users (ie, via the
1463 * NFS translator), we have a slightly easier job.
1464 * the faster way to do this is to check for *aname == '@' and if
1465 * it's there, check for @sys, otherwise, assume there's no @sys
1466 * then, if the lookup fails, check for .*@sys...
1468 /* above now implemented by Check_AtSys and Next_AtSys */
1470 /* lookup the name in the appropriate dir, and return a cache entry
1471 * on the resulting fid */
1473 afs_dir_LookupOffset(tdc, sysState.name, &tfid.Fid,
1476 /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
1477 while (code == ENOENT && Next_AtSys(adp, &treq, &sysState))
1479 afs_dir_LookupOffset(tdc, sysState.name, &tfid.Fid,
1481 tname = sysState.name;
1483 ReleaseReadLock(&tdc->lock);
1484 if (!afs_InReadDir(adp))
1487 if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
1488 ReleaseReadLock(&adp->lock);
1490 if (tname[0] == '.')
1491 afs_LookupAFSDB(tname + 1);
1493 afs_LookupAFSDB(tname);
1494 if (tname && tname != aname)
1495 osi_FreeLargeSpace(tname);
1498 ReleaseReadLock(&adp->lock);
1501 /* new fid has same cell and volume */
1502 tfid.Cell = adp->fid.Cell;
1503 tfid.Fid.Volume = adp->fid.Fid.Volume;
1504 afs_Trace4(afs_iclSetp, CM_TRACE_LOOKUP, ICL_TYPE_POINTER, adp,
1505 ICL_TYPE_STRING, tname, ICL_TYPE_FID, &tfid,
1506 ICL_TYPE_INT32, code);
1509 if (code != ENOENT) {
1510 printf("LOOKUP dirLookupOff -> %d\n", code);
1515 /* prefetch some entries, if the dir is currently open. The variable
1516 * dirCookie tells us where to start prefetching from.
1518 if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign)
1519 && !afs_IsDynroot(adp) && !afs_InReadDir(adp)) {
1521 /* if the entry is not in the cache, or is in the cache,
1522 * but hasn't been statd, then do a bulk stat operation.
1526 ObtainReadLock(&afs_xvcache);
1527 tvc = afs_FindVCache(&tfid, &retry, 0 /* !stats,!lru */ );
1528 ReleaseReadLock(&afs_xvcache);
1529 } while (tvc && retry);
1531 if (!tvc || !(tvc->states & CStatd))
1532 bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
1536 /* if the vcache isn't usable, release it */
1537 if (tvc && !(tvc->states & CStatd)) {
1546 /* now get the status info, if we don't already have it */
1547 /* This is kind of weird, but we might wind up accidentally calling
1548 * RXAFS_Lookup because we happened upon a file which legitimately
1549 * has a 0 uniquifier. That is the result of allowing unique to wrap
1550 * to 0. This was fixed in AFS 3.4. For CForeign, Unique == 0 means that
1551 * the file has not yet been looked up.
1554 afs_int32 cached = 0;
1555 if (!tfid.Fid.Unique && (adp->states & CForeign)) {
1556 tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
1558 if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
1559 tvc = afs_GetVCache(&tfid, &treq, &cached, NULL);
1562 } /* sub-block just to reduce stack usage */
1565 int force_eval = afs_fakestat_enable ? 0 : 1;
1567 if (adp->states & CForeign)
1568 tvc->states |= CForeign;
1569 tvc->parentVnode = adp->fid.Fid.Vnode;
1570 tvc->parentUnique = adp->fid.Fid.Unique;
1571 tvc->states &= ~CBulkStat;
1573 if (afs_fakestat_enable == 2 && tvc->mvstat == 1) {
1574 ObtainSharedLock(&tvc->lock, 680);
1575 if (!tvc->linkData) {
1576 UpgradeSToWLock(&tvc->lock, 681);
1577 code = afs_HandleLink(tvc, &treq);
1578 ConvertWToRLock(&tvc->lock);
1580 ConvertSToRLock(&tvc->lock);
1583 if (!code && !afs_strchr(tvc->linkData, ':'))
1585 ReleaseReadLock(&tvc->lock);
1587 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1588 if (!(flags & AFS_LOOKUP_NOEVAL))
1589 /* don't eval mount points */
1590 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1591 if (tvc->mvstat == 1 && force_eval) {
1592 /* a mt point, possibly unevaluated */
1593 struct volume *tvolp;
1595 ObtainWriteLock(&tvc->lock, 133);
1596 code = EvalMountPoint(tvc, adp, &tvolp, &treq);
1597 ReleaseWriteLock(&tvc->lock);
1602 afs_PutVolume(tvolp, WRITE_LOCK);
1606 /* next, we want to continue using the target of the mt point */
1607 if (tvc->mvid && (tvc->states & CMValid)) {
1609 /* now lookup target, to set .. pointer */
1610 afs_Trace2(afs_iclSetp, CM_TRACE_LOOKUP1,
1611 ICL_TYPE_POINTER, tvc, ICL_TYPE_FID,
1613 uvc = tvc; /* remember for later */
1615 if (tvolp && (tvolp->states & VForeign)) {
1616 /* XXXX tvolp has ref cnt on but not locked! XXX */
1618 afs_GetRootVCache(tvc->mvid, &treq, NULL, tvolp);
1620 tvc = afs_GetVCache(tvc->mvid, &treq, NULL, NULL);
1622 afs_PutVCache(uvc); /* we're done with it */
1627 afs_PutVolume(tvolp, WRITE_LOCK);
1632 /* now, if we came via a new mt pt (say because of a new
1633 * release of a R/O volume), we must reevaluate the ..
1634 * ptr to point back to the appropriate place */
1636 ObtainWriteLock(&tvc->lock, 134);
1637 if (tvc->mvid == NULL) {
1638 tvc->mvid = (struct VenusFid *)
1639 osi_AllocSmallSpace(sizeof(struct VenusFid));
1641 /* setup backpointer */
1642 *tvc->mvid = tvolp->dotdot;
1643 ReleaseWriteLock(&tvc->lock);
1644 afs_PutVolume(tvolp, WRITE_LOCK);
1650 afs_PutVolume(tvolp, WRITE_LOCK);
1655 if (tvc && !VREFCOUNT_GT(tvc, 0)) {
1660 /* if we get here, we found something in a directory that couldn't
1661 * be located (a Multics "connection failure"). If the volume is
1662 * read-only, we try flushing this entry from the cache and trying
1666 tv = afs_GetVolume(&adp->fid, &treq, READ_LOCK);
1668 if (tv->states & VRO) {
1669 pass = 1; /* try this *once* */
1670 ObtainWriteLock(&afs_xcbhash, 495);
1671 afs_DequeueCallback(adp);
1672 /* re-stat to get later version */
1673 adp->states &= ~CStatd;
1674 ReleaseWriteLock(&afs_xcbhash);
1675 osi_dnlc_purgedp(adp);
1676 afs_PutVolume(tv, READ_LOCK);
1679 afs_PutVolume(tv, READ_LOCK);
1686 /* put the network buffer back, if need be */
1687 if (tname != aname && tname)
1688 osi_FreeLargeSpace(tname);
1691 /* Handle RENAME; only need to check rename "." */
1692 if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
1693 if (!FidCmp(&(tvc->fid), &(adp->fid))) {
1694 afs_PutVCache(*avcp);
1696 afs_PutFakeStat(&fakestate);
1697 return afs_CheckCode(EISDIR, &treq, 18);
1700 #endif /* AFS_OSF_ENV */
1703 afs_AddMarinerName(aname, tvc);
1705 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1706 if (!(flags & AFS_LOOKUP_NOEVAL))
1707 /* Here we don't enter the name into the DNLC because we want the
1708 * evaluated mount dir to be there (the vcache for the mounted volume)
1709 * rather than the vc of the mount point itself. we can still find the
1710 * mount point's vc in the vcache by its fid. */
1711 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1713 osi_dnlc_enter(adp, aname, tvc, &versionNo);
1715 #ifdef AFS_LINUX20_ENV
1716 /* So Linux inode cache is up to date. */
1717 code = afs_VerifyVCache(tvc, &treq);
1719 afs_PutFakeStat(&fakestate);
1720 return 0; /* can't have been any errors if hit and !code */
1727 code = afs_CheckCode(code, &treq, 19);
1729 /* If there is an error, make sure *avcp is null.
1730 * Alphas panic otherwise - defect 10719.
1735 afs_PutFakeStat(&fakestate);