/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/*
 * Implements (in part):
 * AFS_EQ_ATSYS (macro)
 */
#include <afsconfig.h>
#include "../afs/param.h"

#include "../afs/sysincludes.h"	/* Standard vendor system headers */
#include "../afs/afsincludes.h"	/* AFS-based standard headers */
#include "../afs/afs_stats.h"	/* statistics */
#include "../afs/afs_cbqueue.h"
#include "../afs/nfsclient.h"
#include "../afs/exporter.h"
#include "../afs/afs_osidnlc.h"
/*
 * A few definitions.  This is until we have a proper header file
 * which has prototypes for all functions.
 */

extern struct DirEntry *afs_dir_GetBlob();
extern afs_rwlock_t afs_xvcache;
extern afs_rwlock_t afs_xcbhash;
extern struct afs_exporter *afs_nfsexporter;
extern char *afs_sysname;
extern char *afs_sysnamelist[];
extern int afs_sysnamecount;
extern struct afs_q VLRU;	/* vcache LRU */
#ifdef AFS_LINUX22_ENV
extern struct inode_operations afs_symlink_iops, afs_dir_iops;
#endif

afs_int32 afs_bulkStatsDone;
static int bulkStatCounter = 0;	/* counter for bulk stat seq. numbers */
int afs_fakestat_enable = 0;
/* This would be faster if it did the comparison as one 32-bit word, but it
 * would be dependent on byte order and alignment, and I haven't figured out
 * what "@sys" is in binary... */
#define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
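
/*
 * For illustration only (not part of the build): the single-word comparison
 * the note above alludes to.  The bytes of "@sys" are 0x40 0x73 0x79 0x73,
 * which read as one 32-bit little-endian word is 0x73797340.  A sketch,
 * assuming little-endian byte order and 4-byte-aligned names:
 *
 *   #define AFS_EQ_ATSYS_LE(name) \
 *       ((*(const afs_uint32 *)(name)) == 0x73797340 && !(name)[4])
 */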
char *afs_strcat(register char *s1, register char *s2)
{
    char *os1 = s1;

    /* find the end of s1, then copy s2 (including its NUL) onto the end */
    while (*s1)
        s1++;
    while ((*s1++ = *s2++))
        ;
    return os1;
}
char *afs_index(register char *a, register char c)
{
    register char tc;

    AFS_STATCNT(afs_index);
    while ((tc = *a)) {
        if (tc == c)
            return a;
        a++;
    }
    return NULL;
}
/* Called under write lock; evaluates the mvid field from a mount point.
 * avc is the vnode of the mount point object; must be write-locked.
 * advc is the vnode of the containing directory (optional; if NULL and
 *   EvalMountPoint succeeds, the caller must initialize (*avolpp)->dotdot).
 * avolpp is where we return a pointer to the volume named by the mount point, on success.
 * areq is the identity of the caller.
 *
 * NOTE: this function returns a held volume structure in *avolpp if it returns 0!
 */
int EvalMountPoint(register struct vcache *avc, struct vcache *advc,
                   struct volume **avolpp, register struct vrequest *areq)
{
    afs_int32 code;
    struct volume *tvp = 0;
    struct VenusFid tfid;
    struct cell *tcell;
    char *cpos, *volnamep;
    char type, buf[128];	/* buf size is illustrative; must hold volname + ".readonly" */
    afs_int32 prefetchRO;	/* 1=>No 2=>Yes */
    afs_int32 mtptCell, assocCell, hac = 0;
    afs_int32 samecell, roname, len;

    AFS_STATCNT(EvalMountPoint);
    if (avc->mvid && (avc->states & CMValid))
        return 0;		/* done while racing */

    /* read the mount point's link contents */
    code = afs_HandleLink(avc, areq);
    if (code)
        return code;
    /* Determine which cell and volume the mount point goes to */
    type = avc->linkData[0];	/* '#'=>Regular '%'=>RW */
    cpos = afs_index(&avc->linkData[1], ':');	/* if cell name present */
    if (cpos) {
        /* cell name runs from linkData[1] up to the ':' */
        *cpos = 0;
        tcell = afs_GetCellByName(&avc->linkData[1], READ_LOCK);
        *cpos = ':';
        volnamep = cpos + 1;
    } else {
        /* no cell name; stay in the mount point's own cell */
        volnamep = &avc->linkData[1];
        tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
    }
    if (!tcell)
        return ENODEV;

    mtptCell = tcell->cell;	/* The cell for the mount point */
    if (tcell->lcellp) {
        hac = 1;		/* has associated cell */
        assocCell = tcell->lcellp->cell;	/* The associated cell */
    }
    afs_PutCell(tcell, READ_LOCK);
    /* Is the volume name a "<n>.backup" or "<n>.readonly" name? */
    len = strlen(volnamep);
    roname = ((len > 9) && (strcmp(&volnamep[len - 9], ".readonly") == 0)) ||
             ((len > 7) && (strcmp(&volnamep[len - 7], ".backup") == 0));

    /* When we cross the mount point, do we stay in the same cell? */
    samecell = (avc->fid.Cell == mtptCell) || (hac && (avc->fid.Cell == assocCell));
    /* Decide whether to prefetch the RO; this also means we want the RO.
     * If this is a regular mount point with an RW volume name and we either
     * cross a cell boundary or start from an RO volume, then we will
     * want to prefetch the RO volume when we get the RW below.
     */
    if ((type == '#') && !roname && (!samecell || (avc->states & CRO))) {
        prefetchRO = 2;		/* Yes, prefetch the RO */
    } else {
        prefetchRO = 1;		/* No prefetch of the RO */
    }
    /* Get the volume struct.  Unless this volume name has ".readonly" or
     * ".backup" in it, this will get the volume struct for the RW volume.
     * The RO volume will be prefetched if requested (but not returned).
     */
    tvp = afs_GetVolumeByName(volnamep, mtptCell, prefetchRO, areq, WRITE_LOCK);

    /* If no volume was found in this cell, try the associated linked cell */
    if (!tvp && hac && areq->volumeError) {
        tvp = afs_GetVolumeByName(volnamep, assocCell, prefetchRO, areq, WRITE_LOCK);
    }
    /* Still not found.  If we are looking for the RO, then perhaps the RW
     * doesn't exist?  Try adding ".readonly" to volname and look for that.
     * Don't know why we do this; we would have found it in the call above - jpm.
     */
    if (!tvp && (prefetchRO == 2)) {
        strcpy(buf, volnamep);
        afs_strcat(buf, ".readonly");

        tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);

        /* Try the associated linked cell if that failed */
        if (!tvp && hac && areq->volumeError) {
            tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
        }
    }
    if (!tvp)
        return ENODEV;		/* Couldn't find the volume */

    /* Don't cross a mount point from a BK to a BK volume */
    if ((avc->states & CBackup) && (tvp->states & VBackup)) {
        afs_PutVolume(tvp, WRITE_LOCK);
        return ENODEV;
    }

    /* If we want (prefetched) the RO and it exists, then drop the
     * RW volume and get the RO.  Otherwise, go with the RW.
     */
    if ((prefetchRO == 2) && tvp->roVol) {
        tfid.Fid.Volume = tvp->roVol;	/* remember the RO volume */
        tfid.Cell = tvp->cell;
        afs_PutVolume(tvp, WRITE_LOCK);	/* release the old volume */
        tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK);	/* get the new one */
        if (!tvp)
            return ENODEV;	/* oops, can't do it */
    }
    if (avc->mvid == 0)
        avc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
    avc->mvid->Cell = tvp->cell;
    avc->mvid->Fid.Volume = tvp->volume;
    avc->mvid->Fid.Vnode = 1;
    avc->mvid->Fid.Unique = 1;
    avc->states |= CMValid;
    /* Used to: if the mount point is stored within a backup volume,
     * then we should only update the parent pointer information if
     * there's none already set, so as to avoid updating a volume's ..
     * info with something in an OldFiles directory.
     *
     * Next two lines used to be under this if:
     *
     *   if (!(avc->states & CBackup) || tvp->dotdot.Fid.Volume == 0)
     *
     * Now: update the mount point back pointer on every call, so that we
     * handle multiple mount points better.  This way, when du tries to go
     * back via chdir(".."), it will end up exactly where it started, yet
     * cd'ing via a new path to a volume will reset the ".." pointer to the
     * new path.
     */
    tvp->mtpoint = avc->fid;	/* set up the back pointer to the mount point */
    if (advc)
        tvp->dotdot = advc->fid;

    *avolpp = tvp;
    return 0;
}
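
/*
 * Worked example of the parsing above (cell and volume names are
 * hypothetical): a mount point whose link data is "#example.org:root.cell"
 * has type '#' (regular), cell "example.org", and volume name "root.cell".
 * With '%' instead of '#' the RW volume is forced, and with no ":" present,
 * e.g. "#root.cell", the lookup stays in the mount point's own cell.
 */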
/*
 * afs_InitFakeStat
 *
 * Must be called on an afs_fakestat_state object before calling
 * afs_EvalFakeStat or afs_PutFakeStat.  Calling afs_PutFakeStat
 * without calling afs_EvalFakeStat is legal, as long as this
 * function is called.
 */
void afs_InitFakeStat(struct afs_fakestat_state *state)
{
    state->valid = 1;
    state->did_eval = 0;
    state->need_release = 0;
}
/*
 * afs_EvalFakeStat_int
 *
 * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
 * which is called by those wrapper functions.
 *
 * Only issues RPCs if canblock is non-zero.
 */
int afs_EvalFakeStat_int(struct vcache **avcp, struct afs_fakestat_state *state,
                         struct vrequest *areq, int canblock)
{
    struct vcache *tvc, *root_vp;
    struct volume *tvolp = NULL;
    int code = 0;

    osi_Assert(state->valid == 1);
    osi_Assert(state->did_eval == 0);
    state->did_eval = 1;
    if (!afs_fakestat_enable)
        return 0;
    tvc = *avcp;
    if (tvc->mvstat != 1)
        return 0;
    /* Is the call to VerifyVCache really necessary? */
    code = afs_VerifyVCache(tvc, areq);
    if (code)
        goto done;
    if (canblock) {
        ObtainWriteLock(&tvc->lock, 599);
        code = EvalMountPoint(tvc, NULL, &tvolp, areq);
        ReleaseWriteLock(&tvc->lock);
        if (code)
            goto done;
        if (tvolp) {
            tvolp->dotdot = tvc->fid;
            tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
            tvolp->dotdot.Fid.Unique = tvc->parentUnique;
        }
    }
    if (tvc->mvid && (tvc->states & CMValid)) {
        if (!canblock) {
            afs_int32 retry;

            do {
                retry = 0;
                ObtainWriteLock(&afs_xvcache, 597);
                root_vp = afs_FindVCache(tvc->mvid, &retry, 0);
                if (root_vp && retry) {
                    ReleaseWriteLock(&afs_xvcache);
                    afs_PutVCache(root_vp);
                }
            } while (root_vp && retry);
            ReleaseWriteLock(&afs_xvcache);
        } else {
            root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL);
        }
        if (!root_vp) {
            code = canblock ? ENOENT : 0;
            goto done;
        }
        if (tvolp) {
            /* Is this always kosher?  Perhaps we should instead use
             * NBObtainWriteLock to avoid potential deadlock.
             */
            ObtainWriteLock(&root_vp->lock, 598);
            if (!root_vp->mvid)
                root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
            *root_vp->mvid = tvolp->dotdot;
            ReleaseWriteLock(&root_vp->lock);
        }
        state->need_release = 1;
        state->root_vp = root_vp;
        *avcp = root_vp;
    } else {
        code = canblock ? ENOENT : 0;
    }

done:
    if (tvolp)
        afs_PutVolume(tvolp, WRITE_LOCK);
    return code;
}
/*
 * afs_EvalFakeStat
 *
 * Automatically does the equivalent of EvalMountPoint for vcache entries
 * which are mount points.  Remembers enough state to properly release
 * the volume root vcache when afs_PutFakeStat() is called.
 *
 * The state variable must be initialized by afs_InitFakeStat() beforehand.
 *
 * Returns 0 when everything succeeds and *avcp points to the vcache entry
 * that should be used for the real vnode operation.  Returns non-zero if
 * something goes wrong and the error code should be returned to the user.
 */
int afs_EvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
                     struct vrequest *areq)
{
    return afs_EvalFakeStat_int(avcp, state, areq, 1);
}
/*
 * afs_TryEvalFakeStat
 *
 * Same as afs_EvalFakeStat, but tries not to talk to remote servers
 * and only evaluates the mount point if all the data is already in
 * the cache.
 *
 * Returns 0 if everything succeeds and *avcp points to a valid
 * vcache entry (possibly evaluated).
 */
int afs_TryEvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
                        struct vrequest *areq)
{
    return afs_EvalFakeStat_int(avcp, state, areq, 0);
}
/*
 * afs_PutFakeStat
 *
 * Perform any necessary cleanup at the end of a vnode op, given that
 * afs_InitFakeStat was previously called with this state.
 */
void afs_PutFakeStat(struct afs_fakestat_state *state)
{
    osi_Assert(state->valid == 1);
    if (state->need_release)
        afs_PutVCache(state->root_vp);
    state->valid = 0;
}
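
/*
 * A minimal usage sketch of the three calls above, in the shape the vnode
 * ops below use them (the body is illustrative, not a real vnode op):
 *
 *   struct afs_fakestat_state fakestate;
 *
 *   afs_InitFakeStat(&fakestate);
 *   code = afs_EvalFakeStat(&avc, &fakestate, &treq);
 *   if (!code) {
 *       ... use avc, which may now be the volume root vcache ...
 *   }
 *   afs_PutFakeStat(&fakestate);
 */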
int afs_ENameOK(register char *aname)
{
    register int tlen;

    AFS_STATCNT(ENameOK);
    tlen = strlen(aname);
    if (tlen >= 4 && strcmp(aname + tlen - 4, "@sys") == 0)
        return 0;
    return 1;
}
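
/*
 * Illustration: afs_ENameOK("foo@sys") returns 0, since the name ends in
 * "@sys"; afs_ENameOK("foo") returns 1.
 */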
int afs_getsysname(register struct vrequest *areq, register struct vcache *adp,
                   register char *bufp)
{
    static char sysname[MAXSYSNAME];
    register struct unixuser *au;
    register afs_int32 error;

    if (!afs_nfsexporter) {
        strcpy(bufp, afs_sysname);
        return 0;
    }
    AFS_STATCNT(getsysname);
    au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
    afs_PutUser(au, 0);
    if (au->exporter) {
        error = EXP_SYSNAME(au->exporter, NULL, bufp);
        if (error)
            strcpy(bufp, "@sys");	/* no translation available */
        return -1;			/* no list to iterate over */
    } else {
        strcpy(bufp, afs_sysname);
        return 0;			/* start of afs_sysnamelist */
    }
}
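
/*
 * Example of the resulting substitution (the sysname value is illustrative):
 * with afs_sysname set to "i386_linux24", the name "@sys" expands to
 * "i386_linux24", and a name of the form "pkg.@sys" expands to
 * "pkg.i386_linux24" (see Check_AtSys and Next_AtSys below).
 */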
int Check_AtSys(register struct vcache *avc, const char *aname,
                struct sysname_info *state, struct vrequest *areq)
{
    if (AFS_EQ_ATSYS(aname)) {
        state->offset = 0;
        state->name = (char *) osi_AllocLargeSpace(AFS_SMALLOCSIZ);
        state->allocked = 1;
        state->index = afs_getsysname(areq, avc, state->name);
    } else {
        state->offset = -1;	/* initial state for aname != "@sys" */
        state->allocked = 0;
        state->index = 0;
        state->name = (char *) aname;
    }
    return 1;
}
int Next_AtSys(register struct vcache *avc, struct vrequest *areq,
               struct sysname_info *state)
{
    if (state->index == -1)
        return 0;		/* No list */

    /* Check for the initial state of aname != "@sys" in Check_AtSys */
    if (state->offset == -1 && state->allocked == 0) {
        register char *tname;

        /* Check for .*@sys */
        for (tname = state->name; *tname; tname++)
            /* move to the end of the string */;
        if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname - 4))) {
            state->offset = (tname - 4) - state->name;
            tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
            strncpy(tname, state->name, state->offset);
            state->name = tname;
            state->allocked = 1;
            state->index = afs_getsysname(areq, avc, state->name + state->offset);
            return 1;
        } else
            return 0;		/* .*@sys doesn't match either */
    } else if (++(state->index) >= afs_sysnamecount
               || !afs_sysnamelist[state->index])
        return 0;		/* end of list */
    strcpy(state->name + state->offset, afs_sysnamelist[state->index]);
    return 1;
}
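
/*
 * Sketch of the intended calling pattern for the two functions above; it
 * mirrors the real loop in afs_lookup further below:
 *
 *   Check_AtSys(adp, aname, &sysState, &treq);
 *   code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
 *   while (code == ENOENT && Next_AtSys(adp, &treq, &sysState))
 *       code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
 */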
#if (defined(AFS_SGI62_ENV) || defined(AFS_SUN57_64BIT_ENV))
extern int BlobScan(ino64_t *afile, afs_int32 ablob);
#else
#if defined(AFS_LINUX_64BIT_KERNEL)
extern int BlobScan(long *afile, afs_int32 ablob);
#else
extern int BlobScan(afs_int32 *afile, afs_int32 ablob);
#endif
#endif
/* Called with an unlocked directory and directory cookie.  Areqp
 * describes who is making the call.
 * Scans the next N (about 30, typically) directory entries, and does
 * a bulk stat call to stat them all.
 *
 * Must be very careful when merging in RPC responses, since we don't
 * want to overwrite newer info that was added by a file system mutating
 * call that ran concurrently with our bulk stat call.
 *
 * We do that, as described below, by not merging in our info (it's always
 * safe to skip the merge) if the status info is valid in the vcache entry.
 *
 * If adapt ever implements the bulk stat RPC, then this code will need to
 * ensure that vcaches created for failed RPCs to older servers have the
 * CForeign bit set.
 */
static struct vcache *BStvc = NULL;
int afs_DoBulkStat(struct vcache *adp, long dirCookie, struct vrequest *areqp)
{
    int nentries;		/* # of entries to prefetch */
    int nskip;			/* # of slots in the LRU queue to skip */
    struct vcache *lruvcp;	/* vcache ptr of our goal pos in LRU queue */
    struct dcache *dcp;		/* chunk containing the dir block */
    char *statMemp;		/* status memory block */
    char *cbfMemp;		/* callback and fid memory block */
    afs_size_t temp;		/* temp for holding chunk length, &c. */
    struct AFSFid *fidsp;	/* file IDs we're collecting */
    struct AFSCallBack *cbsp;	/* callback pointers */
    struct AFSCallBack *tcbp;	/* temp callback ptr */
    struct AFSFetchStatus *statsp;	/* file status info */
    struct AFSVolSync volSync;	/* vol sync return info */
    struct vcache *tvcp;	/* temp vcp */
    struct afs_q *tq;		/* temp queue variable */
    AFSCBFids fidParm;		/* file ID parm for bulk stat */
    AFSBulkStats statParm;	/* stat info parm for bulk stat */
    int fidIndex;		/* which file we're stating */
    struct conn *tcp;		/* conn for call */
    AFSCBs cbParm;		/* callback parm for bulk stat */
    struct server *hostp = 0;	/* host we got callback from */
    long origEvenCBs;		/* original # of callbacks for even-fid files */
    long origOddCBs;		/* original # of callbacks for odd-fid files */
    long origEvenZaps;		/* original # of recycles for even-fid files */
    long origOddZaps;		/* original # of recycles for odd-fid files */
    long startTime;		/* time we started the call,
				 * for callback expiration base */
    afs_size_t statSeqNo;	/* value stored in the file size to detect races */
    int code;			/* error code */
    long newIndex;		/* new index in the dir */
    struct DirEntry *dirEntryp;	/* dir entry we are examining */
    int i;
    struct VenusFid afid;	/* file ID we are using now */
    struct VenusFid tfid;	/* another temp. file ID */
    afs_int32 retry;		/* handle low-level SGI MP race conditions */
    long volStates;		/* flags from vol structure */
    struct volume *volp = 0;	/* volume ptr */
    struct VenusFid dotdot;
    int flagIndex;		/* First file with bulk fetch flag set */
    int inlinebulk = 0;		/* Did we use InlineBulk RPC or not? */
    XSTATS_DECLS;
    /* First compute some basic parameters.  We don't want to prefetch more
     * than a fraction of the cache in any given call, and we want to preserve
     * a portion of the LRU queue in any event, so as to avoid thrashing
     * the entire stat cache (we will at least leave some of it alone).
     * Presently we don't stat more than 1/8 of the cache in any one call. */
    nentries = afs_cacheStats / 8;

    /* don't bother prefetching more than one call's worth of info */
    if (nentries > AFSCBMAX)
        nentries = AFSCBMAX;

    /* heuristic to make sure that things fit in 4K.  This means that
     * we shouldn't make it any bigger than 47 entries.  I am typically
     * going to keep it a little lower, since we don't want to load
     * too much of the stat cache.
     */
    if (nentries > 30)
        nentries = 30;
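
    /*
     * Worked example (the cache size is illustrative): with afs_cacheStats
     * at 300 stat cache entries, nentries starts at 300/8 = 37, is already
     * below AFSCBMAX, and the 4K heuristic above then clips it to the final
     * value of 30.
     */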
    /* Now, to reduce the stack size, we'll allocate two 4K blocks,
     * one for fids and callbacks, and one for stat info.  We'll set
     * up our pointers to the memory from there, too.
     */
    statMemp = osi_AllocLargeSpace(nentries * sizeof(AFSFetchStatus));
    statsp = (struct AFSFetchStatus *) statMemp;
    cbfMemp = osi_AllocLargeSpace(nentries *
                                  (sizeof(AFSCallBack) + sizeof(AFSFid)));
    fidsp = (AFSFid *) cbfMemp;
    cbsp = (AFSCallBack *) (cbfMemp + nentries * sizeof(AFSFid));
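
    /*
     * Resulting layout of the two blocks (n == nentries):
     *
     *   statMemp: [ AFSFetchStatus 0 | ... | AFSFetchStatus n-1 ]
     *   cbfMemp:  [ AFSFid 0 | ... | AFSFid n-1 | AFSCallBack 0 | ... | AFSCallBack n-1 ]
     */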
    /* Next, we must iterate over the directory, starting from the specified
     * cookie offset (dirCookie), and counting out nentries file entries.
     * We skip files that already have stat cache entries, since we
     * don't want to bulk stat files that are already in the cache.
     */
tagain:
    code = afs_VerifyVCache(adp, areqp);
    if (code)
        goto done;

    dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
    if (!dcp) {
        code = ENOENT;
        goto done;
    }

    /* lock the directory cache entry */
    ObtainReadLock(&adp->lock);
    ObtainReadLock(&dcp->lock);
    /*
     * Make sure that the data in the cache is current.  There are two
     * cases we need to worry about:
     * 1. The cache data is being fetched by another process.
     * 2. The cache data is no longer valid.
     */
    while ((adp->states & CStatd)
           && (dcp->dflags & DFFetching)
           && hsame(adp->m.DataVersion, dcp->f.versionNo)) {
        afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
                   ICL_TYPE_STRING, __FILE__,
                   ICL_TYPE_INT32, __LINE__,
                   ICL_TYPE_POINTER, dcp,
                   ICL_TYPE_INT32, dcp->dflags);
        ReleaseReadLock(&dcp->lock);
        ReleaseReadLock(&adp->lock);
        afs_osi_Sleep(&dcp->validPos);
        ObtainReadLock(&adp->lock);
        ObtainReadLock(&dcp->lock);
    }
    if (!(adp->states & CStatd)
        || !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
        ReleaseReadLock(&dcp->lock);
        ReleaseReadLock(&adp->lock);
        afs_PutDCache(dcp);
        goto tagain;
    }
    /* Generate a sequence number so we can tell whether we should
     * store the attributes when processing the response.  This number is
     * stored in the file size when we set the CBulkFetching bit.  If the
     * CBulkFetching bit is still set and this value hasn't changed, then
     * we know we were the last to set the CBulkFetching bit for this file,
     * and it is safe to set the status information for this file.
     */
    statSeqNo = bulkStatCounter++;
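
    /*
     * In outline, the race-detection protocol implemented below is:
     *
     *   tvcp->states |= CBulkFetching;    - mark: bulk stat in flight
     *   tvcp->m.Length = statSeqNo;       - stamp with our sequence number
     *   ... issue the bulk RPC ...
     *   if ((tvcp->states & CBulkFetching) && tvcp->m.Length == statSeqNo)
     *       merge the returned status;    - nobody else touched the entry
     */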
    /* now we have dir data in the cache, so scan the dir page */
    fidIndex = 0;
    flagIndex = 0;
    while (1) {			/* Should probably have some constant bound */
        /* look for the first safe entry to examine in the directory.  BlobScan
         * looks for the first allocated dir entry after the dirCookie slot.
         */
        newIndex = BlobScan(&dcp->f.inode, (dirCookie >> 5));
        if (newIndex == 0)
            break;

        /* remember the updated directory cookie; dir blobs are 32 bytes,
         * hence the conversions by shifting 5 */
        dirCookie = newIndex << 5;

        /* get a ptr to the dir entry */
        dirEntryp = (struct DirEntry *) afs_dir_GetBlob(&dcp->f.inode, newIndex);
        if (!dirEntryp)
            break;
        /* don't copy more than we have room for */
        if (fidIndex >= nentries) {
            DRelease((char *) dirEntryp, 0);
            break;
        }
        /* now, if the dir entry looks good, copy it out to our list.  Vnode
         * 0 means deleted, although it should also be free were it deleted.
         */
        if (dirEntryp->fid.vnode != 0) {
            /* don't copy entries we have in our cache.  This check will
             * also make us skip "." and probably "..", unless it has
             * disappeared from the cache since we did our namei call.
             */
            tfid.Cell = adp->fid.Cell;
            tfid.Fid.Volume = adp->fid.Fid.Volume;
            tfid.Fid.Vnode = ntohl(dirEntryp->fid.vnode);
            tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
            do {
                retry = 0;
                ObtainWriteLock(&afs_xvcache, 130);
                tvcp = afs_FindVCache(&tfid, &retry, 0 /* no stats | LRU */);
                if (tvcp && retry) {
                    ReleaseWriteLock(&afs_xvcache);
                    afs_PutVCache(tvcp);
                }
            } while (tvcp && retry);
            if (!tvcp) {	/* otherwise, create manually */
                tvcp = afs_NewVCache(&tfid, hostp);
                ObtainWriteLock(&tvcp->lock, 505);
                ReleaseWriteLock(&afs_xvcache);
                afs_RemoveVCB(&tfid);
                ReleaseWriteLock(&tvcp->lock);
            } else {
                ReleaseWriteLock(&afs_xvcache);
            }
            if (!tvcp)
                goto done;	/* can't happen at present, more's the pity */
            /* WARNING: afs_DoBulkStat uses the Length field to store a
             * sequence number for each bulk status request.  Under no
             * circumstances should afs_DoBulkStat store a sequence number
             * if the new length will be ignored when afs_ProcessFS is
             * called with new stats. */
#ifdef AFS_SGI_ENV
            if (!(tvcp->states & (CStatd|CBulkFetching))
                && (tvcp->execsOrWriters <= 0)
                && !afs_DirtyPages(tvcp)
                && !AFS_VN_MAPPED((vnode_t *) tvcp))
#else
            if (!(tvcp->states & (CStatd|CBulkFetching))
                && (tvcp->execsOrWriters <= 0)
                && !afs_DirtyPages(tvcp))
#endif
            {
                /* this entry doesn't exist in the cache, and is not
                 * already being fetched by someone else, so add it to the
                 * list of file IDs to obtain.
                 *
                 * We detect a callback breaking race condition by checking the
                 * CBulkFetching state bit and the value in the file size.
                 * It is safe to set the status only if the CBulkFetching
                 * flag is still set and the value in the file size does
                 * not change.
                 *
                 * Don't fetch status for dirty files.  We need to
                 * preserve the value of the file size.  We could
                 * flush the pages, but it wouldn't be worthwhile.
                 */
                memcpy((char *) (fidsp + fidIndex), (char *) &tfid.Fid, sizeof(*fidsp));
                tvcp->states |= CBulkFetching;
                tvcp->m.Length = statSeqNo;
                fidIndex++;
            }
            afs_PutVCache(tvcp);
        }	/* if dir vnode has non-zero entry */
        /* move to the next dir entry by adding in the # of entries
         * used by this dir entry.
         */
        temp = afs_dir_NameBlobs(dirEntryp->name) << 5;
        DRelease((char *) dirEntryp, 0);
        if (temp <= 0)
            break;
        dirCookie += temp;
    }	/* while loop over all dir entries */
    /* now release the dir lock and prepare to make the bulk RPC */
    ReleaseReadLock(&dcp->lock);
    ReleaseReadLock(&adp->lock);

    /* release the chunk */
    afs_PutDCache(dcp);

    /* don't make a null call */
    if (fidIndex == 0)
        goto done;
    do {
        /* setup the RPC parm structures */
        fidParm.AFSCBFids_len = fidIndex;
        fidParm.AFSCBFids_val = fidsp;
        statParm.AFSBulkStats_len = fidIndex;
        statParm.AFSBulkStats_val = statsp;
        cbParm.AFSCBs_len = fidIndex;
        cbParm.AFSCBs_val = cbsp;

        /* start the timer; callback expirations are relative to this */
        startTime = osi_Time();

        tcp = afs_Conn(&adp->fid, areqp, SHARED_LOCK);
        if (tcp) {
            hostp = tcp->srvr->server;
            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
            if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
                code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
                                              &cbParm, &volSync);
                if (code == RXGEN_OPCODE) {
                    /* server doesn't speak InlineBulkStatus; remember that
                     * and fall back to the plain BulkStatus RPC */
                    tcp->srvr->server->flags |= SNO_INLINEBULK;
                    inlinebulk = 0;
                    code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
                                            &cbParm, &volSync);
                } else
                    inlinebulk = 1;
            } else {
                inlinebulk = 0;
                code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
                                        &volSync);
            }
            XSTATS_END_TIME;
        } else
            code = -1;
    } while (afs_Analyze(tcp, code, &adp->fid, areqp,
                         AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL));
    /* now, if we didn't get the info, bail out. */
    if (code)
        goto done;

    /* we need vol flags to create the entries properly */
    dotdot.Fid.Volume = 0;
    volp = afs_GetVolume(&adp->fid, areqp, READ_LOCK);
    if (volp) {
        volStates = volp->states;
        if (volp->dotdot.Fid.Volume != 0)
            dotdot = volp->dotdot;
    } else
        volStates = 0;
    /* find the place to merge the info into.  We do this by skipping
     * nskip entries in the LRU queue.  The more we skip, the more
     * we preserve, since the head of the VLRU queue is the most recently
     * referenced.
     */
reskip:
    nskip = afs_cacheStats / 2;	/* preserved fraction of the cache */
    ObtainReadLock(&afs_xvcache);
    if (QEmpty(&VLRU)) {
        /* actually a serious error, probably should panic.  Probably will
         * panic soon, oh well. */
        ReleaseReadLock(&afs_xvcache);
        afs_warnuser("afs_DoBulkStat: VLRU empty!");
        goto done;
    }
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
        refpanic("Bulkstat VLRU inconsistent");
    }
    for (tq = VLRU.next; tq != &VLRU; tq = QNext(tq)) {
        if (--nskip <= 0)
            break;
        else if (QNext(QPrev(tq)) != tq) {
            BStvc = QTOV(tq);
            refpanic("BulkStat VLRU inconsistent");
        }
    }
    if (tq != &VLRU)
        lruvcp = QTOV(tq);
    else
        lruvcp = QTOV(VLRU.next);
    /* now we have to hold this entry, so that it does not get moved
     * into the free list while we're running.  It could still get
     * moved within the LRU queue, but hopefully that will be rare; it
     * doesn't hurt nearly as much.
     */
    retry = 0;
    osi_vnhold(lruvcp, &retry);
    ReleaseReadLock(&afs_xvcache);	/* could be read lock */
    if (retry)
        goto reskip;
    /* otherwise, merge in the info.  We have to be quite careful here,
     * since we need to ensure that we don't merge old info over newer
     * stuff in a stat cache entry.  We're very conservative here: we don't
     * do the merge at all unless we ourselves create the stat cache
     * entry.  That's pretty safe, and should work pretty well, since we
     * typically expect to do the stat cache creation ourselves.
     *
     * We also have to take into account racing token revocations.
     */
    for (i = 0; i < fidIndex; i++) {
        if ((&statsp[i])->errorCode)
            continue;
        afid.Cell = adp->fid.Cell;
        afid.Fid.Volume = adp->fid.Fid.Volume;
        afid.Fid.Vnode = fidsp[i].Vnode;
        afid.Fid.Unique = fidsp[i].Unique;
        do {
            retry = 0;
            ObtainReadLock(&afs_xvcache);
            tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */);
            ReleaseReadLock(&afs_xvcache);
        } while (tvcp && retry);
        /* The entry may no longer exist */
        if (tvcp == NULL) {
            continue;
        }

        /* now we have the entry held, but we need to fill it in */
        ObtainWriteLock(&tvcp->lock, 131);
        /* if CBulkFetching is not set, or if the file size no longer
         * matches the value we placed there when we set the CBulkFetching
         * flag, then someone else has done something with this node,
         * and we may not have the latest status information for this
         * file.  Leave the entry alone.
         */
        if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
            flagIndex++;
            ReleaseWriteLock(&tvcp->lock);
            afs_PutVCache(tvcp);
            continue;
        }
        /* now copy ".." entry back out of volume structure, if necessary */
        if (tvcp->mvstat == 2 && (dotdot.Fid.Volume != 0)) {
            if (!tvcp->mvid)
                tvcp->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
            *tvcp->mvid = dotdot;
        }
        /* move the entry in the VLRU to just after lruvcp's position */
        ObtainWriteLock(&afs_xvcache, 132);
        if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
            refpanic("Bulkstat VLRU inconsistent2");
        }
        if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
            || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
            refpanic("Bulkstat VLRU inconsistent4");
        }
        if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
            || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
            refpanic("Bulkstat VLRU inconsistent5");
        }

        if (tvcp != lruvcp) {	/* if they are == don't move it, don't corrupt vlru */
            QRemove(&tvcp->vlruq);
            QAdd(&lruvcp->vlruq, &tvcp->vlruq);
        }

        if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
            refpanic("Bulkstat VLRU inconsistent3");
        }
        if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
            || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq)) {
            refpanic("Bulkstat VLRU inconsistent5");
        }
        if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
            || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq)) {
            refpanic("Bulkstat VLRU inconsistent6");
        }
        ReleaseWriteLock(&afs_xvcache);
        ObtainWriteLock(&afs_xcbhash, 494);

        /* We need to check the flags again.  We may have missed
         * something while we were waiting for a lock.
         */
        if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
            flagIndex++;
            ReleaseWriteLock(&tvcp->lock);
            ReleaseWriteLock(&afs_xcbhash);
            afs_PutVCache(tvcp);
            continue;
        }
        /* now merge in the resulting status back into the vnode.
         * We only do this if the entry looks clear.
         */
        afs_ProcessFS(tvcp, &statsp[i], areqp);
#ifdef AFS_LINUX22_ENV
        /* overwrite the ops if it's a directory or symlink. */
        if (vType(tvcp) == VDIR)
            tvcp->v.v_op = &afs_dir_iops;
        else if (vType(tvcp) == VLNK)
            tvcp->v.v_op = &afs_symlink_iops;
#endif

        /* do some accounting for bulk stats: mark this entry as
         * loaded, so we can tell if we use it before it gets
         * recycled.
         */
        tvcp->states |= CBulkStat;
        tvcp->states &= ~CBulkFetching;
        flagIndex++;
        afs_bulkStatsDone++;
        /* merge in vol info */
        if (volStates & VRO)
            tvcp->states |= CRO;
        if (volStates & VBackup)
            tvcp->states |= CBackup;
        if (volStates & VForeign)
            tvcp->states |= CForeign;

        /* merge in the callback info */
        tvcp->states |= CTruth;

        /* get ptr to the callback we are interested in */
        tcbp = cbsp + i;

        if (tcbp->ExpirationTime != 0) {
            tvcp->cbExpires = tcbp->ExpirationTime + startTime;
            tvcp->callback = hostp;
            tvcp->states |= CStatd;
            afs_QueueCallback(tvcp, CBHash(tcbp->ExpirationTime), volp);
        }
        else if (tvcp->states & CRO) {
            /* ordinary callback on a read-only volume -- AFS 3.2 style */
            tvcp->cbExpires = 3600 + startTime;
            tvcp->callback = hostp;
            tvcp->states |= CStatd;
            afs_QueueCallback(tvcp, CBHash(3600), volp);
        }
        else {
            tvcp->callback = 0;
            tvcp->states &= ~(CStatd | CUnique);
            afs_DequeueCallback(tvcp);
            if ((tvcp->states & CForeign) || (vType(tvcp) == VDIR))
                osi_dnlc_purgedp(tvcp);	/* if it (could be) a directory */
        }
        ReleaseWriteLock(&afs_xcbhash);

        ReleaseWriteLock(&tvcp->lock);
        /* finally, we're done with the entry */
        afs_PutVCache(tvcp);
    }	/* for all files we got back */

    /* finally return the pointer into the LRU queue */
    afs_PutVCache(lruvcp);
done:
    /* Be sure to turn off the CBulkFetching flags */
    for (i = flagIndex; i < fidIndex; i++) {
        afid.Cell = adp->fid.Cell;
        afid.Fid.Volume = adp->fid.Fid.Volume;
        afid.Fid.Vnode = fidsp[i].Vnode;
        afid.Fid.Unique = fidsp[i].Unique;
        do {
            retry = 0;
            ObtainReadLock(&afs_xvcache);
            tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */);
            ReleaseReadLock(&afs_xvcache);
        } while (tvcp && retry);
        if (tvcp != NULL
            && (tvcp->states & CBulkFetching)
            && (tvcp->m.Length == statSeqNo)) {
            tvcp->states &= ~CBulkFetching;
        }
        if (tvcp != NULL) {
            afs_PutVCache(tvcp);
        }
    }
    if (volp)
        afs_PutVolume(volp, READ_LOCK);

    /* If we did the InlineBulk RPC, pull out the return code */
    if (inlinebulk) {
        if ((&statsp[0])->errorCode) {
            afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
                        AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
                        NULL);
            code = (&statsp[0])->errorCode;
        }
    }

    osi_FreeLargeSpace(statMemp);
    osi_FreeLargeSpace(cbfMemp);
    return code;
}
/* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
static int AFSDOBULK = 1;
#ifdef AFS_OSF_ENV
afs_lookup(adp, ndp)
    struct vcache *adp;		/* parent dir's vnode */
    struct nameidata *ndp;
{
    char aname[MAXNAMLEN + 1];	/* XXX */
    struct vcache **avcp = (struct vcache **) &(ndp->ni_vp);
    struct ucred *acred = ndp->ni_cred;
    int wantparent = ndp->ni_nameiop & WANTPARENT;
    int opflag = ndp->ni_nameiop & OPFLAG;
#else /* AFS_OSF_ENV */
#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
afs_lookup(OSI_VC_ARG(adp), aname, avcp, pnp, flags, rdir, acred)
    struct pathname *pnp;
    int flags;
    struct vnode *rdir;
#else
#if defined(UKERNEL)
afs_lookup(adp, aname, avcp, acred, flags)
    int flags;
#else
afs_lookup(adp, aname, avcp, acred)
#endif /* UKERNEL */
#endif /* SUN5 || SGI */
    OSI_VC_DECL(adp);		/* vnode of parent dir */
    struct vcache **avcp;	/* result vnode */
    char *aname;		/* name to look up */
    struct AFS_UCRED *acred;	/* creds of caller */
{
    struct vrequest treq;
    char *tname = NULL;
    register struct vcache *tvc = 0;
    register afs_int32 code;
    register afs_int32 bulkcode = 0;
    int pass = 0, hit = 0;
    long dirCookie;
    extern afs_int32 afs_mariner;	/* writing activity to log? */
    OSI_VC_CONVERT(adp)
    afs_hyper_t versionNo;
    int no_read_access = 0;
    struct sysname_info sysState;	/* used only for @sys checking */
    int dynrootRetry = 1;
    struct afs_fakestat_state fakestate;
    int tryEvalOnly = 0;

    AFS_STATCNT(afs_lookup);
    afs_InitFakeStat(&fakestate);
    if ((code = afs_InitReq(&treq, acred)))
        goto done;

#ifdef AFS_OSF_ENV
    ndp->ni_dvp = AFSTOV(adp);
    memcpy(aname, ndp->ni_ptr, ndp->ni_namelen);
    aname[ndp->ni_namelen] = '\0';
#endif /* AFS_OSF_ENV */
#if defined(AFS_DARWIN_ENV)
    /* Workaround for MacOSX Finder, which tries to look for
     * .DS_Store and Contents under every directory.
     */
    if (afs_fakestat_enable && adp->mvstat == 1) {
        if (strcmp(aname, ".DS_Store") == 0)
            tryEvalOnly = 1;
        if (strcmp(aname, "Contents") == 0)
            tryEvalOnly = 1;
    }
#endif

    if (tryEvalOnly)
        code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
    else
        code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (tryEvalOnly && adp->mvstat == 1)
        code = ENOENT;
    if (code)
        goto done;
    *avcp = NULL;		/* Since some callers don't initialize it */

    /* come back to here if we encounter a non-existent object in a read-only
     * volume's directory */
redo:
    *avcp = NULL;		/* Since some callers don't initialize it */
    bulkcode = 0;

    if (!(adp->states & CStatd)) {
        if ((code = afs_VerifyVCache2(adp, &treq))) {
            goto done;
        }
    } else
        code = 0;
    /* watch for ".." in a volume root */
    if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
        /* looking up ".." in root via special hacks */
        if (adp->mvid == (struct VenusFid *) 0 || adp->mvid->Fid.Volume == 0) {
#ifdef AFS_OSF_ENV
            extern struct vcache *afs_globalVp;
            if (adp == afs_globalVp) {
                struct vnode *rvp = AFSTOV(adp);
/*
                ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
                ndp->ni_dvp = ndp->ni_vp;
                VN_HOLD(*avcp);
*/
                code = ENODEV;
                goto done;
            }
#endif
            code = ENODEV;
            goto done;
        }
        /* otherwise we have the fid here, so we use it */
        tvc = afs_GetVCache(adp->mvid, &treq, NULL, NULL);
        afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT,
                   ICL_TYPE_FID, adp->mvid, ICL_TYPE_POINTER, tvc,
                   ICL_TYPE_INT32, code);
        *avcp = tvc;
        code = (tvc ? 0 : ENOENT);
        hit = 1;
        if (tvc && !VREFCOUNT(tvc)) {
            osi_Panic("TT1");
        }
        if (code) {
            /* printf("LOOKUP GETVCDOTDOT -> %d\n", code); */
        }
        goto done;
    }
    /* now check the access */
    if (treq.uid != adp->last_looker) {
        if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
            *avcp = NULL;
            code = EACCES;
            goto done;
        }
        else
            adp->last_looker = treq.uid;
    }

    /* Check for read access as well.  We need read access in order to
     * stat files, but not to stat subdirectories. */
    if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
        no_read_access = 1;
    /* special case lookup of ".".  Can we check for it sooner in this code,
     * for instance, way up before "redo:" ??
     * I'm not fiddling with the LRUQ here, either, perhaps I should, or else
     * invent a lightweight version of GetVCache.
     */
    if (aname[0] == '.' && !aname[1]) {	/* special case */
        ObtainReadLock(&afs_xvcache);
        osi_vnhold(adp, 0);
        ReleaseReadLock(&afs_xvcache);
        code = 0;
        *avcp = tvc = adp;
        hit = 1;
        if (adp && !VREFCOUNT(adp)) {
            osi_Panic("TT2");
        }
        goto done;
    }
    Check_AtSys(adp, aname, &sysState, &treq);
    tname = sysState.name;

    /* 1st Check_AtSys and lookup by tname is required here, for now,
     * because the dnlc is *not* told to remove entries for the parent
     * dir of file/dir op that afs_LocalHero likes, but dnlc is informed
     * if the cached entry for the parent dir is invalidated for a
     * non-local change.
     * Otherwise, we'd be able to do a dnlc lookup on an entry ending
     * with @sys and know the dnlc was consistent with reality. */
    tvc = osi_dnlc_lookup(adp, tname, WRITE_LOCK);
    *avcp = tvc;		/* maybe wasn't initialized, but it is now */
    if (tvc) {
        if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
            /* need read access on dir to stat non-directory / non-link */
            afs_PutVCache(tvc);
            *avcp = NULL;
            code = EACCES;
            goto done;
        }
#ifdef AFS_LINUX22_ENV
        if (tvc->mvstat == 2) {	/* we don't trust the dnlc for root vcaches */
            AFS_RELE(AFSTOV(tvc));
            *avcp = 0;
        } else {
            code = 0;
            hit = 1;
            goto done;
        }
#else /* non-Linux */
        code = 0;
        hit = 1;
        goto done;
#endif /* AFS_LINUX22_ENV */
    }

    {	/* sub-block just to reduce stack usage */
        register struct dcache *tdc;
        afs_size_t dirOffset, dirLen;
        ino_t theDir;
        struct VenusFid tfid;

        /* now we have to lookup the next fid */
        tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
        if (!tdc) {
            *avcp = NULL;	/* redundant, but harmless */
            code = EIO;
            goto done;
        }
        /* now we will just call dir package with appropriate inode.
         * Dirs are always fetched in their entirety for now */
        ObtainReadLock(&adp->lock);
        ObtainReadLock(&tdc->lock);

        /*
         * Make sure that the data in the cache is current.  There are two
         * cases we need to worry about:
         * 1. The cache data is being fetched by another process.
         * 2. The cache data is no longer valid.
         */
        while ((adp->states & CStatd)
               && (tdc->dflags & DFFetching)
               && hsame(adp->m.DataVersion, tdc->f.versionNo)) {
            ReleaseReadLock(&tdc->lock);
            ReleaseReadLock(&adp->lock);
            afs_osi_Sleep(&tdc->validPos);
            ObtainReadLock(&adp->lock);
            ObtainReadLock(&tdc->lock);
        }
        if (!(adp->states & CStatd)
            || !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
            ReleaseReadLock(&tdc->lock);
            ReleaseReadLock(&adp->lock);
            afs_PutDCache(tdc);
            goto redo;
        }

        /* Save the version number for when we call osi_dnlc_enter */
        hset(versionNo, tdc->f.versionNo);
        /*
         * check for, and handle "@sys" if it's there.  We should be able
         * to avoid the alloc and the strcpy with a little work, but it's
         * not pressing.  If there aren't any remote users (ie, via the
         * NFS translator), we have a slightly easier job.
         * The faster way to do this is to check for *aname == '@' and, if
         * it's there, check for @sys; otherwise, assume there's no @sys,
         * then, if the lookup fails, check for .*@sys...
         */
        /* above now implemented by Check_AtSys and Next_AtSys */

        /* lookup the name in the appropriate dir, and return a cache entry
         * on the resulting fid */
        theDir = tdc->f.inode;
        code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);

        /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
        while (code == ENOENT && Next_AtSys(adp, &treq, &sysState)) {
            code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
        }
        tname = sysState.name;

        ReleaseReadLock(&tdc->lock);
        afs_PutDCache(tdc);
        if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
            struct cell *tcell;

            ReleaseReadLock(&adp->lock);
            dynrootRetry = 0;
            if (tname[0] == '.')
                tcell = afs_GetCellByName(tname + 1, READ_LOCK);
            else
                tcell = afs_GetCellByName(tname, READ_LOCK);
            if (tcell) {
                afs_PutCell(tcell, READ_LOCK);
                afs_RefreshDynroot();
                if (tname != aname && tname)
                    osi_FreeLargeSpace(tname);
                goto redo;
            }
        } else
            ReleaseReadLock(&adp->lock);
        /* new fid has same cell and volume */
        tfid.Cell = adp->fid.Cell;
        tfid.Fid.Volume = adp->fid.Fid.Volume;
        afs_Trace4(afs_iclSetp, CM_TRACE_LOOKUP, ICL_TYPE_POINTER, adp,
                   ICL_TYPE_STRING, tname,
                   ICL_TYPE_FID, &tfid, ICL_TYPE_INT32, code);

        if (code) {
            if (code != ENOENT) {
                printf("LOOKUP dirLookupOff -> %d\n", code);
            }
            goto done;
        }
        /* prefetch some entries, if the dir is currently open.  The variable
         * dirCookie tells us where to start prefetching from.
         */
        if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign)
            && !afs_IsDynroot(adp)) {
            int retry;
            /* if the entry is not in the cache, or is in the cache,
             * but hasn't been statd, then do a bulk stat operation.
             */
            do {
                retry = 0;
                ObtainReadLock(&afs_xvcache);
                tvc = afs_FindVCache(&tfid, &retry, 0 /* !stats,!lru */);
                ReleaseReadLock(&afs_xvcache);
            } while (tvc && retry);

            if (!tvc || !(tvc->states & CStatd))
                bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
            else
                bulkcode = 0;
            /* if the vcache isn't usable, release it */
            if (tvc && !(tvc->states & CStatd)) {
                afs_PutVCache(tvc);
                tvc = NULL;
            }
        } else {
            tvc = NULL;
            bulkcode = 0;
        }

        /* now get the status info, if we don't already have it */
        /* This is kind of weird, but we might wind up accidentally calling
         * RXAFS_Lookup because we happened upon a file which legitimately
         * has a 0 uniquifier.  That is the result of allowing unique to wrap
         * to 0.  This was fixed in AFS 3.4.  For CForeign, Unique == 0 means that
         * the file has not yet been looked up.
         */
        if (!tvc) {
            afs_int32 cached = 0;
            if (!tfid.Fid.Unique && (adp->states & CForeign)) {
                tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
            }
            if (!tvc && !bulkcode) {	/* lookup failed or wasn't called */
                tvc = afs_GetVCache(&tfid, &treq, &cached, NULL);
            }
        }	/* if !tvc */
    }	/* sub-block just to reduce stack usage */
    if (tvc) {
        if (adp->states & CForeign)
            tvc->states |= CForeign;
        tvc->parentVnode = adp->fid.Fid.Vnode;
        tvc->parentUnique = adp->fid.Fid.Unique;
        tvc->states &= ~CBulkStat;
#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
        if (!(flags & AFS_LOOKUP_NOEVAL))
            /* don't eval mount points */
#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
        if (!afs_fakestat_enable && tvc->mvstat == 1) {
            /* a mount point, possibly unevaluated */
            struct volume *tvolp;

            ObtainWriteLock(&tvc->lock, 133);
            code = EvalMountPoint(tvc, adp, &tvolp, &treq);
            ReleaseWriteLock(&tvc->lock);

            if (code) {
                if (tvolp)
                    afs_PutVolume(tvolp, WRITE_LOCK);
                goto done;
            }
            /* next, we want to continue using the target of the mt point */
            if (tvc->mvid && (tvc->states & CMValid)) {
                struct vcache *uvc;
                /* now lookup target, to set .. pointer */
                afs_Trace2(afs_iclSetp, CM_TRACE_LOOKUP1,
                           ICL_TYPE_POINTER, tvc, ICL_TYPE_FID, &tvc->fid);
                uvc = tvc;	/* remember for later */

                if (tvolp && (tvolp->states & VForeign)) {
                    /* XXXX tvolp has ref cnt on but not locked! XXX */
                    tvc = afs_GetRootVCache(tvc->mvid, &treq, NULL, tvolp);
                } else {
                    tvc = afs_GetVCache(tvc->mvid, &treq, NULL, NULL);
                }
                afs_PutVCache(uvc);	/* we're done with it */
                if (!tvc) {
                    code = ENOENT;	/* XXX */
                    if (tvolp) {
                        afs_PutVolume(tvolp, WRITE_LOCK);
                    }
                    goto done;
                }

                /* now, if we came via a new mt pt (say because of a new
                 * release of a R/O volume), we must reevaluate the ..
                 * ptr to point back to the appropriate place */
                if (tvolp) {
                    ObtainWriteLock(&tvc->lock, 134);
                    if (tvc->mvid == NULL) {
                        tvc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
                    }
                    /* setup backpointer */
                    *tvc->mvid = tvolp->dotdot;
                    ReleaseWriteLock(&tvc->lock);
                    afs_PutVolume(tvolp, WRITE_LOCK);
                }
            } else {
                /* the mount point couldn't be evaluated */
                afs_PutVCache(tvc);
                code = ENOENT;	/* XXX */
                if (tvolp)
                    afs_PutVolume(tvolp, WRITE_LOCK);
                goto done;
            }
        }
        *avcp = tvc;
        if (tvc && !VREFCOUNT(tvc)) {
            osi_Panic("TT3");
        }
        code = 0;
    } else {
        /* if we get here, we found something in a directory that couldn't
         * be located (a Multics "connection failure").  If the volume is
         * read-only, we try flushing this entry from the cache and trying
         * again.
         */
        if (pass == 0) {
            struct volume *tv;
            tv = afs_GetVolume(&adp->fid, &treq, READ_LOCK);
            if (tv) {
                if (tv->states & VRO) {
                    pass = 1;	/* try this *once* */
                    ObtainWriteLock(&afs_xcbhash, 495);
                    afs_DequeueCallback(adp);
                    /* re-stat to get later version */
                    adp->states &= ~CStatd;
                    ReleaseWriteLock(&afs_xcbhash);
                    osi_dnlc_purgedp(adp);
                    afs_PutVolume(tv, READ_LOCK);
                    goto redo;
                }
                afs_PutVolume(tv, READ_LOCK);
            }
        }
        code = ENOENT;
    }

done:
    /* put the network buffer back, if need be */
    if (tname != aname && tname)
        osi_FreeLargeSpace(tname);
    if (code == 0) {
#ifdef AFS_OSF_ENV
        /* Handle RENAME; only need to check rename "." */
        if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
            if (!FidCmp(&(tvc->fid), &(adp->fid))) {
                afs_PutVCache(*avcp);
                *avcp = NULL;
                afs_PutFakeStat(&fakestate);
                return afs_CheckCode(EISDIR, &treq, 18);
            }
        }
#endif /* AFS_OSF_ENV */
        if (afs_mariner)
            afs_AddMarinerName(aname, tvc);

#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
        if (!(flags & AFS_LOOKUP_NOEVAL))
            /* Here we don't enter the name into the DNLC because we want the
             * evaluated mount dir to be there (the vcache for the mounted
             * volume) rather than the vc of the mount point itself.  We can
             * still find the mount point's vc in the vcache by its fid. */
#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
            if (!hit) {
                osi_dnlc_enter(adp, aname, tvc, &versionNo);
            }
            else {
#ifdef AFS_LINUX20_ENV
                /* So the Linux inode cache is up to date. */
                code = afs_VerifyVCache(tvc, &treq);
#else
                afs_PutFakeStat(&fakestate);
                return 0;	/* can't have been any errors if hit and !code */
#endif
            }
    }
    if (bulkcode)
        code = bulkcode;
    else
        code = afs_CheckCode(code, &treq, 19);
    if (code) {
        /* If there is an error, make sure *avcp is null.
         * Alphas panic otherwise - defect 10719.
         */
        *avcp = NULL;
    }

    afs_PutFakeStat(&fakestate);
    return code;
}