2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
18 * AFS_EQ_ATSYS (macro)
22 #include <afsconfig.h>
23 #include "afs/param.h"
27 #include "afs/sysincludes.h" /* Standard vendor system headers */
28 #include "afsincludes.h" /* Afs-based standard headers */
29 #include "afs/afs_stats.h" /* statistics */
30 #include "afs/afs_cbqueue.h"
31 #include "afs/nfsclient.h"
32 #include "afs/exporter.h"
33 #include "afs/afs_osidnlc.h"
/* File-scope declarations and tunables used by the lookup/bulk-stat code below. */
36 extern struct DirEntry * afs_dir_GetBlob();
38 #ifdef AFS_LINUX22_ENV
/* Linux-only vnode op tables, installed on dirs/symlinks after a bulk stat. */
39 extern struct inode_operations afs_symlink_iops, afs_dir_iops;
/* NOTE(review): presumably a statistics counter for completed bulk stats;
 * not updated in the visible code — confirm against the rest of the file. */
43 afs_int32 afs_bulkStatsDone;
44 static int bulkStatCounter = 0; /* counter for bulk stat seq. numbers */
45 int afs_fakestat_enable = 0; /* 1: fakestat-all, 2: fakestat-crosscell */
48 /* this would be faster if it did comparison as int32word, but would be
49 * dependent on byte-order and alignment, and I haven't figured out
50 * what "@sys" is in binary... */
/* True iff `name` is exactly the 4-character string "@sys" (checks the NUL at [4]). */
51 #define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
53 /* call under write lock, evaluate mvid field from a mt pt.
54 * avc is the vnode of the mount point object; must be write-locked.
55 * advc is the vnode of the containing directory (optional; if NULL and
56 * EvalMountPoint succeeds, caller must initialize *avolpp->dotdot)
57 * avolpp is where we return a pointer to the volume named by the mount pt, if success
58 * areq is the identity of the caller.
60 * NOTE: this function returns a held volume structure in *volpp if it returns 0!
62 int EvalMountPoint(register struct vcache *avc, struct vcache *advc,
63 struct volume **avolpp, register struct vrequest *areq)
66 struct volume *tvp = 0;
69 char *cpos, *volnamep;
71 afs_int32 prefetchRO; /* 1=>No 2=>Yes */
72 afs_int32 mtptCell, assocCell, hac=0;
73 afs_int32 samecell, roname, len;
75 AFS_STATCNT(EvalMountPoint);
/* Fast path: another thread already evaluated this mount point. */
77 if (avc->mvid && (avc->states & CMValid)) return 0; /* done while racing */
/* Fetch the mount point's link data ("#cell:volume." style string). */
80 code = afs_HandleLink(avc, areq);
81 if (code) return code;
83 /* Determine which cell and volume the mountpoint goes to */
84 type = avc->linkData[0]; /* '#'=>Regular '%'=>RW */
85 cpos = afs_strchr(&avc->linkData[1], ':'); /* if cell name present */
89 tcell = afs_GetCellByName(&avc->linkData[1], READ_LOCK);
/* No explicit cell name: volume name starts right after the type char,
 * and the cell defaults to the mount point's own cell. */
92 volnamep = &avc->linkData[1];
93 tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
95 if (!tcell) return ENODEV;
97 mtptCell = tcell->cellNum; /* The cell for the mountpoint */
99 hac = 1; /* has associated cell */
100 assocCell = tcell->lcellp->cellNum; /* The associated cell */
102 afs_PutCell(tcell, READ_LOCK);
104 /* Is volume name a "<n>.backup" or "<n>.readonly" name */
105 len = strlen(volnamep);
106 roname = ((len > 9) && (strcmp(&volnamep[len - 9],".readonly") == 0)) ||
107 ((len > 7) && (strcmp(&volnamep[len - 7],".backup") == 0));
109 /* When we cross mountpoint, do we stay in the same cell */
110 samecell = (avc->fid.Cell == mtptCell) || (hac && (avc->fid.Cell == assocCell));
112 /* Decide whether to prefetch the RO. Also means we want the RO.
113 * If this is a regular mountpoint with a RW volume name and
114 * we cross a cell boundary -or- start from a RO volume, then we will
115 * want to prefetch the RO volume when we get the RW below.
117 if ( (type == '#') && !roname && (!samecell || (avc->states & CRO)) ) {
118 prefetchRO = 2; /* Yes, prefetch the RO */
120 prefetchRO = 1; /* No prefetch of the RO */
123 /* Get the volume struct. Unless this volume name has ".readonly" or
124 * ".backup" in it, this will get the volume struct for the RW volume.
125 * The RO volume will be prefetched if requested (but not returned).
127 tvp = afs_GetVolumeByName(volnamep, mtptCell, prefetchRO, areq, WRITE_LOCK);
129 /* If no volume was found in this cell, try the associated linked cell */
130 if (!tvp && hac && areq->volumeError) {
131 tvp = afs_GetVolumeByName(volnamep, assocCell, prefetchRO, areq, WRITE_LOCK);
134 /* Still not found. If we are looking for the RO, then perhaps the RW
135 * doesn't exist? Try adding ".readonly" to volname and look for that.
136 * Don't know why we do this. Would have still found it in above call - jpm.
138 if (!tvp && (prefetchRO == 2)) {
/* +10 leaves room for the ".readonly" suffix plus the terminating NUL. */
139 buf = (char *)osi_AllocSmallSpace(strlen(volnamep)+10);
141 strcpy(buf, volnamep);
142 afs_strcat(buf, ".readonly");
144 tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
146 /* Try the associated linked cell if failed */
147 if (!tvp && hac && areq->volumeError) {
148 tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
150 osi_FreeSmallSpace(buf);
153 if (!tvp) return ENODEV; /* Couldn't find the volume */
155 /* Don't cross mountpoint from a BK to a BK volume */
156 if ((avc->states & CBackup) && (tvp->states & VBackup)) {
157 afs_PutVolume(tvp, WRITE_LOCK);
161 /* If we want (prefetched) the RO and it exists, then drop the
162 * RW volume and get the RO. Otherwise, go with the RW.
164 if ((prefetchRO == 2) && tvp->roVol) {
165 tfid.Fid.Volume = tvp->roVol; /* remember RO volume */
166 tfid.Cell = tvp->cell;
167 afs_PutVolume(tvp, WRITE_LOCK); /* release old volume */
168 tvp = afs_GetVolume(&tfid, areq, WRITE_LOCK); /* get the new one */
169 if (!tvp) return ENODEV; /* oops, can't do it */
/* Cache the evaluated target in the mount point's mvid and mark it valid.
 * Vnode 1 / Unique 1 is the volume root fid by AFS convention. */
173 avc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
174 avc->mvid->Cell = tvp->cell;
175 avc->mvid->Fid.Volume = tvp->volume;
176 avc->mvid->Fid.Vnode = 1;
177 avc->mvid->Fid.Unique = 1;
178 avc->states |= CMValid;
180 /* Used to: if the mount point is stored within a backup volume,
181 * then we should only update the parent pointer information if
182 * there's none already set, so as to avoid updating a volume's ..
183 * info with something in an OldFiles directory.
185 * Next two lines used to be under this if:
187 * if (!(avc->states & CBackup) || tvp->dotdot.Fid.Volume == 0)
189 * Now: update mount point back pointer on every call, so that we handle
190 * multiple mount points better. This way, when du tries to go back
191 * via chdir(".."), it will end up exactly where it started, yet
192 * cd'ing via a new path to a volume will reset the ".." pointer
195 tvp->mtpoint = avc->fid; /* setup back pointer to mtpoint */
196 if (advc) tvp->dotdot = advc->fid;
205 * Must be called on an afs_fakestat_state object before calling
206 * afs_EvalFakeStat or afs_PutFakeStat. Calling afs_PutFakeStat
207 * without calling afs_EvalFakeStat is legal, as long as this
208 * function is called.
/* Initialize a fakestat state object. Must be called before afs_EvalFakeStat
 * or afs_PutFakeStat (see the comment above); no-op if fakestat is disabled. */
210 void afs_InitFakeStat(struct afs_fakestat_state *state)
212 if (!afs_fakestat_enable)
/* Nothing held yet; afs_PutFakeStat will release only if this becomes 1. */
217 state->need_release = 0;
221 * afs_EvalFakeStat_int
223 * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
224 * which is called by those wrapper functions.
226 * Only issues RPCs if canblock is non-zero.
/* Core of afs_EvalFakeStat/afs_TryEvalFakeStat: if *avcp is a mount point,
 * evaluate it and swap in the target volume's root vcache. `canblock`
 * controls whether RPCs may be issued (0 => best-effort, cache-only). */
228 int afs_EvalFakeStat_int(struct vcache **avcp, struct afs_fakestat_state *state,
229 struct vrequest *areq, int canblock)
231 struct vcache *tvc, *root_vp;
232 struct volume *tvolp = NULL;
235 if (!afs_fakestat_enable)
/* Caller must have gone through afs_InitFakeStat exactly once. */
238 osi_Assert(state->valid == 1);
239 osi_Assert(state->did_eval == 0);
/* mvstat != 1 => not a mount point; nothing to evaluate. */
243 if (tvc->mvstat != 1)
246 /* Is the call to VerifyVCache really necessary? */
247 code = afs_VerifyVCache(tvc, areq);
251 ObtainWriteLock(&tvc->lock, 599);
252 code = EvalMountPoint(tvc, NULL, &tvolp, areq);
253 ReleaseWriteLock(&tvc->lock);
/* Record the parent ("..") fid in the volume, since we passed a NULL
 * advc to EvalMountPoint above. */
257 tvolp->dotdot = tvc->fid;
258 tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
259 tvolp->dotdot.Fid.Unique = tvc->parentUnique;
262 if (tvc->mvid && (tvc->states & CMValid)) {
/* Retry loop: afs_FindVCache can report an SGI MP race via `retry`;
 * drop everything and look again until the lookup is stable. */
268 ObtainWriteLock(&afs_xvcache, 597);
269 root_vp = afs_FindVCache(tvc->mvid, &retry, 0);
270 if (root_vp && retry) {
271 ReleaseWriteLock(&afs_xvcache);
272 afs_PutVCache(root_vp);
274 } while (root_vp && retry);
275 ReleaseWriteLock(&afs_xvcache);
/* Not in the cache: only fetch it if we are allowed to block/RPC. */
277 root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL);
280 code = canblock ? ENOENT : 0;
284 /* Is this always kosher? Perhaps we should instead use
285 * NBObtainWriteLock to avoid potential deadlock.
287 ObtainWriteLock(&root_vp->lock, 598);
289 root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
290 *root_vp->mvid = tvolp->dotdot;
291 ReleaseWriteLock(&root_vp->lock);
/* Hand the held root vcache to the caller; afs_PutFakeStat releases it. */
293 state->need_release = 1;
294 state->root_vp = root_vp;
298 code = canblock ? ENOENT : 0;
303 afs_PutVolume(tvolp, WRITE_LOCK);
310 * Automatically does the equivalent of EvalMountPoint for vcache entries
311 * which are mount points. Remembers enough state to properly release
312 * the volume root vcache when afs_PutFakeStat() is called.
314 * State variable must be initialized by afs_InitFakeState() beforehand.
316 * Returns 0 when everything succeeds and *avcp points to the vcache entry
317 * that should be used for the real vnode operation. Returns non-zero if
318 * something goes wrong and the error code should be returned to the user.
/* Blocking wrapper: evaluate a mount point, issuing RPCs if needed
 * (canblock = 1). See afs_EvalFakeStat_int for the contract. */
321 afs_EvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
322 struct vrequest *areq)
324 return afs_EvalFakeStat_int(avcp, state, areq, 1);
328 * afs_TryEvalFakeStat
330 * Same as afs_EvalFakeStat, but tries not to talk to remote servers
331 * and only evaluate the mount point if all the data is already in
334 * Returns 0 if everything succeeds and *avcp points to a valid
335 * vcache entry (possibly evaluated).
/* Non-blocking wrapper: evaluate the mount point only from cached data
 * (canblock = 0); never talks to a server. */
337 int afs_TryEvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
338 struct vrequest *areq)
340 return afs_EvalFakeStat_int(avcp, state, areq, 0);
346 * Perform any necessary cleanup at the end of a vnode op, given that
347 * afs_InitFakeStat was previously called with this state.
/* Release any vcache reference taken by afs_EvalFakeStat_int for this
 * state object; safe to call even if no evaluation happened. */
349 void afs_PutFakeStat(struct afs_fakestat_state *state)
351 if (!afs_fakestat_enable)
354 osi_Assert(state->valid == 1);
355 if (state->need_release)
356 afs_PutVCache(state->root_vp);
/* Returns 0 ("not OK") for names ending in "@sys" — such names require
 * sysname substitution and must not be taken at face value. */
360 int afs_ENameOK(register char *aname)
364 AFS_STATCNT(ENameOK);
365 tlen = strlen(aname);
366 if (tlen >= 4 && strcmp(aname+tlen-4, "@sys") == 0) return 0;
/* Copy the @sys substitution name for this request into bufp.
 * Without an NFS exporter, the global afs_sysname is used; otherwise
 * the name comes from the requesting user's exporter (EXP_SYSNAME),
 * with the fallbacks visible below. NOTE(review): return-value semantics
 * are in elided lines — confirm against the full source. */
370 int afs_getsysname(register struct vrequest *areq, register struct vcache *adp,
373 register struct unixuser *au;
374 register afs_int32 error;
376 if (!afs_nfsexporter) {
377 strcpy(bufp, afs_sysname);
380 AFS_STATCNT(getsysname);
381 au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
384 error = EXP_SYSNAME(au->exporter, NULL, bufp);
/* Exporter could not supply a name: leave the literal "@sys". */
386 strcpy(bufp, "@sys");
389 strcpy(bufp, afs_sysname);
/* Prime a sysname_info state for `aname`: if the name is exactly "@sys",
 * allocate a buffer and fill it with the first substitution candidate
 * via afs_getsysname. Used together with Next_AtSys below. */
394 void Check_AtSys(register struct vcache *avc, const char *aname,
395 struct sysname_info *state, struct vrequest *areq)
397 if (AFS_EQ_ATSYS(aname)) {
399 state->name = (char *) osi_AllocLargeSpace(AFS_SMALLOCSIZ);
401 state->index = afs_getsysname(areq, avc, state->name);
/* Advance the sysname_info state to the next "@sys" substitution candidate.
 * Returns 0 when there are no (more) candidates; otherwise rewrites the
 * suffix of state->name in place from afs_sysnamelist. */
410 int Next_AtSys(register struct vcache *avc, struct vrequest *areq,
411 struct sysname_info *state)
413 if (state->index == -1)
414 return 0; /* No list */
416 /* Check for the initial state of aname != "@sys" in Check_AtSys */
417 if (state->offset == -1 && state->allocked == 0) {
418 register char *tname;
419 /* Check for .*@sys */
420 for (tname=state->name; *tname; tname++)
421 /*Move to the end of the string*/;
/* `tname > state->name + 4` ensures a non-empty prefix before "@sys". */
422 if ((tname > state->name + 4) && (AFS_EQ_ATSYS(tname-4))) {
423 state->offset = (tname - 4) - state->name;
424 tname = (char *) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
425 strncpy(tname, state->name, state->offset);
428 state->index = afs_getsysname(areq, avc, state->name+state->offset);
431 return 0; /* .*@sys doesn't match either */
432 } else if (++(state->index) >= afs_sysnamecount
433 || !afs_sysnamelist[(int)state->index])
434 return 0; /* end of list */
435 strcpy(state->name+state->offset, afs_sysnamelist[(int)state->index]);
/* BlobScan prototype varies by platform: the directory "inode" handle type
 * differs (ino64_t / long / afs_int32) across kernels. */
439 #if (defined(AFS_SGI62_ENV) || defined(AFS_SUN57_64BIT_ENV))
440 extern int BlobScan(ino64_t *afile, afs_int32 ablob);
442 #if defined AFS_LINUX_64BIT_KERNEL
443 extern int BlobScan(long *afile, afs_int32 ablob);
445 extern int BlobScan(afs_int32 *afile, afs_int32 ablob);
450 /* called with an unlocked directory and directory cookie. Areqp
451 * describes who is making the call.
452 * Scans the next N (about 30, typically) directory entries, and does
453 * a bulk stat call to stat them all.
455 * Must be very careful when merging in RPC responses, since we don't
456 * want to overwrite newer info that was added by a file system mutating
457 * call that ran concurrently with our bulk stat call.
459 * We do that, as described below, by not merging in our info (always
460 * safe to skip the merge) if the status info is valid in the vcache entry.
462 * If adapt ever implements the bulk stat RPC, then this code will need to
463 * ensure that vcaches created for failed RPC's to older servers have the
/* NOTE(review): debugging hook — not referenced in the visible code here. */
466 static struct vcache *BStvc = NULL;
468 int afs_DoBulkStat(struct vcache *adp, long dirCookie, struct vrequest *areqp)
470 int nentries; /* # of entries to prefetch */
471 int nskip; /* # of slots in the LRU queue to skip */
472 struct vcache *lruvcp; /* vcache ptr of our goal pos in LRU queue */
473 struct dcache *dcp; /* chunk containing the dir block */
474 char *statMemp; /* status memory block */
475 char *cbfMemp; /* callback and fid memory block */
476 afs_size_t temp; /* temp for holding chunk length, &c. */
477 struct AFSFid *fidsp; /* file IDs we're collecting */
478 struct AFSCallBack *cbsp; /* call back pointers */
479 struct AFSCallBack *tcbp; /* temp callback ptr */
480 struct AFSFetchStatus *statsp; /* file status info */
481 struct AFSVolSync volSync; /* vol sync return info */
482 struct vcache *tvcp; /* temp vcp */
483 struct afs_q *tq; /* temp queue variable */
484 AFSCBFids fidParm; /* file ID parm for bulk stat */
485 AFSBulkStats statParm; /* stat info parm for bulk stat */
486 int fidIndex; /* which file we're stating */
487 struct conn *tcp; /* conn for call */
488 AFSCBs cbParm; /* callback parm for bulk stat */
489 struct server *hostp = 0; /* host we got callback from */
490 long startTime; /* time we started the call,
491 * for callback expiration base
493 afs_size_t statSeqNo; /* Value of file size used to detect races */
494 int code; /* error code */
495 long newIndex; /* new index in the dir */
496 struct DirEntry *dirEntryp; /* dir entry we are examining */
498 struct VenusFid afid; /* file ID we are using now */
499 struct VenusFid tfid; /* another temp. file ID */
500 afs_int32 retry; /* handle low-level SGI MP race conditions */
501 long volStates; /* flags from vol structure */
502 struct volume *volp=0; /* volume ptr */
503 struct VenusFid dotdot;
504 int flagIndex; /* First file with bulk fetch flag set */
505 int inlinebulk=0; /* Did we use InlineBulk RPC or not? */
508 /* first compute some basic parameters. We dont want to prefetch more
509 * than a fraction of the cache in any given call, and we want to preserve
510 * a portion of the LRU queue in any event, so as to avoid thrashing
511 * the entire stat cache (we will at least leave some of it alone).
512 * presently dont stat more than 1/8 the cache in any one call. */
513 nentries = afs_cacheStats / 8;
515 /* dont bother prefetching more than one calls worth of info */
516 if (nentries > AFSCBMAX) nentries = AFSCBMAX;
518 /* heuristic to make sure that things fit in 4K. This means that
519 * we shouldnt make it any bigger than 47 entries. I am typically
520 * going to keep it a little lower, since we don't want to load
521 * too much of the stat cache.
523 if (nentries > 30) nentries = 30;
525 /* now, to reduce the stack size, well allocate two 4K blocks,
526 * one for fids and callbacks, and one for stat info. Well set
527 * up our pointers to the memory from there, too.
529 statMemp = osi_AllocLargeSpace(nentries * sizeof(AFSFetchStatus));
530 statsp = (struct AFSFetchStatus *) statMemp;
531 cbfMemp = osi_AllocLargeSpace(nentries *
532 (sizeof(AFSCallBack) + sizeof(AFSFid)));
533 fidsp = (AFSFid *) cbfMemp;
534 cbsp = (AFSCallBack *) (cbfMemp + nentries * sizeof(AFSFid));
536 /* next, we must iterate over the directory, starting from the specified
537 * cookie offset (dirCookie), and counting out nentries file entries.
538 * We skip files that already have stat cache entries, since we
539 * dont want to bulk stat files that are already in the cache.
542 code = afs_VerifyVCache(adp, areqp);
545 dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
551 /* lock the directory cache entry */
552 ObtainReadLock(&adp->lock);
553 ObtainReadLock(&dcp->lock);
556 * Make sure that the data in the cache is current. There are two
557 * cases we need to worry about:
558 * 1. The cache data is being fetched by another process.
559 * 2. The cache data is no longer valid
561 while ((adp->states & CStatd)
562 && (dcp->dflags & DFFetching)
563 && hsame(adp->m.DataVersion, dcp->f.versionNo)) {
564 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
565 ICL_TYPE_STRING, __FILE__,
566 ICL_TYPE_INT32, __LINE__,
567 ICL_TYPE_POINTER, dcp,
568 ICL_TYPE_INT32, dcp->dflags);
/* Drop both locks before sleeping so the fetcher can make progress. */
569 ReleaseReadLock(&dcp->lock);
570 ReleaseReadLock(&adp->lock);
571 afs_osi_Sleep(&dcp->validPos);
572 ObtainReadLock(&adp->lock);
573 ObtainReadLock(&dcp->lock);
575 if (!(adp->states & CStatd)
576 || !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
577 ReleaseReadLock(&dcp->lock);
578 ReleaseReadLock(&adp->lock);
583 /* Generate a sequence number so we can tell whether we should
584 * store the attributes when processing the response. This number is
585 * stored in the file size when we set the CBulkFetching bit. If the
586 * CBulkFetching is still set and this value hasn't changed, then
587 * we know we were the last to set CBulkFetching bit for this file,
588 * and it is safe to set the status information for this file.
590 statSeqNo = bulkStatCounter++;
592 /* now we have dir data in the cache, so scan the dir page */
595 while (1) { /* Should probably have some constant bound */
596 /* look for first safe entry to examine in the directory. BlobScan
597 * looks for the 1st allocated dir after the dirCookie slot.
599 newIndex = BlobScan(&dcp->f.inode, (dirCookie>>5));
600 if (newIndex == 0) break;
602 /* remember the updated directory cookie */
603 dirCookie = newIndex << 5;
605 /* get a ptr to the dir entry */
606 dirEntryp =(struct DirEntry *)afs_dir_GetBlob(&dcp->f.inode, newIndex);
607 if (!dirEntryp) break;
609 /* dont copy more than we have room for */
610 if (fidIndex >= nentries) {
611 DRelease((struct buffer *) dirEntryp, 0);
615 /* now, if the dir entry looks good, copy it out to our list. Vnode
616 * 0 means deleted, although it should also be free were it deleted.
618 if (dirEntryp->fid.vnode != 0) {
619 /* dont copy entries we have in our cache. This check will
620 * also make us skip "." and probably "..", unless it has
621 * disappeared from the cache since we did our namei call.
623 tfid.Cell = adp->fid.Cell;
624 tfid.Fid.Volume = adp->fid.Fid.Volume;
625 tfid.Fid.Vnode = ntohl(dirEntryp->fid.vnode);
626 tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
629 ObtainWriteLock(&afs_xvcache, 130);
630 tvcp = afs_FindVCache(&tfid, &retry, 0 /* no stats | LRU */);
632 ReleaseWriteLock(&afs_xvcache);
635 } while (tvcp && retry);
636 if (!tvcp) { /* otherwise, create manually */
637 tvcp = afs_NewVCache(&tfid, hostp);
638 ObtainWriteLock(&tvcp->lock, 505);
639 ReleaseWriteLock(&afs_xvcache);
640 afs_RemoveVCB(&tfid);
641 ReleaseWriteLock(&tvcp->lock);
643 ReleaseWriteLock(&afs_xvcache);
646 goto done; /* can't happen at present, more's the pity */
648 /* WARNING: afs_DoBulkStat uses the Length field to store a
649 * sequence number for each bulk status request. Under no
650 * circumstances should afs_DoBulkStat store a sequence number
651 * if the new length will be ignored when afs_ProcessFS is
652 * called with new stats. */
654 if (!(tvcp->states & (CStatd|CBulkFetching))
655 && (tvcp->execsOrWriters <= 0)
656 && !afs_DirtyPages(tvcp)
657 && !AFS_VN_MAPPED((vnode_t*)tvcp))
659 if (!(tvcp->states & (CStatd|CBulkFetching))
660 && (tvcp->execsOrWriters <= 0)
661 && !afs_DirtyPages(tvcp))
665 /* this entry doesnt exist in the cache, and is not
666 * already being fetched by someone else, so add it to the
667 * list of file IDs to obtain.
669 * We detect a callback breaking race condition by checking the
670 * CBulkFetching state bit and the value in the file size.
671 * It is safe to set the status only if the CBulkFetching
672 * flag is still set and the value in the file size does
675 * Don't fetch status for dirty files. We need to
676 * preserve the value of the file size. We could
677 * flush the pages, but it wouldn't be worthwhile.
679 memcpy((char *)(fidsp+fidIndex), (char *) &tfid.Fid, sizeof(*fidsp));
680 tvcp->states |= CBulkFetching;
681 tvcp->m.Length = statSeqNo;
685 } /* if dir vnode has non-zero entry */
687 /* move to the next dir entry by adding in the # of entries
688 * used by this dir entry.
690 temp = afs_dir_NameBlobs(dirEntryp->name) << 5;
691 DRelease((struct buffer *) dirEntryp, 0);
692 if (temp <= 0) break;
694 } /* while loop over all dir entries */
696 /* now release the dir lock and prepare to make the bulk RPC */
697 ReleaseReadLock(&dcp->lock);
698 ReleaseReadLock(&adp->lock);
700 /* release the chunk */
703 /* dont make a null call */
704 if (fidIndex == 0) goto done;
707 /* setup the RPC parm structures */
708 fidParm.AFSCBFids_len = fidIndex;
709 fidParm.AFSCBFids_val = fidsp;
710 statParm.AFSBulkStats_len = fidIndex;
711 statParm.AFSBulkStats_val = statsp;
712 cbParm.AFSCBs_len = fidIndex;
713 cbParm.AFSCBs_val = cbsp;
715 /* start the timer; callback expirations are relative to this */
716 startTime = osi_Time();
718 tcp = afs_Conn(&adp->fid, areqp, SHARED_LOCK);
720 hostp = tcp->srvr->server;
721 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
/* Prefer InlineBulkStatus; fall back (and remember, via SNO_INLINEBULK)
 * when the server does not implement that opcode. */
724 if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
725 code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
727 if (code == RXGEN_OPCODE) {
728 tcp->srvr->server->flags |= SNO_INLINEBULK;
730 code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
736 code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
743 } while (afs_Analyze(tcp, code, &adp->fid, areqp,
744 AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL));
746 /* now, if we didnt get the info, bail out. */
749 /* we need vol flags to create the entries properly */
750 dotdot.Fid.Volume = 0;
751 volp = afs_GetVolume(&adp->fid, areqp, READ_LOCK);
753 volStates = volp->states;
754 if (volp->dotdot.Fid.Volume != 0)
755 dotdot = volp->dotdot;
759 /* find the place to merge the info into. We do this by skipping
760 * nskip entries in the LRU queue. The more we skip, the more
761 * we preserve, since the head of the VLRU queue is the most recently
765 nskip = afs_cacheStats / 2; /* preserved fraction of the cache */
766 ObtainReadLock(&afs_xvcache);
768 /* actually a serious error, probably should panic. Probably will
769 * panic soon, oh well. */
770 ReleaseReadLock(&afs_xvcache);
771 afs_warnuser("afs_DoBulkStat: VLRU empty!");
774 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
775 refpanic ("Bulkstat VLRU inconsistent");
777 for(tq = VLRU.next; tq != &VLRU; tq = QNext(tq)) {
778 if (--nskip <= 0) break;
779 else if (QNext(QPrev(tq)) != tq) {
781 refpanic ("BulkStat VLRU inconsistent");
784 if (tq != &VLRU) lruvcp = QTOV(tq);
785 else lruvcp = QTOV(VLRU.next);
787 /* now we have to hold this entry, so that it does not get moved
788 * into the free list while we're running. It could still get
789 * moved within the lru queue, but hopefully that will be rare; it
790 * doesn't hurt nearly as much.
793 osi_vnhold(lruvcp, &retry);
794 ReleaseReadLock(&afs_xvcache); /* could be read lock */
798 /* otherwise, merge in the info. We have to be quite careful here,
799 * since we need to ensure that we don't merge old info over newer
800 * stuff in a stat cache entry. We're very conservative here: we don't
801 * do the merge at all unless we ourselves create the stat cache
802 * entry. That's pretty safe, and should work pretty well, since we
803 * typically expect to do the stat cache creation ourselves.
805 * We also have to take into account racing token revocations.
807 for(i=0; i<fidIndex; i++) {
/* Per-fid errors from InlineBulkStatus: skip entries the server rejected. */
808 if ((&statsp[i])->errorCode)
810 afid.Cell = adp->fid.Cell;
811 afid.Fid.Volume = adp->fid.Fid.Volume;
812 afid.Fid.Vnode = fidsp[i].Vnode;
813 afid.Fid.Unique = fidsp[i].Unique;
816 ObtainReadLock(&afs_xvcache);
817 tvcp = afs_FindVCache(&afid, &retry, 0/* !stats&!lru*/);
818 ReleaseReadLock(&afs_xvcache);
819 } while (tvcp && retry);
821 /* The entry may no longer exist */
826 /* now we have the entry held, but we need to fill it in */
827 ObtainWriteLock(&tvcp->lock,131);
829 /* if CBulkFetching is not set, or if the file size no longer
830 * matches the value we placed there when we set the CBulkFetching
831 * flag, then someone else has done something with this node,
832 * and we may not have the latest status information for this
833 * file. Leave the entry alone.
835 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
837 ReleaseWriteLock(&tvcp->lock);
842 /* now copy ".." entry back out of volume structure, if necessary */
843 if (tvcp->mvstat == 2 && (dotdot.Fid.Volume != 0)) {
845 tvcp->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
846 *tvcp->mvid = dotdot;
/* Reposition this entry behind lruvcp so bulk-stat'ed entries do not
 * flush the hot half of the VLRU; the paranoia checks guard queue links. */
849 ObtainWriteLock(&afs_xvcache,132);
850 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
851 refpanic ("Bulkstat VLRU inconsistent2");
853 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
854 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
855 { refpanic ("Bulkstat VLRU inconsistent4"); }
856 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
857 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
858 { refpanic ("Bulkstat VLRU inconsistent5"); }
860 if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
861 QRemove(&tvcp->vlruq);
862 QAdd(&lruvcp->vlruq, &tvcp->vlruq);
865 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
866 refpanic ("Bulkstat VLRU inconsistent3");
868 if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
869 || (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
870 { refpanic ("Bulkstat VLRU inconsistent5"); }
871 if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
872 || (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
873 { refpanic ("Bulkstat VLRU inconsistent6"); }
874 ReleaseWriteLock(&afs_xvcache);
876 ObtainWriteLock(&afs_xcbhash, 494);
878 /* We need to check the flags again. We may have missed
879 * something while we were waiting for a lock.
881 if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
883 ReleaseWriteLock(&tvcp->lock);
884 ReleaseWriteLock(&afs_xcbhash);
889 /* now merge in the resulting status back into the vnode.
890 * We only do this if the entry looks clear.
892 afs_ProcessFS(tvcp, &statsp[i], areqp);
893 #ifdef AFS_LINUX22_ENV
894 /* overwrite the ops if it's a directory or symlink. */
895 if (vType(tvcp) == VDIR)
896 tvcp->v.v_op = &afs_dir_iops;
897 else if (vType(tvcp) == VLNK)
898 tvcp->v.v_op = &afs_symlink_iops;
901 /* do some accounting for bulk stats: mark this entry as
902 * loaded, so we can tell if we use it before it gets
905 tvcp->states |= CBulkStat;
906 tvcp->states &= ~CBulkFetching;
910 /* merge in vol info */
911 if (volStates & VRO) tvcp->states |= CRO;
912 if (volStates & VBackup) tvcp->states |= CBackup;
913 if (volStates & VForeign) tvcp->states |= CForeign;
915 /* merge in the callback info */
916 tvcp->states |= CTruth;
918 /* get ptr to the callback we are interested in */
921 if (tcbp->ExpirationTime != 0) {
922 tvcp->cbExpires = tcbp->ExpirationTime+startTime;
923 tvcp->callback = hostp;
924 tvcp->states |= CStatd;
925 afs_QueueCallback(tvcp, CBHash(tcbp->ExpirationTime), volp);
927 else if (tvcp->states & CRO) {
928 /* ordinary callback on a read-only volume -- AFS 3.2 style */
929 tvcp->cbExpires = 3600+startTime;
930 tvcp->callback = hostp;
931 tvcp->states |= CStatd;
932 afs_QueueCallback(tvcp, CBHash(3600), volp);
936 tvcp->states &= ~(CStatd|CUnique);
937 afs_DequeueCallback(tvcp);
938 if ((tvcp->states & CForeign) || (vType(tvcp) == VDIR))
939 osi_dnlc_purgedp (tvcp); /* if it (could be) a directory */
941 ReleaseWriteLock(&afs_xcbhash);
943 ReleaseWriteLock(&tvcp->lock);
944 /* finally, we're done with the entry */
946 } /* for all files we got back */
948 /* finally return the pointer into the LRU queue */
949 afs_PutVCache(lruvcp);
952 /* Be sure to turn off the CBulkFetching flags */
953 for(i=flagIndex; i<fidIndex; i++) {
954 afid.Cell = adp->fid.Cell;
955 afid.Fid.Volume = adp->fid.Fid.Volume;
956 afid.Fid.Vnode = fidsp[i].Vnode;
957 afid.Fid.Unique = fidsp[i].Unique;
960 ObtainReadLock(&afs_xvcache);
961 tvcp = afs_FindVCache(&afid, &retry, 0/* !stats&!lru*/);
962 ReleaseReadLock(&afs_xvcache);
963 } while (tvcp && retry);
/* Only clear the flag if our own sequence number is still in place. */
965 && (tvcp->states & CBulkFetching)
966 && (tvcp->m.Length == statSeqNo))  {
967 tvcp->states &= ~CBulkFetching;
974 afs_PutVolume(volp, READ_LOCK);
976 /* If we did the InlineBulk RPC pull out the return code */
978 if ((&statsp[0])->errorCode) {
979 afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
980 AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
982 code = (&statsp[0])->errorCode;
/* common exit: release the scratch buffers allocated above */
987 osi_FreeLargeSpace(statMemp);
988 osi_FreeLargeSpace(cbfMemp);
992 /* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
/* NOTE(review): presumably a runtime switch enabling bulk-stat prefetch in
 * afs_lookup; its consumer is not in the visible code — confirm. */
993 static int AFSDOBULK = 1;
998 struct nameidata *ndp; {
999 char aname[MAXNAMLEN+1]; /* XXX */
1000 struct vcache **avcp = (struct vcache **)&(ndp->ni_vp);
1001 struct ucred *acred = ndp->ni_cred;
1002 int wantparent = ndp->ni_nameiop & WANTPARENT;
1003 int opflag = ndp->ni_nameiop & OPFLAG;
1004 #else /* AFS_OSF_ENV */
1005 #if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
1006 afs_lookup(OSI_VC_ARG(adp), aname, avcp, pnp, flags, rdir, acred)
1007 struct pathname *pnp;
1011 #if defined(UKERNEL)
1012 afs_lookup(adp, aname, avcp, acred, flags)
1015 afs_lookup(adp, aname, avcp, acred)
1016 #endif /* UKERNEL */
1017 #endif /* SUN5 || SGI */
1019 struct vcache **avcp;
1021 struct AFS_UCRED *acred; {
1023 struct vrequest treq;
1025 register struct vcache *tvc=0;
1026 register afs_int32 code;
1027 register afs_int32 bulkcode = 0;
1028 int pass = 0, hit = 0;
1030 extern afs_int32 afs_mariner; /*Writing activity to log?*/
1032 afs_hyper_t versionNo;
1033 int no_read_access = 0;
1034 struct sysname_info sysState; /* used only for @sys checking */
1035 int dynrootRetry = 1;
1036 struct afs_fakestat_state fakestate;
1037 int tryEvalOnly = 0;
1039 AFS_STATCNT(afs_lookup);
1040 afs_InitFakeStat(&fakestate);
1042 if ((code = afs_InitReq(&treq, acred)))
1046 ndp->ni_dvp = AFSTOV(adp);
1047 memcpy(aname, ndp->ni_ptr, ndp->ni_namelen);
1048 aname[ndp->ni_namelen] = '\0';
1049 #endif /* AFS_OSF_ENV */
1051 #if defined(AFS_DARWIN_ENV)
1052 /* Workaround for MacOSX Finder, which tries to look for
1053 * .DS_Store and Contents under every directory.
1055 if (afs_fakestat_enable && adp->mvstat == 1) {
1056 if (strcmp(aname, ".DS_Store") == 0)
1058 if (strcmp(aname, "Contents") == 0)
1064 code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
1066 code = afs_EvalFakeStat(&adp, &fakestate, &treq);
1067 if (tryEvalOnly && adp->mvstat == 1)
1072 *avcp = NULL; /* Since some callers don't initialize it */
1074 /* come back to here if we encounter a non-existent object in a read-only
1075 volume's directory */
1078 *avcp = NULL; /* Since some callers don't initialize it */
1081 if (!(adp->states & CStatd)) {
1082 if ((code = afs_VerifyVCache2(adp, &treq))) {
1088 /* watch for ".." in a volume root */
1089 if (adp->mvstat == 2 && aname[0] == '.' && aname[1] == '.' && !aname[2]) {
1090 /* looking up ".." in root via special hacks */
1091 if (adp->mvid == (struct VenusFid *) 0 || adp->mvid->Fid.Volume == 0) {
1093 extern struct vcache *afs_globalVp;
1094 if (adp == afs_globalVp) {
1095 struct vnode *rvp = AFSTOV(adp);
1097 ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
1098 ndp->ni_dvp = ndp->ni_vp;
1108 /* otherwise we have the fid here, so we use it */
1109 tvc = afs_GetVCache(adp->mvid, &treq, NULL, NULL);
1110 afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT,
1111 ICL_TYPE_FID, adp->mvid, ICL_TYPE_POINTER, tvc,
1112 ICL_TYPE_INT32, code);
1114 code = (tvc ? 0 : ENOENT);
1116 if (tvc && !VREFCOUNT(tvc)) {
1120 /*printf("LOOKUP GETVCDOTDOT -> %d\n", code);*/
1125 /* now check the access */
1126 if (treq.uid != adp->last_looker) {
1127 if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
1132 else adp->last_looker = treq.uid;
1135 /* Check for read access as well. We need read access in order to
1136 stat files, but not to stat subdirectories. */
1137 if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
1140 /* special case lookup of ".". Can we check for it sooner in this code,
1141 * for instance, way up before "redo:" ??
1142 * I'm not fiddling with the LRUQ here, either, perhaps I should, or else
1143 * invent a lightweight version of GetVCache.
1145 if (aname[0] == '.' && !aname[1]) { /* special case */
1146 ObtainReadLock(&afs_xvcache);
1148 ReleaseReadLock(&afs_xvcache);
1152 if (adp && !VREFCOUNT(adp)) {
1158 Check_AtSys(adp, aname, &sysState, &treq);
1159 tname = sysState.name;
1161 /* 1st Check_AtSys and lookup by tname is required here, for now,
1162 because the dnlc is *not* told to remove entries for the parent
1163 dir of file/dir op that afs_LocalHero likes, but dnlc is informed
1164 if the cached entry for the parent dir is invalidated for a
1166 Otherwise, we'd be able to do a dnlc lookup on an entry ending
1167 w/@sys and know the dnlc was consistent with reality. */
1168 tvc = osi_dnlc_lookup (adp, tname, WRITE_LOCK);
1169 *avcp = tvc; /* maybe wasn't initialized, but it is now */
1171 if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
1172 /* need read access on dir to stat non-directory / non-link */
1178 #ifdef AFS_LINUX22_ENV
1179 if (tvc->mvstat == 2) { /* we don't trust the dnlc for root vcaches */
1188 #else /* non - LINUX */
1192 #endif /* linux22 */
1195 { /* sub-block just to reduce stack usage */
1196 register struct dcache *tdc;
1197 afs_size_t dirOffset, dirLen;
1199 struct VenusFid tfid;
1201 /* now we have to lookup the next fid */
1202 tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
1204 *avcp = NULL; /* redundant, but harmless */
1209 /* now we will just call dir package with appropriate inode.
1210 Dirs are always fetched in their entirety for now */
1211 ObtainReadLock(&adp->lock);
1212 ObtainReadLock(&tdc->lock);
1215 * Make sure that the data in the cache is current. There are two
1216 * cases we need to worry about:
1217 * 1. The cache data is being fetched by another process.
1218 * 2. The cache data is no longer valid
1220 while ((adp->states & CStatd)
1221 && (tdc->dflags & DFFetching)
1222 && hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1223 ReleaseReadLock(&tdc->lock);
1224 ReleaseReadLock(&adp->lock);
1225 afs_osi_Sleep(&tdc->validPos);
1226 ObtainReadLock(&adp->lock);
1227 ObtainReadLock(&tdc->lock);
1229 if (!(adp->states & CStatd)
1230 || !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
1231 ReleaseReadLock(&tdc->lock);
1232 ReleaseReadLock(&adp->lock);
1237 /* Save the version number for when we call osi_dnlc_enter */
1238 hset(versionNo, tdc->f.versionNo);
1241 * check for, and handle "@sys" if it's there. We should be able
1242 * to avoid the alloc and the strcpy with a little work, but it's
1243 * not pressing. If there aren't any remote users (ie, via the
1244 * NFS translator), we have a slightly easier job.
1245 * the faster way to do this is to check for *aname == '@' and if
1246 * it's there, check for @sys, otherwise, assume there's no @sys
1247 * then, if the lookup fails, check for .*@sys...
1249 /* above now implemented by Check_AtSys and Next_AtSys */
1251 /* lookup the name in the appropriate dir, and return a cache entry
1252 on the resulting fid */
1253 theDir = tdc->f.inode;
1254 code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
1256 /* If the first lookup doesn't succeed, maybe it's got @sys in the name */
1257 while (code == ENOENT && Next_AtSys(adp, &treq, &sysState))
1258 code = afs_dir_LookupOffset(&theDir, sysState.name, &tfid.Fid, &dirCookie);
1259 tname = sysState.name;
1261 ReleaseReadLock(&tdc->lock);
1264 if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
1265 ReleaseReadLock(&adp->lock);
1267 if (tname[0] == '.')
1268 afs_LookupAFSDB(tname + 1);
1270 afs_LookupAFSDB(tname);
1271 if (tname && tname != aname) osi_FreeLargeSpace(tname);
1274 ReleaseReadLock(&adp->lock);
1277 /* new fid has same cell and volume */
1278 tfid.Cell = adp->fid.Cell;
1279 tfid.Fid.Volume = adp->fid.Fid.Volume;
1280 afs_Trace4(afs_iclSetp, CM_TRACE_LOOKUP, ICL_TYPE_POINTER, adp,
1281 ICL_TYPE_STRING, tname,
1282 ICL_TYPE_FID, &tfid, ICL_TYPE_INT32, code);
1285 if (code != ENOENT) {
1286 printf("LOOKUP dirLookupOff -> %d\n", code);
1291 /* prefetch some entries, if the dir is currently open. The variable
1292 * dirCookie tells us where to start prefetching from.
1294 if (AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign) && !afs_IsDynroot(adp)) {
1296 /* if the entry is not in the cache, or is in the cache,
1297 * but hasn't been statd, then do a bulk stat operation.
1301 ObtainReadLock(&afs_xvcache);
1302 tvc = afs_FindVCache(&tfid, &retry, 0 /* !stats,!lru */);
1303 ReleaseReadLock(&afs_xvcache);
1304 } while (tvc && retry);
1306 if (!tvc || !(tvc->states & CStatd))
1307 bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
1311 /* if the vcache isn't usable, release it */
1312 if (tvc && !(tvc->states & CStatd)) {
1321 /* now get the status info, if we don't already have it */
1322 /* This is kind of weird, but we might wind up accidentally calling
1323 * RXAFS_Lookup because we happened upon a file which legitimately
1324 * has a 0 uniquifier. That is the result of allowing unique to wrap
1325 * to 0. This was fixed in AFS 3.4. For CForeign, Unique == 0 means that
1326 * the file has not yet been looked up.
1329 afs_int32 cached = 0;
1330 if (!tfid.Fid.Unique && (adp->states & CForeign)) {
1331 tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
1333 if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
1334 tvc = afs_GetVCache(&tfid, &treq, &cached, NULL);
1337 } /* sub-block just to reduce stack usage */
1340 int force_eval = afs_fakestat_enable ? 0 : 1;
1342 if (adp->states & CForeign)
1343 tvc->states |= CForeign;
1344 tvc->parentVnode = adp->fid.Fid.Vnode;
1345 tvc->parentUnique = adp->fid.Fid.Unique;
1346 tvc->states &= ~CBulkStat;
1348 if (afs_fakestat_enable == 2 && tvc->mvstat == 1) {
1349 ObtainSharedLock(&tvc->lock, 680);
1350 if (!tvc->linkData) {
1351 UpgradeSToWLock(&tvc->lock, 681);
1352 code = afs_HandleLink(tvc, &treq);
1353 ConvertWToRLock(&tvc->lock);
1355 ConvertSToRLock(&tvc->lock);
1358 if (!code && !afs_strchr(tvc->linkData, ':'))
1360 ReleaseReadLock(&tvc->lock);
1363 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1364 if (!(flags & AFS_LOOKUP_NOEVAL))
1365 /* don't eval mount points */
1366 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1367 if (tvc->mvstat == 1 && force_eval) {
1368 /* a mt point, possibly unevaluated */
1369 struct volume *tvolp;
1371 ObtainWriteLock(&tvc->lock,133);
1372 code = EvalMountPoint(tvc, adp, &tvolp, &treq);
1373 ReleaseWriteLock(&tvc->lock);
1377 if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
1381 /* next, we want to continue using the target of the mt point */
1382 if (tvc->mvid && (tvc->states & CMValid)) {
1384 /* now lookup target, to set .. pointer */
1385 afs_Trace2(afs_iclSetp, CM_TRACE_LOOKUP1,
1386 ICL_TYPE_POINTER, tvc, ICL_TYPE_FID, &tvc->fid);
1387 uvc = tvc; /* remember for later */
1389 if (tvolp && (tvolp->states & VForeign)) {
1390 /* XXXX tvolp has ref cnt on but not locked! XXX */
1391 tvc = afs_GetRootVCache(tvc->mvid, &treq, NULL, tvolp);
1393 tvc = afs_GetVCache(tvc->mvid, &treq, NULL, NULL);
1395 afs_PutVCache(uvc); /* we're done with it */
1400 afs_PutVolume(tvolp, WRITE_LOCK);
1405 /* now, if we came via a new mt pt (say because of a new
1406 * release of a R/O volume), we must reevaluate the ..
1407 * ptr to point back to the appropriate place */
1409 ObtainWriteLock(&tvc->lock,134);
1410 if (tvc->mvid == NULL) {
1411 tvc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
1413 /* setup backpointer */
1414 *tvc->mvid = tvolp->dotdot;
1415 ReleaseWriteLock(&tvc->lock);
1416 afs_PutVolume(tvolp, WRITE_LOCK);
1422 if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
1427 if (tvc && !VREFCOUNT(tvc)) {
1433 /* if we get here, we found something in a directory that couldn't
1434 be located (a Multics "connection failure"). If the volume is
1435 read-only, we try flushing this entry from the cache and trying
1439 tv = afs_GetVolume(&adp->fid, &treq, READ_LOCK);
1441 if (tv->states & VRO) {
1442 pass = 1; /* try this *once* */
1443 ObtainWriteLock(&afs_xcbhash, 495);
1444 afs_DequeueCallback(adp);
1445 /* re-stat to get later version */
1446 adp->states &= ~CStatd;
1447 ReleaseWriteLock(&afs_xcbhash);
1448 osi_dnlc_purgedp(adp);
1449 afs_PutVolume(tv, READ_LOCK);
1452 afs_PutVolume(tv, READ_LOCK);
1459 /* put the network buffer back, if need be */
1460 if (tname != aname && tname) osi_FreeLargeSpace(tname);
1463 /* Handle RENAME; only need to check rename "." */
1464 if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
1465 if (!FidCmp(&(tvc->fid), &(adp->fid))) {
1466 afs_PutVCache(*avcp);
1468 afs_PutFakeStat(&fakestate);
1469 return afs_CheckCode(EISDIR, &treq, 18);
1472 #endif /* AFS_OSF_ENV */
1475 afs_AddMarinerName(aname, tvc);
1477 #if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
1478 if (!(flags & AFS_LOOKUP_NOEVAL))
1479 /* Here we don't enter the name into the DNLC because we want the
1480 evaluated mount dir to be there (the vcache for the mounted volume)
1481 rather than the vc of the mount point itself. we can still find the
1482 mount point's vc in the vcache by its fid. */
1483 #endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
1485 osi_dnlc_enter (adp, aname, tvc, &versionNo);
1488 #ifdef AFS_LINUX20_ENV
1489 /* So Linux inode cache is up to date. */
1490 code = afs_VerifyVCache(tvc, &treq);
1492 afs_PutFakeStat(&fakestate);
1493 return 0; /* can't have been any errors if hit and !code */
1497 if (bulkcode) code = bulkcode; else
1498 code = afs_CheckCode(code, &treq, 19);
1500 /* If there is an error, make sure *avcp is null.
1501 * Alphas panic otherwise - defect 10719.
1506 afs_PutFakeStat(&fakestate);