2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afs/param.h>
24 /*extern void afsi_log(char *pattern, ...);*/
26 extern osi_hyper_t hzero;
/*
 * File-scope state for the stat-cache (cm_scache) module.
 * NOTE(review): this file is an excerpted listing with embedded line
 * numbers and elided lines; comments describe only what is visible here.
 */
29 osi_queue_t *cm_allFileLocks;
30 osi_queue_t *cm_freeFileLocks;
31 unsigned long cm_lockRefreshCycle;
33 /* lock for globals */
34 osi_rwlock_t cm_scacheLock;
36 /* Dummy scache entry for use with pioctl fids */
37 cm_scache_t cm_fakeSCache;
39 osi_queue_t * cm_allFreeWaiters; /* protected by cm_scacheLock */
41 #ifdef AFS_FREELANCE_CLIENT
42 extern osi_mutex_t cm_Freelance_Lock;
45 /* must be called with cm_scacheLock write-locked! */
/*
 * Move scp to the most-recently-used (head) end of the global scache LRU
 * queue.  If scp is currently the tail, the tail pointer is stepped back
 * before removal; after re-insertion at the head a null tail is repaired
 * (the single-element case).
 */
46 void cm_AdjustScacheLRU(cm_scache_t *scp)
48     if (scp == cm_data.scacheLRULastp)
49         cm_data.scacheLRULastp = (cm_scache_t *) osi_QPrev(&scp->q);
50     osi_QRemoveHT((osi_queue_t **) &cm_data.scacheLRUFirstp, (osi_queue_t **) &cm_data.scacheLRULastp, &scp->q);
51     osi_QAdd((osi_queue_t **) &cm_data.scacheLRUFirstp, &scp->q);
52     if (!cm_data.scacheLRULastp)
53         cm_data.scacheLRULastp = scp;
56 /* call with scache write-locked and mutex held */
/*
 * Unlink scp from its fid-hash bucket chain (if CM_SCACHEFLAG_INHASH is
 * set) and clear the INHASH flag.  The bucket walk keeps a pointer to the
 * previous link (lscpp) so the matching node can be spliced out; the
 * splice itself falls in lines elided from this listing.
 */
57 void cm_RemoveSCacheFromHashTable(cm_scache_t *scp)
63     if (scp->flags & CM_SCACHEFLAG_INHASH) {
64         /* hash it out first */
65         i = CM_SCACHE_HASH(&scp->fid);
66         for (lscpp = &cm_data.scacheHashTablep[i], tscp = cm_data.scacheHashTablep[i];
68              lscpp = &tscp->nextp, tscp = tscp->nextp) {
71         scp->flags &= ~CM_SCACHEFLAG_INHASH;
78 /* called with cm_scacheLock write-locked; recycles an existing scp.
80  * this function ignores all of the locking hierarchy.
/*
 * Reset an scache entry so it can be reused for a different fid.
 * Visible responsibilities: refuse entries still referenced or pinned by
 * an SMB fid (panics on the latter), remove the entry from the fid hash,
 * optionally invalidate all queued read/write buffers
 * (CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS) by marking each buffer with
 * CM_BUF_ERROR/VNOVNODE and waking waiters, then clear cached status,
 * symlink/mount-point info, lock state, ACL entries and the directory
 * B+ tree.  NOTE(review): excerpted listing — some branches and returns
 * are elided.
 */
82 long cm_RecycleSCache(cm_scache_t *scp, afs_int32 flags)
84     if (scp->refCount != 0) {
88     if (scp->flags & CM_SCACHEFLAG_SMB_FID) {
89         osi_Log1(afsd_logp,"cm_RecycleSCache CM_SCACHEFLAG_SMB_FID detected scp 0x%p", scp);
91         osi_panic("cm_RecycleSCache CM_SCACHEFLAG_SMB_FID detected",__FILE__,__LINE__);
96     cm_RemoveSCacheFromHashTable(scp);
99     if (flags & CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS) {
100         osi_queueData_t *qdp;
        /* drain pending store (write) buffers: mark each one bad so any
         * waiter sees an error rather than stale data */
103         while(qdp = scp->bufWritesp) {
104             bufp = osi_GetQData(qdp);
105             osi_QRemove((osi_queue_t **) &scp->bufWritesp, &qdp->q);
108             lock_ObtainMutex(&bufp->mx);
109             bufp->cmFlags &= ~CM_BUF_CMSTORING;
110             bufp->flags &= ~CM_BUF_DIRTY;
111             bufp->dirty_offset = 0;
112             bufp->dirty_length = 0;
113             bufp->flags |= CM_BUF_ERROR;
114             bufp->error = VNOVNODE;
115             bufp->dataVersion = -1; /* bad */
116             bufp->dirtyCounter++;
117             if (bufp->flags & CM_BUF_WAITING) {
118                 osi_Log2(afsd_logp, "CM RecycleSCache Waking [scp 0x%x] bufp 0x%x", scp, bufp);
119                 osi_Wakeup((long) &bufp);
121             lock_ReleaseMutex(&bufp->mx);
        /* same treatment for pending fetch (read) buffers */
125         while(qdp = scp->bufReadsp) {
126             bufp = osi_GetQData(qdp);
127             osi_QRemove((osi_queue_t **) &scp->bufReadsp, &qdp->q);
130             lock_ObtainMutex(&bufp->mx);
131             bufp->cmFlags &= ~CM_BUF_CMFETCHING;
132             bufp->flags &= ~CM_BUF_DIRTY;
133             bufp->dirty_offset = 0;
134             bufp->dirty_length = 0;
135             bufp->flags |= CM_BUF_ERROR;
136             bufp->error = VNOVNODE;
137             bufp->dataVersion = -1; /* bad */
138             bufp->dirtyCounter++;
139             if (bufp->flags & CM_BUF_WAITING) {
140                 osi_Log2(afsd_logp, "CM RecycleSCache Waking [scp 0x%x] bufp 0x%x", scp, bufp);
141                 osi_Wakeup((long) &bufp);
143             lock_ReleaseMutex(&bufp->mx);
147         buf_CleanDirtyBuffers(scp);
149     /* look for things that shouldn't still be set */
150     osi_assertx(scp->bufWritesp == NULL, "non-null cm_scache_t bufWritesp");
151     osi_assertx(scp->bufReadsp == NULL, "non-null cm_scache_t bufReadsp");
155     /* invalidate so next merge works fine;
156      * also initialize some flags */
158     scp->flags &= ~(CM_SCACHEFLAG_STATD
159                      | CM_SCACHEFLAG_DELETED
161                      | CM_SCACHEFLAG_PURERO
162                      | CM_SCACHEFLAG_OVERQUOTA
163                      | CM_SCACHEFLAG_OUTOFSPACE
164                      | CM_SCACHEFLAG_EACCESS);
165     scp->serverModTime = 0;
166     scp->dataVersion = 0;
167     scp->bufDataVersionLow = 0;
168     scp->bulkStatProgress = hzero;
170     scp->waitQueueT = NULL;
172     if (scp->cbServerp) {
173         cm_PutServer(scp->cbServerp);
174         scp->cbServerp = NULL;
184     /* remove from dnlc */
188     /* discard cached status; if non-zero, Close
189      * tried to store this to server but failed */
192     /* discard symlink info */
193     scp->mountPointStringp[0] = '\0';
194     memset(&scp->mountRootFid, 0, sizeof(cm_fid_t));
195     memset(&scp->dotdotFid, 0, sizeof(cm_fid_t));
197     /* reset locking info */
198     scp->fileLocksH = NULL;
199     scp->fileLocksT = NULL;
200     scp->serverLock = (-1);
201     scp->exclusiveLocks = 0;
202     scp->sharedLocks = 0;
203     scp->lockDataVersion = -1;
205     /* not locked, but there can be no references to this guy
206      * while we hold the global refcount lock.
208     cm_FreeAllACLEnts(scp);
211     /* destroy directory Bplus Tree */
    /* timing of the B+ tree teardown is accumulated in bplus_free_time */
213         LARGE_INTEGER start, end;
214         QueryPerformanceCounter(&start);
216         freeBtree(scp->dirBplus);
217         scp->dirBplus = NULL;
218         QueryPerformanceCounter(&end);
220         bplus_free_time += (end.QuadPart - start.QuadPart);
227 /* called with cm_scacheLock write-locked; find a vnode to recycle.
228  * Can allocate a new one if desperate, or if below quota (cm_data.maxSCaches).
/*
 * Obtain an scache entry for reuse.  Strategy, as visible here:
 *   1st pass (LRU tail -> head): recycle an unreferenced DELETED entry,
 *   or reuse one already out of the hash table.
 *   2nd pass (only at/over quota): recycle any unreferenced entry with no
 *   queued or dirty buffers.
 *   Fallback: carve a fresh cm_scache_t out of the mapped cache region,
 *   zero it, init its locks, put it on the LRU and the all-scaches list.
 * NOTE(review): excerpted listing — loop conditions and some returns are
 * elided; the successful paths appear to return the chosen scp.
 */
230 cm_scache_t *cm_GetNewSCache(void)
236     /* first pass - look for deleted objects */
237     for ( scp = cm_data.scacheLRULastp;
239           scp = (cm_scache_t *) osi_QPrev(&scp->q))
241         osi_assertx(scp >= cm_data.scacheBaseAddress && scp < (cm_scache_t *)cm_data.scacheHashTablep,
242                     "invalid cm_scache_t address");
244         if (scp->refCount == 0) {
245             if (scp->flags & CM_SCACHEFLAG_DELETED) {
246                 osi_Log1(afsd_logp, "GetNewSCache attempting to recycle deleted scp 0x%x", scp);
247                 if (!cm_RecycleSCache(scp, CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS)) {
249                     /* we found an entry, so return it */
250                     /* now remove from the LRU queue and put it back at the
251                      * head of the LRU queue.
253                     cm_AdjustScacheLRU(scp);
258                 osi_Log1(afsd_logp, "GetNewSCache recycled failed scp 0x%x", scp);
259             } else if (!(scp->flags & CM_SCACHEFLAG_INHASH)) {
260                 /* we found an entry, so return it */
261                 /* now remove from the LRU queue and put it back at the
262                  * head of the LRU queue.
264                 cm_AdjustScacheLRU(scp);
271     osi_Log0(afsd_logp, "GetNewSCache no deleted or recycled entries available for reuse");
274     if (cm_data.currentSCaches >= cm_data.maxSCaches) {
275         /* There were no deleted scache objects that we could use.  Try to find
276          * one that simply hasn't been used in a while.
278         for ( scp = cm_data.scacheLRULastp;
280               scp = (cm_scache_t *) osi_QPrev(&scp->q))
282             /* It is possible for the refCount to be zero and for there still
283              * to be outstanding dirty buffers.  If there are dirty buffers,
284              * we must not recycle the scp. */
285             if (scp->refCount == 0 && scp->bufReadsp == NULL && scp->bufWritesp == NULL) {
286                 if (!buf_DirtyBuffersExist(&scp->fid)) {
287                     if (!cm_RecycleSCache(scp, 0)) {
288                         /* we found an entry, so return it */
289                         /* now remove from the LRU queue and put it back at the
290                          * head of the LRU queue.
292                         cm_AdjustScacheLRU(scp);
298                     osi_Log1(afsd_logp,"GetNewSCache dirty buffers exist scp 0x%x", scp);
302         osi_Log1(afsd_logp, "GetNewSCache all scache entries in use (retry = %d)", retry);
307     /* if we get here, we should allocate a new scache entry.  We either are below
308      * quota or we have a leak and need to allocate a new one to avoid panicing.
310     scp = cm_data.scacheBaseAddress + cm_data.currentSCaches;
311     osi_assertx(scp >= cm_data.scacheBaseAddress && scp < (cm_scache_t *)cm_data.scacheHashTablep,
312                 "invalid cm_scache_t address");
313     memset(scp, 0, sizeof(cm_scache_t));
314     scp->magic = CM_SCACHE_MAGIC;
315     lock_InitializeRWLock(&scp->rw, "cm_scache_t rw");
316     lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
318     lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock");
320     scp->serverLock = -1;
322     /* and put it in the LRU queue */
323     osi_QAdd((osi_queue_t **) &cm_data.scacheLRUFirstp, &scp->q);
324     if (!cm_data.scacheLRULastp)
325         cm_data.scacheLRULastp = scp;
326     cm_data.currentSCaches++;
327     cm_dnlcPurgedp(scp); /* make doubly sure that this is not in dnlc */
329     scp->allNextp = cm_data.allSCachesp;
330     cm_data.allSCachesp = scp;
/*
 * Populate a cm_fid_t and compute its packed hash from the four fid
 * components (low bits of each field packed into one 32-bit word).
 * NOTE(review): the cell and vnode assignments fall in lines elided from
 * this listing.
 */
334 void cm_SetFid(cm_fid_t *fidp, afs_uint32 cell, afs_uint32 volume, afs_uint32 vnode, afs_uint32 unique)
337     fidp->volume = volume;
339     fidp->unique = unique;
340     fidp->hash = ((cell & 0xF) << 28) | ((volume & 0x3F) << 22) | ((vnode & 0x7FF) << 11) | (unique & 0x7FF);
343 /* like strcmp, only for fids */
/*
 * Compare two fids field by field, cheapest discriminator (the packed
 * hash) first.  NOTE(review): the return statements for each branch are
 * elided from this listing; per the strcmp analogy the function
 * presumably returns 0 only when all fields match — confirm against the
 * full source.
 */
344 __inline int cm_FidCmp(cm_fid_t *ap, cm_fid_t *bp)
346     if (ap->hash != bp->hash)
348     if (ap->vnode != bp->vnode)
350     if (ap->volume != bp->volume)
352     if (ap->unique != bp->unique)
354     if (ap->cell != bp->cell)
/*
 * Initialize the dummy scache entry used for pioctl fids: a plausible
 * plain file (mode 0777, length 1000, link count 1) with a sentinel
 * callback-server pointer of (-1) so it is never treated as having no
 * callback.  NOTE(review): the newFile parameter is not used in the
 * visible lines.
 */
359 void cm_fakeSCacheInit(int newFile)
362     memset(&cm_data.fakeSCache, 0, sizeof(cm_scache_t));
363     cm_data.fakeSCache.cbServerp = (struct cm_server *)(-1);
364     /* can leave clientModTime at 0 */
365     cm_data.fakeSCache.fileType = CM_SCACHETYPE_FILE;
366     cm_data.fakeSCache.unixModeBits = 0777;
367     cm_data.fakeSCache.length.LowPart = 1000;
368     cm_data.fakeSCache.linkCount = 1;
369     cm_data.fakeSCache.refCount = 1;
371     lock_InitializeRWLock(&cm_data.fakeSCache.rw, "cm_scache_t rw");
/*
 * Consistency checker for the scache structures.  Verifies, in order:
 * LRU head/tail pointers agree on emptiness; forward LRU walk (magic
 * values, hash-chain and ACL magic, loop detection via count, prev-link
 * symmetry); the same backward from the tail; then every hash bucket
 * (magic values and that each entry hashes to its bucket).  Finishes by
 * delegating to cm_dnlcValidate().
 * NOTE(review): "LUR" in two fprintf strings below is a typo for "LRU";
 * runtime strings are left untouched in this documentation pass.
 */
375 cm_ValidateSCache(void)
377     cm_scache_t * scp, *lscp;
380     if ( cm_data.scacheLRUFirstp == NULL && cm_data.scacheLRULastp != NULL ||
381          cm_data.scacheLRUFirstp != NULL && cm_data.scacheLRULastp == NULL) {
382         afsi_log("cm_ValidateSCache failure: inconsistent LRU pointers");
383         fprintf(stderr, "cm_ValidateSCache failure: inconsistent LRU pointers\n");
387     for ( scp = cm_data.scacheLRUFirstp, lscp = NULL, i = 0;
389           lscp = scp, scp = (cm_scache_t *) osi_QNext(&scp->q), i++ ) {
390         if (scp->magic != CM_SCACHE_MAGIC) {
391             afsi_log("cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC");
392             fprintf(stderr, "cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC\n");
395         if (scp->nextp && scp->nextp->magic != CM_SCACHE_MAGIC) {
396             afsi_log("cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC");
397             fprintf(stderr, "cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC\n");
400         if (scp->randomACLp && scp->randomACLp->magic != CM_ACLENT_MAGIC) {
401             afsi_log("cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC");
402             fprintf(stderr, "cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC\n");
405         if (i > cm_data.currentSCaches ) {
406             afsi_log("cm_ValidateSCache failure: LRU First queue loops");
407             fprintf(stderr, "cm_ValidateSCache failure: LUR First queue loops\n");
410         if (lscp != (cm_scache_t *) osi_QPrev(&scp->q)) {
411             afsi_log("cm_ValidateSCache failure: QPrev(scp) != previous");
412             fprintf(stderr, "cm_ValidateSCache failure: QPrev(scp) != previous\n");
417     for ( scp = cm_data.scacheLRULastp, lscp = NULL, i = 0; scp;
418           lscp = scp, scp = (cm_scache_t *) osi_QPrev(&scp->q), i++ ) {
419         if (scp->magic != CM_SCACHE_MAGIC) {
420             afsi_log("cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC");
421             fprintf(stderr, "cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC\n");
424         if (scp->nextp && scp->nextp->magic != CM_SCACHE_MAGIC) {
425             afsi_log("cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC");
426             fprintf(stderr, "cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC\n");
429         if (scp->randomACLp && scp->randomACLp->magic != CM_ACLENT_MAGIC) {
430             afsi_log("cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC");
431             fprintf(stderr, "cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC\n");
434         if (i > cm_data.currentSCaches ) {
435             afsi_log("cm_ValidateSCache failure: LRU Last queue loops");
436             fprintf(stderr, "cm_ValidateSCache failure: LUR Last queue loops\n");
439         if (lscp != (cm_scache_t *) osi_QNext(&scp->q)) {
440             afsi_log("cm_ValidateSCache failure: QNext(scp) != next");
441             fprintf(stderr, "cm_ValidateSCache failure: QNext(scp) != next\n");
446     for ( i=0; i < cm_data.scacheHashTableSize; i++ ) {
447         for ( scp = cm_data.scacheHashTablep[i]; scp; scp = scp->nextp ) {
449             hash = CM_SCACHE_HASH(&scp->fid);
450             if (scp->magic != CM_SCACHE_MAGIC) {
451                 afsi_log("cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC");
452                 fprintf(stderr, "cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC\n");
455             if (scp->nextp && scp->nextp->magic != CM_SCACHE_MAGIC) {
456                 afsi_log("cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC");
457                 fprintf(stderr, "cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC\n");
460             if (scp->randomACLp && scp->randomACLp->magic != CM_ACLENT_MAGIC) {
461                 afsi_log("cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC");
462                 fprintf(stderr, "cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC\n");
466                 afsi_log("cm_ValidateSCache failure: scp hash != hash index");
467                 fprintf(stderr, "cm_ValidateSCache failure: scp hash != hash index\n");
473     return cm_dnlcValidate();
/*
 * Prepare the stat cache for machine suspend.  Gives up callbacks on all
 * servers, then walks every scache entry that holds a callback and sets
 * its expiration to now+1 (and the matching volume RO expiration when the
 * entry is pure-RO), so the callbacks expire once servers come back up
 * but remain usable while the machine is offline.
 */
477 cm_SuspendSCache(void)
482     cm_GiveUpAllCallbacksAllServers(TRUE);
485     * After this call all servers are marked down.
486      * Do not clear the callbacks, instead change the
487      * expiration time so that the callbacks will be expired
488      * when the servers are marked back up.  However, we
489      * want the callbacks to be preserved as long as the
490      * servers are down.  That way if the machine resumes
491      * without network, the stat cache item will still be
496     lock_ObtainWrite(&cm_scacheLock);
497     for ( scp = cm_data.allSCachesp; scp; scp = scp->allNextp ) {
498         if (scp->cbServerp) {
499             if (scp->flags & CM_SCACHEFLAG_PURERO) {
500                 cm_volume_t *volp = cm_GetVolumeByFID(&scp->fid);
502                     if (volp->cbExpiresRO == scp->cbExpires)
503                         volp->cbExpiresRO = now+1;
507             scp->cbExpires = now+1;
510     lock_ReleaseWrite(&cm_scacheLock);
/*
 * Tear down the stat cache at client shutdown: for every scache entry,
 * free ACL entries (under the entry's rw lock), release the callback
 * server reference, clear the CALLBACK flag, free the directory B+ tree
 * and finalize the per-entry locks.  Afterwards callbacks are given up on
 * all servers and the DNLC is shut down; returns cm_dnlcShutdown()'s
 * result.
 */
514 cm_ShutdownSCache(void)
518     lock_ObtainWrite(&cm_scacheLock);
520     for ( scp = cm_data.allSCachesp; scp;
521           scp = scp->allNextp ) {
522         if (scp->randomACLp) {
523             lock_ObtainWrite(&scp->rw);
524             cm_FreeAllACLEnts(scp);
525             lock_ReleaseWrite(&scp->rw);
528         if (scp->cbServerp) {
529             cm_PutServer(scp->cbServerp);
530             scp->cbServerp = NULL;
533         scp->flags &= ~CM_SCACHEFLAG_CALLBACK;
537             freeBtree(scp->dirBplus);
538             scp->dirBplus = NULL;
539             scp->dirDataVersion = -1;
540         lock_FinalizeRWLock(&scp->dirlock);
542         lock_FinalizeRWLock(&scp->rw);
543         lock_FinalizeRWLock(&scp->bufCreateLock);
545     lock_ReleaseWrite(&cm_scacheLock);
547     cm_GiveUpAllCallbacksAllServers(FALSE);
549     return cm_dnlcShutdown();
/*
 * One-time (osi_Once-guarded) initialization of the stat cache.  The
 * visible path zeroes the hash table and global counters/quota when
 * starting fresh, and re-initializes per-entry locks and transient state
 * (callback server, file locks, B+ tree, wait queue) for entries
 * recovered from a persistent cache file.  Also resets the file-lock
 * globals, sets up the fake pioctl scache entry and initializes the DNLC.
 * NOTE(review): the newFile branch structure is partially elided here.
 */
552 void cm_InitSCache(int newFile, long maxSCaches)
554     static osi_once_t once;
556     if (osi_Once(&once)) {
557         lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock");
559             memset(cm_data.scacheHashTablep, 0, sizeof(cm_scache_t *) * cm_data.scacheHashTableSize);
560             cm_data.allSCachesp = NULL;
561             cm_data.currentSCaches = 0;
562             cm_data.maxSCaches = maxSCaches;
563             cm_data.scacheLRUFirstp = cm_data.scacheLRULastp = NULL;
567             for ( scp = cm_data.allSCachesp; scp;
568                   scp = scp->allNextp ) {
569                 lock_InitializeRWLock(&scp->rw, "cm_scache_t rw");
570                 lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
572                 lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock");
574                 scp->cbServerp = NULL;
576                 scp->fileLocksH = NULL;
577                 scp->fileLocksT = NULL;
578                 scp->serverLock = (-1);
579                 scp->lastRefreshCycle = 0;
580                 scp->exclusiveLocks = 0;
581                 scp->sharedLocks = 0;
588                 scp->dirBplus = NULL;
589                 scp->dirDataVersion = -1;
591                 scp->waitQueueT = NULL;
592                 scp->flags &= ~CM_SCACHEFLAG_WAITING;
595         cm_allFileLocks = NULL;
596         cm_freeFileLocks = NULL;
597         cm_lockRefreshCycle = 0;
598         cm_fakeSCacheInit(newFile);
599         cm_allFreeWaiters = NULL;
600         cm_dnlcInit(newFile);
605 /* version that doesn't bother creating the entry if we don't find it */
/*
 * Look up a fid in the scache hash table under a read lock.  On a hit the
 * entry is held (refcount bumped), the lock is upgraded to write so the
 * entry can be moved to the LRU head, and the held entry is returned (the
 * return statement is elided from this listing).  A fid with cell 0 is
 * handled specially in elided lines.
 */
606 cm_scache_t *cm_FindSCache(cm_fid_t *fidp)
611     hash = CM_SCACHE_HASH(fidp);
613     if (fidp->cell == 0) {
617     lock_ObtainRead(&cm_scacheLock);
618     for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
619         if (cm_FidCmp(fidp, &scp->fid) == 0) {
620             cm_HoldSCacheNoLock(scp);
621             lock_ConvertRToW(&cm_scacheLock);
622             cm_AdjustScacheLRU(scp);
623             lock_ReleaseWrite(&cm_scacheLock);
627     lock_ReleaseRead(&cm_scacheLock);
631 #ifdef DEBUG_REFCOUNT
/*
 * Find-or-create an scache entry for a fid (DEBUG_REFCOUNT builds take
 * extra file/line arguments for refcount tracing).  Visible flow:
 *   1. fast path: hash lookup under cm_scacheLock; on a hit, hold the
 *      entry, bump it to the LRU head and return it;
 *   2. Freelance (AFS_FREELANCE_CLIENT): the fake root volume's root and
 *      local mount points are fabricated locally from cm_localMountPoints
 *      rather than fetched from a file server;
 *   3. otherwise resolve the cell and volume, re-check the hash table
 *      (the lock was dropped), then recycle a new entry via
 *      cm_GetNewSCache(), fill in volume-root dotdotFid and RO/BK flags,
 *      and insert it into the hash table held with refCount 1.
 * Returns 0 with *outScpp held on success, or a CM_ERROR_* code
 * (CM_ERROR_WOULDBLOCK when no entry can be recycled, CM_ERROR_NOSUCHCELL
 * for an unknown cell).  NOTE(review): excerpted listing — many
 * assignments, returns and closing braces are elided.
 */
632 long cm_GetSCacheDbg(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp,
633                      cm_req_t *reqp, char * file, long line)
635 long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp,
642     cm_volume_t *volp = NULL;
644     int special = 0; // yj: boolean variable to test if file is on root.afs
646     extern cm_fid_t cm_rootFid;
648     hash = CM_SCACHE_HASH(fidp);
650     osi_assertx(fidp->cell != 0, "unassigned cell value");
652     if (fidp->cell== cm_data.rootFid.cell &&
653          fidp->volume==cm_data.rootFid.volume &&
654          fidp->vnode==0x0 && fidp->unique==0x0)
656         osi_Log0(afsd_logp,"cm_GetSCache called with root cell/volume and vnode=0 and unique=0");
659     // yj: check if we have the scp, if so, we don't need
660     // to do anything else
661     lock_ObtainWrite(&cm_scacheLock);
662     for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
663         if (cm_FidCmp(fidp, &scp->fid) == 0) {
664 #ifdef DEBUG_REFCOUNT
665             afsi_log("%s:%d cm_GetSCache (1) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
666             osi_Log1(afsd_logp,"cm_GetSCache (1) outScpp 0x%p", scp);
668             cm_HoldSCacheNoLock(scp);
670             cm_AdjustScacheLRU(scp);
671             lock_ReleaseWrite(&cm_scacheLock);
676     // yj: when we get here, it means we don't have an scp
677     // so we need to either load it or fake it, depending
678     // on whether the file is "special", see below.
680     // yj: if we're trying to get an scp for a file that's
681     // on root.afs of homecell, we want to handle it specially
682     // because we have to fill in the status stuff 'coz we
683     // don't want trybulkstat to fill it in for us
684 #ifdef AFS_FREELANCE_CLIENT
685     special = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
686                fidp->volume==AFS_FAKE_ROOT_VOL_ID &&
687                !(fidp->vnode==0x1 && fidp->unique==0x1));
688     isRoot = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
689               fidp->volume==AFS_FAKE_ROOT_VOL_ID &&
690               fidp->vnode==0x1 && fidp->unique==0x1);
691     if (cm_freelanceEnabled && isRoot) {
692         osi_Log0(afsd_logp,"cm_GetSCache Freelance and isRoot");
693         /* freelance: if we are trying to get the root scp for the first
694          * time, we will just put in a place holder entry.
699     if (cm_freelanceEnabled && special) {
700         char mp[MOUNTPOINTLEN] = "";
703         osi_Log0(afsd_logp,"cm_GetSCache Freelance and special");
        /* fabricate the mount-point string/type from the local table;
         * vnodes >= 2 index cm_localMountPoints (vnode - 2) */
704         lock_ObtainMutex(&cm_Freelance_Lock);
705         if (fidp->vnode >= 2 && fidp->vnode - 2 < cm_noLocalMountPoints) {
706             strncpy(mp,(cm_localMountPoints+fidp->vnode-2)->mountPointStringp, MOUNTPOINTLEN);
707             mp[MOUNTPOINTLEN-1] = '\0';
708             if ( !strnicmp(mp, "msdfs:", strlen("msdfs:")) )
709                 fileType = CM_SCACHETYPE_DFSLINK;
711                 fileType = (cm_localMountPoints+fidp->vnode-2)->fileType;
713             fileType = CM_SCACHETYPE_INVALID;
716         lock_ReleaseMutex(&cm_Freelance_Lock);
718         scp = cm_GetNewSCache();
720             osi_Log0(afsd_logp,"cm_GetSCache unable to obtain *new* scache entry");
721             lock_ReleaseWrite(&cm_scacheLock);
722             return CM_ERROR_WOULDBLOCK;
725 #if not_too_dangerous
726         /* dropping the cm_scacheLock allows more than one thread
727          * to obtain the same cm_scache_t from the LRU list.  Since
728          * the refCount is known to be zero at this point we have to
729          * assume that no one else is using the one this is returned.
731         lock_ReleaseWrite(&cm_scacheLock);
732         lock_ObtainWrite(&scp->rw);
733         lock_ObtainWrite(&cm_scacheLock);
736         scp->dotdotFid.cell=AFS_FAKE_ROOT_CELL_ID;
737         scp->dotdotFid.volume=AFS_FAKE_ROOT_VOL_ID;
738         scp->dotdotFid.unique=1;
739         scp->dotdotFid.vnode=1;
740         scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
741         scp->nextp=cm_data.scacheHashTablep[hash];
742         cm_data.scacheHashTablep[hash]=scp;
743         scp->flags |= CM_SCACHEFLAG_INHASH;
745         osi_Log1(afsd_logp,"cm_GetSCache (freelance) sets refCount to 1 scp 0x%x", scp);
746         scp->fileType = fileType;
747         scp->length.LowPart = (DWORD)strlen(mp)+4;
748         scp->length.HighPart = 0;
749         strncpy(scp->mountPointStringp,mp,MOUNTPOINTLEN);
751         scp->unixModeBits=0777;
752         scp->clientModTime=FakeFreelanceModTime;
753         scp->serverModTime=FakeFreelanceModTime;
754         scp->parentUnique = 0x1;
755         scp->parentVnode=0x1;
757         scp->dataVersion=cm_data.fakeDirVersion;
758         scp->bufDataVersionLow=cm_data.fakeDirVersion;
759         scp->lockDataVersion=-1; /* no lock yet */
760 #if not_too_dangerous
761         lock_ReleaseWrite(&scp->rw);
764         lock_ReleaseWrite(&cm_scacheLock);
765 #ifdef DEBUG_REFCOUNT
766         afsi_log("%s:%d cm_GetSCache (2) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
767         osi_Log1(afsd_logp,"cm_GetSCache (2) outScpp 0x%p", scp);
772 #endif /* AFS_FREELANCE_CLIENT */
774     /* otherwise, we need to find the volume */
775     if (!cm_freelanceEnabled || !isRoot) {
776         lock_ReleaseWrite(&cm_scacheLock);      /* for perf. reasons */
777         cellp = cm_FindCellByID(fidp->cell, 0);
779             return CM_ERROR_NOSUCHCELL;
781         code = cm_FindVolumeByID(cellp, fidp->volume, userp, reqp, CM_GETVOL_FLAG_CREATE, &volp);
784         lock_ObtainWrite(&cm_scacheLock);
787     /* otherwise, we have the volume, now reverify that the scp doesn't
788      * exist, and proceed.
790     for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
791         if (cm_FidCmp(fidp, &scp->fid) == 0) {
792 #ifdef DEBUG_REFCOUNT
793             afsi_log("%s:%d cm_GetSCache (3) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
794             osi_Log1(afsd_logp,"cm_GetSCache (3) outScpp 0x%p", scp);
796             cm_HoldSCacheNoLock(scp);
797             cm_AdjustScacheLRU(scp);
798             lock_ReleaseWrite(&cm_scacheLock);
806     /* now, if we don't have the fid, recycle something */
807     scp = cm_GetNewSCache();
809         osi_Log0(afsd_logp,"cm_GetNewSCache unable to obtain *new* scache entry");
810         lock_ReleaseWrite(&cm_scacheLock);
813         return CM_ERROR_WOULDBLOCK;
815     osi_Log2(afsd_logp,"cm_GetNewSCache returns scp 0x%x flags 0x%x", scp, scp->flags);
817     osi_assertx(!(scp->flags & CM_SCACHEFLAG_INHASH), "CM_SCACHEFLAG_INHASH set");
819 #if not_too_dangerous
820     /* dropping the cm_scacheLock allows more than one thread
821      * to obtain the same cm_scache_t from the LRU list.  Since
822      * the refCount is known to be zero at this point we have to
823      * assume that no one else is using the one this is returned.
825     lock_ReleaseWrite(&cm_scacheLock);
826     lock_ObtainWrite(&scp->rw);
827     lock_ObtainWrite(&cm_scacheLock);
830     if (!cm_freelanceEnabled || !isRoot) {
831         /* if this scache entry represents a volume root then we need
832          * to copy the dotdotFipd from the volume structure where the
833          * "master" copy is stored (defect 11489)
835         if (scp->fid.vnode == 1 && scp->fid.unique == 1) {
836             scp->dotdotFid = volp->dotdotFid;
839         if (volp->ro.ID == fidp->volume)
840             scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
841         else if (volp->bk.ID == fidp->volume)
842             scp->flags |= CM_SCACHEFLAG_RO;
846     scp->nextp = cm_data.scacheHashTablep[hash];
847     cm_data.scacheHashTablep[hash] = scp;
848     scp->flags |= CM_SCACHEFLAG_INHASH;
850     osi_Log1(afsd_logp,"cm_GetSCache sets refCount to 1 scp 0x%x", scp);
851 #if not_too_dangerous
852     lock_ReleaseWrite(&scp->rw);
855     /* XXX - The following fields in the cm_scache are
861     lock_ReleaseWrite(&cm_scacheLock);
863     /* now we have a held scache entry; just return it */
865 #ifdef DEBUG_REFCOUNT
866     afsi_log("%s:%d cm_GetSCache (4) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
867     osi_Log1(afsd_logp,"cm_GetSCache (4) outScpp 0x%p", scp);
872 /* Returns a held reference to the scache's parent
/*
 * Build the parent's fid from scp's parentVnode/parentUnique (same cell
 * and volume), and — unless scp is its own parent (a volume root) — look
 * it up in the hash table under cm_scacheLock, returning it held, or NULL
 * if not cached.
 */
874 cm_scache_t * cm_FindSCacheParent(cm_scache_t * scp)
879     cm_scache_t * pscp = NULL;
881     lock_ObtainWrite(&cm_scacheLock);
882     cm_SetFid(&parent_fid, scp->fid.cell, scp->fid.volume, scp->parentVnode, scp->parentUnique);
884     if (cm_FidCmp(&scp->fid, &parent_fid)) {
885         i = CM_SCACHE_HASH(&parent_fid);
886         for (pscp = cm_data.scacheHashTablep[i]; pscp; pscp = pscp->nextp) {
887             if (!cm_FidCmp(&pscp->fid, &parent_fid)) {
888                 cm_HoldSCacheNoLock(pscp);
894     lock_ReleaseWrite(&cm_scacheLock);
/*
 * Record the calling thread as a waiter on scp for a cm_SyncOp request.
 * A waiter record is taken from the cm_allFreeWaiters free list (or
 * malloc'd when empty — the malloc result is not checked here), stamped
 * with the thread id, and appended to scp's wait queue while holding a
 * reference on scp.  Asynchronous store operations never queue.
 */
899 void cm_SyncOpAddToWaitQueue(cm_scache_t * scp, afs_int32 flags, cm_buf_t * bufp)
901     cm_scache_waiter_t * w;
903     /* Do not use the queue for asynchronous store operations */
904     if (flags == CM_SCACHESYNC_ASYNCSTORE)
907     lock_ObtainWrite(&cm_scacheLock);
908     if (cm_allFreeWaiters == NULL) {
909         w = malloc(sizeof(*w));
910         memset(w, 0, sizeof(*w));
912         w = (cm_scache_waiter_t *) cm_allFreeWaiters;
913         osi_QRemove(&cm_allFreeWaiters, (osi_queue_t *) w);
916     w->threadId = thrd_Current();
918     cm_HoldSCacheNoLock(scp);
922     osi_QAddT(&scp->waitQueueH, &scp->waitQueueT, (osi_queue_t *) w);
923     lock_ReleaseWrite(&cm_scacheLock);
925     osi_Log2(afsd_logp, "cm_SyncOpAddToWaitQueue : Adding thread to wait queue scp 0x%p w 0x%p", scp, w);
/*
 * Decide whether the current thread may proceed after waiting on scp.
 * Finds the first waiter matching (flags, bufp) on scp's wait queue; if
 * that waiter is not this thread, someone queued earlier gets priority
 * (the MISS path).  On a HIT the waiter record is removed, scp's
 * reference released, and the record returned to the cm_allFreeWaiters
 * free list.  Asynchronous store operations bypass the queue entirely.
 * NOTE(review): the return statements are elided from this listing.
 */
928 int cm_SyncOpCheckContinue(cm_scache_t * scp, afs_int32 flags, cm_buf_t * bufp)
930     cm_scache_waiter_t * w;
933     /* Do not use the queue for asynchronous store operations */
934     if (flags == CM_SCACHESYNC_ASYNCSTORE)
937     osi_Log0(afsd_logp, "cm_SyncOpCheckContinue checking for continuation");
939     lock_ObtainRead(&cm_scacheLock);
940     for (w = (cm_scache_waiter_t *)scp->waitQueueH;
942          w = (cm_scache_waiter_t *)osi_QNext((osi_queue_t *) w)) {
943         if (w->flags == flags && w->bufp == bufp) {
948     osi_assertx(w != NULL, "null cm_scache_waiter_t");
949     this_is_me = (w->threadId == thrd_Current());
950     lock_ReleaseRead(&cm_scacheLock);
953         osi_Log1(afsd_logp, "cm_SyncOpCheckContinue MISS: Waiter 0x%p", w);
957     osi_Log1(afsd_logp, "cm_SyncOpCheckContinue HIT: Waiter 0x%p", w);
959     lock_ObtainWrite(&cm_scacheLock);
960     osi_QRemoveHT(&scp->waitQueueH, &scp->waitQueueT, (osi_queue_t *) w);
961     cm_ReleaseSCacheNoLock(scp);
962     memset(w, 0, sizeof(*w));
963     osi_QAdd(&cm_allFreeWaiters, (osi_queue_t *) w);
964     lock_ReleaseWrite(&cm_scacheLock);
970 /* synchronize a fetch, store, read, write, fetch status or store status.
971 * Called with scache mutex held, and returns with it held, but temporarily
972 * drops it during the fetch.
974 * At most one flag can be on in flags, if this is an RPC request.
976 * Also, if we're fetching or storing data, we must ensure that we have a buffer.
978 * There are a lot of weird restrictions here; here's an attempt to explain the
979 * rationale for the concurrency restrictions implemented in this function.
981 * First, although the file server will break callbacks when *another* machine
982 * modifies a file or status block, the client itself is responsible for
983 * concurrency control on its own requests. Callback breaking events are rare,
984 * and simply invalidate any concurrent new status info.
986 * In the absence of callback breaking messages, we need to know how to
987 * synchronize incoming responses describing updates to files. We synchronize
988 * operations that update the data version by comparing the data versions.
989 * However, updates that do not update the data, but only the status, can't be
990 * synchronized with fetches or stores, since there's nothing to compare
991 * to tell which operation executed first at the server.
993 * Thus, we can allow multiple ops that change file data, or dir data, and
994 * fetches. However, status storing ops have to be done serially.
996 * Furthermore, certain data-changing ops are incompatible: we can't read or
997 * write a buffer while doing a truncate. We can't read and write the same
998 * buffer at the same time, or write while fetching or storing, or read while
999 * fetching a buffer (this may change). We can't fetch and store at the same
1002 * With respect to status, we can't read and write at the same time, read while
1003 * fetching, write while fetching or storing, or fetch and store at the same time.
1005 * We can't allow a get callback RPC to run in concurrently with something that
1006 * will return updated status, since we could start a call, have the server
1007 * return status, have another machine make an update to the status (which
1008 * doesn't change serverModTime), have the original machine get a new callback,
1009 * and then have the original machine merge in the early, old info from the
1010 * first call. At this point, the easiest way to avoid this problem is to have
1011 * getcallback calls conflict with all others for the same vnode. Other calls
1012 * to cm_MergeStatus that aren't associated with calls to cm_SyncOp on the same
1013 * vnode must be careful not to merge in their status unless they have obtained
1014 * a callback from the start of their call.
1016 * Note added 1/23/96
1017 * Concurrent StoreData RPC's can cause trouble if the file is being extended.
1018 * Each such RPC passes a FileLength parameter, which the server uses to do
1019 * pre-truncation if necessary. So if two RPC's are processed out of order at
1020 * the server, the one with the smaller FileLength will be processed last,
1021 * possibly resulting in a bogus truncation. The simplest way to avoid this
1022 * is to serialize all StoreData RPC's. This is the reason we defined
1023 * CM_SCACHESYNC_STOREDATA_EXCL and CM_SCACHEFLAG_DATASTORING.
1025 long cm_SyncOp(cm_scache_t *scp, cm_buf_t *bufp, cm_user_t *userp, cm_req_t *reqp,
1026 afs_uint32 rights, afs_uint32 flags)
1028 osi_queueData_t *qdp;
1031 afs_uint32 outRights;
1033 afs_uint32 sleep_scp_flags = 0;
1034 afs_uint32 sleep_buf_cmflags = 0;
1035 afs_uint32 sleep_scp_bufs = 0;
1038 /* lookup this first */
1039 bufLocked = flags & CM_SCACHESYNC_BUFLOCKED;
1042 osi_assertx(bufp->refCount > 0, "cm_buf_t refCount 0");
1045 /* Do the access check. Now we don't really do the access check
1046 * atomically, since the caller doesn't expect the parent dir to be
1047 * returned locked, and that is what we'd have to do to prevent a
1048 * callback breaking message on the parent due to a setacl call from
1049 * being processed while we're running. So, instead, we check things
1050 * here, and if things look fine with the access, we proceed to finish
1051 * the rest of this check. Sort of a hack, but probably good enough.
1055 if (flags & CM_SCACHESYNC_FETCHSTATUS) {
1056 /* if we're bringing in a new status block, ensure that
1057 * we aren't already doing so, and that no one is
1058 * changing the status concurrently, either. We need
1059 * to do this, even if the status is of a different
1060 * type, since we don't have the ability to figure out,
1061 * in the AFS 3 protocols, which status-changing
1062 * operation ran first, or even which order a read and
1063 * a write occurred in.
1065 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
1066 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
1067 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want FETCHSTATUS", scp);
1071 if (flags & (CM_SCACHESYNC_STORESIZE | CM_SCACHESYNC_STORESTATUS
1072 | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_GETCALLBACK)) {
1073 /* if we're going to make an RPC to change the status, make sure
1074 * that no one is bringing in or sending out the status.
1076 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING |
1077 CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
1078 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want STORESIZE|STORESTATUS|SETSIZE|GETCALLBACK", scp);
1081 if (scp->bufReadsp || scp->bufWritesp) {
1082 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is bufRead|bufWrite want STORESIZE|STORESTATUS|SETSIZE|GETCALLBACK", scp);
1086 if (flags & CM_SCACHESYNC_FETCHDATA) {
1087 /* if we're bringing in a new chunk of data, make sure that
1088 * nothing is happening to that chunk, and that we aren't
1089 * changing the basic file status info, either.
1091 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
1092 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
1093 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want FETCHDATA", scp);
1096 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING | CM_BUF_CMWRITING))) {
1097 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING|BUF_CMSTORING|BUF_CMWRITING want FETCHDATA", scp, bufp);
1101 if (flags & CM_SCACHESYNC_STOREDATA) {
1102 /* same as fetch data */
1103 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
1104 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
1105 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want STOREDATA", scp);
1108 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING | CM_BUF_CMWRITING))) {
1109 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING|BUF_CMSTORING|BUF_CMWRITING want STOREDATA", scp, bufp);
1114 if (flags & CM_SCACHESYNC_STOREDATA_EXCL) {
1115 /* Don't allow concurrent StoreData RPC's */
1116 if (scp->flags & CM_SCACHEFLAG_DATASTORING) {
1117 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is DATASTORING want STOREDATA_EXCL", scp);
1122 if (flags & CM_SCACHESYNC_ASYNCSTORE) {
1123 /* Don't allow more than one BKG store request */
1124 if (scp->flags & CM_SCACHEFLAG_ASYNCSTORING) {
1125 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is ASYNCSTORING want ASYNCSTORE", scp);
1130 if (flags & CM_SCACHESYNC_LOCK) {
1131 /* Don't allow concurrent fiddling with lock lists */
1132 if (scp->flags & CM_SCACHEFLAG_LOCKING) {
1133 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is LOCKING want LOCK", scp);
1138 /* now the operations that don't correspond to making RPCs */
1139 if (flags & CM_SCACHESYNC_GETSTATUS) {
1140 /* we can use the status that's here, if we're not
1141 * bringing in new status.
1143 if (scp->flags & (CM_SCACHEFLAG_FETCHING)) {
1144 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING want GETSTATUS", scp);
1148 if (flags & CM_SCACHESYNC_SETSTATUS) {
1149 /* we can make a change to the local status, as long as
1150 * the status isn't changing now.
1152 * If we're fetching or storing a chunk of data, we can
1153 * change the status locally, since the fetch/store
1154 * operations don't change any of the data that we're
1157 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING | CM_SCACHEFLAG_SIZESTORING)) {
1158 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING want SETSTATUS", scp);
1162 if (flags & CM_SCACHESYNC_READ) {
1163 /* we're going to read the data, make sure that the
1164 * status is available, and that the data is here. It
1165 * is OK to read while storing the data back.
1167 if (scp->flags & CM_SCACHEFLAG_FETCHING) {
1168 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING want READ", scp);
1171 if (bufp && ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == CM_BUF_CMFETCHING)) {
1172 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING want READ", scp, bufp);
1175 if (bufp && (bufp->cmFlags & CM_BUF_CMWRITING)) {
1176 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMWRITING want READ", scp, bufp);
1180 if (flags & CM_SCACHESYNC_WRITE) {
1181 /* don't write unless the status is stable and the chunk
1184 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
1185 | CM_SCACHEFLAG_SIZESTORING)) {
1186 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING want WRITE", scp);
1189 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING |
1191 CM_BUF_CMWRITING))) {
1192 osi_Log3(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is %s want WRITE",
1194 ((bufp->cmFlags & CM_BUF_CMFETCHING) ? "CM_BUF_CMFETCHING":
1195 ((bufp->cmFlags & CM_BUF_CMSTORING) ? "CM_BUF_CMSTORING" :
1196 ((bufp->cmFlags & CM_BUF_CMWRITING) ? "CM_BUF_CMWRITING" :
1202 // yj: modified this so that callback only checked if we're
1203 // not checking something on /afs
1204 /* fix the conditional to match the one in cm_HaveCallback */
1205 if ((flags & CM_SCACHESYNC_NEEDCALLBACK)
1206 #ifdef AFS_FREELANCE_CLIENT
1207 && (!cm_freelanceEnabled ||
1208 !(scp->fid.vnode==0x1 && scp->fid.unique==0x1) ||
1209 scp->fid.cell!=AFS_FAKE_ROOT_CELL_ID ||
1210 scp->fid.volume!=AFS_FAKE_ROOT_VOL_ID ||
1211 cm_fakeDirCallback < 2)
1212 #endif /* AFS_FREELANCE_CLIENT */
1214 if ((flags & CM_SCACHESYNC_FORCECB) || !cm_HaveCallback(scp)) {
1215 osi_Log1(afsd_logp, "CM SyncOp getting callback on scp 0x%p",
1218 lock_ReleaseMutex(&bufp->mx);
1219 code = cm_GetCallback(scp, userp, reqp, (flags & CM_SCACHESYNC_FORCECB)?1:0);
1221 lock_ReleaseWrite(&scp->rw);
1222 lock_ObtainMutex(&bufp->mx);
1223 lock_ObtainWrite(&scp->rw);
1227 flags &= ~CM_SCACHESYNC_FORCECB; /* only force once */
1233 /* can't check access rights without a callback */
1234 osi_assertx(flags & CM_SCACHESYNC_NEEDCALLBACK, "!CM_SCACHESYNC_NEEDCALLBACK");
1236 if ((rights & PRSFS_WRITE) && (scp->flags & CM_SCACHEFLAG_RO))
1237 return CM_ERROR_READONLY;
1239 if (cm_HaveAccessRights(scp, userp, rights, &outRights)) {
1240 if (~outRights & rights)
1241 return CM_ERROR_NOACCESS;
1244 /* we don't know the required access rights */
1245 if (bufLocked) lock_ReleaseMutex(&bufp->mx);
1246 code = cm_GetAccessRights(scp, userp, reqp);
1248 lock_ReleaseWrite(&scp->rw);
1249 lock_ObtainMutex(&bufp->mx);
1250 lock_ObtainWrite(&scp->rw);
1258 /* if we get here, we're happy */
1262 /* first check if we're not supposed to wait: fail
1263 * in this case, returning with everything still locked.
1265 if (flags & CM_SCACHESYNC_NOWAIT)
1266 return CM_ERROR_WOULDBLOCK;
1268 /* These are used for minidump debugging */
1269 sleep_scp_flags = scp->flags; /* so we know why we slept */
1270 sleep_buf_cmflags = bufp ? bufp->cmFlags : 0;
1271 sleep_scp_bufs = (scp->bufReadsp ? 1 : 0) | (scp->bufWritesp ? 2 : 0);
1273 /* wait here, then try again */
1274 osi_Log1(afsd_logp, "CM SyncOp sleeping scp 0x%p", scp);
1275 if ( scp->flags & CM_SCACHEFLAG_WAITING ) {
1277 scp->waitRequests++;
1278 osi_Log3(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING already set for 0x%p; %d threads; %d requests",
1279 scp, scp->waitCount, scp->waitRequests);
1281 osi_Log1(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING set for 0x%p", scp);
1282 scp->flags |= CM_SCACHEFLAG_WAITING;
1283 scp->waitCount = scp->waitRequests = 1;
1286 cm_SyncOpAddToWaitQueue(scp, flags, bufp);
1290 lock_ReleaseMutex(&bufp->mx);
1291 osi_SleepW((LONG_PTR) &scp->flags, &scp->rw);
1293 lock_ObtainMutex(&bufp->mx);
1294 lock_ObtainWrite(&scp->rw);
1295 } while (!cm_SyncOpCheckContinue(scp, flags, bufp));
1297 smb_UpdateServerPriority();
1300 osi_Log3(afsd_logp, "CM SyncOp woke! scp 0x%p; still waiting %d threads of %d requests",
1301 scp, scp->waitCount, scp->waitRequests);
1302 if (scp->waitCount == 0) {
1303 osi_Log1(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING reset for 0x%p", scp);
1304 scp->flags &= ~CM_SCACHEFLAG_WAITING;
1305 scp->waitRequests = 0;
1307 } /* big while loop */
1309 /* now, update the recorded state for RPC-type calls */
1310 if (flags & CM_SCACHESYNC_FETCHSTATUS)
1311 scp->flags |= CM_SCACHEFLAG_FETCHING;
1312 if (flags & CM_SCACHESYNC_STORESTATUS)
1313 scp->flags |= CM_SCACHEFLAG_STORING;
1314 if (flags & CM_SCACHESYNC_STORESIZE)
1315 scp->flags |= CM_SCACHEFLAG_SIZESTORING;
1316 if (flags & CM_SCACHESYNC_GETCALLBACK)
1317 scp->flags |= CM_SCACHEFLAG_GETCALLBACK;
1318 if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
1319 scp->flags |= CM_SCACHEFLAG_DATASTORING;
1320 if (flags & CM_SCACHESYNC_ASYNCSTORE)
1321 scp->flags |= CM_SCACHEFLAG_ASYNCSTORING;
1322 if (flags & CM_SCACHESYNC_LOCK)
1323 scp->flags |= CM_SCACHEFLAG_LOCKING;
1325 /* now update the buffer pointer */
1326 if (flags & CM_SCACHESYNC_FETCHDATA) {
1327 /* ensure that the buffer isn't already in the I/O list */
1329 for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1330 tbufp = osi_GetQData(qdp);
1331 osi_assertx(tbufp != bufp, "unexpected cm_buf_t value");
1335 /* queue a held reference to the buffer in the "reading" I/O list */
1336 qdp = osi_QDAlloc();
1337 osi_SetQData(qdp, bufp);
1340 bufp->cmFlags |= CM_BUF_CMFETCHING;
1342 osi_QAdd((osi_queue_t **) &scp->bufReadsp, &qdp->q);
1345 if (flags & CM_SCACHESYNC_STOREDATA) {
1346 /* ensure that the buffer isn't already in the I/O list */
1348 for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1349 tbufp = osi_GetQData(qdp);
1350 osi_assertx(tbufp != bufp, "unexpected cm_buf_t value");
1354 /* queue a held reference to the buffer in the "writing" I/O list */
1355 qdp = osi_QDAlloc();
1356 osi_SetQData(qdp, bufp);
1359 bufp->cmFlags |= CM_BUF_CMSTORING;
1361 osi_QAdd((osi_queue_t **) &scp->bufWritesp, &qdp->q);
1364 if (flags & CM_SCACHESYNC_WRITE) {
1365 /* mark the buffer as being written to. */
1367 bufp->cmFlags |= CM_BUF_CMWRITING;
1374 /* for those syncops that setup for RPCs.
1375  * Called with scache locked.
 *
 * cm_SyncOpDone - undo the state recorded by a successful cm_SyncOp call.
 *
 * Clears the CM_SCACHEFLAG_* "operation in progress" bits selected by
 * 'flags' (the mirror image of the bits cm_SyncOp sets), removes 'bufp'
 * from the scp's in-flight read/write buffer queues for FETCHDATA /
 * STOREDATA, clears the per-buffer CM_BUF_CM* flags, and wakes any
 * threads sleeping in cm_SyncOp (they sleep on &scp->flags) or waiting
 * on the buffer.
 *
 * scp   - stat cache entry; scp->rw must be held for write (asserted).
 * bufp  - the buffer passed to the matching cm_SyncOp call, or NULL.
 * flags - the same CM_SCACHESYNC_* flags passed to cm_SyncOp.
 */
1377 void cm_SyncOpDone(cm_scache_t *scp, cm_buf_t *bufp, afs_uint32 flags)
1379     osi_queueData_t *qdp;
1382     lock_AssertWrite(&scp->rw);
1384     /* now, update the recorded state for RPC-type calls */
1385     if (flags & CM_SCACHESYNC_FETCHSTATUS)
1386         scp->flags &= ~CM_SCACHEFLAG_FETCHING;
1387     if (flags & CM_SCACHESYNC_STORESTATUS)
1388         scp->flags &= ~CM_SCACHEFLAG_STORING;
1389     if (flags & CM_SCACHESYNC_STORESIZE)
1390         scp->flags &= ~CM_SCACHEFLAG_SIZESTORING;
1391     if (flags & CM_SCACHESYNC_GETCALLBACK)
1392         scp->flags &= ~CM_SCACHEFLAG_GETCALLBACK;
1393     if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
1394         scp->flags &= ~CM_SCACHEFLAG_DATASTORING;
1395     if (flags & CM_SCACHESYNC_ASYNCSTORE)
1396         scp->flags &= ~CM_SCACHEFLAG_ASYNCSTORING;
1397     if (flags & CM_SCACHESYNC_LOCK)
1398         scp->flags &= ~CM_SCACHEFLAG_LOCKING;
1400     /* now update the buffer pointer */
1401     if (flags & CM_SCACHESYNC_FETCHDATA) {
1404         /* ensure that the buffer isn't already in the I/O list */
1405         for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1406             tbufp = osi_GetQData(qdp);
1411         osi_QRemove((osi_queue_t **) &scp->bufReadsp, &qdp->q);
1416         bufp->cmFlags &= ~(CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED);
1417         if (bufp->flags & CM_BUF_WAITING) {
1418             osi_Log2(afsd_logp, "CM SyncOpDone Waking [scp 0x%p] bufp 0x%p", scp, bufp);
             /* FIX: wake sleepers keyed on the buffer itself.  The old
              * code passed '&bufp' - the address of this function's
              * local parameter variable - which no other thread can be
              * sleeping on, so buffer waiters were never woken. */
1419             osi_Wakeup((LONG_PTR) bufp);
1426     /* now update the buffer pointer */
1427     if (flags & CM_SCACHESYNC_STOREDATA) {
1429         /* ensure that the buffer isn't already in the I/O list */
1430         for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1431             tbufp = osi_GetQData(qdp);
1436         osi_QRemove((osi_queue_t **) &scp->bufWritesp, &qdp->q);
1441         bufp->cmFlags &= ~CM_BUF_CMSTORING;
1442         if (bufp->flags & CM_BUF_WAITING) {
1443             osi_Log2(afsd_logp, "CM SyncOpDone Waking [scp 0x%p] bufp 0x%p", scp, bufp);
             /* FIX: same as above - wake on the buffer address, not the
              * address of the local pointer variable. */
1444             osi_Wakeup((LONG_PTR) bufp);
1451     if (flags & CM_SCACHESYNC_WRITE) {
1453         osi_assertx(bufp->cmFlags & CM_BUF_CMWRITING, "!CM_BUF_CMWRITING");
1455         bufp->cmFlags &= ~CM_BUF_CMWRITING;
1459     /* and wakeup anyone who is waiting */
     /* cm_SyncOp sleeps on &scp->flags (osi_SleepW), so this key matches */
1460     if (scp->flags & CM_SCACHEFLAG_WAITING) {
1461         osi_Log1(afsd_logp, "CM SyncOpDone Waking scp 0x%p", scp);
1462         osi_Wakeup((LONG_PTR) &scp->flags);
1466 /* merge in a response from an RPC. The scp must be locked, and the callback
1469 * Don't overwrite any status info that is dirty, since we could have a store
1470 * operation (such as store data) that merges some info in, and we don't want
1471 * to lose the local updates. Typically, there aren't many updates we do
1472 * locally, anyway, probably only mtime.
1474 * There is probably a bug in here where a chmod (which doesn't change
1475 * serverModTime) that occurs between two fetches, both of whose responses are
1476 * handled after the callback breaking is done, but only one of whose calls
1477 * started before that, can cause old info to be merged from the first call.
1479 void cm_MergeStatus(cm_scache_t *dscp,
1480 cm_scache_t *scp, AFSFetchStatus *statusp,
1481 AFSVolSync *volsyncp,
1482 cm_user_t *userp, afs_uint32 flags)
1484 afs_uint64 dataVersion;
1486 // yj: i want to create some fake status for the /afs directory and the
1487 // entries under that directory
1488 #ifdef AFS_FREELANCE_CLIENT
1489 if (cm_freelanceEnabled && scp == cm_data.rootSCachep) {
1490 osi_Log0(afsd_logp,"cm_MergeStatus Freelance cm_data.rootSCachep");
1491 statusp->InterfaceVersion = 0x1;
1492 statusp->FileType = CM_SCACHETYPE_DIRECTORY;
1493 statusp->LinkCount = scp->linkCount;
1494 statusp->Length = cm_fakeDirSize;
1495 statusp->Length_hi = 0;
1496 statusp->DataVersion = (afs_uint32)(cm_data.fakeDirVersion & 0xFFFFFFFF);
1497 statusp->Author = 0x1;
1498 statusp->Owner = 0x0;
1499 statusp->CallerAccess = 0x9;
1500 statusp->AnonymousAccess = 0x9;
1501 statusp->UnixModeBits = 0777;
1502 statusp->ParentVnode = 0x1;
1503 statusp->ParentUnique = 0x1;
1504 statusp->ResidencyMask = 0;
1505 statusp->ClientModTime = FakeFreelanceModTime;
1506 statusp->ServerModTime = FakeFreelanceModTime;
1508 statusp->SyncCounter = 0;
1509 statusp->dataVersionHigh = (afs_uint32)(cm_data.fakeDirVersion >> 32);
1510 statusp->errorCode = 0;
1512 #endif /* AFS_FREELANCE_CLIENT */
1514 if (statusp->errorCode != 0) {
1515 scp->flags |= CM_SCACHEFLAG_EACCESS;
1516 osi_Log2(afsd_logp, "Merge, Failure scp %x code 0x%x", scp, statusp->errorCode);
1518 scp->fileType = 0; /* unknown */
1520 scp->serverModTime = 0;
1521 scp->clientModTime = 0;
1522 scp->length.LowPart = 0;
1523 scp->length.HighPart = 0;
1524 scp->serverLength.LowPart = 0;
1525 scp->serverLength.HighPart = 0;
1529 scp->unixModeBits = 0;
1531 scp->dataVersion = 0;
1532 scp->bufDataVersionLow = 0;
1535 scp->parentVnode = dscp->fid.vnode;
1536 scp->parentUnique = dscp->fid.unique;
1538 scp->parentVnode = 0;
1539 scp->parentUnique = 0;
1543 scp->flags &= ~CM_SCACHEFLAG_EACCESS;
1546 dataVersion = statusp->dataVersionHigh;
1548 dataVersion |= statusp->DataVersion;
1550 if (!(flags & CM_MERGEFLAG_FORCE) && dataVersion < scp->dataVersion) {
1551 struct cm_cell *cellp;
1553 cellp = cm_FindCellByID(scp->fid.cell, 0);
1554 if (scp->cbServerp) {
1555 struct cm_volume *volp = NULL;
1557 cm_FindVolumeByID(cellp, scp->fid.volume, userp,
1558 (cm_req_t *) NULL, CM_GETVOL_FLAG_CREATE, &volp);
1559 osi_Log2(afsd_logp, "old data from server %x volume %s",
1560 scp->cbServerp->addr.sin_addr.s_addr,
1561 volp ? volp->namep : "(unknown)");
1565 osi_Log3(afsd_logp, "Bad merge, scp %x, scp dv %d, RPC dv %d",
1566 scp, scp->dataVersion, dataVersion);
1567 /* we have a number of data fetch/store operations running
1568 * concurrently, and we can tell which one executed last at the
1569 * server by its mtime.
1570 * Choose the one with the largest mtime, and ignore the rest.
1572 * These concurrent calls are incompatible with setting the
1573 * mtime, so we won't have a locally changed mtime here.
1575 * We could also have ACL info for a different user than usual,
1576 * in which case we have to do that part of the merge, anyway.
1577 * We won't have to worry about the info being old, since we
1578 * won't have concurrent calls
1579 * that change file status running from this machine.
1581 * Added 3/17/98: if we see data version regression on an RO
1582 * file, it's probably due to a server holding an out-of-date
1583 * replica, rather than to concurrent RPC's. Failures to
1584 * release replicas are now flagged by the volserver, but only
1585 * since AFS 3.4 5.22, so there are plenty of clients getting
1586 * out-of-date replicas out there.
1588 * If we discover an out-of-date replica, by this time it's too
1589 * late to go to another server and retry. Also, we can't
1590 * reject the merge, because then there is no way for
1591 * GetAccess to do its work, and the caller gets into an
1592 * infinite loop. So we just grin and bear it.
1594 if (!(scp->flags & CM_SCACHEFLAG_RO))
1598 scp->serverModTime = statusp->ServerModTime;
1600 if (!(scp->mask & CM_SCACHEMASK_CLIENTMODTIME)) {
1601 scp->clientModTime = statusp->ClientModTime;
1603 if (!(scp->mask & CM_SCACHEMASK_LENGTH)) {
1604 scp->length.LowPart = statusp->Length;
1605 scp->length.HighPart = statusp->Length_hi;
1608 scp->serverLength.LowPart = statusp->Length;
1609 scp->serverLength.HighPart = statusp->Length_hi;
1611 scp->linkCount = statusp->LinkCount;
1612 scp->owner = statusp->Owner;
1613 scp->group = statusp->Group;
1614 scp->unixModeBits = statusp->UnixModeBits & 07777;
1616 if (statusp->FileType == File)
1617 scp->fileType = CM_SCACHETYPE_FILE;
1618 else if (statusp->FileType == Directory)
1619 scp->fileType = CM_SCACHETYPE_DIRECTORY;
1620 else if (statusp->FileType == SymbolicLink) {
1621 if ((scp->unixModeBits & 0111) == 0)
1622 scp->fileType = CM_SCACHETYPE_MOUNTPOINT;
1624 scp->fileType = CM_SCACHETYPE_SYMLINK;
1627 osi_Log2(afsd_logp, "Merge, Invalid File Type (%d), scp %x", statusp->FileType, scp);
1628 scp->fileType = CM_SCACHETYPE_INVALID; /* invalid */
1630 /* and other stuff */
1631 scp->parentVnode = statusp->ParentVnode;
1632 scp->parentUnique = statusp->ParentUnique;
1634 /* and merge in the private acl cache info, if this is more than the public
1635 * info; merge in the public stuff in any case.
1637 scp->anyAccess = statusp->AnonymousAccess;
1639 if (userp != NULL) {
1640 cm_AddACLCache(scp, userp, statusp->CallerAccess);
1643 if (scp->dataVersion != 0 &&
1644 (!(flags & (CM_MERGEFLAG_DIROP|CM_MERGEFLAG_STOREDATA)) && dataVersion != scp->dataVersion ||
1645 (flags & (CM_MERGEFLAG_DIROP|CM_MERGEFLAG_STOREDATA)) && dataVersion - scp->dataVersion > 1)) {
1647 * We now know that all of the data buffers that we have associated
1648 * with this scp are invalid. Subsequent operations will go faster
1649 * if the buffers are removed from the hash tables.
1651 * We do not remove directory buffers if the dataVersion delta is 1 because
1652 * those version numbers will be updated as part of the directory operation.
1654 * We do not remove storedata buffers because they will still be valid.
1659 cm_buf_t *bp, *prevBp, *nextBp;
1661 lock_ObtainWrite(&buf_globalLock);
1662 i = BUF_FILEHASH(&scp->fid);
1663 for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=nextBp)
1665 nextBp = bp->fileHashp;
1667 * if the buffer belongs to this stat cache entry
1668 * and the buffer mutex can be obtained, check the
1669 * reference count and if it is zero, remove the buffer
1670 * from the hash tables. If there are references,
1671 * the buffer might be updated to the current version
1672 * so leave it in place.
1674 if (cm_FidCmp(&scp->fid, &bp->fid) == 0 &&
1675 lock_TryMutex(&bp->mx)) {
1676 if (bp->refCount == 0 &&
1677 !(bp->flags & CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)) {
1678 prevBp = bp->fileHashBackp;
1679 bp->fileHashBackp = bp->fileHashp = NULL;
1681 prevBp->fileHashp = nextBp;
1683 cm_data.buf_fileHashTablepp[i] = nextBp;
1685 nextBp->fileHashBackp = prevBp;
1687 j = BUF_HASH(&bp->fid, &bp->offset);
1688 lbpp = &(cm_data.buf_scacheHashTablepp[j]);
1689 for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
1694 *lbpp = bp->hashp; /* hash out */
1697 bp->flags &= ~CM_BUF_INHASH;
1699 lock_ReleaseMutex(&bp->mx);
1702 lock_ReleaseWrite(&buf_globalLock);
1705 /* We maintain a range of buffer dataVersion values which are considered
1706 * valid. This avoids the need to update the dataVersion on each buffer
1707 * object during an uncontested storeData operation. As a result this
1708 * merge status no longer has performance characteristics derived from
1709 * the size of the file.
1711 if (((flags & CM_MERGEFLAG_STOREDATA) && dataVersion - scp->dataVersion > 1) ||
1712 (!(flags & CM_MERGEFLAG_STOREDATA) && scp->dataVersion != dataVersion) ||
1713 scp->bufDataVersionLow == 0)
1714 scp->bufDataVersionLow = dataVersion;
1716 scp->dataVersion = dataVersion;
1719 /* note that our stat cache info is incorrect, so force us eventually
1720  * to stat the file again.  There may be dirty data associated with
1721  * this vnode, and we want to preserve that information.
1723  * This function works by simply simulating a loss of the callback.
1725  * This function must be called with the scache locked.
 *
 * Side effects visible below: drops the callback server reference,
 * clears the callback flag, purges DNLC entries naming or contained in
 * this vnode, frees the cached ACL entries, invalidates any DFS mapping
 * for DFS links, and blanks the mount-point/symlink target string so it
 * is re-fetched on next use.
 */
1727 void cm_DiscardSCache(cm_scache_t *scp)
1729     lock_AssertWrite(&scp->rw);
     /* release our reference on the server that issued the callback */
1730     if (scp->cbServerp) {
1731         cm_PutServer(scp->cbServerp);
1732         scp->cbServerp = NULL;
1735     scp->flags &= ~CM_SCACHEFLAG_CALLBACK;
     /* purge name-lookup cache entries for this vnode (as dir and as target) */
1736     cm_dnlcPurgedp(scp);
1737     cm_dnlcPurgevp(scp);
1738     cm_FreeAllACLEnts(scp);
1740     if (scp->fileType == CM_SCACHETYPE_DFSLINK)
1741         cm_VolStatus_Invalidate_DFS_Mapping(scp);
1743     /* Force mount points and symlinks to be re-evaluated */
1744     scp->mountPointStringp[0] = '\0';
1747 void cm_AFSFidFromFid(AFSFid *afsFidp, cm_fid_t *fidp)
1749 afsFidp->Volume = fidp->volume;
1750 afsFidp->Vnode = fidp->vnode;
1751 afsFidp->Unique = fidp->unique;
1754 #ifdef DEBUG_REFCOUNT
/* Debug-build variant: takes the caller's file/line for refcount tracing. */
1755 void cm_HoldSCacheNoLockDbg(cm_scache_t *scp, char * file, long line)
1757 void cm_HoldSCacheNoLock(cm_scache_t *scp)
/* Take a reference on 'scp' while the caller already holds cm_scacheLock
 * in either mode (lock_AssertAny).  The increment is interlocked, so
 * concurrent holders under the shared read lock are safe. */
1762     osi_assertx(scp != NULL, "null cm_scache_t");
1763     lock_AssertAny(&cm_scacheLock);
1764     refCount = InterlockedIncrement(&scp->refCount);
1765 #ifdef DEBUG_REFCOUNT
     /* trace the new reference count in both the in-memory and file logs */
1766     osi_Log2(afsd_logp,"cm_HoldSCacheNoLock scp 0x%p ref %d",scp, refCount);
1767     afsi_log("%s:%d cm_HoldSCacheNoLock scp 0x%p, ref %d", file, line, scp, refCount);
1771 #ifdef DEBUG_REFCOUNT
/* Debug-build variant: takes the caller's file/line for refcount tracing. */
1772 void cm_HoldSCacheDbg(cm_scache_t *scp, char * file, long line)
1774 void cm_HoldSCache(cm_scache_t *scp)
/* Take a reference on 'scp', acquiring cm_scacheLock (read) itself.
 * Same as cm_HoldSCacheNoLock but for callers not already holding the
 * global scache lock. */
1779     osi_assertx(scp != NULL, "null cm_scache_t");
1780     lock_ObtainRead(&cm_scacheLock);
     /* interlocked, so the shared read lock suffices */
1781     refCount = InterlockedIncrement(&scp->refCount);
1782 #ifdef DEBUG_REFCOUNT
1783     osi_Log2(afsd_logp,"cm_HoldSCache scp 0x%p ref %d",scp, refCount);
1784     afsi_log("%s:%d cm_HoldSCache scp 0x%p ref %d", file, line, scp, refCount);
1786     lock_ReleaseRead(&cm_scacheLock);
1789 #ifdef DEBUG_REFCOUNT
/* Debug-build variant: takes the caller's file/line for refcount tracing. */
1790 void cm_ReleaseSCacheNoLockDbg(cm_scache_t *scp, char * file, long line)
1792 void cm_ReleaseSCacheNoLock(cm_scache_t *scp)
/* Drop a reference on 'scp' while the caller already holds cm_scacheLock
 * in either mode.  Panics (via the assert below) if the count would go
 * negative, i.e. a release without a matching hold. */
1796     osi_assertx(scp != NULL, "null cm_scache_t");
1797     lock_AssertAny(&cm_scacheLock);
1798     refCount = InterlockedDecrement(&scp->refCount);
1799 #ifdef DEBUG_REFCOUNT
1801     osi_Log1(afsd_logp,"cm_ReleaseSCacheNoLock about to panic scp 0x%x",scp);
1803     osi_assertx(refCount >= 0, "cm_scache_t refCount 0");
1804 #ifdef DEBUG_REFCOUNT
1805     osi_Log2(afsd_logp,"cm_ReleaseSCacheNoLock scp 0x%p ref %d",scp, refCount);
1806     afsi_log("%s:%d cm_ReleaseSCacheNoLock scp 0x%p ref %d", file, line, scp, refCount);
1810 #ifdef DEBUG_REFCOUNT
/* Debug-build variant: takes the caller's file/line for refcount tracing. */
1811 void cm_ReleaseSCacheDbg(cm_scache_t *scp, char * file, long line)
1813 void cm_ReleaseSCache(cm_scache_t *scp)
/* Drop a reference on 'scp', acquiring cm_scacheLock (read) itself.
 * Counterpart of cm_HoldSCache; asserts the count never goes negative. */
1818     osi_assertx(scp != NULL, "null cm_scache_t");
1819     lock_ObtainRead(&cm_scacheLock);
     /* interlocked decrement under the shared read lock */
1820     refCount = InterlockedDecrement(&scp->refCount);
1821 #ifdef DEBUG_REFCOUNT
1823     osi_Log1(afsd_logp,"cm_ReleaseSCache about to panic scp 0x%x",scp);
1825     osi_assertx(refCount >= 0, "cm_scache_t refCount 0");
1826 #ifdef DEBUG_REFCOUNT
1827     osi_Log2(afsd_logp,"cm_ReleaseSCache scp 0x%p ref %d",scp, refCount);
1828     afsi_log("%s:%d cm_ReleaseSCache scp 0x%p ref %d", file, line, scp, refCount);
1830     lock_ReleaseRead(&cm_scacheLock);
1833 /* just look for the scp entry to get filetype */
1834 /* doesn't need to be perfectly accurate, so locking doesn't matter too much */
/* Return the cached fileType for 'fidp' by scanning its scache hash
 * chain; the value returned when no entry matches is established outside
 * the visible lines.
 * NOTE(review): the chain is only read here; a read lock on cm_scacheLock
 * would likely suffice instead of the write lock - confirm against the
 * other writers of scacheHashTablep before changing. */
1835 int cm_FindFileType(cm_fid_t *fidp)
1840     hash = CM_SCACHE_HASH(fidp);
1842     osi_assertx(fidp->cell != 0, "unassigned cell value");
1844     lock_ObtainWrite(&cm_scacheLock);
     /* walk the hash bucket for this FID; first exact match wins */
1845     for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
1846         if (cm_FidCmp(fidp, &scp->fid) == 0) {
1847             lock_ReleaseWrite(&cm_scacheLock);
1848             return scp->fileType;
1851     lock_ReleaseWrite(&cm_scacheLock);
1855 /* dump all scp's that have reference count > 0 to a file.
1856  * cookie is used to identify this batch for easy parsing,
1857  * and it a string provided by a caller
 *
 * Writes two sections to 'outputFile': every scp on the allSCachesp
 * list with its FID/type/versions/flags, then the contents of each
 * scacheHashTablep bucket.  'lock' presumably controls whether
 * cm_scacheLock is taken around the dump - TODO confirm (the
 * conditional acquire is not visible in these lines).
 * NOTE(review): lines are built with sprintf into a fixed buffer whose
 * declaration is not visible here; consider snprintf to bound the
 * writes, since mountPointStringp is included in the output. */
1859 int cm_DumpSCache(FILE *outputFile, char *cookie, int lock)
1867     lock_ObtainRead(&cm_scacheLock);
1869     sprintf(output, "%s - dumping all scache - cm_data.currentSCaches=%d, cm_data.maxSCaches=%d\r\n", cookie, cm_data.currentSCaches, cm_data.maxSCaches);
1870     WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
     /* section 1: every scp on the global allSCachesp list */
1872     for (scp = cm_data.allSCachesp; scp; scp = scp->allNextp)
1874         sprintf(output, "%s scp=0x%p, fid (cell=%d, volume=%d, vnode=%d, unique=%d) type=%d dv=%I64d len=0x%I64x mp='%s' flags=0x%x cb=0x%x refCount=%u\r\n",
1875                 cookie, scp, scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique,
1876                 scp->fileType, scp->dataVersion, scp->length.QuadPart, scp->mountPointStringp, scp->flags,
1877                 (unsigned long)scp->cbExpires, scp->refCount);
1878         WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1881     sprintf(output, "%s - Done dumping all scache.\r\n", cookie);
1882     WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1883     sprintf(output, "%s - dumping cm_data.scacheHashTable - cm_data.scacheHashTableSize=%d\r\n", cookie, cm_data.scacheHashTableSize);
1884     WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
     /* section 2: the hash table, bucket by bucket */
1886     for (i = 0; i < cm_data.scacheHashTableSize; i++)
1888         for(scp = cm_data.scacheHashTablep[i]; scp; scp=scp->nextp)
1890             sprintf(output, "%s scp=0x%p, hash=%d, fid (cell=%d, volume=%d, vnode=%d, unique=%d)\r\n",
1891                     cookie, scp, i, scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique);
1892             WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1896     sprintf(output, "%s - Done dumping cm_data.scacheHashTable\r\n", cookie);
1897     WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1900     lock_ReleaseRead(&cm_scacheLock);