/*
 * Copyright 2000, International Business Machines Corporation and others.
 *
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
10 #include <afs/param.h>
23 /*extern void afsi_log(char *pattern, ...);*/
25 extern osi_hyper_t hzero;
28 osi_queue_t *cm_allFileLocks;
29 osi_queue_t *cm_freeFileLocks;
30 unsigned long cm_lockRefreshCycle;
32 /* lock for globals */
33 osi_rwlock_t cm_scacheLock;
35 /* Dummy scache entry for use with pioctl fids */
36 cm_scache_t cm_fakeSCache;
38 #ifdef AFS_FREELANCE_CLIENT
39 extern osi_mutex_t cm_Freelance_Lock;
42 /* must be called with cm_scacheLock write-locked! */
43 void cm_AdjustScacheLRU(cm_scache_t *scp)
45 if (scp == cm_data.scacheLRULastp)
46 cm_data.scacheLRULastp = (cm_scache_t *) osi_QPrev(&scp->q);
47 osi_QRemoveHT((osi_queue_t **) &cm_data.scacheLRUFirstp, (osi_queue_t **) &cm_data.scacheLRULastp, &scp->q);
48 osi_QAdd((osi_queue_t **) &cm_data.scacheLRUFirstp, &scp->q);
49 if (!cm_data.scacheLRULastp)
50 cm_data.scacheLRULastp = scp;
53 /* call with scache write-locked and mutex held */
/*
 * Unlink scp from the fid hash table, if present, and clear
 * CM_SCACHEFLAG_INHASH.
 * NOTE(review): the embedded original line numbers jump (54 -> 60,
 * 63 -> 65, 65 -> 68), so this copy of the file is truncated: the local
 * declarations (i, lscpp, tscp), the loop body that actually patches the
 * chain, and the closing braces are missing from this view.
 */
54 void cm_RemoveSCacheFromHashTable(cm_scache_t *scp)
60 if (scp->flags & CM_SCACHEFLAG_INHASH) {
61 /* hash it out first */
/* bucket index for this fid's hash chain */
62 i = CM_SCACHE_HASH(&scp->fid);
/* walk the chain; lscpp tracks the link to patch once scp is found */
63 for (lscpp = &cm_data.scacheHashTablep[i], tscp = cm_data.scacheHashTablep[i];
65 lscpp = &tscp->nextp, tscp = tscp->nextp) {
68 scp->flags &= ~CM_SCACHEFLAG_INHASH;
75 /* called with cm_scacheLock write-locked; recycles an existing scp.
77 * this function ignores all of the locking hierarchy.
/*
 * Strips an scp of all cached state (hash membership, queued buffers,
 * status flags, callback server ref, volume ref, symlink/mount-point
 * data, lock state, ACL entries) so the slot can be reused for a new fid.
 * NOTE(review): this copy of the file is truncated (gaps in the embedded
 * line numbers); the early-return path for a referenced scp, several
 * closing braces, and the final return statement are missing here.
 */
79 long cm_RecycleSCache(cm_scache_t *scp, afs_int32 flags)
/* refuse to recycle an entry that is still referenced; the body of this
 * branch (lines 82-84 of the original) is missing -- presumably it
 * returns an error code; confirm against the upstream source. */
81 if (scp->refCount != 0) {
/* an scp still attached to an SMB file handle must never be recycled */
85 if (scp->flags & CM_SCACHEFLAG_SMB_FID) {
86 osi_Log1(afsd_logp,"cm_RecycleSCache CM_SCACHEFLAG_SMB_FID detected scp 0x%p", scp);
88 osi_panic("cm_RecycleSCache CM_SCACHEFLAG_SMB_FID detected",__FILE__,__LINE__);
93 cm_RemoveSCacheFromHashTable(scp);
96 if (flags & CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS) {
/* Drain the queued store (write) buffers: for each, mark it clean but
 * flag a VNOVNODE error so any waiter sees the operation failed.
 * The assignment inside the while condition is intentional. */
100 while(qdp = scp->bufWritesp) {
101 bufp = osi_GetQData(qdp);
102 osi_QRemove((osi_queue_t **) &scp->bufWritesp, &qdp->q);
105 lock_ObtainMutex(&bufp->mx);
106 bufp->cmFlags &= ~CM_BUF_CMSTORING;
107 bufp->flags &= ~CM_BUF_DIRTY;
108 bufp->dirty_offset = 0;
109 bufp->dirty_length = 0;
110 bufp->flags |= CM_BUF_ERROR;
111 bufp->error = VNOVNODE;
112 bufp->dataVersion = -1; /* bad */
113 bufp->dirtyCounter++;
114 if (bufp->flags & CM_BUF_WAITING) {
115 osi_Log2(afsd_logp, "CM RecycleSCache Waking [scp 0x%x] bufp 0x%x", scp, bufp);
116 osi_Wakeup((long) &bufp);
118 lock_ReleaseMutex(&bufp->mx);
/* Same draining for the queued fetch (read) buffers. */
122 while(qdp = scp->bufReadsp) {
123 bufp = osi_GetQData(qdp);
124 osi_QRemove((osi_queue_t **) &scp->bufReadsp, &qdp->q);
127 lock_ObtainMutex(&bufp->mx);
128 bufp->cmFlags &= ~CM_BUF_CMFETCHING;
129 bufp->flags &= ~CM_BUF_DIRTY;
130 bufp->dirty_offset = 0;
131 bufp->dirty_length = 0;
132 bufp->flags |= CM_BUF_ERROR;
133 bufp->error = VNOVNODE;
134 bufp->dataVersion = -1; /* bad */
135 bufp->dirtyCounter++;
136 if (bufp->flags & CM_BUF_WAITING) {
137 osi_Log2(afsd_logp, "CM RecycleSCache Waking [scp 0x%x] bufp 0x%x", scp, bufp);
138 osi_Wakeup((long) &bufp);
140 lock_ReleaseMutex(&bufp->mx);
144 buf_CleanDirtyBuffers(scp);
146 /* look for things that shouldn't still be set */
147 osi_assert(scp->bufWritesp == NULL);
148 osi_assert(scp->bufReadsp == NULL);
152 /* invalidate so next merge works fine;
153 * also initialize some flags */
155 scp->flags &= ~(CM_SCACHEFLAG_STATD
156 | CM_SCACHEFLAG_DELETED
158 | CM_SCACHEFLAG_PURERO
159 | CM_SCACHEFLAG_OVERQUOTA
160 | CM_SCACHEFLAG_OUTOFSPACE
161 | CM_SCACHEFLAG_EACCESS);
162 scp->serverModTime = 0;
163 scp->dataVersion = 0;
164 scp->bulkStatProgress = hzero;
/* drop the reference on the callback server, if any */
167 if (scp->cbServerp) {
168 cm_PutServer(scp->cbServerp);
169 scp->cbServerp = NULL;
178 /* remove from dnlc */
182 /* discard cached status; if non-zero, Close
183 * tried to store this to server but failed */
186 /* drop held volume ref */
/* NOTE(review): the guard around this call (presumably `if (scp->volp)`)
 * is missing from this truncated view. */
188 cm_PutVolume(scp->volp);
192 /* discard symlink info */
193 scp->mountPointStringp[0] = '\0';
194 memset(&scp->mountRootFid, 0, sizeof(cm_fid_t));
195 memset(&scp->dotdotFid, 0, sizeof(cm_fid_t));
197 /* reset locking info */
198 scp->fileLocksH = NULL;
199 scp->fileLocksT = NULL;
200 scp->serverLock = (-1);
201 scp->exclusiveLocks = 0;
202 scp->sharedLocks = 0;
204 /* not locked, but there can be no references to this guy
205 * while we hold the global refcount lock.
207 cm_FreeAllACLEnts(scp);
212 /* called with cm_scacheLock write-locked; find a vnode to recycle.
213 * Can allocate a new one if desperate, or if below quota (cm_data.maxSCaches).
/*
 * Three strategies, in order: (1) recycle an unreferenced deleted or
 * un-hashed entry from the LRU tail; (2) if at quota, recycle any
 * unreferenced entry with no pending or dirty buffers; (3) otherwise
 * carve a fresh entry out of the mapped scache arena.
 * NOTE(review): this copy is truncated -- the loop termination
 * conditions, the `return scp;` statements after each cm_AdjustScacheLRU
 * call, and several braces are missing from this view.
 */
215 cm_scache_t *cm_GetNewSCache(void)
221 /* first pass - look for deleted objects */
222 for ( scp = cm_data.scacheLRULastp;
224 scp = (cm_scache_t *) osi_QPrev(&scp->q))
/* sanity: every scp must lie inside the mapped scache arena */
226 osi_assert(scp >= cm_data.scacheBaseAddress && scp < (cm_scache_t *)cm_data.scacheHashTablep);
228 if (scp->refCount == 0) {
229 if (scp->flags & CM_SCACHEFLAG_DELETED) {
230 osi_Log1(afsd_logp, "GetNewSCache attempting to recycle deleted scp 0x%x", scp);
231 if (!cm_RecycleSCache(scp, CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS)) {
233 /* we found an entry, so return it */
234 /* now remove from the LRU queue and put it back at the
235 * head of the LRU queue.
237 cm_AdjustScacheLRU(scp);
242 osi_Log1(afsd_logp, "GetNewSCache recycled failed scp 0x%x", scp);
243 } else if (!(scp->flags & CM_SCACHEFLAG_INHASH)) {
244 /* we found an entry, so return it */
245 /* now remove from the LRU queue and put it back at the
246 * head of the LRU queue.
248 cm_AdjustScacheLRU(scp);
255 osi_Log0(afsd_logp, "GetNewSCache no deleted or recycled entries available for reuse");
/* second pass: only when we may not allocate any more entries */
258 if (cm_data.currentSCaches >= cm_data.maxSCaches) {
259 /* There were no deleted scache objects that we could use. Try to find
260 * one that simply hasn't been used in a while.
262 for ( scp = cm_data.scacheLRULastp;
264 scp = (cm_scache_t *) osi_QPrev(&scp->q))
266 /* It is possible for the refCount to be zero and for there still
267 * to be outstanding dirty buffers. If there are dirty buffers,
268 * we must not recycle the scp. */
269 if (scp->refCount == 0 && scp->bufReadsp == NULL && scp->bufWritesp == NULL) {
270 if (!buf_DirtyBuffersExist(&scp->fid)) {
271 if (!cm_RecycleSCache(scp, 0)) {
272 /* we found an entry, so return it */
273 /* now remove from the LRU queue and put it back at the
274 * head of the LRU queue.
276 cm_AdjustScacheLRU(scp);
282 osi_Log1(afsd_logp,"GetNewSCache dirty buffers exist scp 0x%x", scp);
286 osi_Log1(afsd_logp, "GetNewSCache all scache entries in use (retry = %d)", retry);
291 /* if we get here, we should allocate a new scache entry. We either are below
292 * quota or we have a leak and need to allocate a new one to avoid panicing.
/* next unused slot in the contiguous scache arena */
294 scp = cm_data.scacheBaseAddress + cm_data.currentSCaches;
295 osi_assert(scp >= cm_data.scacheBaseAddress && scp < (cm_scache_t *)cm_data.scacheHashTablep);
296 memset(scp, 0, sizeof(cm_scache_t));
297 scp->magic = CM_SCACHE_MAGIC;
298 lock_InitializeMutex(&scp->mx, "cm_scache_t mutex");
299 lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
300 scp->serverLock = -1;
302 /* and put it in the LRU queue */
303 osi_QAdd((osi_queue_t **) &cm_data.scacheLRUFirstp, &scp->q);
304 if (!cm_data.scacheLRULastp)
305 cm_data.scacheLRULastp = scp;
306 cm_data.currentSCaches++;
307 cm_dnlcPurgedp(scp); /* make doubly sure that this is not in dnlc */
/* link onto the all-scaches list; `return scp;` is missing from this view */
309 scp->allNextp = cm_data.allSCachesp;
310 cm_data.allSCachesp = scp;
314 /* like strcmp, only for fids */
315 int cm_FidCmp(cm_fid_t *ap, cm_fid_t *bp)
317 if (ap->vnode != bp->vnode)
319 if (ap->volume != bp->volume)
321 if (ap->unique != bp->unique)
323 if (ap->cell != bp->cell)
/*
 * Initialize the dummy scache entry used with pioctl fids (see the
 * cm_fakeSCache declaration above): a fake plain file with mode 0777,
 * a fixed length, and a permanently-held reference.
 * NOTE(review): the gap in the embedded line numbers (328 -> 331)
 * suggests a missing guard -- presumably `if (newFile) {` -- around the
 * memset/initialization below; confirm against the upstream source.
 */
328 void cm_fakeSCacheInit(int newFile)
331 memset(&cm_data.fakeSCache, 0, sizeof(cm_scache_t));
/* sentinel non-NULL value -- apparently marks "has a callback" without a
 * real server; TODO confirm against cm_HaveCallback */
332 cm_data.fakeSCache.cbServerp = (struct cm_server *)(-1);
333 /* can leave clientModTime at 0 */
334 cm_data.fakeSCache.fileType = CM_SCACHETYPE_FILE;
335 cm_data.fakeSCache.unixModeBits = 0777;
336 cm_data.fakeSCache.length.LowPart = 1000;
337 cm_data.fakeSCache.linkCount = 1;
/* held forever so the fake entry can never be recycled */
338 cm_data.fakeSCache.refCount = 1;
340 lock_InitializeMutex(&cm_data.fakeSCache.mx, "cm_scache_t mutex");
/*
 * Consistency-check the scache data structures: the LRU queue walked
 * forward and backward (magic numbers, hash links, ACL and volume
 * magics, loop detection, prev/next symmetry) and every hash chain.
 * Finishes by validating the DNLC. Each failure is logged to both the
 * afsi log and stderr.
 * NOTE(review): truncated view -- the return type (line 343), the local
 * `i`, the loop conditions, and the `return -1;` after each failure
 * message are missing here; only the final `return cm_dnlcValidate();`
 * survives.
 */
344 cm_ValidateSCache(void)
346 cm_scache_t * scp, *lscp;
/* head and tail must be both NULL or both non-NULL
 * (relies on && binding tighter than ||) */
349 if ( cm_data.scacheLRUFirstp == NULL && cm_data.scacheLRULastp != NULL ||
350 cm_data.scacheLRUFirstp != NULL && cm_data.scacheLRULastp == NULL) {
351 afsi_log("cm_ValidateSCache failure: inconsistent LRU pointers");
352 fprintf(stderr, "cm_ValidateSCache failure: inconsistent LRU pointers\n");
/* forward walk of the LRU queue from the head */
356 for ( scp = cm_data.scacheLRUFirstp, lscp = NULL, i = 0;
358 lscp = scp, scp = (cm_scache_t *) osi_QNext(&scp->q), i++ ) {
359 if (scp->magic != CM_SCACHE_MAGIC) {
360 afsi_log("cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC");
361 fprintf(stderr, "cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC\n");
364 if (scp->nextp && scp->nextp->magic != CM_SCACHE_MAGIC) {
365 afsi_log("cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC");
366 fprintf(stderr, "cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC\n");
369 if (scp->randomACLp && scp->randomACLp->magic != CM_ACLENT_MAGIC) {
370 afsi_log("cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC");
371 fprintf(stderr, "cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC\n");
374 if (scp->volp && scp->volp->magic != CM_VOLUME_MAGIC) {
375 afsi_log("cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC");
376 fprintf(stderr, "cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC\n");
/* more entries walked than exist at all => cycle in the queue */
379 if (i > cm_data.currentSCaches ) {
380 afsi_log("cm_ValidateSCache failure: LRU First queue loops");
/* NOTE(review): "LUR" is a typo for "LRU" in the message below; it is a
 * runtime string so it is left untouched here */
381 fprintf(stderr, "cm_ValidateSCache failure: LUR First queue loops\n");
384 if (lscp != (cm_scache_t *) osi_QPrev(&scp->q)) {
385 afsi_log("cm_ValidateSCache failure: QPrev(scp) != previous");
386 fprintf(stderr, "cm_ValidateSCache failure: QPrev(scp) != previous\n");
/* backward walk of the same queue from the tail */
391 for ( scp = cm_data.scacheLRULastp, lscp = NULL, i = 0; scp;
392 lscp = scp, scp = (cm_scache_t *) osi_QPrev(&scp->q), i++ ) {
393 if (scp->magic != CM_SCACHE_MAGIC) {
394 afsi_log("cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC");
395 fprintf(stderr, "cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC\n");
398 if (scp->nextp && scp->nextp->magic != CM_SCACHE_MAGIC) {
399 afsi_log("cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC");
400 fprintf(stderr, "cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC\n");
403 if (scp->randomACLp && scp->randomACLp->magic != CM_ACLENT_MAGIC) {
404 afsi_log("cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC");
405 fprintf(stderr, "cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC\n");
408 if (scp->volp && scp->volp->magic != CM_VOLUME_MAGIC) {
409 afsi_log("cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC");
410 fprintf(stderr, "cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC\n");
413 if (i > cm_data.currentSCaches ) {
414 afsi_log("cm_ValidateSCache failure: LRU Last queue loops");
/* NOTE(review): same "LUR" typo as above */
415 fprintf(stderr, "cm_ValidateSCache failure: LUR Last queue loops\n");
418 if (lscp != (cm_scache_t *) osi_QNext(&scp->q)) {
419 afsi_log("cm_ValidateSCache failure: QNext(scp) != next");
420 fprintf(stderr, "cm_ValidateSCache failure: QNext(scp) != next\n");
/* finally, check every chain of the fid hash table */
425 for ( i=0; i < cm_data.scacheHashTableSize; i++ ) {
426 for ( scp = cm_data.scacheHashTablep[i]; scp; scp = scp->nextp ) {
427 if (scp->magic != CM_SCACHE_MAGIC) {
428 afsi_log("cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC");
429 fprintf(stderr, "cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC\n");
432 if (scp->nextp && scp->nextp->magic != CM_SCACHE_MAGIC) {
433 afsi_log("cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC");
434 fprintf(stderr, "cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC\n");
437 if (scp->randomACLp && scp->randomACLp->magic != CM_ACLENT_MAGIC) {
438 afsi_log("cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC");
439 fprintf(stderr, "cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC\n");
442 if (scp->volp && scp->volp->magic != CM_VOLUME_MAGIC) {
443 afsi_log("cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC");
444 fprintf(stderr, "cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC\n");
450 return cm_dnlcValidate();
/*
 * Give up all callbacks at every server, then walk the all-scaches list
 * dropping each entry's callback-server reference and clearing its
 * CALLBACK flag, under the global scache write lock.
 * NOTE(review): truncated view -- the return type (line 453), the local
 * `cm_scache_t *scp;` declaration, some statements in the gap between
 * lines 465 and 468, and the closing braces are missing here.
 */
454 cm_SuspendSCache(void)
458 cm_GiveUpAllCallbacksAllServers();
460 lock_ObtainWrite(&cm_scacheLock);
461 for ( scp = cm_data.allSCachesp; scp;
462 scp = scp->allNextp ) {
463 if (scp->cbServerp) {
464 cm_PutServer(scp->cbServerp);
465 scp->cbServerp = NULL;
468 scp->flags &= ~CM_SCACHEFLAG_CALLBACK;
470 lock_ReleaseWrite(&cm_scacheLock);
/*
 * Tear down every scache entry at shutdown: free ACL entries (under the
 * entry's mutex), drop the callback-server reference, clear the
 * CALLBACK flag, and finalize each entry's locks. Afterwards give up
 * all callbacks at all servers and shut down the DNLC, whose result is
 * returned.
 * NOTE(review): truncated view -- the return type (line 473), the local
 * `scp` declaration, and closing braces are missing here.
 */
474 cm_ShutdownSCache(void)
478 lock_ObtainWrite(&cm_scacheLock);
480 for ( scp = cm_data.allSCachesp; scp;
481 scp = scp->allNextp ) {
482 if (scp->randomACLp) {
/* free ACL entries under the entry mutex, not the global lock */
483 lock_ObtainMutex(&scp->mx);
484 cm_FreeAllACLEnts(scp);
485 lock_ReleaseMutex(&scp->mx);
488 if (scp->cbServerp) {
489 cm_PutServer(scp->cbServerp);
490 scp->cbServerp = NULL;
493 scp->flags &= ~CM_SCACHEFLAG_CALLBACK;
495 lock_FinalizeMutex(&scp->mx);
496 lock_FinalizeRWLock(&scp->bufCreateLock);
498 lock_ReleaseWrite(&cm_scacheLock);
500 cm_GiveUpAllCallbacksAllServers();
502 return cm_dnlcShutdown();
/*
 * One-time (osi_Once-guarded) initialization of the scache module:
 * global lock, hash table, counters, LRU pointers, per-entry lock
 * re-initialization, the file-lock globals, the fake pioctl scache
 * entry, and the DNLC.
 * NOTE(review): truncated view. The gap between lines 510 and 512, and
 * the fact that line 520 iterates allSCachesp immediately after line 513
 * sets it to NULL, strongly suggest a missing `if (newFile) { ... } else
 * { ... }` split between the fresh-cache path (512-516) and the
 * reuse-existing-cache path (520-538); confirm against upstream.
 */
505 void cm_InitSCache(int newFile, long maxSCaches)
507 static osi_once_t once;
509 if (osi_Once(&once)) {
510 lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock");
512 memset(cm_data.scacheHashTablep, 0, sizeof(cm_scache_t *) * cm_data.scacheHashTableSize);
513 cm_data.allSCachesp = NULL;
514 cm_data.currentSCaches = 0;
515 cm_data.maxSCaches = maxSCaches;
516 cm_data.scacheLRUFirstp = cm_data.scacheLRULastp = NULL;
/* re-arm locks and transient per-entry state on pre-existing entries */
520 for ( scp = cm_data.allSCachesp; scp;
521 scp = scp->allNextp ) {
522 lock_InitializeMutex(&scp->mx, "cm_scache_t mutex");
523 lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
525 scp->cbServerp = NULL;
527 scp->fileLocksH = NULL;
528 scp->fileLocksT = NULL;
529 scp->serverLock = (-1);
530 scp->lastRefreshCycle = 0;
531 scp->exclusiveLocks = 0;
532 scp->sharedLocks = 0;
538 scp->flags &= ~CM_SCACHEFLAG_WAITING;
541 cm_allFileLocks = NULL;
542 cm_freeFileLocks = NULL;
543 cm_lockRefreshCycle = 0;
544 cm_fakeSCacheInit(newFile);
545 cm_dnlcInit(newFile);
550 /* version that doesn't bother creating the entry if we don't find it */
/*
 * Look the fid up in the scache hash table. On a hit, take a reference
 * (cm_HoldSCacheNoLock), promote the entry to the LRU head, and hand it
 * back; cell 0 is rejected up front.
 * NOTE(review): truncated view -- the local declarations, the
 * `return scp;` on the hit path, the body of the cell==0 branch
 * (presumably returning NULL), and the final `return NULL;` are missing
 * here.
 */
551 cm_scache_t *cm_FindSCache(cm_fid_t *fidp)
556 hash = CM_SCACHE_HASH(fidp);
/* a fid with cell 0 is invalid and is never looked up */
558 if (fidp->cell == 0) {
562 lock_ObtainWrite(&cm_scacheLock);
563 for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
564 if (cm_FidCmp(fidp, &scp->fid) == 0) {
565 cm_HoldSCacheNoLock(scp);
566 cm_AdjustScacheLRU(scp);
567 lock_ReleaseWrite(&cm_scacheLock);
571 lock_ReleaseWrite(&cm_scacheLock);
/*
 * Find-or-create an scache entry for a fid and return it held via
 * *outScpp. Fast path: hash hit -> hold, bump LRU, return. Otherwise a
 * new entry is obtained (cm_GetNewSCache), initialized, and hashed in.
 * Under AFS_FREELANCE_CLIENT, fake local-mount-point entries are
 * synthesized directly instead of contacting a file server; for real
 * fids the cell and volume are resolved first (CM_ERROR_NOSUCHCELL /
 * CM_ERROR_WOULDBLOCK on failure). With DEBUG_REFCOUNT the ..Dbg
 * variant additionally records the caller's file/line.
 * NOTE(review): this copy of the file is heavily truncated -- `#else` /
 * `#endif` halves of several conditionals, local declarations, the
 * `return 0;` success paths, the fid/refCount assignments near the
 * "sets refCount to 1" log lines, and many closing braces are missing
 * from this view. Do not treat the visible text as complete control
 * flow.
 */
575 #ifdef DEBUG_REFCOUNT
576 long cm_GetSCacheDbg(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp,
577 cm_req_t *reqp, char * file, long line)
579 long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp,
586 cm_volume_t *volp = NULL;
589 int special; // yj: boolean variable to test if file is on root.afs
591 extern cm_fid_t cm_rootFid;
593 hash = CM_SCACHE_HASH(fidp);
595 osi_assert(fidp->cell != 0);
/* diagnostic: a root cell/volume fid with vnode 0 / unique 0 is bogus */
597 if (fidp->cell== cm_data.rootFid.cell &&
598 fidp->volume==cm_data.rootFid.volume &&
599 fidp->vnode==0x0 && fidp->unique==0x0)
601 osi_Log0(afsd_logp,"cm_GetSCache called with root cell/volume and vnode=0 and unique=0");
604 // yj: check if we have the scp, if so, we don't need
605 // to do anything else
606 lock_ObtainWrite(&cm_scacheLock);
607 for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
608 if (cm_FidCmp(fidp, &scp->fid) == 0) {
609 #ifdef DEBUG_REFCOUNT
610 afsi_log("%s:%d cm_GetSCache (1) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
611 osi_Log1(afsd_logp,"cm_GetSCache (1) outScpp 0x%p", scp);
613 cm_HoldSCacheNoLock(scp);
615 cm_AdjustScacheLRU(scp);
616 lock_ReleaseWrite(&cm_scacheLock);
621 // yj: when we get here, it means we don't have an scp
622 // so we need to either load it or fake it, depending
623 // on whether the file is "special", see below.
625 // yj: if we're trying to get an scp for a file that's
626 // on root.afs of homecell, we want to handle it specially
627 // because we have to fill in the status stuff 'coz we
628 // don't want trybulkstat to fill it in for us
629 #ifdef AFS_FREELANCE_CLIENT
/* "special" = fake-root volume entry other than the root dir itself;
 * "isRoot" = the fake root directory (vnode 1, unique 1) */
630 special = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
631 fidp->volume==AFS_FAKE_ROOT_VOL_ID &&
632 !(fidp->vnode==0x1 && fidp->unique==0x1));
633 isRoot = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
634 fidp->volume==AFS_FAKE_ROOT_VOL_ID &&
635 fidp->vnode==0x1 && fidp->unique==0x1);
636 if (cm_freelanceEnabled && isRoot) {
637 osi_Log0(afsd_logp,"cm_GetSCache Freelance and isRoot");
638 /* freelance: if we are trying to get the root scp for the first
639 * time, we will just put in a place holder entry.
644 if (cm_freelanceEnabled && special) {
645 osi_Log0(afsd_logp,"cm_GetSCache Freelance and special");
/* vnodes 2..N+1 map onto the local mount-point table (offset by 2) */
646 if (fidp->vnode > 1 && fidp->vnode <= cm_noLocalMountPoints + 2) {
647 lock_ObtainMutex(&cm_Freelance_Lock);
648 mp =(cm_localMountPoints+fidp->vnode-2)->mountPointStringp;
649 lock_ReleaseMutex(&cm_Freelance_Lock);
653 scp = cm_GetNewSCache();
655 osi_Log0(afsd_logp,"cm_GetSCache unable to obtain *new* scache entry");
656 lock_ReleaseWrite(&cm_scacheLock);
657 return CM_ERROR_WOULDBLOCK;
660 #if not_too_dangerous
661 /* dropping the cm_scacheLock allows more than one thread
662 * to obtain the same cm_scache_t from the LRU list. Since
663 * the refCount is known to be zero at this point we have to
664 * assume that no one else is using the one this is returned.
666 lock_ReleaseWrite(&cm_scacheLock);
667 lock_ObtainMutex(&scp->mx);
668 lock_ObtainWrite(&cm_scacheLock);
/* fake entries share the root's volume; take an extra volume ref */
671 scp->volp = cm_data.rootSCachep->volp;
672 cm_GetVolume(scp->volp); /* grab an additional reference */
673 scp->dotdotFid.cell=AFS_FAKE_ROOT_CELL_ID;
674 scp->dotdotFid.volume=AFS_FAKE_ROOT_VOL_ID;
675 scp->dotdotFid.unique=1;
676 scp->dotdotFid.vnode=1;
677 scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
/* insert at the head of the hash chain */
678 scp->nextp=cm_data.scacheHashTablep[hash];
679 cm_data.scacheHashTablep[hash]=scp;
680 scp->flags |= CM_SCACHEFLAG_INHASH;
682 osi_Log1(afsd_logp,"cm_GetSCache (freelance) sets refCount to 1 scp 0x%x", scp);
683 if (fidp->vnode > 1 && fidp->vnode <= cm_noLocalMountPoints + 2)
684 scp->fileType = (cm_localMountPoints+fidp->vnode-2)->fileType;
686 scp->fileType = CM_SCACHETYPE_INVALID;
688 lock_ObtainMutex(&cm_Freelance_Lock);
689 scp->length.LowPart = (DWORD)strlen(mp)+4;
690 scp->length.HighPart = 0;
/* strncpy does not guarantee termination; the next line forces it */
691 strncpy(scp->mountPointStringp,mp,MOUNTPOINTLEN);
692 scp->mountPointStringp[MOUNTPOINTLEN-1] = '\0';
693 lock_ReleaseMutex(&cm_Freelance_Lock);
696 scp->unixModeBits=0777;
697 scp->clientModTime=FakeFreelanceModTime;
698 scp->serverModTime=FakeFreelanceModTime;
699 scp->parentUnique = 0x1;
700 scp->parentVnode=0x1;
702 scp->dataVersion=cm_data.fakeDirVersion;
703 scp->lockDataVersion=-1; /* no lock yet */
704 #if not_too_dangerous
705 lock_ReleaseMutex(&scp->mx);
708 lock_ReleaseWrite(&cm_scacheLock);
709 #ifdef DEBUG_REFCOUNT
710 afsi_log("%s:%d cm_GetSCache (2) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
711 osi_Log1(afsd_logp,"cm_GetSCache (2) outScpp 0x%p", scp);
716 #endif /* AFS_FREELANCE_CLIENT */
718 /* otherwise, we need to find the volume */
719 if (!cm_freelanceEnabled || !isRoot) {
720 lock_ReleaseWrite(&cm_scacheLock); /* for perf. reasons */
721 cellp = cm_FindCellByID(fidp->cell);
723 return CM_ERROR_NOSUCHCELL;
725 code = cm_GetVolumeByID(cellp, fidp->volume, userp, reqp, CM_GETVOL_FLAG_CREATE, &volp);
728 lock_ObtainWrite(&cm_scacheLock);
731 /* otherwise, we have the volume, now reverify that the scp doesn't
732 * exist, and proceed.
/* re-check after re-acquiring the lock: another thread may have raced
 * us and inserted this fid while the lock was dropped */
734 for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
735 if (cm_FidCmp(fidp, &scp->fid) == 0) {
736 #ifdef DEBUG_REFCOUNT
737 afsi_log("%s:%d cm_GetSCache (3) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
738 osi_Log1(afsd_logp,"cm_GetSCache (3) outScpp 0x%p", scp);
740 cm_HoldSCacheNoLock(scp);
741 osi_assert(scp->volp == volp);
742 cm_AdjustScacheLRU(scp);
743 lock_ReleaseWrite(&cm_scacheLock);
751 /* now, if we don't have the fid, recycle something */
752 scp = cm_GetNewSCache();
754 osi_Log0(afsd_logp,"cm_GetNewSCache unable to obtain *new* scache entry");
755 lock_ReleaseWrite(&cm_scacheLock);
758 return CM_ERROR_WOULDBLOCK;
760 osi_Log2(afsd_logp,"cm_GetNewSCache returns scp 0x%x flags 0x%x", scp, scp->flags);
762 osi_assert(!(scp->flags & CM_SCACHEFLAG_INHASH));
764 #if not_too_dangerous
765 /* dropping the cm_scacheLock allows more than one thread
766 * to obtain the same cm_scache_t from the LRU list. Since
767 * the refCount is known to be zero at this point we have to
768 * assume that no one else is using the one this is returned.
770 lock_ReleaseWrite(&cm_scacheLock);
771 lock_ObtainMutex(&scp->mx);
772 lock_ObtainWrite(&cm_scacheLock);
775 scp->volp = volp; /* a held reference */
777 if (!cm_freelanceEnabled || !isRoot) {
778 /* if this scache entry represents a volume root then we need
779 * to copy the dotdotFipd from the volume structure where the
780 * "master" copy is stored (defect 11489)
782 if (scp->fid.vnode == 1 && scp->fid.unique == 1) {
783 scp->dotdotFid = volp->dotdotFid;
/* mark read-only flavors: .readonly gets PURERO+RO, .backup gets RO */
786 if (volp->ro.ID == fidp->volume)
787 scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
788 else if (volp->bk.ID == fidp->volume)
789 scp->flags |= CM_SCACHEFLAG_RO;
791 scp->nextp = cm_data.scacheHashTablep[hash];
792 cm_data.scacheHashTablep[hash] = scp;
793 scp->flags |= CM_SCACHEFLAG_INHASH;
795 osi_Log1(afsd_logp,"cm_GetSCache sets refCount to 1 scp 0x%x", scp);
796 #if not_too_dangerous
797 lock_ReleaseMutex(&scp->mx);
800 /* XXX - The following fields in the cm_scache are
806 lock_ReleaseWrite(&cm_scacheLock);
808 /* now we have a held scache entry; just return it */
810 #ifdef DEBUG_REFCOUNT
811 afsi_log("%s:%d cm_GetSCache (4) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
812 osi_Log1(afsd_logp,"cm_GetSCache (4) outScpp 0x%p", scp);
817 /* Returns a held reference to the scache's parent
/*
 * Build the parent's fid from this entry's own cell/volume plus the
 * stored parentVnode/parentUnique, then look it up in the hash table
 * under the read lock, taking a reference on a hit. The self-compare
 * guard skips the lookup when the entry is its own parent (volume root).
 * NOTE(review): truncated view -- local declarations (parent_fid, i),
 * the loop break after the hold, and the final `return pscp;` are
 * missing here.
 */
819 cm_scache_t * cm_FindSCacheParent(cm_scache_t * scp)
824 cm_scache_t * pscp = NULL;
826 lock_ObtainRead(&cm_scacheLock);
/* parent shares our cell and volume; only vnode/unique differ */
827 parent_fid = scp->fid;
828 parent_fid.vnode = scp->parentVnode;
829 parent_fid.unique = scp->parentUnique;
/* nonzero cm_FidCmp => fids differ, i.e. we are not our own parent */
831 if (cm_FidCmp(&scp->fid, &parent_fid)) {
832 i = CM_SCACHE_HASH(&parent_fid);
833 for (pscp = cm_data.scacheHashTablep[i]; pscp; pscp = pscp->nextp) {
834 if (!cm_FidCmp(&pscp->fid, &parent_fid)) {
835 cm_HoldSCacheNoLock(pscp);
841 lock_ReleaseRead(&cm_scacheLock);
846 /* synchronize a fetch, store, read, write, fetch status or store status.
847 * Called with scache mutex held, and returns with it held, but temporarily
848 * drops it during the fetch.
850 * At most one flag can be on in flags, if this is an RPC request.
852 * Also, if we're fetching or storing data, we must ensure that we have a buffer.
854 * There are a lot of weird restrictions here; here's an attempt to explain the
855 * rationale for the concurrency restrictions implemented in this function.
857 * First, although the file server will break callbacks when *another* machine
858 * modifies a file or status block, the client itself is responsible for
859 * concurrency control on its own requests. Callback breaking events are rare,
860 * and simply invalidate any concurrent new status info.
862 * In the absence of callback breaking messages, we need to know how to
863 * synchronize incoming responses describing updates to files. We synchronize
864 * operations that update the data version by comparing the data versions.
865 * However, updates that do not update the data, but only the status, can't be
866 * synchronized with fetches or stores, since there's nothing to compare
867 * to tell which operation executed first at the server.
869 * Thus, we can allow multiple ops that change file data, or dir data, and
870 * fetches. However, status storing ops have to be done serially.
872 * Furthermore, certain data-changing ops are incompatible: we can't read or
873 * write a buffer while doing a truncate. We can't read and write the same
874 * buffer at the same time, or write while fetching or storing, or read while
875 * fetching a buffer (this may change). We can't fetch and store at the same
878 * With respect to status, we can't read and write at the same time, read while
879 * fetching, write while fetching or storing, or fetch and store at the same time.
881 * We can't allow a get callback RPC to run in concurrently with something that
882 * will return updated status, since we could start a call, have the server
883 * return status, have another machine make an update to the status (which
884 * doesn't change serverModTime), have the original machine get a new callback,
885 * and then have the original machine merge in the early, old info from the
886 * first call. At this point, the easiest way to avoid this problem is to have
887 * getcallback calls conflict with all others for the same vnode. Other calls
888 * to cm_MergeStatus that aren't associated with calls to cm_SyncOp on the same
889 * vnode must be careful not to merge in their status unless they have obtained
890 * a callback from the start of their call.
893 * Concurrent StoreData RPC's can cause trouble if the file is being extended.
894 * Each such RPC passes a FileLength parameter, which the server uses to do
895 * pre-truncation if necessary. So if two RPC's are processed out of order at
896 * the server, the one with the smaller FileLength will be processed last,
897 * possibly resulting in a bogus truncation. The simplest way to avoid this
898 * is to serialize all StoreData RPC's. This is the reason we defined
899 * CM_SCACHESYNC_STOREDATA_EXCL and CM_SCACHEFLAG_DATASTORING.
901 long cm_SyncOp(cm_scache_t *scp, cm_buf_t *bufp, cm_user_t *userp, cm_req_t *reqp,
902 afs_uint32 rights, afs_uint32 flags)
904 osi_queueData_t *qdp;
907 afs_uint32 outRights;
909 afs_uint32 sleep_scp_flags = 0;
910 afs_uint32 sleep_buf_cmflags = 0;
911 afs_uint32 sleep_scp_bufs = 0;
913 /* lookup this first */
914 bufLocked = flags & CM_SCACHESYNC_BUFLOCKED;
917 osi_assert(bufp->refCount > 0);
920 /* Do the access check. Now we don't really do the access check
921 * atomically, since the caller doesn't expect the parent dir to be
922 * returned locked, and that is what we'd have to do to prevent a
923 * callback breaking message on the parent due to a setacl call from
924 * being processed while we're running. So, instead, we check things
925 * here, and if things look fine with the access, we proceed to finish
926 * the rest of this check. Sort of a hack, but probably good enough.
930 if (flags & CM_SCACHESYNC_FETCHSTATUS) {
931 /* if we're bringing in a new status block, ensure that
932 * we aren't already doing so, and that no one is
933 * changing the status concurrently, either. We need
934 * to do this, even if the status is of a different
935 * type, since we don't have the ability to figure out,
936 * in the AFS 3 protocols, which status-changing
937 * operation ran first, or even which order a read and
938 * a write occurred in.
940 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
941 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
942 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want FETCHSTATUS", scp);
946 if (flags & (CM_SCACHESYNC_STORESIZE | CM_SCACHESYNC_STORESTATUS
947 | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_GETCALLBACK)) {
948 /* if we're going to make an RPC to change the status, make sure
949 * that no one is bringing in or sending out the status.
951 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING |
952 CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
953 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want STORESIZE|STORESTATUS|SETSIZE|GETCALLBACK", scp);
956 if (scp->bufReadsp || scp->bufWritesp) {
957 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is bufRead|bufWrite want STORESIZE|STORESTATUS|SETSIZE|GETCALLBACK", scp);
961 if (flags & CM_SCACHESYNC_FETCHDATA) {
962 /* if we're bringing in a new chunk of data, make sure that
963 * nothing is happening to that chunk, and that we aren't
964 * changing the basic file status info, either.
966 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
967 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
968 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want FETCHDATA", scp);
971 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING | CM_BUF_CMWRITING))) {
972 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING|BUF_CMSTORING|BUF_CMWRITING want FETCHDATA", scp, bufp);
976 if (flags & CM_SCACHESYNC_STOREDATA) {
977 /* same as fetch data */
978 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
979 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
980 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want STOREDATA", scp);
983 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING | CM_BUF_CMWRITING))) {
984 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING|BUF_CMSTORING|BUF_CMWRITING want STOREDATA", scp, bufp);
989 if (flags & CM_SCACHESYNC_STOREDATA_EXCL) {
990 /* Don't allow concurrent StoreData RPC's */
991 if (scp->flags & CM_SCACHEFLAG_DATASTORING) {
992 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is DATASTORING want STOREDATA_EXCL", scp);
997 if (flags & CM_SCACHESYNC_ASYNCSTORE) {
998 /* Don't allow more than one BKG store request */
999 if (scp->flags & CM_SCACHEFLAG_ASYNCSTORING) {
1000 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is ASYNCSTORING want ASYNCSTORE", scp);
1005 if (flags & CM_SCACHESYNC_LOCK) {
1006 /* Don't allow concurrent fiddling with lock lists */
1007 if (scp->flags & CM_SCACHEFLAG_LOCKING) {
1008 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is LOCKING want LOCK", scp);
1013 /* now the operations that don't correspond to making RPCs */
1014 if (flags & CM_SCACHESYNC_GETSTATUS) {
1015 /* we can use the status that's here, if we're not
1016 * bringing in new status.
1018 if (scp->flags & (CM_SCACHEFLAG_FETCHING)) {
1019 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING want GETSTATUS", scp);
1023 if (flags & CM_SCACHESYNC_SETSTATUS) {
1024 /* we can make a change to the local status, as long as
1025 * the status isn't changing now.
1027 * If we're fetching or storing a chunk of data, we can
1028 * change the status locally, since the fetch/store
1029 * operations don't change any of the data that we're
1032 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING | CM_SCACHEFLAG_SIZESTORING)) {
1033 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING want SETSTATUS", scp);
1037 if (flags & CM_SCACHESYNC_READ) {
1038 /* we're going to read the data, make sure that the
1039 * status is available, and that the data is here. It
1040 * is OK to read while storing the data back.
1042 if (scp->flags & CM_SCACHEFLAG_FETCHING) {
1043 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING want READ", scp);
1046 if (bufp && ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == CM_BUF_CMFETCHING)) {
1047 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING want READ", scp, bufp);
1050 if (bufp && (bufp->cmFlags & CM_BUF_CMWRITING)) {
1051 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMWRITING want READ", scp, bufp);
1055 if (flags & CM_SCACHESYNC_WRITE) {
1056 /* don't write unless the status is stable and the chunk
1059 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
1060 | CM_SCACHEFLAG_SIZESTORING)) {
1061 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING want WRITE", scp);
1064 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING |
1066 CM_BUF_CMWRITING))) {
1067 osi_Log3(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is %s want WRITE",
1069 ((bufp->cmFlags & CM_BUF_CMFETCHING) ? "CM_BUF_CMFETCHING":
1070 ((bufp->cmFlags & CM_BUF_CMSTORING) ? "CM_BUF_CMSTORING" :
1071 ((bufp->cmFlags & CM_BUF_CMWRITING) ? "CM_BUF_CMWRITING" :
1077 // yj: modified this so that callback only checked if we're
1078 // not checking something on /afs
1079 /* fix the conditional to match the one in cm_HaveCallback */
1080 if ((flags & CM_SCACHESYNC_NEEDCALLBACK)
1081 #ifdef AFS_FREELANCE_CLIENT
1082 && (!cm_freelanceEnabled ||
1083 !(scp->fid.vnode==0x1 && scp->fid.unique==0x1) ||
1084 scp->fid.cell!=AFS_FAKE_ROOT_CELL_ID ||
1085 scp->fid.volume!=AFS_FAKE_ROOT_VOL_ID ||
1086 cm_fakeDirCallback < 2)
1087 #endif /* AFS_FREELANCE_CLIENT */
1089 if ((flags & CM_SCACHESYNC_FORCECB) || !cm_HaveCallback(scp)) {
1090 osi_Log1(afsd_logp, "CM SyncOp getting callback on scp 0x%p",
1093 lock_ReleaseMutex(&bufp->mx);
1094 code = cm_GetCallback(scp, userp, reqp, (flags & CM_SCACHESYNC_FORCECB)?1:0);
1096 lock_ReleaseMutex(&scp->mx);
1097 lock_ObtainMutex(&bufp->mx);
1098 lock_ObtainMutex(&scp->mx);
1102 flags &= ~CM_SCACHESYNC_FORCECB; /* only force once */
1108 /* can't check access rights without a callback */
1109 osi_assert(flags & CM_SCACHESYNC_NEEDCALLBACK);
1111 if ((rights & PRSFS_WRITE) && (scp->flags & CM_SCACHEFLAG_RO))
1112 return CM_ERROR_READONLY;
1114 if (cm_HaveAccessRights(scp, userp, rights, &outRights)) {
1115 if (~outRights & rights)
1116 return CM_ERROR_NOACCESS;
1119 /* we don't know the required access rights */
1120 if (bufLocked) lock_ReleaseMutex(&bufp->mx);
1121 code = cm_GetAccessRights(scp, userp, reqp);
1123 lock_ReleaseMutex(&scp->mx);
1124 lock_ObtainMutex(&bufp->mx);
1125 lock_ObtainMutex(&scp->mx);
1133 /* if we get here, we're happy */
1137 /* first check if we're not supposed to wait: fail
1138 * in this case, returning with everything still locked.
1140 if (flags & CM_SCACHESYNC_NOWAIT)
1141 return CM_ERROR_WOULDBLOCK;
1143 sleep_scp_flags = scp->flags; /* so we know why we slept */
1144 sleep_buf_cmflags = bufp ? bufp->cmFlags : 0;
1145 sleep_scp_bufs = (scp->bufReadsp ? 1 : 0) | (scp->bufWritesp ? 2 : 0);
1147 /* wait here, then try again */
1148 osi_Log1(afsd_logp, "CM SyncOp sleeping scp 0x%p", scp);
1149 if ( scp->flags & CM_SCACHEFLAG_WAITING ) {
1151 scp->waitRequests++;
1152 osi_Log3(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING already set for 0x%p; %d threads; %d requests",
1153 scp, scp->waitCount, scp->waitRequests);
1155 osi_Log1(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING set for 0x%p", scp);
1156 scp->flags |= CM_SCACHEFLAG_WAITING;
1157 scp->waitCount = scp->waitRequests = 1;
1160 lock_ReleaseMutex(&bufp->mx);
1161 osi_SleepM((LONG_PTR) &scp->flags, &scp->mx);
1163 smb_UpdateServerPriority();
1166 lock_ObtainMutex(&bufp->mx);
1167 lock_ObtainMutex(&scp->mx);
1169 osi_Log3(afsd_logp, "CM SyncOp woke! scp 0x%p; still waiting %d threads of %d requests",
1170 scp, scp->waitCount, scp->waitRequests);
1171 if (scp->waitCount == 0) {
1172 osi_Log1(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING reset for 0x%p", scp);
1173 scp->flags &= ~CM_SCACHEFLAG_WAITING;
1174 scp->waitRequests = 0;
1176 } /* big while loop */
1178 /* now, update the recorded state for RPC-type calls */
1179 if (flags & CM_SCACHESYNC_FETCHSTATUS)
1180 scp->flags |= CM_SCACHEFLAG_FETCHING;
1181 if (flags & CM_SCACHESYNC_STORESTATUS)
1182 scp->flags |= CM_SCACHEFLAG_STORING;
1183 if (flags & CM_SCACHESYNC_STORESIZE)
1184 scp->flags |= CM_SCACHEFLAG_SIZESTORING;
1185 if (flags & CM_SCACHESYNC_GETCALLBACK)
1186 scp->flags |= CM_SCACHEFLAG_GETCALLBACK;
1187 if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
1188 scp->flags |= CM_SCACHEFLAG_DATASTORING;
1189 if (flags & CM_SCACHESYNC_ASYNCSTORE)
1190 scp->flags |= CM_SCACHEFLAG_ASYNCSTORING;
1191 if (flags & CM_SCACHESYNC_LOCK)
1192 scp->flags |= CM_SCACHEFLAG_LOCKING;
1194 /* now update the buffer pointer */
1195 if (flags & CM_SCACHESYNC_FETCHDATA) {
1196 /* ensure that the buffer isn't already in the I/O list */
1198 for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1199 tbufp = osi_GetQData(qdp);
1200 osi_assert(tbufp != bufp);
1204 /* queue a held reference to the buffer in the "reading" I/O list */
1205 qdp = osi_QDAlloc();
1206 osi_SetQData(qdp, bufp);
1209 bufp->cmFlags |= CM_BUF_CMFETCHING;
1211 osi_QAdd((osi_queue_t **) &scp->bufReadsp, &qdp->q);
1214 if (flags & CM_SCACHESYNC_STOREDATA) {
1215 /* ensure that the buffer isn't already in the I/O list */
1217 for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1218 tbufp = osi_GetQData(qdp);
1219 osi_assert(tbufp != bufp);
1223 /* queue a held reference to the buffer in the "writing" I/O list */
1224 qdp = osi_QDAlloc();
1225 osi_SetQData(qdp, bufp);
1228 bufp->cmFlags |= CM_BUF_CMSTORING;
1230 osi_QAdd((osi_queue_t **) &scp->bufWritesp, &qdp->q);
1233 if (flags & CM_SCACHESYNC_WRITE) {
1234 /* mark the buffer as being written to. */
1236 bufp->cmFlags |= CM_BUF_CMWRITING;
1243 /* for those syncops that setup for RPCs.
1244  * Called with scache locked.
/*
 * cm_SyncOpDone: undo the "operation in progress" state that cm_SyncOp
 * recorded on scp (and optionally on bufp) before an RPC.
 *
 * scp   - scache entry; caller holds scp->mx (asserted below).
 * bufp  - buffer involved in the data operation, or NULL for status-only ops.
 * flags - the same CM_SCACHESYNC_* bits that were passed to cm_SyncOp.
 *
 * Clears the per-RPC CM_SCACHEFLAG_* bits requested in 'flags', removes
 * bufp from the scp->bufReadsp / scp->bufWritesp I/O queues for
 * FETCHDATA / STOREDATA, clears the corresponding CM_BUF_CM* flags, and
 * wakes any threads sleeping on bufp or on scp->flags.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers), so some declarations and closing braces are not visible here.
 */
1246 void cm_SyncOpDone(cm_scache_t *scp, cm_buf_t *bufp, afs_uint32 flags)
1248     osi_queueData_t *qdp;
1251     lock_AssertMutex(&scp->mx);
1253     /* now, update the recorded state for RPC-type calls */
1254     if (flags & CM_SCACHESYNC_FETCHSTATUS)
1255         scp->flags &= ~CM_SCACHEFLAG_FETCHING;
1256     if (flags & CM_SCACHESYNC_STORESTATUS)
1257         scp->flags &= ~CM_SCACHEFLAG_STORING;
1258     if (flags & CM_SCACHESYNC_STORESIZE)
1259         scp->flags &= ~CM_SCACHEFLAG_SIZESTORING;
1260     if (flags & CM_SCACHESYNC_GETCALLBACK)
1261         scp->flags &= ~CM_SCACHEFLAG_GETCALLBACK;
1262     if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
1263         scp->flags &= ~CM_SCACHEFLAG_DATASTORING;
1264     if (flags & CM_SCACHESYNC_ASYNCSTORE)
1265         scp->flags &= ~CM_SCACHEFLAG_ASYNCSTORING;
1266     if (flags & CM_SCACHESYNC_LOCK)
1267         scp->flags &= ~CM_SCACHEFLAG_LOCKING;
/* Dequeue bufp from the "reading" I/O list and drop its fetch flags.
 * NOTE(review): the comment below says "isn't already in" but the loop
 * appears to be *searching* for bufp's queue entry (removed at orig. 1280);
 * intermediate lines are elided — confirm against upstream. */
1269     /* now update the buffer pointer */
1270     if (flags & CM_SCACHESYNC_FETCHDATA) {
1273         /* ensure that the buffer isn't already in the I/O list */
1274         for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1275             tbufp = osi_GetQData(qdp);
1280         osi_QRemove((osi_queue_t **) &scp->bufReadsp, &qdp->q);
1285         bufp->cmFlags &= ~(CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED);
1286         if (bufp->flags & CM_BUF_WAITING) {
1287             osi_Log2(afsd_logp, "CM SyncOpDone Waking [scp 0x%p] bufp 0x%p", scp, bufp);
1288             osi_Wakeup((LONG_PTR) &bufp);
/* Same pattern for the "writing" I/O list used by STOREDATA. */
1295     /* now update the buffer pointer */
1296     if (flags & CM_SCACHESYNC_STOREDATA) {
1298         /* ensure that the buffer isn't already in the I/O list */
1299         for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1300             tbufp = osi_GetQData(qdp);
1305         osi_QRemove((osi_queue_t **) &scp->bufWritesp, &qdp->q);
1310         bufp->cmFlags &= ~CM_BUF_CMSTORING;
1311         if (bufp->flags & CM_BUF_WAITING) {
1312             osi_Log2(afsd_logp, "CM SyncOpDone Waking [scp 0x%p] bufp 0x%p", scp, bufp);
1313             osi_Wakeup((LONG_PTR) &bufp);
/* WRITE only marked the buffer; assert the mark is present before clearing. */
1320     if (flags & CM_SCACHESYNC_WRITE) {
1322         osi_assert(bufp->cmFlags & CM_BUF_CMWRITING);
1324         bufp->cmFlags &= ~CM_BUF_CMWRITING;
/* Threads blocked in cm_SyncOp sleep on &scp->flags (see osi_SleepM in
 * the wait loop above); wake them now that the operation is finished. */
1328     /* and wakeup anyone who is waiting */
1329     if (scp->flags & CM_SCACHEFLAG_WAITING) {
1330         osi_Log1(afsd_logp, "CM SyncOpDone Waking scp 0x%p", scp);
1331         osi_Wakeup((LONG_PTR) &scp->flags);
1335 /* merge in a response from an RPC. The scp must be locked, and the callback
1338 * Don't overwrite any status info that is dirty, since we could have a store
1339 * operation (such as store data) that merges some info in, and we don't want
1340 * to lose the local updates. Typically, there aren't many updates we do
1341 * locally, anyway, probably only mtime.
1343 * There is probably a bug in here where a chmod (which doesn't change
1344 * serverModTime) that occurs between two fetches, both of whose responses are
1345 * handled after the callback breaking is done, but only one of whose calls
1346 * started before that, can cause old info to be merged from the first call.
/*
 * cm_MergeStatus: merge an AFSFetchStatus returned by an RPC into the
 * local scache entry.
 *
 * dscp    - parent directory scache (used only to synthesize parent fid
 *           info in the error path), may be consulted when statusp carries
 *           an error code.
 * scp     - the scache entry being updated; caller holds it locked (per
 *           the header comment preceding this function).
 * statusp - status block from the fileserver; may be overwritten in-place
 *           with fake values for the Freelance root directory.
 * volsyncp- volume sync info from the RPC (not visibly used in the lines
 *           shown here).
 * userp   - user identity; used for volume lookup and ACL-cache updates.
 * flags   - CM_MERGEFLAG_* bits controlling force-merge and store-data
 *           semantics.
 *
 * NOTE(review): this listing has elided lines (gaps in the embedded line
 * numbers) — several closing braces, else-branches, and an early return in
 * the Freelance path are not visible; do not treat the visible text as the
 * complete control flow.
 */
1348 void cm_MergeStatus(cm_scache_t *dscp,
1349     cm_scache_t *scp, AFSFetchStatus *statusp,
1350     AFSVolSync *volsyncp,
1351     cm_user_t *userp, afs_uint32 flags)
/* Freelance mode: the fake /afs root has no fileserver, so fabricate a
 * plausible status block for it instead of trusting the RPC result. */
1353     // yj: i want to create some fake status for the /afs directory and the
1354     // entries under that directory
1355 #ifdef AFS_FREELANCE_CLIENT
1356     if (cm_freelanceEnabled && scp == cm_data.rootSCachep) {
1357         osi_Log0(afsd_logp,"cm_MergeStatus Freelance cm_data.rootSCachep");
1358         statusp->InterfaceVersion = 0x1;
1359         statusp->FileType = CM_SCACHETYPE_DIRECTORY;
1360         statusp->LinkCount = scp->linkCount;
1361         statusp->Length = cm_fakeDirSize;
1362         statusp->Length_hi = 0;
1363         statusp->DataVersion = cm_data.fakeDirVersion;
1364         statusp->Author = 0x1;
1365         statusp->Owner = 0x0;
1366         statusp->CallerAccess = 0x9;
1367         statusp->AnonymousAccess = 0x9;
1368         statusp->UnixModeBits = 0777;
1369         statusp->ParentVnode = 0x1;
1370         statusp->ParentUnique = 0x1;
1371         statusp->ResidencyMask = 0;
1372         statusp->ClientModTime = FakeFreelanceModTime;
1373         statusp->ServerModTime = FakeFreelanceModTime;
1375         statusp->SyncCounter = 0;
1376         statusp->dataVersionHigh = 0;
1377         statusp->errorCode = 0;
1379 #endif /* AFS_FREELANCE_CLIENT */
/* Server reported a per-file error (e.g. EACCES): mark the entry and wipe
 * the cached attributes rather than merging garbage. */
1381     if (statusp->errorCode != 0) {
1382         scp->flags |= CM_SCACHEFLAG_EACCESS;
1383         osi_Log2(afsd_logp, "Merge, Failure scp %x code 0x%x", scp, statusp->errorCode);
1385         scp->fileType = 0;	/* unknown */
1387         scp->serverModTime = 0;
1388         scp->clientModTime = 0;
1389         scp->length.LowPart = 0;
1390         scp->length.HighPart = 0;
1391         scp->serverLength.LowPart = 0;
1392         scp->serverLength.HighPart = 0;
1396         scp->unixModeBits = 0;
1398         scp->dataVersion = 0;
/* Parent fid comes from dscp when available (elided condition at orig.
 * 1399-1400 — presumably "if (dscp)"; confirm against upstream). */
1401         scp->parentVnode = dscp->fid.vnode;
1402         scp->parentUnique = dscp->fid.unique;
1404         scp->parentVnode = 0;
1405         scp->parentUnique = 0;
1409         scp->flags &= ~CM_SCACHEFLAG_EACCESS;
/* Data-version regression check: unless the caller forces the merge,
 * log servers handing back older data than we already have cached. */
1412     if (!(flags & CM_MERGEFLAG_FORCE)
1413          && statusp->DataVersion < (unsigned long) scp->dataVersion) {
1414         struct cm_cell *cellp;
1416         cellp = cm_FindCellByID(scp->fid.cell);
1417         if (scp->cbServerp) {
1418             struct cm_volume *volp = NULL;
1420             cm_GetVolumeByID(cellp, scp->fid.volume, userp,
1421                               (cm_req_t *) NULL, CM_GETVOL_FLAG_CREATE, &volp);
1422             osi_Log2(afsd_logp, "old data from server %x volume %s",
1423                       scp->cbServerp->addr.sin_addr.s_addr,
1424                       volp ? volp->namep : "(unknown)");
1428         osi_Log3(afsd_logp, "Bad merge, scp %x, scp dv %d, RPC dv %d",
1429                   scp, scp->dataVersion, statusp->DataVersion);
1430         /* we have a number of data fetch/store operations running
1431          * concurrently, and we can tell which one executed last at the
1432          * server by its mtime.
1433          * Choose the one with the largest mtime, and ignore the rest.
1435          * These concurrent calls are incompatible with setting the
1436          * mtime, so we won't have a locally changed mtime here.
1438          * We could also have ACL info for a different user than usual,
1439          * in which case we have to do that part of the merge, anyway.
1440          * We won't have to worry about the info being old, since we
1441          * won't have concurrent calls
1442          * that change file status running from this machine.
1444          * Added 3/17/98: if we see data version regression on an RO
1445          * file, it's probably due to a server holding an out-of-date
1446          * replica, rather than to concurrent RPC's.  Failures to
1447          * release replicas are now flagged by the volserver, but only
1448          * since AFS 3.4 5.22, so there are plenty of clients getting
1449          * out-of-date replicas out there.
1451          * If we discover an out-of-date replica, by this time it's too
1452          * late to go to another server and retry.  Also, we can't
1453          * reject the merge, because then there is no way for
1454          * GetAccess to do its work, and the caller gets into an
1455          * infinite loop.  So we just grin and bear it.
/* The RW-file consequence of the RO check below is elided (orig. 1458). */
1457         if (!(scp->flags & CM_SCACHEFLAG_RO))
/* Normal merge: copy server status into the scache, honoring dirty-mask
 * bits so locally modified mtime/length are not clobbered. */
1461     scp->serverModTime = statusp->ServerModTime;
1463     if (!(scp->mask & CM_SCACHEMASK_CLIENTMODTIME)) {
1464         scp->clientModTime = statusp->ClientModTime;
1466     if (!(scp->mask & CM_SCACHEMASK_LENGTH)) {
1467         scp->length.LowPart = statusp->Length;
1468         scp->length.HighPart = statusp->Length_hi;
1471     scp->serverLength.LowPart = statusp->Length;
1472     scp->serverLength.HighPart = statusp->Length_hi;
1474     scp->linkCount = statusp->LinkCount;
1475     scp->owner = statusp->Owner;
1476     scp->group = statusp->Group;
1477     scp->unixModeBits = statusp->UnixModeBits & 07777;
/* Map the wire FileType; a symlink without any execute bit is treated as
 * an AFS mount point, a long-standing AFS convention. */
1479     if (statusp->FileType == File)
1480         scp->fileType = CM_SCACHETYPE_FILE;
1481     else if (statusp->FileType == Directory)
1482         scp->fileType = CM_SCACHETYPE_DIRECTORY;
1483     else if (statusp->FileType == SymbolicLink) {
1484         if ((scp->unixModeBits & 0111) == 0)
1485             scp->fileType = CM_SCACHETYPE_MOUNTPOINT;
1487             scp->fileType = CM_SCACHETYPE_SYMLINK;
1490         osi_Log2(afsd_logp, "Merge, Invalid File Type (%d), scp %x", statusp->FileType, scp);
1491         scp->fileType = CM_SCACHETYPE_INVALID;	/* invalid */
1493     /* and other stuff */
1494     scp->parentVnode = statusp->ParentVnode;
1495     scp->parentUnique = statusp->ParentUnique;
1497     /* and merge in the private acl cache info, if this is more than the public
1498      * info; merge in the public stuff in any case.
1500     scp->anyAccess = statusp->AnonymousAccess;
1502     if (userp != NULL) {
1503         cm_AddACLCache(scp, userp, statusp->CallerAccess);
/* If this merge is the result of our own store bumping the data version by
 * exactly one, forward the version on every cached buffer of this file so
 * the just-written data is not refetched. */
1506     if ((flags & CM_MERGEFLAG_STOREDATA) &&
1507          statusp->DataVersion - scp->dataVersion == 1) {
1510         for (bp = cm_data.buf_fileHashTablepp[BUF_FILEHASH(&scp->fid)]; bp; bp=bp->fileHashp)
1512             if (cm_FidCmp(&scp->fid, &bp->fid) == 0 &&
1513                  bp->dataVersion == scp->dataVersion)
1514                 bp->dataVersion = statusp->DataVersion;
1518     scp->dataVersion = statusp->DataVersion;
1521 /* note that our stat cache info is incorrect, so force us eventually
1522 * to stat the file again. There may be dirty data associated with
1523 * this vnode, and we want to preserve that information.
1525 * This function works by simply simulating a loss of the callback.
1527 * This function must be called with the scache locked.
/*
 * cm_DiscardSCache: invalidate the cached status of scp by simulating loss
 * of its callback (see the header comment preceding this function).
 * Caller must hold scp->mx (asserted).  Drops the callback server
 * reference, clears the callback flag, purges the DNLC entries and cached
 * ACLs, and empties the mount-point/symlink target string so it is
 * re-evaluated on next use.
 * NOTE(review): orig. lines 1535-1536 are elided from this listing
 * (presumably the closing brace and a cbExpires reset); confirm upstream.
 */
1529 void cm_DiscardSCache(cm_scache_t *scp)
1531     lock_AssertMutex(&scp->mx);
1532     if (scp->cbServerp) {
1533         cm_PutServer(scp->cbServerp);
1534         scp->cbServerp = NULL;
1537     scp->flags &= ~CM_SCACHEFLAG_CALLBACK;
1538     cm_dnlcPurgedp(scp);
1539     cm_dnlcPurgevp(scp);
1540     cm_FreeAllACLEnts(scp);
1542     /* Force mount points and symlinks to be re-evaluated */
1543     scp->mountPointStringp[0] = '\0';
1546 void cm_AFSFidFromFid(AFSFid *afsFidp, cm_fid_t *fidp)
1548 afsFidp->Volume = fidp->volume;
1549 afsFidp->Vnode = fidp->vnode;
1550 afsFidp->Unique = fidp->unique;
/*
 * cm_HoldSCacheNoLock: take a reference on scp without acquiring
 * cm_scacheLock — the "NoLock" suffix means the caller already holds the
 * appropriate lock (cf. cm_HoldSCache below, which takes cm_scacheLock
 * itself).  Under DEBUG_REFCOUNT the Dbg variant also records the
 * caller's file/line.
 * NOTE(review): the actual refCount increment (orig. line 1560) and the
 * #else/#endif lines of the signature selection are elided from this
 * listing; confirm against upstream.
 */
1553 #ifdef DEBUG_REFCOUNT
1554 void cm_HoldSCacheNoLockDbg(cm_scache_t *scp, char * file, long line)
1556 void cm_HoldSCacheNoLock(cm_scache_t *scp)
1559     osi_assert(scp != 0);
1561 #ifdef DEBUG_REFCOUNT
1562     osi_Log2(afsd_logp,"cm_HoldSCacheNoLock scp 0x%p ref %d",scp, scp->refCount);
1563     afsi_log("%s:%d cm_HoldSCacheNoLock scp 0x%p, ref %d", file, line, scp, scp->refCount);
/*
 * cm_HoldSCache: take a reference on scp, acquiring and releasing
 * cm_scacheLock (write mode) around the refcount update.  Under
 * DEBUG_REFCOUNT the Dbg variant also records the caller's file/line.
 * NOTE(review): the actual refCount increment (orig. line 1575) and the
 * #else/#endif lines are elided from this listing; confirm upstream.
 */
1567 #ifdef DEBUG_REFCOUNT
1568 void cm_HoldSCacheDbg(cm_scache_t *scp, char * file, long line)
1570 void cm_HoldSCache(cm_scache_t *scp)
1573     osi_assert(scp != 0);
1574     lock_ObtainWrite(&cm_scacheLock);
1576 #ifdef DEBUG_REFCOUNT
1577     osi_Log2(afsd_logp,"cm_HoldSCache scp 0x%p ref %d",scp, scp->refCount);
1578     afsi_log("%s:%d cm_HoldSCache scp 0x%p ref %d", file, line, scp, scp->refCount);
1580     lock_ReleaseWrite(&cm_scacheLock);
/*
 * cm_ReleaseSCacheNoLock: drop a reference on scp without acquiring
 * cm_scacheLock (caller already holds it).  Logs before a refcount
 * underflow would trip the assertion.
 * NOTE(review): the decrement is performed *inside* osi_assert()
 * ("scp->refCount-- >= 0") — if osi_assert is ever compiled out, the
 * reference would never be released, and if refCount is unsigned the
 * ">= 0" check is vacuous.  Compare cm_ReleaseSCache below, which asserts
 * first and (per the elided orig. line 1610) decrements separately.
 * Flagged for confirmation against upstream rather than changed here.
 */
1583 #ifdef DEBUG_REFCOUNT
1584 void cm_ReleaseSCacheNoLockDbg(cm_scache_t *scp, char * file, long line)
1586 void cm_ReleaseSCacheNoLock(cm_scache_t *scp)
1589     osi_assert(scp != NULL);
1590     if (scp->refCount == 0)
1591         osi_Log1(afsd_logp,"cm_ReleaseSCacheNoLock about to panic scp 0x%x",scp);
1592     osi_assert(scp->refCount-- >= 0);
1593 #ifdef DEBUG_REFCOUNT
1594     osi_Log2(afsd_logp,"cm_ReleaseSCacheNoLock scp 0x%p ref %d",scp,scp->refCount);
1595     afsi_log("%s:%d cm_ReleaseSCacheNoLock scp 0x%p ref %d", file, line, scp, scp->refCount);
/*
 * cm_ReleaseSCache: drop a reference on scp, acquiring cm_scacheLock
 * (write mode) around the refcount update.  Panics (via osi_assert) if
 * called on an entry whose refCount is already zero.
 * NOTE(review): the actual refCount decrement (orig. line 1610) is elided
 * from this listing; confirm against upstream.
 */
1599 #ifdef DEBUG_REFCOUNT
1600 void cm_ReleaseSCacheDbg(cm_scache_t *scp, char * file, long line)
1602 void cm_ReleaseSCache(cm_scache_t *scp)
1605     osi_assert(scp != NULL);
1606     lock_ObtainWrite(&cm_scacheLock);
1607     if (scp->refCount == 0)
1608         osi_Log1(afsd_logp,"cm_ReleaseSCache about to panic scp 0x%x",scp);
1609     osi_assert(scp->refCount != 0);
1611 #ifdef DEBUG_REFCOUNT
1612     osi_Log2(afsd_logp,"cm_ReleaseSCache scp 0x%p ref %d",scp,scp->refCount);
1613     afsi_log("%s:%d cm_ReleaseSCache scp 0x%p ref %d", file, line, scp, scp->refCount);
1615     lock_ReleaseWrite(&cm_scacheLock);
1618 /* just look for the scp entry to get filetype */
1619 /* doesn't need to be perfectly accurate, so locking doesn't matter too much */
/*
 * cm_FindFileType: hash-probe the scache table for fidp and return the
 * cached fileType of the matching entry.  Takes cm_scacheLock (write) for
 * the duration of the scan, despite the comment above — the lock does not
 * hurt, and protects the hash-chain traversal.
 * NOTE(review): the not-found return path (orig. line 1637 onward) is
 * elided from this listing — presumably "return 0;" (unknown type);
 * confirm against upstream.  fidp->cell must be non-zero (asserted).
 */
1620 int cm_FindFileType(cm_fid_t *fidp)
1625     hash = CM_SCACHE_HASH(fidp);
1627     osi_assert(fidp->cell != 0);
1629     lock_ObtainWrite(&cm_scacheLock);
1630     for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
1631         if (cm_FidCmp(fidp, &scp->fid) == 0) {
1632             lock_ReleaseWrite(&cm_scacheLock);
1633             return scp->fileType;
1636     lock_ReleaseWrite(&cm_scacheLock);
1640 /* dump all scp's that have reference count > 0 to a file.
1641 * cookie is used to identify this batch for easy parsing,
1642 * and it a string provided by a caller
1644 int cm_DumpSCache(FILE *outputFile, char *cookie, int lock)
1652 lock_ObtainRead(&cm_scacheLock);
1654 sprintf(output, "%s - dumping all scache - cm_data.currentSCaches=%d, cm_data.maxSCaches=%d\r\n", cookie, cm_data.currentSCaches, cm_data.maxSCaches);
1655 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1657 for (scp = cm_data.allSCachesp; scp; scp = scp->allNextp)
1659 sprintf(output, "%s scp=0x%p, fid (cell=%d, volume=%d, vnode=%d, unique=%d) volp=0x%p type=%d dv=%d len=0x%I64x mp='%s' flags=0x%x cb=0x%x refCount=%u\r\n",
1660 cookie, scp, scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique,
1661 scp->volp, scp->fileType, scp->dataVersion, scp->length.QuadPart, scp->mountPointStringp, scp->flags,
1662 (unsigned long)scp->cbExpires, scp->refCount);
1663 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1666 sprintf(output, "%s - Done dumping all scache.\r\n", cookie);
1667 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1668 sprintf(output, "%s - dumping cm_data.scacheHashTable - cm_data.scacheHashTableSize=%d\r\n", cookie, cm_data.scacheHashTableSize);
1669 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1671 for (i = 0; i < cm_data.scacheHashTableSize; i++)
1673 for(scp = cm_data.scacheHashTablep[i]; scp; scp=scp->nextp)
1675 sprintf(output, "%s scp=0x%p, hash=%d, fid (cell=%d, volume=%d, vnode=%d, unique=%d)\r\n",
1676 cookie, scp, i, scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique);
1677 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1681 sprintf(output, "%s - Done dumping cm_data.scacheHashTable\r\n", cookie);
1682 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1685 lock_ReleaseRead(&cm_scacheLock);