2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afs/param.h>
23 /*extern void afsi_log(char *pattern, ...);*/
25 extern osi_hyper_t hzero;
28 osi_queue_t *cm_allFileLocks;
29 osi_queue_t *cm_freeFileLocks;
30 unsigned long cm_lockRefreshCycle;
32 /* lock for globals */
33 osi_rwlock_t cm_scacheLock;
35 /* Dummy scache entry for use with pioctl fids */
36 cm_scache_t cm_fakeSCache;
38 #ifdef AFS_FREELANCE_CLIENT
39 extern osi_mutex_t cm_Freelance_Lock;
42 /* must be called with cm_scacheLock write-locked! */
43 void cm_AdjustLRU(cm_scache_t *scp)
/* Move scp to the head (most-recently-used end) of the global scache LRU
 * queue.  Called on every reference so the tail stays least-recently-used. */
/* if scp is currently the tail, retreat the tail pointer first so it
 * remains valid after the removal below */
45     if (scp == cm_data.scacheLRULastp)
46         cm_data.scacheLRULastp = (cm_scache_t *) osi_QPrev(&scp->q);
47     osi_QRemoveHT((osi_queue_t **) &cm_data.scacheLRUFirstp, (osi_queue_t **) &cm_data.scacheLRULastp, &scp->q);
48     osi_QAdd((osi_queue_t **) &cm_data.scacheLRUFirstp, &scp->q);
/* re-establish the tail pointer if scp was the only element */
49     if (!cm_data.scacheLRULastp)
50         cm_data.scacheLRULastp = scp;
53 /* called with cm_scacheLock write-locked; recycles an existing scp. */
54 long cm_RecycleSCache(cm_scache_t *scp, afs_int32 flags)
/* Strip an unreferenced scache entry of all cached state -- hash linkage,
 * queued I/O buffers (if CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS), callback,
 * volume reference, symlink/mount-point info, lock state and ACL entries --
 * so the slot can be reused for a different FID. */
60     if (scp->flags & CM_SCACHEFLAG_INHASH) {
61         /* hash it out first */
62         i = CM_SCACHE_HASH(&scp->fid);
63         for (lscpp = &cm_data.hashTablep[i], tscp = cm_data.hashTablep[i];
65               lscpp = &tscp->nextp, tscp = tscp->nextp) {
68                 scp->flags &= ~CM_SCACHEFLAG_INHASH;
/* scp must have been found in its chain; a miss means the hash is corrupt */
72         osi_assertx(tscp, "afsd: scache hash screwup");
75     if (flags & CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS) {
/* drain the pending-store queue: error out each buffer so any thread
 * waiting on it gives up (the vnode is going away) */
79         while(qdp = scp->bufWritesp) {
80             bufp = osi_GetQData(qdp);
81             osi_QRemove((osi_queue_t **) &scp->bufWritesp, &qdp->q);
84                 lock_ObtainMutex(&bufp->mx);
85                 bufp->cmFlags &= ~CM_BUF_CMSTORING;
86                 bufp->flags &= ~CM_BUF_DIRTY;
87                 bufp->flags |= CM_BUF_ERROR;
88                 bufp->error = VNOVNODE;
89                 bufp->dataVersion = -1; /* bad */
91                 if (bufp->flags & CM_BUF_WAITING) {
92                     osi_Log2(afsd_logp, "CM RecycleSCache Waking [scp 0x%x] bufp 0x%x", scp, bufp);
93                     osi_Wakeup((long) &bufp);
95                 lock_ReleaseMutex(&bufp->mx);
/* likewise drain the pending-fetch queue */
99         while(qdp = scp->bufReadsp) {
100             bufp = osi_GetQData(qdp);
101             osi_QRemove((osi_queue_t **) &scp->bufReadsp, &qdp->q);
104                 lock_ObtainMutex(&bufp->mx);
105                 bufp->cmFlags &= ~CM_BUF_CMFETCHING;
106                 bufp->flags &= ~CM_BUF_DIRTY;
107                 bufp->flags |= CM_BUF_ERROR;
108                 bufp->error = VNOVNODE;
109                 bufp->dataVersion = -1; /* bad */
110                 bufp->dirtyCounter++;
111                 if (bufp->flags & CM_BUF_WAITING) {
112                     osi_Log2(afsd_logp, "CM RecycleSCache Waking [scp 0x%x] bufp 0x%x", scp, bufp);
113                     osi_Wakeup((long) &bufp);
115                 lock_ReleaseMutex(&bufp->mx);
/* write back (rather than destroy) any remaining dirty buffers */
119         buf_CleanDirtyBuffers(scp);
121     /* look for things that shouldn't still be set */
122     osi_assert(scp->bufWritesp == NULL);
123     osi_assert(scp->bufReadsp == NULL);
126     /* invalidate so next merge works fine;
127      * also initialize some flags */
128     scp->flags &= ~(CM_SCACHEFLAG_STATD
129                      | CM_SCACHEFLAG_DELETED
131                      | CM_SCACHEFLAG_PURERO
132                      | CM_SCACHEFLAG_OVERQUOTA
133                      | CM_SCACHEFLAG_OUTOFSPACE);
134     scp->serverModTime = 0;
135     scp->dataVersion = 0;
136     scp->bulkStatProgress = hzero;
144     /* discard callback */
145     if (scp->cbServerp) {
146         cm_PutServer(scp->cbServerp);
147         scp->cbServerp = NULL;
151     /* remove from dnlc */
155     /* discard cached status; if non-zero, Close
156      * tried to store this to server but failed */
159     /* drop held volume ref */
161         cm_PutVolume(scp->volp);
165     /* discard symlink info */
166     scp->mountPointStringp[0] = 0;
167     memset(&scp->mountRootFid, 0, sizeof(cm_fid_t));
168     memset(&scp->dotdotFid, 0, sizeof(cm_fid_t));
170     /* reset locking info */
171     scp->fileLocksH = NULL;
172     scp->fileLocksT = NULL;
173     scp->serverLock = (-1);
174     scp->exclusiveLocks = 0;
175     scp->sharedLocks = 0;
177     /* not locked, but there can be no references to this guy
178      * while we hold the global refcount lock.
180     cm_FreeAllACLEnts(scp);
186 /* called with cm_scacheLock write-locked; find a vnode to recycle.
187  * Can allocate a new one if desperate, or if below quota (cm_data.maxSCaches).
189 cm_scache_t *cm_GetNewSCache(void)
/* Preference order: (1) an unreferenced deleted or unhashed entry from the
 * LRU tail; (2) if already at quota, any clean unreferenced LRU entry;
 * (3) otherwise carve a fresh entry out of the mapped scache arena. */
194     /* first pass - look for deleted objects */
195     for ( scp = cm_data.scacheLRULastp;
197           scp = (cm_scache_t *) osi_QPrev(&scp->q))
/* sanity: every LRU entry must lie inside the mapped scache arena */
199         osi_assert(scp >= cm_data.scacheBaseAddress && scp < (cm_scache_t *)cm_data.hashTablep);
201         if (scp->refCount == 0) {
202             if (scp->flags & CM_SCACHEFLAG_DELETED) {
203                 osi_Log1(afsd_logp, "GetNewSCache attempting to recycle deleted scp 0x%x", scp);
204                 if (!cm_RecycleSCache(scp, CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS)) {
206                     /* we found an entry, so return it */
207                     /* now remove from the LRU queue and put it back at the
208                      * head of the LRU queue.
215                 osi_Log1(afsd_logp, "GetNewSCache recycled failed scp 0x%x", scp);
216             } else if (!(scp->flags & CM_SCACHEFLAG_INHASH)) {
217                 /* we found an entry, so return it */
218                 /* now remove from the LRU queue and put it back at the
219                  * head of the LRU queue.
228     osi_Log0(afsd_logp, "GetNewSCache no deleted or recycled entries available for reuse");
230     if (cm_data.currentSCaches >= cm_data.maxSCaches) {
231         /* There were no deleted scache objects that we could use.  Try to find
232          * one that simply hasn't been used in a while.
235         for ( scp = cm_data.scacheLRULastp;
237               scp = (cm_scache_t *) osi_QPrev(&scp->q))
239             /* It is possible for the refCount to be zero and for there still
240              * to be outstanding dirty buffers.  If there are dirty buffers,
241              * we must not recycle the scp. */
242             if (scp->refCount == 0 && scp->bufReadsp == NULL && scp->bufWritesp == NULL) {
243                 if (!buf_DirtyBuffersExist(&scp->fid)) {
244                     if (!cm_RecycleSCache(scp, 0)) {
245                         /* we found an entry, so return it */
246                         /* now remove from the LRU queue and put it back at the
247                          * head of the LRU queue.
255                     osi_Log1(afsd_logp,"GetNewSCache dirty buffers exist scp 0x%x", scp);
259         osi_Log1(afsd_logp, "GetNewSCache all scache entries in use (retry = %d)", retry);
261         /* If get here it means that every scache is either in use or has dirty buffers.
262          * We used to panic.  Now we will give up our lock and wait.
/* drop the global lock so other threads can release entries, then retry */
265         lock_ReleaseWrite(&cm_scacheLock);
267         lock_ObtainWrite(&cm_scacheLock);
274     /* if we get here, we should allocate a new scache entry.  We either are below
275      * quota or we have a leak and need to allocate a new one to avoid panicing.
277     scp = cm_data.scacheBaseAddress + cm_data.currentSCaches;
278     osi_assert(scp >= cm_data.scacheBaseAddress && scp < (cm_scache_t *)cm_data.hashTablep);
279     memset(scp, 0, sizeof(cm_scache_t));
280     scp->magic = CM_SCACHE_MAGIC;
281     lock_InitializeMutex(&scp->mx, "cm_scache_t mutex");
282     lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
/* -1 == no server byte-range lock held for this vnode */
283     scp->serverLock = -1;
285     /* and put it in the LRU queue */
286     osi_QAdd((osi_queue_t **) &cm_data.scacheLRUFirstp, &scp->q);
287     if (!cm_data.scacheLRULastp)
288         cm_data.scacheLRULastp = scp;
289     cm_data.currentSCaches++;
290     cm_dnlcPurgedp(scp); /* make doubly sure that this is not in dnlc */
295 /* like strcmp, only for fids */
296 int cm_FidCmp(cm_fid_t *ap, cm_fid_t *bp)
/* Compare the two FIDs field by field (vnode, volume, unique, cell);
 * returns 0 only when every component matches. */
298     if (ap->vnode != bp->vnode)
300     if (ap->volume != bp->volume)
302     if (ap->unique != bp->unique)
304     if (ap->cell != bp->cell)
309 void cm_fakeSCacheInit(int newFile)
/* Initialize cm_data.fakeSCache, the dummy status entry used for pioctl
 * fids: it is made to look like an ordinary small file that is always
 * valid and permanently referenced. */
312     memset(&cm_data.fakeSCache, 0, sizeof(cm_scache_t));
/* (-1) is a sentinel non-NULL callback-server pointer -- presumably so the
 * fake entry always appears to hold a callback; NOTE(review): confirm
 * against cm_HaveCallback that this pointer is never dereferenced */
313     cm_data.fakeSCache.cbServerp = (struct cm_server *)(-1);
314     /* can leave clientModTime at 0 */
315     cm_data.fakeSCache.fileType = CM_SCACHETYPE_FILE;
316     cm_data.fakeSCache.unixModeBits = 0777;
317     cm_data.fakeSCache.length.LowPart = 1000;
318     cm_data.fakeSCache.linkCount = 1;
/* held forever; the fake entry is never recycled */
319     cm_data.fakeSCache.refCount = 1;
321     lock_InitializeMutex(&cm_data.fakeSCache.mx, "cm_scache_t mutex");
325 cm_ValidateSCache(void)
/* Structural consistency check of the scache: walks the LRU list forward
 * and backward verifying magic numbers, prev/next symmetry and loop bounds,
 * then sweeps the fid hash table, and finally validates the DNLC.
 * Each failure is reported to both the afsd log and stderr.
 * NOTE(review): "LUR" in two fprintf messages below (original lines 362 and
 * 396) is a typo for "LRU"; it is runtime output, so it is left untouched
 * in this documentation-only pass. */
327     cm_scache_t * scp, *lscp;
/* an empty list must have both head and tail NULL; mixed state is corrupt */
330     if ( cm_data.scacheLRUFirstp == NULL && cm_data.scacheLRULastp != NULL ||
331          cm_data.scacheLRUFirstp != NULL && cm_data.scacheLRULastp == NULL) {
332         afsi_log("cm_ValidateSCache failure: inconsistent LRU pointers");
333         fprintf(stderr, "cm_ValidateSCache failure: inconsistent LRU pointers\n");
/* forward walk: head to tail, tracking the previous node in lscp */
337     for ( scp = cm_data.scacheLRUFirstp, lscp = NULL, i = 0;
339           lscp = scp, scp = (cm_scache_t *) osi_QNext(&scp->q), i++ ) {
340         if (scp->magic != CM_SCACHE_MAGIC) {
341             afsi_log("cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC");
342             fprintf(stderr, "cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC\n");
345         if (scp->nextp && scp->nextp->magic != CM_SCACHE_MAGIC) {
346             afsi_log("cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC");
347             fprintf(stderr, "cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC\n");
350         if (scp->randomACLp && scp->randomACLp->magic != CM_ACLENT_MAGIC) {
351             afsi_log("cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC");
352             fprintf(stderr, "cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC\n");
355         if (scp->volp && scp->volp->magic != CM_VOLUME_MAGIC) {
356             afsi_log("cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC");
357             fprintf(stderr, "cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC\n");
/* visiting more nodes than exist means the queue contains a cycle */
360         if (i > cm_data.currentSCaches ) {
361             afsi_log("cm_ValidateSCache failure: LRU First queue loops");
362             fprintf(stderr, "cm_ValidateSCache failure: LUR First queue loops\n");
365         if (lscp != (cm_scache_t *) osi_QPrev(&scp->q)) {
366             afsi_log("cm_ValidateSCache failure: QPrev(scp) != previous");
367             fprintf(stderr, "cm_ValidateSCache failure: QPrev(scp) != previous\n");
/* backward walk: tail to head, mirror of the checks above */
372     for ( scp = cm_data.scacheLRULastp, lscp = NULL, i = 0; scp;
373           lscp = scp, scp = (cm_scache_t *) osi_QPrev(&scp->q), i++ ) {
374         if (scp->magic != CM_SCACHE_MAGIC) {
375             afsi_log("cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC");
376             fprintf(stderr, "cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC\n");
379         if (scp->nextp && scp->nextp->magic != CM_SCACHE_MAGIC) {
380             afsi_log("cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC");
381             fprintf(stderr, "cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC\n");
384         if (scp->randomACLp && scp->randomACLp->magic != CM_ACLENT_MAGIC) {
385             afsi_log("cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC");
386             fprintf(stderr, "cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC\n");
389         if (scp->volp && scp->volp->magic != CM_VOLUME_MAGIC) {
390             afsi_log("cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC");
391             fprintf(stderr, "cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC\n");
394         if (i > cm_data.currentSCaches ) {
395             afsi_log("cm_ValidateSCache failure: LRU Last queue loops");
396             fprintf(stderr, "cm_ValidateSCache failure: LUR Last queue loops\n");
399         if (lscp != (cm_scache_t *) osi_QNext(&scp->q)) {
400             afsi_log("cm_ValidateSCache failure: QNext(scp) != next");
401             fprintf(stderr, "cm_ValidateSCache failure: QNext(scp) != next\n");
/* sweep every hash chain with the same magic-number checks */
406     for ( i=0; i < cm_data.hashTableSize; i++ ) {
407         for ( scp = cm_data.hashTablep[i]; scp; scp = scp->nextp ) {
408             if (scp->magic != CM_SCACHE_MAGIC) {
409                 afsi_log("cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC");
410                 fprintf(stderr, "cm_ValidateSCache failure: scp->magic != CM_SCACHE_MAGIC\n");
413             if (scp->nextp && scp->nextp->magic != CM_SCACHE_MAGIC) {
414                 afsi_log("cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC");
415                 fprintf(stderr, "cm_ValidateSCache failure: scp->nextp->magic != CM_SCACHE_MAGIC\n");
418             if (scp->randomACLp && scp->randomACLp->magic != CM_ACLENT_MAGIC) {
419                 afsi_log("cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC");
420                 fprintf(stderr, "cm_ValidateSCache failure: scp->randomACLp->magic != CM_ACLENT_MAGIC\n");
423             if (scp->volp && scp->volp->magic != CM_VOLUME_MAGIC) {
424                 afsi_log("cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC");
425                 fprintf(stderr, "cm_ValidateSCache failure: scp->volp->magic != CM_VOLUME_MAGIC\n");
431     return cm_dnlcValidate();
435 cm_ShutdownSCache(void)
/* Shutdown pass: walk every scache entry on the LRU list, free its ACL
 * entries (under the entry mutex) and finalize its per-entry locks, then
 * shut down the DNLC and return its status. */
439     for ( scp = cm_data.scacheLRULastp; scp;
440           scp = (cm_scache_t *) osi_QPrev(&scp->q) ) {
441         if (scp->randomACLp) {
442             lock_ObtainMutex(&scp->mx);
443             cm_FreeAllACLEnts(scp);
444             lock_ReleaseMutex(&scp->mx);
446         lock_FinalizeMutex(&scp->mx);
447         lock_FinalizeRWLock(&scp->bufCreateLock);
450     return cm_dnlcShutdown();
453 void cm_InitSCache(int newFile, long maxSCaches)
/* One-time module initialization (guarded by osi_Once).  With a fresh
 * cache file the hash table and global counters are reset; with a reused
 * persistent cache file the in-memory locks and transient per-entry state
 * (callbacks, file locks, wait flags) are re-created, since they do not
 * survive a restart. */
455     static osi_once_t once;
457     if (osi_Once(&once)) {
458         lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock");
460             memset(cm_data.hashTablep, 0, sizeof(cm_scache_t *) * cm_data.hashTableSize);
461             cm_data.currentSCaches = 0;
462             cm_data.maxSCaches = maxSCaches;
463             cm_data.scacheLRUFirstp = cm_data.scacheLRULastp = NULL;
467             for ( scp = cm_data.scacheLRULastp; scp;
468                   scp = (cm_scache_t *) osi_QPrev(&scp->q) ) {
469                 lock_InitializeMutex(&scp->mx, "cm_scache_t mutex");
470                 lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
/* callbacks and server lock state are not preserved across restart */
472                 scp->cbServerp = NULL;
474                 scp->fileLocksH = NULL;
475                 scp->fileLocksT = NULL;
476                 scp->serverLock = (-1);
477                 scp->lastRefreshCycle = 0;
478                 scp->exclusiveLocks = 0;
479                 scp->sharedLocks = 0;
/* no thread can be waiting on this entry yet */
485                 scp->flags &= ~CM_SCACHEFLAG_WAITING;
488         cm_allFileLocks = NULL;
489         cm_freeFileLocks = NULL;
490         cm_lockRefreshCycle = 0;
491         cm_fakeSCacheInit(newFile);
492         cm_dnlcInit(newFile);
497 /* version that doesn't bother creating the entry if we don't find it */
498 cm_scache_t *cm_FindSCache(cm_fid_t *fidp)
/* Pure hash lookup: on a hit, returns the entry with a reference held;
 * on a miss, returns without allocating or contacting the file server. */
503     hash = CM_SCACHE_HASH(fidp);
505     if (fidp->cell == 0) {
512     lock_ObtainWrite(&cm_scacheLock);
513     for (scp=cm_data.hashTablep[hash]; scp; scp=scp->nextp) {
514         if (cm_FidCmp(fidp, &scp->fid) == 0) {
/* take the reference before dropping the global lock */
515             cm_HoldSCacheNoLock(scp);
517             lock_ReleaseWrite(&cm_scacheLock);
521     lock_ReleaseWrite(&cm_scacheLock);
525 long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp,
/* Look up -- or create -- the scache entry for *fidp, returning a held
 * reference through *outScpp.  Fast path is a hash hit; otherwise the
 * Freelance fake root volume is handled specially, and normal fids are
 * resolved (cell, then volume) before a recycled entry is initialized and
 * inserted into the hash table. */
531     cm_volume_t *volp = 0;
534     int special; // yj: boolean variable to test if file is on root.afs
536     extern cm_fid_t cm_rootFid;
538     hash = CM_SCACHE_HASH(fidp);
540     osi_assert(fidp->cell != 0);
542     if (fidp->cell== cm_data.rootFid.cell &&
543          fidp->volume==cm_data.rootFid.volume &&
544          fidp->vnode==0x0 && fidp->unique==0x0)
546         osi_Log0(afsd_logp,"cm_getSCache called with root cell/volume and vnode=0 and unique=0");
549     // yj: check if we have the scp, if so, we don't need
550     // to do anything else
551     lock_ObtainWrite(&cm_scacheLock);
552     for (scp=cm_data.hashTablep[hash]; scp; scp=scp->nextp) {
553         if (cm_FidCmp(fidp, &scp->fid) == 0) {
554             cm_HoldSCacheNoLock(scp);
557             lock_ReleaseWrite(&cm_scacheLock);
562     // yj: when we get here, it means we don't have an scp
563     // so we need to either load it or fake it, depending
564     // on whether the file is "special", see below.
566     // yj: if we're trying to get an scp for a file that's
567     // on root.afs of homecell, we want to handle it specially
568     // because we have to fill in the status stuff 'coz we
569     // don't want trybulkstat to fill it in for us
570 #ifdef AFS_FREELANCE_CLIENT
/* "special" = a fake mount-point entry under the Freelance root;
 * "isRoot" = the Freelance root directory itself (vnode 1, unique 1) */
571     special = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
572                fidp->volume==AFS_FAKE_ROOT_VOL_ID &&
573                !(fidp->vnode==0x1 && fidp->unique==0x1));
574     isRoot = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
575               fidp->volume==AFS_FAKE_ROOT_VOL_ID &&
576               fidp->vnode==0x1 && fidp->unique==0x1);
577     if (cm_freelanceEnabled && isRoot) {
578         osi_Log0(afsd_logp,"cm_getSCache Freelance and isRoot");
579         /* freelance: if we are trying to get the root scp for the first
580          * time, we will just put in a place holder entry.
585     if (cm_freelanceEnabled && special) {
586         osi_Log0(afsd_logp,"cm_getSCache Freelance and special");
/* vnodes 2..N+1 map to the local mount-point table (offset by 2) */
587         if (fidp->vnode > 1 && fidp->vnode <= cm_noLocalMountPoints + 2) {
588             lock_ObtainMutex(&cm_Freelance_Lock);
589             mp =(cm_localMountPoints+fidp->vnode-2)->mountPointStringp;
590             lock_ReleaseMutex(&cm_Freelance_Lock);
594         scp = cm_GetNewSCache();
596             osi_Log0(afsd_logp,"cm_getSCache unable to obtain *new* scache entry");
597             lock_ReleaseWrite(&cm_scacheLock);
598             return CM_ERROR_WOULDBLOCK;
601         lock_ObtainMutex(&scp->mx);
603         scp->volp = cm_data.rootSCachep->volp;
/* ".." of a fake mount point is always the Freelance root */
604         scp->dotdotFid.cell=AFS_FAKE_ROOT_CELL_ID;
605         scp->dotdotFid.volume=AFS_FAKE_ROOT_VOL_ID;
606         scp->dotdotFid.unique=1;
607         scp->dotdotFid.vnode=1;
608         scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
609         scp->nextp=cm_data.hashTablep[hash];
610         cm_data.hashTablep[hash]=scp;
611         scp->flags |= CM_SCACHEFLAG_INHASH;
613         if (fidp->vnode > 1 && fidp->vnode <= cm_noLocalMountPoints + 2)
614             scp->fileType = (cm_localMountPoints+fidp->vnode-2)->fileType;
616             scp->fileType = CM_SCACHETYPE_INVALID;
618         lock_ObtainMutex(&cm_Freelance_Lock);
619         scp->length.LowPart = (DWORD)strlen(mp)+4;
620         scp->length.HighPart = 0;
621         strncpy(scp->mountPointStringp,mp,MOUNTPOINTLEN);
/* strncpy may leave the buffer unterminated; force termination */
622         scp->mountPointStringp[MOUNTPOINTLEN-1] = '\0';
623         lock_ReleaseMutex(&cm_Freelance_Lock);
626         scp->unixModeBits=0x1ff;
627         scp->clientModTime=FakeFreelanceModTime;
628         scp->serverModTime=FakeFreelanceModTime;
629         scp->parentUnique = 0x1;
630         scp->parentVnode=0x1;
632         scp->dataVersion=cm_data.fakeDirVersion;
633         scp->lockDataVersion=-1; /* no lock yet */
634         lock_ReleaseMutex(&scp->mx);
636         lock_ReleaseWrite(&cm_scacheLock);
640 #endif /* AFS_FREELANCE_CLIENT */
642     /* otherwise, we need to find the volume */
643     if (!cm_freelanceEnabled || !isRoot) {
644         lock_ReleaseWrite(&cm_scacheLock);     /* for perf. reasons */
645         cellp = cm_FindCellByID(fidp->cell);
647             return CM_ERROR_NOSUCHCELL;
649         code = cm_GetVolumeByID(cellp, fidp->volume, userp, reqp, &volp);
652         lock_ObtainWrite(&cm_scacheLock);
655     /* otherwise, we have the volume, now reverify that the scp doesn't
656      * exist, and proceed.
/* (the global lock was dropped above, so someone may have raced us) */
658     for (scp=cm_data.hashTablep[hash]; scp; scp=scp->nextp) {
659         if (cm_FidCmp(fidp, &scp->fid) == 0) {
660             cm_HoldSCacheNoLock(scp);
661             osi_assert(scp->volp == volp);
663             lock_ReleaseWrite(&cm_scacheLock);
671     /* now, if we don't have the fid, recycle something */
672     scp = cm_GetNewSCache();
674         osi_Log0(afsd_logp,"cm_getSCache unable to obtain *new* scache entry");
675         lock_ReleaseWrite(&cm_scacheLock);
676         return CM_ERROR_WOULDBLOCK;
679     osi_assert(!(scp->flags & CM_SCACHEFLAG_INHASH));
680     lock_ObtainMutex(&scp->mx);
682     scp->volp = volp;        /* a held reference */
684     if (!cm_freelanceEnabled || !isRoot) {
685         /* if this scache entry represents a volume root then we need
686          * to copy the dotdotFid from the volume structure where the
687          * "master" copy is stored (defect 11489)
689         if (scp->fid.vnode == 1 && scp->fid.unique == 1) {
690             scp->dotdotFid = volp->dotdotFid;
693         if (volp->roID == fidp->volume)
694             scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
695         else if (volp->bkID == fidp->volume)
696             scp->flags |= CM_SCACHEFLAG_RO;
698     scp->nextp = cm_data.hashTablep[hash];
699     cm_data.hashTablep[hash] = scp;
700     scp->flags |= CM_SCACHEFLAG_INHASH;
702     lock_ReleaseMutex(&scp->mx);
704     /* XXX - The following fields in the cm_scache are
710     lock_ReleaseWrite(&cm_scacheLock);
712     /* now we have a held scache entry; just return it */
717 /* Returns a held reference to the scache's parent
719 cm_scache_t * cm_FindSCacheParent(cm_scache_t * scp)
/* Construct the parent's fid (same cell/volume, the recorded parentVnode
 * and parentUnique) and search the entire hash table for it -- an
 * O(hash-table-size) scan, not a hashed lookup. */
724     cm_scache_t * pscp = NULL;
726     lock_ObtainWrite(&cm_scacheLock);
727     parent_fid = scp->fid;
728     parent_fid.vnode = scp->parentVnode;
729     parent_fid.unique = scp->parentUnique;
/* skip the search when scp is its own parent (e.g. a volume root) */
731     if (cm_FidCmp(&scp->fid, &parent_fid)) {
732         for (i=0; i<cm_data.hashTableSize; i++) {
733             for (pscp = cm_data.hashTablep[i]; pscp; pscp = pscp->nextp) {
734                 if (!cm_FidCmp(&pscp->fid, &parent_fid)) {
735                     cm_HoldSCacheNoLock(pscp);
741     lock_ReleaseWrite(&cm_scacheLock);
746 /* synchronize a fetch, store, read, write, fetch status or store status.
747  * Called with scache mutex held, and returns with it held, but temporarily
748  * drops it during the fetch.
750  * At most one flag can be on in flags, if this is an RPC request.
752  * Also, if we're fetching or storing data, we must ensure that we have a buffer.
754  * There are a lot of weird restrictions here; here's an attempt to explain the
755  * rationale for the concurrency restrictions implemented in this function.
757  * First, although the file server will break callbacks when *another* machine
758  * modifies a file or status block, the client itself is responsible for
759  * concurrency control on its own requests.  Callback breaking events are rare,
760  * and simply invalidate any concurrent new status info.
762  * In the absence of callback breaking messages, we need to know how to
763  * synchronize incoming responses describing updates to files.  We synchronize
764  * operations that update the data version by comparing the data versions.
765  * However, updates that do not update the data, but only the status, can't be
766  * synchronized with fetches or stores, since there's nothing to compare
767  * to tell which operation executed first at the server.
769  * Thus, we can allow multiple ops that change file data, or dir data, and
770  * fetches.  However, status storing ops have to be done serially.
772  * Furthermore, certain data-changing ops are incompatible: we can't read or
773  * write a buffer while doing a truncate.  We can't read and write the same
774  * buffer at the same time, or write while fetching or storing, or read while
775  * fetching a buffer (this may change).  We can't fetch and store at the same
778  * With respect to status, we can't read and write at the same time, read while
779  * fetching, write while fetching or storing, or fetch and store at the same time.
781  * We can't allow a get callback RPC to run in concurrently with something that
782  * will return updated status, since we could start a call, have the server
783  * return status, have another machine make an update to the status (which
784  * doesn't change serverModTime), have the original machine get a new callback,
785  * and then have the original machine merge in the early, old info from the
786  * first call.  At this point, the easiest way to avoid this problem is to have
787  * getcallback calls conflict with all others for the same vnode.  Other calls
788  * to cm_MergeStatus that aren't associated with calls to cm_SyncOp on the same
789  * vnode must be careful not to merge in their status unless they have obtained
790  * a callback from the start of their call.
793  * Concurrent StoreData RPC's can cause trouble if the file is being extended.
794  * Each such RPC passes a FileLength parameter, which the server uses to do
795  * pre-truncation if necessary.  So if two RPC's are processed out of order at
796  * the server, the one with the smaller FileLength will be processed last,
797  * possibly resulting in a bogus truncation.  The simplest way to avoid this
798  * is to serialize all StoreData RPC's.  This is the reason we defined
799  * CM_SCACHESYNC_STOREDATA_EXCL and CM_SCACHEFLAG_DATASTORING.
801 long cm_SyncOp(cm_scache_t *scp, cm_buf_t *bufp, cm_user_t *userp, cm_req_t *reqp,
802                afs_uint32 rights, afs_uint32 flags)
/* Parameters: scp (mutex held by caller), optional bufp for data operations,
 * userp/reqp for any RPCs made, rights = PRSFS_* access bits to verify,
 * flags = CM_SCACHESYNC_* bits naming the operation(s) to admit. */
804     osi_queueData_t *qdp;
807     afs_uint32 outRights;
810     /* lookup this first */
811     bufLocked = flags & CM_SCACHESYNC_BUFLOCKED;
813     /* some minor assertions */
/* any data operation requires a held buffer whose fid matches scp */
814     if (flags & (CM_SCACHESYNC_STOREDATA | CM_SCACHESYNC_FETCHDATA
815                   | CM_SCACHESYNC_READ | CM_SCACHESYNC_WRITE
816                   | CM_SCACHESYNC_SETSIZE)) {
818             osi_assert(bufp->refCount > 0);
820             osi_assert(cm_FidCmp(&bufp->fid, &scp->fid) == 0);
824     else osi_assert(bufp == NULL);
826     /* Do the access check.  Now we don't really do the access check
827      * atomically, since the caller doesn't expect the parent dir to be
828      * returned locked, and that is what we'd have to do to prevent a
829      * callback breaking message on the parent due to a setacl call from
830      * being processed while we're running.  So, instead, we check things
831      * here, and if things look fine with the access, we proceed to finish
832      * the rest of this check.  Sort of a hack, but probably good enough.
/* ---- conflict checks: each block below rejects the requested operation
 * while an incompatible one is in flight; on conflict control falls through
 * to the sleep logic at the bottom of the loop ---- */
836         if (flags & CM_SCACHESYNC_FETCHSTATUS) {
837             /* if we're bringing in a new status block, ensure that
838              * we aren't already doing so, and that no one is
839              * changing the status concurrently, either.  We need
840              * to do this, even if the status is of a different
841              * type, since we don't have the ability to figure out,
842              * in the AFS 3 protocols, which status-changing
843              * operation ran first, or even which order a read and
844              * a write occurred in.
846             if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
847                                | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
848                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want FETCHSTATUS", scp);
852         if (flags & (CM_SCACHESYNC_STORESIZE | CM_SCACHESYNC_STORESTATUS
853                       | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_GETCALLBACK)) {
854             /* if we're going to make an RPC to change the status, make sure
855              * that no one is bringing in or sending out the status.
857             if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING |
858                                CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
859                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want STORESIZE|STORESTATUS|SETSIZE|GETCALLBACK", scp);
862             if (scp->bufReadsp || scp->bufWritesp) {
863                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is bufRead|bufWrite want STORESIZE|STORESTATUS|SETSIZE|GETCALLBACK", scp);
867         if (flags & CM_SCACHESYNC_FETCHDATA) {
868             /* if we're bringing in a new chunk of data, make sure that
869              * nothing is happening to that chunk, and that we aren't
870              * changing the basic file status info, either.
872             if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
873                                | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
874                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want FETCHDATA", scp);
877             if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING))) {
878                 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING|BUF_CMSTORING want FETCHDATA", scp, bufp);
882         if (flags & CM_SCACHESYNC_STOREDATA) {
883             /* same as fetch data */
884             if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
885                                | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK)) {
886                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING|GETCALLBACK want STOREDATA", scp);
889             if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING))) {
890                 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING|BUF_CMSTORING want STOREDATA", scp, bufp);
895         if (flags & CM_SCACHESYNC_STOREDATA_EXCL) {
896             /* Don't allow concurrent StoreData RPC's */
897             if (scp->flags & CM_SCACHEFLAG_DATASTORING) {
898                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is DATASTORING want STOREDATA_EXCL", scp);
903         if (flags & CM_SCACHESYNC_ASYNCSTORE) {
904             /* Don't allow more than one BKG store request */
905             if (scp->flags & CM_SCACHEFLAG_ASYNCSTORING) {
906                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is ASYNCSTORING want ASYNCSTORE", scp);
911         if (flags & CM_SCACHESYNC_LOCK) {
912             /* Don't allow concurrent fiddling with lock lists */
913             if (scp->flags & CM_SCACHEFLAG_LOCKING) {
914                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is LOCKING want LOCK", scp);
919         /* now the operations that don't correspond to making RPCs */
920         if (flags & CM_SCACHESYNC_GETSTATUS) {
921             /* we can use the status that's here, if we're not
922              * bringing in new status.
924             if (scp->flags & (CM_SCACHEFLAG_FETCHING)) {
925                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING want GETSTATUS", scp);
929         if (flags & CM_SCACHESYNC_SETSTATUS) {
930             /* we can make a change to the local status, as long as
931              * the status isn't changing now.
933              * If we're fetching or storing a chunk of data, we can
934              * change the status locally, since the fetch/store
935              * operations don't change any of the data that we're
938             if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING | CM_SCACHEFLAG_SIZESTORING)) {
939                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING want SETSTATUS", scp);
943         if (flags & CM_SCACHESYNC_READ) {
944             /* we're going to read the data, make sure that the
945              * status is available, and that the data is here.  It
946              * is OK to read while storing the data back.
948             if (scp->flags & CM_SCACHEFLAG_FETCHING) {
949                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING want READ", scp);
952             if (bufp && ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == CM_BUF_CMFETCHING)) {
953                 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING want READ", scp, bufp);
957         if (flags & CM_SCACHESYNC_WRITE) {
958             /* don't write unless the status is stable and the chunk
961             if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
962                                | CM_SCACHEFLAG_SIZESTORING)) {
963                 osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is FETCHING|STORING|SIZESTORING want WRITE", scp);
966             if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING))) {
967                 osi_Log2(afsd_logp, "CM SyncOp scp 0x%p bufp 0x%p is BUF_CMFETCHING|BUF_CMSTORING want WRITE", scp, bufp);
972         // yj: modified this so that callback only checked if we're
973         // not checking something on /afs
974         /* fix the conditional to match the one in cm_HaveCallback */
975         if ((flags & CM_SCACHESYNC_NEEDCALLBACK)
976 #ifdef AFS_FREELANCE_CLIENT
977             && (!cm_freelanceEnabled ||
978                  !(scp->fid.vnode==0x1 && scp->fid.unique==0x1) ||
979                  scp->fid.cell!=AFS_FAKE_ROOT_CELL_ID ||
980                  scp->fid.volume!=AFS_FAKE_ROOT_VOL_ID ||
981                  cm_fakeDirCallback < 2)
982 #endif /* AFS_FREELANCE_CLIENT */
984             if (!cm_HaveCallback(scp)) {
985                 osi_Log1(afsd_logp, "CM SyncOp getting callback on scp 0x%p",
/* drop the buffer lock across the RPC to preserve lock ordering */
988                     lock_ReleaseMutex(&bufp->mx);
989                 code = cm_GetCallback(scp, userp, reqp, (flags & CM_SCACHESYNC_FORCECB)?1:0);
991                     lock_ReleaseMutex(&scp->mx);
992                     lock_ObtainMutex(&bufp->mx);
993                     lock_ObtainMutex(&scp->mx);
1002             /* can't check access rights without a callback */
1003             osi_assert(flags & CM_SCACHESYNC_NEEDCALLBACK);
1005             if ((rights & PRSFS_WRITE) && (scp->flags & CM_SCACHEFLAG_RO))
1006                 return CM_ERROR_READONLY;
1008             if (cm_HaveAccessRights(scp, userp, rights, &outRights)) {
1009                 if (~outRights & rights)
1010                     return CM_ERROR_NOACCESS;
1013                 /* we don't know the required access rights */
1014                 if (bufLocked) lock_ReleaseMutex(&bufp->mx);
1015                 code = cm_GetAccessRights(scp, userp, reqp);
1017                     lock_ReleaseMutex(&scp->mx);
1018                     lock_ObtainMutex(&bufp->mx);
1019                     lock_ObtainMutex(&scp->mx);
1027         /* if we get here, we're happy */
1031         /* first check if we're not supposed to wait: fail
1032          * in this case, returning with everything still locked.
1034         if (flags & CM_SCACHESYNC_NOWAIT)
1035             return CM_ERROR_WOULDBLOCK;
1037         /* wait here, then try again */
1038         osi_Log1(afsd_logp, "CM SyncOp sleeping scp 0x%p", scp);
1039         if ( scp->flags & CM_SCACHEFLAG_WAITING ) {
1041             scp->waitRequests++;
1042             osi_Log3(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING already set for 0x%p; %d threads; %d requests",
1043                      scp, scp->waitCount, scp->waitRequests);
1045             osi_Log1(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING set for 0x%p", scp);
1046             scp->flags |= CM_SCACHEFLAG_WAITING;
1047             scp->waitCount = scp->waitRequests = 1;
1050             lock_ReleaseMutex(&bufp->mx);
/* sleep on scp->flags; releases scp->mx while asleep */
1051         osi_SleepM((LONG_PTR) &scp->flags, &scp->mx);
1053         smb_UpdateServerPriority();
1056             lock_ObtainMutex(&bufp->mx);
1057         lock_ObtainMutex(&scp->mx);
1059         osi_Log3(afsd_logp, "CM SyncOp woke! scp 0x%p; still waiting %d threads of %d requests",
1060                  scp, scp->waitCount, scp->waitRequests);
/* last waiter out clears the WAITING flag and the request counter */
1061         if (scp->waitCount == 0) {
1062             osi_Log1(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING reset for 0x%p", scp);
1063             scp->flags &= ~CM_SCACHEFLAG_WAITING;
1064             scp->waitRequests = 0;
1066     } /* big while loop */
1068     /* now, update the recorded state for RPC-type calls */
1069     if (flags & CM_SCACHESYNC_FETCHSTATUS)
1070         scp->flags |= CM_SCACHEFLAG_FETCHING;
1071     if (flags & CM_SCACHESYNC_STORESTATUS)
1072         scp->flags |= CM_SCACHEFLAG_STORING;
1073     if (flags & CM_SCACHESYNC_STORESIZE)
1074         scp->flags |= CM_SCACHEFLAG_SIZESTORING;
1075     if (flags & CM_SCACHESYNC_GETCALLBACK)
1076         scp->flags |= CM_SCACHEFLAG_GETCALLBACK;
1077     if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
1078         scp->flags |= CM_SCACHEFLAG_DATASTORING;
1079     if (flags & CM_SCACHESYNC_ASYNCSTORE)
1080         scp->flags |= CM_SCACHEFLAG_ASYNCSTORING;
1081     if (flags & CM_SCACHESYNC_LOCK)
1082         scp->flags |= CM_SCACHEFLAG_LOCKING;
1084     /* now update the buffer pointer */
1085     if (flags & CM_SCACHESYNC_FETCHDATA) {
1086         /* ensure that the buffer isn't already in the I/O list */
1088             for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1089                 tbufp = osi_GetQData(qdp);
1090                 osi_assert(tbufp != bufp);
1094         /* queue a held reference to the buffer in the "reading" I/O list */
1095         qdp = osi_QDAlloc();
1096         osi_SetQData(qdp, bufp);
1099             bufp->cmFlags |= CM_BUF_CMFETCHING;
1101         osi_QAdd((osi_queue_t **) &scp->bufReadsp, &qdp->q);
1104     if (flags & CM_SCACHESYNC_STOREDATA) {
1105         /* ensure that the buffer isn't already in the I/O list */
1107             for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1108                 tbufp = osi_GetQData(qdp);
1109                 osi_assert(tbufp != bufp);
1113         /* queue a held reference to the buffer in the "writing" I/O list */
1114         qdp = osi_QDAlloc();
1115         osi_SetQData(qdp, bufp);
1118             bufp->cmFlags |= CM_BUF_CMSTORING;
1120         osi_QAdd((osi_queue_t **) &scp->bufWritesp, &qdp->q);
1126 /* for those syncops that setup for RPCs.
1127 * Called with scache locked.
1129 void cm_SyncOpDone(cm_scache_t *scp, cm_buf_t *bufp, afs_uint32 flags)
1131 osi_queueData_t *qdp;
1134 /* now, update the recorded state for RPC-type calls */
1135 if (flags & CM_SCACHESYNC_FETCHSTATUS)
1136 scp->flags &= ~CM_SCACHEFLAG_FETCHING;
1137 if (flags & CM_SCACHESYNC_STORESTATUS)
1138 scp->flags &= ~CM_SCACHEFLAG_STORING;
1139 if (flags & CM_SCACHESYNC_STORESIZE)
1140 scp->flags &= ~CM_SCACHEFLAG_SIZESTORING;
1141 if (flags & CM_SCACHESYNC_GETCALLBACK)
1142 scp->flags &= ~CM_SCACHEFLAG_GETCALLBACK;
1143 if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
1144 scp->flags &= ~CM_SCACHEFLAG_DATASTORING;
1145 if (flags & CM_SCACHESYNC_ASYNCSTORE)
1146 scp->flags &= ~CM_SCACHEFLAG_ASYNCSTORING;
1147 if (flags & CM_SCACHESYNC_LOCK)
1148 scp->flags &= ~CM_SCACHEFLAG_LOCKING;
1150 /* now update the buffer pointer */
1151 if (flags & CM_SCACHESYNC_FETCHDATA) {
1152 /* ensure that the buffer isn't already in the I/O list */
1153 for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1154 tbufp = osi_GetQData(qdp);
1159 osi_QRemove((osi_queue_t **) &scp->bufReadsp, &qdp->q);
1164 if (bufp->cmFlags & CM_BUF_CMFETCHING)
1166 bufp->cmFlags &= ~(CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED);
1167 if (bufp->flags & CM_BUF_WAITING) {
1168 osi_Log2(afsd_logp, "CM SyncOpDone Waking [scp 0x%p] bufp 0x%p", scp, bufp);
1169 osi_Wakeup((LONG_PTR) &bufp);
1176 /* now update the buffer pointer */
1177 if (flags & CM_SCACHESYNC_STOREDATA) {
1178 /* ensure that the buffer isn't already in the I/O list */
1179 for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1180 tbufp = osi_GetQData(qdp);
1185 osi_QRemove((osi_queue_t **) &scp->bufWritesp, &qdp->q);
1190 if (bufp->cmFlags & CM_BUF_CMSTORING)
1192 bufp->cmFlags &= ~CM_BUF_CMSTORING;
1193 if (bufp->flags & CM_BUF_WAITING) {
1194 osi_Log2(afsd_logp, "CM SyncOpDone Waking [scp 0x%p] bufp 0x%p", scp, bufp);
1195 osi_Wakeup((LONG_PTR) &bufp);
1202 /* and wakeup anyone who is waiting */
1203 if (scp->flags & CM_SCACHEFLAG_WAITING) {
1204 osi_Log1(afsd_logp, "CM SyncOpDone Waking scp 0x%p", scp);
1205 osi_Wakeup((LONG_PTR) &scp->flags);
1209 /* merge in a response from an RPC. The scp must be locked, and the callback
1212 * Don't overwrite any status info that is dirty, since we could have a store
1213 * operation (such as store data) that merges some info in, and we don't want
1214 * to lose the local updates. Typically, there aren't many updates we do
1215 * locally, anyway, probably only mtime.
1217 * There is probably a bug in here where a chmod (which doesn't change
1218 * serverModTime) that occurs between two fetches, both of whose responses are
1219 * handled after the callback breaking is done, but only one of whose calls
1220 * started before that, can cause old info to be merged from the first call.
1222 void cm_MergeStatus(cm_scache_t *scp, AFSFetchStatus *statusp, AFSVolSync *volp,
1223 cm_user_t *userp, afs_uint32 flags)
/* NOTE(review): this listing appears truncated -- braces, a few statements
 * and the function's declarations are missing between the surviving lines.
 * Comments below describe intent only; confirm against a complete copy. */
1225 // yj: i want to create some fake status for the /afs directory and the
1226 // entries under that directory
1227 #ifdef AFS_FREELANCE_CLIENT
/* Freelance mode: the fake local /afs root has no fileserver, so synthesize
 * a plausible directory status in place of an RPC result. */
1228 if (cm_freelanceEnabled && scp == cm_data.rootSCachep) {
1229 osi_Log0(afsd_logp,"cm_MergeStatus Freelance cm_data.rootSCachep");
1230 statusp->InterfaceVersion = 0x1;
1231 statusp->FileType = CM_SCACHETYPE_DIRECTORY;
1232 statusp->LinkCount = scp->linkCount;
1233 statusp->Length = cm_fakeDirSize;
1234 statusp->Length_hi = 0;
1235 statusp->DataVersion = cm_data.fakeDirVersion;
1236 statusp->Author = 0x1;
1237 statusp->Owner = 0x0;
/* 0x9 = read + lookup rights; 0x1ff = unix mode 0777 */
1238 statusp->CallerAccess = 0x9;
1239 statusp->AnonymousAccess = 0x9;
1240 statusp->UnixModeBits = 0x1ff;
1241 statusp->ParentVnode = 0x1;
1242 statusp->ParentUnique = 0x1;
1243 statusp->ResidencyMask = 0;
1244 statusp->ClientModTime = FakeFreelanceModTime;
1245 statusp->ServerModTime = FakeFreelanceModTime;
1247 statusp->SyncCounter = 0;
1248 statusp->dataVersionHigh = 0;
1249 statusp->errorCode = 0;
1251 #endif /* AFS_FREELANCE_CLIENT */
/* A non-zero errorCode in the fetched status means the server denied
 * access; remember that on the scache entry instead of merging. */
1253 if (statusp->errorCode != 0) {
1254 scp->flags |= CM_SCACHEFLAG_EACCESS;
1255 osi_Log2(afsd_logp, "Merge, Failure scp %x code 0x%x", scp, statusp->errorCode);
1258 scp->flags &= ~CM_SCACHEFLAG_EACCESS;
/* Data-version regression check: unless the caller forces the merge,
 * a status whose DataVersion is older than what we already cached is
 * stale (concurrent RPCs or an out-of-date RO replica -- see the long
 * comment below) and is only logged, not trusted unconditionally. */
1261 if (!(flags & CM_MERGEFLAG_FORCE)
1262 && statusp->DataVersion < (unsigned long) scp->dataVersion) {
1263 struct cm_cell *cellp;
1265 cellp = cm_FindCellByID(scp->fid.cell);
1266 if (scp->cbServerp) {
1267 struct cm_volume *volp = NULL;
1269 cm_GetVolumeByID(cellp, scp->fid.volume, userp,
1270 (cm_req_t *) NULL, &volp);
1271 osi_Log2(afsd_logp, "old data from server %x volume %s",
1272 scp->cbServerp->addr.sin_addr.s_addr,
1273 volp ? volp->namep : "(unknown)");
1277 osi_Log3(afsd_logp, "Bad merge, scp %x, scp dv %d, RPC dv %d",
1278 scp, scp->dataVersion, statusp->DataVersion);
1279 /* we have a number of data fetch/store operations running
1280 * concurrently, and we can tell which one executed last at the
1281 * server by its mtime.
1282 * Choose the one with the largest mtime, and ignore the rest.
1284 * These concurrent calls are incompatible with setting the
1285 * mtime, so we won't have a locally changed mtime here.
1287 * We could also have ACL info for a different user than usual,
1288 * in which case we have to do that part of the merge, anyway.
1289 * We won't have to worry about the info being old, since we
1290 * won't have concurrent calls
1291 * that change file status running from this machine.
1293 * Added 3/17/98: if we see data version regression on an RO
1294 * file, it's probably due to a server holding an out-of-date
1295 * replica, rather than to concurrent RPC's. Failures to
1296 * release replicas are now flagged by the volserver, but only
1297 * since AFS 3.4 5.22, so there are plenty of clients getting
1298 * out-of-date replicas out there.
1300 * If we discover an out-of-date replica, by this time it's too
1301 * late to go to another server and retry. Also, we can't
1302 * reject the merge, because then there is no way for
1303 * GetAccess to do its work, and the caller gets into an
1304 * infinite loop. So we just grin and bear it.
1306 if (!(scp->flags & CM_SCACHEFLAG_RO))
/* Copy the RPC status into the cached entry, but never clobber fields
 * the client has dirtied locally (tracked via scp->mask). */
1309 scp->serverModTime = statusp->ServerModTime;
1311 if (!(scp->mask & CM_SCACHEMASK_CLIENTMODTIME)) {
1312 scp->clientModTime = statusp->ClientModTime;
1314 if (!(scp->mask & CM_SCACHEMASK_LENGTH)) {
1315 scp->length.LowPart = statusp->Length;
1316 scp->length.HighPart = statusp->Length_hi;
/* serverLength always tracks the server's idea of the file size,
 * even when a locally-dirty length is preserved above. */
1319 scp->serverLength.LowPart = statusp->Length;
1320 scp->serverLength.HighPart = statusp->Length_hi;
1322 scp->linkCount = statusp->LinkCount;
1323 scp->dataVersion = statusp->DataVersion;
1324 scp->owner = statusp->Owner;
1325 scp->group = statusp->Group;
1326 scp->unixModeBits = statusp->UnixModeBits & 07777;
/* Map the RPC file type to the cache-manager's type. A symlink with no
 * execute bits is treated as a mount point by AFS convention. */
1328 if (statusp->FileType == File)
1329 scp->fileType = CM_SCACHETYPE_FILE;
1330 else if (statusp->FileType == Directory)
1331 scp->fileType = CM_SCACHETYPE_DIRECTORY;
1332 else if (statusp->FileType == SymbolicLink) {
1333 if ((scp->unixModeBits & 0111) == 0)
1334 scp->fileType = CM_SCACHETYPE_MOUNTPOINT;
1336 scp->fileType = CM_SCACHETYPE_SYMLINK;
1339 osi_Log2(afsd_logp, "Merge, Invalid File Type (%d), scp %x", statusp->FileType, scp);
1340 scp->fileType = CM_SCACHETYPE_INVALID; /* invalid */
1342 /* and other stuff */
1343 scp->parentVnode = statusp->ParentVnode;
1344 scp->parentUnique = statusp->ParentUnique;
1346 /* and merge in the private acl cache info, if this is more than the public
1347 * info; merge in the public stuff in any case.
1349 scp->anyAccess = statusp->AnonymousAccess;
1351 if (userp != NULL) {
1352 cm_AddACLCache(scp, userp, statusp->CallerAccess);
1356 /* note that our stat cache info is incorrect, so force us eventually
1357 * to stat the file again. There may be dirty data associated with
1358 * this vnode, and we want to preserve that information.
1360 * This function works by simply simulating a loss of the callback.
1362 * This function must be called with the scache locked.
1364 void cm_DiscardSCache(cm_scache_t *scp)
1366 lock_AssertMutex(&scp->mx);
1367 if (scp->cbServerp) {
1368 cm_PutServer(scp->cbServerp);
1369 scp->cbServerp = NULL;
1372 scp->flags &= ~CM_SCACHEFLAG_CALLBACK;
1373 cm_dnlcPurgedp(scp);
1374 cm_dnlcPurgevp(scp);
1375 cm_FreeAllACLEnts(scp);
1377 /* Force mount points and symlinks to be re-evaluated */
1378 scp->mountPointStringp[0] = '\0';
1381 void cm_AFSFidFromFid(AFSFid *afsFidp, cm_fid_t *fidp)
1383 afsFidp->Volume = fidp->volume;
1384 afsFidp->Vnode = fidp->vnode;
1385 afsFidp->Unique = fidp->unique;
1388 void cm_HoldSCacheNoLock(cm_scache_t *scp)
1390 osi_assert(scp != 0);
1391 osi_assert(scp->refCount >= 0);
1395 void cm_HoldSCache(cm_scache_t *scp)
1397 osi_assert(scp != 0);
1398 lock_ObtainWrite(&cm_scacheLock);
1399 osi_assert(scp->refCount >= 0);
1401 lock_ReleaseWrite(&cm_scacheLock);
1404 void cm_ReleaseSCacheNoLock(cm_scache_t *scp)
1406 osi_assert(scp != 0);
1407 osi_assert(scp->refCount-- >= 0);
1410 void cm_ReleaseSCache(cm_scache_t *scp)
1412 osi_assert(scp != 0);
1413 lock_ObtainWrite(&cm_scacheLock);
1414 osi_assert(scp->refCount != 0);
1416 lock_ReleaseWrite(&cm_scacheLock);
1419 /* just look for the scp entry to get filetype */
1420 /* doesn't need to be perfectly accurate, so locking doesn't matter too much */
1421 int cm_FindFileType(cm_fid_t *fidp)
1426 hash = CM_SCACHE_HASH(fidp);
1428 osi_assert(fidp->cell != 0);
1430 lock_ObtainWrite(&cm_scacheLock);
1431 for (scp=cm_data.hashTablep[hash]; scp; scp=scp->nextp) {
1432 if (cm_FidCmp(fidp, &scp->fid) == 0) {
1433 lock_ReleaseWrite(&cm_scacheLock);
1434 return scp->fileType;
1437 lock_ReleaseWrite(&cm_scacheLock);
1441 /* dump all scp's that have reference count > 0 to a file.
1442 * cookie is used to identify this batch for easy parsing,
1443 * and it a string provided by a caller
1445 int cm_DumpSCache(FILE *outputFile, char *cookie, int lock)
1453 lock_ObtainRead(&cm_scacheLock);
1455 sprintf(output, "%s - dumping scache - cm_data.currentSCaches=%d, cm_data.maxSCaches=%d\n", cookie, cm_data.currentSCaches, cm_data.maxSCaches);
1456 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1458 for (scp = cm_data.scacheLRULastp; scp; scp = (cm_scache_t *) osi_QPrev(&scp->q))
1460 if (scp->refCount != 0)
1462 sprintf(output, "%s fid (cell=%d, volume=%d, vnode=%d, unique=%d) refCount=%u\n",
1463 cookie, scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique,
1465 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1469 sprintf(output, "%s - dumping cm_data.hashTable - cm_data.hashTableSize=%d\n", cookie, cm_data.hashTableSize);
1470 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1472 for (i = 0; i < cm_data.hashTableSize; i++)
1474 for(scp = cm_data.hashTablep[i]; scp; scp=scp->nextp)
1476 if (scp->refCount != 0)
1478 sprintf(output, "%s scp=0x%p, hash=%d, fid (cell=%d, volume=%d, vnode=%d, unique=%d) refCount=%u\n",
1479 cookie, scp, i, scp->fid.cell, scp->fid.volume, scp->fid.vnode,
1480 scp->fid.unique, scp->refCount);
1481 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1486 sprintf(output, "%s - Done dumping scache.\n", cookie);
1487 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1490 lock_ReleaseRead(&cm_scacheLock);