2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afs/param.h>
25 extern osi_hyper_t hzero;
27 /* hash table stuff */
/* fid -> cm_scache_t hash table: cm_hashTableSize buckets, entries chained
 * through scp->nextp (see cm_GetSCache/cm_FindSCache).  Protected by
 * cm_scacheLock. */
28 cm_scache_t **cm_hashTablep;
29 long cm_hashTableSize;
/* number of scache entries currently allocated; compared against the
 * cm_maxSCaches quota in cm_GetNewSCache */
31 long cm_currentSCaches;
/* LRU queue of scache entries: First = most recently used, Last = coldest
 * (the recycling candidate scanned by cm_GetNewSCache).  Protected by
 * cm_scacheLock. */
34 cm_scache_t *cm_scacheLRUFirstp;
35 cm_scache_t *cm_scacheLRULastp;
/* queue of all file locks known to this client (presumably byte-range
 * locks; confirm against the lock code) */
38 osi_queue_t *cm_allFileLocks;
40 /* lock for globals */
41 osi_rwlock_t cm_scacheLock;
43 /* Dummy scache entry for use with pioctl fids */
44 cm_scache_t cm_fakeSCache;
46 /* must be called with cm_scacheLock write-locked! */
47 void cm_AdjustLRU(cm_scache_t *scp)
49 if (scp == cm_scacheLRULastp)
50 cm_scacheLRULastp = (cm_scache_t *) osi_QPrev(&scp->q);
51 osi_QRemove((osi_queue_t **) &cm_scacheLRUFirstp, &scp->q);
52 osi_QAdd((osi_queue_t **) &cm_scacheLRUFirstp, &scp->q);
53 if (!cm_scacheLRULastp) cm_scacheLRULastp = scp;
56 /* called with cm_scacheLock write-locked; find a vnode to recycle.
57 * Can allocate a new one if desperate, or if below quota (cm_maxSCaches).
/* NOTE(review): this extraction dropped lines from the original (the
 * opening brace, the local declarations of scp/i/lscpp/tscp, the LRU loop
 * bound, the hash-chain unlink through *lscpp, the cm_currentSCaches
 * accounting, and the return statements).  Recover them from the original
 * source before building. */
59 cm_scache_t *cm_GetNewSCache(void)
/* at or over quota: scan the LRU queue from the cold end for an
 * unreferenced entry to recycle */
66 if (cm_currentSCaches >= cm_maxSCaches) {
67 for (scp = cm_scacheLRULastp;
69 scp = (cm_scache_t *) osi_QPrev(&scp->q)) {
70 if (scp->refCount == 0) break;
74 /* we found an entry, so return it */
/* first unhook it from the fid hash chain, since the entry is about to
 * represent a different fid */
75 if (scp->flags & CM_SCACHEFLAG_INHASH) {
76 /* hash it out first */
77 i = CM_SCACHE_HASH(&scp->fid);
78 lscpp = &cm_hashTablep[i];
81 lscpp = &tscp->nextp, tscp = *lscpp) {
82 if (tscp == scp) break;
84 osi_assertx(tscp, "afsd: scache hash screwup");
86 scp->flags &= ~CM_SCACHEFLAG_INHASH;
89 /* look for things that shouldn't still be set */
90 osi_assert(scp->bufWritesp == NULL);
91 osi_assert(scp->bufReadsp == NULL);
93 /* invalidate so next merge works fine;
94 * also initialize some flags */
95 scp->flags &= ~(CM_SCACHEFLAG_STATD
97 | CM_SCACHEFLAG_PURERO
98 | CM_SCACHEFLAG_OVERQUOTA
99 | CM_SCACHEFLAG_OUTOFSPACE);
100 scp->serverModTime = 0;
101 scp->dataVersion = 0;
102 scp->bulkStatProgress = hzero;
104 /* discard callback */
105 scp->cbServerp = NULL;
108 /* remove from dnlc */
112 /* discard cached status; if non-zero, Close
113 * tried to store this to server but failed */
116 /* drop held volume ref */
118 cm_PutVolume(scp->volp);
122 /* discard symlink info */
123 if (scp->mountPointStringp) {
124 free(scp->mountPointStringp);
125 scp->mountPointStringp = NULL;
127 if (scp->mountRootFidp) {
128 free(scp->mountRootFidp);
129 scp->mountRootFidp = NULL;
131 if (scp->dotdotFidp) {
132 free(scp->dotdotFidp);
133 scp->dotdotFidp = NULL;
136 /* not locked, but there can be no references to this guy
137 * while we hold the global refcount lock.
139 cm_FreeAllACLEnts(scp);
141 /* now remove from the LRU queue and put it back at the
142 * head of the LRU queue.
151 /* if we get here, we should allocate a new scache entry. We either are below
152 * quota or we have a leak and need to allocate a new one to avoid panicing.
/* NOTE(review): the malloc result is used unchecked; an allocation failure
 * would fault in memset -- confirm this is the intended OOM policy. */
154 scp = malloc(sizeof(*scp));
155 memset(scp, 0, sizeof(*scp));
156 lock_InitializeMutex(&scp->mx, "cm_scache_t mutex");
157 lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
159 /* and put it in the LRU queue */
160 osi_QAdd((osi_queue_t **) &cm_scacheLRUFirstp, &scp->q);
161 if (!cm_scacheLRULastp) cm_scacheLRULastp = scp;
163 cm_dnlcPurgedp(scp); /* make doubly sure that this is not in dnlc */
168 /* like strcmp, only for fids */
169 int cm_FidCmp(cm_fid_t *ap, cm_fid_t *bp)
171 if (ap->vnode != bp->vnode) return 1;
172 if (ap->volume != bp->volume) return 1;
173 if (ap->unique != bp->unique) return 1;
174 if (ap->cell != bp->cell) return 1;
178 void cm_fakeSCacheInit()
180 memset(&cm_fakeSCache, 0, sizeof(cm_fakeSCache));
181 lock_InitializeMutex(&cm_fakeSCache.mx, "cm_scache_t mutex");
182 cm_fakeSCache.cbServerp = (struct cm_server *)(-1);
183 /* can leave clientModTime at 0 */
184 cm_fakeSCache.fileType = CM_SCACHETYPE_FILE;
185 cm_fakeSCache.unixModeBits = 0777;
186 cm_fakeSCache.length.LowPart = 1000;
187 cm_fakeSCache.linkCount = 1;
/* One-time initialization of the scache module: sets up the global lock,
 * the fid hash table (sized at half the entry quota), and the quota
 * counters.  Guarded by osi_Once so repeated calls are harmless.
 * NOTE(review): the extraction dropped the opening brace and the tail of
 * the osi_Once region (presumably osi_EndOnce and the closing braces);
 * restore from the original source. */
190 void cm_InitSCache(long maxSCaches)
192 static osi_once_t once;
194 if (osi_Once(&once)) {
195 lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock");
/* one hash bucket per two cache entries keeps chains short */
196 cm_hashTableSize = maxSCaches / 2;
197 cm_hashTablep = malloc(sizeof(cm_scache_t *) * cm_hashTableSize);
198 memset(cm_hashTablep, 0, sizeof(cm_scache_t *) * cm_hashTableSize);
199 cm_allFileLocks = NULL;
200 cm_currentSCaches = 0;
201 cm_maxSCaches = maxSCaches;
208 /* version that doesn't bother creating the entry if we don't find it */
209 cm_scache_t *cm_FindSCache(cm_fid_t *fidp)
214 hash = CM_SCACHE_HASH(fidp);
216 osi_assert(fidp->cell != 0);
218 lock_ObtainWrite(&cm_scacheLock);
219 for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
220 if (cm_FidCmp(fidp, &scp->fid) == 0) {
223 lock_ReleaseWrite(&cm_scacheLock);
227 lock_ReleaseWrite(&cm_scacheLock);
/* Look up (or create) the scache entry for fidp, returning it held via
 * *outScpp; returns 0 or a CM_ERROR_* code.  Takes the cell/volume lookup
 * path only on a hash miss, and re-checks the hash after reacquiring
 * cm_scacheLock because another thread may have inserted the entry while
 * the lock was dropped.
 * NOTE(review): the extraction dropped lines here -- the local
 * declarations (hash, scp, cellp, volp, code), the hit-path bodies of both
 * hash loops (presumably refCount++/cm_AdjustLRU/*outScpp = scp/return 0),
 * the `scp->fid = *fidp` style initialization before the dotdot check, and
 * the final return.  Restore from the original source before building. */
231 long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp,
240 hash = CM_SCACHE_HASH(fidp);
242 osi_assert(fidp->cell != 0);
244 lock_ObtainWrite(&cm_scacheLock);
245 for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
246 if (cm_FidCmp(fidp, &scp->fid) == 0) {
250 lock_ReleaseWrite(&cm_scacheLock);
255 /* otherwise, we need to find the volume */
256 lock_ReleaseWrite(&cm_scacheLock); /* for perf. reasons */
257 cellp = cm_FindCellByID(fidp->cell);
258 if (!cellp) return CM_ERROR_NOSUCHCELL;
260 code = cm_GetVolumeByID(cellp, fidp->volume, userp, reqp, &volp);
261 if (code) return code;
263 /* otherwise, we have the volume, now reverify that the scp doesn't
264 * exist, and proceed.
266 lock_ObtainWrite(&cm_scacheLock);
267 for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
268 if (cm_FidCmp(fidp, &scp->fid) == 0) {
271 lock_ReleaseWrite(&cm_scacheLock);
278 /* now, if we don't have the fid, recycle something */
279 scp = cm_GetNewSCache();
280 osi_assert(!(scp->flags & CM_SCACHEFLAG_INHASH));
282 scp->volp = volp; /* a held reference */
284 /* if this scache entry represents a volume root then we need
285 * to copy the dotdotFipd from the volume structure where the
286 * "master" copy is stored (defect 11489)
/* NOTE(review): this test reads scp->fid, so the (missing) assignment of
 * *fidp into scp->fid must precede it in the original -- verify. */
288 if(scp->fid.vnode == 1 && scp->fid.unique == 1 && volp->dotdotFidp) {
289 if (scp->dotdotFidp == (cm_fid_t *) NULL)
290 scp->dotdotFidp = (cm_fid_t *) malloc(sizeof(cm_fid_t));
291 *(scp->dotdotFidp) = *volp->dotdotFidp;
/* mark read-only entries: pure RO for the replicated volume, plain RO
 * for the backup volume */
294 if (volp->roID == fidp->volume)
295 scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
296 else if (volp->bkID == fidp->volume)
297 scp->flags |= CM_SCACHEFLAG_RO;
/* insert at the head of the hash chain */
298 scp->nextp = cm_hashTablep[hash];
299 cm_hashTablep[hash] = scp;
300 scp->flags |= CM_SCACHEFLAG_INHASH;
302 lock_ReleaseWrite(&cm_scacheLock);
304 /* now we have a held scache entry; just return it */
310 /* synchronize a fetch, store, read, write, fetch status or store status.
311 * Called with scache mutex held, and returns with it held, but temporarily
312 * drops it during the fetch.
314 * At most one flag can be on in flags, if this is an RPC request.
316 * Also, if we're fetching or storing data, we must ensure that we have a buffer.
318 * There are a lot of weird restrictions here; here's an attempt to explain the
319 * rationale for the concurrency restrictions implemented in this function.
321 * First, although the file server will break callbacks when *another* machine
322 * modifies a file or status block, the client itself is responsible for
323 * concurrency control on its own requests. Callback breaking events are rare,
324 * and simply invalidate any concurrent new status info.
326 * In the absence of callback breaking messages, we need to know how to
327 * synchronize incoming responses describing updates to files. We synchronize
328 * operations that update the data version by comparing the data versions.
329 * However, updates that do not update the data, but only the status, can't be
330 * synchronized with fetches or stores, since there's nothing to compare
331 * to tell which operation executed first at the server.
333 * Thus, we can allow multiple ops that change file data, or dir data, and
334 * fetches. However, status storing ops have to be done serially.
336 * Furthermore, certain data-changing ops are incompatible: we can't read or
337 * write a buffer while doing a truncate. We can't read and write the same
338 * buffer at the same time, or write while fetching or storing, or read while
339 * fetching a buffer (this may change). We can't fetch and store at the same
342 * With respect to status, we can't read and write at the same time, read while
343 * fetching, write while fetching or storing, or fetch and store at the same time.
345 * We can't allow a get callback RPC to run concurrently with something that
346 * will return updated status, since we could start a call, have the server
347 * return status, have another machine make an update to the status (which
348 * doesn't change serverModTime), have the original machine get a new callback,
349 * and then have the original machine merge in the early, old info from the
350 * first call. At this point, the easiest way to avoid this problem is to have
351 * getcallback calls conflict with all others for the same vnode. Other calls
352 * to cm_MergeStatus that aren't associated with calls to cm_SyncOp on the same
353 * vnode must be careful not to merge in their status unless they have obtained
354 * a callback from the start of their call.
357 * Concurrent StoreData RPC's can cause trouble if the file is being extended.
358 * Each such RPC passes a FileLength parameter, which the server uses to do
359 * pre-truncation if necessary. So if two RPC's are processed out of order at
360 * the server, the one with the smaller FileLength will be processed last,
361 * possibly resulting in a bogus truncation. The simplest way to avoid this
362 * is to serialize all StoreData RPC's. This is the reason we defined
363 * CM_SCACHESYNC_STOREDATA_EXCL and CM_SCACHEFLAG_DATASTORING.
/* Gatekeeper for all fetch/store/read/write/status operations on scp (see
 * the long rationale comment above).  Called with scp->mx held; may drop
 * and reacquire it (and bufp->mx, when CM_SCACHESYNC_BUFLOCKED) while
 * fetching callbacks, access rights, or sleeping for a conflicting
 * operation to finish.  Returns 0 on success or a CM_ERROR_* code.
 * NOTE(review): the extraction dropped lines -- the opening brace, the
 * declarations of bufLocked/code/outRights/tbufp, the `while` that the
 * closing comment on the last brace refers to, the `sleep:` label that the
 * gotos below target, several `goto sleep` / `return CM_ERROR_*` arms, and
 * the final `return 0`.  Restore from the original source. */
365 long cm_SyncOp(cm_scache_t *scp, cm_buf_t *bufp, cm_user_t *up, cm_req_t *reqp,
366 long rights, long flags)
368 osi_queueData_t *qdp;
374 /* lookup this first */
375 bufLocked = flags & CM_SCACHESYNC_BUFLOCKED;
377 /* some minor assertions */
/* data operations require a held buffer that belongs to this file */
378 if (flags & (CM_SCACHESYNC_STOREDATA | CM_SCACHESYNC_FETCHDATA
379 | CM_SCACHESYNC_READ | CM_SCACHESYNC_WRITE
380 | CM_SCACHESYNC_SETSIZE)) {
382 osi_assert(bufp->refCount > 0);
384 osi_assert(cm_FidCmp(&bufp->fid, &scp->fid) == 0);
388 else osi_assert(bufp == NULL);
390 /* Do the access check. Now we don't really do the access check
391 * atomically, since the caller doesn't expect the parent dir to be
392 * returned locked, and that is what we'd have to do to prevent a
393 * callback breaking message on the parent due to a setacl call from
394 * being processed while we're running. So, instead, we check things
395 * here, and if things look fine with the access, we proceed to finish
396 * the rest of this check. Sort of a hack, but probably good enough.
400 if (flags & CM_SCACHESYNC_FETCHSTATUS) {
401 /* if we're bringing in a new status block, ensure that
402 * we aren't already doing so, and that no one is
403 * changing the status concurrently, either. We need
404 * to do this, even if the status is of a different
405 * type, since we don't have the ability to figure out,
406 * in the AFS 3 protocols, which status-changing
407 * operation ran first, or even which order a read and
408 * a write occurred in.
410 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
411 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK))
414 if (flags & (CM_SCACHESYNC_STORESIZE | CM_SCACHESYNC_STORESTATUS
415 | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_GETCALLBACK)) {
416 /* if we're going to make an RPC to change the status, make sure
417 * that no one is bringing in or sending out the status.
419 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
420 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK))
/* also wait out any in-flight data I/O before changing status */
422 if (scp->bufReadsp || scp->bufWritesp) goto sleep;
424 if (flags & CM_SCACHESYNC_FETCHDATA) {
425 /* if we're bringing in a new chunk of data, make sure that
426 * nothing is happening to that chunk, and that we aren't
427 * changing the basic file status info, either.
429 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
430 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK))
432 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)))
435 if (flags & CM_SCACHESYNC_STOREDATA) {
436 /* same as fetch data */
437 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
438 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK))
440 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)))
444 if (flags & CM_SCACHESYNC_STOREDATA_EXCL) {
445 /* Don't allow concurrent StoreData RPC's */
446 if (scp->flags & CM_SCACHEFLAG_DATASTORING)
450 if (flags & CM_SCACHESYNC_ASYNCSTORE) {
451 /* Don't allow more than one BKG store request */
452 if (scp->flags & CM_SCACHEFLAG_ASYNCSTORING)
456 if (flags & CM_SCACHESYNC_LOCK) {
457 /* Don't allow concurrent fiddling with lock lists */
458 if (scp->flags & CM_SCACHEFLAG_LOCKING)
462 /* now the operations that don't correspond to making RPCs */
463 if (flags & CM_SCACHESYNC_GETSTATUS) {
464 /* we can use the status that's here, if we're not
465 * bringing in new status.
467 if (scp->flags & (CM_SCACHEFLAG_FETCHING))
470 if (flags & CM_SCACHESYNC_SETSTATUS) {
471 /* we can make a change to the local status, as long as
472 * the status isn't changing now.
474 * If we're fetching or storing a chunk of data, we can
475 * change the status locally, since the fetch/store
476 * operations don't change any of the data that we're
479 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
480 | CM_SCACHEFLAG_SIZESTORING))
483 if (flags & CM_SCACHESYNC_READ) {
484 /* we're going to read the data, make sure that the
485 * status is available, and that the data is here. It
486 * is OK to read while storing the data back.
488 if (scp->flags & CM_SCACHEFLAG_FETCHING)
/* a buffer still being fetched (and not yet fully fetched) can't be
 * read yet */
490 if (bufp && ((bufp->cmFlags
492 | CM_BUF_CMFULLYFETCHED))
493 == CM_BUF_CMFETCHING))
496 if (flags & CM_SCACHESYNC_WRITE) {
497 /* don't write unless the status is stable and the chunk
500 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
501 | CM_SCACHEFLAG_SIZESTORING))
503 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)))
507 if (flags & CM_SCACHESYNC_NEEDCALLBACK) {
508 if (!cm_HaveCallback(scp)) {
509 osi_Log1(afsd_logp, "CM SyncOp getting callback on scp %x",
/* cm_GetCallback can block: drop our locks around the RPC and
 * retake them in buffer-then-scache order */
511 if (bufLocked) lock_ReleaseMutex(&bufp->mx);
512 code = cm_GetCallback(scp, up, reqp, 0);
514 lock_ReleaseMutex(&scp->mx);
515 lock_ObtainMutex(&bufp->mx);
516 lock_ObtainMutex(&scp->mx);
518 if (code) return code;
524 /* can't check access rights without a callback */
525 osi_assert(flags & CM_SCACHESYNC_NEEDCALLBACK);
527 if ((rights & PRSFS_WRITE) && (scp->flags & CM_SCACHEFLAG_RO))
528 return CM_ERROR_READONLY;
530 if (cm_HaveAccessRights(scp, up, rights, &outRights)) {
531 if (~outRights & rights) return CM_ERROR_NOACCESS;
534 /* we don't know the required access rights */
535 if (bufLocked) lock_ReleaseMutex(&bufp->mx);
536 code = cm_GetAccessRights(scp, up, reqp);
537 if (code) return code;
539 lock_ReleaseMutex(&scp->mx);
540 lock_ObtainMutex(&bufp->mx);
541 lock_ObtainMutex(&scp->mx);
547 /* if we get here, we're happy */
551 /* first check if we're not supposed to wait: fail
552 * in this case, returning with everything still locked.
554 if (flags & CM_SCACHESYNC_NOWAIT) return CM_ERROR_WOULDBLOCK;
556 /* wait here, then try again */
557 osi_Log1(afsd_logp, "CM SyncOp sleeping scp %x", (long) scp);
558 scp->flags |= CM_SCACHEFLAG_WAITING;
559 if (bufLocked) lock_ReleaseMutex(&bufp->mx);
560 osi_SleepM((long) &scp->flags, &scp->mx);
561 osi_Log0(afsd_logp, "CM SyncOp woke!");
/* reacquire in the same buffer-then-scache order, then loop to
 * re-evaluate all the conflict checks from the top */
562 if (bufLocked) lock_ObtainMutex(&bufp->mx);
563 lock_ObtainMutex(&scp->mx);
564 } /* big while loop */
566 /* now, update the recorded state for RPC-type calls */
567 if (flags & CM_SCACHESYNC_FETCHSTATUS)
568 scp->flags |= CM_SCACHEFLAG_FETCHING;
569 if (flags & CM_SCACHESYNC_STORESTATUS)
570 scp->flags |= CM_SCACHEFLAG_STORING;
571 if (flags & CM_SCACHESYNC_STORESIZE)
572 scp->flags |= CM_SCACHEFLAG_SIZESTORING;
573 if (flags & CM_SCACHESYNC_GETCALLBACK)
574 scp->flags |= CM_SCACHEFLAG_GETCALLBACK;
575 if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
576 scp->flags |= CM_SCACHEFLAG_DATASTORING;
577 if (flags & CM_SCACHESYNC_ASYNCSTORE)
578 scp->flags |= CM_SCACHEFLAG_ASYNCSTORING;
579 if (flags & CM_SCACHESYNC_LOCK)
580 scp->flags |= CM_SCACHEFLAG_LOCKING;
582 /* now update the buffer pointer */
583 if (flags & CM_SCACHESYNC_FETCHDATA) {
584 /* ensure that the buffer isn't already in the I/O list */
586 for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
587 tbufp = osi_GetQData(qdp);
588 osi_assert(tbufp != bufp);
592 /* queue a held reference to the buffer in the "reading" I/O list */
594 osi_SetQData(qdp, bufp);
597 bufp->cmFlags |= CM_BUF_CMFETCHING;
599 osi_QAdd((osi_queue_t **) &scp->bufReadsp, &qdp->q);
602 if (flags & CM_SCACHESYNC_STOREDATA) {
603 /* ensure that the buffer isn't already in the I/O list */
605 for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
606 tbufp = osi_GetQData(qdp);
607 osi_assert(tbufp != bufp);
611 /* queue a held reference to the buffer in the "writing" I/O list */
613 osi_SetQData(qdp, bufp);
616 bufp->cmFlags |= CM_BUF_CMSTORING;
618 osi_QAdd((osi_queue_t **) &scp->bufWritesp, &qdp->q);
624 /* for those syncops that setup for RPCs.
625 * Called with scache locked.
/* Undoes the state recorded by cm_SyncOp for a completed RPC: clears the
 * in-progress flags, removes bufp from the relevant I/O queue, and wakes
 * any thread sleeping in cm_SyncOp on this scp.
 * NOTE(review): the extraction dropped lines (opening brace, loop/if
 * closing braces, the queue-data/buffer release calls after each
 * osi_QRemove, and the full cmFlags-clearing statement that line 661 below
 * is the tail of).  Restore from the original source. */
627 void cm_SyncOpDone(cm_scache_t *scp, cm_buf_t *bufp, long flags)
629 osi_queueData_t *qdp;
632 /* now, update the recorded state for RPC-type calls */
633 if (flags & CM_SCACHESYNC_FETCHSTATUS)
634 scp->flags &= ~CM_SCACHEFLAG_FETCHING;
635 if (flags & CM_SCACHESYNC_STORESTATUS)
636 scp->flags &= ~CM_SCACHEFLAG_STORING;
637 if (flags & CM_SCACHESYNC_STORESIZE)
638 scp->flags &= ~CM_SCACHEFLAG_SIZESTORING;
639 if (flags & CM_SCACHESYNC_GETCALLBACK)
640 scp->flags &= ~CM_SCACHEFLAG_GETCALLBACK;
641 if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
642 scp->flags &= ~CM_SCACHEFLAG_DATASTORING;
643 if (flags & CM_SCACHESYNC_ASYNCSTORE)
644 scp->flags &= ~CM_SCACHEFLAG_ASYNCSTORING;
645 if (flags & CM_SCACHESYNC_LOCK)
646 scp->flags &= ~CM_SCACHEFLAG_LOCKING;
648 /* now update the buffer pointer */
649 if (flags & CM_SCACHESYNC_FETCHDATA) {
650 /* find the buffer in the "reading" I/O list so it can be removed */
651 for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
652 tbufp = osi_GetQData(qdp);
653 if (tbufp == bufp) break;
655 osi_assert(qdp != NULL);
656 osi_assert(osi_GetQData(qdp) == bufp);
657 osi_QRemove((osi_queue_t **) &scp->bufReadsp, &qdp->q);
661 ~(CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED);
666 /* now update the buffer pointer */
667 if (flags & CM_SCACHESYNC_STOREDATA) {
668 /* find the buffer in the "writing" I/O list so it can be removed */
669 for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
670 tbufp = osi_GetQData(qdp);
671 if (tbufp == bufp) break;
673 osi_assert(qdp != NULL);
674 osi_assert(osi_GetQData(qdp) == bufp);
675 osi_QRemove((osi_queue_t **) &scp->bufWritesp, &qdp->q);
678 bufp->cmFlags &= ~CM_BUF_CMSTORING;
683 /* and wakeup anyone who is waiting */
684 if (scp->flags & CM_SCACHEFLAG_WAITING) {
685 scp->flags &= ~CM_SCACHEFLAG_WAITING;
686 osi_Wakeup((long) &scp->flags);
690 /* merge in a response from an RPC. The scp must be locked, and the callback
693 * Don't overwrite any status info that is dirty, since we could have a store
694 * operation (such as store data) that merges some info in, and we don't want
695 * to lose the local updates. Typically, there aren't many updates we do
696 * locally, anyway, probably only mtime.
698 * There is probably a bug in here where a chmod (which doesn't change
699 * serverModTime) that occurs between two fetches, both of whose responses are
700 * handled after the callback breaking is done, but only one of whose calls
701 * started before that, can cause old info to be merged from the first call.
/* Merge a status block returned by an RPC into scp (see the comment block
 * above for the dirty-field and callback-race caveats).  Called with scp
 * locked.  Fields locally dirtied (scp->mask bits) are preserved.
 * NOTE(review): the extraction dropped lines (opening brace, the braces
 * and early-return/`else` arms of the stale-data branch, the volume-name
 * argument to the first osi_Log2, and the CallerAccess/anyAccess merge
 * condition around line 789-790).  Restore from the original source. */
703 void cm_MergeStatus(cm_scache_t *scp, AFSFetchStatus *statusp, AFSVolSync *volp,
704 cm_user_t *userp, int flags)
/* stale response: the server's data version is older than what we
 * already have, and the caller did not force the merge */
706 if (!(flags & CM_MERGEFLAG_FORCE)
707 && statusp->DataVersion < (unsigned long) scp->dataVersion) {
708 struct cm_cell *cellp;
/* NOTE(review): this local shadows the AFSVolSync parameter `volp`;
 * legal C, but confusing -- consider renaming in the original. */
709 struct cm_volume *volp;
711 cellp = cm_FindCellByID(scp->fid.cell);
712 cm_GetVolumeByID(cellp, scp->fid.volume, userp,
713 (cm_req_t *) NULL, &volp);
715 osi_Log2(afsd_logp, "old data from server %x volume %s",
716 scp->cbServerp->addr.sin_addr.s_addr,
718 osi_Log3(afsd_logp, "Bad merge, scp %x, scp dv %d, RPC dv %d",
719 scp, scp->dataVersion, statusp->DataVersion);
720 /* we have a number of data fetch/store operations running
721 * concurrently, and we can tell which one executed last at the
722 * server by its mtime.
723 * Choose the one with the largest mtime, and ignore the rest.
725 * These concurrent calls are incompatible with setting the
726 * mtime, so we won't have a locally changed mtime here.
728 * We could also have ACL info for a different user than usual,
729 * in which case we have to do that part of the merge, anyway.
730 * We won't have to worry about the info being old, since we
731 * won't have concurrent calls
732 * that change file status running from this machine.
734 * Added 3/17/98: if we see data version regression on an RO
735 * file, it's probably due to a server holding an out-of-date
736 * replica, rather than to concurrent RPC's. Failures to
737 * release replicas are now flagged by the volserver, but only
738 * since AFS 3.4 5.22, so there are plenty of clients getting
739 * out-of-date replicas out there.
741 * If we discover an out-of-date replica, by this time it's too
742 * late to go to another server and retry. Also, we can't
743 * reject the merge, because then there is no way for
744 * GetAccess to do its work, and the caller gets into an
745 * infinite loop. So we just grin and bear it.
747 if (!(scp->flags & CM_SCACHEFLAG_RO))
750 scp->serverModTime = statusp->ServerModTime;
752 if (!(scp->mask & CM_SCACHEMASK_CLIENTMODTIME)) {
753 scp->clientModTime = statusp->ClientModTime;
755 if (!(scp->mask & CM_SCACHEMASK_LENGTH)) {
756 scp->length.LowPart = statusp->Length;
757 scp->length.HighPart = 0;
/* server length is always refreshed, even when the local length is
 * dirty */
760 scp->serverLength.LowPart = statusp->Length;
761 scp->serverLength.HighPart = 0;
763 scp->linkCount = statusp->LinkCount;
764 scp->dataVersion = statusp->DataVersion;
765 scp->owner = statusp->Owner;
766 scp->group = statusp->Group;
767 scp->unixModeBits = statusp->UnixModeBits & 07777;
769 if (statusp->FileType == File)
770 scp->fileType = CM_SCACHETYPE_FILE;
771 else if (statusp->FileType == Directory)
772 scp->fileType = CM_SCACHETYPE_DIRECTORY;
773 else if (statusp->FileType == SymbolicLink) {
/* AFS convention: a non-executable symlink is a mount point */
774 if ((scp->unixModeBits & 0111) == 0)
775 scp->fileType = CM_SCACHETYPE_MOUNTPOINT;
777 scp->fileType = CM_SCACHETYPE_SYMLINK;
779 else scp->fileType = 0; /* invalid */
781 /* and other stuff */
782 scp->parentVnode = statusp->ParentVnode;
783 scp->parentUnique = statusp->ParentUnique;
785 /* and merge in the private acl cache info, if this is more than the public
786 * info; merge in the public stuff in any case.
788 scp->anyAccess = statusp->AnonymousAccess;
791 cm_AddACLCache(scp, userp, statusp->CallerAccess);
795 /* note that our stat cache info is incorrect, so force us eventually
796 * to stat the file again. There may be dirty data associated with
797 * this vnode, and we want to preserve that information.
799 * This function works by simply simulating a loss of the callback.
801 * This function must be called with the scache locked.
/* NOTE(review): the extraction dropped lines (opening/closing braces and
 * the statements between cbServerp and cm_FreeAllACLEnts -- presumably the
 * callback-expiry reset and STATD/callback flag clearing).  Restore from
 * the original source. */
803 void cm_DiscardSCache(cm_scache_t *scp)
805 lock_AssertMutex(&scp->mx);
/* forget which server granted the callback; status must be refetched */
806 scp->cbServerp = NULL;
/* cached ACL rights derive from the discarded status, so drop them too */
809 cm_FreeAllACLEnts(scp);
812 void cm_AFSFidFromFid(AFSFid *afsFidp, cm_fid_t *fidp)
814 afsFidp->Volume = fidp->volume;
815 afsFidp->Vnode = fidp->vnode;
816 afsFidp->Unique = fidp->unique;
819 void cm_HoldSCache(cm_scache_t *scp)
821 lock_ObtainWrite(&cm_scacheLock);
822 osi_assert(scp->refCount > 0);
824 lock_ReleaseWrite(&cm_scacheLock);
827 void cm_ReleaseSCache(cm_scache_t *scp)
829 lock_ObtainWrite(&cm_scacheLock);
830 osi_assert(scp->refCount-- > 0);
831 lock_ReleaseWrite(&cm_scacheLock);
834 /* just look for the scp entry to get filetype */
835 /* doesn't need to be perfectly accurate, so locking doesn't matter too much */
836 int cm_FindFileType(cm_fid_t *fidp)
841 hash = CM_SCACHE_HASH(fidp);
843 osi_assert(fidp->cell != 0);
845 lock_ObtainWrite(&cm_scacheLock);
846 for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
847 if (cm_FidCmp(fidp, &scp->fid) == 0) {
849 /*cm_AdjustLRU(scp);*/
850 lock_ReleaseWrite(&cm_scacheLock);
851 return scp->fileType;
854 lock_ReleaseWrite(&cm_scacheLock);