2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afs/param.h>
extern osi_hyper_t hzero;           /* all-zero osi_hyper_t constant, defined elsewhere */

/* hash table stuff */
cm_scache_t **cm_hashTablep;        /* fid-hash buckets of scache entry chains */
long cm_hashTableSize;              /* number of buckets (set to maxSCaches / 2 in cm_InitSCache) */

long cm_currentSCaches;             /* count of scache entries allocated so far */
/* NOTE(review): cm_maxSCaches is referenced below but its definition was
 * elided from this listing -- confirm it is defined nearby. */

/* LRU queue of scache entries; First is the most-recently-used end
 * (cm_AdjustLRU moves entries to the front), Last is the coldest. */
cm_scache_t *cm_scacheLRUFirstp;
cm_scache_t *cm_scacheLRULastp;

/* global list of byte-range file locks */
osi_queue_t *cm_allFileLocks;

/* lock for globals */
osi_rwlock_t cm_scacheLock;

/* Dummy scache entry for use with pioctl fids */
cm_scache_t cm_fakeSCache;
44 /* must be called with cm_scacheLock write-locked! */
45 void cm_AdjustLRU(cm_scache_t *scp)
47 if (scp == cm_scacheLRULastp)
48 cm_scacheLRULastp = (cm_scache_t *) osi_QPrev(&scp->q);
49 osi_QRemove((osi_queue_t **) &cm_scacheLRUFirstp, &scp->q);
50 osi_QAdd((osi_queue_t **) &cm_scacheLRUFirstp, &scp->q);
51 if (!cm_scacheLRULastp) cm_scacheLRULastp = scp;
54 /* called with cm_scacheLock write-locked; find a vnode to recycle.
55 * Can allocate a new one if desperate, or if below quota (cm_maxSCaches).
57 cm_scache_t *cm_GetNewSCache(void)
64 if (cm_currentSCaches >= cm_maxSCaches) {
65 for (scp = cm_scacheLRULastp;
67 scp = (cm_scache_t *) osi_QPrev(&scp->q)) {
68 if (scp->refCount == 0) break;
72 /* we found an entry, so return it */
73 if (scp->flags & CM_SCACHEFLAG_INHASH) {
74 /* hash it out first */
75 i = CM_SCACHE_HASH(&scp->fid);
76 lscpp = &cm_hashTablep[i];
79 lscpp = &tscp->nextp, tscp = *lscpp) {
80 if (tscp == scp) break;
82 osi_assertx(tscp, "afsd: scache hash screwup");
84 scp->flags &= ~CM_SCACHEFLAG_INHASH;
87 /* look for things that shouldn't still be set */
88 osi_assert(scp->bufWritesp == NULL);
89 osi_assert(scp->bufReadsp == NULL);
91 /* invalidate so next merge works fine;
92 * also initialize some flags */
93 scp->flags &= ~(CM_SCACHEFLAG_STATD
95 | CM_SCACHEFLAG_PURERO
96 | CM_SCACHEFLAG_OVERQUOTA
97 | CM_SCACHEFLAG_OUTOFSPACE);
98 scp->serverModTime = 0;
100 scp->bulkStatProgress = hzero;
102 /* discard callback */
103 scp->cbServerp = NULL;
106 /* remove from dnlc */
110 /* discard cached status; if non-zero, Close
111 * tried to store this to server but failed */
114 /* drop held volume ref */
116 cm_PutVolume(scp->volp);
120 /* discard symlink info */
121 if (scp->mountPointStringp) {
122 free(scp->mountPointStringp);
123 scp->mountPointStringp = NULL;
125 if (scp->mountRootFidp) {
126 free(scp->mountRootFidp);
127 scp->mountRootFidp = NULL;
129 if (scp->dotdotFidp) {
130 free(scp->dotdotFidp);
131 scp->dotdotFidp = NULL;
134 /* not locked, but there can be no references to this guy
135 * while we hold the global refcount lock.
137 cm_FreeAllACLEnts(scp);
139 /* now remove from the LRU queue and put it back at the
140 * head of the LRU queue.
149 /* if we get here, we should allocate a new scache entry. We either are below
150 * quota or we have a leak and need to allocate a new one to avoid panicing.
152 scp = malloc(sizeof(*scp));
153 memset(scp, 0, sizeof(*scp));
154 lock_InitializeMutex(&scp->mx, "cm_scache_t mutex");
155 lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
157 /* and put it in the LRU queue */
158 osi_QAdd((osi_queue_t **) &cm_scacheLRUFirstp, &scp->q);
159 if (!cm_scacheLRULastp) cm_scacheLRULastp = scp;
161 cm_dnlcPurgedp(scp); /* make doubly sure that this is not in dnlc */
166 /* like strcmp, only for fids */
167 int cm_FidCmp(cm_fid_t *ap, cm_fid_t *bp)
169 if (ap->vnode != bp->vnode) return 1;
170 if (ap->volume != bp->volume) return 1;
171 if (ap->unique != bp->unique) return 1;
172 if (ap->cell != bp->cell) return 1;
176 void cm_fakeSCacheInit()
178 memset(&cm_fakeSCache, 0, sizeof(cm_fakeSCache));
179 lock_InitializeMutex(&cm_fakeSCache.mx, "cm_scache_t mutex");
180 cm_fakeSCache.cbServerp = (struct cm_server *)(-1);
181 /* can leave clientModTime at 0 */
182 cm_fakeSCache.fileType = CM_SCACHETYPE_FILE;
183 cm_fakeSCache.unixModeBits = 0777;
184 cm_fakeSCache.length.LowPart = 1000;
185 cm_fakeSCache.linkCount = 1;
188 void cm_InitSCache(long maxSCaches)
190 static osi_once_t once;
192 if (osi_Once(&once)) {
193 lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock");
194 cm_hashTableSize = maxSCaches / 2;
195 cm_hashTablep = malloc(sizeof(cm_scache_t *) * cm_hashTableSize);
196 memset(cm_hashTablep, 0, sizeof(cm_scache_t *) * cm_hashTableSize);
197 cm_allFileLocks = NULL;
198 cm_currentSCaches = 0;
199 cm_maxSCaches = maxSCaches;
206 /* version that doesn't bother creating the entry if we don't find it */
207 cm_scache_t *cm_FindSCache(cm_fid_t *fidp)
212 hash = CM_SCACHE_HASH(fidp);
214 osi_assert(fidp->cell != 0);
216 lock_ObtainWrite(&cm_scacheLock);
217 for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
218 if (cm_FidCmp(fidp, &scp->fid) == 0) {
221 lock_ReleaseWrite(&cm_scacheLock);
225 lock_ReleaseWrite(&cm_scacheLock);
229 long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp,
238 hash = CM_SCACHE_HASH(fidp);
240 osi_assert(fidp->cell != 0);
242 lock_ObtainWrite(&cm_scacheLock);
243 for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
244 if (cm_FidCmp(fidp, &scp->fid) == 0) {
248 lock_ReleaseWrite(&cm_scacheLock);
253 /* otherwise, we need to find the volume */
254 lock_ReleaseWrite(&cm_scacheLock); /* for perf. reasons */
255 cellp = cm_FindCellByID(fidp->cell);
256 if (!cellp) return CM_ERROR_NOSUCHCELL;
258 code = cm_GetVolumeByID(cellp, fidp->volume, userp, reqp, &volp);
259 if (code) return code;
261 /* otherwise, we have the volume, now reverify that the scp doesn't
262 * exist, and proceed.
264 lock_ObtainWrite(&cm_scacheLock);
265 for(scp=cm_hashTablep[hash]; scp; scp=scp->nextp) {
266 if (cm_FidCmp(fidp, &scp->fid) == 0) {
269 lock_ReleaseWrite(&cm_scacheLock);
276 /* now, if we don't have the fid, recycle something */
277 scp = cm_GetNewSCache();
278 osi_assert(!(scp->flags & CM_SCACHEFLAG_INHASH));
280 scp->volp = volp; /* a held reference */
282 /* if this scache entry represents a volume root then we need
283 * to copy the dotdotFipd from the volume structure where the
284 * "master" copy is stored (defect 11489)
286 if(scp->fid.vnode == 1 && scp->fid.unique == 1 && volp->dotdotFidp) {
287 if (scp->dotdotFidp == (cm_fid_t *) NULL)
288 scp->dotdotFidp = (cm_fid_t *) malloc(sizeof(cm_fid_t));
289 *(scp->dotdotFidp) = *volp->dotdotFidp;
292 if (volp->roID == fidp->volume)
293 scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
294 else if (volp->bkID == fidp->volume)
295 scp->flags |= CM_SCACHEFLAG_RO;
296 scp->nextp = cm_hashTablep[hash];
297 cm_hashTablep[hash] = scp;
298 scp->flags |= CM_SCACHEFLAG_INHASH;
300 lock_ReleaseWrite(&cm_scacheLock);
302 /* now we have a held scache entry; just return it */
308 /* synchronize a fetch, store, read, write, fetch status or store status.
309 * Called with scache mutex held, and returns with it held, but temporarily
310 * drops it during the fetch.
312 * At most one flag can be on in flags, if this is an RPC request.
314 * Also, if we're fetching or storing data, we must ensure that we have a buffer.
316 * There are a lot of weird restrictions here; here's an attempt to explain the
317 * rationale for the concurrency restrictions implemented in this function.
319 * First, although the file server will break callbacks when *another* machine
320 * modifies a file or status block, the client itself is responsible for
321 * concurrency control on its own requests. Callback breaking events are rare,
322 * and simply invalidate any concurrent new status info.
324 * In the absence of callback breaking messages, we need to know how to
325 * synchronize incoming responses describing updates to files. We synchronize
326 * operations that update the data version by comparing the data versions.
327 * However, updates that do not update the data, but only the status, can't be
328 * synchronized with fetches or stores, since there's nothing to compare
329 * to tell which operation executed first at the server.
331 * Thus, we can allow multiple ops that change file data, or dir data, and
332 * fetches. However, status storing ops have to be done serially.
334 * Furthermore, certain data-changing ops are incompatible: we can't read or
335 * write a buffer while doing a truncate. We can't read and write the same
336 * buffer at the same time, or write while fetching or storing, or read while
337 * fetching a buffer (this may change). We can't fetch and store at the same
340 * With respect to status, we can't read and write at the same time, read while
341 * fetching, write while fetching or storing, or fetch and store at the same time.
 * We can't allow a get callback RPC to run concurrently with something that
344 * will return updated status, since we could start a call, have the server
345 * return status, have another machine make an update to the status (which
346 * doesn't change serverModTime), have the original machine get a new callback,
347 * and then have the original machine merge in the early, old info from the
348 * first call. At this point, the easiest way to avoid this problem is to have
349 * getcallback calls conflict with all others for the same vnode. Other calls
350 * to cm_MergeStatus that aren't associated with calls to cm_SyncOp on the same
351 * vnode must be careful not to merge in their status unless they have obtained
352 * a callback from the start of their call.
355 * Concurrent StoreData RPC's can cause trouble if the file is being extended.
356 * Each such RPC passes a FileLength parameter, which the server uses to do
357 * pre-truncation if necessary. So if two RPC's are processed out of order at
358 * the server, the one with the smaller FileLength will be processed last,
359 * possibly resulting in a bogus truncation. The simplest way to avoid this
360 * is to serialize all StoreData RPC's. This is the reason we defined
361 * CM_SCACHESYNC_STOREDATA_EXCL and CM_SCACHEFLAG_DATASTORING.
363 long cm_SyncOp(cm_scache_t *scp, cm_buf_t *bufp, cm_user_t *up, cm_req_t *reqp,
364 long rights, long flags)
366 osi_queueData_t *qdp;
372 /* lookup this first */
373 bufLocked = flags & CM_SCACHESYNC_BUFLOCKED;
375 /* some minor assertions */
376 if (flags & (CM_SCACHESYNC_STOREDATA | CM_SCACHESYNC_FETCHDATA
377 | CM_SCACHESYNC_READ | CM_SCACHESYNC_WRITE
378 | CM_SCACHESYNC_SETSIZE)) {
380 osi_assert(bufp->refCount > 0);
382 osi_assert(cm_FidCmp(&bufp->fid, &scp->fid) == 0);
386 else osi_assert(bufp == NULL);
388 /* Do the access check. Now we don't really do the access check
389 * atomically, since the caller doesn't expect the parent dir to be
390 * returned locked, and that is what we'd have to do to prevent a
391 * callback breaking message on the parent due to a setacl call from
392 * being processed while we're running. So, instead, we check things
393 * here, and if things look fine with the access, we proceed to finish
394 * the rest of this check. Sort of a hack, but probably good enough.
398 if (flags & CM_SCACHESYNC_FETCHSTATUS) {
399 /* if we're bringing in a new status block, ensure that
400 * we aren't already doing so, and that no one is
401 * changing the status concurrently, either. We need
402 * to do this, even if the status is of a different
403 * type, since we don't have the ability to figure out,
404 * in the AFS 3 protocols, which status-changing
405 * operation ran first, or even which order a read and
406 * a write occurred in.
408 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
409 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK))
412 if (flags & (CM_SCACHESYNC_STORESIZE | CM_SCACHESYNC_STORESTATUS
413 | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_GETCALLBACK)) {
414 /* if we're going to make an RPC to change the status, make sure
415 * that no one is bringing in or sending out the status.
417 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
418 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK))
420 if (scp->bufReadsp || scp->bufWritesp) goto sleep;
422 if (flags & CM_SCACHESYNC_FETCHDATA) {
423 /* if we're bringing in a new chunk of data, make sure that
424 * nothing is happening to that chunk, and that we aren't
425 * changing the basic file status info, either.
427 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
428 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK))
430 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)))
433 if (flags & CM_SCACHESYNC_STOREDATA) {
434 /* same as fetch data */
435 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
436 | CM_SCACHEFLAG_SIZESTORING | CM_SCACHEFLAG_GETCALLBACK))
438 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)))
442 if (flags & CM_SCACHESYNC_STOREDATA_EXCL) {
443 /* Don't allow concurrent StoreData RPC's */
444 if (scp->flags & CM_SCACHEFLAG_DATASTORING)
448 if (flags & CM_SCACHESYNC_ASYNCSTORE) {
449 /* Don't allow more than one BKG store request */
450 if (scp->flags & CM_SCACHEFLAG_ASYNCSTORING)
454 if (flags & CM_SCACHESYNC_LOCK) {
455 /* Don't allow concurrent fiddling with lock lists */
456 if (scp->flags & CM_SCACHEFLAG_LOCKING)
460 /* now the operations that don't correspond to making RPCs */
461 if (flags & CM_SCACHESYNC_GETSTATUS) {
462 /* we can use the status that's here, if we're not
463 * bringing in new status.
465 if (scp->flags & (CM_SCACHEFLAG_FETCHING))
468 if (flags & CM_SCACHESYNC_SETSTATUS) {
469 /* we can make a change to the local status, as long as
470 * the status isn't changing now.
472 * If we're fetching or storing a chunk of data, we can
473 * change the status locally, since the fetch/store
474 * operations don't change any of the data that we're
477 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
478 | CM_SCACHEFLAG_SIZESTORING))
481 if (flags & CM_SCACHESYNC_READ) {
482 /* we're going to read the data, make sure that the
483 * status is available, and that the data is here. It
484 * is OK to read while storing the data back.
486 if (scp->flags & CM_SCACHEFLAG_FETCHING)
488 if (bufp && ((bufp->cmFlags
490 | CM_BUF_CMFULLYFETCHED))
491 == CM_BUF_CMFETCHING))
494 if (flags & CM_SCACHESYNC_WRITE) {
495 /* don't write unless the status is stable and the chunk
498 if (scp->flags & (CM_SCACHEFLAG_FETCHING | CM_SCACHEFLAG_STORING
499 | CM_SCACHEFLAG_SIZESTORING))
501 if (bufp && (bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)))
505 if (flags & CM_SCACHESYNC_NEEDCALLBACK) {
506 if (!cm_HaveCallback(scp)) {
507 osi_Log1(afsd_logp, "CM SyncOp getting callback on scp %x",
509 if (bufLocked) lock_ReleaseMutex(&bufp->mx);
510 code = cm_GetCallback(scp, up, reqp, 0);
512 lock_ReleaseMutex(&scp->mx);
513 lock_ObtainMutex(&bufp->mx);
514 lock_ObtainMutex(&scp->mx);
516 if (code) return code;
522 /* can't check access rights without a callback */
523 osi_assert(flags & CM_SCACHESYNC_NEEDCALLBACK);
525 if ((rights & PRSFS_WRITE) && (scp->flags & CM_SCACHEFLAG_RO))
526 return CM_ERROR_READONLY;
528 if (cm_HaveAccessRights(scp, up, rights, &outRights)) {
529 if (~outRights & rights) return CM_ERROR_NOACCESS;
532 /* we don't know the required access rights */
533 if (bufLocked) lock_ReleaseMutex(&bufp->mx);
534 code = cm_GetAccessRights(scp, up, reqp);
535 if (code) return code;
537 lock_ReleaseMutex(&scp->mx);
538 lock_ObtainMutex(&bufp->mx);
539 lock_ObtainMutex(&scp->mx);
545 /* if we get here, we're happy */
549 /* first check if we're not supposed to wait: fail
550 * in this case, returning with everything still locked.
552 if (flags & CM_SCACHESYNC_NOWAIT) return CM_ERROR_WOULDBLOCK;
554 /* wait here, then try again */
555 osi_Log1(afsd_logp, "CM SyncOp sleeping scp %x", (long) scp);
556 scp->flags |= CM_SCACHEFLAG_WAITING;
557 if (bufLocked) lock_ReleaseMutex(&bufp->mx);
558 osi_SleepM((long) &scp->flags, &scp->mx);
559 osi_Log0(afsd_logp, "CM SyncOp woke!");
560 if (bufLocked) lock_ObtainMutex(&bufp->mx);
561 lock_ObtainMutex(&scp->mx);
562 } /* big while loop */
564 /* now, update the recorded state for RPC-type calls */
565 if (flags & CM_SCACHESYNC_FETCHSTATUS)
566 scp->flags |= CM_SCACHEFLAG_FETCHING;
567 if (flags & CM_SCACHESYNC_STORESTATUS)
568 scp->flags |= CM_SCACHEFLAG_STORING;
569 if (flags & CM_SCACHESYNC_STORESIZE)
570 scp->flags |= CM_SCACHEFLAG_SIZESTORING;
571 if (flags & CM_SCACHESYNC_GETCALLBACK)
572 scp->flags |= CM_SCACHEFLAG_GETCALLBACK;
573 if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
574 scp->flags |= CM_SCACHEFLAG_DATASTORING;
575 if (flags & CM_SCACHESYNC_ASYNCSTORE)
576 scp->flags |= CM_SCACHEFLAG_ASYNCSTORING;
577 if (flags & CM_SCACHESYNC_LOCK)
578 scp->flags |= CM_SCACHEFLAG_LOCKING;
580 /* now update the buffer pointer */
581 if (flags & CM_SCACHESYNC_FETCHDATA) {
582 /* ensure that the buffer isn't already in the I/O list */
584 for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
585 tbufp = osi_GetQData(qdp);
586 osi_assert(tbufp != bufp);
590 /* queue a held reference to the buffer in the "reading" I/O list */
592 osi_SetQData(qdp, bufp);
595 bufp->cmFlags |= CM_BUF_CMFETCHING;
597 osi_QAdd((osi_queue_t **) &scp->bufReadsp, &qdp->q);
600 if (flags & CM_SCACHESYNC_STOREDATA) {
601 /* ensure that the buffer isn't already in the I/O list */
603 for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
604 tbufp = osi_GetQData(qdp);
605 osi_assert(tbufp != bufp);
609 /* queue a held reference to the buffer in the "writing" I/O list */
611 osi_SetQData(qdp, bufp);
614 bufp->cmFlags |= CM_BUF_CMSTORING;
616 osi_QAdd((osi_queue_t **) &scp->bufWritesp, &qdp->q);
622 /* for those syncops that setup for RPCs.
623 * Called with scache locked.
625 void cm_SyncOpDone(cm_scache_t *scp, cm_buf_t *bufp, long flags)
627 osi_queueData_t *qdp;
630 /* now, update the recorded state for RPC-type calls */
631 if (flags & CM_SCACHESYNC_FETCHSTATUS)
632 scp->flags &= ~CM_SCACHEFLAG_FETCHING;
633 if (flags & CM_SCACHESYNC_STORESTATUS)
634 scp->flags &= ~CM_SCACHEFLAG_STORING;
635 if (flags & CM_SCACHESYNC_STORESIZE)
636 scp->flags &= ~CM_SCACHEFLAG_SIZESTORING;
637 if (flags & CM_SCACHESYNC_GETCALLBACK)
638 scp->flags &= ~CM_SCACHEFLAG_GETCALLBACK;
639 if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
640 scp->flags &= ~CM_SCACHEFLAG_DATASTORING;
641 if (flags & CM_SCACHESYNC_ASYNCSTORE)
642 scp->flags &= ~CM_SCACHEFLAG_ASYNCSTORING;
643 if (flags & CM_SCACHESYNC_LOCK)
644 scp->flags &= ~CM_SCACHEFLAG_LOCKING;
646 /* now update the buffer pointer */
647 if (flags & CM_SCACHESYNC_FETCHDATA) {
648 /* ensure that the buffer isn't already in the I/O list */
649 for(qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
650 tbufp = osi_GetQData(qdp);
651 if (tbufp == bufp) break;
653 osi_assert(qdp != NULL);
654 osi_assert(osi_GetQData(qdp) == bufp);
655 osi_QRemove((osi_queue_t **) &scp->bufReadsp, &qdp->q);
659 ~(CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED);
664 /* now update the buffer pointer */
665 if (flags & CM_SCACHESYNC_STOREDATA) {
666 /* ensure that the buffer isn't already in the I/O list */
667 for(qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
668 tbufp = osi_GetQData(qdp);
669 if (tbufp == bufp) break;
671 osi_assert(qdp != NULL);
672 osi_assert(osi_GetQData(qdp) == bufp);
673 osi_QRemove((osi_queue_t **) &scp->bufWritesp, &qdp->q);
676 bufp->cmFlags &= ~CM_BUF_CMSTORING;
681 /* and wakeup anyone who is waiting */
682 if (scp->flags & CM_SCACHEFLAG_WAITING) {
683 scp->flags &= ~CM_SCACHEFLAG_WAITING;
684 osi_Wakeup((long) &scp->flags);
688 /* merge in a response from an RPC. The scp must be locked, and the callback
691 * Don't overwrite any status info that is dirty, since we could have a store
692 * operation (such as store data) that merges some info in, and we don't want
693 * to lose the local updates. Typically, there aren't many updates we do
694 * locally, anyway, probably only mtime.
696 * There is probably a bug in here where a chmod (which doesn't change
697 * serverModTime) that occurs between two fetches, both of whose responses are
698 * handled after the callback breaking is done, but only one of whose calls
699 * started before that, can cause old info to be merged from the first call.
701 void cm_MergeStatus(cm_scache_t *scp, AFSFetchStatus *statusp, AFSVolSync *volp,
702 cm_user_t *userp, int flags)
704 if (!(flags & CM_MERGEFLAG_FORCE)
705 && statusp->DataVersion < (unsigned long) scp->dataVersion) {
706 struct cm_cell *cellp;
707 struct cm_volume *volp;
709 cellp = cm_FindCellByID(scp->fid.cell);
710 cm_GetVolumeByID(cellp, scp->fid.volume, userp,
711 (cm_req_t *) NULL, &volp);
713 osi_Log2(afsd_logp, "old data from server %x volume %s",
714 scp->cbServerp->addr.sin_addr.s_addr,
716 osi_Log3(afsd_logp, "Bad merge, scp %x, scp dv %d, RPC dv %d",
717 scp, scp->dataVersion, statusp->DataVersion);
718 /* we have a number of data fetch/store operations running
719 * concurrently, and we can tell which one executed last at the
720 * server by its mtime.
721 * Choose the one with the largest mtime, and ignore the rest.
723 * These concurrent calls are incompatible with setting the
724 * mtime, so we won't have a locally changed mtime here.
726 * We could also have ACL info for a different user than usual,
727 * in which case we have to do that part of the merge, anyway.
728 * We won't have to worry about the info being old, since we
729 * won't have concurrent calls
730 * that change file status running from this machine.
732 * Added 3/17/98: if we see data version regression on an RO
733 * file, it's probably due to a server holding an out-of-date
734 * replica, rather than to concurrent RPC's. Failures to
735 * release replicas are now flagged by the volserver, but only
736 * since AFS 3.4 5.22, so there are plenty of clients getting
737 * out-of-date replicas out there.
739 * If we discover an out-of-date replica, by this time it's too
740 * late to go to another server and retry. Also, we can't
741 * reject the merge, because then there is no way for
742 * GetAccess to do its work, and the caller gets into an
743 * infinite loop. So we just grin and bear it.
745 if (!(scp->flags & CM_SCACHEFLAG_RO))
748 scp->serverModTime = statusp->ServerModTime;
750 if (!(scp->mask & CM_SCACHEMASK_CLIENTMODTIME)) {
751 scp->clientModTime = statusp->ClientModTime;
753 if (!(scp->mask & CM_SCACHEMASK_LENGTH)) {
754 scp->length.LowPart = statusp->Length;
755 scp->length.HighPart = 0;
758 scp->serverLength.LowPart = statusp->Length;
759 scp->serverLength.HighPart = 0;
761 scp->linkCount = statusp->LinkCount;
762 scp->dataVersion = statusp->DataVersion;
763 scp->owner = statusp->Owner;
764 scp->group = statusp->Group;
765 scp->unixModeBits = statusp->UnixModeBits & 07777;
767 if (statusp->FileType == File)
768 scp->fileType = CM_SCACHETYPE_FILE;
769 else if (statusp->FileType == Directory)
770 scp->fileType = CM_SCACHETYPE_DIRECTORY;
771 else if (statusp->FileType == SymbolicLink) {
772 if ((scp->unixModeBits & 0111) == 0)
773 scp->fileType = CM_SCACHETYPE_MOUNTPOINT;
775 scp->fileType = CM_SCACHETYPE_SYMLINK;
777 else scp->fileType = 0; /* invalid */
779 /* and other stuff */
780 scp->parentVnode = statusp->ParentVnode;
781 scp->parentUnique = statusp->ParentUnique;
783 /* and merge in the private acl cache info, if this is more than the public
784 * info; merge in the public stuff in any case.
786 scp->anyAccess = statusp->AnonymousAccess;
789 cm_AddACLCache(scp, userp, statusp->CallerAccess);
793 /* note that our stat cache info is incorrect, so force us eventually
794 * to stat the file again. There may be dirty data associated with
795 * this vnode, and we want to preserve that information.
797 * This function works by simply simulating a loss of the callback.
799 * This function must be called with the scache locked.
801 void cm_DiscardSCache(cm_scache_t *scp)
803 lock_AssertMutex(&scp->mx);
804 scp->cbServerp = NULL;
807 cm_FreeAllACLEnts(scp);
810 void cm_AFSFidFromFid(AFSFid *afsFidp, cm_fid_t *fidp)
812 afsFidp->Volume = fidp->volume;
813 afsFidp->Vnode = fidp->vnode;
814 afsFidp->Unique = fidp->unique;
817 void cm_HoldSCache(cm_scache_t *scp)
819 lock_ObtainWrite(&cm_scacheLock);
820 osi_assert(scp->refCount > 0);
822 lock_ReleaseWrite(&cm_scacheLock);
825 void cm_ReleaseSCache(cm_scache_t *scp)
827 lock_ObtainWrite(&cm_scacheLock);
828 osi_assert(scp->refCount-- > 0);
829 lock_ReleaseWrite(&cm_scacheLock);