2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afs/param.h>
26 extern void afsi_log(char *pattern, ...);
29 osi_mutex_t cm_bufGetMutex;
30 #ifdef AFS_FREELANCE_CLIENT
31 extern osi_mutex_t cm_Freelance_Lock;
34 /* functions called back from the buffer package when reading or writing data,
35 * or when holding or releasing a vnode pointer.
37 long cm_BufWrite(void *vfidp, osi_hyper_t *offsetp, long length, long flags,
38 cm_user_t *userp, cm_req_t *reqp)
40 /* store the data back from this buffer; the buffer is locked and held,
41 * but the vnode involved isn't locked, yet. It is held by its
42 * reference from the buffer, which won't change until the buffer is
43 * released by our caller. Thus, we don't have to worry about holding
47 cm_fid_t *fidp = vfidp;
51 AFSFetchStatus outStatus;
52 AFSStoreStatus inStatus;
56 struct rx_call *callp;
63 cm_bulkIO_t biod; /* bulk IO descriptor */
65 osi_assert(userp != NULL);
67 /* now, the buffer may or may not be filled with good data (buf_GetNew
68 * drops lots of locks, and may indeed return a properly initialized
69 * buffer, although more likely it will just return a new, empty buffer.
71 scp = cm_FindSCache(fidp);
73 return CM_ERROR_NOSUCHFILE; /* shouldn't happen */
75 cm_AFSFidFromFid(&tfid, fidp);
77 lock_ObtainMutex(&scp->mx);
79 code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
81 osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
82 lock_ReleaseMutex(&scp->mx);
83 cm_ReleaseSCache(scp);
87 if (biod.length == 0) {
88 osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
89 lock_ReleaseMutex(&scp->mx);
90 cm_ReleaseBIOD(&biod, 1); /* should be a NOOP */
91 cm_ReleaseSCache(scp);
95 /* Serialize StoreData RPC's; for rationale see cm_scache.c */
96 (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);
98 /* prepare the output status for the store */
99 scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
100 cm_StatusFromAttr(&inStatus, scp, NULL);
101 truncPos = scp->length.LowPart;
102 if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
103 && scp->truncPos.LowPart < (unsigned long) truncPos)
104 truncPos = scp->truncPos.LowPart;
105 scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
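/* For example (illustrative values only): if the cached file length is 0x2000
 * but a truncation to 0x1000 is still pending (CM_SCACHEMASK_TRUNCPOS set),
 * truncPos comes out as 0x1000, so the StoreData call below truncates the
 * file before the new data is written.
 */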
107 /* compute how many bytes to write from this buffer */
108 thyper = LargeIntegerSubtract(scp->length, biod.offset);
109 if (LargeIntegerLessThanZero(thyper)) {
110 /* entire buffer is past EOF */
114 /* otherwise write out part of buffer before EOF, but not
115 * more than bufferSize bytes.
117 nbytes = thyper.LowPart;
118 if (nbytes > biod.length)
119 nbytes = biod.length;
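/* Worked example (illustrative values only): if scp->length is 0x1800 and
 * this BIOD starts at offset 0x1000 with biod.length 0x1000, thyper is 0x800,
 * so nbytes is clamped to 0x800 and we store only the bytes that lie before
 * EOF.
 */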
122 lock_ReleaseMutex(&scp->mx);
124 /* now we're ready to do the store operation */
126 code = cm_Conn(&scp->fid, userp, reqp, &connp);
130 callp = rx_NewCall(connp->callp);
132 osi_Log3(afsd_logp, "CALL StoreData vp %x, off 0x%x, size 0x%x",
133 (long) scp, biod.offset.LowPart, nbytes);
135 code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
136 biod.offset.LowPart, nbytes, truncPos);
139 /* write the data from the list of buffers */
143 qdp = biod.bufListEndp;
145 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
146 osi_assert(qdp != NULL);
147 bufp = osi_GetQData(qdp);
148 bufferp = bufp->datap;
150 if (wbytes > buf_bufferSize)
151 wbytes = buf_bufferSize;
153 /* write out wbytes of data from bufferp */
154 temp = rx_Write(callp, bufferp, wbytes);
155 if (temp != wbytes) {
160 } /* while more bytes to write */
161 } /* if RPC started successfully */
164 code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
165 code = rx_EndCall(callp, code);
166 osi_Log0(afsd_logp, "CALL StoreData DONE");
168 } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, code));
169 code = cm_MapRPCError(code, reqp);
171 /* now, clean up our state */
172 lock_ObtainMutex(&scp->mx);
174 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
177 /* now, here's something a little tricky: in AFS 3, a dirty
178 * length can't be directly stored; instead, a dirty chunk is
179 * stored that sets the file's size (by writing and by using
180 * the truncate-first option in the store call).
182 * At this point, we've just finished a store, and so the trunc
183 * pos field is clean. If the file's size at the server is at
184 * least as big as we think it should be, then we turn off the
185 * length dirty bit, since all the other dirty buffers must
186 * precede this one in the file.
188 * The file's desired size shouldn't be smaller than what's
189 * stored at the server now, since we just did the trunc pos
192 * We have to turn off the length dirty bit as soon as we can,
193 * so that we see updates made by other machines.
195 if (outStatus.Length >= scp->length.LowPart)
196 scp->mask &= ~CM_SCACHEMASK_LENGTH;
197 cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
199 if (code == CM_ERROR_SPACE)
200 scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
201 else if (code == CM_ERROR_QUOTA)
202 scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
204 lock_ReleaseMutex(&scp->mx);
205 cm_ReleaseBIOD(&biod, 1);
206 cm_ReleaseSCache(scp);
212 * Truncate the file by sending a StoreData RPC with zero length.
214 * Called with scp locked. Releases and re-obtains the lock.
216 long cm_StoreMini(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
218 AFSFetchStatus outStatus;
219 AFSStoreStatus inStatus;
225 struct rx_call *callp;
227 /* Serialize StoreData RPC's; for rationale see cm_scache.c */
228 (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
229 CM_SCACHESYNC_STOREDATA_EXCL);
231 /* prepare the output status for the store */
232 inStatus.Mask = AFS_SETMODTIME;
233 inStatus.ClientModTime = scp->clientModTime;
234 scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;
236 /* calculate truncation position */
237 truncPos = scp->length.LowPart;
238 if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
239 && scp->truncPos.LowPart < (unsigned long) truncPos)
240 truncPos = scp->truncPos.LowPart;
241 scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
243 lock_ReleaseMutex(&scp->mx);
245 cm_AFSFidFromFid(&tfid, &scp->fid);
247 /* now we're ready to do the store operation */
249 code = cm_Conn(&scp->fid, userp, reqp, &connp);
253 callp = rx_NewCall(connp->callp);
255 code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
259 code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
260 code = rx_EndCall(callp, code);
261 } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, code));
262 code = cm_MapRPCError(code, reqp);
264 /* now, clean up our state */
265 lock_ObtainMutex(&scp->mx);
267 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
271 * For explanation of the handling of CM_SCACHEMASK_LENGTH, see cm_BufWrite() above.
274 if (outStatus.Length >= scp->length.LowPart)
275 scp->mask &= ~CM_SCACHEMASK_LENGTH;
276 cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
282 long cm_BufRead(cm_buf_t *bufp, long nbytes, long *bytesReadp, cm_user_t *userp)
284 *bytesReadp = buf_bufferSize;
286 /* now return a code that means that I/O is done */
290 /* stabilize scache entry, and return with it locked so
293 long cm_BufStabilize(void *parmp, cm_user_t *userp, cm_req_t *reqp)
300 lock_ObtainMutex(&scp->mx);
301 code = cm_SyncOp(scp, NULL, userp, reqp, 0,
302 CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
304 lock_ReleaseMutex(&scp->mx);
311 /* undoes the work that cm_BufStabilize does: releases lock so things can change again */
312 long cm_BufUnstabilize(void *parmp, cm_user_t *userp)
318 lock_ReleaseMutex(&scp->mx);
320 /* always succeeds */
324 cm_buf_ops_t cm_bufOps = {
    cm_BufWrite,
    cm_BufRead,
    cm_BufStabilize,
    cm_BufUnstabilize
};
331 int cm_InitDCache(long chunkSize, long nbuffers)
333 lock_InitializeMutex(&cm_bufGetMutex, "buf_Get mutex");
335 buf_nbuffers = nbuffers;
336 return buf_Init(&cm_bufOps);
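/* A minimal sketch of how startup code might wire this up (the caller shown
 * here is illustrative, not the actual afsd initialization path; the variable
 * names are assumptions):
 *
 *     code = cm_InitDCache(cm_chunkSize, numberOfCacheBuffers);
 *     if (code) ... fail client startup ...
 *
 * After this returns, the buffer package calls back through cm_bufOps for
 * reads, writes and stabilize/unstabilize requests.
 */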
339 /* check to see if we have an up-to-date buffer. The buffer must have
340 * previously been obtained by calling buf_Get.
342 * Make sure we have a callback, and that the dataversion matches.
344 * Scp must be locked.
346 * Bufp *may* be locked.
348 int cm_HaveBuffer(cm_scache_t *scp, cm_buf_t *bufp, int isBufLocked)
351 if (!cm_HaveCallback(scp))
354 & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
355 == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
357 if (bufp->dataVersion == scp->dataVersion)
360 code = lock_TryMutex(&bufp->mx);
362 /* don't have the lock, and can't lock it, then
369 /* remember dirty flag for later */
370 code = bufp->flags & CM_BUF_DIRTY;
372 /* release lock if we obtained it here */
374 lock_ReleaseMutex(&bufp->mx);
376 /* if buffer was dirty, buffer is acceptable for use */
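/* Illustrative sketch (not the actual read path, which lives elsewhere in the
 * cache manager; userp and reqp are assumed to be in scope): a reader would
 * typically loop until this predicate holds, fetching while it does not:
 *
 *     while (!cm_HaveBuffer(scp, bufp, 0)) {
 *         code = cm_GetBuffer(scp, bufp, NULL, userp, reqp);
 *         if (code) break;
 *     }
 */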
383 /* used when deciding whether to do a prefetch or not */
384 long cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, long length,
385 cm_user_t *up, cm_req_t *reqp, osi_hyper_t *realBasep)
393 /* now scan all buffers in the range, looking for any that look like
398 lock_ObtainMutex(&scp->mx);
400 /* get callback so we can do a meaningful dataVersion comparison */
401 code = cm_SyncOp(scp, NULL, up, reqp, 0,
402 CM_SCACHESYNC_NEEDCALLBACK
403 | CM_SCACHESYNC_GETSTATUS);
405 scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
406 lock_ReleaseMutex(&scp->mx);
410 if (LargeIntegerGreaterThanOrEqualTo(tbase, scp->length)) {
411 /* we're past the end of file */
415 bp = buf_Find(scp, &tbase);
416 /* We cheat slightly by not locking the bp mutex. */
419 & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)) == 0
420 && bp->dataVersion != scp->dataVersion)
427 /* if this buffer is essentially guaranteed to require a fetch,
428 * break out here and return this position.
433 toffset.LowPart = buf_bufferSize;
434 toffset.HighPart = 0;
435 tbase = LargeIntegerAdd(toffset, tbase);
436 length -= buf_bufferSize;
439 /* if we get here, either everything is fine or the 'stop' flag stopped us at a
440 * particular buffer in the range that definitely needs to be fetched.
443 /* return non-zero code since realBasep won't be valid */
444 scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
448 /* successfully found a page that will need fetching */
452 lock_ReleaseMutex(&scp->mx);
456 void cm_BkgStore(cm_scache_t *scp, long p1, long p2, long p3, long p4,
464 req.flags |= CM_REQ_NORETRY;
466 toffset.LowPart = p1;
467 toffset.HighPart = p2;
470 osi_Log2(afsd_logp, "Starting BKG store vp 0x%x, base 0x%x", scp, p1);
472 cm_BufWrite(&scp->fid, &toffset, length, /* flags */ 0, userp, &req);
474 lock_ObtainMutex(&scp->mx);
475 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
476 lock_ReleaseMutex(&scp->mx);
479 void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base)
484 thyper.LowPart = cm_chunkSize;
486 thyper = LargeIntegerAdd(*base, thyper);
487 thyper.LowPart &= (-cm_chunkSize);
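/* Worked example (illustrative values): with cm_chunkSize 0x2000 and a
 * chunk-aligned *base of 0x2000, thyper becomes 0x4000 after the add and the
 * mask leaves it at 0x4000, so the recorded prefetch window is
 * [0x2000, 0x4000).
 */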
488 if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
489 scp->prefetch.base = *base;
490 if (LargeIntegerGreaterThan(thyper, scp->prefetch.end))
491 scp->prefetch.end = thyper;
493 scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
496 /* do the prefetch */
497 void cm_BkgPrefetch(cm_scache_t *scp, long p1, long p2, long p3, long p4,
504 int cpff = 0; /* cleared prefetch flag */
508 req.flags |= CM_REQ_NORETRY;
514 osi_Log2(afsd_logp, "Starting BKG prefetch vp 0x%x, base 0x%x", scp, p1);
516 code = buf_Get(scp, &base, &bp);
518 lock_ObtainMutex(&scp->mx);
520 if (code || (bp->cmFlags & CM_BUF_CMFETCHING)) {
521 scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
522 lock_ReleaseMutex(&scp->mx);
526 code = cm_GetBuffer(scp, bp, &cpff, userp, &req);
528 cm_ClearPrefetchFlag(code, scp, &base);
529 lock_ReleaseMutex(&scp->mx);
534 /* a read was issued to offsetp, and we have to determine whether we should
537 void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp,
538 cm_user_t *userp, cm_req_t *reqp)
541 osi_hyper_t realBase;
542 osi_hyper_t readBase;
545 /* round up to chunk boundary */
546 readBase.LowPart += (cm_chunkSize-1);
547 readBase.LowPart &= (-cm_chunkSize);
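/* For example (illustrative values): with cm_chunkSize 0x2000, an offset of
 * 0x1200 becomes 0x1200 + 0x1fff = 0x31ff, which masks down to 0x2000, the
 * next chunk boundary at or above the original offset.
 */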
549 lock_ObtainMutex(&scp->mx);
550 if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
551 || LargeIntegerLessThanOrEqualTo(readBase, scp->prefetch.base)) {
552 lock_ReleaseMutex(&scp->mx);
555 scp->flags |= CM_SCACHEFLAG_PREFETCHING;
557 /* start the scan at the latter of the end of this read or
558 * the end of the last fetched region.
560 if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
561 readBase = scp->prefetch.end;
563 lock_ReleaseMutex(&scp->mx);
565 code = cm_CheckFetchRange(scp, &readBase, cm_chunkSize, userp, reqp,
568 return; /* can't find something to prefetch */
570 osi_Log2(afsd_logp, "BKG Prefetch request vp 0x%x, base 0x%x",
571 scp, realBase.LowPart);
573 cm_QueueBKGRequest(scp, cm_BkgPrefetch, realBase.LowPart,
574 realBase.HighPart, cm_chunkSize, 0, userp);
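/* The background queue only carries long parameters, so the 64-bit base is
 * split across p1 (low part) and p2 (high part) and the length rides in p3;
 * cm_BkgPrefetch is expected to reassemble them the same way cm_BkgStore does
 * above, roughly (sketch; the actual unpacking lines are not shown here):
 *
 *     base.LowPart = p1; base.HighPart = p2; length = p3;
 */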
577 /* scp must be locked; temporarily unlocked during processing.
578 * If returns 0, returns buffers held in biop, and with
579 * CM_BUF_CMSTORING set.
581 * Caller *must* set CM_BUF_WRITING and reset the over.hEvent field if the
582 * buffer is ever unlocked before CM_BUF_DIRTY is cleared. And if
583 * CM_BUF_WRITING is ever viewed by anyone, then it must be cleared, sleepers
584 * must be woken, and the event must be set when the I/O is done. All of this
585 * is required so that buf_WaitIO synchronizes properly with the buffer as it
586 * is being written out.
588 long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
589 cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
592 osi_queueData_t *qdp;
595 osi_hyper_t scanStart; /* where to start scan for dirty pages */
596 osi_hyper_t scanEnd; /* where to stop scan for dirty pages */
597 osi_hyper_t firstModOffset; /* offset of first modified page in range */
600 long flags; /* flags to cm_SyncOp */
602 /* clear things out */
603 biop->scp = scp; /* don't hold */
604 biop->offset = *inOffsetp;
606 biop->bufListp = NULL;
607 biop->bufListEndp = NULL;
610 /* reserve a chunk's worth of buffers */
611 lock_ReleaseMutex(&scp->mx);
612 buf_ReserveBuffers(cm_chunkSize / buf_bufferSize);
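/* For example, with a 32K chunk and 4K buffers (both sizes are configuration
 * dependent) this reserves 8 buffers, enough to cover every page this store
 * BIOD could collect from the chunk-sized window scanned below.
 */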
613 lock_ObtainMutex(&scp->mx);
616 for(temp = 0; temp < inSize; temp += buf_bufferSize, bufp = NULL) {
618 thyper.LowPart = temp;
619 tbase = LargeIntegerAdd(*inOffsetp, thyper);
621 bufp = buf_Find(scp, &tbase);
623 /* get buffer mutex and scp mutex safely */
624 lock_ReleaseMutex(&scp->mx);
625 lock_ObtainMutex(&bufp->mx);
626 lock_ObtainMutex(&scp->mx);
628 flags = CM_SCACHESYNC_NEEDCALLBACK
629 | CM_SCACHESYNC_GETSTATUS
630 | CM_SCACHESYNC_STOREDATA
631 | CM_SCACHESYNC_BUFLOCKED;
632 code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
634 lock_ReleaseMutex(&bufp->mx);
636 buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
640 /* if the buffer is dirty, we're done */
641 if (bufp->flags & CM_BUF_DIRTY) {
642 osi_assertx(!(bufp->flags & CM_BUF_WRITING),
643 "WRITING w/o CMSTORING in SetupStoreBIOD");
644 bufp->flags |= CM_BUF_WRITING;
648 /* this buffer is clean, so there's no reason to process it */
649 cm_SyncOpDone(scp, bufp, flags);
650 lock_ReleaseMutex(&bufp->mx);
657 /* if we get here and bufp is null, we didn't find any dirty buffers
658 * that weren't already being stored back, so we just quit now.
664 /* don't need buffer mutex any more */
665 lock_ReleaseMutex(&bufp->mx);
667 /* put this element in the list */
669 osi_SetQData(qdp, bufp);
670 /* don't have to hold bufp, since held by buf_Find above */
671 osi_QAddH((osi_queue_t **) &biop->bufListp,
672 (osi_queue_t **) &biop->bufListEndp,
674 biop->length = buf_bufferSize;
675 firstModOffset = bufp->offset;
676 biop->offset = firstModOffset;
678 /* compute the window surrounding *inOffsetp of size cm_chunkSize */
679 scanStart = *inOffsetp;
680 scanStart.LowPart &= (-cm_chunkSize);
681 thyper.LowPart = cm_chunkSize;
683 scanEnd = LargeIntegerAdd(scanStart, thyper);
685 flags = CM_SCACHESYNC_NEEDCALLBACK
686 | CM_SCACHESYNC_GETSTATUS
687 | CM_SCACHESYNC_STOREDATA
688 | CM_SCACHESYNC_BUFLOCKED
689 | CM_SCACHESYNC_NOWAIT;
691 /* start by looking backwards until scanStart */
692 thyper.HighPart = 0; /* hyper version of buf_bufferSize */
693 thyper.LowPart = buf_bufferSize;
694 tbase = LargeIntegerSubtract(firstModOffset, thyper);
695 while(LargeIntegerGreaterThanOrEqualTo(tbase, scanStart)) {
696 /* see if we can find the buffer */
697 bufp = buf_Find(scp, &tbase);
701 /* try to lock it, and quit if we can't (simplifies locking) */
702 code = lock_TryMutex(&bufp->mx);
708 code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
710 lock_ReleaseMutex(&bufp->mx);
715 if (!(bufp->flags & CM_BUF_DIRTY)) {
716 /* buffer is clean, so we shouldn't add it */
717 cm_SyncOpDone(scp, bufp, flags);
718 lock_ReleaseMutex(&bufp->mx);
723 /* don't need buffer mutex any more */
724 lock_ReleaseMutex(&bufp->mx);
726 /* we have a dirty buffer ready for storing. Add it to the tail
727 * of the list, since it immediately precedes all of the disk
728 * addresses we've already collected.
731 osi_SetQData(qdp, bufp);
732 /* no buf_hold necessary, since we have it held from buf_Find */
733 osi_QAddT((osi_queue_t **) &biop->bufListp,
734 (osi_queue_t **) &biop->bufListEndp,
737 /* update biod info describing the transfer */
738 biop->offset = LargeIntegerSubtract(biop->offset, thyper);
739 biop->length += buf_bufferSize;
741 /* update loop pointer */
742 tbase = LargeIntegerSubtract(tbase, thyper);
743 } /* while loop looking for pages preceding the one we found */
745 /* now, find later dirty, contiguous pages, and add them to the list */
746 thyper.HighPart = 0; /* hyper version of buf_bufferSize */
747 thyper.LowPart = buf_bufferSize;
748 tbase = LargeIntegerAdd(firstModOffset, thyper);
749 while(LargeIntegerLessThan(tbase, scanEnd)) {
750 /* see if we can find the buffer */
751 bufp = buf_Find(scp, &tbase);
755 /* try to lock it, and quit if we can't (simplifies locking) */
756 code = lock_TryMutex(&bufp->mx);
762 code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
764 lock_ReleaseMutex(&bufp->mx);
769 if (!(bufp->flags & CM_BUF_DIRTY)) {
770 /* buffer is clean, so we shouldn't add it */
771 cm_SyncOpDone(scp, bufp, flags);
772 lock_ReleaseMutex(&bufp->mx);
777 /* don't need buffer mutex any more */
778 lock_ReleaseMutex(&bufp->mx);
780 /* we have a dirty buffer ready for storing. Add it to the head
781 * of the list, since it immediately follows all of the disk
782 * addresses we've already collected.
785 osi_SetQData(qdp, bufp);
786 /* no buf_hold necessary, since we have it held from buf_Find */
787 osi_QAddH((osi_queue_t **) &biop->bufListp,
788 (osi_queue_t **) &biop->bufListEndp,
791 /* update biod info describing the transfer */
792 biop->length += buf_bufferSize;
794 /* update loop pointer */
795 tbase = LargeIntegerAdd(tbase, thyper);
796 } /* while loop looking for pages following the first page we found */
798 /* finally, we're done */
802 /* scp must be locked; temporarily unlocked during processing.
803 * If returns 0, returns buffers held in biop, and with
804 * CM_BUF_CMFETCHING flags set.
805 * If an error is returned, we don't return any buffers.
807 long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
808 cm_bulkIO_t *biop, cm_user_t *up, cm_req_t *reqp)
812 osi_hyper_t toffset; /* a long long temp variable */
813 osi_hyper_t pageBase; /* base offset we're looking at */
814 osi_queueData_t *qdp; /* one temp queue structure */
815 osi_queueData_t *tqdp; /* another temp queue structure */
816 long collected; /* how many bytes have been collected */
819 osi_hyper_t fileSize; /* the # of bytes in the file */
820 osi_queueData_t *heldBufListp; /* we hold all buffers in this list */
821 osi_queueData_t *heldBufListEndp; /* first buffer added (the tail of the held list) */
825 biop->offset = *offsetp;
826 /* null out the list of buffers */
827 biop->bufListp = biop->bufListEndp = NULL;
830 /* first look up the file's length, so we know when to stop */
831 code = cm_SyncOp(scp, NULL, up, reqp, 0, CM_SCACHESYNC_NEEDCALLBACK
832 | CM_SCACHESYNC_GETSTATUS);
836 /* copy out size, since it may change */
837 fileSize = scp->serverLength;
839 lock_ReleaseMutex(&scp->mx);
842 collected = pageBase.LowPart & (cm_chunkSize - 1);
844 heldBufListEndp = NULL;
847 * Obtaining buffers can cause dirty buffers to be recycled, which
848 * can cause a storeback, so cannot be done while we have buffers
851 * To get around this, we get buffers twice. Before reserving buffers,
852 * we obtain and release each one individually. After reserving
853 * buffers, we try to obtain them again, but only by lookup, not by
854 * recycling. If a buffer has gone away while we were waiting for
855 * the others, we just use whatever buffers we already have.
857 * On entry to this function, we are already holding a buffer, so we
858 * can't wait for reservation. So we call buf_TryReserveBuffers()
859 * instead. Not only that, we can't really even call buf_Get(), for
860 * the same reason. We can't avoid that, though. To avoid deadlock
861 * we allow only one thread to be executing the buf_Get()-buf_Release()
862 * sequence at a time.
865 /* first hold all buffers, since we can't hold any locks in buf_Get */
867 /* stop at chunk boundary */
868 if (collected >= cm_chunkSize) break;
870 /* see if the next page would be past EOF */
871 if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize)) break;
873 lock_ObtainMutex(&cm_bufGetMutex);
875 code = buf_Get(scp, &pageBase, &tbp);
877 lock_ReleaseMutex(&cm_bufGetMutex);
878 lock_ObtainMutex(&scp->mx);
884 lock_ReleaseMutex(&cm_bufGetMutex);
886 toffset.HighPart = 0;
887 toffset.LowPart = buf_bufferSize;
888 pageBase = LargeIntegerAdd(toffset, pageBase);
889 collected += buf_bufferSize;
892 /* reserve a chunk's worth of buffers if possible */
893 reserving = buf_TryReserveBuffers(cm_chunkSize / buf_bufferSize);
896 collected = pageBase.LowPart & (cm_chunkSize - 1);
898 /* now hold all buffers, if they are still there */
900 /* stop at chunk boundary */
901 if (collected >= cm_chunkSize)
904 /* see if the next page would be past EOF */
905 if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
908 tbp = buf_Find(scp, &pageBase);
912 /* add the buffer to the list */
914 osi_SetQData(qdp, tbp);
915 osi_QAdd((osi_queue_t **)&heldBufListp, &qdp->q);
916 if (!heldBufListEndp) heldBufListEndp = qdp;
917 /* leave tbp held (from the buf_Find above) */
922 collected += buf_bufferSize;
923 toffset.HighPart = 0;
924 toffset.LowPart = buf_bufferSize;
925 pageBase = LargeIntegerAdd(toffset, pageBase);
928 /* look at each buffer, adding it into the list if it looks idle and
929 * filled with old data. One special case: wait for idle if it is the
930 * first buffer since we really need that one for our caller to make
934 collected = 0; /* now count how many we'll really use */
935 for(tqdp = heldBufListEndp;
937 tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
938 /* get a ptr to the held buffer */
939 tbp = osi_GetQData(tqdp);
940 pageBase = tbp->offset;
942 /* now lock the buffer lock */
943 lock_ObtainMutex(&tbp->mx);
944 lock_ObtainMutex(&scp->mx);
946 /* don't bother fetching over data that is already current */
947 if (tbp->dataVersion == scp->dataVersion) {
948 /* we don't need this buffer, since it is current */
949 lock_ReleaseMutex(&scp->mx);
950 lock_ReleaseMutex(&tbp->mx);
954 flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_FETCHDATA
955 | CM_SCACHESYNC_BUFLOCKED;
957 flags |= CM_SCACHESYNC_NOWAIT;
959 /* wait for the buffer to serialize, if required. Doesn't
960 * release the scp or buffer lock(s) if NOWAIT is specified.
962 code = cm_SyncOp(scp, tbp, up, reqp, 0, flags);
964 lock_ReleaseMutex(&scp->mx);
965 lock_ReleaseMutex(&tbp->mx);
969 /* don't fetch over dirty buffers */
970 if (tbp->flags & CM_BUF_DIRTY) {
971 cm_SyncOpDone(scp, tbp, flags);
972 lock_ReleaseMutex(&scp->mx);
973 lock_ReleaseMutex(&tbp->mx);
978 lock_ReleaseMutex(&scp->mx);
979 lock_ReleaseMutex(&tbp->mx);
981 /* add the buffer to the list */
983 osi_SetQData(qdp, tbp);
984 osi_QAdd((osi_queue_t **)&biop->bufListp, &qdp->q);
985 if (!biop->bufListEndp)
986 biop->bufListEndp = qdp;
989 /* from now on, a failure just stops our collection process, but
990 * we still do the I/O to whatever we've already managed to collect.
993 collected += buf_bufferSize;
996 /* now, we've held in biop->bufListp all the buffers we're really
997 * interested in. We also have holds left from heldBufListp, and we
998 * now release those holds on the buffers.
1000 for(qdp = heldBufListp; qdp; qdp = tqdp) {
1001 tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
1002 tbp = osi_GetQData(qdp);
1007 /* Caller expects this */
1008 lock_ObtainMutex(&scp->mx);
1010 /* if we got a failure setting up the first buffer, then we don't have
1011 * any side effects yet, and we also have failed an operation that the
1012 * caller requires to make any progress. Give up now.
1014 if (code && isFirst) {
1015 buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
1019 /* otherwise, we're still OK, and should just return the I/O setup we've
1022 biop->length = collected;
1023 biop->reserved = reserving;
1027 /* release a bulk I/O structure that was set up by cm_SetupFetchBIOD or by
1030 void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore)
1034 osi_queueData_t *qdp;
1035 osi_queueData_t *nqdp;
1038 /* Give back reserved buffers */
1040 buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
1042 flags = CM_SCACHESYNC_NEEDCALLBACK;
1044 flags |= CM_SCACHESYNC_STOREDATA;
1046 flags |= CM_SCACHESYNC_FETCHDATA;
1049 for(qdp = biop->bufListp; qdp; qdp = nqdp) {
1050 /* lookup next guy first, since we're going to free this one */
1051 nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
1053 /* extract buffer and free queue data */
1054 bufp = osi_GetQData(qdp);
1057 /* now, mark I/O as done, unlock the buffer and release it */
1058 lock_ObtainMutex(&bufp->mx);
1059 lock_ObtainMutex(&scp->mx);
1060 cm_SyncOpDone(scp, bufp, flags);
1061 lock_ReleaseMutex(&scp->mx);
1063 /* turn off writing and wake up users */
1065 if (bufp->flags & CM_BUF_WAITING) {
1066 osi_Wakeup((long) bufp);
1068 bufp->flags &= ~(CM_BUF_WAITING | CM_BUF_WRITING
1072 lock_ReleaseMutex(&bufp->mx);
1076 /* clean things out */
1077 biop->bufListp = NULL;
1078 biop->bufListEndp = NULL;
1081 /* Fetch a buffer. Called with scp locked.
1082 * The scp is locked on return.
1084 long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *up,
1088 long nbytes; /* bytes in transfer */
1089 long rbytes; /* bytes in rx_Read call */
1091 AFSFetchStatus afsStatus;
1092 AFSCallBack callback;
1095 cm_buf_t *tbufp; /* buf we're filling */
1096 osi_queueData_t *qdp; /* q element we're scanning */
1098 struct rx_call *callp;
1099 cm_bulkIO_t biod; /* bulk IO descriptor */
1104 /* now, the buffer may or may not be filled with good data (buf_GetNew
1105 * drops lots of locks, and may indeed return a properly initialized
1106 * buffer, although more likely it will just return a new, empty buffer.
1109 #ifdef AFS_FREELANCE_CLIENT
1111 // yj: if they're trying to get the /afs directory, we need to
1112 // handle it differently, since it's local rather than on any
1115 getroot = (scp == cm_rootSCachep);
1118 cm_AFSFidFromFid(&tfid, &scp->fid);
1120 code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, up, reqp);
1122 /* couldn't even get the first page setup properly */
1123 osi_Log1(afsd_logp, "SetupFetchBIOD failure code %d", code);
1127 /* once we get here, we have the callback in place, and we know that no one
1128 * else is fetching the data now. Check one last time that we still have
1129 * the wrong data, and then fetch it if we're still wrong.
1131 * We can lose a race condition and end up with biod.length zero, in
1132 * which case we just retry.
1134 if (bufp->dataVersion == scp->dataVersion || biod.length == 0) {
1135 osi_Log3(afsd_logp, "Bad DVs %d, %d or length 0x%x",
1136 bufp->dataVersion, scp->dataVersion, biod.length);
1137 if ((bufp->dataVersion == -1
1138 || bufp->dataVersion < scp->dataVersion)
1139 && LargeIntegerGreaterThanOrEqualTo(bufp->offset,
1140 scp->serverLength)) {
1141 if (bufp->dataVersion == -1)
1142 memset(bufp->datap, 0, buf_bufferSize);
1143 bufp->dataVersion = scp->dataVersion;
1145 lock_ReleaseMutex(&scp->mx);
1146 cm_ReleaseBIOD(&biod, 0);
1147 lock_ObtainMutex(&scp->mx);
1151 lock_ReleaseMutex(&scp->mx);
1154 DPRINTF("cm_GetBuffer: fetching data scpDV=%d bufDV=%d scp=%x bp=%x dcp=%x\n",
1155 scp->dataVersion, bufp->dataVersion, scp, bufp, bufp->dcp);
1156 #endif /* DISKCACHE95 */
1158 #ifdef AFS_FREELANCE_CLIENT
1161 // if getroot then we don't need to make any calls
1162 // just return fake data
1164 if (cm_freelanceEnabled && getroot) {
1165 // setup the fake status
1166 afsStatus.InterfaceVersion = 0x1;
1167 afsStatus.FileType = 0x2;
1168 afsStatus.LinkCount = scp->linkCount;
1169 afsStatus.Length = cm_fakeDirSize;
1170 afsStatus.DataVersion = cm_fakeDirVersion;
1171 afsStatus.Author = 0x1;
1172 afsStatus.Owner = 0x0;
1173 afsStatus.CallerAccess = 0x9;
1174 afsStatus.AnonymousAccess = 0x9;
1175 afsStatus.UnixModeBits = 0x1ff;
1176 afsStatus.ParentVnode = 0x1;
1177 afsStatus.ParentUnique = 0x1;
1178 afsStatus.ResidencyMask = 0;
1179 afsStatus.ClientModTime = 0x3b49f6e2;
1180 afsStatus.ServerModTime = 0x3b49f6e2;
1181 afsStatus.Group = 0;
1182 afsStatus.SyncCounter = 0;
1183 afsStatus.dataVersionHigh = 0;
1185 // once we're done setting up the status info,
1186 // we just fill the buffer pages with fake data
1187 // from cm_FakeRootDir. Extra pages are set to
1190 lock_ObtainMutex(&cm_Freelance_Lock);
1192 afsi_log("bufp->offset is %d", bufp->offset);
1194 t1 = bufp->offset.LowPart;
1195 qdp = biod.bufListEndp;
1197 tbufp = osi_GetQData(qdp);
1198 bufferp=tbufp->datap;
1199 memset(bufferp, 0, buf_bufferSize);
1200 t2 = cm_fakeDirSize - t1;
1201 if (t2>buf_bufferSize) t2=buf_bufferSize;
1203 afsi_log("t1:%d, t2:%d", t1, t2);
1206 memcpy(bufferp, cm_FakeRootDir+t1, t2);
1211 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
1214 lock_ReleaseMutex(&cm_Freelance_Lock);
1216 // once we're done, we skip over the part of the
1217 // code that does the ACTUAL fetching of data for
1220 goto fetchingcompleted;
1223 #endif /* AFS_FREELANCE_CLIENT */
1225 /* now make the call */
1227 code = cm_Conn(&scp->fid, up, reqp, &connp);
1231 callp = rx_NewCall(connp->callp);
1233 osi_Log3(afsd_logp, "CALL FetchData vp %x, off 0x%x, size 0x%x",
1234 (long) scp, biod.offset.LowPart, biod.length);
1236 code = StartRXAFS_FetchData(callp, &tfid, biod.offset.LowPart,
1239 /* now copy the data out of the pipe and put it in the buffer */
1240 temp = rx_Read(callp, (char *) &nbytes, 4);
1242 nbytes = ntohl(nbytes);
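/* The transfer length arrives as a 4-byte value in network byte order ahead
 * of the data, so a server about to send 0x2000 bytes transmits the octets
 * 00 00 20 00 first; ntohl() converts that to host order.
 */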
1243 if (nbytes > biod.length)
1244 code = (callp->error < 0) ? callp->error : -1;
1247 code = (callp->error < 0) ? callp->error : -1;
1250 qdp = biod.bufListEndp;
1252 tbufp = osi_GetQData(qdp);
1253 bufferp = tbufp->datap;
1257 /* fill nbytes of data from the pipe into the pages.
1258 * When we stop, qdp will point at the last page we're
1259 * dealing with, and bufferp will tell us where we
1260 * stopped. We'll need this info below when we clear
1261 * the remainder of the last page out (and potentially
1262 * clear later pages out, if we fetch past EOF).
1265 /* assert that there are still more buffers;
1266 * our check above for nbytes being less than
1267 * biod.length should ensure this.
1269 osi_assert(bufferp != NULL);
1271 /* read rbytes of data */
1272 rbytes = (nbytes > buf_bufferSize? buf_bufferSize : nbytes);
1273 temp = rx_Read(callp, bufferp, rbytes);
1274 if (temp < rbytes) {
1275 code = (callp->error < 0) ? callp->error : -1;
1279 /* allow read-while-fetching.
1280 * if this is the last buffer, clear the
1281 * PREFETCHING flag, so the reader waiting for
1282 * this buffer will start a prefetch.
1284 tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
1285 lock_ObtainMutex(&scp->mx);
1286 if (scp->flags & CM_SCACHEFLAG_WAITING) {
1287 scp->flags &= ~CM_SCACHEFLAG_WAITING;
1288 osi_Wakeup((long) &scp->flags);
1290 if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
1292 cm_ClearPrefetchFlag(0, scp, &biod.offset);
1294 lock_ReleaseMutex(&scp->mx);
1296 /* and adjust counters */
1299 /* and move to the next buffer */
1301 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
1303 tbufp = osi_GetQData(qdp);
1304 bufferp = tbufp->datap;
1312 /* zero out remainder of last pages, in case we are
1313 * fetching past EOF. We were fetching an integral #
1314 * of pages, but stopped, potentially in the middle of
1315 * a page. Zero the remainder of that page, and then
1316 * all of the rest of the pages.
1319 rbytes = bufferp - tbufp->datap;
1320 /* bytes left to zero */
1321 rbytes = buf_bufferSize - rbytes;
1324 memset(bufferp, 0, rbytes);
1325 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
1328 tbufp = osi_GetQData(qdp);
1329 bufferp = tbufp->datap;
1330 /* bytes to clear in this page */
1331 rbytes = buf_bufferSize;
1336 code = EndRXAFS_FetchData(callp, &afsStatus, &callback, &volSync);
1338 osi_Log0(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error");
1339 code = rx_EndCall(callp, code);
1340 if (code == RXKADUNKNOWNKEY)
1341 osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");
1342 osi_Log0(afsd_logp, "CALL FetchData DONE");
1344 } while (cm_Analyze(connp, up, reqp, &scp->fid, &volSync, NULL, code));
1347 code = cm_MapRPCError(code, reqp);
1349 lock_ObtainMutex(&scp->mx);
1350 /* we know that no one else has changed the buffer, since we still have
1351 * the fetching flag on the buffers, and we have the scp locked again.
1352 * Copy the version # into the buffers if we got code 0 back from the
1356 for(qdp = biod.bufListp;
1358 qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
1359 tbufp = osi_GetQData(qdp);
1360 tbufp->dataVersion = afsStatus.DataVersion;
1363 /* write buffer out to disk cache */
1364 diskcache_Update(tbufp->dcp, tbufp->datap, buf_bufferSize,
1365 tbufp->dataVersion);
1366 #endif /* DISKCACHE95 */
1370 /* release scatter/gather I/O structure (buffers, locks) */
1371 lock_ReleaseMutex(&scp->mx);
1372 cm_ReleaseBIOD(&biod, 0);
1373 lock_ObtainMutex(&scp->mx);
1376 cm_MergeStatus(scp, &afsStatus, &volSync, up, 0);