/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afs/param.h>
extern void afsi_log(char *pattern, ...);

osi_mutex_t cm_bufGetMutex;

#ifdef AFS_FREELANCE_CLIENT
extern osi_mutex_t cm_Freelance_Lock;
#endif
#ifdef AFS_LARGEFILES
/* we can access connp->serverp without holding a lock because it
 * never changes once the connection is made. */
#define SERVERHAS64BIT(connp) (!((connp)->serverp->flags & CM_SERVERFLAG_NO64BIT))
#define SET_SERVERHASNO64BIT(connp) (cm_SetServerNo64Bit((connp)->serverp, TRUE))
#else
#define SERVERHAS64BIT(connp) (FALSE)
#define SET_SERVERHASNO64BIT(connp) (FALSE)
#endif
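
/* Usage sketch (illustrative, not in the original source): callers below
 * first try the 64-bit RPC, and on RXGEN_OPCODE record that the server
 * lacks 64-bit support and retry with the 32-bit variant:
 *
 *     if (SERVERHAS64BIT(connp))
 *         code = StartRXAFS_FetchData64(...);
 *     ...
 *     if (code == RXGEN_OPCODE && SERVERHAS64BIT(connp)) {
 *         SET_SERVERHASNO64BIT(connp);
 *         continue;        // retry the loop with the 32-bit RPC
 *     }
 */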
/* functions called back from the buffer package when reading or writing data,
 * or when holding or releasing a vnode pointer.
 */
long cm_BufWrite(void *vfidp, osi_hyper_t *offsetp, long length, long flags,
                 cm_user_t *userp, cm_req_t *reqp)
{
    /* store the data back from this buffer; the buffer is locked and held,
     * but the vnode involved isn't locked, yet.  It is held by its
     * reference from the buffer, which won't change until the buffer is
     * released by our caller.  Thus, we don't have to worry about holding
     * bufp->scp.
     */
    cm_fid_t *fidp = vfidp;
    cm_scache_t *scp;
    long code;
    long nbytes;
    AFSFetchStatus outStatus;
    AFSStoreStatus inStatus;
    osi_hyper_t thyper;
    osi_hyper_t t;
    osi_hyper_t truncPos;
    struct rx_call *callp;
    struct rx_connection *rxconnp;
    AFSVolSync volSync;
    AFSFid tfid;
    cm_conn_t *connp;
    osi_queueData_t *qdp;
    cm_buf_t *bufp;
    long wbytes;
    long temp;
    char *bufferp;
    cm_bulkIO_t biod;		/* bulk IO descriptor */
    int require_64bit_ops = 0;
    osi_assert(userp != NULL);

    /* now, the buffer may or may not be filled with good data (buf_GetNew
     * drops lots of locks, and may indeed return a properly initialized
     * buffer, although more likely it will just return a new, empty buffer).
     */
    scp = cm_FindSCache(fidp);
    if (scp == NULL)
        return CM_ERROR_NOSUCHFILE;	/* shouldn't happen */
    cm_AFSFidFromFid(&tfid, fidp);

    lock_ObtainMutex(&scp->mx);
    if (scp->flags & CM_SCACHEFLAG_DELETED) {
        lock_ReleaseMutex(&scp->mx);
        cm_ReleaseSCache(scp);
        return CM_ERROR_NOSUCHFILE;
    }
    code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
    if (code) {
        osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
        lock_ReleaseMutex(&scp->mx);
        cm_ReleaseSCache(scp);
        return code;
    }

    if (biod.length == 0) {
        osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
        lock_ReleaseMutex(&scp->mx);
        cm_ReleaseBIOD(&biod, 1);	/* should be a NOOP */
        cm_ReleaseSCache(scp);
        return 0;
    }
    /* Serialize StoreData RPC's; for rationale see cm_scache.c */
    (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);

    /* prepare the input status for the store */
    scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
    cm_StatusFromAttr(&inStatus, scp, NULL);
    truncPos = scp->length;
    if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
        && LargeIntegerLessThan(scp->truncPos, truncPos))
        truncPos = scp->truncPos;
    scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
    /* compute how many bytes to write from this buffer */
    thyper = LargeIntegerSubtract(scp->length, biod.offset);
    if (LargeIntegerLessThanZero(thyper)) {
        /* entire buffer is past EOF */
        nbytes = 0;
    }
    else {
        /* otherwise write out part of buffer before EOF, but not
         * more than bufferSize bytes.
         */
        if (LargeIntegerGreaterThan(thyper,
                                    ConvertLongToLargeInteger(biod.length))) {
            nbytes = biod.length;
        } else {
            /* if thyper is less than or equal to biod.length, then we
             * can safely assume that the value fits in a long. */
            nbytes = thyper.LowPart;
        }
    }
    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
                                                ConvertLongToLargeInteger(nbytes)),
                                ConvertLongToLargeInteger(LONG_MAX)) ||
        LargeIntegerGreaterThan(truncPos,
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }
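
    /* Illustrative sketch (not in the original source): the test above,
     * written as a stand-alone predicate using the same osi_hyper_t
     * helpers; 64-bit RPCs are needed whenever the end of the write or
     * the truncation position does not fit in 32 bits:
     *
     *     static int cm_RequireStoreData64(osi_hyper_t offset, long nbytes,
     *                                      osi_hyper_t truncPos)
     *     {
     *         osi_hyper_t end = LargeIntegerAdd(offset,
     *                                           ConvertLongToLargeInteger(nbytes));
     *         osi_hyper_t max32 = ConvertLongToLargeInteger(LONG_MAX);
     *
     *         return LargeIntegerGreaterThan(end, max32) ||
     *                LargeIntegerGreaterThan(truncPos, max32);
     *     }
     *
     * cm_StoreMini below applies the same truncPos check.
     */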
    lock_ReleaseMutex(&scp->mx);

    /* now we're ready to do the store operation */
    do {
        code = cm_Conn(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

        rxconnp = cm_GetRxConn(connp);
        callp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);
#ifdef AFS_LARGEFILES
        if (SERVERHAS64BIT(connp)) {
            osi_Log4(afsd_logp, "CALL StoreData64 scp 0x%p, offset 0x%x:%08x, length 0x%x",
                     scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

            code = StartRXAFS_StoreData64(callp, &tfid, &inStatus,
                                          biod.offset.QuadPart,
                                          nbytes,
                                          truncPos.QuadPart);
        } else {
            if (require_64bit_ops) {
                osi_Log0(afsd_logp, "Skipping StoreData. The operation requires StoreData64");
                code = CM_ERROR_TOOBIG;
            } else {
                osi_Log4(afsd_logp, "CALL StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
                         scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

                code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
                                            biod.offset.LowPart, nbytes, truncPos.LowPart);
            }
        }
#else
        osi_Log4(afsd_logp, "CALL StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
                 scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

        code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
                                    biod.offset.LowPart, nbytes, truncPos.LowPart);
#endif /* AFS_LARGEFILES */
        /* write the data from the list of buffers */
        if (code == 0) {
            while (nbytes > 0) {
                if (qdp == NULL)
                    qdp = biod.bufListEndp;
                else
                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                osi_assert(qdp != NULL);
                bufp = osi_GetQData(qdp);
                bufferp = bufp->datap;
                wbytes = nbytes;
                if (wbytes > cm_data.buf_blockSize)
                    wbytes = cm_data.buf_blockSize;

                /* write out wbytes of data from bufferp */
                temp = rx_Write(callp, bufferp, wbytes);
                if (temp != wbytes) {
                    osi_Log3(afsd_logp, "rx_Write failed bp 0x%p, %d != %d", bufp, temp, wbytes);
                    code = -1;
                    break;
                } else {
                    osi_Log2(afsd_logp, "rx_Write succeeded bp 0x%p, %d", bufp, temp);
                }
                nbytes -= wbytes;
            }	/* while more bytes to write */
        }	/* if RPC started successfully */
        else {
            osi_Log2(afsd_logp, "StartRXAFS_StoreData?? scp 0x%p failed (%lX)", scp, code);
        }

        if (code == 0) {
            if (SERVERHAS64BIT(connp)) {
                code = EndRXAFS_StoreData64(callp, &outStatus, &volSync);
                if (code)
                    osi_Log2(afsd_logp, "EndRXAFS_StoreData64 scp 0x%p failed (%lX)", scp, code);
            } else {
                code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
                if (code)
                    osi_Log2(afsd_logp, "EndRXAFS_StoreData scp 0x%p failed (%lX)", scp, code);
            }
        }

        code = rx_EndCall(callp, code);
#ifdef AFS_LARGEFILES
        if (code == RXGEN_OPCODE && SERVERHAS64BIT(connp)) {
            SET_SERVERHASNO64BIT(connp);
            continue;
        }
#endif

    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));

    code = cm_MapRPCError(code, reqp);

    if (code)
        osi_Log2(afsd_logp, "CALL StoreData FAILURE scp 0x%p, code 0x%x", scp, code);
    else
        osi_Log1(afsd_logp, "CALL StoreData SUCCESS scp 0x%p", scp);
    /* now, clean up our state */
    lock_ObtainMutex(&scp->mx);

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
    if (code == 0) {
        /* now, here's something a little tricky: in AFS 3, a dirty
         * length can't be directly stored; instead, a dirty chunk is
         * stored that sets the file's size (by writing and by using
         * the truncate-first option in the store call).
         *
         * At this point, we've just finished a store, and so the trunc
         * pos field is clean.  If the file's size at the server is at
         * least as big as we think it should be, then we turn off the
         * length dirty bit, since all the other dirty buffers must
         * precede this one in the file.
         *
         * The file's desired size shouldn't be smaller than what's
         * stored at the server now, since we just did the trunc pos
         * store.
         *
         * We have to turn off the length dirty bit as soon as we can,
         * so that we see updates made by other machines.
         */
        if (SERVERHAS64BIT(connp)) {
            t.LowPart = outStatus.Length;
            t.HighPart = outStatus.Length_hi;
        } else {
            t = ConvertLongToLargeInteger(outStatus.Length);
        }

        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
            scp->mask &= ~CM_SCACHEMASK_LENGTH;

        cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
    } else {
        if (code == CM_ERROR_SPACE)
            scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
        else if (code == CM_ERROR_QUOTA)
            scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
    }

    lock_ReleaseMutex(&scp->mx);
    cm_ReleaseBIOD(&biod, 1);
    cm_ReleaseSCache(scp);

    return code;
}
/*
 * Truncate the file, by sending a StoreData RPC with zero length.
 *
 * Called with scp locked.  Releases and re-obtains the lock.
 */
long cm_StoreMini(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    AFSFetchStatus outStatus;
    AFSStoreStatus inStatus;
    AFSVolSync volSync;
    AFSFid tfid;
    osi_hyper_t truncPos;
    osi_hyper_t t;
    cm_conn_t *connp;
    struct rx_call *callp;
    struct rx_connection *rxconnp;
    int require_64bit_ops = 0;
    /* Serialize StoreData RPC's; for rationale see cm_scache.c */
    (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_STOREDATA_EXCL);

    /* prepare the input status for the store */
    inStatus.Mask = AFS_SETMODTIME;
    inStatus.ClientModTime = scp->clientModTime;
    scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;

    /* calculate truncation position */
    truncPos = scp->length;
    if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
        && LargeIntegerLessThan(scp->truncPos, truncPos))
        truncPos = scp->truncPos;
    scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;

    if (LargeIntegerGreaterThan(truncPos,
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }

    lock_ReleaseMutex(&scp->mx);
    cm_AFSFidFromFid(&tfid, &scp->fid);

    /* now we're ready to do the store operation */
    do {
        code = cm_Conn(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

        rxconnp = cm_GetRxConn(connp);
        callp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);
#ifdef AFS_LARGEFILES
        if (SERVERHAS64BIT(connp)) {
            code = StartRXAFS_StoreData64(callp, &tfid, &inStatus,
                                          0, 0, truncPos.QuadPart);
        } else {
            if (require_64bit_ops) {
                code = CM_ERROR_TOOBIG;
            } else {
                code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
                                            0, 0, truncPos.LowPart);
            }
        }
#else
        code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
                                    0, 0, truncPos.LowPart);
#endif /* AFS_LARGEFILES */

        if (code == 0) {
            if (SERVERHAS64BIT(connp))
                code = EndRXAFS_StoreData64(callp, &outStatus, &volSync);
            else
                code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
        }

        code = rx_EndCall(callp, code);
#ifdef AFS_LARGEFILES
        if (code == RXGEN_OPCODE && SERVERHAS64BIT(connp)) {
            SET_SERVERHASNO64BIT(connp);
            continue;
        }
#endif

    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
    code = cm_MapRPCError(code, reqp);
    /* now, clean up our state */
    lock_ObtainMutex(&scp->mx);

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
    if (code == 0) {
        /*
         * For explanation of handling of CM_SCACHEMASK_LENGTH,
         * see cm_BufWrite().
         */
        if (SERVERHAS64BIT(connp)) {
            t.HighPart = outStatus.Length_hi;
            t.LowPart = outStatus.Length;
        } else {
            t = ConvertLongToLargeInteger(outStatus.Length);
        }

        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
            scp->mask &= ~CM_SCACHEMASK_LENGTH;
        cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
    }

    return code;
}
long cm_BufRead(cm_buf_t *bufp, long nbytes, long *bytesReadp, cm_user_t *userp)
{
    *bytesReadp = cm_data.buf_blockSize;

    /* now return a code that means that I/O is done */
    return 0;
}
/* stabilize scache entry, and return with it locked so
 * it stays stable.
 */
long cm_BufStabilize(void *parmp, cm_user_t *userp, cm_req_t *reqp)
{
    cm_scache_t *scp = parmp;
    long code;

    lock_ObtainMutex(&scp->mx);
    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
    if (code) {
        lock_ReleaseMutex(&scp->mx);
        return code;
    }

    return 0;
}
/* undoes the work that cm_BufStabilize does: releases lock so things can change again */
long cm_BufUnstabilize(void *parmp, cm_user_t *userp)
{
    cm_scache_t *scp = parmp;

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);

    lock_ReleaseMutex(&scp->mx);

    /* always succeeds */
    return 0;
}
cm_buf_ops_t cm_bufOps = {
    cm_BufWrite,
    cm_BufRead,
    cm_BufStabilize,
    cm_BufUnstabilize
};
long cm_ValidateDCache(void)
{
    return buf_ValidateBuffers();
}

long cm_ShutdownDCache(void)
{
    return 0;
}

int cm_InitDCache(int newFile, long chunkSize, afs_uint64 nbuffers)
{
    lock_InitializeMutex(&cm_bufGetMutex, "buf_Get mutex");
    return buf_Init(newFile, &cm_bufOps, nbuffers);
}
/* check to see if we have an up-to-date buffer.  The buffer must have
 * previously been obtained by calling buf_Get.
 *
 * Make sure we have a callback, and that the dataversion matches.
 *
 * Scp must be locked.
 *
 * Bufp *may* be locked.
 */
int cm_HaveBuffer(cm_scache_t *scp, cm_buf_t *bufp, int isBufLocked)
{
    int code;

    if (!cm_HaveCallback(scp))
        return 0;
    if ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
        return 1;
    if (bufp->dataVersion == scp->dataVersion)
        return 1;
    if (!isBufLocked) {
        code = lock_TryMutex(&bufp->mx);
        if (code == 0) {
            /* don't have the lock, and can't lock it, then
             * return failure, since we can't tell whether the
             * buffer is current.
             */
            return 0;
        }
    }

    /* remember dirty flag for later */
    code = bufp->flags & CM_BUF_DIRTY;

    /* release lock if we obtained it here */
    if (!isBufLocked)
        lock_ReleaseMutex(&bufp->mx);

    /* if buffer was dirty, buffer is acceptable for use */
    return (code != 0);
}
/* used when deciding whether to do a prefetch or not */
long cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, long length,
                        cm_user_t *up, cm_req_t *reqp, osi_hyper_t *realBasep)
{
    long code;
    osi_hyper_t tbase;
    osi_hyper_t toffset;
    cm_buf_t *bp;
    int stop;
    /* now scan all buffers in the range, looking for any that look like
     * they need work.
     */
    tbase = *startBasep;
    stop = 0;

    lock_ObtainMutex(&scp->mx);
    while (length > 0) {
        /* get callback so we can do a meaningful dataVersion comparison */
        code = cm_SyncOp(scp, NULL, up, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
        if (code) {
            scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
            lock_ReleaseMutex(&scp->mx);
            return code;
        }
        if (LargeIntegerGreaterThanOrEqualTo(tbase, scp->length)) {
            /* we're past the end of file */
            break;
        }

        bp = buf_Find(scp, &tbase);
        /* We cheat slightly by not locking the bp mutex. */
        if (bp) {
            if ((bp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)) == 0
                && bp->dataVersion != scp->dataVersion)
                stop = 1;
            else
                stop = 0;
            buf_Release(bp);
        }
        else
            stop = 1;

        /* if this buffer is essentially guaranteed to require a fetch,
         * break out here and return this position.
         */
        if (stop)
            break;

        toffset.LowPart = cm_data.buf_blockSize;
        toffset.HighPart = 0;
        tbase = LargeIntegerAdd(toffset, tbase);
        length -= cm_data.buf_blockSize;
    }
    /* if we get here, either everything is fine or 'stop' stopped us at a
     * particular buffer in the range that definitely needs to be fetched.
     */
    if (stop == 0) {
        /* return non-zero code since realBasep won't be valid */
        scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
        code = -1;
    }
    else {
        /* successfully found a page that will need fetching */
        *realBasep = tbase;
        code = 0;
    }
    lock_ReleaseMutex(&scp->mx);
    return code;
}
void cm_BkgStore(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
                 cm_user_t *userp)
{
    osi_hyper_t toffset;
    long length;
    long code = 0;
    cm_req_t req;

    if (scp->flags & CM_SCACHEFLAG_DELETED) {
        osi_Log4(afsd_logp, "Skipping BKG store - Deleted scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
    } else {
        cm_InitReq(&req);

#ifdef NO_BKG_RETRIES
        req.flags |= CM_REQ_NORETRY;
#endif

        toffset.LowPart = p1;
        toffset.HighPart = p2;
        length = p3;

        osi_Log4(afsd_logp, "Starting BKG store scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);

        code = cm_BufWrite(&scp->fid, &toffset, length, /* flags */ 0, userp, &req);
    }

    lock_ObtainMutex(&scp->mx);
    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
    lock_ReleaseMutex(&scp->mx);
}
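
/* Illustrative note (not in the original source): background stores are
 * queued with the 64-bit file offset split across the first two 32-bit
 * parameters, low word in p1 and high word in p2, matching the reassembly
 * into toffset above, e.g.:
 *
 *     cm_QueueBKGRequest(scp, cm_BkgStore, offset.LowPart,
 *                        offset.HighPart, length, 0, userp);
 *
 * cm_ConsiderPrefetch below queues cm_BkgPrefetch the same way.
 */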
/* Called with scp locked */
void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base)
{
    osi_hyper_t thyper;

    if (code == 0) {
        thyper.LowPart = cm_chunkSize;
        thyper.HighPart = 0;
        thyper = LargeIntegerAdd(*base, thyper);
        thyper.LowPart &= (-cm_chunkSize);
        if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
            scp->prefetch.base = *base;
        if (LargeIntegerGreaterThan(thyper, scp->prefetch.end))
            scp->prefetch.end = thyper;
    }
    scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
}
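
/* Worked example (illustrative, not in the original source): assuming
 * cm_chunkSize = 0x100000 (1 MB) and *base = 0x00234567, the code above
 * computes 0x00234567 + 0x100000 = 0x00334567, then masks with
 * -cm_chunkSize (0xFFF00000) to get 0x00300000 -- the end of the chunk
 * containing *base -- so the recorded prefetch window always ends on a
 * chunk boundary.
 */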
/* do the prefetch */
void cm_BkgPrefetch(cm_scache_t *scp, long p1, long p2, long p3, long p4,
                    cm_user_t *userp)
{
    long length;
    osi_hyper_t base;
    long code;
    cm_buf_t *bp;
    int cpff = 0;		/* cleared prefetch flag */
    cm_req_t req;

    cm_InitReq(&req);
    req.flags |= CM_REQ_NORETRY;

    base.LowPart = p1;
    base.HighPart = p2;
    length = p3;

    osi_Log2(afsd_logp, "Starting BKG prefetch scp 0x%p, base 0x%x", scp, p1);

    code = buf_Get(scp, &base, &bp);

    lock_ObtainMutex(&scp->mx);

    if (code || (bp->cmFlags & CM_BUF_CMFETCHING)) {
        scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
        lock_ReleaseMutex(&scp->mx);
        return;
    }

    code = cm_GetBuffer(scp, bp, &cpff, userp, &req);
    if (!cpff)
        cm_ClearPrefetchFlag(code, scp, &base);
    lock_ReleaseMutex(&scp->mx);
    buf_Release(bp);
}
/* a read was issued to offsetp, and we have to determine whether we should
 * do a prefetch.
 */
void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp,
                         cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    osi_hyper_t realBase;
    osi_hyper_t readBase;

    readBase = *offsetp;
    /* round up to chunk boundary */
    readBase.LowPart += (cm_chunkSize-1);
    readBase.LowPart &= (-cm_chunkSize);
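
    /* Worked example (illustrative, not in the original source): assuming
     * cm_chunkSize = 0x100000 (1 MB), a read at 0x00234567 becomes
     * 0x00334566 after adding cm_chunkSize-1 and 0x00300000 after the
     * mask -- i.e. the offset is rounded up to the next chunk boundary,
     * while an already-aligned offset is left unchanged.
     */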
    lock_ObtainMutex(&scp->mx);
    if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
        || LargeIntegerLessThanOrEqualTo(readBase, scp->prefetch.base)) {
        lock_ReleaseMutex(&scp->mx);
        return;
    }
    scp->flags |= CM_SCACHEFLAG_PREFETCHING;

    /* start the scan at the latter of the end of this read or
     * the end of the last fetched region.
     */
    if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
        readBase = scp->prefetch.end;

    lock_ReleaseMutex(&scp->mx);
    code = cm_CheckFetchRange(scp, &readBase, cm_chunkSize, userp, reqp,
                              &realBase);
    if (code)
        return;	/* can't find something to prefetch */

    osi_Log2(afsd_logp, "BKG Prefetch request scp 0x%p, base 0x%x",
             scp, realBase.LowPart);

    cm_QueueBKGRequest(scp, cm_BkgPrefetch, realBase.LowPart,
                       realBase.HighPart, cm_chunkSize, 0, userp);
}
/* scp must be locked; temporarily unlocked during processing.
 * If returns 0, returns buffers held in biop, and with
 * CM_BUF_CMSTORING set.
 *
 * Caller *must* set CM_BUF_WRITING and reset the over.hEvent field if the
 * buffer is ever unlocked before CM_BUF_DIRTY is cleared.  And if
 * CM_BUF_WRITING is ever viewed by anyone, then it must be cleared, sleepers
 * must be woken, and the event must be set when the I/O is done.  All of this
 * is required so that buf_WaitIO synchronizes properly with the buffer as it
 * is being written out.
 */
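
/* Illustrative note (not in the original source): the matching cleanup
 * lives in cm_ReleaseBIOD() below -- for a store it clears CM_BUF_WRITING
 * and CM_BUF_DIRTY and wakes any CM_BUF_WAITING sleepers once the RPC has
 * completed.
 */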
long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
                       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
{
    cm_buf_t *bufp;
    osi_queueData_t *qdp;
    osi_hyper_t thyper;
    osi_hyper_t tbase;
    osi_hyper_t scanStart;		/* where to start scan for dirty pages */
    osi_hyper_t scanEnd;		/* where to stop scan for dirty pages */
    osi_hyper_t firstModOffset;		/* offset of first modified page in range */
    long temp;
    long code;
    long flags;				/* flags to cm_SyncOp */

    /* clear things out */
    biop->scp = scp;			/* don't hold */
    biop->offset = *inOffsetp;
    biop->length = 0;
    biop->bufListp = NULL;
    biop->bufListEndp = NULL;
    biop->reserved = 0;
    /* reserve a chunk's worth of buffers */
    lock_ReleaseMutex(&scp->mx);
    buf_ReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
    lock_ObtainMutex(&scp->mx);
    bufp = NULL;
    for (temp = 0; temp < inSize; temp += cm_data.buf_blockSize) {
        thyper.HighPart = 0;
        thyper.LowPart = temp;
        tbase = LargeIntegerAdd(*inOffsetp, thyper);

        bufp = buf_Find(scp, &tbase);
        if (bufp) {
            /* get buffer mutex and scp mutex safely */
            lock_ReleaseMutex(&scp->mx);
            lock_ObtainMutex(&bufp->mx);
            lock_ObtainMutex(&scp->mx);

            flags = CM_SCACHESYNC_NEEDCALLBACK
                | CM_SCACHESYNC_GETSTATUS
                | CM_SCACHESYNC_STOREDATA
                | CM_SCACHESYNC_BUFLOCKED;
            code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
            if (code) {
                lock_ReleaseMutex(&bufp->mx);
                buf_Release(bufp);
                buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
                return code;
            }
            /* if the buffer is dirty, we're done */
            if (bufp->flags & CM_BUF_DIRTY) {
                osi_assertx(!(bufp->flags & CM_BUF_WRITING),
                            "WRITING w/o CMSTORING in SetupStoreBIOD");
                bufp->flags |= CM_BUF_WRITING;
                break;
            }

            /* this buffer is clean, so there's no reason to process it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
        }
    }
    /* if we get here, if bufp is null, we didn't find any dirty buffers
     * that weren't already being stored back, so we just quit now.
     */
    if (!bufp) {
        return 0;
    }

    /* don't need buffer mutex any more */
    lock_ReleaseMutex(&bufp->mx);
    /* put this element in the list */
    qdp = osi_QDAlloc();
    osi_SetQData(qdp, bufp);
    /* don't have to hold bufp, since held by buf_Find above */
    osi_QAddH((osi_queue_t **) &biop->bufListp,
              (osi_queue_t **) &biop->bufListEndp,
              &qdp->q);
    biop->length = cm_data.buf_blockSize;
    firstModOffset = bufp->offset;
    biop->offset = firstModOffset;
    bufp = NULL;	/* this buffer and reference added to the queue */
    /* compute the window surrounding *inOffsetp of size cm_chunkSize */
    scanStart = *inOffsetp;
    scanStart.LowPart &= (-cm_chunkSize);
    thyper.LowPart = cm_chunkSize;
    thyper.HighPart = 0;
    scanEnd = LargeIntegerAdd(scanStart, thyper);
    flags = CM_SCACHESYNC_NEEDCALLBACK
        | CM_SCACHESYNC_GETSTATUS
        | CM_SCACHESYNC_STOREDATA
        | CM_SCACHESYNC_BUFLOCKED
        | CM_SCACHESYNC_NOWAIT;
    /* start by looking backwards until scanStart */
    thyper.HighPart = 0;		/* hyper version of cm_data.buf_blockSize */
    thyper.LowPart = cm_data.buf_blockSize;
    tbase = LargeIntegerSubtract(firstModOffset, thyper);
    while (LargeIntegerGreaterThanOrEqualTo(tbase, scanStart)) {
        /* see if we can find the buffer */
        bufp = buf_Find(scp, &tbase);
        if (!bufp)
            break;
        /* try to lock it, and quit if we can't (simplifies locking) */
        lock_ReleaseMutex(&scp->mx);
        code = lock_TryMutex(&bufp->mx);
        lock_ObtainMutex(&scp->mx);
        if (code == 0) {
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }
        if (!(bufp->flags & CM_BUF_DIRTY)) {
            /* buffer is clean, so we shouldn't add it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        /* don't need buffer mutex any more */
        lock_ReleaseMutex(&bufp->mx);
        /* we have a dirty buffer ready for storing.  Add it to the tail
         * of the list, since it immediately precedes all of the disk
         * addresses we've already collected.
         */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, bufp);
        /* no buf_hold necessary, since we have it held from buf_Find */
        osi_QAddT((osi_queue_t **) &biop->bufListp,
                  (osi_queue_t **) &biop->bufListEndp,
                  &qdp->q);
        bufp = NULL;		/* added to the queue */

        /* update biod info describing the transfer */
        biop->offset = LargeIntegerSubtract(biop->offset, thyper);
        biop->length += cm_data.buf_blockSize;

        /* update loop pointer */
        tbase = LargeIntegerSubtract(tbase, thyper);
    }	/* while loop looking for pages preceding the one we found */
    /* now, find later dirty, contiguous pages, and add them to the list */
    thyper.HighPart = 0;		/* hyper version of cm_data.buf_blockSize */
    thyper.LowPart = cm_data.buf_blockSize;
    tbase = LargeIntegerAdd(firstModOffset, thyper);
    while (LargeIntegerLessThan(tbase, scanEnd)) {
        /* see if we can find the buffer */
        bufp = buf_Find(scp, &tbase);
        if (!bufp)
            break;
        /* try to lock it, and quit if we can't (simplifies locking) */
        lock_ReleaseMutex(&scp->mx);
        code = lock_TryMutex(&bufp->mx);
        lock_ObtainMutex(&scp->mx);
        if (code == 0) {
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }
        if (!(bufp->flags & CM_BUF_DIRTY)) {
            /* buffer is clean, so we shouldn't add it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        /* don't need buffer mutex any more */
        lock_ReleaseMutex(&bufp->mx);
        /* we have a dirty buffer ready for storing.  Add it to the head
         * of the list, since it immediately follows all of the disk
         * addresses we've already collected.
         */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, bufp);
        /* no buf_hold necessary, since we have it held from buf_Find */
        osi_QAddH((osi_queue_t **) &biop->bufListp,
                  (osi_queue_t **) &biop->bufListEndp,
                  &qdp->q);
        bufp = NULL;		/* added to the queue */

        /* update biod info describing the transfer */
        biop->length += cm_data.buf_blockSize;

        /* update loop pointer */
        tbase = LargeIntegerAdd(tbase, thyper);
    }	/* while loop looking for pages following the first page we found */

    /* finally, we're done */
    return 0;
}
/* scp must be locked; temporarily unlocked during processing.
 * If returns 0, returns buffers held in biop, and with
 * CM_BUF_CMFETCHING flags set.
 * If an error is returned, we don't return any buffers.
 */
long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
                       cm_bulkIO_t *biop, cm_user_t *up, cm_req_t *reqp)
{
    long code;
    cm_buf_t *tbp;
    osi_hyper_t toffset;		/* a long long temp variable */
    osi_hyper_t pageBase;		/* base offset we're looking at */
    osi_queueData_t *qdp;		/* one temp queue structure */
    osi_queueData_t *tqdp;		/* another temp queue structure */
    long collected;			/* how many bytes have been collected */
    int isFirst;
    long flags;
    osi_hyper_t fileSize;		/* the # of bytes in the file */
    osi_queueData_t *heldBufListp;	/* we hold all buffers in this list */
    osi_queueData_t *heldBufListEndp;	/* first one */
    int reserving;
    biop->scp = scp;			/* don't hold */
    biop->offset = *offsetp;
    /* null out the list of buffers */
    biop->bufListp = biop->bufListEndp = NULL;
    biop->reserved = 0;

    /* first lookup the file's length, so we know when to stop */
    code = cm_SyncOp(scp, NULL, up, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
    if (code)
        return code;

    /* copy out size, since it may change */
    fileSize = scp->serverLength;
    lock_ReleaseMutex(&scp->mx);

    pageBase = *offsetp;
    collected = pageBase.LowPart & (cm_chunkSize - 1);
    heldBufListp = NULL;
    heldBufListEndp = NULL;
    /*
     * Obtaining buffers can cause dirty buffers to be recycled, which
     * can cause a storeback, so cannot be done while we have buffers
     * reserved.
     *
     * To get around this, we get buffers twice.  Before reserving buffers,
     * we obtain and release each one individually.  After reserving
     * buffers, we try to obtain them again, but only by lookup, not by
     * recycling.  If a buffer has gone away while we were waiting for
     * the others, we just use whatever buffers we already have.
     *
     * On entry to this function, we are already holding a buffer, so we
     * can't wait for reservation.  So we call buf_TryReserveBuffers()
     * instead.  Not only that, we can't really even call buf_Get(), for
     * the same reason.  We can't avoid that, though.  To avoid deadlock
     * we allow only one thread to be executing the buf_Get()-buf_Release()
     * sequence at a time.
     */
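
    /* In outline (illustrative, not in the original source), the two
     * passes described above are:
     *
     *     pass 1: buf_Get() then buf_Release() each page -- may recycle
     *             dirty buffers and block, but holds nothing afterward;
     *     pass 2: buf_TryReserveBuffers(), then buf_Find() each page,
     *             keeping whatever is still present, without blocking.
     */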
    // lock_ObtainMutex(&cm_bufGetMutex);
    /* first hold all buffers, since we can't hold any locks in buf_Get */
    while (1) {
        /* stop at chunk boundary */
        if (collected >= cm_chunkSize)
            break;

        /* see if the next page would be past EOF */
        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
            break;

        code = buf_Get(scp, &pageBase, &tbp);
        if (code) {
            //lock_ReleaseMutex(&cm_bufGetMutex);
            lock_ObtainMutex(&scp->mx);
            cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
            return code;
        }

        buf_Release(tbp);
        tbp = NULL;

        toffset.HighPart = 0;
        toffset.LowPart = cm_data.buf_blockSize;
        pageBase = LargeIntegerAdd(toffset, pageBase);
        collected += cm_data.buf_blockSize;
    }
    /* reserve a chunk's worth of buffers if possible */
    reserving = buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);

    // lock_ReleaseMutex(&cm_bufGetMutex);

    pageBase = *offsetp;
    collected = pageBase.LowPart & (cm_chunkSize - 1);
    /* now hold all buffers, if they are still there */
    while (1) {
        /* stop at chunk boundary */
        if (collected >= cm_chunkSize)
            break;

        /* see if the next page would be past EOF */
        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
            break;

        tbp = buf_Find(scp, &pageBase);
        if (!tbp)
            break;
        /* add the buffer to the list */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, tbp);
        osi_QAddH((osi_queue_t **)&heldBufListp,
                  (osi_queue_t **)&heldBufListEndp,
                  &qdp->q);
        /* leave tbp held (from buf_Get) */

        collected += cm_data.buf_blockSize;
        toffset.HighPart = 0;
        toffset.LowPart = cm_data.buf_blockSize;
        pageBase = LargeIntegerAdd(toffset, pageBase);
    }
    /* look at each buffer, adding it into the list if it looks idle and
     * filled with old data.  One special case: wait for idle if it is the
     * first buffer since we really need that one for our caller to make
     * any progress.
     */
    isFirst = 1;
    collected = 0;		/* now count how many we'll really use */
    for (tqdp = heldBufListEndp;
         tqdp;
         tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
        /* get a ptr to the held buffer */
        tbp = osi_GetQData(tqdp);
        pageBase = tbp->offset;
        /* now lock the buffer lock */
        lock_ObtainMutex(&tbp->mx);
        lock_ObtainMutex(&scp->mx);

        /* don't bother fetching over data that is already current */
        if (tbp->dataVersion == scp->dataVersion) {
            /* we don't need this buffer, since it is current */
            lock_ReleaseMutex(&scp->mx);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }
        flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_FETCHDATA
            | CM_SCACHESYNC_BUFLOCKED;
        if (!isFirst)
            flags |= CM_SCACHESYNC_NOWAIT;
        /* wait for the buffer to serialize, if required.  Doesn't
         * release the scp or buffer lock(s) if NOWAIT is specified.
         */
        code = cm_SyncOp(scp, tbp, up, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&scp->mx);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }
        /* don't fetch over dirty buffers */
        if (tbp->flags & CM_BUF_DIRTY) {
            cm_SyncOpDone(scp, tbp, flags);
            lock_ReleaseMutex(&scp->mx);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        lock_ReleaseMutex(&scp->mx);
        lock_ReleaseMutex(&tbp->mx);
        /* add the buffer to the list */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, tbp);
        osi_QAddH((osi_queue_t **)&biop->bufListp,
                  (osi_queue_t **)&biop->bufListEndp,
                  &qdp->q);
        buf_Hold(tbp);

        /* from now on, a failure just stops our collection process, but
         * we still do the I/O to whatever we've already managed to collect.
         */
        isFirst = 0;
        collected += cm_data.buf_blockSize;
    }
    /* now, we've held in biop->bufListp all the buffers we're really
     * interested in.  We also have holds left from heldBufListp, and we
     * now release those holds on the buffers.
     */
    for (qdp = heldBufListp; qdp; qdp = tqdp) {
        tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
        tbp = osi_GetQData(qdp);
        osi_QRemoveHT((osi_queue_t **) &heldBufListp,
                      (osi_queue_t **) &heldBufListEndp,
                      &qdp->q);
        osi_QDFree(qdp);
        buf_Release(tbp);
        tbp = NULL;
    }
    /* Caller expects this */
    lock_ObtainMutex(&scp->mx);
    /* if we got a failure setting up the first buffer, then we don't have
     * any side effects yet, and we also have failed an operation that the
     * caller requires to make any progress.  Give up now.
     */
    if (code && isFirst) {
        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
        return code;
    }
    /* otherwise, we're still OK, and should just return the I/O setup we've
     * got.
     */
    biop->length = collected;
    biop->reserved = reserving;
    return 0;
}
/* release a bulk I/O structure that was setup by cm_SetupFetchBIOD or by
 * cm_SetupStoreBIOD
 */
void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore)
{
    cm_scache_t *scp;
    cm_buf_t *bufp;
    osi_queueData_t *qdp;
    osi_queueData_t *nqdp;
    int flags;

    /* Give back reserved buffers */
    if (biop->reserved)
        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);

    flags = CM_SCACHESYNC_NEEDCALLBACK;
    if (isStore)
        flags |= CM_SCACHESYNC_STOREDATA;
    else
        flags |= CM_SCACHESYNC_FETCHDATA;

    scp = biop->scp;
    for (qdp = biop->bufListp; qdp; qdp = nqdp) {
        /* lookup next guy first, since we're going to free this one */
        nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);

        /* extract buffer and free queue data */
        bufp = osi_GetQData(qdp);
        osi_QRemoveHT((osi_queue_t **) &biop->bufListp,
                      (osi_queue_t **) &biop->bufListEndp,
                      &qdp->q);
        osi_QDFree(qdp);
        /* now, mark I/O as done, unlock the buffer and release it */
        lock_ObtainMutex(&bufp->mx);
        lock_ObtainMutex(&scp->mx);
        cm_SyncOpDone(scp, bufp, flags);

        /* turn off writing and wakeup users */
        if (isStore) {
            if (bufp->flags & CM_BUF_WAITING) {
                osi_Log2(afsd_logp, "cm_ReleaseBIOD Waking [scp 0x%p] bp 0x%p", scp, bufp);
                osi_Wakeup((LONG_PTR) bufp);
            }
            bufp->flags &= ~(CM_BUF_WRITING | CM_BUF_DIRTY);
        }

        lock_ReleaseMutex(&scp->mx);
        lock_ReleaseMutex(&bufp->mx);
        buf_Release(bufp);
        bufp = NULL;
    }
    /* clean things out */
    biop->bufListp = NULL;
    biop->bufListEndp = NULL;
}
/* Fetch a buffer.  Called with scp locked.
 * The scp is locked on return.
 */
long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *up,
                  cm_req_t *reqp)
{
    long code;
    afs_int32 nbytes;			/* bytes in transfer */
    afs_int32 nbytes_hi = 0;		/* high-order 32 bits of bytes in transfer */
    afs_int64 length_found = 0;
    long rbytes;			/* bytes in rx_Read call */
    long temp;
    AFSFetchStatus afsStatus;
    AFSCallBack callback;
    AFSVolSync volSync;
    char *bufferp;
    cm_buf_t *tbufp;			/* buf we're filling */
    osi_queueData_t *qdp;		/* q element we're scanning */
    AFSFid tfid;
    struct rx_call *callp;
    struct rx_connection *rxconnp;
    cm_bulkIO_t biod;			/* bulk IO descriptor */
    cm_conn_t *connp;
    int require_64bit_ops = 0;
#ifdef AFS_FREELANCE_CLIENT
    int getroot;
    int t1, t2;
#endif
    /* now, the buffer may or may not be filled with good data (buf_GetNew
     * drops lots of locks, and may indeed return a properly initialized
     * buffer, although more likely it will just return a new, empty buffer).
     */

#ifdef AFS_FREELANCE_CLIENT

    // yj: if they're trying to get the /afs directory, we need to
    // handle it differently, since it's local rather than on any
    // server

    getroot = (scp == cm_data.rootSCachep);
    if (getroot)
        osi_Log1(afsd_logp, "GetBuffer returns cm_data.rootSCachep=%x", cm_data.rootSCachep);
#endif
    cm_AFSFidFromFid(&tfid, &scp->fid);

    code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, up, reqp);
    if (code) {
        /* couldn't even get the first page setup properly */
        osi_Log1(afsd_logp, "SetupFetchBIOD failure code %d", code);
        return code;
    }
    /* once we get here, we have the callback in place, we know that no one
     * is fetching the data now.  Check one last time that we still have
     * the wrong data, and then fetch it if we're still wrong.
     *
     * We can lose a race condition and end up with biod.length zero, in
     * which case we just retry.
     */
    if (bufp->dataVersion == scp->dataVersion || biod.length == 0) {
        osi_Log3(afsd_logp, "Bad DVs %d, %d or length 0x%x",
                 bufp->dataVersion, scp->dataVersion, biod.length);
        if ((bufp->dataVersion == -1
             || bufp->dataVersion < scp->dataVersion)
            && LargeIntegerGreaterThanOrEqualTo(bufp->offset,
                                                scp->serverLength)) {
            if (bufp->dataVersion == -1)
                memset(bufp->datap, 0, cm_data.buf_blockSize);
            bufp->dataVersion = scp->dataVersion;
        }
        lock_ReleaseMutex(&scp->mx);
        cm_ReleaseBIOD(&biod, 0);
        lock_ObtainMutex(&scp->mx);
        return 0;
    }

    lock_ReleaseMutex(&scp->mx);
    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
                                                ConvertLongToLargeInteger(biod.length)),
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }
#ifdef DISKCACHE95
    DPRINTF("cm_GetBuffer: fetching data scpDV=%d bufDV=%d scp=%x bp=%x dcp=%x\n",
            scp->dataVersion, bufp->dataVersion, scp, bufp, bufp->dcp);
#endif /* DISKCACHE95 */
#ifdef AFS_FREELANCE_CLIENT

    // if getroot then we don't need to make any calls
    // just return fake data

    if (cm_freelanceEnabled && getroot) {
        // setup the fake status
        afsStatus.InterfaceVersion = 0x1;
        afsStatus.FileType = 0x2;
        afsStatus.LinkCount = scp->linkCount;
        afsStatus.Length = cm_fakeDirSize;
        afsStatus.DataVersion = cm_data.fakeDirVersion;
        afsStatus.Author = 0x1;
        afsStatus.Owner = 0x0;
        afsStatus.CallerAccess = 0x9;
        afsStatus.AnonymousAccess = 0x9;
        afsStatus.UnixModeBits = 0x1ff;
        afsStatus.ParentVnode = 0x1;
        afsStatus.ParentUnique = 0x1;
        afsStatus.ResidencyMask = 0;
        afsStatus.ClientModTime = (afs_uint32)FakeFreelanceModTime;
        afsStatus.ServerModTime = (afs_uint32)FakeFreelanceModTime;
        afsStatus.Group = 0;
        afsStatus.SyncCounter = 0;
        afsStatus.dataVersionHigh = 0;
        afsStatus.lockCount = 0;
        afsStatus.Length_hi = 0;
        afsStatus.errorCode = 0;
        // once we're done setting up the status info,
        // we just fill the buffer pages with fakedata
        // from cm_FakeRootDir.  Extra pages are set to
        // 0.

        lock_ObtainMutex(&cm_Freelance_Lock);
        t1 = bufp->offset.LowPart;
        qdp = biod.bufListEndp;
        while (qdp) {
            tbufp = osi_GetQData(qdp);
            bufferp = tbufp->datap;
            memset(bufferp, 0, cm_data.buf_blockSize);
            t2 = cm_fakeDirSize - t1;
            if (t2 > cm_data.buf_blockSize)
                t2 = cm_data.buf_blockSize;
            if (t2 > 0) {
                memcpy(bufferp, cm_FakeRootDir + t1, t2);
            } else {
                t2 = 0;
            }
            t1 += t2;
            qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
        }
        lock_ReleaseMutex(&cm_Freelance_Lock);
        // once we're done, we skip over the part of the
        // code that does the ACTUAL fetching of data for
        // that particular buffer

        goto fetchingcompleted;
    }
#endif /* AFS_FREELANCE_CLIENT */
    /* now make the call */
    do {
        code = cm_Conn(&scp->fid, up, reqp, &connp);
        if (code)
            continue;

        rxconnp = cm_GetRxConn(connp);
        callp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);
#ifdef AFS_LARGEFILES
        nbytes = nbytes_hi = 0;

        if (SERVERHAS64BIT(connp)) {
            osi_Log4(afsd_logp, "CALL FetchData64 scp 0x%p, off 0x%x:%08x, size 0x%x",
                     scp, biod.offset.HighPart, biod.offset.LowPart, biod.length);

            code = StartRXAFS_FetchData64(callp, &tfid, biod.offset.QuadPart, biod.length);

            if (code == 0) {
                temp = rx_Read(callp, (char *) &nbytes_hi, sizeof(afs_int32));
                if (temp == sizeof(afs_int32)) {
                    nbytes_hi = ntohl(nbytes_hi);
                } else {
                    nbytes_hi = 0;
                    code = callp->error;
                    rx_EndCall(callp, code);
                    callp = NULL;
                }
            }
        }
        if (code == RXGEN_OPCODE || !SERVERHAS64BIT(connp)) {
            if (require_64bit_ops) {
                osi_Log0(afsd_logp, "Skipping FetchData. Operation requires FetchData64");
                code = CM_ERROR_TOOBIG;
            } else {
                if (!callp) {
                    rxconnp = cm_GetRxConn(connp);
                    callp = rx_NewCall(rxconnp);
                    rx_PutConnection(rxconnp);
                }

                osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
                         scp, biod.offset.LowPart, biod.length);

                code = StartRXAFS_FetchData(callp, &tfid, biod.offset.LowPart,
                                            biod.length);

                SET_SERVERHASNO64BIT(connp);
            }
        }
        if (code == 0) {
            temp = rx_Read(callp, (char *)&nbytes, sizeof(afs_int32));
            if (temp == sizeof(afs_int32)) {
                nbytes = ntohl(nbytes);
                FillInt64(length_found, nbytes_hi, nbytes);
                if (length_found > biod.length)
                    code = (callp->error < 0) ? callp->error : -1;
            } else {
                code = (callp->error < 0) ? callp->error : -1;
            }
        }
        /* for the moment, nbytes_hi will always be 0 if code == 0
         * because biod.length is a 32-bit quantity. */
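
        /* Illustrative note (not in the original source): FillInt64 just
         * reassembles the two 32-bit words read from the wire, i.e.
         * length_found = ((afs_int64)nbytes_hi << 32) | (afs_uint32)nbytes.
         */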
#else
        osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
                 scp, biod.offset.LowPart, biod.length);

        code = StartRXAFS_FetchData(callp, &tfid, biod.offset.LowPart,
                                    biod.length);

        /* now copy the data out of the pipe and put it in the buffer */
        if (code == 0) {
            temp = rx_Read(callp, (char *)&nbytes, sizeof(afs_int32));
            if (temp == sizeof(afs_int32)) {
                nbytes = ntohl(nbytes);
                if (nbytes > biod.length)
                    code = (callp->error < 0) ? callp->error : -1;
            } else {
                code = (callp->error < 0) ? callp->error : -1;
            }
        }
#endif /* AFS_LARGEFILES */
        if (code == 0) {
            qdp = biod.bufListEndp;
            if (qdp) {
                tbufp = osi_GetQData(qdp);
                bufferp = tbufp->datap;
            } else {
                bufferp = NULL;
            }

            /* fill nbytes of data from the pipe into the pages.
             * When we stop, qdp will point at the last page we're
             * dealing with, and bufferp will tell us where we
             * stopped.  We'll need this info below when we clear
             * the remainder of the last page out (and potentially
             * clear later pages out, if we fetch past EOF).
             */
            while (nbytes > 0) {
                /* assert that there are still more buffers;
                 * our check above for nbytes being less than
                 * biod.length should ensure this.
                 */
                osi_assert(bufferp != NULL);

                /* read rbytes of data */
                rbytes = (nbytes > cm_data.buf_blockSize ? cm_data.buf_blockSize : nbytes);
                temp = rx_Read(callp, bufferp, rbytes);
                if (temp < rbytes) {
                    code = (callp->error < 0) ? callp->error : -1;
                    break;
                }
                /* allow read-while-fetching.
                 * if this is the last buffer, clear the
                 * PREFETCHING flag, so the reader waiting for
                 * this buffer will start a prefetch.
                 */
                tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
                lock_ObtainMutex(&scp->mx);
                if (scp->flags & CM_SCACHEFLAG_WAITING) {
                    osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
                    osi_Wakeup((LONG_PTR) &scp->flags);
                }
                if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
                    *cpffp = 1;
                    cm_ClearPrefetchFlag(0, scp, &biod.offset);
                }
                lock_ReleaseMutex(&scp->mx);
                /* and adjust counters */
                nbytes -= temp;

                /* and move to the next buffer */
                if (nbytes != 0) {
                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                    if (qdp) {
                        tbufp = osi_GetQData(qdp);
                        bufferp = tbufp->datap;
                    } else {
                        bufferp = NULL;
                    }
                }
            }	/* while there are still bytes to read */
            /* zero out remainder of last pages, in case we are
             * fetching past EOF.  We were fetching an integral #
             * of pages, but stopped, potentially in the middle of
             * a page.  Zero the remainder of that page, and then
             * all of the rest of the pages.
             */
            osi_assert((bufferp - tbufp->datap) < LONG_MAX);
            rbytes = (long) (bufferp - tbufp->datap);

            /* bytes left to zero */
            rbytes = cm_data.buf_blockSize - rbytes;
            while (qdp) {
                if (rbytes != 0)
                    memset(bufferp, 0, rbytes);
                qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                if (qdp == NULL)
                    break;
                tbufp = osi_GetQData(qdp);
                bufferp = tbufp->datap;
                /* bytes to clear in this page */
                rbytes = cm_data.buf_blockSize;
            }
        }
        if (code == 0) {
            if (SERVERHAS64BIT(connp))
                code = EndRXAFS_FetchData64(callp, &afsStatus, &callback, &volSync);
            else
                code = EndRXAFS_FetchData(callp, &afsStatus, &callback, &volSync);
        } else {
            if (SERVERHAS64BIT(connp))
                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData64 skipped due to error %d", code);
            else
                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error %d", code);
        }
        code = rx_EndCall(callp, code);

        if (code == RXKADUNKNOWNKEY)
            osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");

        osi_Log0(afsd_logp, "CALL FetchData DONE");

    } while (cm_Analyze(connp, up, reqp, &scp->fid, &volSync, NULL, NULL, code));
  fetchingcompleted:
    code = cm_MapRPCError(code, reqp);

    lock_ObtainMutex(&scp->mx);

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_FETCHSTATUS);
    /* we know that no one else has changed the buffer, since we still have
     * the fetching flag on the buffers, and we have the scp locked again.
     * Copy in the version # into the buffer if we got code 0 back from the
     * read.
     */
    if (code == 0) {
        for (qdp = biod.bufListp;
             qdp;
             qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
            tbufp = osi_GetQData(qdp);
            tbufp->dataVersion = afsStatus.DataVersion;

#ifdef DISKCACHE95
            /* write buffer out to disk cache */
            diskcache_Update(tbufp->dcp, tbufp->datap, cm_data.buf_blockSize,
                             tbufp->dataVersion);
#endif /* DISKCACHE95 */
        }
    }
    /* release scatter/gather I/O structure (buffers, locks) */
    lock_ReleaseMutex(&scp->mx);
    cm_ReleaseBIOD(&biod, 0);
    lock_ObtainMutex(&scp->mx);

    if (code == 0)
        cm_MergeStatus(scp, &afsStatus, &volSync, up, 0);

    return code;
}