/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afs/param.h>

extern void afsi_log(char *pattern, ...);

osi_mutex_t cm_bufGetMutex;

#ifdef AFS_FREELANCE_CLIENT
extern osi_mutex_t cm_Freelance_Lock;
#endif

#ifdef AFS_LARGEFILES
/* we can access connp->serverp without holding a lock because that
   never changes since the connection is made. */
#define SERVERHAS64BIT(connp) (!((connp)->serverp->flags & CM_SERVERFLAG_NO64BIT))
#define SET_SERVERHASNO64BIT(connp) (cm_SetServerNo64Bit((connp)->serverp, TRUE))
#else
#define SERVERHAS64BIT(connp) (FALSE)
#define SET_SERVERHASNO64BIT(connp) (FALSE)
#endif
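
/* Whether a file server supports the 64-bit RPC variants is discovered
 * lazily: when a StoreData64/FetchData64 call fails with RXGEN_OPCODE,
 * the server is marked via SET_SERVERHASNO64BIT() and the operation is
 * retried with the 32-bit RPC (see the RXGEN_OPCODE handling in
 * cm_BufWrite, cm_StoreMini and cm_GetBuffer below).
 */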

/* functions called back from the buffer package when reading or writing data,
 * or when holding or releasing a vnode pointer.
 */
long cm_BufWrite(void *vscp, osi_hyper_t *offsetp, long length, long flags,
                 cm_user_t *userp, cm_req_t *reqp)
{
    /* store the data back from this buffer; the buffer is locked and held,
     * but the vnode involved isn't locked, yet.  It is held by its
     * reference from the buffer, which won't change until the buffer is
     * released by our caller.  Thus, we don't have to worry about holding
     * bufp->scp.
     */
    long code;
    cm_scache_t *scp = vscp;
    afs_int32 nbytes;
#ifdef AFS_LARGEFILES
    afs_int32 save_nbytes;
#endif
    long temp;
    AFSFetchStatus outStatus;
    AFSStoreStatus inStatus;
    osi_hyper_t thyper;
    AFSVolSync volSync;
    AFSFid tfid;
    struct rx_call *callp;
    struct rx_connection *rxconnp;
    osi_queueData_t *qdp;
    cm_buf_t *bufp;
    long wbytes;
    char *bufferp;
    cm_conn_t *connp;
    osi_hyper_t truncPos;
    osi_hyper_t t;
    cm_bulkIO_t biod;           /* bulk IO descriptor */
    int require_64bit_ops = 0;

    osi_assertx(userp != NULL, "null cm_user_t");
    osi_assertx(scp != NULL, "null cm_scache_t");

    /* now, the buffer may or may not be filled with good data (buf_GetNew
     * drops lots of locks, and may indeed return a properly initialized
     * buffer, although more likely it will just return a new, empty, buffer.
     */
    lock_ObtainMutex(&scp->mx);
    if (scp->flags & CM_SCACHEFLAG_DELETED) {
        lock_ReleaseMutex(&scp->mx);
        return CM_ERROR_NOSUCHFILE;
    }

    cm_AFSFidFromFid(&tfid, &scp->fid);

    code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
    if (code) {
        osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
        lock_ReleaseMutex(&scp->mx);
        return code;
    }

    if (biod.length == 0) {
        osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
        lock_ReleaseMutex(&scp->mx);
        cm_ReleaseBIOD(&biod, 1, 0);    /* should be a NOOP */
        return 0;
    }

    /* Serialize StoreData RPCs; for rationale see cm_scache.c */
    (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);

    /* prepare the store status (the RPC's input status) */
    scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
    cm_StatusFromAttr(&inStatus, scp, NULL);
    truncPos = scp->length;
    if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
        && LargeIntegerLessThan(scp->truncPos, truncPos))
        truncPos = scp->truncPos;
    scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;

    /* compute how many bytes to write from this buffer */
    thyper = LargeIntegerSubtract(scp->length, biod.offset);
    if (LargeIntegerLessThanZero(thyper)) {
        /* entire buffer is past EOF */
        nbytes = 0;
    }
    else {
        /* otherwise write out part of buffer before EOF, but not
         * more than bufferSize bytes.
         */
        if (LargeIntegerGreaterThan(thyper,
                                    ConvertLongToLargeInteger(biod.length))) {
            nbytes = biod.length;
        } else {
            /* if thyper is less than or equal to biod.length, then we
               can safely assume that the value fits in a long. */
            nbytes = thyper.LowPart;
        }
    }

    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
                                                ConvertLongToLargeInteger(nbytes)),
                                ConvertLongToLargeInteger(LONG_MAX)) ||
        LargeIntegerGreaterThan(truncPos,
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }
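
    /* If the end of this store (offset + length) or the truncation
     * position cannot be represented in 32 bits, the 32-bit StoreData
     * RPC cannot describe the operation; remember that, so that we
     * either use StoreData64 or fail with CM_ERROR_TOOBIG below.
     */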

    lock_ReleaseMutex(&scp->mx);

#ifdef AFS_LARGEFILES
    save_nbytes = nbytes;
#endif

    /* now we're ready to do the store operation */
    do {
        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

    retry:
        rxconnp = cm_GetRxConn(connp);
        callp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);

#ifdef AFS_LARGEFILES
        if (SERVERHAS64BIT(connp)) {
            osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData64 scp 0x%p, offset 0x%x:%08x, length 0x%x",
                     scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

            code = StartRXAFS_StoreData64(callp, &tfid, &inStatus,
                                          biod.offset.QuadPart,
                                          nbytes,
                                          truncPos.QuadPart);
            if (code)
                osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData64 FAILURE, code 0x%x", code);
            else
                osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData64 SUCCESS");
        } else {
            if (require_64bit_ops) {
                osi_Log0(afsd_logp, "Skipping StartRXAFS_StoreData. The operation requires large file support in the server.");
                code = CM_ERROR_TOOBIG;
            } else {
                osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
                         scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

                code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
                                            biod.offset.LowPart, nbytes, truncPos.LowPart);
                if (code)
                    osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
                else
                    osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
            }
        }
#else
        osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
                 scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

        code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
                                    biod.offset.LowPart, nbytes, truncPos.LowPart);
        if (code)
            osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
        else
            osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
#endif

        if (code == 0) {
            /* write the data from the list of buffers */
            qdp = NULL;
            while (nbytes > 0) {
                if (qdp == NULL)
                    qdp = biod.bufListEndp;
                else
                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                osi_assertx(qdp != NULL, "null osi_queueData_t");
                bufp = osi_GetQData(qdp);
                bufferp = bufp->datap;
                wbytes = nbytes;
                if (wbytes > cm_data.buf_blockSize)
                    wbytes = cm_data.buf_blockSize;

                /* write out wbytes of data from bufferp */
                temp = rx_Write(callp, bufferp, wbytes);
                if (temp != wbytes) {
                    osi_Log3(afsd_logp, "rx_Write failed bp 0x%p, %d != %d",bufp,temp,wbytes);
                    code = (callp->error < 0) ? callp->error : -1;
                    break;
                } else {
                    osi_Log2(afsd_logp, "rx_Write succeeded bp 0x%p, %d",bufp,temp);
                }
                nbytes -= wbytes;
            }   /* while more bytes to write */
        }       /* if RPC started successfully */
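
        /* The loop above walks the list from biod.bufListEndp backwards
         * via osi_QPrev: cm_SetupStoreBIOD queues the buffers so that the
         * tail of the list holds the lowest file offset, so walking
         * backwards streams the chunk to the server in ascending offset
         * order.
         */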

        if (code == 0) {
            if (SERVERHAS64BIT(connp)) {
                code = EndRXAFS_StoreData64(callp, &outStatus, &volSync);
                if (code)
                    osi_Log2(afsd_logp, "EndRXAFS_StoreData64 FAILURE scp 0x%p code %lX", scp, code);
                else
                    osi_Log0(afsd_logp, "EndRXAFS_StoreData64 SUCCESS");
            } else {
                code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
                if (code)
                    osi_Log2(afsd_logp, "EndRXAFS_StoreData FAILURE scp 0x%p code %lX",scp,code);
                else
                    osi_Log0(afsd_logp, "EndRXAFS_StoreData SUCCESS");
            }
        }

        code = rx_EndCall(callp, code);

#ifdef AFS_LARGEFILES
        if (code == RXGEN_OPCODE && SERVERHAS64BIT(connp)) {
            SET_SERVERHASNO64BIT(connp);
            qdp = NULL;
            nbytes = save_nbytes;
            goto retry;
        }
#endif
    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));

    code = cm_MapRPCError(code, reqp);

    if (code)
        osi_Log2(afsd_logp, "CALL StoreData FAILURE scp 0x%p, code 0x%x", scp, code);
    else
        osi_Log1(afsd_logp, "CALL StoreData SUCCESS scp 0x%p", scp);

    /* now, clean up our state */
    lock_ObtainMutex(&scp->mx);

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);

    if (code == 0) {
        /* now, here's something a little tricky: in AFS 3, a dirty
         * length can't be directly stored, instead, a dirty chunk is
         * stored that sets the file's size (by writing and by using
         * the truncate-first option in the store call).
         *
         * At this point, we've just finished a store, and so the trunc
         * pos field is clean.  If the file's size at the server is at
         * least as big as we think it should be, then we turn off the
         * length dirty bit, since all the other dirty buffers must
         * precede this one in the file.
         *
         * The file's desired size shouldn't be smaller than what's
         * stored at the server now, since we just did the trunc pos
         * store.
         *
         * We have to turn off the length dirty bit as soon as we can,
         * so that we see updates made by other machines.
         */
        if (SERVERHAS64BIT(connp)) {
            t.LowPart = outStatus.Length;
            t.HighPart = outStatus.Length_hi;
        } else {
            t = ConvertLongToLargeInteger(outStatus.Length);
        }

        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
            scp->mask &= ~CM_SCACHEMASK_LENGTH;

        cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, CM_MERGEFLAG_STOREDATA);
    } else {
        if (code == CM_ERROR_SPACE)
            scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
        else if (code == CM_ERROR_QUOTA)
            scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
    }
    lock_ReleaseMutex(&scp->mx);
    cm_ReleaseBIOD(&biod, 1, code);

    return code;
}
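
/* Note that the final RPC code is passed to cm_ReleaseBIOD above: on
 * success the buffers' CM_BUF_DIRTY flags are cleared, while on failure
 * the dirty state is preserved so the data will be stored back again
 * later (see the 'failed' handling in cm_ReleaseBIOD).
 */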

/*
 * Truncate the file, by sending a StoreData RPC with zero length.
 *
 * Called with scp locked.  Releases and re-obtains the lock.
 */
long cm_StoreMini(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    AFSFetchStatus outStatus;
    AFSStoreStatus inStatus;
    AFSVolSync volSync;
    AFSFid tfid;
    osi_hyper_t truncPos;
    osi_hyper_t t;
    cm_conn_t *connp;
    struct rx_call *callp;
    struct rx_connection *rxconnp;
    int require_64bit_ops = 0;

    /* Serialize StoreData RPCs; for rationale see cm_scache.c */
    (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_STOREDATA_EXCL);

    /* prepare the store status (the RPC's input status) */
    inStatus.Mask = AFS_SETMODTIME;
    inStatus.ClientModTime = scp->clientModTime;
    scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;

    /* calculate truncation position */
    truncPos = scp->length;
    if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
        && LargeIntegerLessThan(scp->truncPos, truncPos))
        truncPos = scp->truncPos;
    scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;

    if (LargeIntegerGreaterThan(truncPos,
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }

    lock_ReleaseMutex(&scp->mx);

    cm_AFSFidFromFid(&tfid, &scp->fid);

    /* now we're ready to do the store operation */
    do {
        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

    retry:
        rxconnp = cm_GetRxConn(connp);
        callp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);

#ifdef AFS_LARGEFILES
        if (SERVERHAS64BIT(connp)) {
            code = StartRXAFS_StoreData64(callp, &tfid, &inStatus,
                                          0, 0, truncPos.QuadPart);
        } else {
            if (require_64bit_ops) {
                code = CM_ERROR_TOOBIG;
            } else {
                code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
                                            0, 0, truncPos.LowPart);
            }
        }
#else
        code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
                                    0, 0, truncPos.LowPart);
#endif

        if (code == 0) {
            if (SERVERHAS64BIT(connp))
                code = EndRXAFS_StoreData64(callp, &outStatus, &volSync);
            else
                code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
        }
        code = rx_EndCall(callp, code);

#ifdef AFS_LARGEFILES
        if (code == RXGEN_OPCODE && SERVERHAS64BIT(connp)) {
            SET_SERVERHASNO64BIT(connp);
            goto retry;
        }
#endif
    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));

    code = cm_MapRPCError(code, reqp);

    /* now, clean up our state */
    lock_ObtainMutex(&scp->mx);

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);

    if (code == 0) {
        /*
         * For explanation of handling of CM_SCACHEMASK_LENGTH,
         * see cm_BufWrite().
         */
        if (SERVERHAS64BIT(connp)) {
            t.HighPart = outStatus.Length_hi;
            t.LowPart = outStatus.Length;
        } else {
            t = ConvertLongToLargeInteger(outStatus.Length);
        }

        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
            scp->mask &= ~CM_SCACHEMASK_LENGTH;
        cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, CM_MERGEFLAG_STOREDATA);
    }

    return code;
}

long cm_BufRead(cm_buf_t *bufp, long nbytes, long *bytesReadp, cm_user_t *userp)
{
    *bytesReadp = cm_data.buf_blockSize;

    /* now return a code that means that I/O is done */
    return 0;
}
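
/* The read callback is effectively a no-op: it reports a full block as
 * "read" without contacting the file server.  Cache misses appear to be
 * filled through cm_GetBuffer() instead, so this entry point exists only
 * to satisfy the buffer package's callback interface.
 */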

/* stabilize scache entry, and return with it locked so
 * it stays stable.
 */
long cm_BufStabilize(void *vscp, cm_user_t *userp, cm_req_t *reqp)
{
    cm_scache_t *scp = vscp;
    long code;

    lock_ObtainMutex(&scp->mx);
    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
    if (code)
        lock_ReleaseMutex(&scp->mx);

    return code;
}

/* undoes the work that cm_BufStabilize does: releases lock so things can change again */
long cm_BufUnstabilize(void *vscp, cm_user_t *userp)
{
    cm_scache_t *scp = vscp;

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);

    lock_ReleaseMutex(&scp->mx);

    /* always succeeds */
    return 0;
}

cm_buf_ops_t cm_bufOps = {
    cm_BufWrite,
    cm_BufRead,
    cm_BufStabilize,
    cm_BufUnstabilize
};
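
/* This operations vector is registered with the buffer package by
 * cm_InitDCache() below (via buf_Init); the buffer package calls back
 * through it to write out dirty buffers and to stabilize/unstabilize
 * scache entries while buffer I/O is in progress.
 */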

long cm_ValidateDCache(void)
{
    return buf_ValidateBuffers();
}

long cm_ShutdownDCache(void)
{
    return 0;
}

int cm_InitDCache(int newFile, long chunkSize, afs_uint64 nbuffers)
{
    lock_InitializeMutex(&cm_bufGetMutex, "buf_Get mutex");
    return buf_Init(newFile, &cm_bufOps, nbuffers);
}

/* check to see if we have an up-to-date buffer.  The buffer must have
 * previously been obtained by calling buf_Get.
 *
 * Make sure we have a callback, and that the dataversion matches.
 *
 * Scp must be locked.
 *
 * Bufp *may* be locked.
 */
int cm_HaveBuffer(cm_scache_t *scp, cm_buf_t *bufp, int isBufLocked)
{
    int code;

    if (!cm_HaveCallback(scp))
        return 0;
    if ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
        return 1;
    if (bufp->dataVersion == scp->dataVersion)
        return 1;
    if (!isBufLocked) {
        code = lock_TryMutex(&bufp->mx);
        if (code == 0) {
            /* don't have the lock, and can't lock it, then
             * return failure.
             */
            return 0;
        }
    }

    /* remember dirty flag for later */
    code = bufp->flags & CM_BUF_DIRTY;

    /* release lock if we obtained it here */
    if (!isBufLocked)
        lock_ReleaseMutex(&bufp->mx);

    /* if buffer was dirty, buffer is acceptable for use */
    if (code)
        return 1;
    else
        return 0;
}
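
/* Note the dirty-buffer case above: a buffer whose CM_BUF_DIRTY flag is
 * set is acceptable even though its data version may lag scp->dataVersion,
 * because it holds local modifications that have not yet been stored back;
 * refetching it would discard those writes.
 */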

/* used when deciding whether to do a prefetch or not */
long cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, osi_hyper_t *length,
                        cm_user_t *userp, cm_req_t *reqp, osi_hyper_t *realBasep)
{
    osi_hyper_t tbase;
    osi_hyper_t tlength;
    osi_hyper_t tblocksize;
    long code;
    cm_buf_t *bp;
    int stop;

    /* now scan all buffers in the range, looking for any that look like
     * they need work.
     */
    tbase = *startBasep;
    tlength = *length;
    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    stop = 0;
    lock_ObtainMutex(&scp->mx);
    while (LargeIntegerGreaterThanZero(tlength)) {
        /* get callback so we can do a meaningful dataVersion comparison */
        code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
        if (code) {
            scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
            lock_ReleaseMutex(&scp->mx);
            return code;
        }

        if (LargeIntegerGreaterThanOrEqualTo(tbase, scp->length)) {
            /* we're past the end of file */
            break;
        }

        bp = buf_Find(scp, &tbase);
        /* We cheat slightly by not locking the bp mutex. */
        if (bp) {
            if ((bp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)) == 0
                && bp->dataVersion != scp->dataVersion)
                stop = 1;
            buf_Release(bp);
            bp = NULL;
        }
        else
            stop = 1;

        /* if this buffer is essentially guaranteed to require a fetch,
         * break out here and return this position.
         */
        if (stop)
            break;

        tbase = LargeIntegerAdd(tbase, tblocksize);
        tlength = LargeIntegerSubtract(tlength, tblocksize);
    }

    /* if we get here, either everything is fine or 'stop' stopped us at a
     * particular buffer in the range that definitely needs to be fetched.
     */
    if (stop == 0) {
        /* return non-zero code since realBasep won't be valid */
        scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
        code = -1;
    }
    else {
        /* successfully found a page that will need fetching */
        *realBasep = tbase;
        code = 0;
    }
    lock_ReleaseMutex(&scp->mx);
    return code;
}
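
/* The unlocked peek at bp->cmFlags and bp->dataVersion above ("we cheat
 * slightly") is a benign race: a stale view can only cause a prefetch to
 * be skipped or started unnecessarily, never incorrect data to be
 * returned.
 */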

afs_int32
cm_BkgStore(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
            cm_user_t *userp)
{
    osi_hyper_t toffset;
    long length;
    afs_int32 code = 0;
    cm_req_t req;

    if (scp->flags & CM_SCACHEFLAG_DELETED) {
        osi_Log4(afsd_logp, "Skipping BKG store - Deleted scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
    } else {
        cm_InitReq(&req);

        /* Retries will be performed by the BkgDaemon thread if appropriate */
        req.flags |= CM_REQ_NORETRY;

        toffset.LowPart = p1;
        toffset.HighPart = p2;
        length = p3;

        osi_Log4(afsd_logp, "Starting BKG store scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);

        code = cm_BufWrite(scp, &toffset, length, /* flags */ 0, userp, &req);

        osi_Log4(afsd_logp, "Finished BKG store scp 0x%p, offset 0x%x:%08x, code 0x%x", scp, p2, p1, code);
    }

    lock_ObtainMutex(&scp->mx);
    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
    lock_ReleaseMutex(&scp->mx);

    return code;
}

/* Called with scp locked */
void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base, osi_hyper_t *length)
{
    osi_hyper_t end;

    if (code == 0) {
        end = LargeIntegerAdd(*base, *length);
        if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
            scp->prefetch.base = *base;
        if (LargeIntegerGreaterThan(end, scp->prefetch.end))
            scp->prefetch.end = end;
    }
    scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
}

/* do the prefetch.  if the prefetch fails, return 0 (success)
 * because there is no harm done.  */
afs_int32
cm_BkgPrefetch(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
               cm_user_t *userp)
{
    osi_hyper_t length;
    osi_hyper_t base;
    osi_hyper_t offset;
    osi_hyper_t end;
    osi_hyper_t fetched;
    osi_hyper_t tblocksize;
    long code;
    int mxheld = 0;
    cm_buf_t *bp = NULL;
    cm_req_t req;

    cm_InitReq(&req);

    /* Retries will be performed by the BkgDaemon thread if appropriate */
    req.flags |= CM_REQ_NORETRY;

    fetched.LowPart = 0;
    fetched.HighPart = 0;
    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    base.LowPart = p1;
    base.HighPart = p2;
    length.LowPart = p3;
    length.HighPart = p4;

    end = LargeIntegerAdd(base, length);

    osi_Log3(afsd_logp, "Starting BKG prefetch scp 0x%p, base 0x%x:%x", scp, p2, p1);

    for ( code = 0, offset = base;
          code == 0 && LargeIntegerLessThan(offset, end);
          offset = LargeIntegerAdd(offset, tblocksize) )
    {
        if (mxheld) {
            lock_ReleaseMutex(&scp->mx);
            mxheld = 0;
        }

        lock_ObtainRead(&scp->bufCreateLock);
        code = buf_Get(scp, &offset, &bp);
        lock_ReleaseRead(&scp->bufCreateLock);
        if (code)
            break;

        if (bp->cmFlags & CM_BUF_CMFETCHING) {
            /* skip this buffer as another thread is already fetching it */
            buf_Release(bp);
            bp = NULL;
            continue;
        }

        if (!mxheld) {
            lock_ObtainMutex(&scp->mx);
            mxheld = 1;
        }

        code = cm_GetBuffer(scp, bp, NULL, userp, &req);
        if (code == 0)
            fetched = LargeIntegerAdd(fetched, tblocksize);
        buf_Release(bp);
        bp = NULL;
    }

    if (!mxheld) {
        lock_ObtainMutex(&scp->mx);
        mxheld = 1;
    }
    cm_ClearPrefetchFlag(LargeIntegerGreaterThanZero(fetched) ? 0 : code,
                         scp, &base, &fetched);
    lock_ReleaseMutex(&scp->mx);

    osi_Log4(afsd_logp, "Ending BKG prefetch scp 0x%p, code %d bytes 0x%x:%x",
             scp, code, fetched.HighPart, fetched.LowPart);

    return 0;
}

/* a read was issued to offsetp, and we have to determine whether we should
 * do a prefetch of the next chunk.
 */
void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp,
                         cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    osi_hyper_t realBase;
    osi_hyper_t readBase;
    osi_hyper_t readLength;

    readBase = *offsetp;
    /* round up to chunk boundary */
    readBase.LowPart += (cm_chunkSize-1);
    readBase.LowPart &= (-cm_chunkSize);

    readLength = ConvertLongToLargeInteger(cm_chunkSize);

    lock_ObtainMutex(&scp->mx);
    if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
        || LargeIntegerLessThanOrEqualTo(readBase, scp->prefetch.base)) {
        lock_ReleaseMutex(&scp->mx);
        return;
    }
    scp->flags |= CM_SCACHEFLAG_PREFETCHING;

    /* start the scan at the latter of the end of this read or
     * the end of the last fetched region.
     */
    if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
        readBase = scp->prefetch.end;

    lock_ReleaseMutex(&scp->mx);

    code = cm_CheckFetchRange(scp, &readBase, &readLength, userp, reqp,
                              &realBase);
    if (code)
        return;         /* can't find something to prefetch */

    osi_Log2(afsd_logp, "BKG Prefetch request scp 0x%p, base 0x%x",
             scp, realBase.LowPart);

    cm_QueueBKGRequest(scp, cm_BkgPrefetch,
                       realBase.LowPart, realBase.HighPart,
                       readLength.LowPart, readLength.HighPart,
                       userp);
}
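
/* The rounding in cm_ConsiderPrefetch relies on cm_chunkSize being a
 * power of two: adding (cm_chunkSize - 1) and masking with -cm_chunkSize
 * (== ~(cm_chunkSize - 1) in two's complement) rounds LowPart up to the
 * next chunk boundary.  For example, with a 128K (0x20000) chunk, a read
 * at offset 0x12345 produces readBase 0x20000, the base of the next chunk
 * to consider for prefetching.
 */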

/* scp must be locked; temporarily unlocked during processing.
 * If returns 0, returns buffers held in biop, and with
 * CM_BUF_CMSTORING set.
 *
 * Caller *must* set CM_BUF_WRITING and reset the over.hEvent field if the
 * buffer is ever unlocked before CM_BUF_DIRTY is cleared.  And if
 * CM_BUF_WRITING is ever viewed by anyone, then it must be cleared, sleepers
 * must be woken, and the event must be set when the I/O is done.  All of this
 * is required so that buf_WaitIO synchronizes properly with the buffer as it
 * is being written out.
 */
long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
                       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
{
    cm_buf_t *bufp;
    osi_queueData_t *qdp;
    osi_hyper_t thyper;
    osi_hyper_t tbase;
    osi_hyper_t scanStart;      /* where to start scan for dirty pages */
    osi_hyper_t scanEnd;        /* where to stop scan for dirty pages */
    osi_hyper_t firstModOffset; /* offset of first modified page in range */
    long temp;
    long code;
    long flags;                 /* flags to cm_SyncOp */

    /* clear things out */
    biop->scp = scp;            /* do not hold; held by caller */
    biop->offset = *inOffsetp;
    biop->length = 0;
    biop->bufListp = NULL;
    biop->bufListEndp = NULL;
    biop->reserved = 0;

    /* reserve a chunk's worth of buffers */
    lock_ReleaseMutex(&scp->mx);
    buf_ReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
    lock_ObtainMutex(&scp->mx);
    biop->reserved = 1;

    bufp = NULL;
    for (temp = 0; temp < inSize; temp += cm_data.buf_blockSize) {
        thyper = ConvertLongToLargeInteger(temp);
        tbase = LargeIntegerAdd(*inOffsetp, thyper);

        bufp = buf_Find(scp, &tbase);
        if (bufp) {
            /* get buffer mutex and scp mutex safely */
            lock_ReleaseMutex(&scp->mx);
            lock_ObtainMutex(&bufp->mx);
            lock_ObtainMutex(&scp->mx);

            flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_STOREDATA | CM_SCACHESYNC_BUFLOCKED;
            code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
            if (code) {
                lock_ReleaseMutex(&bufp->mx);
                buf_Release(bufp);
                bufp = NULL;
                buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
                return code;
            }

            /* if the buffer is dirty, we're done */
            if (bufp->flags & CM_BUF_DIRTY) {
                osi_assertx(!(bufp->flags & CM_BUF_WRITING),
                            "WRITING w/o CMSTORING in SetupStoreBIOD");
                bufp->flags |= CM_BUF_WRITING;
                break;
            }

            /* this buffer is clean, so there's no reason to process it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
        }
    }

    /* if we get here, if bufp is null, we didn't find any dirty buffers
     * that weren't already being stored back, so we just quit now.
     */
    if (!bufp) {
        return 0;
    }

    /* don't need buffer mutex any more */
    lock_ReleaseMutex(&bufp->mx);

    /* put this element in the list */
    qdp = osi_QDAlloc();
    osi_SetQData(qdp, bufp);
    /* don't have to hold bufp, since held by buf_Find above */
    osi_QAddH((osi_queue_t **) &biop->bufListp,
              (osi_queue_t **) &biop->bufListEndp,
              &qdp->q);
    biop->length = cm_data.buf_blockSize;
    firstModOffset = bufp->offset;
    biop->offset = firstModOffset;
    bufp = NULL;        /* this buffer and reference added to the queue */

    /* compute the window surrounding *inOffsetp of size cm_chunkSize */
    scanStart = *inOffsetp;
    scanStart.LowPart &= (-cm_chunkSize);
    thyper = ConvertLongToLargeInteger(cm_chunkSize);
    scanEnd = LargeIntegerAdd(scanStart, thyper);

    flags = CM_SCACHESYNC_GETSTATUS
        | CM_SCACHESYNC_STOREDATA
        | CM_SCACHESYNC_BUFLOCKED
        | CM_SCACHESYNC_NOWAIT;
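
    /* CM_SCACHESYNC_NOWAIT applies to the neighbor scans below: the first
     * dirty buffer has already been secured above, so if a neighboring
     * buffer cannot be synchronized immediately we simply stop growing
     * the transfer instead of blocking the store.
     */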

    /* start by looking backwards until scanStart */
    /* hyper version of cm_data.buf_blockSize */
    thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    tbase = LargeIntegerSubtract(firstModOffset, thyper);
    while (LargeIntegerGreaterThanOrEqualTo(tbase, scanStart)) {
        /* see if we can find the buffer */
        bufp = buf_Find(scp, &tbase);
        if (!bufp)
            break;

        /* try to lock it, and quit if we can't (simplifies locking) */
        lock_ReleaseMutex(&scp->mx);
        code = lock_TryMutex(&bufp->mx);
        lock_ObtainMutex(&scp->mx);
        if (code == 0) {
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        if (!(bufp->flags & CM_BUF_DIRTY)) {
            /* buffer is clean, so we shouldn't add it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        /* don't need buffer mutex any more */
        lock_ReleaseMutex(&bufp->mx);

        /* we have a dirty buffer ready for storing.  Add it to the tail
         * of the list, since it immediately precedes all of the disk
         * addresses we've already collected.
         */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, bufp);
        /* no buf_hold necessary, since we have it held from buf_Find */
        osi_QAddT((osi_queue_t **) &biop->bufListp,
                  (osi_queue_t **) &biop->bufListEndp,
                  &qdp->q);
        bufp = NULL;            /* added to the queue */

        /* update biod info describing the transfer */
        biop->offset = LargeIntegerSubtract(biop->offset, thyper);
        biop->length += cm_data.buf_blockSize;

        /* update loop pointer */
        tbase = LargeIntegerSubtract(tbase, thyper);
    }   /* while loop looking for pages preceding the one we found */

    /* now, find later dirty, contiguous pages, and add them to the list */
    /* hyper version of cm_data.buf_blockSize */
    thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    tbase = LargeIntegerAdd(firstModOffset, thyper);
    while (LargeIntegerLessThan(tbase, scanEnd)) {
        /* see if we can find the buffer */
        bufp = buf_Find(scp, &tbase);
        if (!bufp)
            break;

        /* try to lock it, and quit if we can't (simplifies locking) */
        lock_ReleaseMutex(&scp->mx);
        code = lock_TryMutex(&bufp->mx);
        lock_ObtainMutex(&scp->mx);
        if (code == 0) {
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        if (!(bufp->flags & CM_BUF_DIRTY)) {
            /* buffer is clean, so we shouldn't add it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        /* don't need buffer mutex any more */
        lock_ReleaseMutex(&bufp->mx);

        /* we have a dirty buffer ready for storing.  Add it to the head
         * of the list, since it immediately follows all of the disk
         * addresses we've already collected.
         */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, bufp);
        /* no buf_hold necessary, since we have it held from buf_Find */
        osi_QAddH((osi_queue_t **) &biop->bufListp,
                  (osi_queue_t **) &biop->bufListEndp,
                  &qdp->q);
        bufp = NULL;            /* added to the queue */

        /* update biod info describing the transfer */
        biop->length += cm_data.buf_blockSize;

        /* update loop pointer */
        tbase = LargeIntegerAdd(tbase, thyper);
    }   /* while loop looking for pages following the first page we found */

    /* finally, we're done */
    return 0;
}

/* scp must be locked; temporarily unlocked during processing.
 * If returns 0, returns buffers held in biop, and with
 * CM_BUF_CMFETCHING flags set.
 * If an error is returned, we don't return any buffers.
 */
long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
                       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    cm_buf_t *tbp;
    osi_hyper_t tblocksize;             /* a long long temp variable */
    osi_hyper_t pageBase;               /* base offset we're looking at */
    osi_queueData_t *qdp;               /* one temp queue structure */
    osi_queueData_t *tqdp;              /* another temp queue structure */
    long collected;                     /* how many bytes have been collected */
    int isFirst;
    long flags;
    osi_hyper_t fileSize;               /* the # of bytes in the file */
    osi_queueData_t *heldBufListp;      /* we hold all buffers in this list */
    osi_queueData_t *heldBufListEndp;   /* first one */
    int reserving;

    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);

    biop->scp = scp;                    /* do not hold; held by caller */
    biop->offset = *offsetp;
    /* null out the list of buffers */
    biop->bufListp = biop->bufListEndp = NULL;
    biop->reserved = 0;

    /* first lookup the file's length, so we know when to stop */
    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
    if (code)
        return code;

    /* copy out size, since it may change */
    fileSize = scp->serverLength;

    lock_ReleaseMutex(&scp->mx);

    pageBase = *offsetp;
    collected = pageBase.LowPart & (cm_chunkSize - 1);
    heldBufListp = NULL;
    heldBufListEndp = NULL;

    /*
     * Obtaining buffers can cause dirty buffers to be recycled, which
     * can cause a storeback, so cannot be done while we have buffers
     * reserved.
     *
     * To get around this, we get buffers twice.  Before reserving buffers,
     * we obtain and release each one individually.  After reserving
     * buffers, we try to obtain them again, but only by lookup, not by
     * recycling.  If a buffer has gone away while we were waiting for
     * the others, we just use whatever buffers we already have.
     *
     * On entry to this function, we are already holding a buffer, so we
     * can't wait for reservation.  So we call buf_TryReserveBuffers()
     * instead.  Not only that, we can't really even call buf_Get(), for
     * the same reason.  We can't avoid that, though.  To avoid deadlock
     * we allow only one thread to be executing the buf_Get()-buf_Release()
     * sequence at a time.
     */

    // lock_ObtainMutex(&cm_bufGetMutex);
    /* first hold all buffers, since we can't hold any locks in buf_Get */
    while (1) {
        /* stop at chunk boundary */
        if (collected >= cm_chunkSize)
            break;

        /* see if the next page would be past EOF */
        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
            break;

        lock_ObtainRead(&scp->bufCreateLock);
        code = buf_Get(scp, &pageBase, &tbp);
        lock_ReleaseRead(&scp->bufCreateLock);
        if (code) {
            //lock_ReleaseMutex(&cm_bufGetMutex);
            lock_ObtainMutex(&scp->mx);
            cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
            return code;
        }

        buf_Release(tbp);
        tbp = NULL;

        pageBase = LargeIntegerAdd(tblocksize, pageBase);
        collected += cm_data.buf_blockSize;
    }

    /* reserve a chunk's worth of buffers if possible */
    reserving = buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);

    // lock_ReleaseMutex(&cm_bufGetMutex);

    pageBase = *offsetp;
    collected = pageBase.LowPart & (cm_chunkSize - 1);

    /* now hold all buffers, if they are still there */
    while (1) {
        /* stop at chunk boundary */
        if (collected >= cm_chunkSize)
            break;

        /* see if the next page would be past EOF */
        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
            break;

        tbp = buf_Find(scp, &pageBase);
        if (!tbp)
            break;

        /* add the buffer to the list */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, tbp);
        osi_QAddH((osi_queue_t **)&heldBufListp,
                  (osi_queue_t **)&heldBufListEndp,
                  &qdp->q);
        /* leave tbp held (from buf_Get) */

        if (!reserving)
            break;

        collected += cm_data.buf_blockSize;
        pageBase = LargeIntegerAdd(tblocksize, pageBase);
    }

    /* look at each buffer, adding it into the list if it looks idle and
     * filled with old data.  One special case: wait for idle if it is the
     * first buffer since we really need that one for our caller to make
     * any progress.
     */
    isFirst = 1;
    collected = 0;              /* now count how many we'll really use */
    for (tqdp = heldBufListEndp;
         tqdp;
         tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
        /* get a ptr to the held buffer */
        tbp = osi_GetQData(tqdp);
        pageBase = tbp->offset;

        /* now lock the buffer lock */
        lock_ObtainMutex(&tbp->mx);
        lock_ObtainMutex(&scp->mx);

        /* don't bother fetching over data that is already current */
        if (tbp->dataVersion == scp->dataVersion) {
            /* we don't need this buffer, since it is current */
            lock_ReleaseMutex(&scp->mx);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        flags = CM_SCACHESYNC_FETCHDATA | CM_SCACHESYNC_BUFLOCKED;
        if (!isFirst)
            flags |= CM_SCACHESYNC_NOWAIT;

        /* wait for the buffer to serialize, if required.  Doesn't
         * release the scp or buffer lock(s) if NOWAIT is specified.
         */
        code = cm_SyncOp(scp, tbp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&scp->mx);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        /* don't fetch over dirty buffers */
        if (tbp->flags & CM_BUF_DIRTY) {
            cm_SyncOpDone(scp, tbp, flags);
            lock_ReleaseMutex(&scp->mx);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        lock_ReleaseMutex(&scp->mx);
        lock_ReleaseMutex(&tbp->mx);

        /* add the buffer to the list */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, tbp);
        osi_QAddH((osi_queue_t **)&biop->bufListp,
                  (osi_queue_t **)&biop->bufListEndp,
                  &qdp->q);
        buf_Hold(tbp);

        /* from now on, a failure just stops our collection process, but
         * we still do the I/O to whatever we've already managed to collect.
         */
        isFirst = 0;
        collected += cm_data.buf_blockSize;
    }

    /* now, we've held in biop->bufListp all the buffers we're really
     * interested in.  We also have holds left from heldBufListp, and we
     * now release those holds on the buffers.
     */
    for (qdp = heldBufListp; qdp; qdp = tqdp) {
        tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
        tbp = osi_GetQData(qdp);
        osi_QRemoveHT((osi_queue_t **) &heldBufListp,
                      (osi_queue_t **) &heldBufListEndp,
                      &qdp->q);
        osi_QDFree(qdp);
        buf_Release(tbp);
        tbp = NULL;
    }

    /* Caller expects this */
    lock_ObtainMutex(&scp->mx);

    /* if we got a failure setting up the first buffer, then we don't have
     * any side effects yet, and we also have failed an operation that the
     * caller requires to make any progress.  Give up now.
     */
    if (code && isFirst) {
        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
        return code;
    }

    /* otherwise, we're still OK, and should just return the I/O setup we've
     * got.
     */
    biop->length = collected;
    biop->reserved = reserving;
    return 0;
}

/* release a bulk I/O structure that was setup by cm_SetupFetchBIOD or by
 * cm_SetupStoreBIOD
 */
void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore, int failed)
{
    cm_scache_t *scp;           /* do not release; not held in biop */
    cm_buf_t *bufp;
    osi_queueData_t *qdp;
    osi_queueData_t *nqdp;
    int flags;

    /* Give back reserved buffers */
    if (biop->reserved)
        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);

    if (isStore)
        flags = CM_SCACHESYNC_STOREDATA;
    else
        flags = CM_SCACHESYNC_FETCHDATA;

    scp = biop->scp;
    if (biop->bufListp) {
        for (qdp = biop->bufListp; qdp; qdp = nqdp) {
            /* lookup next guy first, since we're going to free this one */
            nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);

            /* extract buffer and free queue data */
            bufp = osi_GetQData(qdp);
            osi_QRemoveHT((osi_queue_t **) &biop->bufListp,
                          (osi_queue_t **) &biop->bufListEndp,
                          &qdp->q);
            osi_QDFree(qdp);

            /* now, mark I/O as done, unlock the buffer and release it */
            lock_ObtainMutex(&bufp->mx);
            lock_ObtainMutex(&scp->mx);
            cm_SyncOpDone(scp, bufp, flags);

            /* turn off writing and wakeup users */
            if (isStore) {
                if (bufp->flags & CM_BUF_WAITING) {
                    osi_Log2(afsd_logp, "cm_ReleaseBIOD Waking [scp 0x%p] bp 0x%p", scp, bufp);
                    osi_Wakeup((LONG_PTR) bufp);
                }
                if (failed)
                    bufp->flags &= ~CM_BUF_WRITING;
                else {
                    bufp->flags &= ~(CM_BUF_WRITING | CM_BUF_DIRTY);
                    bufp->dirty_offset = bufp->dirty_length = 0;
                }
            }

            lock_ReleaseMutex(&scp->mx);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
        }
    } else {
        lock_ObtainMutex(&scp->mx);
        cm_SyncOpDone(scp, NULL, flags);
        lock_ReleaseMutex(&scp->mx);
    }

    /* clean things out */
    biop->bufListp = NULL;
    biop->bufListEndp = NULL;
}
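
/* A note on lock ordering visible in this file: blocking acquisition of a
 * buffer mutex (bufp->mx) happens before scp->mx is (re)acquired, and code
 * that already holds scp->mx only polls buffer mutexes with lock_TryMutex
 * (as in cm_HaveBuffer).  cm_ReleaseBIOD above follows the same
 * buffer-then-scache order, which keeps the two mutex classes
 * deadlock-free.
 */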

/* Fetch a buffer.  Called with scp locked.
 * The scp is locked on return.
 */
long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *userp,
                  cm_req_t *reqp)
{
    long code;
    afs_int32 nbytes;                   /* bytes in transfer */
    afs_int32 nbytes_hi = 0;            /* high-order 32 bits of bytes in transfer */
    afs_int64 length_found = 0;
    long rbytes;                        /* bytes in rx_Read call */
    long temp;
    AFSFetchStatus afsStatus;
    AFSCallBack callback;
    AFSVolSync volSync;
    char *bufferp;
    cm_buf_t *tbufp;                    /* buf we're filling */
    osi_queueData_t *qdp;               /* q element we're scanning */
    AFSFid tfid;
    struct rx_call *callp;
    struct rx_connection *rxconnp;
    cm_bulkIO_t biod;                   /* bulk IO descriptor */
    cm_conn_t *connp;
    int getroot;
    afs_int32 t1, t2;
    int require_64bit_ops = 0;

    /* now, the buffer may or may not be filled with good data (buf_GetNew
     * drops lots of locks, and may indeed return a properly initialized
     * buffer, although more likely it will just return a new, empty, buffer.
     */

#ifdef AFS_FREELANCE_CLIENT

    // yj: if they're trying to get the /afs directory, we need to
    // handle it differently, since it's local rather than on any
    // server

    getroot = (scp==cm_data.rootSCachep);
    if (getroot)
        osi_Log1(afsd_logp,"GetBuffer returns cm_data.rootSCachep=%x",cm_data.rootSCachep);
#endif

    if (cm_HaveCallback(scp) && bufp->dataVersion == scp->dataVersion) {
        /* We already have this buffer, don't do extra work */
        return 0;
    }

    cm_AFSFidFromFid(&tfid, &scp->fid);

    code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, userp, reqp);
    if (code) {
        /* couldn't even get the first page setup properly */
        osi_Log1(afsd_logp, "GetBuffer: SetupFetchBIOD failure code %d", code);
        return code;
    }

    /* once we get here, we have the callback in place, we know that no one
     * is fetching the data now.  Check one last time that we still have
     * the wrong data, and then fetch it if we're still wrong.
     *
     * We can lose a race condition and end up with biod.length zero, in
     * which case we just retry.
     */
    if (bufp->dataVersion == scp->dataVersion || biod.length == 0) {
        if ((bufp->dataVersion == -1 || bufp->dataVersion < scp->dataVersion) &&
            LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->serverLength))
        {
            osi_Log3(afsd_logp, "Bad DVs %I64d, %I64d or length 0x%x",
                     bufp->dataVersion, scp->dataVersion, biod.length);

            if (bufp->dataVersion == -1)
                memset(bufp->datap, 0, cm_data.buf_blockSize);
            bufp->dataVersion = scp->dataVersion;
        }
        lock_ReleaseMutex(&scp->mx);
        cm_ReleaseBIOD(&biod, 0, 0);
        lock_ObtainMutex(&scp->mx);
        return 0;
    }

    lock_ReleaseMutex(&scp->mx);

    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
                                                ConvertLongToLargeInteger(biod.length)),
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }

#ifdef DISKCACHE95
    DPRINTF("cm_GetBuffer: fetching data scpDV=%I64d bufDV=%I64d scp=%x bp=%x dcp=%x\n",
            scp->dataVersion, bufp->dataVersion, scp, bufp, bufp->dcp);
#endif /* DISKCACHE95 */

#ifdef AFS_FREELANCE_CLIENT

    // yj code
    // if getroot then we don't need to make any calls
    // just return fake data

    if (cm_freelanceEnabled && getroot) {
        // setup the fake status
        afsStatus.InterfaceVersion = 0x1;
        afsStatus.FileType = 0x2;
        afsStatus.LinkCount = scp->linkCount;
        afsStatus.Length = cm_fakeDirSize;
        afsStatus.DataVersion = (afs_uint32)(cm_data.fakeDirVersion & 0xFFFFFFFF);
        afsStatus.Author = 0x1;
        afsStatus.Owner = 0x0;
        afsStatus.CallerAccess = 0x9;
        afsStatus.AnonymousAccess = 0x9;
        afsStatus.UnixModeBits = 0x1ff;
        afsStatus.ParentVnode = 0x1;
        afsStatus.ParentUnique = 0x1;
        afsStatus.ResidencyMask = 0;
        afsStatus.ClientModTime = (afs_uint32)FakeFreelanceModTime;
        afsStatus.ServerModTime = (afs_uint32)FakeFreelanceModTime;
        afsStatus.Group = 0;
        afsStatus.SyncCounter = 0;
        afsStatus.dataVersionHigh = (afs_uint32)(cm_data.fakeDirVersion >> 32);
        afsStatus.lockCount = 0;
        afsStatus.Length_hi = 0;
        afsStatus.errorCode = 0;

        // once we're done setting up the status info,
        // we just fill the buffer pages with fake data
        // from cm_FakeRootDir. Extra pages are set to
        // 0.

        lock_ObtainMutex(&cm_Freelance_Lock);
        t1 = bufp->offset.LowPart;
        qdp = biod.bufListEndp;
        while (qdp) {
            tbufp = osi_GetQData(qdp);
            bufferp=tbufp->datap;
            memset(bufferp, 0, cm_data.buf_blockSize);
            t2 = cm_fakeDirSize - t1;
            if (t2> (afs_int32)cm_data.buf_blockSize)
                t2=cm_data.buf_blockSize;
            if (t2 > 0) {
                memcpy(bufferp, cm_FakeRootDir+t1, t2);
            } else {
                t2 = 0;
            }
            t1 += t2;
            qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
        }
        lock_ReleaseMutex(&cm_Freelance_Lock);

        // once we're done, we skip over the part of the
        // code that does the ACTUAL fetching of data for
        // real files

        goto fetchingcompleted;
    }
#endif /* AFS_FREELANCE_CLIENT */

    /* now make the call */
    do {
        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

        rxconnp = cm_GetRxConn(connp);
        callp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);

#ifdef AFS_LARGEFILES
        nbytes = nbytes_hi = 0;

        if (SERVERHAS64BIT(connp)) {
            osi_Log4(afsd_logp, "CALL FetchData64 scp 0x%p, off 0x%x:%08x, size 0x%x",
                     scp, biod.offset.HighPart, biod.offset.LowPart, biod.length);

            code = StartRXAFS_FetchData64(callp, &tfid, biod.offset.QuadPart, biod.length);

            if (code == 0) {
                temp = rx_Read(callp, (char *) &nbytes_hi, sizeof(afs_int32));
                if (temp == sizeof(afs_int32)) {
                    nbytes_hi = ntohl(nbytes_hi);
                } else {
                    nbytes_hi = 0;
                    code = callp->error;
                    rx_EndCall(callp, code);
                    callp = NULL;
                }
            }
        }

        if (code == RXGEN_OPCODE || !SERVERHAS64BIT(connp)) {
            if (require_64bit_ops) {
                osi_Log0(afsd_logp, "Skipping FetchData. Operation requires FetchData64");
                code = CM_ERROR_TOOBIG;
            } else {
                if (!callp) {
                    rxconnp = cm_GetRxConn(connp);
                    callp = rx_NewCall(rxconnp);
                    rx_PutConnection(rxconnp);
                }

                osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
                         scp, biod.offset.LowPart, biod.length);

                code = StartRXAFS_FetchData(callp, &tfid, biod.offset.LowPart,
                                            biod.length);

                SET_SERVERHASNO64BIT(connp);
            }
        }

        if (code == 0) {
            temp = rx_Read(callp, (char *)&nbytes, sizeof(afs_int32));
            if (temp == sizeof(afs_int32)) {
                nbytes = ntohl(nbytes);
                FillInt64(length_found, nbytes_hi, nbytes);
                if (length_found > biod.length)
                    code = (callp->error < 0) ? callp->error : -1;
            } else {
                code = (callp->error < 0) ? callp->error : -1;
            }
        }
        /* for the moment, nbytes_hi will always be 0 if code == 0
           because biod.length is a 32-bit quantity. */
#else
        osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
                 scp, biod.offset.LowPart, biod.length);

        code = StartRXAFS_FetchData(callp, &tfid, biod.offset.LowPart,
                                    biod.length);

        /* now copy the data out of the pipe and put it in the buffer */
        if (code == 0) {
            temp = rx_Read(callp, (char *)&nbytes, sizeof(afs_int32));
            if (temp == sizeof(afs_int32)) {
                nbytes = ntohl(nbytes);
                if (nbytes > biod.length)
                    code = (callp->error < 0) ? callp->error : -1;
            }
            else
                code = (callp->error < 0) ? callp->error : -1;
        }
#endif

        if (code == 0) {
            qdp = biod.bufListEndp;
            if (qdp) {
                tbufp = osi_GetQData(qdp);
                bufferp = tbufp->datap;
            }
            else
                bufferp = NULL;
            /* fill nbytes of data from the pipe into the pages.
             * When we stop, qdp will point at the last page we're
             * dealing with, and bufferp will tell us where we
             * stopped.  We'll need this info below when we clear
             * the remainder of the last page out (and potentially
             * clear later pages out, if we fetch past EOF).
             */
            while (nbytes > 0) {
                /* assert that there are still more buffers;
                 * our check above for nbytes being less than
                 * biod.length should ensure this.
                 */
                osi_assertx(bufferp != NULL, "null cm_buf_t");

                /* read rbytes of data */
                rbytes = (nbytes > cm_data.buf_blockSize? cm_data.buf_blockSize : nbytes);
                temp = rx_Read(callp, bufferp, rbytes);
                if (temp < rbytes) {
                    code = (callp->error < 0) ? callp->error : -1;
                    break;
                }

                /* allow read-while-fetching.
                 * if this is the last buffer, clear the
                 * PREFETCHING flag, so the reader waiting for
                 * this buffer will start a prefetch.
                 */
                tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
                lock_ObtainMutex(&scp->mx);
                if (scp->flags & CM_SCACHEFLAG_WAITING) {
                    osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
                    osi_Wakeup((LONG_PTR) &scp->flags);
                }
                if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
                    osi_hyper_t tlength = ConvertLongToLargeInteger(biod.length);
                    *cpffp = 1;
                    cm_ClearPrefetchFlag(0, scp, &biod.offset, &tlength);
                }
                lock_ReleaseMutex(&scp->mx);

                /* and adjust counters */
                nbytes -= temp;

                /* and move to the next buffer */
                if (nbytes != 0) {
                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                    if (qdp) {
                        tbufp = osi_GetQData(qdp);
                        bufferp = tbufp->datap;
                    }
                    else
                        bufferp = NULL;
                } else
                    bufferp += temp;
            }

            /* zero out remainder of last pages, in case we are
             * fetching past EOF.  We were fetching an integral #
             * of pages, but stopped, potentially in the middle of
             * a page.  Zero the remainder of that page, and then
             * all of the rest of the pages.
             */
            /* bytes fetched */
            osi_assertx((bufferp - tbufp->datap) < LONG_MAX, "data >= LONG_MAX");
            rbytes = (long) (bufferp - tbufp->datap);

            /* bytes left to zero */
            rbytes = cm_data.buf_blockSize - rbytes;
            while (qdp) {
                if (rbytes != 0)
                    memset(bufferp, 0, rbytes);
                qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                if (qdp == NULL)
                    break;
                tbufp = osi_GetQData(qdp);
                bufferp = tbufp->datap;
                /* bytes to clear in this page */
                rbytes = cm_data.buf_blockSize;
            }
        }

        if (code == 0) {
            if (SERVERHAS64BIT(connp))
                code = EndRXAFS_FetchData64(callp, &afsStatus, &callback, &volSync);
            else
                code = EndRXAFS_FetchData(callp, &afsStatus, &callback, &volSync);
        } else {
            if (SERVERHAS64BIT(connp))
                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData64 skipped due to error %d", code);
            else
                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error %d", code);
        }

        if (callp)
            code = rx_EndCall(callp, code);

        if (code == RXKADUNKNOWNKEY)
            osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");

        osi_Log0(afsd_logp, "CALL FetchData DONE");

    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));

  fetchingcompleted:
    code = cm_MapRPCError(code, reqp);

    lock_ObtainMutex(&scp->mx);

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_FETCHSTATUS);

    /* we know that no one else has changed the buffer, since we still have
     * the fetching flag on the buffers, and we have the scp locked again.
     * Copy in the version # into the buffer if we got code 0 back from the
     * read.
     */
    if (code == 0) {
        for (qdp = biod.bufListp;
             qdp;
             qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
            tbufp = osi_GetQData(qdp);
            tbufp->dataVersion = afsStatus.dataVersionHigh;
            tbufp->dataVersion <<= 32;
            tbufp->dataVersion |= afsStatus.DataVersion;

#ifdef DISKCACHE95
            /* write buffer out to disk cache */
            diskcache_Update(tbufp->dcp, tbufp->datap, cm_data.buf_blockSize,
                             tbufp->dataVersion);
#endif /* DISKCACHE95 */
        }
    }

    /* release scatter/gather I/O structure (buffers, locks) */
    lock_ReleaseMutex(&scp->mx);
    cm_ReleaseBIOD(&biod, 0, code);
    lock_ObtainMutex(&scp->mx);

    if (code == 0)
        cm_MergeStatus(NULL, scp, &afsStatus, &volSync, userp, 0);

    return code;
}