/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afs/param.h>

extern void afsi_log(char *pattern, ...);

#ifdef AFS_FREELANCE_CLIENT
extern osi_mutex_t cm_Freelance_Lock;
#endif

#ifdef AFS_LARGEFILES
/* we can access connp->serverp without holding a lock because it
   never changes once the connection is made. */
#define SERVERHAS64BIT(connp) (!((connp)->serverp->flags & CM_SERVERFLAG_NO64BIT))
#define SET_SERVERHASNO64BIT(connp) (cm_SetServerNo64Bit((connp)->serverp, TRUE))
#else
#define SERVERHAS64BIT(connp) (FALSE)
#define SET_SERVERHASNO64BIT(connp) (FALSE)
#endif
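/*
 * Usage note (added commentary): these macros implement a capability
 * probe.  A server is assumed to understand the 64-bit RPC variants
 * until it rejects one with RXGEN_OPCODE, at which point
 * SET_SERVERHASNO64BIT() latches that fact on the server object and
 * all later calls take the 32-bit path, e.g.:
 *
 *     if (SERVERHAS64BIT(connp))
 *         code = StartRXAFS_StoreData64(...);  // 64-bit offsets/lengths
 *     else
 *         code = StartRXAFS_StoreData(...);    // 32-bit only
 */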
/* functions called back from the buffer package when reading or writing data,
 * or when holding or releasing a vnode pointer.
 */
long cm_BufWrite(void *vscp, osi_hyper_t *offsetp, long length, long flags,
                 cm_user_t *userp, cm_req_t *reqp)
{
    /* store the data back from this buffer; the buffer is locked and held,
     * but the vnode involved isn't locked, yet.  It is held by its
     * reference from the buffer, which won't change until the buffer is
     * released by our caller.  Thus, we don't have to worry about holding
     * the vnode reference ourselves.
     */
    cm_scache_t *scp = vscp;
    long code, code1;
    long nbytes;
    long temp;
    long wbytes;
    afs_int32 save_nbytes;
    AFSFetchStatus outStatus;
    AFSStoreStatus inStatus;
    osi_hyper_t thyper;
    osi_hyper_t truncPos;
    osi_hyper_t t;
    AFSFid tfid;
    AFSVolSync volSync;
    cm_conn_t *connp;
    struct rx_call *rxcallp;
    struct rx_connection *rxconnp;
    osi_queueData_t *qdp;
    cm_buf_t *bufp;
    char *bufferp;
    cm_bulkIO_t biod;		/* bulk IO descriptor */
    int require_64bit_ops = 0;
    int call_was_64bit = 0;
    osi_assertx(userp != NULL, "null cm_user_t");
    osi_assertx(scp != NULL, "null cm_scache_t");
    /* now, the buffer may or may not be filled with good data (buf_GetNew
     * drops lots of locks, and may indeed return a properly initialized
     * buffer, although more likely it will just return a new, empty, buffer).
     */
    lock_ObtainWrite(&scp->rw);
    if (scp->flags & CM_SCACHEFLAG_DELETED) {
        lock_ReleaseWrite(&scp->rw);
        return CM_ERROR_NOSUCHFILE;
    }

    cm_AFSFidFromFid(&tfid, &scp->fid);

    /* Serialize StoreData RPCs; for rationale see cm_scache.c */
    (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);

    code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
    if (code) {
        osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
        cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
        lock_ReleaseWrite(&scp->rw);
        return code;
    }

    if (biod.length == 0) {
        osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
        cm_ReleaseBIOD(&biod, 1, 0, 1);	/* should be a NOOP */
        cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
        lock_ReleaseWrite(&scp->rw);
        return 0;
    }
    /* prepare the output status for the store */
    scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
    cm_StatusFromAttr(&inStatus, scp, NULL);
    truncPos = scp->length;
    if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
         && LargeIntegerLessThan(scp->truncPos, truncPos))
        truncPos = scp->truncPos;
    scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
    /* compute how many bytes to write from this buffer */
    thyper = LargeIntegerSubtract(scp->length, biod.offset);
    if (LargeIntegerLessThanZero(thyper)) {
        /* entire buffer is past EOF */
        nbytes = 0;
    }
    else {
        /* otherwise write out part of buffer before EOF, but not
         * more than bufferSize bytes.
         */
        if (LargeIntegerGreaterThan(thyper,
                                    ConvertLongToLargeInteger(biod.length))) {
            nbytes = biod.length;
        } else {
            /* if thyper is less than or equal to biod.length, then we
               can safely assume that the value fits in a long. */
            nbytes = thyper.LowPart;
        }
    }
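    /*
     * Worked example (added commentary; the numbers are illustrative):
     * with scp->length = 0x5800, biod.offset = 0x5000 and
     * biod.length = 0x1000, thyper = 0x800, so only the 0x800 bytes
     * that precede EOF are written and the rest of the buffer is
     * ignored.  A negative thyper means the whole buffer lies past
     * EOF and nothing is sent.
     */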
    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
                                                ConvertLongToLargeInteger(nbytes)),
                                ConvertLongToLargeInteger(LONG_MAX)) ||
        LargeIntegerGreaterThan(truncPos,
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }
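    /*
     * Added note: the store must use the 64-bit RPC whenever either the
     * end of the transfer (offset + nbytes) or the truncation position
     * no longer fits in a signed 32-bit value.  For example, a store at
     * offset 0xC0000000 (3 GB) exceeds LONG_MAX (0x7FFFFFFF) and cannot
     * be expressed in the 32-bit StoreData arguments.
     */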
    lock_ReleaseWrite(&scp->rw);

    /* now we're ready to do the store operation */
#ifdef AFS_LARGEFILES
    save_nbytes = nbytes;
#endif
    do {
        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

      retry:
        rxconnp = cm_GetRxConn(connp);
        rxcallp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);

#ifdef AFS_LARGEFILES
        if (SERVERHAS64BIT(connp)) {
            call_was_64bit = 1;

            osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData64 scp 0x%p, offset 0x%x:%08x, length 0x%x",
                     scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

            code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
                                          biod.offset.QuadPart,
                                          nbytes, truncPos.QuadPart);
            if (code)
                osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData64 FAILURE, code 0x%x", code);
            else
                osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData64 SUCCESS");
        } else {
            call_was_64bit = 0;

            if (require_64bit_ops) {
                osi_Log0(afsd_logp, "Skipping StartRXAFS_StoreData.  The operation requires large file support in the server.");
                code = CM_ERROR_TOOBIG;
            } else {
                osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
                         scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

                code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
                                            biod.offset.LowPart, nbytes, truncPos.LowPart);
                if (code)
                    osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
                else
                    osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
            }
        }
#else /* AFS_LARGEFILES */
        osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
                 scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

        code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
                                    biod.offset.LowPart, nbytes, truncPos.LowPart);
        if (code)
            osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
        else
            osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
#endif /* AFS_LARGEFILES */
        if (code == 0) {
            /* write the data from the list of buffers */
            qdp = NULL;
            while (nbytes > 0) {
                afs_uint32 buf_offset;

                if (qdp == NULL) {
                    qdp = biod.bufListEndp;
                    buf_offset = offsetp->LowPart % cm_data.buf_blockSize;
                } else {
                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                    buf_offset = 0;
                }
                osi_assertx(qdp != NULL, "null osi_queueData_t");
                bufp = osi_GetQData(qdp);
                bufferp = bufp->datap + buf_offset;
                wbytes = nbytes;
                if (wbytes > cm_data.buf_blockSize - buf_offset)
                    wbytes = cm_data.buf_blockSize - buf_offset;

                /* write out wbytes of data from bufferp */
                temp = rx_Write(rxcallp, bufferp, wbytes);
                if (temp != wbytes) {
                    osi_Log3(afsd_logp, "rx_Write failed bp 0x%p, %d != %d", bufp, temp, wbytes);
                    code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
                    break;
                } else {
                    osi_Log2(afsd_logp, "rx_Write succeeded bp 0x%p, %d", bufp, temp);
                }
                nbytes -= wbytes;
            }	/* while more bytes to write */
        }	/* if RPC started successfully */
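        /*
         * Added note: this is the standard Rx streaming pattern.  The
         * StoreData{,64} start call marshals the fixed arguments,
         * rx_Write streams exactly nbytes of payload, and the matching
         * EndRXAFS_StoreData{,64} below unmarshals the reply.  A short
         * rx_Write indicates a dead call, so the error is taken from
         * rxcallp->error rather than invented locally.
         */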
        if (code == 0) {
            if (call_was_64bit) {
                code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
                if (code)
                    osi_Log2(afsd_logp, "EndRXAFS_StoreData64 FAILURE scp 0x%p code %lX", scp, code);
                else
                    osi_Log0(afsd_logp, "EndRXAFS_StoreData64 SUCCESS");
            } else {
                code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
                if (code)
                    osi_Log2(afsd_logp, "EndRXAFS_StoreData FAILURE scp 0x%p code %lX", scp, code);
                else
                    osi_Log0(afsd_logp, "EndRXAFS_StoreData SUCCESS");
            }
        }

        code1 = rx_EndCall(rxcallp, code);
#ifdef AFS_LARGEFILES
        if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
            SET_SERVERHASNO64BIT(connp);
            qdp = NULL;
            nbytes = save_nbytes;
            goto retry;
        }
#endif
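        /*
         * Added note: RXGEN_OPCODE from either the call or rx_EndCall
         * means the file server does not implement StoreData64 at all.
         * The capability bit is cleared once per server and the store
         * is replayed immediately through the 32-bit path above;
         * nbytes is restored because the failed attempt may have
         * consumed it.
         */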
        /* Prefer StoreData error over rx_EndCall error */
        if (code == 0 && code1 != 0)
            code = code1;
    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
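    /*
     * Added note: cm_Analyze() implements the cache manager's generic
     * retry policy.  It inspects 'code', marks servers down or busy,
     * possibly selects another server for the volume, and returns
     * non-zero when the whole RPC should be reissued, which restarts
     * the do/while loop above.
     */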
    code = cm_MapRPCError(code, reqp);

    if (code)
        osi_Log2(afsd_logp, "CALL StoreData FAILURE scp 0x%p, code 0x%x", scp, code);
    else
        osi_Log1(afsd_logp, "CALL StoreData SUCCESS scp 0x%p", scp);

    /* now, clean up our state */
    lock_ObtainWrite(&scp->rw);

    cm_ReleaseBIOD(&biod, 1, code, 1);
    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);

    if (code == 0) {
        /* now, here's something a little tricky: in AFS 3, a dirty
         * length can't be directly stored; instead, a dirty chunk is
         * stored that sets the file's size (by writing and by using
         * the truncate-first option in the store call).
         *
         * At this point, we've just finished a store, and so the trunc
         * pos field is clean.  If the file's size at the server is at
         * least as big as we think it should be, then we turn off the
         * length dirty bit, since all the other dirty buffers must
         * precede this one in the file.
         *
         * The file's desired size shouldn't be smaller than what's
         * stored at the server now, since we just did the trunc pos
         * store.
         *
         * We have to turn off the length dirty bit as soon as we can,
         * so that we see updates made by other machines.
         */
        if (call_was_64bit) {
            t.LowPart = outStatus.Length;
            t.HighPart = outStatus.Length_hi;
        } else {
            t = ConvertLongToLargeInteger(outStatus.Length);
        }

        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
            scp->mask &= ~CM_SCACHEMASK_LENGTH;

        cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, reqp, CM_MERGEFLAG_STOREDATA);
    } else {
        if (code == CM_ERROR_SPACE)
            scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
        else if (code == CM_ERROR_QUOTA)
            scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
    }
    lock_ReleaseWrite(&scp->rw);

    return code;
}
/*
 * Truncate the file, by sending a StoreData RPC with zero length.
 *
 * Called with scp locked.  Releases and re-obtains the lock.
 */
long cm_StoreMini(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
    AFSFetchStatus outStatus;
    AFSStoreStatus inStatus;
    AFSVolSync volSync;
    AFSFid tfid;
    long code, code1;
    osi_hyper_t truncPos;
    osi_hyper_t t;
    cm_conn_t *connp;
    struct rx_call *rxcallp;
    struct rx_connection *rxconnp;
    int require_64bit_ops = 0;
    int call_was_64bit = 0;

    /* Serialize StoreData RPCs; for rationale see cm_scache.c */
    (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_STOREDATA_EXCL);
    /* prepare the output status for the store */
    inStatus.Mask = AFS_SETMODTIME;
    inStatus.ClientModTime = scp->clientModTime;
    scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;

    /* calculate truncation position */
    truncPos = scp->length;
    if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
         && LargeIntegerLessThan(scp->truncPos, truncPos))
        truncPos = scp->truncPos;
    scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
    if (LargeIntegerGreaterThan(truncPos,
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }
    lock_ReleaseWrite(&scp->rw);

    cm_AFSFidFromFid(&tfid, &scp->fid);

    /* now we're ready to do the store operation */
    do {
        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

      retry:
        rxconnp = cm_GetRxConn(connp);
        rxcallp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);

#ifdef AFS_LARGEFILES
        if (SERVERHAS64BIT(connp)) {
            call_was_64bit = 1;

            code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
                                          0, 0, truncPos.QuadPart);
        } else {
            call_was_64bit = 0;

            if (require_64bit_ops) {
                code = CM_ERROR_TOOBIG;
            } else {
                code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
                                            0, 0, truncPos.LowPart);
            }
        }
#else /* AFS_LARGEFILES */
        code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
                                    0, 0, truncPos.LowPart);
#endif /* AFS_LARGEFILES */

        if (code == 0) {
            if (call_was_64bit)
                code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
            else
                code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
        }
        code1 = rx_EndCall(rxcallp, code);

#ifdef AFS_LARGEFILES
        if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
            SET_SERVERHASNO64BIT(connp);
            goto retry;
        }
#endif

        /* prefer StoreData error over rx_EndCall error */
        if (code == 0 && code1 != 0)
            code = code1;
    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
    code = cm_MapRPCError(code, reqp);
    /* now, clean up our state */
    lock_ObtainWrite(&scp->rw);

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);

    if (code == 0) {
        /*
         * For explanation of handling of CM_SCACHEMASK_LENGTH,
         * see cm_BufWrite().
         */
        if (call_was_64bit) {
            t.HighPart = outStatus.Length_hi;
            t.LowPart = outStatus.Length;
        } else {
            t = ConvertLongToLargeInteger(outStatus.Length);
        }

        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
            scp->mask &= ~CM_SCACHEMASK_LENGTH;
        cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, reqp, CM_MERGEFLAG_STOREDATA);
    }

    return code;
}
long cm_BufRead(cm_buf_t *bufp, long nbytes, long *bytesReadp, cm_user_t *userp)
{
    *bytesReadp = cm_data.buf_blockSize;

    /* now return a code that means that I/O is done */
    return 0;
}

/* stabilize scache entry, and return with it locked so
 * it stays stable.
 */
long cm_BufStabilize(void *vscp, cm_user_t *userp, cm_req_t *reqp)
{
    cm_scache_t *scp = vscp;
    long code;

    lock_ObtainWrite(&scp->rw);
    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
    if (code) {
        lock_ReleaseWrite(&scp->rw);
        return code;
    }

    return 0;
}

/* undoes the work that cm_BufStabilize does: releases lock so things can change again */
long cm_BufUnstabilize(void *vscp, cm_user_t *userp)
{
    cm_scache_t *scp = vscp;

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);

    lock_ReleaseWrite(&scp->rw);

    /* always succeeds */
    return 0;
}

cm_buf_ops_t cm_bufOps = {
    cm_BufWrite,
    cm_BufRead,
    cm_BufStabilize,
    cm_BufUnstabilize
};

long cm_ValidateDCache(void)
{
    return buf_ValidateBuffers();
}

long cm_ShutdownDCache(void)
{
    return 0;
}

int cm_InitDCache(int newFile, long chunkSize, afs_uint64 nbuffers)
{
    return buf_Init(newFile, &cm_bufOps, nbuffers);
}
/* check to see if we have an up-to-date buffer.  The buffer must have
 * previously been obtained by calling buf_Get.
 *
 * Make sure we have a callback, and that the dataversion matches.
 *
 * Scp must be locked.
 *
 * Bufp *may* be locked.
 */
int cm_HaveBuffer(cm_scache_t *scp, cm_buf_t *bufp, int isBufLocked)
{
    int code;

    if (!cm_HaveCallback(scp))
        return 0;
    if ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
        return 1;
    if (bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow)
        return 1;
    if (!isBufLocked) {
        code = lock_TryMutex(&bufp->mx);
        if (code == 0) {
            /* don't have the lock, and can't lock it, then
             * return failure, since we can't tell whether the
             * buffer is current without inspecting it.
             */
            return 0;
        }
    }

    /* remember dirty flag for later */
    code = bufp->flags & CM_BUF_DIRTY;

    /* release lock if we obtained it here */
    if (!isBufLocked)
        lock_ReleaseMutex(&bufp->mx);

    /* if buffer was dirty, buffer is acceptable for use */
    if (code)
        return 1;
    else
        return 0;
}
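/*
 * Added summary: a buffer counts as current when any of the following
 * holds -- (1) it is mid-fetch but already fully populated
 * (CMFETCHING|CMFULLYFETCHED), (2) its data version falls inside
 * [scp->bufDataVersionLow, scp->dataVersion], or (3) it is dirty,
 * since dirty data must not be fetched over.  Everything else requires
 * a fresh fetch.
 */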
/* used when deciding whether to do a prefetch or not */
long cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, osi_hyper_t *length,
                        cm_user_t *userp, cm_req_t *reqp, osi_hyper_t *realBasep)
{
    long code;
    int stop;
    osi_hyper_t tbase;
    osi_hyper_t tlength;
    osi_hyper_t tblocksize;
    cm_buf_t *bp;

    /* now scan all buffers in the range, looking for any that look like
     * they need work.
     */
    tbase = *startBasep;
    tlength = *length;
    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);

    stop = 0;
    lock_ObtainWrite(&scp->rw);
    while (LargeIntegerGreaterThanZero(tlength)) {
        /* get callback so we can do a meaningful dataVersion comparison */
        code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
        if (code) {
            scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
            lock_ReleaseWrite(&scp->rw);
            return code;
        }

        if (LargeIntegerGreaterThanOrEqualTo(tbase, scp->length)) {
            /* we're past the end of file */
            break;
        }

        bp = buf_Find(scp, &tbase);
        /* We cheat slightly by not locking the bp mutex. */
        if (bp) {
            if ((bp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)) == 0
                 && (bp->dataVersion < scp->bufDataVersionLow || bp->dataVersion > scp->dataVersion))
                stop = 1;
            buf_Release(bp);
            bp = NULL;
        } else {
            stop = 1;
        }

        /* if this buffer is essentially guaranteed to require a fetch,
         * break out here and return this position.
         */
        if (stop)
            break;

        tbase = LargeIntegerAdd(tbase, tblocksize);
        tlength = LargeIntegerSubtract(tlength, tblocksize);
    }

    /* if we get here, either everything is fine or 'stop' stopped us at a
     * particular buffer in the range that definitely needs to be fetched.
     */
    if (stop == 0) {
        /* return non-zero code since realBasep won't be valid */
        scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
        code = -1;
    } else {
        /* successfully found a page that will need fetching */
        *realBasep = tbase;
        code = 0;
    }
    lock_ReleaseWrite(&scp->rw);

    return code;
}
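/*
 * Added note: the scan is a cheap pre-check, not a promise.  It walks
 * the range one cm_data.buf_blockSize block at a time and reports the
 * first block that has no buffer, or whose buffer is idle but stale;
 * the background daemon then starts its prefetch at *realBasep.
 */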
afs_int32 cm_BkgStore(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
                      cm_user_t *userp)
{
    osi_hyper_t toffset;
    long length;
    long code = 0;
    cm_req_t req;

    if (scp->flags & CM_SCACHEFLAG_DELETED) {
        osi_Log4(afsd_logp, "Skipping BKG store - Deleted scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
    } else {
        cm_InitReq(&req);

        /* Retries will be performed by the BkgDaemon thread if appropriate */
        req.flags |= CM_REQ_NORETRY;

        toffset.LowPart = p1;
        toffset.HighPart = p2;
        length = p3;

        osi_Log4(afsd_logp, "Starting BKG store scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);

        code = cm_BufWrite(scp, &toffset, length, /* flags */ 0, userp, &req);

        osi_Log4(afsd_logp, "Finished BKG store scp 0x%p, offset 0x%x:%08x, code 0x%x", scp, p2, p1, code);
    }

    /*
     * Keep the following list synchronized with the
     * error code list in cm_BkgDaemon
     */
    switch (code) {
    case CM_ERROR_TIMEDOUT:	/* or server restarting */
    case CM_ERROR_RETRY:
    case CM_ERROR_WOULDBLOCK:
    case CM_ERROR_ALLBUSY:
    case CM_ERROR_ALLDOWN:
    case CM_ERROR_ALLOFFLINE:
    case CM_ERROR_PARTIALWRITE:
        break;		/* cm_BkgDaemon will re-insert the request in the queue */
    default:
        lock_ObtainWrite(&scp->rw);
        cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
        lock_ReleaseWrite(&scp->rw);
    }

    return code;
}
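/*
 * Added note: background requests carry their 64-bit offset and length
 * split across the four 32-bit parameters; by the convention visible
 * in cm_ConsiderPrefetch below, p1/p2 are the low/high halves of the
 * offset and p3/p4 the low/high halves of the length, e.g.:
 *
 *     cm_QueueBKGRequest(scp, cm_BkgStore,
 *                        offset.LowPart, offset.HighPart,
 *                        length.LowPart, length.HighPart, userp);
 */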
/* Called with scp locked */
void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base, osi_hyper_t *length)
{
    osi_hyper_t end;

    if (code == 0) {
        end = LargeIntegerAdd(*base, *length);
        if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
            scp->prefetch.base = *base;
        if (LargeIntegerGreaterThan(end, scp->prefetch.end))
            scp->prefetch.end = end;
    }
    scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
}
/* do the prefetch.  if the prefetch fails, return 0 (success)
 * because there is no harm done.  */
afs_int32 cm_BkgPrefetch(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
                         cm_user_t *userp)
{
    osi_hyper_t length;
    osi_hyper_t base;
    osi_hyper_t offset;
    osi_hyper_t end;
    osi_hyper_t fetched;
    osi_hyper_t tblocksize;
    long code;
    int rxheld = 0;
    cm_buf_t *bp = NULL;
    cm_req_t req;

    cm_InitReq(&req);

    /* Retries will be performed by the BkgDaemon thread if appropriate */
    req.flags |= CM_REQ_NORETRY;

    fetched.LowPart = 0;
    fetched.HighPart = 0;
    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    base.LowPart = p1;
    base.HighPart = p2;
    length.LowPart = p3;
    length.HighPart = p4;

    end = LargeIntegerAdd(base, length);

    osi_Log3(afsd_logp, "Starting BKG prefetch scp 0x%p, base 0x%x:%x", scp, p2, p1);

    for ( code = 0, offset = base;
          code == 0 && LargeIntegerLessThan(offset, end);
          offset = LargeIntegerAdd(offset, tblocksize) )
    {
        if (rxheld) {
            lock_ReleaseWrite(&scp->rw);
            rxheld = 0;
        }

        code = buf_Get(scp, &offset, &req, &bp);
        if (code)
            break;

        if (bp->cmFlags & CM_BUF_CMFETCHING) {
            /* skip this buffer as another thread is already fetching it */
            buf_Release(bp);
            bp = NULL;
            continue;
        }

        if (!rxheld) {
            lock_ObtainWrite(&scp->rw);
            rxheld = 1;
        }

        code = cm_GetBuffer(scp, bp, NULL, userp, &req);
        if (code == 0)
            fetched = LargeIntegerAdd(fetched, tblocksize);
        buf_Release(bp);
        bp = NULL;
    }

    if (!rxheld) {
        lock_ObtainWrite(&scp->rw);
        rxheld = 1;
    }

    cm_ClearPrefetchFlag(LargeIntegerGreaterThanZero(fetched) ? 0 : code,
                         scp, &base, &fetched);
    lock_ReleaseWrite(&scp->rw);

    osi_Log4(afsd_logp, "Ending BKG prefetch scp 0x%p, code %d bytes 0x%x:%x",
             scp, code, fetched.HighPart, fetched.LowPart);

    return 0;
}
/* a read was issued to offsetp, and we have to determine whether we should
 * do a prefetch of the next chunk.
 */
void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp, afs_uint32 count,
                         cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    osi_hyper_t realBase;
    osi_hyper_t readBase;
    osi_hyper_t readLength;

    readBase = *offsetp;
    /* round up to chunk boundary */
    readBase.LowPart += (cm_chunkSize-1);
    readBase.LowPart &= (-cm_chunkSize);
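    /*
     * Added worked example: with cm_chunkSize = 0x100000 (1 MB), a read
     * at offset 0x123456 gives 0x123456 + 0xFFFFF = 0x223455, which
     * masked with -0x100000 (~0xFFFFF) yields 0x200000 -- the start of
     * the chunk after the one being read.  (Only LowPart is adjusted;
     * the arithmetic assumes chunk rounding never carries into
     * HighPart.)
     */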
    readLength = ConvertLongToLargeInteger(count);

    lock_ObtainWrite(&scp->rw);
    if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
         || LargeIntegerLessThanOrEqualTo(readBase, scp->prefetch.base)) {
        lock_ReleaseWrite(&scp->rw);
        return;
    }
    scp->flags |= CM_SCACHEFLAG_PREFETCHING;

    /* start the scan at the latter of the end of this read or
     * the end of the last fetched region.
     */
    if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
        readBase = scp->prefetch.end;

    lock_ReleaseWrite(&scp->rw);

    code = cm_CheckFetchRange(scp, &readBase, &readLength, userp, reqp,
                              &realBase);
    if (code)
        return;		/* can't find something to prefetch */

    osi_Log2(afsd_logp, "BKG Prefetch request scp 0x%p, base 0x%x",
             scp, realBase.LowPart);

    cm_QueueBKGRequest(scp, cm_BkgPrefetch,
                       realBase.LowPart, realBase.HighPart,
                       readLength.LowPart, readLength.HighPart,
                       userp);
}
/* scp must be locked; temporarily unlocked during processing.
 * If returns 0, returns buffers held in biop, and with
 * CM_BUF_CMSTORING set.
 *
 * Caller *must* set CM_BUF_WRITING and reset the over.hEvent field if the
 * buffer is ever unlocked before CM_BUF_DIRTY is cleared.  And if
 * CM_BUF_WRITING is ever viewed by anyone, then it must be cleared, sleepers
 * must be woken, and the event must be set when the I/O is done.  All of this
 * is required so that buf_WaitIO synchronizes properly with the buffer as it
 * is being written out.
 */
long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
                       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
{
    cm_buf_t *bufp;
    osi_queueData_t *qdp;
    osi_hyper_t thyper;
    osi_hyper_t tbase;
    osi_hyper_t scanStart;		/* where to start scan for dirty pages */
    osi_hyper_t scanEnd;		/* where to stop scan for dirty pages */
    osi_hyper_t firstModOffset;	/* offset of first modified page in range */
    long temp;
    long code;
    long flags;			/* flags to cm_SyncOp */

    /* clear things out */
    biop->scp = scp;			/* do not hold; held by caller */
    biop->offset = *inOffsetp;
    biop->length = 0;
    biop->bufListp = NULL;
    biop->bufListEndp = NULL;
    biop->reserved = 0;

    /* reserve a chunk's worth of buffers */
    lock_ReleaseWrite(&scp->rw);
    buf_ReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
    lock_ObtainWrite(&scp->rw);
    biop->reserved = 1;
    bufp = NULL;
    for (temp = 0; temp < inSize; temp += cm_data.buf_blockSize) {
        thyper = ConvertLongToLargeInteger(temp);
        tbase = LargeIntegerAdd(*inOffsetp, thyper);

        bufp = buf_Find(scp, &tbase);
        if (bufp) {
            /* get buffer mutex and scp mutex safely */
            lock_ReleaseWrite(&scp->rw);
            lock_ObtainMutex(&bufp->mx);
            lock_ObtainWrite(&scp->rw);

            flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_STOREDATA | CM_SCACHESYNC_BUFLOCKED;
            code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
            if (code) {
                lock_ReleaseMutex(&bufp->mx);
                buf_Release(bufp);
                bufp = NULL;
                buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
                return code;
            }

            /* if the buffer is dirty, we're done */
            if (bufp->flags & CM_BUF_DIRTY) {
                osi_assertx(!(bufp->flags & CM_BUF_WRITING),
                            "WRITING w/o CMSTORING in SetupStoreBIOD");
                bufp->flags |= CM_BUF_WRITING;
                break;
            }

            /* this buffer is clean, so there's no reason to process it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
        }
    }

    /* if we get here, if bufp is null, we didn't find any dirty buffers
     * that weren't already being stored back, so we just quit now.
     */
    if (!bufp) {
        return 0;
    }

    /* don't need buffer mutex any more */
    lock_ReleaseMutex(&bufp->mx);

    /* put this element in the list */
    qdp = osi_QDAlloc();
    osi_SetQData(qdp, bufp);
    /* don't have to hold bufp, since held by buf_Find above */
    osi_QAddH((osi_queue_t **) &biop->bufListp,
              (osi_queue_t **) &biop->bufListEndp,
              &qdp->q);
    biop->length = cm_data.buf_blockSize;
    firstModOffset = bufp->offset;
    biop->offset = firstModOffset;
    bufp = NULL;	/* this buffer and reference added to the queue */
    /* compute the window surrounding *inOffsetp of size cm_chunkSize */
    scanStart = *inOffsetp;
    scanStart.LowPart &= (-cm_chunkSize);
    thyper = ConvertLongToLargeInteger(cm_chunkSize);
    scanEnd = LargeIntegerAdd(scanStart, thyper);

    flags = CM_SCACHESYNC_GETSTATUS
        | CM_SCACHESYNC_STOREDATA
        | CM_SCACHESYNC_BUFLOCKED
        | CM_SCACHESYNC_NOWAIT;

    /* start by looking backwards until scanStart */
    /* hyper version of cm_data.buf_blockSize */
    thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    tbase = LargeIntegerSubtract(firstModOffset, thyper);
    while (LargeIntegerGreaterThanOrEqualTo(tbase, scanStart)) {
        /* see if we can find the buffer */
        bufp = buf_Find(scp, &tbase);
        if (!bufp)
            break;

        /* try to lock it, and quit if we can't (simplifies locking) */
        lock_ReleaseWrite(&scp->rw);
        code = lock_TryMutex(&bufp->mx);
        lock_ObtainWrite(&scp->rw);
        if (code == 0) {
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        if (!(bufp->flags & CM_BUF_DIRTY)) {
            /* buffer is clean, so we shouldn't add it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        /* don't need buffer mutex any more */
        lock_ReleaseMutex(&bufp->mx);

        /* we have a dirty buffer ready for storing.  Add it to the tail
         * of the list, since it immediately precedes all of the disk
         * addresses we've already collected.
         */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, bufp);
        /* no buf_hold necessary, since we have it held from buf_Find */
        osi_QAddT((osi_queue_t **) &biop->bufListp,
                  (osi_queue_t **) &biop->bufListEndp,
                  &qdp->q);
        bufp = NULL;

        /* update biod info describing the transfer */
        biop->offset = LargeIntegerSubtract(biop->offset, thyper);
        biop->length += cm_data.buf_blockSize;

        /* update loop pointer */
        tbase = LargeIntegerSubtract(tbase, thyper);
    }	/* while loop looking for pages preceding the one we found */
    /* now, find later dirty, contiguous pages, and add them to the list */
    /* hyper version of cm_data.buf_blockSize */
    thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    tbase = LargeIntegerAdd(firstModOffset, thyper);
    while (LargeIntegerLessThan(tbase, scanEnd)) {
        /* see if we can find the buffer */
        bufp = buf_Find(scp, &tbase);
        if (!bufp)
            break;

        /* try to lock it, and quit if we can't (simplifies locking) */
        lock_ReleaseWrite(&scp->rw);
        code = lock_TryMutex(&bufp->mx);
        lock_ObtainWrite(&scp->rw);
        if (code == 0) {
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        if (!(bufp->flags & CM_BUF_DIRTY)) {
            /* buffer is clean, so we shouldn't add it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        /* don't need buffer mutex any more */
        lock_ReleaseMutex(&bufp->mx);

        /* we have a dirty buffer ready for storing.  Add it to the head
         * of the list, since it immediately follows all of the disk
         * addresses we've already collected.
         */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, bufp);
        /* no buf_hold necessary, since we have it held from buf_Find */
        osi_QAddH((osi_queue_t **) &biop->bufListp,
                  (osi_queue_t **) &biop->bufListEndp,
                  &qdp->q);
        bufp = NULL;

        /* update biod info describing the transfer */
        biop->length += cm_data.buf_blockSize;

        /* update loop pointer */
        tbase = LargeIntegerAdd(tbase, thyper);
    }	/* while loop looking for pages following the first page we found */
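    /*
     * Added note on list order: the queue head holds the highest file
     * offset and the tail the lowest.  The backward scan adds earlier
     * pages with osi_QAddT and the forward scan adds later pages with
     * osi_QAddH, so cm_BufWrite can start at biod.bufListEndp (the
     * lowest offset) and walk osi_QPrev toward increasing offsets.
     */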
    /* finally, we're done */
    return 0;
}

/* scp must be locked; temporarily unlocked during processing.
 * If returns 0, returns buffers held in biop, and with
 * CM_BUF_CMFETCHING flags set.
 * If an error is returned, we don't return any buffers.
 */
long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
                       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    cm_buf_t *tbp;
    osi_hyper_t tblocksize;		/* a long long temp variable */
    osi_hyper_t pageBase;		/* base offset we're looking at */
    osi_queueData_t *qdp;		/* one temp queue structure */
    osi_queueData_t *tqdp;		/* another temp queue structure */
    long collected;			/* how many bytes have been collected */
    int isFirst;
    long flags;
    osi_hyper_t fileSize;		/* the # of bytes in the file */
    osi_queueData_t *heldBufListp;	/* we hold all buffers in this list */
    osi_queueData_t *heldBufListEndp;	/* first one */
    int reserving;
    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);

    biop->scp = scp;			/* do not hold; held by caller */
    biop->offset = *offsetp;
    /* null out the list of buffers */
    biop->bufListp = biop->bufListEndp = NULL;
    biop->reserved = 0;

    /* first lookup the file's length, so we know when to stop */
    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
    if (code)
        return code;

    /* copy out size, since it may change */
    fileSize = scp->serverLength;

    lock_ReleaseWrite(&scp->rw);
    pageBase = *offsetp;
    collected = pageBase.LowPart & (cm_chunkSize - 1);
    heldBufListp = NULL;
    heldBufListEndp = NULL;
    /*
     * Obtaining buffers can cause dirty buffers to be recycled, which
     * can cause a storeback, so cannot be done while we have buffers
     * reserved.
     *
     * To get around this, we get buffers twice.  Before reserving buffers,
     * we obtain and release each one individually.  After reserving
     * buffers, we try to obtain them again, but only by lookup, not by
     * recycling.  If a buffer has gone away while we were waiting for
     * the others, we just use whatever buffers we already have.
     *
     * On entry to this function, we are already holding a buffer, so we
     * can't wait for reservation.  So we call buf_TryReserveBuffers()
     * instead.  Not only that, we can't really even call buf_Get(), for
     * the same reason.  We can't avoid that, though.  To avoid deadlock
     * we allow only one thread to be executing the buf_Get()-buf_Release()
     * sequence at a time.
     */
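    /*
     * Added sketch of the two-pass protocol described above
     * (illustrative pseudo-code, not a separate implementation):
     *
     *     pass 1: for each page in the chunk: buf_Get(); buf_Release();
     *             // may recycle/store back, so no reservation held yet
     *     reserving = buf_TryReserveBuffers(n);
     *     pass 2: for each page in the chunk: buf_Find();
     *             // lookup only -- a page that vanished is just skipped
     */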
    /* first hold all buffers, since we can't hold any locks in buf_Get */
    while (1) {
        /* stop at chunk boundary */
        if (collected >= cm_chunkSize)
            break;

        /* see if the next page would be past EOF */
        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
            break;

        code = buf_Get(scp, &pageBase, reqp, &tbp);
        if (code) {
            lock_ObtainWrite(&scp->rw);
            cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
            return code;
        }

        buf_Release(tbp);
        tbp = NULL;

        pageBase = LargeIntegerAdd(tblocksize, pageBase);
        collected += cm_data.buf_blockSize;
    }

    /* reserve a chunk's worth of buffers if possible */
    reserving = buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);

    pageBase = *offsetp;
    collected = pageBase.LowPart & (cm_chunkSize - 1);

    /* now hold all buffers, if they are still there */
    while (1) {
        /* stop at chunk boundary */
        if (collected >= cm_chunkSize)
            break;

        /* see if the next page would be past EOF */
        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
            break;

        tbp = buf_Find(scp, &pageBase);
        if (!tbp)
            break;

        /* add the buffer to the list */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, tbp);
        osi_QAddH((osi_queue_t **)&heldBufListp,
                  (osi_queue_t **)&heldBufListEndp,
                  &qdp->q);
        /* leave tbp held (from buf_Get) */
        tbp = NULL;

        collected += cm_data.buf_blockSize;
        pageBase = LargeIntegerAdd(tblocksize, pageBase);
    }
    /* look at each buffer, adding it into the list if it looks idle and
     * filled with old data.  One special case: wait for idle if it is the
     * first buffer since we really need that one for our caller to make
     * any progress.
     */
    isFirst = 1;
    collected = 0;		/* now count how many we'll really use */
    for (tqdp = heldBufListEndp;
         tqdp;
         tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
        /* get a ptr to the held buffer */
        tbp = osi_GetQData(tqdp);
        pageBase = tbp->offset;

        /* now lock the buffer lock */
        lock_ObtainMutex(&tbp->mx);
        lock_ObtainWrite(&scp->rw);

        /* don't bother fetching over data that is already current */
        if (tbp->dataVersion <= scp->dataVersion && tbp->dataVersion >= scp->bufDataVersionLow) {
            /* we don't need this buffer, since it is current */
            lock_ReleaseWrite(&scp->rw);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        flags = CM_SCACHESYNC_FETCHDATA | CM_SCACHESYNC_BUFLOCKED;
        if (!isFirst)
            flags |= CM_SCACHESYNC_NOWAIT;

        /* wait for the buffer to serialize, if required.  Doesn't
         * release the scp or buffer lock(s) if NOWAIT is specified.
         */
        code = cm_SyncOp(scp, tbp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseWrite(&scp->rw);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        /* don't fetch over dirty buffers */
        if (tbp->flags & CM_BUF_DIRTY) {
            cm_SyncOpDone(scp, tbp, flags);
            lock_ReleaseWrite(&scp->rw);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        /* release locks */
        lock_ReleaseWrite(&scp->rw);
        lock_ReleaseMutex(&tbp->mx);

        /* add the buffer to the list */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, tbp);
        osi_QAddH((osi_queue_t **)&biop->bufListp,
                  (osi_queue_t **)&biop->bufListEndp,
                  &qdp->q);
        buf_Hold(tbp);

        /* from now on, a failure just stops our collection process, but
         * we still do the I/O to whatever we've already managed to collect.
         */
        isFirst = 0;
        collected += cm_data.buf_blockSize;
    }
    /* now, we've held in biop->bufListp all the buffers we're really
     * interested in.  We also have holds left from heldBufListp, and we
     * now release those holds on the buffers.
     */
    for (qdp = heldBufListp; qdp; qdp = tqdp) {
        tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
        tbp = osi_GetQData(qdp);
        osi_QRemoveHT((osi_queue_t **) &heldBufListp,
                      (osi_queue_t **) &heldBufListEndp,
                      &qdp->q);
        osi_QDFree(qdp);
        buf_Release(tbp);
        tbp = NULL;
    }

    /* Caller expects this */
    lock_ObtainWrite(&scp->rw);

    /* if we got a failure setting up the first buffer, then we don't have
     * any side effects yet, and we also have failed an operation that the
     * caller requires to make any progress.  Give up now.
     */
    if (code && isFirst) {
        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
        return code;
    }

    /* otherwise, we're still OK, and should just return the I/O setup we've
     * got.
     */
    biop->length = collected;
    biop->reserved = reserving;
    return 0;
}
/* release a bulk I/O structure that was setup by cm_SetupFetchBIOD or by
 * cm_SetupStoreBIOD
 */
void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore, long code, int scp_locked)
{
    cm_scache_t *scp;		/* do not release; not held in biop */
    cm_buf_t *bufp;
    osi_queueData_t *qdp;
    osi_queueData_t *nqdp;
    int flags;

    /* Give back reserved buffers */
    if (biop->reserved)
        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);

    if (isStore)
        flags = CM_SCACHESYNC_STOREDATA;
    else
        flags = CM_SCACHESYNC_FETCHDATA;

    scp = biop->scp;
    if (biop->bufListp) {
        for (qdp = biop->bufListp; qdp; qdp = nqdp) {
            /* lookup next guy first, since we're going to free this one */
            nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);

            /* extract buffer and free queue data */
            bufp = osi_GetQData(qdp);
            osi_QRemoveHT((osi_queue_t **) &biop->bufListp,
                          (osi_queue_t **) &biop->bufListEndp,
                          &qdp->q);
            osi_QDFree(qdp);

            /* now, mark I/O as done, unlock the buffer and release it */
            if (scp_locked)
                lock_ReleaseWrite(&scp->rw);
            lock_ObtainMutex(&bufp->mx);
            lock_ObtainWrite(&scp->rw);
            cm_SyncOpDone(scp, bufp, flags);

            /* turn off writing and wakeup users */
            if (isStore) {
                if (bufp->flags & CM_BUF_WAITING) {
                    osi_Log2(afsd_logp, "cm_ReleaseBIOD Waking [scp 0x%p] bp 0x%p", scp, bufp);
                    osi_Wakeup((LONG_PTR) bufp);
                }
                if (code) {
                    bufp->flags &= ~CM_BUF_WRITING;
                    switch (code) {
                    case CM_ERROR_NOSUCHFILE:
                    case CM_ERROR_BADFD:
                    case CM_ERROR_NOACCESS:
                    case CM_ERROR_QUOTA:
                    case CM_ERROR_SPACE:
                    case CM_ERROR_TOOBIG:
                    case CM_ERROR_READONLY:
                    case CM_ERROR_NOSUCHPATH:
                        /*
                         * Apply the fatal error to this buffer.
                         */
                        bufp->flags &= ~CM_BUF_DIRTY;
                        bufp->flags |= CM_BUF_ERROR;
                        bufp->dirty_offset = 0;
                        bufp->dirty_length = 0;
                        bufp->error = code;
                        bufp->dataVersion = CM_BUF_VERSION_BAD;
                        bufp->dirtyCounter++;
                        break;
                    case CM_ERROR_TIMEDOUT:
                    case CM_ERROR_ALLDOWN:
                    case CM_ERROR_ALLBUSY:
                    case CM_ERROR_ALLOFFLINE:
                    case CM_ERROR_CLOCKSKEW:
                    default:
                        /* do not mark the buffer in error state but do
                         * not attempt to complete the rest either.
                         */
                        break;
                    }
                } else {
                    bufp->flags &= ~(CM_BUF_WRITING | CM_BUF_DIRTY);
                    bufp->dirty_offset = bufp->dirty_length = 0;
                }
            }

            if (!scp_locked)
                lock_ReleaseWrite(&scp->rw);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
        }
    } else {
        if (!scp_locked)
            lock_ObtainWrite(&scp->rw);
        cm_SyncOpDone(scp, NULL, flags);
        if (!scp_locked)
            lock_ReleaseWrite(&scp->rw);
    }

    /* clean things out */
    biop->bufListp = NULL;
    biop->bufListEndp = NULL;
}
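/*
 * Added summary of the error policy above: "fatal" per-file errors
 * (no such file, access, quota, space, too big, read-only, bad path)
 * poison the buffer -- the dirty bits are cleared so the data will not
 * be stored again, and CM_BUF_ERROR / CM_BUF_VERSION_BAD force a
 * refetch.  Transient errors (timeouts, all servers down/busy/offline,
 * clock skew) leave the buffer dirty so a later storeback can retry.
 */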
/* Fetch a buffer.  Called with scp locked.
 * The scp is locked on return.
 */
long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *userp,
                  cm_req_t *reqp)
{
    long code = 0, code1 = 0;
    afs_uint32 nbytes;			/* bytes in transfer */
    afs_uint32 nbytes_hi = 0;		/* high-order 32 bits of bytes in transfer */
    afs_uint64 length_found = 0;
    long rbytes;			/* bytes in rx_Read call */
    long temp;
    AFSFetchStatus afsStatus;
    AFSCallBack callback;
    AFSVolSync volSync;
    char *bufferp;
    cm_buf_t *tbufp;			/* buf we're filling */
    osi_queueData_t *qdp;		/* q element we're scanning */
    AFSFid tfid;
    struct rx_call *rxcallp;
    struct rx_connection *rxconnp;
    cm_bulkIO_t biod;			/* bulk IO descriptor */
    cm_conn_t *connp;
    int getroot;
    afs_int32 t1, t2;
    int require_64bit_ops = 0;
    int call_was_64bit = 0;
    int fs_fetchdata_offset_bug = 0;
    int first_read = 1;

    /* now, the buffer may or may not be filled with good data (buf_GetNew
     * drops lots of locks, and may indeed return a properly initialized
     * buffer, although more likely it will just return a new, empty, buffer).
     */
#ifdef AFS_FREELANCE_CLIENT

    // yj: if they're trying to get the /afs directory, we need to
    // handle it differently, since it's local rather than on any
    // server

    getroot = (scp==cm_data.rootSCachep);
    if (getroot)
        osi_Log1(afsd_logp,"GetBuffer returns cm_data.rootSCachep=%x",cm_data.rootSCachep);
#endif

    if (cm_HaveCallback(scp) && bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow) {
        /* We already have this buffer, don't do extra work */
        return 0;
    }

    cm_AFSFidFromFid(&tfid, &scp->fid);

    code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, userp, reqp);
    if (code) {
        /* couldn't even get the first page setup properly */
        osi_Log1(afsd_logp, "GetBuffer: SetupFetchBIOD failure code %d", code);
        return code;
    }

    /* once we get here, we have the callback in place, we know that no one
     * is fetching the data now.  Check one last time that we still have
     * the wrong data, and then fetch it if we're still wrong.
     *
     * We can lose a race condition and end up with biod.length zero, in
     * which case we just retry.
     */
    if ((bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow) || biod.length == 0) {
        if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow) &&
            LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->serverLength))
        {
            osi_Log4(afsd_logp, "Bad DVs 0x%x != (0x%x -> 0x%x) or length 0x%x",
                     bufp->dataVersion, scp->bufDataVersionLow, scp->dataVersion, biod.length);

            if (bufp->dataVersion == CM_BUF_VERSION_BAD)
                memset(bufp->datap, 0, cm_data.buf_blockSize);
            bufp->dataVersion = scp->dataVersion;
        }
        cm_ReleaseBIOD(&biod, 0, 0, 1);
        return 0;
    } else if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow)
                && (scp->mask & CM_SCACHEMASK_TRUNCPOS) &&
                LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->truncPos)) {
        memset(bufp->datap, 0, cm_data.buf_blockSize);
        bufp->dataVersion = scp->dataVersion;
        cm_ReleaseBIOD(&biod, 0, 0, 1);
        return 0;
    }

    lock_ReleaseWrite(&scp->rw);

#ifdef AFS_LARGEFILES
    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
                                                ConvertLongToLargeInteger(biod.length)),
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }
#endif

    osi_Log2(afsd_logp, "cm_GetBuffer: fetching data scp %p bufp %p", scp, bufp);
    osi_Log3(afsd_logp, "cm_GetBuffer: fetching data scpDV 0x%x scpDVLow 0x%x bufDV 0x%x",
             scp->dataVersion, scp->bufDataVersionLow, bufp->dataVersion);
#ifdef AFS_FREELANCE_CLIENT

    // yj code
    // if getroot then we don't need to make any calls
    // just return fake data

    if (cm_freelanceEnabled && getroot) {
        // setup the fake status
        afsStatus.InterfaceVersion = 0x1;
        afsStatus.FileType = 0x2;
        afsStatus.LinkCount = scp->linkCount;
        afsStatus.Length = cm_fakeDirSize;
        afsStatus.DataVersion = (afs_uint32)(cm_data.fakeDirVersion & 0xFFFFFFFF);
        afsStatus.Author = 0x1;
        afsStatus.Owner = 0x0;
        afsStatus.CallerAccess = 0x9;
        afsStatus.AnonymousAccess = 0x9;
        afsStatus.UnixModeBits = 0x1ff;
        afsStatus.ParentVnode = 0x1;
        afsStatus.ParentUnique = 0x1;
        afsStatus.ResidencyMask = 0;
        afsStatus.ClientModTime = (afs_uint32)FakeFreelanceModTime;
        afsStatus.ServerModTime = (afs_uint32)FakeFreelanceModTime;
        afsStatus.Group = 0;
        afsStatus.SyncCounter = 0;
        afsStatus.dataVersionHigh = (afs_uint32)(cm_data.fakeDirVersion >> 32);
        afsStatus.lockCount = 0;
        afsStatus.Length_hi = 0;
        afsStatus.errorCode = 0;

        // once we're done setting up the status info,
        // we just fill the buffer pages with fakedata
        // from cm_FakeRootDir.  Extra pages are set to
        // 0's

        lock_ObtainMutex(&cm_Freelance_Lock);
        t1 = bufp->offset.LowPart;
        qdp = biod.bufListEndp;
        while (qdp) {
            tbufp = osi_GetQData(qdp);
            bufferp = tbufp->datap;
            memset(bufferp, 0, cm_data.buf_blockSize);
            t2 = cm_fakeDirSize - t1;
            if (t2 > (afs_int32)cm_data.buf_blockSize)
                t2 = cm_data.buf_blockSize;
            if (t2 > 0)
                memcpy(bufferp, cm_FakeRootDir+t1, t2);
            t1 += cm_data.buf_blockSize;
            qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
        }
        lock_ReleaseMutex(&cm_Freelance_Lock);

        // once we're done, we skip over the part of the
        // code that does the ACTUAL fetching of data for
        // that particular bufp

        goto fetchingcompleted;
    }
#endif /* AFS_FREELANCE_CLIENT */
    /*
     * if the requested offset is greater than the file length,
     * the file server will return zero bytes of data and the
     * current status for the file which we already have since
     * we have just obtained a callback.  Instead, we can avoid
     * the network round trip by allocating zeroed buffers and
     * faking the status info.
     */
    if (biod.offset.QuadPart >= scp->length.QuadPart) {
        osi_Log5(afsd_logp, "SKIP FetchData64 scp 0x%p, off 0x%x:%08x > length 0x%x:%08x",
                 scp, biod.offset.HighPart, biod.offset.LowPart,
                 scp->length.HighPart, scp->length.LowPart);

        // setup the status based upon the scp data
        afsStatus.InterfaceVersion = 0x1;
        switch (scp->fileType) {
        case CM_SCACHETYPE_FILE:
            afsStatus.FileType = File;
            break;
        case CM_SCACHETYPE_DIRECTORY:
            afsStatus.FileType = Directory;
            break;
        case CM_SCACHETYPE_MOUNTPOINT:
            afsStatus.FileType = SymbolicLink;
            break;
        case CM_SCACHETYPE_SYMLINK:
        case CM_SCACHETYPE_DFSLINK:
            afsStatus.FileType = SymbolicLink;
            break;
        default:
            afsStatus.FileType = -1;	/* an invalid value */
        }
        afsStatus.LinkCount = scp->linkCount;
        afsStatus.Length = scp->length.LowPart;
        afsStatus.DataVersion = (afs_uint32)(scp->dataVersion & MAX_AFS_UINT32);
        afsStatus.Author = 0x1;
        afsStatus.Owner = scp->owner;
        lock_ObtainWrite(&scp->rw);
        if (cm_FindACLCache(scp, userp, &afsStatus.CallerAccess))
            afsStatus.CallerAccess = scp->anyAccess;
        afsStatus.AnonymousAccess = scp->anyAccess;
        afsStatus.UnixModeBits = scp->unixModeBits;
        afsStatus.ParentVnode = scp->parentVnode;
        afsStatus.ParentUnique = scp->parentUnique;
        afsStatus.ResidencyMask = 0;
        afsStatus.ClientModTime = scp->clientModTime;
        afsStatus.ServerModTime = scp->serverModTime;
        afsStatus.Group = scp->group;
        afsStatus.SyncCounter = 0;
        afsStatus.dataVersionHigh = (afs_uint32)(scp->dataVersion >> 32);
        afsStatus.lockCount = 0;
        afsStatus.Length_hi = scp->length.HighPart;
        afsStatus.errorCode = 0;
        lock_ReleaseWrite(&scp->rw);

        /* status info complete, fill pages with zeros */
        for (qdp = biod.bufListEndp;
             qdp;
             qdp = (osi_queueData_t *) osi_QPrev(&qdp->q)) {
            tbufp = osi_GetQData(qdp);
            bufferp = tbufp->datap;
            memset(bufferp, 0, cm_data.buf_blockSize);
        }

        /* no need to contact the file server */
        goto fetchingcompleted;
    }
    /* now make the call */
    do {
        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

        rxconnp = cm_GetRxConn(connp);
        rxcallp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);

#ifdef AFS_LARGEFILES
        nbytes = nbytes_hi = 0;

        if (SERVERHAS64BIT(connp)) {
            call_was_64bit = 1;

            osi_Log4(afsd_logp, "CALL FetchData64 scp 0x%p, off 0x%x:%08x, size 0x%x",
                     scp, biod.offset.HighPart, biod.offset.LowPart, biod.length);

            code = StartRXAFS_FetchData64(rxcallp, &tfid, biod.offset.QuadPart, biod.length);

            if (code == 0) {
                temp = rx_Read32(rxcallp, &nbytes_hi);
                if (temp == sizeof(afs_int32)) {
                    nbytes_hi = ntohl(nbytes_hi);
                } else {
                    nbytes_hi = 0;
                    code = rxcallp->error;
                    code1 = rx_EndCall(rxcallp, code);
                    rxcallp = NULL;
                }
            }
        } else {
            call_was_64bit = 0;
        }

        if (code == RXGEN_OPCODE || !SERVERHAS64BIT(connp)) {
            if (require_64bit_ops) {
                osi_Log0(afsd_logp, "Skipping FetchData.  Operation requires FetchData64");
                code = CM_ERROR_TOOBIG;
            } else {
                if (!rxcallp) {
                    rxconnp = cm_GetRxConn(connp);
                    rxcallp = rx_NewCall(rxconnp);
                    rx_PutConnection(rxconnp);
                }

                osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
                         scp, biod.offset.LowPart, biod.length);

                code = StartRXAFS_FetchData(rxcallp, &tfid, biod.offset.LowPart,
                                            biod.length);
                SET_SERVERHASNO64BIT(connp);
            }
        }
        if (code == 0) {
            temp = rx_Read32(rxcallp, &nbytes);
            if (temp == sizeof(afs_int32)) {
                nbytes = ntohl(nbytes);
                FillInt64(length_found, nbytes_hi, nbytes);
                if (length_found > biod.length) {
                    /*
                     * prior to 1.4.12 and 1.5.65 the file server would return
                     * (filesize - offset) if the requested offset was greater than
                     * the filesize.  The correct return value would have been zero.
                     * Force a retry by returning an RX_PROTOCOL_ERROR.  If the cause
                     * is a race between two RPCs issued by this cache manager, the
                     * correct thing will happen the second time.
                     */
                    osi_Log0(afsd_logp, "cm_GetBuffer length_found > biod.length");
                    fs_fetchdata_offset_bug = 1;
                }
            } else {
                osi_Log1(afsd_logp, "cm_GetBuffer rx_Read32 returns %d != 4", temp);
                code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
            }
        }
        /* for the moment, nbytes_hi will always be 0 if code == 0
           because biod.length is a 32-bit quantity. */
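        /*
         * Added wire-format note: a FetchData64 reply streams a 64-bit
         * payload length ahead of the data -- the high 32 bits first
         * (read into nbytes_hi above), then the low 32 bits, both in
         * network byte order; FillInt64() reassembles them.  The plain
         * FetchData reply carries a single 32-bit length.
         */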
#else
        osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
                 scp, biod.offset.LowPart, biod.length);

        code = StartRXAFS_FetchData(rxcallp, &tfid, biod.offset.LowPart,
                                    biod.length);

        /* now copy the data out of the pipe and put it in the buffer */
        if (code == 0) {
            temp = rx_Read32(rxcallp, &nbytes);
            if (temp == sizeof(afs_int32)) {
                length_found = ntohl(nbytes);
                if (length_found > biod.length) {
                    /*
                     * prior to 1.4.12 and 1.5.65 the file server would return
                     * (filesize - offset) if the requested offset was greater than
                     * the filesize.  The correct return value would have been zero.
                     * Force a retry by returning an RX_PROTOCOL_ERROR.  If the cause
                     * is a race between two RPCs issued by this cache manager, the
                     * correct thing will happen the second time.
                     */
                    osi_Log0(afsd_logp, "cm_GetBuffer length_found > biod.length");
                    fs_fetchdata_offset_bug = 1;
                }
            } else {
                osi_Log1(afsd_logp, "cm_GetBuffer rx_Read32 returns %d != 4", temp);
                code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
            }
        }
#endif /* AFS_LARGEFILES */
        if (code == 0) {
            qdp = biod.bufListEndp;
            if (qdp) {
                tbufp = osi_GetQData(qdp);
                bufferp = tbufp->datap;
            } else
                bufferp = NULL;

            /* fill length_found of data from the pipe into the pages.
             * When we stop, qdp will point at the last page we're
             * dealing with, and bufferp will tell us where we
             * stopped.  We'll need this info below when we clear
             * the remainder of the last page out (and potentially
             * clear later pages out, if we fetch past EOF).
             */
            while (length_found > 0) {
                /* assert that there are still more buffers;
                 * our check above for length_found being less than
                 * biod.length should ensure this.
                 */
                osi_assertx(bufferp != NULL, "null cm_buf_t");

                /* read rbytes of data */
                rbytes = (afs_uint32)(length_found > cm_data.buf_blockSize ? cm_data.buf_blockSize : length_found);
                temp = rx_Read(rxcallp, bufferp, rbytes);
                if (temp < rbytes) {
                    /*
                     * If the file server returned (filesize - offset),
                     * then the first rx_Read will return zero octets of data.
                     * If it does, do not treat it as an error.  Correct the
                     * length_found and continue as if the file server said
                     * it was sending us zero octets of data.
                     */
                    if (fs_fetchdata_offset_bug && first_read)
                        length_found = 0;
                    else
                        code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
                    break;
                }
                first_read = 0;

                /* allow read-while-fetching.
                 * if this is the last buffer, clear the
                 * PREFETCHING flag, so the reader waiting for
                 * this buffer will start a prefetch.
                 */
                tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
                lock_ObtainWrite(&scp->rw);
                if (scp->flags & CM_SCACHEFLAG_WAITING) {
                    osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
                    osi_Wakeup((LONG_PTR) &scp->flags);
                }
                if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
                    osi_hyper_t tlength = ConvertLongToLargeInteger(biod.length);
                    *cpffp = 1;
                    cm_ClearPrefetchFlag(0, scp, &biod.offset, &tlength);
                }
                lock_ReleaseWrite(&scp->rw);

                /* and adjust counters */
                length_found -= temp;

                /* and move to the next buffer */
                if (length_found != 0) {
                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                    if (qdp) {
                        tbufp = osi_GetQData(qdp);
                        bufferp = tbufp->datap;
                    } else
                        bufferp = NULL;
                }
            }
            /* zero out remainder of last pages, in case we are
             * fetching past EOF.  We were fetching an integral #
             * of pages, but stopped, potentially in the middle of
             * a page.  Zero the remainder of that page, and then
             * all of the rest of the pages.
             */
            if (bufferp) {
                osi_assertx((bufferp - tbufp->datap) < LONG_MAX, "data >= LONG_MAX");
                rbytes = (long) (bufferp - tbufp->datap);

                /* bytes left to zero */
                rbytes = cm_data.buf_blockSize - rbytes;
                while (qdp) {
                    if (rbytes != 0)
                        memset(bufferp, 0, rbytes);
                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                    if (qdp == NULL)
                        break;
                    tbufp = osi_GetQData(qdp);
                    bufferp = tbufp->datap;
                    /* bytes to clear in this page */
                    rbytes = cm_data.buf_blockSize;
                }
            }
        }
        if (code == 0) {
            if (call_was_64bit)
                code = EndRXAFS_FetchData64(rxcallp, &afsStatus, &callback, &volSync);
            else
                code = EndRXAFS_FetchData(rxcallp, &afsStatus, &callback, &volSync);
        } else {
            if (call_was_64bit)
                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData64 skipped due to error %d", code);
            else
                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error %d", code);
        }

        code1 = rx_EndCall(rxcallp, code);

        if (code1 == RXKADUNKNOWNKEY)
            osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");

        /* If we are avoiding a file server bug, ignore the error state */
        if (fs_fetchdata_offset_bug && first_read && length_found == 0 && code == -451)
            code = 0;
        /* Prefer the error value from FetchData over rx_EndCall */
        else if (code == 0 && code1 != 0)
            code = code1;

        osi_Log0(afsd_logp, "CALL FetchData DONE");

    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));

  fetchingcompleted:
    code = cm_MapRPCError(code, reqp);
    lock_ObtainWrite(&scp->rw);

    /* we know that no one else has changed the buffer, since we still have
     * the fetching flag on the buffers, and we have the scp locked again.
     * Copy in the version # into the buffer if we got code 0 back from the
     * read.
     */
    if (code == 0) {
        for (qdp = biod.bufListp;
             qdp;
             qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
            tbufp = osi_GetQData(qdp);
            tbufp->dataVersion = afsStatus.dataVersionHigh;
            tbufp->dataVersion <<= 32;
            tbufp->dataVersion |= afsStatus.DataVersion;

#ifdef DISKCACHE95
            /* write buffer out to disk cache */
            diskcache_Update(tbufp->dcp, tbufp->datap, cm_data.buf_blockSize,
                             tbufp->dataVersion);
#endif /* DISKCACHE95 */
        }
    }

    /* release scatter/gather I/O structure (buffers, locks) */
    cm_ReleaseBIOD(&biod, 0, code, 1);

    if (code == 0)
        cm_MergeStatus(NULL, scp, &afsStatus, &volSync, userp, reqp, 0);

    return code;
}