/*
 * Copyright 2000, International Business Machines Corporation and others.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afs/param.h>

extern void afsi_log(char *pattern, ...);

#ifdef AFS_FREELANCE_CLIENT
extern osi_mutex_t cm_Freelance_Lock;
#endif /* AFS_FREELANCE_CLIENT */
#ifdef AFS_LARGEFILES
/* we can access connp->serverp without holding a lock because it
 * never changes once the connection is established. */
#define SERVERHAS64BIT(connp) (!((connp)->serverp->flags & CM_SERVERFLAG_NO64BIT))
#define SET_SERVERHASNO64BIT(connp) (cm_SetServerNo64Bit((connp)->serverp, TRUE))
#else
#define SERVERHAS64BIT(connp) (FALSE)
#define SET_SERVERHASNO64BIT(connp) (FALSE)
#endif
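
/* Usage sketch (this mirrors the retry logic in the RPC wrappers later in
 * this file): when a 64-bit RPC fails with RXGEN_OPCODE, the server is
 * marked as lacking 64-bit support so that the next pass of the retry loop
 * falls back to the 32-bit call:
 *
 *     if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp))
 *         SET_SERVERHASNO64BIT(connp);
 */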

/* functions called back from the buffer package when reading or writing data,
 * or when holding or releasing a vnode pointer.
 */
long cm_BufWrite(void *vscp, osi_hyper_t *offsetp, long length, long flags,
                 cm_user_t *userp, cm_req_t *reqp)
{
    /* store the data back from this buffer; the buffer is locked and held,
     * but the vnode involved isn't locked, yet.  It is held by its
     * reference from the buffer, which won't change until the buffer is
     * released by our caller.  Thus, we don't have to worry about holding
     * bufp->scp.
     */
    cm_scache_t *scp = vscp;
    long code, code1;
    long temp, wbytes;
    afs_int32 nbytes;
    osi_hyper_t thyper, truncPos;
    AFSFid tfid;
    AFSFetchStatus outStatus;
    AFSStoreStatus inStatus;
    AFSVolSync volSync;
    cm_conn_t *connp;
    struct rx_call *rxcallp;
    struct rx_connection *rxconnp;
    osi_queueData_t *qdp;
    cm_buf_t *bufp;
    char *bufferp;
    cm_bulkIO_t biod;		/* bulk IO descriptor */
    int require_64bit_ops = 0;

    osi_assertx(userp != NULL, "null cm_user_t");
    osi_assertx(scp != NULL, "null cm_scache_t");

    /* now, the buffer may or may not be filled with good data (buf_GetNew
     * drops lots of locks, and may indeed return a properly initialized
     * buffer, although more likely it will just return a new, empty, buffer.
     */
    lock_ObtainWrite(&scp->rw);
    if (scp->flags & CM_SCACHEFLAG_DELETED) {
        lock_ReleaseWrite(&scp->rw);
        return CM_ERROR_NOSUCHFILE;
    }

    cm_AFSFidFromFid(&tfid, &scp->fid);

    /* Serialize StoreData RPC's; for rationale see cm_scache.c */
    (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);

    code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
    if (code) {
        osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
        cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
        lock_ReleaseWrite(&scp->rw);
        return code;
    }

    if (biod.length == 0) {
        osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
        cm_ReleaseBIOD(&biod, 1, 0, 1);	/* should be a NOOP */
        cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
        lock_ReleaseWrite(&scp->rw);
        return 0;
    }

    /* prepare the outgoing status for the store */
    scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
    cm_StatusFromAttr(&inStatus, scp, NULL);
    truncPos = scp->length;
    if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
        && LargeIntegerLessThan(scp->truncPos, truncPos))
        truncPos = scp->truncPos;
    scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;

    /* compute how many bytes to write from this buffer */
    thyper = LargeIntegerSubtract(scp->length, biod.offset);
    if (LargeIntegerLessThanZero(thyper)) {
        /* entire buffer is past EOF */
        nbytes = 0;
    }
    else {
        /* otherwise write out part of buffer before EOF, but not
         * more than bufferSize bytes.
         */
        if (LargeIntegerGreaterThan(thyper,
                                    ConvertLongToLargeInteger(biod.length))) {
            nbytes = biod.length;
        } else {
            /* if thyper is less than or equal to biod.length, then we
               can safely assume that the value fits in a long. */
            nbytes = thyper.LowPart;
        }
    }

    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
                                                ConvertLongToLargeInteger(nbytes)),
                                ConvertLongToLargeInteger(LONG_MAX)) ||
        LargeIntegerGreaterThan(truncPos,
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }
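
    /* Worked example (illustrative values, not from a real trace): with
     * biod.offset 0x7FFFF000 and nbytes 0x2000, the end of the store is
     * 0x80001000, which exceeds LONG_MAX (0x7FFFFFFF), so the 64-bit
     * StoreData64 variant is required. */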

    lock_ReleaseWrite(&scp->rw);

    /* now we're ready to do the store operation */
    do {
        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

        rxconnp = cm_GetRxConn(connp);
        rxcallp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);

#ifdef AFS_LARGEFILES
        if (SERVERHAS64BIT(connp)) {
            osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData64 scp 0x%p, offset 0x%x:%08x, length 0x%x",
                     scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

            code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
                                          biod.offset.QuadPart,
                                          nbytes, truncPos.QuadPart);
            if (code)
                osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData64 FAILURE, code 0x%x", code);
            else
                osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData64 SUCCESS");
        } else {
            if (require_64bit_ops) {
                osi_Log0(afsd_logp, "Skipping StartRXAFS_StoreData.  The operation requires large file support in the server.");
                code = CM_ERROR_TOOBIG;
            } else {
                osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
                         scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

                code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
                                            biod.offset.LowPart, nbytes, truncPos.LowPart);
                if (code)
                    osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
                else
                    osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
            }
        }
#else /* AFS_LARGEFILES */
        osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
                 scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);

        code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
                                    biod.offset.LowPart, nbytes, truncPos.LowPart);
        if (code)
            osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
        else
            osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
#endif /* AFS_LARGEFILES */

        /* write the data from the list of buffers */
        if (code == 0) {
            qdp = NULL;
            while (nbytes > 0) {
                if (qdp == NULL)
                    qdp = biod.bufListEndp;
                else
                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                osi_assertx(qdp != NULL, "null osi_queueData_t");
                bufp = osi_GetQData(qdp);
                bufferp = bufp->datap;
                wbytes = nbytes;
                if (wbytes > cm_data.buf_blockSize)
                    wbytes = cm_data.buf_blockSize;

                /* write out wbytes of data from bufferp */
                temp = rx_Write(rxcallp, bufferp, wbytes);
                if (temp != wbytes) {
                    osi_Log3(afsd_logp, "rx_Write failed bp 0x%p, %d != %d", bufp, temp, wbytes);
                    code = (rxcallp->error < 0) ? rxcallp->error : -1;
                    break;
                } else {
                    osi_Log2(afsd_logp, "rx_Write succeeded bp 0x%p, %d", bufp, temp);
                }
                nbytes -= wbytes;
            }	/* while more bytes to write */
        }	/* if RPC started successfully */

        if (code == 0) {
            if (SERVERHAS64BIT(connp)) {
                code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
                if (code)
                    osi_Log2(afsd_logp, "EndRXAFS_StoreData64 FAILURE scp 0x%p code %lX", scp, code);
                else
                    osi_Log0(afsd_logp, "EndRXAFS_StoreData64 SUCCESS");
            } else {
                code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
                if (code)
                    osi_Log2(afsd_logp, "EndRXAFS_StoreData FAILURE scp 0x%p code %lX", scp, code);
                else
                    osi_Log0(afsd_logp, "EndRXAFS_StoreData SUCCESS");
            }
        }

        code1 = rx_EndCall(rxcallp, code);

#ifdef AFS_LARGEFILES
        if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
            SET_SERVERHASNO64BIT(connp);
        }
#endif
        /* Prefer StoreData error over rx_EndCall error */
        if (code == 0 && code1 != 0)
            code = code1;
    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));

    code = cm_MapRPCError(code, reqp);

    if (code)
        osi_Log2(afsd_logp, "CALL StoreData FAILURE scp 0x%p, code 0x%x", scp, code);
    else
        osi_Log1(afsd_logp, "CALL StoreData SUCCESS scp 0x%p", scp);

    /* now, clean up our state */
    lock_ObtainWrite(&scp->rw);

    cm_ReleaseBIOD(&biod, 1, code, 1);
    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);

    if (code == 0) {
        osi_hyper_t t;
        /* now, here's something a little tricky: in AFS 3, a dirty
         * length can't be directly stored; instead, a dirty chunk is
         * stored that sets the file's size (by writing and by using
         * the truncate-first option in the store call).
         *
         * At this point, we've just finished a store, and so the trunc
         * pos field is clean.  If the file's size at the server is at
         * least as big as we think it should be, then we turn off the
         * length dirty bit, since all the other dirty buffers must
         * precede this one in the file.
         *
         * The file's desired size shouldn't be smaller than what's
         * stored at the server now, since we just did the trunc pos
         * store.
         *
         * We have to turn off the length dirty bit as soon as we can,
         * so that we see updates made by other machines.
         */
        if (SERVERHAS64BIT(connp)) {
            t.LowPart = outStatus.Length;
            t.HighPart = outStatus.Length_hi;
        } else {
            t = ConvertLongToLargeInteger(outStatus.Length);
        }

        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
            scp->mask &= ~CM_SCACHEMASK_LENGTH;

        cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, CM_MERGEFLAG_STOREDATA);
    } else {
        if (code == CM_ERROR_SPACE)
            scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
        else if (code == CM_ERROR_QUOTA)
            scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
    }

    lock_ReleaseWrite(&scp->rw);

    return code;
}

/*
 * Truncate the file, by sending a StoreData RPC with zero length.
 *
 * Called with scp locked.  Releases and re-obtains the lock.
 */
long cm_StoreMini(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
    long code, code1;
    AFSFetchStatus outStatus;
    AFSStoreStatus inStatus;
    AFSVolSync volSync;
    AFSFid tfid;
    osi_hyper_t truncPos;
    cm_conn_t *connp;
    struct rx_call *rxcallp;
    struct rx_connection *rxconnp;
    int require_64bit_ops = 0;

    /* Serialize StoreData RPC's; for rationale see cm_scache.c */
    (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_STOREDATA_EXCL);

    /* prepare the outgoing status for the store */
    inStatus.Mask = AFS_SETMODTIME;
    inStatus.ClientModTime = scp->clientModTime;
    scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;

    /* calculate truncation position */
    truncPos = scp->length;
    if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
        && LargeIntegerLessThan(scp->truncPos, truncPos))
        truncPos = scp->truncPos;
    scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;

    if (LargeIntegerGreaterThan(truncPos,
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }

    lock_ReleaseWrite(&scp->rw);

    cm_AFSFidFromFid(&tfid, &scp->fid);

    /* now we're ready to do the store operation */
    do {
        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

        rxconnp = cm_GetRxConn(connp);
        rxcallp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);

#ifdef AFS_LARGEFILES
        if (SERVERHAS64BIT(connp)) {
            code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
                                          0, 0, truncPos.QuadPart);
        } else {
            if (require_64bit_ops) {
                code = CM_ERROR_TOOBIG;
            } else {
                code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
                                            0, 0, truncPos.LowPart);
            }
        }
#else
        code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
                                    0, 0, truncPos.LowPart);
#endif

        if (code == 0) {
            if (SERVERHAS64BIT(connp))
                code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
            else
                code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
        }

        code1 = rx_EndCall(rxcallp, code);

#ifdef AFS_LARGEFILES
        if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
            SET_SERVERHASNO64BIT(connp);
        }
#endif

        /* prefer StoreData error over rx_EndCall error */
        if (code == 0 && code1 != 0)
            code = code1;
    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
    code = cm_MapRPCError(code, reqp);

    /* now, clean up our state */
    lock_ObtainWrite(&scp->rw);

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);

    if (code == 0) {
        osi_hyper_t t;
        /*
         * For explanation of handling of CM_SCACHEMASK_LENGTH,
         * see cm_BufWrite().
         */
        if (SERVERHAS64BIT(connp)) {
            t.HighPart = outStatus.Length_hi;
            t.LowPart = outStatus.Length;
        } else {
            t = ConvertLongToLargeInteger(outStatus.Length);
        }

        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
            scp->mask &= ~CM_SCACHEMASK_LENGTH;
        cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, CM_MERGEFLAG_STOREDATA);
    }

    return code;
}

long cm_BufRead(cm_buf_t *bufp, long nbytes, long *bytesReadp, cm_user_t *userp)
{
    *bytesReadp = cm_data.buf_blockSize;

    /* now return a code that means that I/O is done */
    return 0;
}

/* stabilize scache entry, and return with it locked so
 * it stays stable.
 */
long cm_BufStabilize(void *vscp, cm_user_t *userp, cm_req_t *reqp)
{
    cm_scache_t *scp = vscp;
    long code;

    lock_ObtainWrite(&scp->rw);
    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
    if (code)
        lock_ReleaseWrite(&scp->rw);

    return code;
}

/* undoes the work that cm_BufStabilize does: releases lock so things can change again */
long cm_BufUnstabilize(void *vscp, cm_user_t *userp)
{
    cm_scache_t *scp = vscp;

    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);

    lock_ReleaseWrite(&scp->rw);

    /* always succeeds */
    return 0;
}

cm_buf_ops_t cm_bufOps = {
    cm_BufWrite,
    cm_BufRead,
    cm_BufStabilize,
    cm_BufUnstabilize
};

long cm_ValidateDCache(void)
{
    return buf_ValidateBuffers();
}

long cm_ShutdownDCache(void)
{
    return 0;
}

int cm_InitDCache(int newFile, long chunkSize, afs_uint64 nbuffers)
{
    return buf_Init(newFile, &cm_bufOps, nbuffers);
}
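
/* Initialization sketch (hedged: the exact startup sequence lives in the
 * AFS client service init code, not in this file): cm_bufOps above is wired
 * into the buffer package exactly once, before any cache I/O happens:
 *
 *     code = cm_InitDCache(newFile, chunkSize, nbuffers);
 *     if (code)
 *         afsi_log("buffer package init failed, code %d", code);
 */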

/* check to see if we have an up-to-date buffer.  The buffer must have
 * previously been obtained by calling buf_Get.
 *
 * Make sure we have a callback, and that the dataversion matches.
 *
 * Scp must be locked.
 *
 * Bufp *may* be locked.
 */
int cm_HaveBuffer(cm_scache_t *scp, cm_buf_t *bufp, int isBufLocked)
{
    int code;
    if (!cm_HaveCallback(scp))
        return 0;
    if ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
        return 1;
    if (bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow)
        return 1;
    if (!isBufLocked) {
        code = lock_TryMutex(&bufp->mx);
        if (code == 0) {
            /* don't have the lock, and can't lock it, then
             * return failure.
             */
            return 0;
        }
    }

    /* remember dirty flag for later */
    code = bufp->flags & CM_BUF_DIRTY;

    /* release lock if we obtained it here */
    if (!isBufLocked)
        lock_ReleaseMutex(&bufp->mx);

    /* if buffer was dirty, buffer is acceptable for use */
    return code;
}
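
/* Caller sketch (a typical pattern, not a verbatim excerpt from the tree):
 * with scp locked, consult cm_HaveBuffer() and fetch only when the cached
 * page is stale; userp and reqp are as in the surrounding code:
 *
 *     if (!cm_HaveBuffer(scp, bufp, 0))
 *         code = cm_GetBuffer(scp, bufp, NULL, userp, reqp);
 */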

/* used when deciding whether to do a prefetch or not */
long cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, osi_hyper_t *length,
                        cm_user_t *userp, cm_req_t *reqp, osi_hyper_t *realBasep)
{
    osi_hyper_t tbase;
    osi_hyper_t tlength;
    osi_hyper_t tblocksize;
    long code;
    cm_buf_t *bp;
    int stop;

    /* now scan all buffers in the range, looking for any that look like
     * they need work.
     */
    tbase = *startBasep;
    tlength = *length;
    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    stop = 0;
    lock_ObtainWrite(&scp->rw);
    while (LargeIntegerGreaterThanZero(tlength)) {
        /* get callback so we can do a meaningful dataVersion comparison */
        code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
        if (code) {
            scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
            lock_ReleaseWrite(&scp->rw);
            return code;
        }

        if (LargeIntegerGreaterThanOrEqualTo(tbase, scp->length)) {
            /* we're past the end of file */
            break;
        }

        bp = buf_Find(scp, &tbase);
        /* We cheat slightly by not locking the bp mutex. */
        if (bp) {
            if ((bp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)) == 0
                 && (bp->dataVersion < scp->bufDataVersionLow || bp->dataVersion > scp->dataVersion))
                stop = 1;
            buf_Release(bp);
            bp = NULL;
        }
        else
            stop = 1;

        /* if this buffer is essentially guaranteed to require a fetch,
         * break out here and return this position.
         */
        if (stop)
            break;

        tbase = LargeIntegerAdd(tbase, tblocksize);
        tlength = LargeIntegerSubtract(tlength, tblocksize);
    }

    /* if we get here, either everything is fine or 'stop' stopped us at a
     * particular buffer in the range that definitely needs to be fetched.
     */
    if (stop == 0) {
        /* return non-zero code since realBasep won't be valid */
        scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
        code = -1;
    }
    else {
        /* successfully found a page that will need fetching */
        *realBasep = tbase;
        code = 0;
    }

    lock_ReleaseWrite(&scp->rw);

    return code;
}

afs_int32
cm_BkgStore(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
            cm_user_t *userp)
{
    osi_hyper_t toffset;
    long length;
    long code = 0;
    cm_req_t req;

    if (scp->flags & CM_SCACHEFLAG_DELETED) {
        osi_Log4(afsd_logp, "Skipping BKG store - Deleted scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
    } else {
        cm_InitReq(&req);

        /* Retries will be performed by the BkgDaemon thread if appropriate */
        req.flags |= CM_REQ_NORETRY;

        toffset.LowPart = p1;
        toffset.HighPart = p2;
        length = p3;

        osi_Log4(afsd_logp, "Starting BKG store scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);

        code = cm_BufWrite(scp, &toffset, length, /* flags */ 0, userp, &req);

        osi_Log4(afsd_logp, "Finished BKG store scp 0x%p, offset 0x%x:%08x, code 0x%x", scp, p2, p1, code);
    }

    /*
     * Keep the following list synchronized with the
     * error code list in cm_BkgDaemon
     */
    switch (code) {
    case CM_ERROR_TIMEDOUT:	/* or server restarting */
    case CM_ERROR_WOULDBLOCK:
    case CM_ERROR_ALLBUSY:
    case CM_ERROR_ALLDOWN:
    case CM_ERROR_ALLOFFLINE:
    case CM_ERROR_PARTIALWRITE:
        break;	/* cm_BkgDaemon will re-insert the request in the queue */
    default:
        lock_ObtainWrite(&scp->rw);
        cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
        lock_ReleaseWrite(&scp->rw);
    }

    return code;
}
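
/* Queueing sketch: callers hand work to the background daemon by splitting
 * the 64-bit offset into 32-bit halves, matching how p1..p4 are unpacked
 * above (the prefetch path below uses the same convention):
 *
 *     cm_QueueBKGRequest(scp, cm_BkgStore,
 *                        offset.LowPart, offset.HighPart,
 *                        length, 0, userp);
 */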

/* Called with scp locked */
void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base, osi_hyper_t *length)
{
    osi_hyper_t end;

    if (code == 0) {
        end = LargeIntegerAdd(*base, *length);
        if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
            scp->prefetch.base = *base;
        if (LargeIntegerGreaterThan(end, scp->prefetch.end))
            scp->prefetch.end = end;
    }
    scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
}

/* do the prefetch.  If the prefetch fails, return 0 (success)
 * because there is no harm done.  */
afs_int32
cm_BkgPrefetch(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
               cm_user_t *userp)
{
    osi_hyper_t length, base, offset, end, fetched;
    osi_hyper_t tblocksize;
    long code;
    int scp_locked = 0;
    cm_buf_t *bp = NULL;
    cm_req_t req;

    cm_InitReq(&req);

    /* Retries will be performed by the BkgDaemon thread if appropriate */
    req.flags |= CM_REQ_NORETRY;

    fetched.LowPart = 0;
    fetched.HighPart = 0;
    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    base.LowPart = p1;
    base.HighPart = p2;
    length.LowPart = p3;
    length.HighPart = p4;

    end = LargeIntegerAdd(base, length);

    osi_Log3(afsd_logp, "Starting BKG prefetch scp 0x%p, base 0x%x:%x", scp, p2, p1);

    for ( code = 0, offset = base;
          code == 0 && LargeIntegerLessThan(offset, end);
          offset = LargeIntegerAdd(offset, tblocksize) )
    {
        if (scp_locked) {
            lock_ReleaseWrite(&scp->rw);
            scp_locked = 0;
        }

        code = buf_Get(scp, &offset, &bp);
        if (code)
            break;

        if (bp->cmFlags & CM_BUF_CMFETCHING) {
            /* skip this buffer as another thread is already fetching it */
            buf_Release(bp);
            bp = NULL;
            continue;
        }

        lock_ObtainWrite(&scp->rw);
        scp_locked = 1;
        code = cm_GetBuffer(scp, bp, NULL, userp, &req);
        if (code == 0)
            fetched = LargeIntegerAdd(fetched, tblocksize);
        buf_Release(bp);
        bp = NULL;
    }

    if (!scp_locked)
        lock_ObtainWrite(&scp->rw);

    cm_ClearPrefetchFlag(LargeIntegerGreaterThanZero(fetched) ? 0 : code,
                         scp, &base, &fetched);
    lock_ReleaseWrite(&scp->rw);

    osi_Log4(afsd_logp, "Ending BKG prefetch scp 0x%p, code %d bytes 0x%x:%x",
             scp, code, fetched.HighPart, fetched.LowPart);

    return code;
}

/* a read was issued to offsetp, and we have to determine whether we should
 * do a prefetch of the next chunk.
 */
void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp, afs_uint32 count,
                         cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    osi_hyper_t realBase;
    osi_hyper_t readBase;
    osi_hyper_t readLength;

    readBase = *offsetp;
    /* round up to chunk boundary */
    readBase.LowPart += (cm_chunkSize-1);
    readBase.LowPart &= (-cm_chunkSize);
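
    /* Worked example (illustrative numbers; cm_chunkSize is assumed to be
     * 0x40000 here): a read at offset 0x12345 becomes 0x52344 after the
     * addition and 0x40000 after the mask, i.e. the next chunk boundary. */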

    readLength = ConvertLongToLargeInteger(count);

    lock_ObtainWrite(&scp->rw);
    if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
         || LargeIntegerLessThanOrEqualTo(readBase, scp->prefetch.base)) {
        lock_ReleaseWrite(&scp->rw);
        return;
    }
    scp->flags |= CM_SCACHEFLAG_PREFETCHING;

    /* start the scan at the latter of the end of this read or
     * the end of the last fetched region.
     */
    if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
        readBase = scp->prefetch.end;

    lock_ReleaseWrite(&scp->rw);

    code = cm_CheckFetchRange(scp, &readBase, &readLength, userp, reqp,
                              &realBase);
    if (code)
        return;	/* can't find something to prefetch */

    osi_Log2(afsd_logp, "BKG Prefetch request scp 0x%p, base 0x%x",
             scp, realBase.LowPart);

    cm_QueueBKGRequest(scp, cm_BkgPrefetch,
                       realBase.LowPart, realBase.HighPart,
                       readLength.LowPart, readLength.HighPart,
                       userp);
}

/* scp must be locked; temporarily unlocked during processing.
 * If returns 0, returns buffers held in biop, and with
 * CM_BUF_CMSTORING set.
 *
 * Caller *must* set CM_BUF_WRITING and reset the over.hEvent field if the
 * buffer is ever unlocked before CM_BUF_DIRTY is cleared.  And if
 * CM_BUF_WRITING is ever viewed by anyone, then it must be cleared, sleepers
 * must be woken, and the event must be set when the I/O is done.  All of this
 * is required so that buf_WaitIO synchronizes properly with the buffer as it
 * is being written out.
 */
long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
                       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
{
    cm_buf_t *bufp;
    osi_queueData_t *qdp;
    osi_hyper_t thyper;
    osi_hyper_t tbase;
    osi_hyper_t scanStart;		/* where to start scan for dirty pages */
    osi_hyper_t scanEnd;		/* where to stop scan for dirty pages */
    osi_hyper_t firstModOffset;	/* offset of first modified page in range */
    long code;
    long temp;
    long flags;			/* flags to cm_SyncOp */

    /* clear things out */
    biop->scp = scp;			/* do not hold; held by caller */
    biop->offset = *inOffsetp;
    biop->length = 0;
    biop->bufListp = NULL;
    biop->bufListEndp = NULL;
    biop->reserved = 0;

    /* reserve a chunk's worth of buffers */
    lock_ReleaseWrite(&scp->rw);
    buf_ReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
    lock_ObtainWrite(&scp->rw);

    bufp = NULL;
    for (temp = 0; temp < inSize; temp += cm_data.buf_blockSize) {
        thyper = ConvertLongToLargeInteger(temp);
        tbase = LargeIntegerAdd(*inOffsetp, thyper);

        bufp = buf_Find(scp, &tbase);
        if (bufp) {
            /* get buffer mutex and scp mutex safely */
            lock_ReleaseWrite(&scp->rw);
            lock_ObtainMutex(&bufp->mx);
            lock_ObtainWrite(&scp->rw);

            flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_STOREDATA | CM_SCACHESYNC_BUFLOCKED;
            code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
            if (code) {
                lock_ReleaseMutex(&bufp->mx);
                buf_Release(bufp);
                bufp = NULL;
                buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
                return code;
            }

            /* if the buffer is dirty, we're done */
            if (bufp->flags & CM_BUF_DIRTY) {
                osi_assertx(!(bufp->flags & CM_BUF_WRITING),
                            "WRITING w/o CMSTORING in SetupStoreBIOD");
                bufp->flags |= CM_BUF_WRITING;
                break;
            }

            /* this buffer is clean, so there's no reason to process it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
        }
    }

    /* if we get here with bufp still NULL, we didn't find any dirty buffers
     * that weren't already being stored back, so we just quit now.
     */
    if (!bufp) {
        return 0;
    }

    /* don't need buffer mutex any more */
    lock_ReleaseMutex(&bufp->mx);

    /* put this element in the list */
    qdp = osi_QDAlloc();
    osi_SetQData(qdp, bufp);
    /* don't have to hold bufp, since held by buf_Find above */
    osi_QAddH((osi_queue_t **) &biop->bufListp,
              (osi_queue_t **) &biop->bufListEndp,
              &qdp->q);
    biop->length = cm_data.buf_blockSize;
    firstModOffset = bufp->offset;
    biop->offset = firstModOffset;
    bufp = NULL;	/* this buffer and reference added to the queue */

    /* compute the window surrounding *inOffsetp of size cm_chunkSize */
    scanStart = *inOffsetp;
    scanStart.LowPart &= (-cm_chunkSize);
    thyper = ConvertLongToLargeInteger(cm_chunkSize);
    scanEnd = LargeIntegerAdd(scanStart, thyper);

    flags = CM_SCACHESYNC_GETSTATUS
        | CM_SCACHESYNC_STOREDATA
        | CM_SCACHESYNC_BUFLOCKED
        | CM_SCACHESYNC_NOWAIT;

    /* start by looking backwards until scanStart */
    /* hyper version of cm_data.buf_blockSize */
    thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    tbase = LargeIntegerSubtract(firstModOffset, thyper);
    while (LargeIntegerGreaterThanOrEqualTo(tbase, scanStart)) {
        /* see if we can find the buffer */
        bufp = buf_Find(scp, &tbase);
        if (!bufp)
            break;

        /* try to lock it, and quit if we can't (simplifies locking) */
        lock_ReleaseWrite(&scp->rw);
        code = lock_TryMutex(&bufp->mx);
        lock_ObtainWrite(&scp->rw);
        if (code == 0) {
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        if (!(bufp->flags & CM_BUF_DIRTY)) {
            /* buffer is clean, so we shouldn't add it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        /* don't need buffer mutex any more */
        lock_ReleaseMutex(&bufp->mx);

        /* we have a dirty buffer ready for storing.  Add it to the tail
         * of the list, since it immediately precedes all of the disk
         * addresses we've already collected.
         */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, bufp);
        /* no buf_hold necessary, since we have it held from buf_Find */
        osi_QAddT((osi_queue_t **) &biop->bufListp,
                  (osi_queue_t **) &biop->bufListEndp,
                  &qdp->q);
        bufp = NULL;	/* added to the queue */

        /* update biod info describing the transfer */
        biop->offset = LargeIntegerSubtract(biop->offset, thyper);
        biop->length += cm_data.buf_blockSize;

        /* update loop pointer */
        tbase = LargeIntegerSubtract(tbase, thyper);
    }	/* while loop looking for pages preceding the one we found */

    /* now, find later dirty, contiguous pages, and add them to the list */
    /* hyper version of cm_data.buf_blockSize */
    thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
    tbase = LargeIntegerAdd(firstModOffset, thyper);
    while (LargeIntegerLessThan(tbase, scanEnd)) {
        /* see if we can find the buffer */
        bufp = buf_Find(scp, &tbase);
        if (!bufp)
            break;

        /* try to lock it, and quit if we can't (simplifies locking) */
        lock_ReleaseWrite(&scp->rw);
        code = lock_TryMutex(&bufp->mx);
        lock_ObtainWrite(&scp->rw);
        if (code == 0) {
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        if (!(bufp->flags & CM_BUF_DIRTY)) {
            /* buffer is clean, so we shouldn't add it */
            cm_SyncOpDone(scp, bufp, flags);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
            break;
        }

        /* don't need buffer mutex any more */
        lock_ReleaseMutex(&bufp->mx);

        /* we have a dirty buffer ready for storing.  Add it to the head
         * of the list, since it immediately follows all of the disk
         * addresses we've already collected.
         */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, bufp);
        /* no buf_hold necessary, since we have it held from buf_Find */
        osi_QAddH((osi_queue_t **) &biop->bufListp,
                  (osi_queue_t **) &biop->bufListEndp,
                  &qdp->q);
        bufp = NULL;	/* added to the queue */

        /* update biod info describing the transfer */
        biop->length += cm_data.buf_blockSize;

        /* update loop pointer */
        tbase = LargeIntegerAdd(tbase, thyper);
    }	/* while loop looking for pages following the first page we found */

    /* finally, we're done */
    return 0;
}

/* scp must be locked; temporarily unlocked during processing.
 * If returns 0, returns buffers held in biop, and with
 * CM_BUF_CMFETCHING flags set.
 * If an error is returned, we don't return any buffers.
 */
long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
                       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    cm_buf_t *tbp;
    osi_hyper_t tblocksize;		/* a long long temp variable */
    osi_hyper_t pageBase;		/* base offset we're looking at */
    osi_queueData_t *qdp;		/* one temp queue structure */
    osi_queueData_t *tqdp;		/* another temp queue structure */
    long collected;			/* how many bytes have been collected */
    int isFirst;
    long flags;
    osi_hyper_t fileSize;		/* the # of bytes in the file */
    osi_queueData_t *heldBufListp;	/* we hold all buffers in this list */
    osi_queueData_t *heldBufListEndp;	/* first one */
    int reserving;

    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);

    biop->scp = scp;			/* do not hold; held by caller */
    biop->offset = *offsetp;
    /* null out the list of buffers */
    biop->bufListp = biop->bufListEndp = NULL;
    biop->reserved = 0;

    /* first lookup the file's length, so we know when to stop */
    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
    if (code)
        return code;

    /* copy out size, since it may change */
    fileSize = scp->serverLength;

    lock_ReleaseWrite(&scp->rw);

    pageBase = *offsetp;
    collected = pageBase.LowPart & (cm_chunkSize - 1);
    heldBufListp = NULL;
    heldBufListEndp = NULL;

    /*
     * Obtaining buffers can cause dirty buffers to be recycled, which
     * can cause a storeback, so cannot be done while we have buffers
     * reserved.
     *
     * To get around this, we get buffers twice.  Before reserving buffers,
     * we obtain and release each one individually.  After reserving
     * buffers, we try to obtain them again, but only by lookup, not by
     * recycling.  If a buffer has gone away while we were waiting for
     * the others, we just use whatever buffers we already have.
     *
     * On entry to this function, we are already holding a buffer, so we
     * can't wait for reservation.  So we call buf_TryReserveBuffers()
     * instead.  Not only that, we can't really even call buf_Get(), for
     * the same reason.  We can't avoid that, though.  To avoid deadlock
     * we allow only one thread to be executing the buf_Get()-buf_Release()
     * sequence at a time.
     */

    /* first hold all buffers, since we can't hold any locks in buf_Get */
    while (1) {
        /* stop at chunk boundary */
        if (collected >= cm_chunkSize)
            break;

        /* see if the next page would be past EOF */
        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
            break;

        code = buf_Get(scp, &pageBase, &tbp);
        if (code) {
            lock_ObtainWrite(&scp->rw);
            cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
            return code;
        }

        buf_Release(tbp);
        tbp = NULL;

        pageBase = LargeIntegerAdd(tblocksize, pageBase);
        collected += cm_data.buf_blockSize;
    }

    /* reserve a chunk's worth of buffers if possible */
    reserving = buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);

    pageBase = *offsetp;
    collected = pageBase.LowPart & (cm_chunkSize - 1);

    /* now hold all buffers, if they are still there */
    while (1) {
        /* stop at chunk boundary */
        if (collected >= cm_chunkSize)
            break;

        /* see if the next page would be past EOF */
        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
            break;

        tbp = buf_Find(scp, &pageBase);
        if (!tbp)
            break;

        /* add the buffer to the list */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, tbp);
        osi_QAddH((osi_queue_t **)&heldBufListp,
                  (osi_queue_t **)&heldBufListEndp,
                  &qdp->q);
        /* leave tbp held (from buf_Get) */

        collected += cm_data.buf_blockSize;
        pageBase = LargeIntegerAdd(tblocksize, pageBase);
    }

    /* look at each buffer, adding it into the list if it looks idle and
     * filled with old data.  One special case: wait for idle if it is the
     * first buffer since we really need that one for our caller to make
     * any progress.
     */
    isFirst = 1;
    collected = 0;	/* now count how many we'll really use */
    for (tqdp = heldBufListEndp;
         tqdp;
         tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
        /* get a ptr to the held buffer */
        tbp = osi_GetQData(tqdp);
        pageBase = tbp->offset;

        /* now lock the buffer lock */
        lock_ObtainMutex(&tbp->mx);
        lock_ObtainWrite(&scp->rw);

        /* don't bother fetching over data that is already current */
        if (tbp->dataVersion <= scp->dataVersion && tbp->dataVersion >= scp->bufDataVersionLow) {
            /* we don't need this buffer, since it is current */
            lock_ReleaseWrite(&scp->rw);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        flags = CM_SCACHESYNC_FETCHDATA | CM_SCACHESYNC_BUFLOCKED;
        if (!isFirst)
            flags |= CM_SCACHESYNC_NOWAIT;

        /* wait for the buffer to serialize, if required.  Doesn't
         * release the scp or buffer lock(s) if NOWAIT is specified.
         */
        code = cm_SyncOp(scp, tbp, userp, reqp, 0, flags);
        if (code) {
            lock_ReleaseWrite(&scp->rw);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        /* don't fetch over dirty buffers */
        if (tbp->flags & CM_BUF_DIRTY) {
            cm_SyncOpDone(scp, tbp, flags);
            lock_ReleaseWrite(&scp->rw);
            lock_ReleaseMutex(&tbp->mx);
            break;
        }

        /* we have the buffer */
        lock_ReleaseWrite(&scp->rw);
        lock_ReleaseMutex(&tbp->mx);

        /* add the buffer to the list */
        qdp = osi_QDAlloc();
        osi_SetQData(qdp, tbp);
        osi_QAddH((osi_queue_t **)&biop->bufListp,
                  (osi_queue_t **)&biop->bufListEndp,
                  &qdp->q);
        buf_Hold(tbp);

        /* from now on, a failure just stops our collection process, but
         * we still do the I/O to whatever we've already managed to collect.
         */
        isFirst = 0;
        collected += cm_data.buf_blockSize;
    }

    /* now, we've held in biop->bufListp all the buffers we're really
     * interested in.  We also have holds left from heldBufListp, and we
     * now release those holds on the buffers.
     */
    for (qdp = heldBufListp; qdp; qdp = tqdp) {
        tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
        tbp = osi_GetQData(qdp);
        osi_QRemoveHT((osi_queue_t **) &heldBufListp,
                      (osi_queue_t **) &heldBufListEndp,
                      &qdp->q);
        osi_QDFree(qdp);
        buf_Release(tbp);
        tbp = NULL;
    }

    /* Caller expects this */
    lock_ObtainWrite(&scp->rw);

    /* if we got a failure setting up the first buffer, then we don't have
     * any side effects yet, and we also have failed an operation that the
     * caller requires to make any progress.  Give up now.
     */
    if (code && isFirst) {
        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
        return code;
    }

    /* otherwise, we're still OK, and should just return the I/O setup we've
     * got.
     */
    biop->length = collected;
    biop->reserved = reserving;
    return 0;
}

/* release a bulk I/O structure that was setup by cm_SetupFetchBIOD or by
 * cm_SetupStoreBIOD
 */
void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore, int failed, int scp_locked)
{
    cm_scache_t *scp;		/* do not release; not held in biop */
    cm_buf_t *bufp;
    osi_queueData_t *qdp;
    osi_queueData_t *nqdp;
    int flags;

    /* Give back reserved buffers */
    if (biop->reserved)
        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);

    if (isStore)
        flags = CM_SCACHESYNC_STOREDATA;
    else
        flags = CM_SCACHESYNC_FETCHDATA;

    scp = biop->scp;
    if (biop->bufListp) {
        for (qdp = biop->bufListp; qdp; qdp = nqdp) {
            /* lookup next guy first, since we're going to free this one */
            nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);

            /* extract buffer and free queue data */
            bufp = osi_GetQData(qdp);
            osi_QRemoveHT((osi_queue_t **) &biop->bufListp,
                          (osi_queue_t **) &biop->bufListEndp,
                          &qdp->q);
            osi_QDFree(qdp);

            /* now, mark I/O as done, unlock the buffer and release it */
            if (scp_locked)
                lock_ReleaseWrite(&scp->rw);
            lock_ObtainMutex(&bufp->mx);
            lock_ObtainWrite(&scp->rw);
            cm_SyncOpDone(scp, bufp, flags);

            /* turn off writing and wakeup users */
            if (isStore) {
                if (bufp->flags & CM_BUF_WAITING) {
                    osi_Log2(afsd_logp, "cm_ReleaseBIOD Waking [scp 0x%p] bp 0x%p", scp, bufp);
                    osi_Wakeup((LONG_PTR) bufp);
                }
                if (failed)
                    bufp->flags &= ~CM_BUF_WRITING;
                else {
                    bufp->flags &= ~(CM_BUF_WRITING | CM_BUF_DIRTY);
                    bufp->dirty_offset = bufp->dirty_length = 0;
                }
            }

            if (!scp_locked)
                lock_ReleaseWrite(&scp->rw);
            lock_ReleaseMutex(&bufp->mx);
            buf_Release(bufp);
            bufp = NULL;
        }
    } else {
        if (!scp_locked)
            lock_ObtainWrite(&scp->rw);
        cm_SyncOpDone(scp, NULL, flags);
        if (!scp_locked)
            lock_ReleaseWrite(&scp->rw);
    }

    /* clean things out */
    biop->bufListp = NULL;
    biop->bufListEndp = NULL;
}
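
/* Pairing sketch: every successful cm_SetupStoreBIOD/cm_SetupFetchBIOD call
 * above is balanced by exactly one cm_ReleaseBIOD, as in the store path:
 *
 *     code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
 *     ... store the data ...
 *     cm_ReleaseBIOD(&biod, 1, code, 1);   (isStore, failure code, scp locked)
 */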

/* Fetch a buffer.  Called with scp locked.
 * The scp is locked on return.
 */
long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *userp,
                  cm_req_t *reqp)
{
    long code, code1 = 0;
    afs_int32 nbytes;			/* bytes in transfer */
    afs_int32 nbytes_hi = 0;		/* high-order 32 bits of bytes in transfer */
    afs_int64 length_found = 0;
    long rbytes;			/* bytes in rx_Read call */
    long temp;
    AFSFetchStatus afsStatus;
    AFSCallBack callback;
    AFSVolSync volSync;
    AFSFid tfid;
    char *bufferp;
    cm_buf_t *tbufp;			/* buf we're filling */
    osi_queueData_t *qdp;		/* q element we're scanning */
    cm_conn_t *connp;
    struct rx_call *rxcallp;
    struct rx_connection *rxconnp;
    cm_bulkIO_t biod;			/* bulk IO descriptor */
    int getroot;
    afs_int32 t1, t2;
    int require_64bit_ops = 0;

    /* now, the buffer may or may not be filled with good data (buf_GetNew
     * drops lots of locks, and may indeed return a properly initialized
     * buffer, although more likely it will just return a new, empty, buffer.
     */

#ifdef AFS_FREELANCE_CLIENT
    // yj: if they're trying to get the /afs directory, we need to
    // handle it differently, since it's local rather than on any
    // server

    getroot = (scp == cm_data.rootSCachep);
    if (getroot)
        osi_Log1(afsd_logp, "GetBuffer returns cm_data.rootSCachep=%x", cm_data.rootSCachep);
#endif

    if (cm_HaveCallback(scp) && bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow) {
        /* We already have this buffer; don't do extra work */
        return 0;
    }

    cm_AFSFidFromFid(&tfid, &scp->fid);

    code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, userp, reqp);
    if (code) {
        /* couldn't even get the first page setup properly */
        osi_Log1(afsd_logp, "GetBuffer: SetupFetchBIOD failure code %d", code);
        return code;
    }

    /* once we get here, we have the callback in place, we know that no one
     * is fetching the data now.  Check one last time that we still have
     * the wrong data, and then fetch it if we're still wrong.
     *
     * We can lose a race condition and end up with biod.length zero, in
     * which case we just retry.
     */
    if ((bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow) || biod.length == 0) {
        if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow) &&
             LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->serverLength))
        {
            osi_Log4(afsd_logp, "Bad DVs 0x%x != (0x%x -> 0x%x) or length 0x%x",
                     bufp->dataVersion, scp->bufDataVersionLow, scp->dataVersion, biod.length);

            if (bufp->dataVersion == CM_BUF_VERSION_BAD)
                memset(bufp->datap, 0, cm_data.buf_blockSize);
            bufp->dataVersion = scp->dataVersion;
        }
        cm_ReleaseBIOD(&biod, 0, 0, 1);
        return 0;
    } else if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow)
                && (scp->mask & CM_SCACHEMASK_TRUNCPOS) &&
                LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->truncPos)) {
        memset(bufp->datap, 0, cm_data.buf_blockSize);
        bufp->dataVersion = scp->dataVersion;
        cm_ReleaseBIOD(&biod, 0, 0, 1);
        return 0;
    }

    lock_ReleaseWrite(&scp->rw);

    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
                                                ConvertLongToLargeInteger(biod.length)),
                                ConvertLongToLargeInteger(LONG_MAX))) {
        require_64bit_ops = 1;
    }

    osi_Log2(afsd_logp, "cm_GetBuffer: fetching data scp %p bufp %p", scp, bufp);
    osi_Log3(afsd_logp, "cm_GetBuffer: fetching data scpDV 0x%x scpDVLow 0x%x bufDV 0x%x",
             scp->dataVersion, scp->bufDataVersionLow, bufp->dataVersion);

#ifdef AFS_FREELANCE_CLIENT

    // if getroot then we don't need to make any calls
    // just return fake data

    if (cm_freelanceEnabled && getroot) {
        // setup the fake status
        afsStatus.InterfaceVersion = 0x1;
        afsStatus.FileType = 0x2;
        afsStatus.LinkCount = scp->linkCount;
        afsStatus.Length = cm_fakeDirSize;
        afsStatus.DataVersion = (afs_uint32)(cm_data.fakeDirVersion & 0xFFFFFFFF);
        afsStatus.Author = 0x1;
        afsStatus.Owner = 0x0;
        afsStatus.CallerAccess = 0x9;
        afsStatus.AnonymousAccess = 0x9;
        afsStatus.UnixModeBits = 0x1ff;
        afsStatus.ParentVnode = 0x1;
        afsStatus.ParentUnique = 0x1;
        afsStatus.ResidencyMask = 0;
        afsStatus.ClientModTime = (afs_uint32)FakeFreelanceModTime;
        afsStatus.ServerModTime = (afs_uint32)FakeFreelanceModTime;
        afsStatus.Group = 0;
        afsStatus.SyncCounter = 0;
        afsStatus.dataVersionHigh = (afs_uint32)(cm_data.fakeDirVersion >> 32);
        afsStatus.lockCount = 0;
        afsStatus.Length_hi = 0;
        afsStatus.errorCode = 0;

        // once we're done setting up the status info,
        // we just fill the buffer pages with fake data
        // from cm_FakeRootDir.  Extra pages are set to
        // zero.

        lock_ObtainMutex(&cm_Freelance_Lock);
        t1 = bufp->offset.LowPart;
        qdp = biod.bufListEndp;
        while (qdp) {
            tbufp = osi_GetQData(qdp);
            bufferp = tbufp->datap;
            memset(bufferp, 0, cm_data.buf_blockSize);
            t2 = cm_fakeDirSize - t1;
            if (t2 > (afs_int32)cm_data.buf_blockSize)
                t2 = cm_data.buf_blockSize;
            if (t2 > 0) {
                memcpy(bufferp, cm_FakeRootDir+t1, t2);
            } else {
                t2 = 0;
            }
            t1 += t2;
            qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
        }
        lock_ReleaseMutex(&cm_Freelance_Lock);

        // once we're done, we skip over the part of the
        // code that does the ACTUAL fetching of data for
        // this call
        goto fetchingcompleted;
    }
#endif /* AFS_FREELANCE_CLIENT */

    /* now make the call */
    do {
        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
        if (code)
            continue;

        rxconnp = cm_GetRxConn(connp);
        rxcallp = rx_NewCall(rxconnp);
        rx_PutConnection(rxconnp);

#ifdef AFS_LARGEFILES
        nbytes = nbytes_hi = 0;

        if (SERVERHAS64BIT(connp)) {
            osi_Log4(afsd_logp, "CALL FetchData64 scp 0x%p, off 0x%x:%08x, size 0x%x",
                     scp, biod.offset.HighPart, biod.offset.LowPart, biod.length);

            code = StartRXAFS_FetchData64(rxcallp, &tfid, biod.offset.QuadPart, biod.length);

            if (code == 0) {
                temp = rx_Read32(rxcallp, &nbytes_hi);
                if (temp == sizeof(afs_int32)) {
                    nbytes_hi = ntohl(nbytes_hi);
                } else {
                    nbytes_hi = 0;
                    code = rxcallp->error;
                    code1 = rx_EndCall(rxcallp, code);
                    rxcallp = NULL;
                }
            }
        }

        if (code == RXGEN_OPCODE || !SERVERHAS64BIT(connp)) {
            if (require_64bit_ops) {
                osi_Log0(afsd_logp, "Skipping FetchData.  Operation requires FetchData64");
                code = CM_ERROR_TOOBIG;
            } else {
                if (!rxcallp) {
                    rxconnp = cm_GetRxConn(connp);
                    rxcallp = rx_NewCall(rxconnp);
                    rx_PutConnection(rxconnp);
                }

                osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
                         scp, biod.offset.LowPart, biod.length);

                code = StartRXAFS_FetchData(rxcallp, &tfid, biod.offset.LowPart,
                                            biod.length);

                SET_SERVERHASNO64BIT(connp);
            }
        }

        if (code == 0) {
            temp = rx_Read32(rxcallp, &nbytes);
            if (temp == sizeof(afs_int32)) {
                nbytes = ntohl(nbytes);
                FillInt64(length_found, nbytes_hi, nbytes);
                if (length_found > biod.length)
                    code = (rxcallp->error < 0) ? rxcallp->error : -1;
            } else {
                code = (rxcallp->error < 0) ? rxcallp->error : -1;
            }
        }
        /* for the moment, nbytes_hi will always be 0 if code == 0
           because biod.length is a 32-bit quantity. */
#else
        osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
                 scp, biod.offset.LowPart, biod.length);

        code = StartRXAFS_FetchData(rxcallp, &tfid, biod.offset.LowPart,
                                    biod.length);

        /* now copy the data out of the pipe and put it in the buffer */
        if (code == 0) {
            temp = rx_Read32(rxcallp, &nbytes);
            if (temp == sizeof(afs_int32)) {
                nbytes = ntohl(nbytes);
                if (nbytes > biod.length)
                    code = (rxcallp->error < 0) ? rxcallp->error : -1;
            }
            else
                code = (rxcallp->error < 0) ? rxcallp->error : -1;
        }
#endif /* AFS_LARGEFILES */

        if (code == 0) {
            qdp = biod.bufListEndp;
            if (qdp) {
                tbufp = osi_GetQData(qdp);
                bufferp = tbufp->datap;
            } else {
                bufferp = NULL;
            }

            /* fill nbytes of data from the pipe into the pages.
             * When we stop, qdp will point at the last page we're
             * dealing with, and bufferp will tell us where we
             * stopped.  We'll need this info below when we clear
             * the remainder of the last page out (and potentially
             * clear later pages out, if we fetch past EOF).
             */
            while (nbytes > 0) {
                /* assert that there are still more buffers;
                 * our check above for nbytes being less than
                 * biod.length should ensure this.
                 */
                osi_assertx(bufferp != NULL, "null cm_buf_t");

                /* read rbytes of data */
                rbytes = (nbytes > cm_data.buf_blockSize ? cm_data.buf_blockSize : nbytes);
                temp = rx_Read(rxcallp, bufferp, rbytes);
                if (temp < rbytes) {
                    code = (rxcallp->error < 0) ? rxcallp->error : -1;
                    break;
                }

                /* allow read-while-fetching.
                 * if this is the last buffer, clear the
                 * PREFETCHING flag, so the reader waiting for
                 * this buffer will start a prefetch.
                 */
                tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
                lock_ObtainWrite(&scp->rw);
                if (scp->flags & CM_SCACHEFLAG_WAITING) {
                    osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
                    osi_Wakeup((LONG_PTR) &scp->flags);
                }
                if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
                    osi_hyper_t tlength = ConvertLongToLargeInteger(biod.length);
                    *cpffp = 1;
                    cm_ClearPrefetchFlag(0, scp, &biod.offset, &tlength);
                }
                lock_ReleaseWrite(&scp->rw);

                /* and adjust counters */
                nbytes -= temp;

                /* and move to the next buffer */
                if (nbytes != 0) {
                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                    if (qdp) {
                        tbufp = osi_GetQData(qdp);
                        bufferp = tbufp->datap;
                    } else {
                        bufferp = NULL;
                    }
                }
            }

            /* zero out remainder of last pages, in case we are
             * fetching past EOF.  We were fetching an integral #
             * of pages, but stopped, potentially in the middle of
             * a page.  Zero the remainder of that page, and then
             * all of the rest of the pages.
             */
            /* bytes fetched */
            osi_assertx((bufferp - tbufp->datap) < LONG_MAX, "data >= LONG_MAX");
            rbytes = (long) (bufferp - tbufp->datap);

            /* bytes left to zero */
            rbytes = cm_data.buf_blockSize - rbytes;
            while (qdp) {
                if (rbytes != 0)
                    memset(bufferp, 0, rbytes);
                qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                if (qdp == NULL)
                    break;
                tbufp = osi_GetQData(qdp);
                bufferp = tbufp->datap;
                /* bytes to clear in this page */
                rbytes = cm_data.buf_blockSize;
            }
        }

fetchingcompleted:
        if (code == 0) {
            if (SERVERHAS64BIT(connp))
                code = EndRXAFS_FetchData64(rxcallp, &afsStatus, &callback, &volSync);
            else
                code = EndRXAFS_FetchData(rxcallp, &afsStatus, &callback, &volSync);
        } else {
            if (SERVERHAS64BIT(connp))
                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData64 skipped due to error %d", code);
            else
                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error %d", code);
        }

        if (rxcallp)
            code1 = rx_EndCall(rxcallp, code);

        if (code1 == RXKADUNKNOWNKEY)
            osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");

        /* Prefer the error value from FetchData over rx_EndCall */
        if (code == 0 && code1 != 0)
            code = code1;

        osi_Log0(afsd_logp, "CALL FetchData DONE");

    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));

    code = cm_MapRPCError(code, reqp);

    lock_ObtainWrite(&scp->rw);

    /* we know that no one else has changed the buffer, since we still have
     * the fetching flag on the buffers, and we have the scp locked again.
     * Copy in the version # into the buffer if we got code 0 back from the
     * read.
     */
    if (code == 0) {
        for (qdp = biod.bufListp;
             qdp;
             qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
            tbufp = osi_GetQData(qdp);
            tbufp->dataVersion = afsStatus.dataVersionHigh;
            tbufp->dataVersion <<= 32;
            tbufp->dataVersion |= afsStatus.DataVersion;
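
            /* e.g. dataVersionHigh 0x1 and DataVersion 0x2 combine to the
             * 64-bit data version 0x100000002 (illustrative values) */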

#ifdef DISKCACHE95
            /* write buffer out to disk cache */
            diskcache_Update(tbufp->dcp, tbufp->datap, cm_data.buf_blockSize,
                             tbufp->dataVersion);
#endif /* DISKCACHE95 */
        }
    }

    /* release scatter/gather I/O structure (buffers, locks) */
    cm_ReleaseBIOD(&biod, 0, code, 1);

    if (code == 0)
        cm_MergeStatus(NULL, scp, &afsStatus, &volSync, userp, 0);

    return code;
}