2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 #include <afsconfig.h>
12 #include <afs/param.h>
27 extern void afsi_log(char *pattern, ...);
30 #ifdef AFS_FREELANCE_CLIENT
31 extern osi_mutex_t cm_Freelance_Lock;
34 #define USE_RX_IOVEC 1
36 /* we can access connp->serverp without holding a lock because that
37 never changes once the connection is made. */
38 #define SERVERHAS64BIT(connp) (!((connp)->serverp->flags & CM_SERVERFLAG_NO64BIT))
39 #define SET_SERVERHASNO64BIT(connp) (cm_SetServerNo64Bit((connp)->serverp, TRUE))
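/* Illustrative sketch (not a verbatim excerpt) of how these two macros are
 * used by the RPC paths below: the 64-bit variant of an RPC is tried first;
 * a server without large-file support answers RXGEN_OPCODE, the flag is
 * cleared with SET_SERVERHASNO64BIT(), and the 32-bit variant is used on
 * the next pass of the retry loop:
 *
 *     if (SERVERHAS64BIT(connp))
 *         code = StartRXAFS_StoreData64(rxcallp, ...);
 *     else
 *         code = StartRXAFS_StoreData(rxcallp, ...);
 *     ...
 *     code1 = rx_EndCall(rxcallp, code);
 *     if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp))
 *         SET_SERVERHASNO64BIT(connp);
 */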
41 /* functions called back from the buffer package when reading or writing data,
42 * or when holding or releasing a vnode pointer.
44 long cm_BufWrite(void *vscp, osi_hyper_t *offsetp, long length, long flags,
45 cm_user_t *userp, cm_req_t *reqp)
47 /* store the data back from this buffer; the buffer is locked and held,
48 * but the vnode involved isn't locked, yet. It is held by its
49 * reference from the buffer, which won't change until the buffer is
50 * released by our caller. Thus, we don't have to worry about holding
54 cm_scache_t *scp = vscp;
56 afs_int32 save_nbytes;
58 AFSFetchStatus outStatus;
59 AFSStoreStatus inStatus;
63 struct rx_call *rxcallp;
64 struct rx_connection *rxconnp;
71 cm_bulkIO_t biod; /* bulk IO descriptor */
72 int require_64bit_ops = 0;
73 int call_was_64bit = 0;
74 int scp_locked = flags & CM_BUF_WRITE_SCP_LOCKED;
76 osi_assertx(userp != NULL, "null cm_user_t");
77 osi_assertx(scp != NULL, "null cm_scache_t");
79 memset(&volSync, 0, sizeof(volSync));
81 /* now, the buffer may or may not be filled with good data (buf_GetNew
82 * drops lots of locks, and may indeed return a properly initialized
83 * buffer, although more likely it will just return a new, empty buffer).
86 lock_ObtainWrite(&scp->rw);
87 if (scp->flags & CM_SCACHEFLAG_DELETED) {
89 lock_ReleaseWrite(&scp->rw);
90 return CM_ERROR_NOSUCHFILE;
93 cm_AFSFidFromFid(&tfid, &scp->fid);
95 /* Serialize StoreData RPC's; for rationale see cm_scache.c */
96 (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);
98 code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
100 osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
101 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
103 lock_ReleaseWrite(&scp->rw);
107 if (biod.length == 0) {
108 osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
109 cm_ReleaseBIOD(&biod, 1, 0, 1); /* should be a NOOP */
110 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
112 lock_ReleaseWrite(&scp->rw);
116 /* prepare the store status to be sent to the server */
117 scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
118 cm_StatusFromAttr(&inStatus, scp, NULL);
119 truncPos = scp->length;
120 if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
121 && LargeIntegerLessThan(scp->truncPos, truncPos))
122 truncPos = scp->truncPos;
123 scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
125 /* compute how many bytes to write from this buffer */
126 thyper = LargeIntegerSubtract(scp->length, biod.offset);
127 if (LargeIntegerLessThanZero(thyper)) {
128 /* entire buffer is past EOF */
132 /* otherwise write out part of buffer before EOF, but not
133 * more than bufferSize bytes.
135 if (LargeIntegerGreaterThan(thyper,
136 ConvertLongToLargeInteger(biod.length))) {
137 nbytes = biod.length;
139 /* if thyper is less than or equal to biod.length, then we
140 can safely assume that the value fits in a long. */
141 nbytes = thyper.LowPart;
145 if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
146 ConvertLongToLargeInteger(nbytes)),
147 ConvertLongToLargeInteger(LONG_MAX)) ||
148 LargeIntegerGreaterThan(truncPos,
149 ConvertLongToLargeInteger(LONG_MAX))) {
150 require_64bit_ops = 1;
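/* Worked example (illustrative only): a store whose biod.offset is
 * 0x80000000 (2 GB) already exceeds LONG_MAX (0x7FFFFFFF when long is
 * 32 bits), so the offset cannot be expressed in the 32-bit StoreData
 * arguments and the StoreData64 RPC must be used; the same applies
 * whenever truncPos is beyond the 2 GB mark. */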
153 lock_ReleaseWrite(&scp->rw);
155 /* now we're ready to do the store operation */
156 save_nbytes = nbytes;
158 code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
163 rxconnp = cm_GetRxConn(connp);
164 rxcallp = rx_NewCall(rxconnp);
165 rx_PutConnection(rxconnp);
167 if (SERVERHAS64BIT(connp)) {
170 osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData64 scp 0x%p, offset 0x%x:%08x, length 0x%x",
171 scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);
173 code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
174 biod.offset.QuadPart,
178 osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData64 FAILURE, code 0x%x", code);
180 osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData64 SUCCESS");
184 if (require_64bit_ops) {
185 osi_Log0(afsd_logp, "Skipping StartRXAFS_StoreData. The operation requires large file support in the server.");
186 code = CM_ERROR_TOOBIG;
188 osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
189 scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);
191 code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
192 biod.offset.LowPart, nbytes, truncPos.LowPart);
194 osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
196 osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
201 afs_uint32 buf_offset = 0, bytes_copied = 0;
203 /* write the data from the list of buffers */
207 struct iovec tiov[RX_MAXIOVECS];
208 afs_int32 tnio, vlen, vbytes, iov, voffset;
211 vbytes = rx_WritevAlloc(rxcallp, tiov, &tnio, RX_MAXIOVECS, nbytes);
213 code = RX_PROTOCOL_ERROR;
217 for ( iov = voffset = vlen = 0;
218 vlen < vbytes && iov < tnio; vlen += wbytes) {
220 qdp = biod.bufListEndp;
221 buf_offset = offsetp->LowPart % cm_data.buf_blockSize;
222 } else if (buf_offset == cm_data.buf_blockSize) {
223 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
227 osi_assertx(qdp != NULL, "null osi_queueData_t");
228 bufp = osi_GetQData(qdp);
229 bufferp = bufp->datap + buf_offset;
230 wbytes = vbytes - vlen;
231 if (wbytes > cm_data.buf_blockSize - buf_offset)
232 wbytes = cm_data.buf_blockSize - buf_offset;
234 vleft = tiov[iov].iov_len - voffset;
235 while (wbytes > vleft && iov < tnio) {
236 memcpy(tiov[iov].iov_base + voffset, bufferp, vleft);
237 bytes_copied += vleft;
245 vleft = tiov[iov].iov_len;
249 memcpy(tiov[iov].iov_base + voffset, bufferp, wbytes);
250 bytes_copied += wbytes;
251 if (tiov[iov].iov_len == voffset + wbytes) {
254 vleft = (iov < tnio) ? tiov[iov].iov_len : 0;
260 buf_offset += wbytes;
266 osi_assertx(iov == tnio, "incorrect iov count");
267 osi_assertx(vlen == vbytes, "bytes_copied != vbytes");
268 osi_assertx(bufp->offset.QuadPart + buf_offset == biod.offset.QuadPart + bytes_copied,
269 "begin and end offsets don't match");
271 temp = rx_Writev(rxcallp, tiov, tnio, vbytes);
272 if (temp != vbytes) {
273 osi_Log3(afsd_logp, "rx_Writev failed bp 0x%p, %d != %d", bufp, temp, vbytes);
274 code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
278 osi_Log3(afsd_logp, "rx_Writev succeeded bp 0x%p offset 0x%x, wrote %u",
279 bufp, buf_offset, vbytes);
281 #else /* USE_RX_IOVEC */
283 qdp = biod.bufListEndp;
284 buf_offset = offsetp->LowPart % cm_data.buf_blockSize;
286 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
290 osi_assertx(qdp != NULL, "null osi_queueData_t");
291 bufp = osi_GetQData(qdp);
292 bufferp = bufp->datap + buf_offset;
294 if (wbytes > cm_data.buf_blockSize - buf_offset)
295 wbytes = cm_data.buf_blockSize - buf_offset;
297 /* write out wbytes of data from bufferp */
298 temp = rx_Write(rxcallp, bufferp, wbytes);
299 if (temp != wbytes) {
300 osi_Log3(afsd_logp, "rx_Write failed bp 0x%p, %d != %d",bufp,temp,wbytes);
301 code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
304 osi_Log2(afsd_logp, "rx_Write succeeded bp 0x%p, %d",bufp,temp);
307 #endif /* USE_RX_IOVEC */
308 } /* while more bytes to write */
309 } /* if RPC started successfully */
312 if (call_was_64bit) {
313 code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
315 osi_Log2(afsd_logp, "EndRXAFS_StoreData64 FAILURE scp 0x%p code %lX", scp, code);
317 osi_Log0(afsd_logp, "EndRXAFS_StoreData64 SUCCESS");
319 code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
321 osi_Log2(afsd_logp, "EndRXAFS_StoreData FAILURE scp 0x%p code %lX",scp,code);
323 osi_Log0(afsd_logp, "EndRXAFS_StoreData SUCCESS");
327 code1 = rx_EndCall(rxcallp, code);
329 if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
330 SET_SERVERHASNO64BIT(connp);
332 nbytes = save_nbytes;
336 /* Prefer StoreData error over rx_EndCall error */
339 } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
341 code = cm_MapRPCError(code, reqp);
344 osi_Log2(afsd_logp, "CALL StoreData FAILURE scp 0x%p, code 0x%x", scp, code);
346 osi_Log1(afsd_logp, "CALL StoreData SUCCESS scp 0x%p", scp);
348 /* now, clean up our state */
349 lock_ObtainWrite(&scp->rw);
351 cm_ReleaseBIOD(&biod, 1, code, 1);
352 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
356 /* now, here's something a little tricky: in AFS 3, a dirty
357 * length can't be directly stored; instead, a dirty chunk is
358 * stored that sets the file's size (by writing and by using
359 * the truncate-first option in the store call).
361 * At this point, we've just finished a store, and so the trunc
362 * pos field is clean. If the file's size at the server is at
363 * least as big as we think it should be, then we turn off the
364 * length dirty bit, since all the other dirty buffers must
365 * precede this one in the file.
367 * The file's desired size shouldn't be smaller than what's
368 * stored at the server now, since we just did the trunc pos
371 * We have to turn off the length dirty bit as soon as we can,
372 * so that we see updates made by other machines.
375 if (call_was_64bit) {
376 t.LowPart = outStatus.Length;
377 t.HighPart = outStatus.Length_hi;
379 t = ConvertLongToLargeInteger(outStatus.Length);
382 if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
383 scp->mask &= ~CM_SCACHEMASK_LENGTH;
385 cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, reqp, CM_MERGEFLAG_STOREDATA);
387 if (code == CM_ERROR_SPACE)
388 scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
389 else if (code == CM_ERROR_QUOTA)
390 scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
393 lock_ReleaseWrite(&scp->rw);
399 * Truncate the file, by sending a StoreData RPC with zero length.
401 * Called with scp locked. Releases and re-obtains the lock.
403 long cm_StoreMini(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
405 AFSFetchStatus outStatus;
406 AFSStoreStatus inStatus;
410 osi_hyper_t truncPos;
412 struct rx_call *rxcallp;
413 struct rx_connection *rxconnp;
414 int require_64bit_ops = 0;
415 int call_was_64bit = 0;
417 memset(&volSync, 0, sizeof(volSync));
419 /* Serialize StoreData RPC's; for rationale see cm_scache.c */
420 (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
421 CM_SCACHESYNC_STOREDATA_EXCL);
423 /* prepare the store status to be sent to the server */
424 inStatus.Mask = AFS_SETMODTIME;
425 inStatus.ClientModTime = scp->clientModTime;
426 scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;
428 /* calculate truncation position */
429 truncPos = scp->length;
430 if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
431 && LargeIntegerLessThan(scp->truncPos, truncPos))
432 truncPos = scp->truncPos;
433 scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
435 if (LargeIntegerGreaterThan(truncPos,
436 ConvertLongToLargeInteger(LONG_MAX))) {
438 require_64bit_ops = 1;
441 lock_ReleaseWrite(&scp->rw);
443 cm_AFSFidFromFid(&tfid, &scp->fid);
445 /* now we're ready to do the store operation */
447 code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
452 rxconnp = cm_GetRxConn(connp);
453 rxcallp = rx_NewCall(rxconnp);
454 rx_PutConnection(rxconnp);
456 if (SERVERHAS64BIT(connp)) {
459 code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
460 0, 0, truncPos.QuadPart);
464 if (require_64bit_ops) {
465 code = CM_ERROR_TOOBIG;
467 code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
468 0, 0, truncPos.LowPart);
474 code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
476 code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
478 code1 = rx_EndCall(rxcallp, code);
480 if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
481 SET_SERVERHASNO64BIT(connp);
485 /* prefer StoreData error over rx_EndCall error */
486 if (code == 0 && code1 != 0)
488 } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
489 code = cm_MapRPCError(code, reqp);
491 /* now, clean up our state */
492 lock_ObtainWrite(&scp->rw);
494 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
499 * For explanation of handling of CM_SCACHEMASK_LENGTH,
502 if (call_was_64bit) {
503 t.HighPart = outStatus.Length_hi;
504 t.LowPart = outStatus.Length;
506 t = ConvertLongToLargeInteger(outStatus.Length);
509 if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
510 scp->mask &= ~CM_SCACHEMASK_LENGTH;
511 cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, reqp, CM_MERGEFLAG_STOREDATA);
517 long cm_BufRead(cm_buf_t *bufp, long nbytes, long *bytesReadp, cm_user_t *userp)
521 /* now return a code that means that I/O is done */
526 * stabilize scache entry with CM_SCACHESYNC_SETSIZE. This prevents any new
527 * data buffers from being allocated, new data from being fetched from the
528 * file server, and writes from being accepted from the application, but
529 * permits dirty buffers to be written to the file server.
531 * Stabilize uses cm_SyncOp to maintain the cm_scache_t in this stable state
532 * instead of holding the rwlock exclusively. This permits background stores
533 * to be performed in parallel and in particular allow FlushFile to be
534 * implemented without violating the locking hierarchy.
536 long cm_BufStabilize(void *vscp, cm_user_t *userp, cm_req_t *reqp)
538 cm_scache_t *scp = vscp;
541 lock_ObtainWrite(&scp->rw);
542 code = cm_SyncOp(scp, NULL, userp, reqp, 0,
543 CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
544 lock_ReleaseWrite(&scp->rw);
549 /* undoes the work that cm_BufStabilize does: releases lock so things can change again */
550 long cm_BufUnstabilize(void *vscp, cm_user_t *userp)
552 cm_scache_t *scp = vscp;
554 lock_ObtainWrite(&scp->rw);
555 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
557 lock_ReleaseWrite(&scp->rw);
559 /* always succeeds */
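/* Illustrative pairing (a sketch, not a verbatim caller): the buffer package
 * is expected to bracket work that must see a frozen file size with these
 * two callbacks, e.g.
 *
 *     cm_BufStabilize(scp, userp, reqp);
 *     ... scan or flush buffers; no new data can be fetched or accepted
 *         from the application, but dirty buffers may still be stored ...
 *     cm_BufUnstabilize(scp, userp);
 *
 * which is what lets background stores proceed in parallel without holding
 * scp->rw exclusively for the whole operation. */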
563 cm_buf_ops_t cm_bufOps = {
570 long cm_ValidateDCache(void)
572 return buf_ValidateBuffers();
575 long cm_ShutdownDCache(void)
580 int cm_InitDCache(int newFile, long chunkSize, afs_uint64 nbuffers)
582 return buf_Init(newFile, &cm_bufOps, nbuffers);
585 /* check to see if we have an up-to-date buffer. The buffer must have
586 * previously been obtained by calling buf_Get.
588 * Make sure we have a callback, and that the dataversion matches.
590 * Scp must be locked.
592 * Bufp *may* be locked.
594 int cm_HaveBuffer(cm_scache_t *scp, cm_buf_t *bufp, int isBufLocked)
597 if (!cm_HaveCallback(scp))
599 if ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
601 if (bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow)
603 if (bufp->offset.QuadPart >= scp->serverLength.QuadPart)
606 code = lock_TryMutex(&bufp->mx);
608 /* don't have the lock, and can't lock it, then
615 /* remember dirty flag for later */
616 code = bufp->flags & CM_BUF_DIRTY;
618 /* release lock if we obtained it here */
620 lock_ReleaseMutex(&bufp->mx);
622 /* if buffer was dirty, buffer is acceptable for use */
630 * used when deciding whether to do a background fetch or not.
631 * call with scp->rw write-locked.
634 cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, osi_hyper_t *length,
635 cm_user_t *userp, cm_req_t *reqp, osi_hyper_t *realBasep)
639 osi_hyper_t tblocksize;
644 /* now scan all buffers in the range, looking for any that look like
649 tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
651 while (LargeIntegerGreaterThanZero(tlength)) {
652 /* get callback so we can do a meaningful dataVersion comparison */
653 code = cm_SyncOp(scp, NULL, userp, reqp, 0,
654 CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
658 if (LargeIntegerGreaterThanOrEqualTo(tbase, scp->length)) {
659 /* we're past the end of file */
663 bp = buf_Find(scp, &tbase);
664 /* We cheat slightly by not locking the bp mutex. */
666 if ((bp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING | CM_BUF_CMBKGFETCH)) == 0
667 && (bp->dataVersion < scp->bufDataVersionLow || bp->dataVersion > scp->dataVersion))
675 /* if this buffer is essentially guaranteed to require a fetch,
676 * break out here and return this position.
681 tbase = LargeIntegerAdd(tbase, tblocksize);
682 tlength = LargeIntegerSubtract(tlength, tblocksize);
685 /* if we get here, either everything is fine or 'stop' stopped us at a
686 * particular buffer in the range that definitely needs to be fetched.
689 /* return non-zero code since realBasep won't be valid */
693 /* successfully found a page that will need fetching */
701 cm_BkgStore(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
709 if (scp->flags & CM_SCACHEFLAG_DELETED) {
710 osi_Log4(afsd_logp, "Skipping BKG store - Deleted scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
714 /* Retries will be performed by the BkgDaemon thread if appropriate */
715 req.flags |= CM_REQ_NORETRY;
717 toffset.LowPart = p1;
718 toffset.HighPart = p2;
721 osi_Log4(afsd_logp, "Starting BKG store scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
723 code = cm_BufWrite(scp, &toffset, length, /* flags */ 0, userp, &req);
725 osi_Log4(afsd_logp, "Finished BKG store scp 0x%p, offset 0x%x:%08x, code 0x%x", scp, p2, p1, code);
729 * Keep the following list synchronized with the
730 * error code list in cm_BkgDaemon
733 case CM_ERROR_TIMEDOUT: /* or server restarting */
735 case CM_ERROR_WOULDBLOCK:
736 case CM_ERROR_ALLBUSY:
737 case CM_ERROR_ALLDOWN:
738 case CM_ERROR_ALLOFFLINE:
739 case CM_ERROR_PARTIALWRITE:
740 break; /* cm_BkgDaemon will re-insert the request in the queue */
743 lock_ObtainWrite(&scp->rw);
744 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
745 lock_ReleaseWrite(&scp->rw);
750 /* Called with scp locked */
751 void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base, osi_hyper_t *length)
756 end = LargeIntegerAdd(*base, *length);
757 if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
758 scp->prefetch.base = *base;
759 if (LargeIntegerGreaterThan(end, scp->prefetch.end))
760 scp->prefetch.end = end;
762 scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
765 /* do the prefetch. if the prefetch fails, return 0 (success)
766 * because there is no harm done. */
768 cm_BkgPrefetch(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
776 osi_hyper_t tblocksize;
784 /* Retries will be performed by the BkgDaemon thread if appropriate */
785 req.flags |= CM_REQ_NORETRY;
788 fetched.HighPart = 0;
789 tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
793 length.HighPart = p4;
795 end = LargeIntegerAdd(base, length);
797 osi_Log5(afsd_logp, "Starting BKG prefetch scp 0x%p offset 0x%x:%x length 0x%x:%x",
798 scp, p2, p1, p4, p3);
800 for ( code = 0, offset = base;
801 code == 0 && LargeIntegerLessThan(offset, end);
802 offset = LargeIntegerAdd(offset, tblocksize) )
805 lock_ReleaseWrite(&scp->rw);
809 code = buf_Get(scp, &offset, &req, &bp);
813 if (bp->cmFlags & CM_BUF_CMFETCHING) {
814 /* skip this buffer as another thread is already fetching it */
816 lock_ObtainWrite(&scp->rw);
819 bp->cmFlags &= ~CM_BUF_CMBKGFETCH;
826 lock_ObtainWrite(&scp->rw);
830 code = cm_GetBuffer(scp, bp, NULL, userp, &req);
832 fetched = LargeIntegerAdd(fetched, tblocksize);
834 bp->cmFlags &= ~CM_BUF_CMBKGFETCH;
838 lock_ObtainWrite(&scp->rw);
842 /* Clear flag from any remaining buffers */
844 LargeIntegerLessThan(offset, end);
845 offset = LargeIntegerAdd(offset, tblocksize) )
847 bp = buf_Find(scp, &offset);
849 bp->cmFlags &= ~CM_BUF_CMBKGFETCH;
853 cm_ClearPrefetchFlag(LargeIntegerGreaterThanZero(fetched) ? 0 : code,
854 scp, &base, &fetched);
856 /* wakeup anyone who is waiting */
857 if (scp->flags & CM_SCACHEFLAG_WAITING) {
858 osi_Log1(afsd_logp, "CM BkgPrefetch Waking scp 0x%p", scp);
859 osi_Wakeup((LONG_PTR) &scp->flags);
861 lock_ReleaseWrite(&scp->rw);
863 osi_Log4(afsd_logp, "Ending BKG prefetch scp 0x%p code 0x%x fetched 0x%x:%x",
864 scp, code, fetched.HighPart, fetched.LowPart);
868 /* a read was issued to offsetp, and we have to determine whether we should
869 * do a prefetch of the next chunk.
871 void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp, afs_uint32 count,
872 cm_user_t *userp, cm_req_t *reqp)
876 osi_hyper_t realBase;
877 osi_hyper_t readBase;
878 osi_hyper_t readLength;
881 osi_hyper_t tblocksize; /* a long long temp variable */
884 tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
887 /* round up to chunk boundary */
888 readBase.LowPart += (cm_chunkSize-1);
889 readBase.LowPart &= (-cm_chunkSize);
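/* Worked example (illustrative, and assuming cm_chunkSize is a power of two,
 * which the mask above requires): with cm_chunkSize = 0x100000, an offset of
 * 0x123456 becomes 0x223455 after the add and 0x200000 after the mask, i.e.
 * the start of the next chunk; an offset already on a chunk boundary is left
 * unchanged. */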
891 readLength = ConvertLongToLargeInteger(count);
893 lock_ObtainWrite(&scp->rw);
895 if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
896 || LargeIntegerLessThanOrEqualTo(readBase, scp->prefetch.base)) {
897 lock_ReleaseWrite(&scp->rw);
900 scp->flags |= CM_SCACHEFLAG_PREFETCHING;
902 /* start the scan at the latter of the end of this read or
903 * the end of the last fetched region.
905 if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
906 readBase = scp->prefetch.end;
908 code = cm_CheckFetchRange(scp, &readBase, &readLength, userp, reqp,
911 scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
912 lock_ReleaseWrite(&scp->rw);
913 return; /* can't find something to prefetch */
916 readEnd = LargeIntegerAdd(realBase, readLength);
919 * Mark each buffer in the range as queued for a
922 for ( offset = realBase;
923 LargeIntegerLessThan(offset, readEnd);
924 offset = LargeIntegerAdd(offset, tblocksize) )
927 lock_ReleaseWrite(&scp->rw);
931 bp = buf_Find(scp, &offset);
936 lock_ObtainWrite(&scp->rw);
940 bp->cmFlags |= CM_BUF_CMBKGFETCH;
945 lock_ReleaseWrite(&scp->rw);
947 osi_Log2(afsd_logp, "BKG Prefetch request scp 0x%p, base 0x%x",
948 scp, realBase.LowPart);
950 cm_QueueBKGRequest(scp, cm_BkgPrefetch,
951 realBase.LowPart, realBase.HighPart,
952 readLength.LowPart, readLength.HighPart,
956 /* scp must be locked; temporarily unlocked during processing.
957 * If this returns 0, the buffers are returned held in biop, with
958 * CM_BUF_CMSTORING set.
960 * Caller *must* set CM_BUF_WRITING and reset the over.hEvent field if the
961 * buffer is ever unlocked before CM_BUF_DIRTY is cleared. And if
962 * CM_BUF_WRITING is ever viewed by anyone, then it must be cleared, sleepers
963 * must be woken, and the event must be set when the I/O is done. All of this
964 * is required so that buf_WaitIO synchronizes properly with the buffer as it
965 * is being written out.
967 long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
968 cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
971 osi_queueData_t *qdp;
974 osi_hyper_t scanStart; /* where to start scan for dirty pages */
975 osi_hyper_t scanEnd; /* where to stop scan for dirty pages */
976 osi_hyper_t firstModOffset; /* offset of first modified page in range */
979 long flags; /* flags to cm_SyncOp */
981 /* clear things out */
982 biop->scp = scp; /* do not hold; held by caller */
983 biop->offset = *inOffsetp;
985 biop->bufListp = NULL;
986 biop->bufListEndp = NULL;
989 /* reserve a chunk's worth of buffers */
990 lock_ReleaseWrite(&scp->rw);
991 buf_ReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
992 lock_ObtainWrite(&scp->rw);
995 for (temp = 0; temp < inSize; temp += cm_data.buf_blockSize) {
996 thyper = ConvertLongToLargeInteger(temp);
997 tbase = LargeIntegerAdd(*inOffsetp, thyper);
999 bufp = buf_Find(scp, &tbase);
1001 /* get buffer mutex and scp mutex safely */
1002 lock_ReleaseWrite(&scp->rw);
1003 lock_ObtainMutex(&bufp->mx);
1006 * if the buffer is actively involved in I/O
1007 * we wait for the I/O to complete.
1009 if (bufp->flags & (CM_BUF_WRITING|CM_BUF_READING))
1010 buf_WaitIO(scp, bufp);
1012 lock_ObtainWrite(&scp->rw);
1013 flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_STOREDATA | CM_SCACHESYNC_BUFLOCKED;
1014 code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
1016 lock_ReleaseMutex(&bufp->mx);
1019 buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
1023 /* if the buffer is dirty, we're done */
1024 if (bufp->flags & CM_BUF_DIRTY) {
1025 osi_assertx(!(bufp->flags & CM_BUF_WRITING),
1026 "WRITING w/o CMSTORING in SetupStoreBIOD");
1027 bufp->flags |= CM_BUF_WRITING;
1031 /* this buffer is clean, so there's no reason to process it */
1032 cm_SyncOpDone(scp, bufp, flags);
1033 lock_ReleaseMutex(&bufp->mx);
1041 /* if we get here and bufp is null, we didn't find any dirty buffers
1042 * that weren't already being stored back, so we just quit now.
1048 /* don't need buffer mutex any more */
1049 lock_ReleaseMutex(&bufp->mx);
1051 /* put this element in the list */
1052 qdp = osi_QDAlloc();
1053 osi_SetQData(qdp, bufp);
1054 /* don't have to hold bufp, since held by buf_Find above */
1055 osi_QAddH((osi_queue_t **) &biop->bufListp,
1056 (osi_queue_t **) &biop->bufListEndp,
1058 biop->length = cm_data.buf_blockSize;
1059 firstModOffset = bufp->offset;
1060 biop->offset = firstModOffset;
1061 bufp = NULL; /* this buffer and reference added to the queue */
1063 /* compute the window surrounding *inOffsetp of size cm_chunkSize */
1064 scanStart = *inOffsetp;
1065 scanStart.LowPart &= (-cm_chunkSize);
1066 thyper = ConvertLongToLargeInteger(cm_chunkSize);
1067 scanEnd = LargeIntegerAdd(scanStart, thyper);
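/* Illustrative example (assuming cm_chunkSize = 0x100000): an *inOffsetp of
 * 0x123456 gives scanStart = 0x100000 and scanEnd = 0x200000, so the two
 * scan loops below stay within the single chunk containing the dirty
 * buffer found above. */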
1069 flags = CM_SCACHESYNC_GETSTATUS
1070 | CM_SCACHESYNC_STOREDATA
1071 | CM_SCACHESYNC_BUFLOCKED
1072 | CM_SCACHESYNC_NOWAIT;
1074 /* start by looking backwards until scanStart */
1075 /* hyper version of cm_data.buf_blockSize */
1076 thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
1077 tbase = LargeIntegerSubtract(firstModOffset, thyper);
1078 while(LargeIntegerGreaterThanOrEqualTo(tbase, scanStart)) {
1079 /* see if we can find the buffer */
1080 bufp = buf_Find(scp, &tbase);
1084 /* try to lock it, and quit if we can't (simplifies locking) */
1085 lock_ReleaseWrite(&scp->rw);
1086 code = lock_TryMutex(&bufp->mx);
1087 lock_ObtainWrite(&scp->rw);
1094 code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
1096 lock_ReleaseMutex(&bufp->mx);
1102 if (!(bufp->flags & CM_BUF_DIRTY)) {
1103 /* buffer is clean, so we shouldn't add it */
1104 cm_SyncOpDone(scp, bufp, flags);
1105 lock_ReleaseMutex(&bufp->mx);
1111 /* don't need buffer mutex any more */
1112 lock_ReleaseMutex(&bufp->mx);
1114 /* we have a dirty buffer ready for storing. Add it to the tail
1115 * of the list, since it immediately precedes all of the disk
1116 * addresses we've already collected.
1118 qdp = osi_QDAlloc();
1119 osi_SetQData(qdp, bufp);
1120 /* no buf_hold necessary, since we have it held from buf_Find */
1121 osi_QAddT((osi_queue_t **) &biop->bufListp,
1122 (osi_queue_t **) &biop->bufListEndp,
1124 bufp = NULL; /* added to the queue */
1126 /* update biod info describing the transfer */
1127 biop->offset = LargeIntegerSubtract(biop->offset, thyper);
1128 biop->length += cm_data.buf_blockSize;
1130 /* update loop pointer */
1131 tbase = LargeIntegerSubtract(tbase, thyper);
1132 } /* while loop looking for pages preceding the one we found */
1134 /* now, find later dirty, contiguous pages, and add them to the list */
1135 /* hyper version of cm_data.buf_blockSize */
1136 thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
1137 tbase = LargeIntegerAdd(firstModOffset, thyper);
1138 while(LargeIntegerLessThan(tbase, scanEnd)) {
1139 /* see if we can find the buffer */
1140 bufp = buf_Find(scp, &tbase);
1144 /* try to lock it, and quit if we can't (simplifies locking) */
1145 lock_ReleaseWrite(&scp->rw);
1146 code = lock_TryMutex(&bufp->mx);
1147 lock_ObtainWrite(&scp->rw);
1154 code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
1156 lock_ReleaseMutex(&bufp->mx);
1162 if (!(bufp->flags & CM_BUF_DIRTY)) {
1163 /* buffer is clean, so we shouldn't add it */
1164 cm_SyncOpDone(scp, bufp, flags);
1165 lock_ReleaseMutex(&bufp->mx);
1171 /* don't need buffer mutex any more */
1172 lock_ReleaseMutex(&bufp->mx);
1174 /* we have a dirty buffer ready for storing. Add it to the head
1175 * of the list, since it immediately follows all of the disk
1176 * addresses we've already collected.
1178 qdp = osi_QDAlloc();
1179 osi_SetQData(qdp, bufp);
1180 /* no buf_hold necessary, since we have it held from buf_Find */
1181 osi_QAddH((osi_queue_t **) &biop->bufListp,
1182 (osi_queue_t **) &biop->bufListEndp,
1186 /* update biod info describing the transfer */
1187 biop->length += cm_data.buf_blockSize;
1189 /* update loop pointer */
1190 tbase = LargeIntegerAdd(tbase, thyper);
1191 } /* while loop looking for pages following the first page we found */
1193 /* finally, we're done */
1197 /* scp must be locked; temporarily unlocked during processing.
1198 * If this returns 0, the buffers are returned held in biop, with
1199 * CM_BUF_CMFETCHING flags set.
1200 * If an error is returned, we don't return any buffers.
1202 long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
1203 cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
1207 osi_hyper_t tblocksize; /* a long long temp variable */
1208 osi_hyper_t pageBase; /* base offset we're looking at */
1209 osi_queueData_t *qdp; /* one temp queue structure */
1210 osi_queueData_t *tqdp; /* another temp queue structure */
1211 long collected; /* how many bytes have been collected */
1214 osi_hyper_t fileSize; /* the # of bytes in the file */
1215 osi_queueData_t *heldBufListp; /* we hold all buffers in this list */
1216 osi_queueData_t *heldBufListEndp; /* first one */
1219 tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
1221 biop->scp = scp; /* do not hold; held by caller */
1222 biop->offset = *offsetp;
1223 /* null out the list of buffers */
1224 biop->bufListp = biop->bufListEndp = NULL;
1227 /* first lookup the file's length, so we know when to stop */
1228 code = cm_SyncOp(scp, NULL, userp, reqp, 0,
1229 CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
1233 /* copy out size, since it may change */
1234 fileSize = scp->serverLength;
1236 lock_ReleaseWrite(&scp->rw);
1238 pageBase = *offsetp;
1239 collected = pageBase.LowPart & (cm_chunkSize - 1);
1240 heldBufListp = NULL;
1241 heldBufListEndp = NULL;
1244 * Obtaining buffers can cause dirty buffers to be recycled, which
1245 * can cause a storeback, so cannot be done while we have buffers
1248 * To get around this, we get buffers twice. Before reserving buffers,
1249 * we obtain and release each one individually. After reserving
1250 * buffers, we try to obtain them again, but only by lookup, not by
1251 * recycling. If a buffer has gone away while we were waiting for
1252 * the others, we just use whatever buffers we already have.
1254 * On entry to this function, we are already holding a buffer, so we
1255 * can't wait for reservation. So we call buf_TryReserveBuffers()
1256 * instead. Not only that, we can't really even call buf_Get(), for
1257 * the same reason. We can't avoid that, though. To avoid deadlock
1258 * we allow only one thread to be executing the buf_Get()-buf_Release()
1259 * sequence at a time.
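 *
 * In outline (a sketch of the two passes implemented below):
 *
 *   pass 1: buf_Get() each page in the chunk, then release it -- this may
 *           recycle buffers and trigger storebacks, so it is done before
 *           any reservation is held;
 *   then buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
 *   pass 2: buf_Find() each page again and queue the ones still present --
 *           pages that vanished in the meantime are simply skipped.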
1262 /* first hold all buffers, since we can't hold any locks in buf_Get */
1264 /* stop at chunk boundary */
1265 if (collected >= cm_chunkSize)
1268 /* see if the next page would be past EOF */
1269 if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
1272 code = buf_Get(scp, &pageBase, reqp, &tbp);
1274 lock_ObtainWrite(&scp->rw);
1275 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
1282 pageBase = LargeIntegerAdd(tblocksize, pageBase);
1283 collected += cm_data.buf_blockSize;
1286 /* reserve a chunk's worth of buffers if possible */
1287 reserving = buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
1289 pageBase = *offsetp;
1290 collected = pageBase.LowPart & (cm_chunkSize - 1);
1292 /* now hold all buffers, if they are still there */
1294 /* stop at chunk boundary */
1295 if (collected >= cm_chunkSize)
1298 /* see if the next page would be past EOF */
1299 if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
1302 tbp = buf_Find(scp, &pageBase);
1306 /* add the buffer to the list */
1307 qdp = osi_QDAlloc();
1308 osi_SetQData(qdp, tbp);
1309 osi_QAddH((osi_queue_t **)&heldBufListp,
1310 (osi_queue_t **)&heldBufListEndp,
1312 /* leave tbp held (from buf_Get) */
1317 collected += cm_data.buf_blockSize;
1318 pageBase = LargeIntegerAdd(tblocksize, pageBase);
1321 /* look at each buffer, adding it into the list if it looks idle and
1322 * filled with old data. One special case: wait for idle if it is the
1323 * first buffer since we really need that one for our caller to make
1327 collected = 0; /* now count how many we'll really use */
1328 for (tqdp = heldBufListEndp;
1330 tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
1331 /* get a ptr to the held buffer */
1332 tbp = osi_GetQData(tqdp);
1333 pageBase = tbp->offset;
1335 /* now lock the buffer lock */
1336 lock_ObtainMutex(&tbp->mx);
1337 lock_ObtainWrite(&scp->rw);
1339 /* don't bother fetching over data that is already current */
1340 if (tbp->dataVersion <= scp->dataVersion && tbp->dataVersion >= scp->bufDataVersionLow) {
1341 /* we don't need this buffer, since it is current */
1342 lock_ReleaseWrite(&scp->rw);
1343 lock_ReleaseMutex(&tbp->mx);
1347 flags = CM_SCACHESYNC_FETCHDATA | CM_SCACHESYNC_BUFLOCKED;
1349 flags |= CM_SCACHESYNC_NOWAIT;
1351 /* wait for the buffer to serialize, if required. Doesn't
1352 * release the scp or buffer lock(s) if NOWAIT is specified.
1354 code = cm_SyncOp(scp, tbp, userp, reqp, 0, flags);
1356 lock_ReleaseWrite(&scp->rw);
1357 lock_ReleaseMutex(&tbp->mx);
1361 /* don't fetch over dirty buffers */
1362 if (tbp->flags & CM_BUF_DIRTY) {
1363 cm_SyncOpDone(scp, tbp, flags);
1364 lock_ReleaseWrite(&scp->rw);
1365 lock_ReleaseMutex(&tbp->mx);
1370 lock_ReleaseWrite(&scp->rw);
1371 lock_ReleaseMutex(&tbp->mx);
1373 /* add the buffer to the list */
1374 qdp = osi_QDAlloc();
1375 osi_SetQData(qdp, tbp);
1376 osi_QAddH((osi_queue_t **)&biop->bufListp,
1377 (osi_queue_t **)&biop->bufListEndp,
1381 /* from now on, a failure just stops our collection process, but
1382 * we still do the I/O to whatever we've already managed to collect.
1385 collected += cm_data.buf_blockSize;
1388 /* now, we've held in biop->bufListp all the buffers we're really
1389 * interested in. We also have holds left from heldBufListp, and we
1390 * now release those holds on the buffers.
1392 for (qdp = heldBufListp; qdp; qdp = tqdp) {
1393 tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
1394 tbp = osi_GetQData(qdp);
1395 osi_QRemoveHT((osi_queue_t **) &heldBufListp,
1396 (osi_queue_t **) &heldBufListEndp,
1403 /* Caller expects this */
1404 lock_ObtainWrite(&scp->rw);
1406 /* if we got a failure setting up the first buffer, then we don't have
1407 * any side effects yet, and we also have failed an operation that the
1408 * caller requires to make any progress. Give up now.
1410 if (code && isFirst) {
1411 buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
1415 /* otherwise, we're still OK, and should just return the I/O setup we've
1418 biop->length = collected;
1419 biop->reserved = reserving;
1423 /* release a bulk I/O structure that was set up by cm_SetupFetchBIOD or by
1426 void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore, long code, int scp_locked)
1428 cm_scache_t *scp; /* do not release; not held in biop */
1430 osi_queueData_t *qdp;
1431 osi_queueData_t *nqdp;
1434 /* Give back reserved buffers */
1436 buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
1439 flags = CM_SCACHESYNC_STOREDATA;
1441 flags = CM_SCACHESYNC_FETCHDATA;
1444 if (biop->bufListp) {
1445 for(qdp = biop->bufListp; qdp; qdp = nqdp) {
1446 /* lookup next guy first, since we're going to free this one */
1447 nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
1449 /* extract buffer and free queue data */
1450 bufp = osi_GetQData(qdp);
1451 osi_QRemoveHT((osi_queue_t **) &biop->bufListp,
1452 (osi_queue_t **) &biop->bufListEndp,
1456 /* now, mark I/O as done, unlock the buffer and release it */
1458 lock_ReleaseWrite(&scp->rw);
1459 lock_ObtainMutex(&bufp->mx);
1460 lock_ObtainWrite(&scp->rw);
1461 cm_SyncOpDone(scp, bufp, flags);
1463 /* turn off writing and wakeup users */
1465 if (bufp->flags & CM_BUF_WAITING) {
1466 osi_Log2(afsd_logp, "cm_ReleaseBIOD Waking [scp 0x%p] bp 0x%p", scp, bufp);
1467 osi_Wakeup((LONG_PTR) bufp);
1470 bufp->flags &= ~CM_BUF_WRITING;
1472 case CM_ERROR_NOSUCHFILE:
1473 case CM_ERROR_BADFD:
1474 case CM_ERROR_NOACCESS:
1475 case CM_ERROR_QUOTA:
1476 case CM_ERROR_SPACE:
1477 case CM_ERROR_TOOBIG:
1478 case CM_ERROR_READONLY:
1479 case CM_ERROR_NOSUCHPATH:
1481 * Apply the fatal error to this buffer.
1483 bufp->flags &= ~CM_BUF_DIRTY;
1484 bufp->flags |= CM_BUF_ERROR;
1485 bufp->dirty_offset = 0;
1486 bufp->dirty_length = 0;
1488 bufp->dataVersion = CM_BUF_VERSION_BAD;
1489 bufp->dirtyCounter++;
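/* Net effect of the fatal-error cases above: the buffer is left clean but
 * flagged CM_BUF_ERROR with a bad data version, so the unstorable dirty
 * data is dropped and the page will be refetched from the file server
 * before it is used again. */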
1491 case CM_ERROR_TIMEDOUT:
1492 case CM_ERROR_ALLDOWN:
1493 case CM_ERROR_ALLBUSY:
1494 case CM_ERROR_ALLOFFLINE:
1495 case CM_ERROR_CLOCKSKEW:
1497 /* do not mark the buffer in error state but do
1498 * not attempt to complete the rest either.
1503 bufp->flags &= ~(CM_BUF_WRITING | CM_BUF_DIRTY);
1504 bufp->dirty_offset = bufp->dirty_length = 0;
1509 lock_ReleaseWrite(&scp->rw);
1510 lock_ReleaseMutex(&bufp->mx);
1516 lock_ObtainWrite(&scp->rw);
1517 cm_SyncOpDone(scp, NULL, flags);
1519 lock_ReleaseWrite(&scp->rw);
1522 /* clean things out */
1523 biop->bufListp = NULL;
1524 biop->bufListEndp = NULL;
1528 cm_CloneStatus(cm_scache_t *scp, cm_user_t *userp, int scp_locked,
1529 AFSFetchStatus *afsStatusp, AFSVolSync *volSyncp)
1531 // set up the status based upon the scp data
1532 afsStatusp->InterfaceVersion = 0x1;
1533 switch (scp->fileType) {
1534 case CM_SCACHETYPE_FILE:
1535 afsStatusp->FileType = File;
1537 case CM_SCACHETYPE_DIRECTORY:
1538 afsStatusp->FileType = Directory;
1540 case CM_SCACHETYPE_MOUNTPOINT:
1541 afsStatusp->FileType = SymbolicLink;
1543 case CM_SCACHETYPE_SYMLINK:
1544 case CM_SCACHETYPE_DFSLINK:
1545 afsStatusp->FileType = SymbolicLink;
1548 afsStatusp->FileType = -1; /* an invalid value */
1550 afsStatusp->LinkCount = scp->linkCount;
1551 afsStatusp->Length = scp->length.LowPart;
1552 afsStatusp->DataVersion = (afs_uint32)(scp->dataVersion & MAX_AFS_UINT32);
1553 afsStatusp->Author = 0x1;
1554 afsStatusp->Owner = scp->owner;
1556 lock_ObtainWrite(&scp->rw);
1559 if (cm_FindACLCache(scp, userp, &afsStatusp->CallerAccess))
1560 afsStatusp->CallerAccess = scp->anyAccess;
1561 afsStatusp->AnonymousAccess = scp->anyAccess;
1562 afsStatusp->UnixModeBits = scp->unixModeBits;
1563 afsStatusp->ParentVnode = scp->parentVnode;
1564 afsStatusp->ParentUnique = scp->parentUnique;
1565 afsStatusp->ResidencyMask = 0;
1566 afsStatusp->ClientModTime = scp->clientModTime;
1567 afsStatusp->ServerModTime = scp->serverModTime;
1568 afsStatusp->Group = scp->group;
1569 afsStatusp->SyncCounter = 0;
1570 afsStatusp->dataVersionHigh = (afs_uint32)(scp->dataVersion >> 32);
1571 afsStatusp->lockCount = 0;
1572 afsStatusp->Length_hi = scp->length.HighPart;
1573 afsStatusp->errorCode = 0;
1575 volSyncp->spare1 = scp->volumeCreationDate;
1580 /* Fetch a buffer. Called with scp locked.
1581 * The scp is locked on return.
1583 long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *userp,
1586 long code=0, code1=0;
1587 afs_uint32 nbytes; /* bytes in transfer */
1588 afs_uint32 nbytes_hi = 0; /* high-order 32 bits of bytes in transfer */
1589 afs_uint64 length_found = 0;
1590 long rbytes; /* bytes in rx_Read call */
1592 AFSFetchStatus afsStatus;
1593 AFSCallBack callback;
1596 afs_uint32 buffer_offset;
1597 cm_buf_t *tbufp; /* buf we're filling */
1598 osi_queueData_t *qdp; /* q element we're scanning */
1600 struct rx_call *rxcallp;
1601 struct rx_connection *rxconnp;
1602 cm_bulkIO_t biod; /* bulk IO descriptor */
1606 int require_64bit_ops = 0;
1607 int call_was_64bit = 0;
1608 int fs_fetchdata_offset_bug = 0;
1612 memset(&volSync, 0, sizeof(volSync));
1614 /* now, the buffer may or may not be filled with good data (buf_GetNew
1615 * drops lots of locks, and may indeed return a properly initialized
1616 * buffer, although more likely it will just return a new, empty buffer).
1619 #ifdef AFS_FREELANCE_CLIENT
1621 // yj: if they're trying to get the /afs directory, we need to
1622 // handle it differently, since it's local rather than on any
1625 getroot = (scp==cm_data.rootSCachep);
1627 osi_Log1(afsd_logp,"GetBuffer returns cm_data.rootSCachep=%x",cm_data.rootSCachep);
1630 if (cm_HaveCallback(scp) && bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow) {
1631 /* We already have this buffer don't do extra work */
1635 cm_AFSFidFromFid(&tfid, &scp->fid);
1637 code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, userp, reqp);
1639 /* couldn't even get the first page setup properly */
1640 osi_Log1(afsd_logp, "GetBuffer: SetupFetchBIOD failure code %d", code);
1644 /* once we get here, we have the callback in place and we know that no one
1645 * is fetching the data now. Check one last time that we still have
1646 * the wrong data, and then fetch it if we're still wrong.
1648 * We can lose a race condition and end up with biod.length zero, in
1649 * which case we just retry.
1651 if ((bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow) || biod.length == 0) {
1652 if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow) &&
1653 LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->serverLength))
1655 osi_Log4(afsd_logp, "Bad DVs 0x%x != (0x%x -> 0x%x) or length 0x%x",
1656 bufp->dataVersion, scp->bufDataVersionLow, scp->dataVersion, biod.length);
1658 if (bufp->dataVersion == CM_BUF_VERSION_BAD)
1659 memset(bufp->datap, 0, cm_data.buf_blockSize);
1660 bufp->dataVersion = scp->dataVersion;
1662 cm_ReleaseBIOD(&biod, 0, 0, 1);
1664 } else if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow)
1665 && (scp->mask & CM_SCACHEMASK_TRUNCPOS) &&
1666 LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->truncPos)) {
1667 memset(bufp->datap, 0, cm_data.buf_blockSize);
1668 bufp->dataVersion = scp->dataVersion;
1669 cm_ReleaseBIOD(&biod, 0, 0, 1);
1673 lock_ReleaseWrite(&scp->rw);
1676 if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
1677 ConvertLongToLargeInteger(biod.length)),
1678 ConvertLongToLargeInteger(LONG_MAX))) {
1679 require_64bit_ops = 1;
1682 osi_Log2(afsd_logp, "cm_GetBuffer: fetching data scp %p bufp %p", scp, bufp);
1683 osi_Log3(afsd_logp, "cm_GetBuffer: fetching data scpDV 0x%x scpDVLow 0x%x bufDV 0x%x",
1684 scp->dataVersion, scp->bufDataVersionLow, bufp->dataVersion);
1686 #ifdef AFS_FREELANCE_CLIENT
1689 // if getroot then we don't need to make any calls
1690 // just return fake data
1692 if (cm_freelanceEnabled && getroot) {
1693 // setup the fake status
1694 afsStatus.InterfaceVersion = 0x1;
1695 afsStatus.FileType = 0x2;
1696 afsStatus.LinkCount = scp->linkCount;
1697 afsStatus.Length = cm_fakeDirSize;
1698 afsStatus.DataVersion = (afs_uint32)(cm_data.fakeDirVersion & 0xFFFFFFFF);
1699 afsStatus.Author = 0x1;
1700 afsStatus.Owner = 0x0;
1701 afsStatus.CallerAccess = 0x9;
1702 afsStatus.AnonymousAccess = 0x9;
1703 afsStatus.UnixModeBits = 0x1ff;
1704 afsStatus.ParentVnode = 0x1;
1705 afsStatus.ParentUnique = 0x1;
1706 afsStatus.ResidencyMask = 0;
1707 afsStatus.ClientModTime = (afs_uint32)FakeFreelanceModTime;
1708 afsStatus.ServerModTime = (afs_uint32)FakeFreelanceModTime;
1709 afsStatus.Group = 0;
1710 afsStatus.SyncCounter = 0;
1711 afsStatus.dataVersionHigh = (afs_uint32)(cm_data.fakeDirVersion >> 32);
1712 afsStatus.lockCount = 0;
1713 afsStatus.Length_hi = 0;
1714 afsStatus.errorCode = 0;
1715 memset(&volSync, 0, sizeof(volSync));
1717 // once we're done setting up the status info,
1718 // we just fill the buffer pages with fake data
1719 // from cm_FakeRootDir. Extra pages are set to
1722 lock_ObtainMutex(&cm_Freelance_Lock);
1723 t1 = bufp->offset.LowPart;
1724 qdp = biod.bufListEndp;
1726 tbufp = osi_GetQData(qdp);
1727 bufferp=tbufp->datap;
1728 memset(bufferp, 0, cm_data.buf_blockSize);
1729 t2 = cm_fakeDirSize - t1;
1730 if (t2> (afs_int32)cm_data.buf_blockSize)
1731 t2=cm_data.buf_blockSize;
1733 memcpy(bufferp, cm_FakeRootDir+t1, t2);
1738 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
1741 lock_ReleaseMutex(&cm_Freelance_Lock);
1743 // once we're done, we skip over the part of the
1744 // code that does the ACTUAL fetching of data for
1747 goto fetchingcompleted;
1750 #endif /* AFS_FREELANCE_CLIENT */
1753 * if the requested offset is greater than the file length,
1754 * the file server will return zero bytes of data and the
1755 * current status for the file which we already have since
1756 * we have just obtained a callback. Instead, we can avoid
1757 * the network round trip by allocating zeroed buffers and
1758 * faking the status info.
1760 if (biod.offset.QuadPart >= scp->length.QuadPart) {
1761 osi_Log5(afsd_logp, "SKIP FetchData64 scp 0x%p, off 0x%x:%08x > length 0x%x:%08x",
1762 scp, biod.offset.HighPart, biod.offset.LowPart,
1763 scp->length.HighPart, scp->length.LowPart);
1765 /* Clone the current status info */
1766 scp_locked = cm_CloneStatus(scp, userp, scp_locked, &afsStatus, &volSync);
1768 /* status info complete, fill pages with zeros */
1769 for (qdp = biod.bufListEndp;
1771 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q)) {
1772 tbufp = osi_GetQData(qdp);
1773 bufferp=tbufp->datap;
1774 memset(bufferp, 0, cm_data.buf_blockSize);
1777 /* no need to contact the file server */
1778 goto fetchingcompleted;
1782 lock_ReleaseWrite(&scp->rw);
1786 /* now make the call */
1788 code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
1792 rxconnp = cm_GetRxConn(connp);
1793 rxcallp = rx_NewCall(rxconnp);
1794 rx_PutConnection(rxconnp);
1796 nbytes = nbytes_hi = 0;
1798 if (SERVERHAS64BIT(connp)) {
1801 osi_Log4(afsd_logp, "CALL FetchData64 scp 0x%p, off 0x%x:%08x, size 0x%x",
1802 scp, biod.offset.HighPart, biod.offset.LowPart, biod.length);
1804 code = StartRXAFS_FetchData64(rxcallp, &tfid, biod.offset.QuadPart, biod.length);
1807 temp = rx_Read32(rxcallp, &nbytes_hi);
1808 if (temp == sizeof(afs_int32)) {
1809 nbytes_hi = ntohl(nbytes_hi);
1812 code = rxcallp->error;
1813 code1 = rx_EndCall(rxcallp, code);
1821 if (code == RXGEN_OPCODE || !SERVERHAS64BIT(connp)) {
1822 if (require_64bit_ops) {
1823 osi_Log0(afsd_logp, "Skipping FetchData. Operation requires FetchData64");
1824 code = CM_ERROR_TOOBIG;
1827 rxconnp = cm_GetRxConn(connp);
1828 rxcallp = rx_NewCall(rxconnp);
1829 rx_PutConnection(rxconnp);
1832 osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
1833 scp, biod.offset.LowPart, biod.length);
1835 code = StartRXAFS_FetchData(rxcallp, &tfid, biod.offset.LowPart,
1838 SET_SERVERHASNO64BIT(connp);
1843 temp = rx_Read32(rxcallp, &nbytes);
1844 if (temp == sizeof(afs_int32)) {
1845 nbytes = ntohl(nbytes);
1846 FillInt64(length_found, nbytes_hi, nbytes);
1847 if (length_found > biod.length) {
1849 * prior to 1.4.12 and 1.5.65 the file server would return
1850 * (filesize - offset) if the requested offset was greater than
1851 * the filesize. The correct return value would have been zero.
1852 * Force a retry by returning an RX_PROTOCOL_ERROR. If the cause
1853 * is a race between two RPCs issues by this cache manager, the
1854 * correct thing will happen the second time.
1856 osi_Log0(afsd_logp, "cm_GetBuffer length_found > biod.length");
1857 fs_fetchdata_offset_bug = 1;
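/* Illustrative example of the bug being detected here: a fetch issued at an
 * offset 32 KB past end of file makes an affected server advertise a length
 * of -32 KB; read as an unsigned count that is far larger than biod.length
 * (at most one chunk), so the comparison above fires. */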
1860 osi_Log1(afsd_logp, "cm_GetBuffer rx_Read32 returns %d != 4", temp);
1861 code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
1864 /* for the moment, nbytes_hi will always be 0 if code == 0
1865 because biod.length is a 32-bit quantity. */
1868 qdp = biod.bufListEndp;
1870 tbufp = osi_GetQData(qdp);
1871 bufferp = tbufp->datap;
1877 /* fill length_found bytes of data from the pipe into the pages.
1878 * When we stop, qdp will point at the last page we're
1879 * dealing with, and bufferp will tell us where we
1880 * stopped. We'll need this info below when we clear
1881 * the remainder of the last page out (and potentially
1882 * clear later pages out, if we fetch past EOF).
1884 while (length_found > 0) {
1886 struct iovec tiov[RX_MAXIOVECS];
1887 afs_int32 tnio, iov, iov_offset;
1889 temp = rx_Readv(rxcallp, tiov, &tnio, RX_MAXIOVECS, length_found);
1890 osi_Log1(afsd_logp, "cm_GetBuffer rx_Readv returns %d", temp);
1891 if (temp != length_found && temp < cm_data.buf_blockSize) {
1893 * If the file server returned (filesize - offset),
1894 * then the first rx_Read will return zero octets of data.
1895 * If it does, do not treat it as an error. Correct the
1896 * length_found and continue as if the file server said
1897 * it was sending us zero octets of data.
1899 if (fs_fetchdata_offset_bug && first_read)
1902 code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
1910 while (rbytes > 0) {
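/* Copy loop sketch: walk the returned iovecs and the cache buffer chain in
 * lockstep; each pass copies the smaller of what remains in the current
 * iovec and what remains in the current buffer, then advances whichever of
 * the two (or both) has been exhausted. */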
1913 osi_assertx(bufferp != NULL, "null cm_buf_t");
1915 len = MIN(tiov[iov].iov_len - iov_offset, cm_data.buf_blockSize - buffer_offset);
1916 memcpy(bufferp + buffer_offset, tiov[iov].iov_base + iov_offset, len);
1918 buffer_offset += len;
1921 if (iov_offset == tiov[iov].iov_len) {
1926 if (buffer_offset == cm_data.buf_blockSize) {
1927 /* allow read-while-fetching.
1928 * if this is the last buffer, clear the
1929 * PREFETCHING flag, so the reader waiting for
1930 * this buffer will start a prefetch.
1932 tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
1933 lock_ObtainWrite(&scp->rw);
1934 if (scp->flags & CM_SCACHEFLAG_WAITING) {
1935 osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
1936 osi_Wakeup((LONG_PTR) &scp->flags);
1938 if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
1939 osi_hyper_t tlength = ConvertLongToLargeInteger(biod.length);
1941 cm_ClearPrefetchFlag(0, scp, &biod.offset, &tlength);
1943 lock_ReleaseWrite(&scp->rw);
1945 /* Advance the buffer */
1946 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
1948 tbufp = osi_GetQData(qdp);
1949 bufferp = tbufp->datap;
1957 length_found -= temp;
1958 #else /* USE_RX_IOVEC */
1959 /* assert that there are still more buffers;
1960 * our check above for length_found being less than
1961 * biod.length should ensure this.
1963 osi_assertx(bufferp != NULL, "null cm_buf_t");
1965 /* read rbytes of data */
1966 rbytes = (afs_uint32)(length_found > cm_data.buf_blockSize ? cm_data.buf_blockSize : length_found);
1967 temp = rx_Read(rxcallp, bufferp, rbytes);
1968 if (temp < rbytes) {
1970 * If the file server returned (filesize - offset),
1971 * then the first rx_Read will return zero octets of data.
1972 * If it does, do not treat it as an error. Correct the
1973 * length_found and continue as if the file server said
1974 * it was sending us zero octets of data.
1976 if (fs_fetchdata_offset_bug && first_read)
1979 code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
1984 /* allow read-while-fetching.
1985 * if this is the last buffer, clear the
1986 * PREFETCHING flag, so the reader waiting for
1987 * this buffer will start a prefetch.
1989 tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
1990 lock_ObtainWrite(&scp->rw);
1991 if (scp->flags & CM_SCACHEFLAG_WAITING) {
1992 osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
1993 osi_Wakeup((LONG_PTR) &scp->flags);
1995 if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
1996 osi_hyper_t tlength = ConvertLongToLargeInteger(biod.length);
1998 cm_ClearPrefetchFlag(0, scp, &biod.offset, &tlength);
2000 lock_ReleaseWrite(&scp->rw);
2002 /* and adjust counters */
2003 length_found -= temp;
2005 /* and move to the next buffer */
2006 if (length_found != 0) {
2007 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
2009 tbufp = osi_GetQData(qdp);
2010 bufferp = tbufp->datap;
2016 #endif /* USE_RX_IOVEC */
2019 /* zero out remainder of last pages, in case we are
2020 * fetching past EOF. We were fetching an integral #
2021 * of pages, but stopped, potentially in the middle of
2022 * a page. Zero the remainder of that page, and then
2023 * all of the rest of the pages.
2026 rbytes = cm_data.buf_blockSize - buffer_offset;
2027 bufferp = tbufp->datap + buffer_offset;
2028 #else /* USE_RX_IOVEC */
2030 osi_assertx((bufferp - tbufp->datap) < LONG_MAX, "data >= LONG_MAX");
2031 rbytes = (long) (bufferp - tbufp->datap);
2033 /* bytes left to zero */
2034 rbytes = cm_data.buf_blockSize - rbytes;
2035 #endif /* USE_RX_IOVEC */
2038 memset(bufferp, 0, rbytes);
2039 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
2042 tbufp = osi_GetQData(qdp);
2043 bufferp = tbufp->datap;
2044 /* bytes to clear in this page */
2045 rbytes = cm_data.buf_blockSize;
2051 code = EndRXAFS_FetchData64(rxcallp, &afsStatus, &callback, &volSync);
2053 code = EndRXAFS_FetchData(rxcallp, &afsStatus, &callback, &volSync);
2056 osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData64 skipped due to error %d", code);
2058 osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error %d", code);
2062 code1 = rx_EndCall(rxcallp, code);
2064 if (code1 == RXKADUNKNOWNKEY)
2065 osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");
2067 /* If we are avoiding a file server bug, ignore the error state */
2068 if (fs_fetchdata_offset_bug && first_read && length_found == 0 && code == -451) {
2069 /* Clone the current status info and clear the error state */
2070 scp_locked = cm_CloneStatus(scp, userp, scp_locked, &afsStatus, &volSync);
2072 lock_ReleaseWrite(&scp->rw);
2076 /* Prefer the error value from FetchData over rx_EndCall */
2077 } else if (code == 0 && code1 != 0)
2079 osi_Log0(afsd_logp, "CALL FetchData DONE");
2081 } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
2084 code = cm_MapRPCError(code, reqp);
2087 lock_ObtainWrite(&scp->rw);
2089 /* we know that no one else has changed the buffer, since we still have
2090 * the fetching flag on the buffers, and we have the scp locked again.
2091 * Copy the version # into the buffer if we got code 0 back from the
2095 for(qdp = biod.bufListp;
2097 qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
2098 tbufp = osi_GetQData(qdp);
2099 tbufp->dataVersion = afsStatus.dataVersionHigh;
2100 tbufp->dataVersion <<= 32;
2101 tbufp->dataVersion |= afsStatus.DataVersion;
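/* e.g. dataVersionHigh 0x1 and DataVersion 0x00000005 combine into the
 * 64-bit buffer version 0x0000000100000005. */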
2104 /* write buffer out to disk cache */
2105 diskcache_Update(tbufp->dcp, tbufp->datap, cm_data.buf_blockSize,
2106 tbufp->dataVersion);
2107 #endif /* DISKCACHE95 */
2111 /* release scatter/gather I/O structure (buffers, locks) */
2112 cm_ReleaseBIOD(&biod, 0, code, 1);
2115 cm_MergeStatus(NULL, scp, &afsStatus, &volSync, userp, reqp, 0);