2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 #include <afsconfig.h>
12 #include <afs/param.h>
28 extern void afsi_log(char *pattern, ...);
31 #ifdef AFS_FREELANCE_CLIENT
32 extern osi_mutex_t cm_Freelance_Lock;
35 #define USE_RX_IOVEC 1
37 /* we can access connp->serverp without holding a lock because that
38 never changes since the connection is made. */
39 #define SERVERHAS64BIT(connp) (!((connp)->serverp->flags & CM_SERVERFLAG_NO64BIT))
40 #define SET_SERVERHASNO64BIT(connp) (cm_SetServerNo64Bit((connp)->serverp, TRUE))
42 /* functions called back from the buffer package when reading or writing data,
43 * or when holding or releasing a vnode pointer.
45 long cm_BufWrite(void *vscp, osi_hyper_t *offsetp, long length, long flags,
46 cm_user_t *userp, cm_req_t *reqp)
49 * store the data back from this buffer; the buffer is locked and held,
50 * but the vnode involved may or may not be locked depending on whether
51 * or not the CM_BUF_WRITE_SCP_LOCKED flag is set.
54 cm_scache_t *scp = vscp;
56 afs_int32 save_nbytes;
58 AFSFetchStatus outStatus;
59 AFSStoreStatus inStatus;
63 struct rx_call *rxcallp;
64 struct rx_connection *rxconnp;
71 cm_bulkIO_t biod; /* bulk IO descriptor */
72 int require_64bit_ops = 0;
73 int call_was_64bit = 0;
74 int scp_locked = flags & CM_BUF_WRITE_SCP_LOCKED;
75 int storedata_excl = 0;
77 osi_assertx(userp != NULL, "null cm_user_t");
78 osi_assertx(scp != NULL, "null cm_scache_t");
80 memset(&volSync, 0, sizeof(volSync));
83 * now, the buffer may or may not be filled with good data (buf_GetNewLocked
84 * drops lots of locks, and may indeed return a properly initialized
85 * buffer, although more likely it will just return a new, empty, buffer.
88 lock_ObtainWrite(&scp->rw);
89 if (scp->flags & CM_SCACHEFLAG_DELETED) {
91 lock_ReleaseWrite(&scp->rw);
92 return CM_ERROR_NOSUCHFILE;
95 cm_AFSFidFromFid(&tfid, &scp->fid);
97 /* Serialize StoreData RPC's; for rationale see cm_scache.c */
98 (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);
101 code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
103 osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
104 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
106 lock_ReleaseWrite(&scp->rw);
110 if (biod.length == 0) {
111 osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
112 cm_ReleaseBIOD(&biod, 1, 0, 1); /* should be a NOOP */
114 goto exit_storedata_excl;
117 /* prepare the output status for the store */
118 _InterlockedOr(&scp->mask, CM_SCACHEMASK_CLIENTMODTIME);
119 cm_StatusFromAttr(&inStatus, scp, NULL);
120 truncPos = scp->length;
121 if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
122 && LargeIntegerLessThan(scp->truncPos, truncPos)) {
123 truncPos = scp->truncPos;
124 _InterlockedAnd(&scp->mask, ~CM_SCACHEMASK_TRUNCPOS);
127 /* compute how many bytes to write from this buffer */
128 thyper = LargeIntegerSubtract(scp->length, biod.offset);
129 if (LargeIntegerLessThanZero(thyper)) {
130 /* entire buffer is past EOF */
134 /* otherwise write out part of buffer before EOF, but not
135 * more than bufferSize bytes.
137 if (LargeIntegerGreaterThan(thyper,
138 ConvertLongToLargeInteger(biod.length))) {
139 nbytes = biod.length;
141 /* if thyper is less than or equal to biod.length, then we
142 can safely assume that the value fits in a long. */
143 nbytes = thyper.LowPart;
147 if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
148 ConvertLongToLargeInteger(nbytes)),
149 ConvertLongToLargeInteger(LONG_MAX)) ||
150 LargeIntegerGreaterThan(truncPos,
151 ConvertLongToLargeInteger(LONG_MAX))) {
152 require_64bit_ops = 1;
155 InterlockedIncrement(&scp->activeRPCs);
156 lock_ReleaseWrite(&scp->rw);
158 /* now we're ready to do the store operation */
159 save_nbytes = nbytes;
161 code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
166 rxconnp = cm_GetRxConn(connp);
167 rxcallp = rx_NewCall(rxconnp);
168 rx_PutConnection(rxconnp);
170 if (SERVERHAS64BIT(connp)) {
173 osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData64 scp 0x%p, offset 0x%x:%08x, length 0x%x",
174 scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);
175 osi_Log2(afsd_logp, "... truncPos 0x%x:%08x", truncPos.HighPart, truncPos.LowPart);
177 code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
178 biod.offset.QuadPart,
182 osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData64 FAILURE, code 0x%x", code);
184 osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData64 SUCCESS");
188 if (require_64bit_ops) {
189 osi_Log0(afsd_logp, "Skipping StartRXAFS_StoreData. The operation requires large file support in the server.");
190 code = CM_ERROR_TOOBIG;
192 osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
193 scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);
195 code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
196 biod.offset.LowPart, nbytes, truncPos.LowPart);
198 osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
200 osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
205 afs_uint32 buf_offset = 0, bytes_copied = 0;
207 /* write the data from the the list of buffers */
211 struct iovec tiov[RX_MAXIOVECS];
212 afs_int32 tnio, vlen, vbytes, iov, voffset;
215 vbytes = rx_WritevAlloc(rxcallp, tiov, &tnio, RX_MAXIOVECS, nbytes);
217 code = RX_PROTOCOL_ERROR;
221 for ( iov = voffset = vlen = 0;
222 vlen < vbytes && iov < tnio; vlen += wbytes) {
224 qdp = biod.bufListEndp;
225 buf_offset = offsetp->LowPart % cm_data.buf_blockSize;
226 } else if (buf_offset == cm_data.buf_blockSize) {
227 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
231 osi_assertx(qdp != NULL, "null osi_queueData_t");
232 bufp = osi_GetQData(qdp);
233 bufferp = bufp->datap + buf_offset;
234 wbytes = vbytes - vlen;
235 if (wbytes > cm_data.buf_blockSize - buf_offset)
236 wbytes = cm_data.buf_blockSize - buf_offset;
238 vleft = tiov[iov].iov_len - voffset;
239 while (wbytes > vleft && iov < tnio) {
240 memcpy(tiov[iov].iov_base + voffset, bufferp, vleft);
241 bytes_copied += vleft;
249 vleft = tiov[iov].iov_len;
253 memcpy(tiov[iov].iov_base + voffset, bufferp, wbytes);
254 bytes_copied += wbytes;
255 if (tiov[iov].iov_len == voffset + wbytes) {
258 vleft = (iov < tnio) ? tiov[iov].iov_len : 0;
264 buf_offset += wbytes;
270 osi_assertx(iov == tnio, "incorrect iov count");
271 osi_assertx(vlen == vbytes, "bytes_copied != vbytes");
272 osi_assertx(bufp->offset.QuadPart + buf_offset == biod.offset.QuadPart + bytes_copied,
273 "begin and end offsets don't match");
275 temp = rx_Writev(rxcallp, tiov, tnio, vbytes);
276 if (temp != vbytes) {
277 osi_Log3(afsd_logp, "rx_Writev failed bp 0x%p, %d != %d", bufp, temp, vbytes);
278 code = (rx_Error(rxcallp) < 0) ? rx_Error(rxcallp) : RX_PROTOCOL_ERROR;
282 osi_Log2(afsd_logp, "rx_Writev succeeded iov offset 0x%x, wrote 0x%x",
283 (unsigned long)(bufp->offset.QuadPart + buf_offset - vbytes), vbytes);
285 #else /* USE_RX_IOVEC */
287 qdp = biod.bufListEndp;
288 buf_offset = offsetp->LowPart % cm_data.buf_blockSize;
290 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
294 osi_assertx(qdp != NULL, "null osi_queueData_t");
295 bufp = osi_GetQData(qdp);
296 bufferp = bufp->datap + buf_offset;
298 if (wbytes > cm_data.buf_blockSize - buf_offset)
299 wbytes = cm_data.buf_blockSize - buf_offset;
301 /* write out wbytes of data from bufferp */
302 temp = rx_Write(rxcallp, bufferp, wbytes);
303 if (temp != wbytes) {
304 osi_Log3(afsd_logp, "rx_Write failed bp 0x%p, %d != %d", bufp, temp, wbytes);
305 code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
308 osi_Log2(afsd_logp, "rx_Write succeeded bp 0x%p written %d", bufp, temp);
311 #endif /* USE_RX_IOVEC */
314 * Rx supports an out of band signalling mechanism that permits
315 * RPC specific status information to be communicated in the
316 * reverse direction of the channel. For RXAFS_StoreData, the
317 * 0-bit is set once all of the permission checks have completed
318 * and the volume/vnode locks have been obtained by the file
319 * server. The signal is intended to notify the Unix afs client
320 * that is performing store-on-close that it is safe to permit
321 * the close operation to complete while the store continues
322 * in the background. All of the callbacks have been broken
323 * and the locks will not be dropped until the RPC completes
324 * which prevents any other operation from being initiated on
325 * the vnode until the store is finished.
327 * The Windows client does not perform store-on-close. Instead
328 * it uses the CM_SCACHESYNC_STOREDATA_EXCL request flag and
329 * CM_SCACHEFLAG_DATASTORING scache state to ensure that store
330 * operations are serialized. The 0-bit signal permits the
331 * CM_SCACHEFLAG_DATASTORING state to the dropped which in
332 * turn permits another thread to prep its own BIOD in parallel.
333 * This is safe because it is impossible for that second store
334 * RPC to complete before this one does.
336 if ( storedata_excl && (rx_GetRemoteStatus(rxcallp) & 1)) {
337 lock_ObtainWrite(&scp->rw);
338 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
339 lock_ReleaseWrite(&scp->rw);
342 } /* while more bytes to write */
343 } /* if RPC started successfully */
346 if (call_was_64bit) {
347 code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
349 osi_Log2(afsd_logp, "EndRXAFS_StoreData64 FAILURE scp 0x%p code %lX", scp, code);
351 osi_Log0(afsd_logp, "EndRXAFS_StoreData64 SUCCESS");
353 code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
355 osi_Log2(afsd_logp, "EndRXAFS_StoreData FAILURE scp 0x%p code %lX",scp,code);
357 osi_Log0(afsd_logp, "EndRXAFS_StoreData SUCCESS");
361 code1 = rx_EndCall(rxcallp, code);
363 if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
364 SET_SERVERHASNO64BIT(connp);
366 nbytes = save_nbytes;
369 /* Prefer rx_EndCall error over StoreData error */
371 osi_Log2(afsd_logp, "rx_EndCall converted 0x%x to 0x%x", code, code1);
374 } while (cm_Analyze(connp, userp, reqp, &scp->fid, 1, &volSync, NULL, NULL, code));
376 code = cm_MapRPCError(code, reqp);
379 osi_Log2(afsd_logp, "CALL StoreData FAILURE scp 0x%p, code 0x%x", scp, code);
381 osi_Log1(afsd_logp, "CALL StoreData SUCCESS scp 0x%p", scp);
383 /* now, clean up our state */
384 lock_ObtainWrite(&scp->rw);
386 cm_ReleaseBIOD(&biod, 1, code, 1);
390 /* now, here's something a little tricky: in AFS 3, a dirty
391 * length can't be directly stored, instead, a dirty chunk is
392 * stored that sets the file's size (by writing and by using
393 * the truncate-first option in the store call).
395 * At this point, we've just finished a store, and so the trunc
396 * pos field is clean. If the file's size at the server is at
397 * least as big as we think it should be, then we turn off the
398 * length dirty bit, since all the other dirty buffers must
399 * precede this one in the file.
401 * The file's desired size shouldn't be smaller than what's
402 * stored at the server now, since we just did the trunc pos
405 * We have to turn off the length dirty bit as soon as we can,
406 * so that we see updates made by other machines.
409 if (call_was_64bit) {
410 t.LowPart = outStatus.Length;
411 t.HighPart = outStatus.Length_hi;
413 t = ConvertLongToLargeInteger(outStatus.Length);
416 if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
417 _InterlockedAnd(&scp->mask, ~CM_SCACHEMASK_LENGTH);
419 cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, reqp, CM_MERGEFLAG_STOREDATA);
421 InterlockedDecrement(&scp->activeRPCs);
422 if (code == CM_ERROR_SPACE)
423 _InterlockedOr(&scp->flags, CM_SCACHEFLAG_OUTOFSPACE);
424 else if (code == CM_ERROR_QUOTA)
425 _InterlockedOr(&scp->flags, CM_SCACHEFLAG_OVERQUOTA);
430 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
433 lock_ReleaseWrite(&scp->rw);
439 * Truncate the file, by sending a StoreData RPC with zero length.
441 * Called with scp locked. Releases and re-obtains the lock.
443 long cm_StoreMini(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
445 AFSFetchStatus outStatus;
446 AFSStoreStatus inStatus;
450 osi_hyper_t truncPos;
452 struct rx_call *rxcallp;
453 struct rx_connection *rxconnp;
454 int require_64bit_ops = 0;
455 int call_was_64bit = 0;
457 memset(&volSync, 0, sizeof(volSync));
459 osi_Log2(afsd_logp, "cm_StoreMini scp 0x%p userp 0x%p", scp, userp);
461 /* Serialize StoreData RPC's; for rationale see cm_scache.c */
462 (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
463 CM_SCACHESYNC_STOREDATA_EXCL);
465 /* prepare the output status for the store */
466 inStatus.Mask = AFS_SETMODTIME;
467 inStatus.ClientModTime = scp->clientModTime;
468 _InterlockedAnd(&scp->mask, ~CM_SCACHEMASK_CLIENTMODTIME);
470 /* calculate truncation position */
471 truncPos = scp->length;
472 if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
473 && LargeIntegerLessThan(scp->truncPos, truncPos))
474 truncPos = scp->truncPos;
475 _InterlockedAnd(&scp->mask, ~CM_SCACHEMASK_TRUNCPOS);
477 if (LargeIntegerGreaterThan(truncPos,
478 ConvertLongToLargeInteger(LONG_MAX))) {
480 require_64bit_ops = 1;
483 InterlockedIncrement(&scp->activeRPCs);
484 lock_ReleaseWrite(&scp->rw);
486 cm_AFSFidFromFid(&tfid, &scp->fid);
488 /* now we're ready to do the store operation */
490 code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
495 rxconnp = cm_GetRxConn(connp);
496 rxcallp = rx_NewCall(rxconnp);
497 rx_PutConnection(rxconnp);
499 if (SERVERHAS64BIT(connp)) {
502 osi_Log3(afsd_logp, "CALL StartRXAFS_StoreData64 scp 0x%p, truncPos 0x%x:%08x",
503 scp, truncPos.HighPart, truncPos.LowPart);
505 code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
506 0, 0, truncPos.QuadPart);
508 osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData64 FAILURE, code 0x%x", code);
510 osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData64 SUCCESS");
514 if (require_64bit_ops) {
515 code = CM_ERROR_TOOBIG;
517 osi_Log3(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, truncPos 0x%x:%08x",
518 scp, truncPos.HighPart, truncPos.LowPart);
520 code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
521 0, 0, truncPos.LowPart);
523 osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
525 osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
530 if (call_was_64bit) {
531 code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
533 osi_Log2(afsd_logp, "EndRXAFS_StoreData64 FAILURE scp 0x%p code %lX", scp, code);
535 osi_Log0(afsd_logp, "EndRXAFS_StoreData64 SUCCESS");
537 code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
539 osi_Log2(afsd_logp, "EndRXAFS_StoreData FAILURE scp 0x%p code %lX", scp, code);
541 osi_Log0(afsd_logp, "EndRXAFS_StoreData SUCCESS");
544 code1 = rx_EndCall(rxcallp, code);
546 if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
547 SET_SERVERHASNO64BIT(connp);
551 /* prefer StoreData error over rx_EndCall error */
552 if (code == 0 && code1 != 0)
554 } while (cm_Analyze(connp, userp, reqp, &scp->fid, 1, &volSync, NULL, NULL, code));
555 code = cm_MapRPCError(code, reqp);
557 /* now, clean up our state */
558 lock_ObtainWrite(&scp->rw);
563 * For explanation of handling of CM_SCACHEMASK_LENGTH,
566 if (call_was_64bit) {
567 t.HighPart = outStatus.Length_hi;
568 t.LowPart = outStatus.Length;
570 t = ConvertLongToLargeInteger(outStatus.Length);
573 if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
574 _InterlockedAnd(&scp->mask, ~CM_SCACHEMASK_LENGTH);
575 cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, reqp, CM_MERGEFLAG_STOREDATA);
577 InterlockedDecrement(&scp->activeRPCs);
579 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
584 long cm_BufRead(cm_buf_t *bufp, long nbytes, long *bytesReadp, cm_user_t *userp)
588 /* now return a code that means that I/O is done */
593 * stabilize scache entry with CM_SCACHESYNC_SETSIZE. This prevents any new
594 * data buffers to be allocated, new data to be fetched from the file server,
595 * and writes to be accepted from the application but permits dirty buffers
596 * to be written to the file server.
598 * Stabilize uses cm_SyncOp to maintain the cm_scache_t in this stable state
599 * instead of holding the rwlock exclusively. This permits background stores
600 * to be performed in parallel and in particular allow FlushFile to be
601 * implemented without violating the locking hierarchy.
603 long cm_BufStabilize(void *vscp, cm_user_t *userp, cm_req_t *reqp)
605 cm_scache_t *scp = vscp;
608 lock_ObtainWrite(&scp->rw);
609 code = cm_SyncOp(scp, NULL, userp, reqp, 0,
610 CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
611 lock_ReleaseWrite(&scp->rw);
616 /* undoes the work that cm_BufStabilize does: releases lock so things can change again */
617 long cm_BufUnstabilize(void *vscp, cm_user_t *userp)
619 cm_scache_t *scp = vscp;
621 lock_ObtainWrite(&scp->rw);
622 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
624 lock_ReleaseWrite(&scp->rw);
626 /* always succeeds */
630 cm_buf_ops_t cm_bufOps = {
637 long cm_ValidateDCache(void)
639 return buf_ValidateBuffers();
642 long cm_ShutdownDCache(void)
647 int cm_InitDCache(int newFile, long chunkSize, afs_uint64 nbuffers)
649 return buf_Init(newFile, &cm_bufOps, nbuffers);
652 /* check to see if we have an up-to-date buffer. The buffer must have
653 * previously been obtained by calling buf_Get.
655 * Make sure we have a callback, and that the dataversion matches.
657 * Scp must be locked.
659 * Bufp *may* be locked.
661 int cm_HaveBuffer(cm_scache_t *scp, cm_buf_t *bufp, int isBufLocked)
664 if (!cm_HaveCallback(scp))
666 if ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
668 if (bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow)
670 if (bufp->offset.QuadPart >= scp->serverLength.QuadPart)
673 code = lock_TryMutex(&bufp->mx);
675 /* don't have the lock, and can't lock it, then
682 /* remember dirty flag for later */
683 code = bufp->flags & CM_BUF_DIRTY;
685 /* release lock if we obtained it here */
687 lock_ReleaseMutex(&bufp->mx);
689 /* if buffer was dirty, buffer is acceptable for use */
697 * used when deciding whether to do a background fetch or not.
698 * call with scp->rw write-locked.
701 cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, osi_hyper_t *length,
702 cm_user_t *userp, cm_req_t *reqp, osi_hyper_t *realBasep)
706 osi_hyper_t tblocksize;
711 /* now scan all buffers in the range, looking for any that look like
716 tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
718 while (LargeIntegerGreaterThanZero(tlength)) {
719 /* get callback so we can do a meaningful dataVersion comparison */
720 code = cm_SyncOp(scp, NULL, userp, reqp, 0,
721 CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
725 if (LargeIntegerGreaterThanOrEqualTo(tbase, scp->length)) {
726 /* we're past the end of file */
730 bp = buf_Find(&scp->fid, &tbase);
731 /* We cheat slightly by not locking the bp mutex. */
733 if ((bp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)) == 0
734 && (bp->dataVersion < scp->bufDataVersionLow || bp->dataVersion > scp->dataVersion))
742 /* if this buffer is essentially guaranteed to require a fetch,
743 * break out here and return this position.
748 tbase = LargeIntegerAdd(tbase, tblocksize);
749 tlength = LargeIntegerSubtract(tlength, tblocksize);
752 /* if we get here, either everything is fine or 'stop' stopped us at a
753 * particular buffer in the range that definitely needs to be fetched.
756 /* return non-zero code since realBasep won't be valid */
760 /* successfully found a page that will need fetching */
768 cm_BkgStore(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
769 cm_user_t *userp, cm_req_t *reqp)
774 afs_uint32 req_flags = reqp->flags;
776 if (scp->flags & CM_SCACHEFLAG_DELETED) {
777 osi_Log4(afsd_logp, "Skipping BKG store - Deleted scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
779 /* Retries will be performed by the BkgDaemon thread if appropriate */
780 reqp->flags |= CM_REQ_NORETRY;
782 toffset.LowPart = p1;
783 toffset.HighPart = p2;
786 osi_Log4(afsd_logp, "Starting BKG store scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
788 code = cm_BufWrite(scp, &toffset, length, /* flags */ 0, userp, reqp);
790 osi_Log4(afsd_logp, "Finished BKG store scp 0x%p, offset 0x%x:%08x, code 0x%x", scp, p2, p1, code);
792 reqp->flags = req_flags;
796 * Keep the following list synchronized with the
797 * error code list in cm_BkgDaemon
800 case CM_ERROR_TIMEDOUT: /* or server restarting */
802 case CM_ERROR_WOULDBLOCK:
803 case CM_ERROR_ALLBUSY:
804 case CM_ERROR_ALLDOWN:
805 case CM_ERROR_ALLOFFLINE:
806 case CM_ERROR_PARTIALWRITE:
807 break; /* cm_BkgDaemon will re-insert the request in the queue */
810 lock_ObtainWrite(&scp->rw);
811 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
812 lock_ReleaseWrite(&scp->rw);
818 /* Called with scp locked */
819 void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base, osi_hyper_t *length)
824 end = LargeIntegerAdd(*base, *length);
825 if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
826 scp->prefetch.base = *base;
827 if (LargeIntegerGreaterThan(end, scp->prefetch.end))
828 scp->prefetch.end = end;
830 _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_PREFETCHING);
833 /* do the prefetch. if the prefetch fails, return 0 (success)
834 * because there is no harm done. */
836 cm_BkgPrefetch(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
837 cm_user_t *userp, cm_req_t *reqp)
844 osi_hyper_t tblocksize;
848 afs_uint32 req_flags;
850 /* Retries will be performed by the BkgDaemon thread if appropriate */
851 req_flags = reqp->flags;
852 reqp->flags |= CM_REQ_NORETRY;
855 fetched.HighPart = 0;
856 tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
860 length.HighPart = p4;
862 end = LargeIntegerAdd(base, length);
864 osi_Log5(afsd_logp, "Starting BKG prefetch scp 0x%p offset 0x%x:%x length 0x%x:%x",
865 scp, p2, p1, p4, p3);
867 for ( code = 0, offset = base;
868 code == 0 && LargeIntegerLessThan(offset, end);
869 offset = LargeIntegerAdd(offset, tblocksize) )
872 lock_ReleaseWrite(&scp->rw);
876 code = buf_Get(scp, &offset, reqp, &bp);
880 if (bp->cmFlags & CM_BUF_CMFETCHING) {
881 /* skip this buffer as another thread is already fetching it */
883 lock_ObtainWrite(&scp->rw);
892 lock_ObtainWrite(&scp->rw);
896 code = cm_GetBuffer(scp, bp, NULL, userp, reqp);
898 fetched = LargeIntegerAdd(fetched, tblocksize);
903 lock_ObtainWrite(&scp->rw);
907 cm_ClearPrefetchFlag(LargeIntegerGreaterThanZero(fetched) ? 0 : code,
908 scp, &base, &fetched);
910 /* wakeup anyone who is waiting */
911 if (scp->flags & CM_SCACHEFLAG_WAITING) {
912 osi_Log1(afsd_logp, "CM BkgPrefetch Waking scp 0x%p", scp);
913 osi_Wakeup((LONG_PTR) &scp->flags);
915 lock_ReleaseWrite(&scp->rw);
917 osi_Log4(afsd_logp, "Ending BKG prefetch scp 0x%p code 0x%x fetched 0x%x:%x",
918 scp, code, fetched.HighPart, fetched.LowPart);
920 reqp->flags = req_flags;
924 /* a read was issued to offsetp, and we have to determine whether we should
925 * do a prefetch of the next chunk.
927 void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp, afs_uint32 count,
928 cm_user_t *userp, cm_req_t *reqp)
932 osi_hyper_t realBase;
933 osi_hyper_t readBase;
934 osi_hyper_t readLength;
936 osi_hyper_t tblocksize; /* a long long temp variable */
938 tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
941 /* round up to chunk boundary */
942 readBase.LowPart += (cm_chunkSize-1);
943 readBase.LowPart &= (-cm_chunkSize);
945 readLength = ConvertLongToLargeInteger(count);
947 lock_ObtainWrite(&scp->rw);
949 if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
950 || LargeIntegerLessThanOrEqualTo(readBase, scp->prefetch.base)) {
951 lock_ReleaseWrite(&scp->rw);
954 _InterlockedOr(&scp->flags, CM_SCACHEFLAG_PREFETCHING);
956 /* start the scan at the latter of the end of this read or
957 * the end of the last fetched region.
959 if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
960 readBase = scp->prefetch.end;
962 code = cm_CheckFetchRange(scp, &readBase, &readLength, userp, reqp,
965 _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_PREFETCHING);
966 lock_ReleaseWrite(&scp->rw);
967 return; /* can't find something to prefetch */
970 readEnd = LargeIntegerAdd(realBase, readLength);
973 lock_ReleaseWrite(&scp->rw);
975 osi_Log2(afsd_logp, "BKG Prefetch request scp 0x%p, base 0x%x",
976 scp, realBase.LowPart);
978 cm_QueueBKGRequest(scp, cm_BkgPrefetch,
979 realBase.LowPart, realBase.HighPart,
980 readLength.LowPart, readLength.HighPart,
984 /* scp must be locked; temporarily unlocked during processing.
985 * If returns 0, returns buffers held in biop, and with
986 * CM_BUF_CMSTORING set.
988 * Caller *must* set CM_BUF_WRITING and reset the over.hEvent field if the
989 * buffer is ever unlocked before CM_BUF_DIRTY is cleared. And if
990 * CM_BUF_WRITING is ever viewed by anyone, then it must be cleared, sleepers
991 * must be woken, and the event must be set when the I/O is done. All of this
992 * is required so that buf_WaitIO synchronizes properly with the buffer as it
993 * is being written out.
995 long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
996 cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
999 osi_queueData_t *qdp;
1002 osi_hyper_t scanStart; /* where to start scan for dirty pages */
1003 osi_hyper_t scanEnd; /* where to stop scan for dirty pages */
1004 osi_hyper_t firstModOffset; /* offset of first modified page in range */
1007 long flags; /* flags to cm_SyncOp */
1009 /* clear things out */
1010 biop->scp = scp; /* do not hold; held by caller */
1011 biop->userp = userp; /* do not hold; held by caller */
1012 biop->offset = *inOffsetp;
1014 biop->bufListp = NULL;
1015 biop->bufListEndp = NULL;
1018 /* reserve a chunk's worth of buffers */
1019 lock_ReleaseWrite(&scp->rw);
1020 buf_ReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
1021 lock_ObtainWrite(&scp->rw);
1024 for (temp = 0; temp < inSize; temp += cm_data.buf_blockSize) {
1025 thyper = ConvertLongToLargeInteger(temp);
1026 tbase = LargeIntegerAdd(*inOffsetp, thyper);
1028 bufp = buf_Find(&scp->fid, &tbase);
1030 /* get buffer mutex and scp mutex safely */
1031 lock_ReleaseWrite(&scp->rw);
1032 lock_ObtainMutex(&bufp->mx);
1035 * if the buffer is actively involved in I/O
1036 * we wait for the I/O to complete.
1038 if (bufp->flags & (CM_BUF_WRITING|CM_BUF_READING))
1039 buf_WaitIO(scp, bufp);
1041 lock_ObtainWrite(&scp->rw);
1042 flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_STOREDATA | CM_SCACHESYNC_BUFLOCKED;
1043 code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
1045 lock_ReleaseMutex(&bufp->mx);
1048 buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
1052 /* if the buffer is dirty, we're done */
1053 if (bufp->flags & CM_BUF_DIRTY) {
1054 osi_assertx(!(bufp->flags & CM_BUF_WRITING),
1055 "WRITING w/o CMSTORING in SetupStoreBIOD");
1056 _InterlockedOr(&bufp->flags, CM_BUF_WRITING);
1060 /* this buffer is clean, so there's no reason to process it */
1061 cm_SyncOpDone(scp, bufp, flags);
1062 lock_ReleaseMutex(&bufp->mx);
1070 /* if we get here, if bufp is null, we didn't find any dirty buffers
1071 * that weren't already being stored back, so we just quit now.
1077 /* don't need buffer mutex any more */
1078 lock_ReleaseMutex(&bufp->mx);
1080 /* put this element in the list */
1081 qdp = osi_QDAlloc();
1082 osi_SetQData(qdp, bufp);
1083 /* don't have to hold bufp, since held by buf_Find above */
1084 osi_QAddH((osi_queue_t **) &biop->bufListp,
1085 (osi_queue_t **) &biop->bufListEndp,
1087 biop->length = cm_data.buf_blockSize;
1088 firstModOffset = bufp->offset;
1089 biop->offset = firstModOffset;
1090 bufp = NULL; /* this buffer and reference added to the queue */
1092 /* compute the window surrounding firstModOffset of size cm_chunkSize */
1093 scanStart = firstModOffset;
1094 scanStart.LowPart &= (-cm_chunkSize);
1095 thyper = ConvertLongToLargeInteger(cm_chunkSize);
1096 scanEnd = LargeIntegerAdd(scanStart, thyper);
1098 flags = CM_SCACHESYNC_GETSTATUS
1099 | CM_SCACHESYNC_STOREDATA
1100 | CM_SCACHESYNC_BUFLOCKED;
1102 /* start by looking backwards until scanStart */
1103 /* hyper version of cm_data.buf_blockSize */
1104 thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
1105 tbase = LargeIntegerSubtract(firstModOffset, thyper);
1106 while(LargeIntegerGreaterThanOrEqualTo(tbase, scanStart)) {
1107 /* see if we can find the buffer */
1108 bufp = buf_Find(&scp->fid, &tbase);
1112 /* try to lock it, and quit if we can't (simplifies locking) */
1113 lock_ReleaseWrite(&scp->rw);
1114 code = lock_TryMutex(&bufp->mx);
1115 lock_ObtainWrite(&scp->rw);
1122 code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
1124 lock_ReleaseMutex(&bufp->mx);
1130 if (!(bufp->flags & CM_BUF_DIRTY)) {
1131 /* buffer is clean, so we shouldn't add it */
1132 cm_SyncOpDone(scp, bufp, flags);
1133 lock_ReleaseMutex(&bufp->mx);
1139 /* don't need buffer mutex any more */
1140 lock_ReleaseMutex(&bufp->mx);
1142 /* we have a dirty buffer ready for storing. Add it to the tail
1143 * of the list, since it immediately precedes all of the disk
1144 * addresses we've already collected.
1146 qdp = osi_QDAlloc();
1147 osi_SetQData(qdp, bufp);
1148 /* no buf_hold necessary, since we have it held from buf_Find */
1149 osi_QAddT((osi_queue_t **) &biop->bufListp,
1150 (osi_queue_t **) &biop->bufListEndp,
1152 bufp = NULL; /* added to the queue */
1154 /* update biod info describing the transfer */
1155 biop->offset = LargeIntegerSubtract(biop->offset, thyper);
1156 biop->length += cm_data.buf_blockSize;
1158 /* update loop pointer */
1159 tbase = LargeIntegerSubtract(tbase, thyper);
1160 } /* while loop looking for pages preceding the one we found */
1162 /* now, find later dirty, contiguous pages, and add them to the list */
1163 /* hyper version of cm_data.buf_blockSize */
1164 thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
1165 tbase = LargeIntegerAdd(firstModOffset, thyper);
1166 while(LargeIntegerLessThan(tbase, scanEnd)) {
1167 /* see if we can find the buffer */
1168 bufp = buf_Find(&scp->fid, &tbase);
1172 /* try to lock it, and quit if we can't (simplifies locking) */
1173 lock_ReleaseWrite(&scp->rw);
1174 code = lock_TryMutex(&bufp->mx);
1175 lock_ObtainWrite(&scp->rw);
1182 code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
1184 lock_ReleaseMutex(&bufp->mx);
1190 if (!(bufp->flags & CM_BUF_DIRTY)) {
1191 /* buffer is clean, so we shouldn't add it */
1192 cm_SyncOpDone(scp, bufp, flags);
1193 lock_ReleaseMutex(&bufp->mx);
1199 /* don't need buffer mutex any more */
1200 lock_ReleaseMutex(&bufp->mx);
1202 /* we have a dirty buffer ready for storing. Add it to the head
1203 * of the list, since it immediately follows all of the disk
1204 * addresses we've already collected.
1206 qdp = osi_QDAlloc();
1207 osi_SetQData(qdp, bufp);
1208 /* no buf_hold necessary, since we have it held from buf_Find */
1209 osi_QAddH((osi_queue_t **) &biop->bufListp,
1210 (osi_queue_t **) &biop->bufListEndp,
1214 /* update biod info describing the transfer */
1215 biop->length += cm_data.buf_blockSize;
1217 /* update loop pointer */
1218 tbase = LargeIntegerAdd(tbase, thyper);
1219 } /* while loop looking for pages following the first page we found */
1221 /* finally, we're done */
1225 /* scp must be locked; temporarily unlocked during processing.
1226 * If returns 0, returns buffers held in biop, and with
1227 * CM_BUF_CMFETCHING flags set.
1228 * If an error is returned, we don't return any buffers.
1230 long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
1231 cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
1235 osi_hyper_t tblocksize; /* a long long temp variable */
1236 osi_hyper_t pageBase; /* base offset we're looking at */
1237 osi_queueData_t *qdp; /* one temp queue structure */
1238 osi_queueData_t *tqdp; /* another temp queue structure */
1239 long collected; /* how many bytes have been collected */
1242 osi_hyper_t fileSize; /* the # of bytes in the file */
1243 osi_queueData_t *heldBufListp; /* we hold all buffers in this list */
1244 osi_queueData_t *heldBufListEndp; /* first one */
1247 tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
1249 biop->scp = scp; /* do not hold; held by caller */
1250 biop->userp = userp; /* do not hold; held by caller */
1252 biop->offset = *offsetp;
1253 /* null out the list of buffers */
1254 biop->bufListp = biop->bufListEndp = NULL;
1257 /* first lookup the file's length, so we know when to stop */
1258 code = cm_SyncOp(scp, NULL, userp, reqp, 0,
1259 CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
1263 /* copy out size, since it may change */
1264 fileSize = scp->serverLength;
1266 lock_ReleaseWrite(&scp->rw);
1268 pageBase = *offsetp;
1269 collected = pageBase.LowPart & (cm_chunkSize - 1);
1270 heldBufListp = NULL;
1271 heldBufListEndp = NULL;
1274 * Obtaining buffers can cause dirty buffers to be recycled, which
1275 * can cause a storeback, so cannot be done while we have buffers
1278 * To get around this, we get buffers twice. Before reserving buffers,
1279 * we obtain and release each one individually. After reserving
1280 * buffers, we try to obtain them again, but only by lookup, not by
1281 * recycling. If a buffer has gone away while we were waiting for
1282 * the others, we just use whatever buffers we already have.
1284 * On entry to this function, we are already holding a buffer, so we
1285 * can't wait for reservation. So we call buf_TryReserveBuffers()
1286 * instead. Not only that, we can't really even call buf_Get(), for
1287 * the same reason. We can't avoid that, though. To avoid deadlock
1288 * we allow only one thread to be executing the buf_Get()-buf_Release()
1289 * sequence at a time.
1292 /* first hold all buffers, since we can't hold any locks in buf_Get */
1294 /* stop at chunk boundary */
1295 if (collected >= cm_chunkSize)
1298 /* see if the next page would be past EOF */
1299 if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
1302 code = buf_Get(scp, &pageBase, reqp, &tbp);
1304 lock_ObtainWrite(&scp->rw);
1305 cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
1312 pageBase = LargeIntegerAdd(tblocksize, pageBase);
1313 collected += cm_data.buf_blockSize;
1316 /* reserve a chunk's worth of buffers if possible */
1317 reserving = buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
1319 pageBase = *offsetp;
1320 collected = pageBase.LowPart & (cm_chunkSize - 1);
1322 /* now hold all buffers, if they are still there */
1324 /* stop at chunk boundary */
1325 if (collected >= cm_chunkSize)
1328 /* see if the next page would be past EOF */
1329 if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
1332 tbp = buf_Find(&scp->fid, &pageBase);
1336 /* add the buffer to the list */
1337 qdp = osi_QDAlloc();
1338 osi_SetQData(qdp, tbp);
1339 osi_QAddH((osi_queue_t **)&heldBufListp,
1340 (osi_queue_t **)&heldBufListEndp,
1342 /* leave tbp held (from buf_Get) */
1347 collected += cm_data.buf_blockSize;
1348 pageBase = LargeIntegerAdd(tblocksize, pageBase);
1351 /* look at each buffer, adding it into the list if it looks idle and
1352 * filled with old data. One special case: wait for idle if it is the
1353 * first buffer since we really need that one for our caller to make
1357 collected = 0; /* now count how many we'll really use */
1358 for (tqdp = heldBufListEndp;
1360 tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
1361 /* get a ptr to the held buffer */
1362 tbp = osi_GetQData(tqdp);
1363 pageBase = tbp->offset;
1365 /* now lock the buffer lock */
1366 lock_ObtainMutex(&tbp->mx);
1367 lock_ObtainWrite(&scp->rw);
1369 /* don't bother fetching over data that is already current */
1370 if (tbp->dataVersion <= scp->dataVersion && tbp->dataVersion >= scp->bufDataVersionLow) {
1371 /* we don't need this buffer, since it is current */
1372 lock_ReleaseWrite(&scp->rw);
1373 lock_ReleaseMutex(&tbp->mx);
1377 flags = CM_SCACHESYNC_FETCHDATA | CM_SCACHESYNC_BUFLOCKED;
1379 flags |= CM_SCACHESYNC_NOWAIT;
1381 /* wait for the buffer to serialize, if required. Doesn't
1382 * release the scp or buffer lock(s) if NOWAIT is specified.
1384 code = cm_SyncOp(scp, tbp, userp, reqp, 0, flags);
1386 lock_ReleaseWrite(&scp->rw);
1387 lock_ReleaseMutex(&tbp->mx);
1391 /* don't fetch over dirty buffers */
1392 if (tbp->flags & CM_BUF_DIRTY) {
1393 cm_SyncOpDone(scp, tbp, flags);
1394 lock_ReleaseWrite(&scp->rw);
1395 lock_ReleaseMutex(&tbp->mx);
1400 lock_ReleaseWrite(&scp->rw);
1401 lock_ReleaseMutex(&tbp->mx);
1403 /* add the buffer to the list */
1404 qdp = osi_QDAlloc();
1405 osi_SetQData(qdp, tbp);
1406 osi_QAddH((osi_queue_t **)&biop->bufListp,
1407 (osi_queue_t **)&biop->bufListEndp,
1411 /* from now on, a failure just stops our collection process, but
1412 * we still do the I/O to whatever we've already managed to collect.
1415 collected += cm_data.buf_blockSize;
1418 /* now, we've held in biop->bufListp all the buffer's we're really
1419 * interested in. We also have holds left from heldBufListp, and we
1420 * now release those holds on the buffers.
1422 for (qdp = heldBufListp; qdp; qdp = tqdp) {
1423 tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
1424 tbp = osi_GetQData(qdp);
1425 osi_QRemoveHT((osi_queue_t **) &heldBufListp,
1426 (osi_queue_t **) &heldBufListEndp,
1433 /* Caller expects this */
1434 lock_ObtainWrite(&scp->rw);
1436 /* if we got a failure setting up the first buffer, then we don't have
1437 * any side effects yet, and we also have failed an operation that the
1438 * caller requires to make any progress. Give up now.
1440 if (code && isFirst) {
1441 buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
1445 /* otherwise, we're still OK, and should just return the I/O setup we've
1448 biop->length = collected;
1449 biop->reserved = reserving;
1453 /* release a bulk I/O structure that was setup by cm_SetupFetchBIOD or by
1456 void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore, long code, int scp_locked)
1458 cm_scache_t *scp; /* do not release; not held in biop */
1460 osi_queueData_t *qdp;
1461 osi_queueData_t *nqdp;
1463 int reportErrorToRedir = 0;
1465 /* Give back reserved buffers */
1467 buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
1470 flags = CM_SCACHESYNC_STOREDATA;
1472 flags = CM_SCACHESYNC_FETCHDATA;
1475 if (biop->bufListp) {
1476 for(qdp = biop->bufListp; qdp; qdp = nqdp) {
1477 /* lookup next guy first, since we're going to free this one */
1478 nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
1480 /* extract buffer and free queue data */
1481 bufp = osi_GetQData(qdp);
1482 osi_QRemoveHT((osi_queue_t **) &biop->bufListp,
1483 (osi_queue_t **) &biop->bufListEndp,
1487 /* now, mark I/O as done, unlock the buffer and release it */
1489 lock_ReleaseWrite(&scp->rw);
1490 lock_ObtainMutex(&bufp->mx);
1491 lock_ObtainWrite(&scp->rw);
1492 cm_SyncOpDone(scp, bufp, flags);
1494 /* turn off writing and wakeup users */
1496 if (bufp->flags & CM_BUF_WAITING) {
1497 osi_Log2(afsd_logp, "cm_ReleaseBIOD Waking [scp 0x%p] bp 0x%p", scp, bufp);
1498 osi_Wakeup((LONG_PTR) bufp);
1501 _InterlockedAnd(&bufp->flags, ~CM_BUF_WRITING);
1503 case CM_ERROR_NOSUCHFILE:
1504 case CM_ERROR_BADFD:
1505 case CM_ERROR_NOACCESS:
1506 case CM_ERROR_QUOTA:
1507 case CM_ERROR_SPACE:
1508 case CM_ERROR_TOOBIG:
1509 case CM_ERROR_READONLY:
1510 case CM_ERROR_NOSUCHPATH:
1512 * Apply the fatal error to this buffer.
1514 _InterlockedAnd(&bufp->flags, ~CM_BUF_DIRTY);
1515 _InterlockedOr(&bufp->flags, CM_BUF_ERROR);
1516 bufp->dirty_offset = 0;
1517 bufp->dirty_length = 0;
1519 bufp->dataVersion = CM_BUF_VERSION_BAD;
1520 bufp->dirtyCounter++;
1521 reportErrorToRedir = 1;
1523 case CM_ERROR_TIMEDOUT:
1524 case CM_ERROR_ALLDOWN:
1525 case CM_ERROR_ALLBUSY:
1526 case CM_ERROR_ALLOFFLINE:
1527 case CM_ERROR_CLOCKSKEW:
1529 /* do not mark the buffer in error state but do
1530 * not attempt to complete the rest either.
1535 _InterlockedAnd(&bufp->flags, ~(CM_BUF_WRITING | CM_BUF_DIRTY));
1536 bufp->dirty_offset = bufp->dirty_length = 0;
1541 lock_ReleaseWrite(&scp->rw);
1542 lock_ReleaseMutex(&bufp->mx);
1547 if (RDR_Initialized && reportErrorToRedir) {
1549 smb_MapNTError(cm_MapRPCError(code, biop->reqp), &status, TRUE);
1550 RDR_SetFileStatus( &scp->fid, &biop->userp->authgroup, status);
1554 lock_ObtainWrite(&scp->rw);
1555 cm_SyncOpDone(scp, NULL, flags);
1557 lock_ReleaseWrite(&scp->rw);
1560 /* clean things out */
1561 biop->bufListp = NULL;
1562 biop->bufListEndp = NULL;
1566 cm_CloneStatus(cm_scache_t *scp, cm_user_t *userp, int scp_locked,
1567 AFSFetchStatus *afsStatusp, AFSVolSync *volSyncp)
1569 // setup the status based upon the scp data
1570 afsStatusp->InterfaceVersion = 0x1;
1571 switch (scp->fileType) {
1572 case CM_SCACHETYPE_FILE:
1573 afsStatusp->FileType = File;
1575 case CM_SCACHETYPE_DIRECTORY:
1576 afsStatusp->FileType = Directory;
1578 case CM_SCACHETYPE_MOUNTPOINT:
1579 afsStatusp->FileType = SymbolicLink;
1581 case CM_SCACHETYPE_SYMLINK:
1582 case CM_SCACHETYPE_DFSLINK:
1583 afsStatusp->FileType = SymbolicLink;
1586 afsStatusp->FileType = -1; /* an invalid value */
1588 afsStatusp->LinkCount = scp->linkCount;
1589 afsStatusp->Length = scp->length.LowPart;
1590 afsStatusp->DataVersion = (afs_uint32)(scp->dataVersion & MAX_AFS_UINT32);
1591 afsStatusp->Author = 0x1;
1592 afsStatusp->Owner = scp->owner;
1594 lock_ObtainWrite(&scp->rw);
1597 if (cm_FindACLCache(scp, userp, &afsStatusp->CallerAccess))
1598 afsStatusp->CallerAccess = scp->anyAccess;
1599 afsStatusp->AnonymousAccess = scp->anyAccess;
1600 afsStatusp->UnixModeBits = scp->unixModeBits;
1601 afsStatusp->ParentVnode = scp->parentVnode;
1602 afsStatusp->ParentUnique = scp->parentUnique;
1603 afsStatusp->ResidencyMask = 0;
1604 afsStatusp->ClientModTime = scp->clientModTime;
1605 afsStatusp->ServerModTime = scp->serverModTime;
1606 afsStatusp->Group = scp->group;
1607 afsStatusp->SyncCounter = 0;
1608 afsStatusp->dataVersionHigh = (afs_uint32)(scp->dataVersion >> 32);
1609 afsStatusp->lockCount = 0;
1610 afsStatusp->Length_hi = scp->length.HighPart;
1611 afsStatusp->errorCode = 0;
1613 volSyncp->spare1 = scp->volumeCreationDate;
1618 /* Fetch a buffer. Called with scp locked.
1619 * The scp is locked on return.
1621 long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *userp,
1624 long code=0, code1=0;
1625 afs_uint32 nbytes; /* bytes in transfer */
1626 afs_uint32 nbytes_hi = 0; /* high-order 32 bits of bytes in transfer */
1627 afs_uint64 length_found = 0;
1628 long rbytes; /* bytes in rx_Read call */
1630 AFSFetchStatus afsStatus;
1631 AFSCallBack callback;
1634 afs_uint32 buffer_offset;
1635 cm_buf_t *tbufp; /* buf we're filling */
1636 osi_queueData_t *qdp; /* q element we're scanning */
1638 struct rx_call *rxcallp;
1639 struct rx_connection *rxconnp;
1640 cm_bulkIO_t biod; /* bulk IO descriptor */
1644 int require_64bit_ops = 0;
1645 int call_was_64bit = 0;
1646 int fs_fetchdata_offset_bug = 0;
1650 memset(&volSync, 0, sizeof(volSync));
1652 /* now, the buffer may or may not be filled with good data (buf_GetNewLocked
1653 * drops lots of locks, and may indeed return a properly initialized
1654 * buffer, although more likely it will just return a new, empty, buffer.
1657 #ifdef AFS_FREELANCE_CLIENT
1659 // yj: if they're trying to get the /afs directory, we need to
1660 // handle it differently, since it's local rather than on any
1663 getroot = (scp==cm_data.rootSCachep);
1665 osi_Log1(afsd_logp,"GetBuffer returns cm_data.rootSCachep=%x",cm_data.rootSCachep);
1668 if (cm_HaveCallback(scp) && bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow) {
1669 /* We already have this buffer don't do extra work */
1673 cm_AFSFidFromFid(&tfid, &scp->fid);
1675 code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, userp, reqp);
1677 /* couldn't even get the first page setup properly */
1678 osi_Log1(afsd_logp, "GetBuffer: SetupFetchBIOD failure code %d", code);
1682 /* once we get here, we have the callback in place, we know that no one
1683 * is fetching the data now. Check one last time that we still have
1684 * the wrong data, and then fetch it if we're still wrong.
1686 * We can lose a race condition and end up with biod.length zero, in
1687 * which case we just retry.
1689 if (bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow || biod.length == 0) {
1690 if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow) &&
1691 LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->serverLength))
1693 osi_Log4(afsd_logp, "Bad DVs 0x%x != (0x%x -> 0x%x) or length 0x%x",
1694 bufp->dataVersion, scp->bufDataVersionLow, scp->dataVersion, biod.length);
1696 if (bufp->dataVersion == CM_BUF_VERSION_BAD)
1697 memset(bufp->datap, 0, cm_data.buf_blockSize);
1698 bufp->dataVersion = scp->dataVersion;
1700 cm_ReleaseBIOD(&biod, 0, 0, 1);
1702 } else if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow)
1703 && (scp->mask & CM_SCACHEMASK_TRUNCPOS) &&
1704 LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->truncPos)) {
1705 memset(bufp->datap, 0, cm_data.buf_blockSize);
1706 bufp->dataVersion = scp->dataVersion;
1707 cm_ReleaseBIOD(&biod, 0, 0, 1);
1711 InterlockedIncrement(&scp->activeRPCs);
1712 lock_ReleaseWrite(&scp->rw);
1715 if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
1716 ConvertLongToLargeInteger(biod.length)),
1717 ConvertLongToLargeInteger(LONG_MAX))) {
1718 require_64bit_ops = 1;
1721 osi_Log2(afsd_logp, "cm_GetBuffer: fetching data scp %p bufp %p", scp, bufp);
1722 osi_Log3(afsd_logp, "cm_GetBuffer: fetching data scpDV 0x%x scpDVLow 0x%x bufDV 0x%x",
1723 scp->dataVersion, scp->bufDataVersionLow, bufp->dataVersion);
1725 #ifdef AFS_FREELANCE_CLIENT
1728 // if getroot then we don't need to make any calls
1729 // just return fake data
1731 if (cm_freelanceEnabled && getroot) {
1732 // setup the fake status
1733 afsStatus.InterfaceVersion = 0x1;
1734 afsStatus.FileType = 0x2;
1735 afsStatus.LinkCount = scp->linkCount;
1736 afsStatus.Length = cm_fakeDirSize;
1737 afsStatus.DataVersion = (afs_uint32)(cm_data.fakeDirVersion & 0xFFFFFFFF);
1738 afsStatus.Author = 0x1;
1739 afsStatus.Owner = 0x0;
1740 afsStatus.CallerAccess = 0x9;
1741 afsStatus.AnonymousAccess = 0x9;
1742 afsStatus.UnixModeBits = 0x1ff;
1743 afsStatus.ParentVnode = 0x1;
1744 afsStatus.ParentUnique = 0x1;
1745 afsStatus.ResidencyMask = 0;
1746 afsStatus.ClientModTime = (afs_uint32)FakeFreelanceModTime;
1747 afsStatus.ServerModTime = (afs_uint32)FakeFreelanceModTime;
1748 afsStatus.Group = 0;
1749 afsStatus.SyncCounter = 0;
1750 afsStatus.dataVersionHigh = (afs_uint32)(cm_data.fakeDirVersion >> 32);
1751 afsStatus.lockCount = 0;
1752 afsStatus.Length_hi = 0;
1753 afsStatus.errorCode = 0;
1754 memset(&volSync, 0, sizeof(volSync));
1756 // once we're done setting up the status info,
1757 // we just fill the buffer pages with fakedata
1758 // from cm_FakeRootDir. Extra pages are set to
1761 lock_ObtainMutex(&cm_Freelance_Lock);
1762 t1 = bufp->offset.LowPart;
1763 qdp = biod.bufListEndp;
1765 tbufp = osi_GetQData(qdp);
1766 bufferp=tbufp->datap;
1767 memset(bufferp, 0, cm_data.buf_blockSize);
1768 t2 = cm_fakeDirSize - t1;
1769 if (t2> (afs_int32)cm_data.buf_blockSize)
1770 t2=cm_data.buf_blockSize;
1772 memcpy(bufferp, cm_FakeRootDir+t1, t2);
1777 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
1780 lock_ReleaseMutex(&cm_Freelance_Lock);
1782 // once we're done, we skip over the part of the
1783 // code that does the ACTUAL fetching of data for
1786 goto fetchingcompleted;
1789 #endif /* AFS_FREELANCE_CLIENT */
1792 * if the requested offset is greater than the file length,
1793 * the file server will return zero bytes of data and the
1794 * current status for the file which we already have since
1795 * we have just obtained a callback. Instead, we can avoid
1796 * the network round trip by allocating zeroed buffers and
1797 * faking the status info.
1799 if (biod.offset.QuadPart >= scp->length.QuadPart) {
1800 osi_Log5(afsd_logp, "SKIP FetchData64 scp 0x%p, off 0x%x:%08x > length 0x%x:%08x",
1801 scp, biod.offset.HighPart, biod.offset.LowPart,
1802 scp->length.HighPart, scp->length.LowPart);
1804 /* Clone the current status info */
1805 scp_locked = cm_CloneStatus(scp, userp, scp_locked, &afsStatus, &volSync);
1807 /* status info complete, fill pages with zeros */
1808 for (qdp = biod.bufListEndp;
1810 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q)) {
1811 tbufp = osi_GetQData(qdp);
1812 bufferp=tbufp->datap;
1813 memset(bufferp, 0, cm_data.buf_blockSize);
1816 /* no need to contact the file server */
1817 goto fetchingcompleted;
1821 lock_ReleaseWrite(&scp->rw);
1825 /* now make the call */
1827 code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
1831 rxconnp = cm_GetRxConn(connp);
1832 rxcallp = rx_NewCall(rxconnp);
1833 rx_PutConnection(rxconnp);
1835 nbytes = nbytes_hi = 0;
1837 if (SERVERHAS64BIT(connp)) {
1840 osi_Log4(afsd_logp, "CALL FetchData64 scp 0x%p, off 0x%x:%08x, size 0x%x",
1841 scp, biod.offset.HighPart, biod.offset.LowPart, biod.length);
1843 code = StartRXAFS_FetchData64(rxcallp, &tfid, biod.offset.QuadPart, biod.length);
1846 temp = rx_Read32(rxcallp, &nbytes_hi);
1847 if (temp == sizeof(afs_int32)) {
1848 nbytes_hi = ntohl(nbytes_hi);
1851 code = rx_Error(rxcallp);
1852 code1 = rx_EndCall(rxcallp, code);
1860 if (code == RXGEN_OPCODE || !SERVERHAS64BIT(connp)) {
1861 if (require_64bit_ops) {
1862 osi_Log0(afsd_logp, "Skipping FetchData. Operation requires FetchData64");
1863 code = CM_ERROR_TOOBIG;
1866 rxconnp = cm_GetRxConn(connp);
1867 rxcallp = rx_NewCall(rxconnp);
1868 rx_PutConnection(rxconnp);
1871 osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
1872 scp, biod.offset.LowPart, biod.length);
1874 code = StartRXAFS_FetchData(rxcallp, &tfid, biod.offset.LowPart,
1877 SET_SERVERHASNO64BIT(connp);
1882 temp = rx_Read32(rxcallp, &nbytes);
1883 if (temp == sizeof(afs_int32)) {
1884 nbytes = ntohl(nbytes);
1885 FillInt64(length_found, nbytes_hi, nbytes);
1886 if (length_found > biod.length) {
1888 * prior to 1.4.12 and 1.5.65 the file server would return
1889 * (filesize - offset) if the requested offset was greater than
1890 * the filesize. The correct return value would have been zero.
1891 * Force a retry by returning an RX_PROTOCOL_ERROR. If the cause
1892 * is a race between two RPCs issues by this cache manager, the
1893 * correct thing will happen the second time.
1895 osi_Log0(afsd_logp, "cm_GetBuffer length_found > biod.length");
1896 fs_fetchdata_offset_bug = 1;
1899 osi_Log1(afsd_logp, "cm_GetBuffer rx_Read32 returns %d != 4", temp);
1900 code = (rx_Error(rxcallp) < 0) ? rx_Error(rxcallp) : RX_PROTOCOL_ERROR;
1903 /* for the moment, nbytes_hi will always be 0 if code == 0
1904 because biod.length is a 32-bit quantity. */
1907 qdp = biod.bufListEndp;
1909 tbufp = osi_GetQData(qdp);
1910 bufferp = tbufp->datap;
1916 /* fill length_found of data from the pipe into the pages.
1917 * When we stop, qdp will point at the last page we're
1918 * dealing with, and bufferp will tell us where we
1919 * stopped. We'll need this info below when we clear
1920 * the remainder of the last page out (and potentially
1921 * clear later pages out, if we fetch past EOF).
1923 while (length_found > 0) {
1925 struct iovec tiov[RX_MAXIOVECS];
1926 afs_int32 tnio, iov, iov_offset;
1928 temp = rx_Readv(rxcallp, tiov, &tnio, RX_MAXIOVECS, length_found);
1929 osi_Log1(afsd_logp, "cm_GetBuffer rx_Readv returns %d", temp);
1930 if (temp != length_found && temp < cm_data.buf_blockSize) {
1932 * If the file server returned (filesize - offset),
1933 * then the first rx_Read will return zero octets of data.
1934 * If it does, do not treat it as an error. Correct the
1935 * length_found and continue as if the file server said
1936 * it was sending us zero octets of data.
1938 if (fs_fetchdata_offset_bug && first_read)
1941 code = (rx_Error(rxcallp) < 0) ? rx_Error(rxcallp) : RX_PROTOCOL_ERROR;
1949 while (rbytes > 0) {
1952 osi_assertx(bufferp != NULL, "null cm_buf_t");
1954 len = min(tiov[iov].iov_len - iov_offset, cm_data.buf_blockSize - buffer_offset);
1955 memcpy(bufferp + buffer_offset, tiov[iov].iov_base + iov_offset, len);
1957 buffer_offset += len;
1960 if (iov_offset == tiov[iov].iov_len) {
1965 if (buffer_offset == cm_data.buf_blockSize) {
1966 /* allow read-while-fetching.
1967 * if this is the last buffer, clear the
1968 * PREFETCHING flag, so the reader waiting for
1969 * this buffer will start a prefetch.
1971 _InterlockedOr(&tbufp->cmFlags, CM_BUF_CMFULLYFETCHED);
1972 lock_ObtainWrite(&scp->rw);
1973 if (scp->flags & CM_SCACHEFLAG_WAITING) {
1974 osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
1975 osi_Wakeup((LONG_PTR) &scp->flags);
1977 if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
1978 osi_hyper_t tlength = ConvertLongToLargeInteger(biod.length);
1980 cm_ClearPrefetchFlag(0, scp, &biod.offset, &tlength);
1982 lock_ReleaseWrite(&scp->rw);
1984 /* Advance the buffer */
1985 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
1987 tbufp = osi_GetQData(qdp);
1988 bufferp = tbufp->datap;
1996 length_found -= temp;
1997 #else /* USE_RX_IOVEC */
1998 /* assert that there are still more buffers;
1999 * our check above for length_found being less than
2000 * biod.length should ensure this.
2002 osi_assertx(bufferp != NULL, "null cm_buf_t");
2004 /* read rbytes of data */
2005 rbytes = (afs_uint32)(length_found > cm_data.buf_blockSize ? cm_data.buf_blockSize : length_found);
2006 temp = rx_Read(rxcallp, bufferp, rbytes);
2007 if (temp < rbytes) {
2009 * If the file server returned (filesize - offset),
2010 * then the first rx_Read will return zero octets of data.
2011 * If it does, do not treat it as an error. Correct the
2012 * length_found and continue as if the file server said
2013 * it was sending us zero octets of data.
2015 if (fs_fetchdata_offset_bug && first_read)
2018 code = (rx_Error(rxcallp) < 0) ? rx_Error(rxcallp) : RX_PROTOCOL_ERROR;
2023 /* allow read-while-fetching.
2024 * if this is the last buffer, clear the
2025 * PREFETCHING flag, so the reader waiting for
2026 * this buffer will start a prefetch.
2028 _InterlockedOr(&tbufp->cmFlags, CM_BUF_CMFULLYFETCHED);
2029 lock_ObtainWrite(&scp->rw);
2030 if (scp->flags & CM_SCACHEFLAG_WAITING) {
2031 osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
2032 osi_Wakeup((LONG_PTR) &scp->flags);
2034 if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
2035 osi_hyper_t tlength = ConvertLongToLargeInteger(biod.length);
2037 cm_ClearPrefetchFlag(0, scp, &biod.offset, &tlength);
2039 lock_ReleaseWrite(&scp->rw);
2041 /* and adjust counters */
2042 length_found -= temp;
2044 /* and move to the next buffer */
2045 if (length_found != 0) {
2046 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
2048 tbufp = osi_GetQData(qdp);
2049 bufferp = tbufp->datap;
2055 #endif /* USE_RX_IOVEC */
2058 /* zero out remainder of last pages, in case we are
2059 * fetching past EOF. We were fetching an integral #
2060 * of pages, but stopped, potentially in the middle of
2061 * a page. Zero the remainder of that page, and then
2062 * all of the rest of the pages.
2065 rbytes = cm_data.buf_blockSize - buffer_offset;
2066 bufferp = tbufp->datap + buffer_offset;
2067 #else /* USE_RX_IOVEC */
2069 osi_assertx((bufferp - tbufp->datap) < LONG_MAX, "data >= LONG_MAX");
2070 rbytes = (long) (bufferp - tbufp->datap);
2072 /* bytes left to zero */
2073 rbytes = cm_data.buf_blockSize - rbytes;
2074 #endif /* USE_RX_IOVEC */
2077 memset(bufferp, 0, rbytes);
2078 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
2081 tbufp = osi_GetQData(qdp);
2082 bufferp = tbufp->datap;
2083 /* bytes to clear in this page */
2084 rbytes = cm_data.buf_blockSize;
2090 code = EndRXAFS_FetchData64(rxcallp, &afsStatus, &callback, &volSync);
2092 code = EndRXAFS_FetchData(rxcallp, &afsStatus, &callback, &volSync);
2095 osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData64 skipped due to error %d", code);
2097 osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error %d", code);
2101 code1 = rx_EndCall(rxcallp, code);
2103 if (code1 == RXKADUNKNOWNKEY)
2104 osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");
2106 /* If we are avoiding a file server bug, ignore the error state */
2107 if (fs_fetchdata_offset_bug && first_read && length_found == 0 && code == -451) {
2108 /* Clone the current status info and clear the error state */
2109 scp_locked = cm_CloneStatus(scp, userp, scp_locked, &afsStatus, &volSync);
2110 if (scp_locked) {
2111 lock_ReleaseWrite(&scp->rw);
2112 scp_locked = 0;
2113 }
2114 code = 0;
2115 /* Prefer the error value from FetchData over rx_EndCall */
2116 } else if (code == 0 && code1 != 0)
2117 code = code1;
2118 osi_Log0(afsd_logp, "CALL FetchData DONE");
2120 } while (cm_Analyze(connp, userp, reqp, &scp->fid, 0, &volSync, NULL, NULL, code));
2122 fetchingcompleted:
2123 code = cm_MapRPCError(code, reqp);
2125 if (!scp_locked)
2126 lock_ObtainWrite(&scp->rw);
2128 /* we know that no one else has changed the buffer, since we still have
2129 * the fetching flag on the buffers, and we have the scp locked again.
2130 * Copy in the version # into the buffer if we got code 0 back from the
2131 * read.
2132 */
2133 if (code == 0) {
2134 for(qdp = biod.bufListp;
2135 qdp;
2136 qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
2137 tbufp = osi_GetQData(qdp);
2138 tbufp->dataVersion = afsStatus.dataVersionHigh;
2139 tbufp->dataVersion <<= 32;
2140 tbufp->dataVersion |= afsStatus.DataVersion;
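/* The three statements above assemble the buffer's 64-bit data version
 * from the two 32-bit halves carried in AFSFetchStatus; equivalently:
 *
 *     tbufp->dataVersion = ((afs_uint64)afsStatus.dataVersionHigh << 32)
 *                        | afsStatus.DataVersion;
 */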
2142 #ifdef DISKCACHE95
2143 /* write buffer out to disk cache */
2144 diskcache_Update(tbufp->dcp, tbufp->datap, cm_data.buf_blockSize,
2145 tbufp->dataVersion);
2146 #endif /* DISKCACHE95 */
2147 }
2148 }
2150 /* release scatter/gather I/O structure (buffers, locks) */
2151 cm_ReleaseBIOD(&biod, 0, code, 1);
2153 if (code == 0)
2154 cm_MergeStatus(NULL, scp, &afsStatus, &volSync, userp, reqp, CM_MERGEFLAG_FETCHDATA);
2155 else
2156 InterlockedDecrement(&scp->activeRPCs);
2158 return code;
2159 }
2161 /*
2162 * Similar to cm_GetBuffer but doesn't use an allocated cm_buf_t object.
2163 * Instead the data is read from the file server and copied directly into
2164 * a provided buffer. Called with scp locked. The scp is locked on return.
2165 */
2166 long cm_GetData(cm_scache_t *scp, osi_hyper_t *offsetp, char *datap, int data_length,
2167 cm_user_t *userp, cm_req_t *reqp)
2168 {
2169 long code=0, code1=0;
2170 afs_uint32 nbytes; /* bytes in transfer */
2171 afs_uint32 nbytes_hi = 0; /* high-order 32 bits of bytes in transfer */
2172 afs_uint64 length_found = 0;
2173 char *bufferp = datap;
2174 afs_uint32 buffer_offset = 0;
2175 long rbytes; /* bytes in rx_Read call */
2176 long temp;
2177 AFSFetchStatus afsStatus;
2178 AFSCallBack callback;
2179 AFSVolSync volSync;
2180 AFSFid tfid;
2181 struct rx_call *rxcallp;
2182 struct rx_connection *rxconnp;
2183 cm_conn_t *connp;
2184 int getroot;
2185 afs_int32 t1, t2;
2186 int require_64bit_ops = 0;
2187 int call_was_64bit = 0;
2188 int fs_fetchdata_offset_bug = 0;
2189 int first_read = 1;
2190 int scp_locked = 1;
2192 memset(&volSync, 0, sizeof(volSync));
2194 /* now, the buffer may or may not be filled with good data (buf_GetNewLocked
2195 * drops lots of locks, and may indeed return a properly initialized
2196 * buffer, although more likely it will just return a new, empty, buffer.
2197 */
2199 #ifdef AFS_FREELANCE_CLIENT
2201 // yj: if they're trying to get the /afs directory, we need to
2202 // handle it differently, since it's local rather than on any
2203 // server.
2205 getroot = (scp==cm_data.rootSCachep);
2206 if (getroot)
2207 osi_Log1(afsd_logp,"cm_GetData returns cm_data.rootSCachep=%x",cm_data.rootSCachep);
2210 cm_AFSFidFromFid(&tfid, &scp->fid);
2212 if (LargeIntegerGreaterThan(LargeIntegerAdd(*offsetp,
2213 ConvertLongToLargeInteger(data_length)),
2214 ConvertLongToLargeInteger(LONG_MAX))) {
2215 require_64bit_ops = 1;
2216 }
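/* Illustration: StartRXAFS_FetchData() takes a 32-bit offset, so any
 * request whose end (offset + data_length) exceeds LONG_MAX
 * (0x7FFFFFFF) must go through FetchData64. E.g. a 0x2000-byte read
 * at offset 0x7FFFF000 ends at byte 0x80000FFF and therefore requires
 * the 64-bit RPC. */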
2218 InterlockedIncrement(&scp->activeRPCs);
2219 osi_Log2(afsd_logp, "cm_GetData: fetching data scp %p DV 0x%x", scp, scp->dataVersion);
2221 #ifdef AFS_FREELANCE_CLIENT
2224 // if getroot then we don't need to make any calls
2225 // just return fake data
2227 if (cm_freelanceEnabled && getroot) {
2228 // setup the fake status
2229 afsStatus.InterfaceVersion = 0x1;
2230 afsStatus.FileType = 0x2;
2231 afsStatus.LinkCount = scp->linkCount;
2232 afsStatus.Length = cm_fakeDirSize;
2233 afsStatus.DataVersion = (afs_uint32)(cm_data.fakeDirVersion & 0xFFFFFFFF);
2234 afsStatus.Author = 0x1;
2235 afsStatus.Owner = 0x0;
2236 afsStatus.CallerAccess = 0x9;
2237 afsStatus.AnonymousAccess = 0x9;
2238 afsStatus.UnixModeBits = 0x1ff;
2239 afsStatus.ParentVnode = 0x1;
2240 afsStatus.ParentUnique = 0x1;
2241 afsStatus.ResidencyMask = 0;
2242 afsStatus.ClientModTime = (afs_uint32)FakeFreelanceModTime;
2243 afsStatus.ServerModTime = (afs_uint32)FakeFreelanceModTime;
2244 afsStatus.Group = 0;
2245 afsStatus.SyncCounter = 0;
2246 afsStatus.dataVersionHigh = (afs_uint32)(cm_data.fakeDirVersion >> 32);
2247 afsStatus.lockCount = 0;
2248 afsStatus.Length_hi = 0;
2249 afsStatus.errorCode = 0;
2250 memset(&volSync, 0, sizeof(volSync));
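/* A note on the fabricated status above: FileType 0x2 is a directory,
 * UnixModeBits 0x1ff is rwxrwxrwx, and the 64-bit fake directory
 * version is split across DataVersion (low 32 bits) and
 * dataVersionHigh (high 32 bits), mirroring what a real file server
 * would return. */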
2252 // once we're done setting up the status info,
2253 // we just fill the buffer pages with fakedata
2254 // from cm_FakeRootDir. Extra pages are set to
2255 // 0.
2257 lock_ObtainMutex(&cm_Freelance_Lock);
2258 t1 = offsetp->LowPart;
2259 memset(datap, 0, data_length);
2260 t2 = cm_fakeDirSize - t1;
2261 if (t2 > data_length)
2262 t2 = data_length;
2264 memcpy(datap, cm_FakeRootDir+t1, t2);
2265 lock_ReleaseMutex(&cm_Freelance_Lock);
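/* Worked example (hypothetical sizes): with cm_fakeDirSize = 8192,
 * t1 = 2048 and data_length = 4096, t2 starts at 6144 and is clamped
 * to 4096, so exactly one caller-buffer's worth of fake directory
 * data is copied; the memset() above guarantees that any tail beyond
 * cm_fakeDirSize reads back as zeroes. */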
2267 // once we're done, we skip over the part of the
2268 // code that does the ACTUAL fetching of data for
2269 // the requested range.
2271 goto fetchingcompleted;
2272 }
2274 #endif /* AFS_FREELANCE_CLIENT */
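/* The RPC below follows the cache manager's usual retry idiom
 * (sketch):
 *
 *     do {
 *         code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
 *         if (code)
 *             continue;
 *         ... issue and drain the Rx call ...
 *     } while (cm_Analyze(connp, userp, reqp, &scp->fid, 0,
 *                         &volSync, NULL, NULL, code));
 *
 * cm_Analyze() examines the error and returns nonzero when the
 * request should be retried, e.g. against another server hosting the
 * volume. */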
2276 if (scp_locked) {
2277 lock_ReleaseWrite(&scp->rw);
2278 scp_locked = 0;
2279 }
2281 /* now make the call */
2282 do {
2283 code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
2284 if (code)
2285 continue;
2287 rxconnp = cm_GetRxConn(connp);
2288 rxcallp = rx_NewCall(rxconnp);
2289 rx_PutConnection(rxconnp);
2291 nbytes = nbytes_hi = 0;
2293 if (SERVERHAS64BIT(connp)) {
2294 call_was_64bit = 1;
2296 osi_Log4(afsd_logp, "CALL FetchData64 scp 0x%p, off 0x%x:%08x, size 0x%x",
2297 scp, offsetp->HighPart, offsetp->LowPart, data_length);
2299 code = StartRXAFS_FetchData64(rxcallp, &tfid, offsetp->QuadPart, data_length);
2301 if (code == 0) {
2302 temp = rx_Read32(rxcallp, &nbytes_hi);
2303 if (temp == sizeof(afs_int32)) {
2304 nbytes_hi = ntohl(nbytes_hi);
2305 } else {
2306 nbytes_hi = 0;
2307 code = rx_Error(rxcallp);
2308 code1 = rx_EndCall(rxcallp, code);
2309 rxcallp = NULL;
2310 }
2311 }
2312 } else {
2313 call_was_64bit = 0;
2314 }
2316 if (code == RXGEN_OPCODE || !SERVERHAS64BIT(connp)) {
2317 if (require_64bit_ops) {
2318 osi_Log0(afsd_logp, "Skipping FetchData. Operation requires FetchData64");
2319 code = CM_ERROR_TOOBIG;
2320 } else {
2321 if (!rxcallp) {
2322 rxconnp = cm_GetRxConn(connp);
2323 rxcallp = rx_NewCall(rxconnp);
2324 rx_PutConnection(rxconnp);
2325 }
2327 osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
2328 scp, offsetp->LowPart, data_length);
2330 code = StartRXAFS_FetchData(rxcallp, &tfid, offsetp->LowPart, data_length);
2332 SET_SERVERHASNO64BIT(connp);
2333 }
2334 }
2336 if (code == 0) {
2337 temp = rx_Read32(rxcallp, &nbytes);
2338 if (temp == sizeof(afs_int32)) {
2339 nbytes = ntohl(nbytes);
2340 FillInt64(length_found, nbytes_hi, nbytes);
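/* FetchData64 sends the transfer length as two big-endian 32-bit
 * words ahead of the data stream; after the two rx_Read32()/ntohl()
 * pairs, FillInt64 combines the halves, roughly:
 *
 *     length_found = ((afs_uint64)nbytes_hi << 32) | nbytes;
 */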
2341 if (length_found > data_length) {
2342 /*
2343 * prior to 1.4.12 and 1.5.65 the file server would return
2344 * (filesize - offset) if the requested offset was greater than
2345 * the filesize. The correct return value would have been zero.
2346 * Force a retry by returning an RX_PROTOCOL_ERROR. If the cause
2347 * is a race between two RPCs issued by this cache manager, the
2348 * correct thing will happen the second time.
2349 */
2350 osi_Log0(afsd_logp, "cm_GetData length_found > data_length");
2351 fs_fetchdata_offset_bug = 1;
2352 }
2353 } else {
2354 osi_Log1(afsd_logp, "cm_GetData rx_Read32 returns %d != 4", temp);
2355 code = (rx_Error(rxcallp) < 0) ? rx_Error(rxcallp) : RX_PROTOCOL_ERROR;
2356 }
2357 }
2358 /* for the moment, nbytes_hi will always be 0 if code == 0
2359 because data_length is a 32-bit quantity. */
2361 if (code == 0) {
2362 /* fill length_found of data from the pipe into the buffer.
2363 * When we stop, bufferp and buffer_offset will tell us where
2364 * we stopped. We'll need this info below when we clear
2365 * the remainder of the last page out (and potentially
2366 * clear later pages out, if we fetch past EOF).
2367 */
2369 while (length_found > 0) {
2370 #ifdef USE_RX_IOVEC
2371 struct iovec tiov[RX_MAXIOVECS];
2372 afs_int32 tnio, iov, iov_offset;
2374 temp = rx_Readv(rxcallp, tiov, &tnio, RX_MAXIOVECS, length_found);
2375 osi_Log1(afsd_logp, "cm_GetData rx_Readv returns %d", temp);
2376 if (temp != length_found && temp < data_length) {
2377 /*
2378 * If the file server returned (filesize - offset),
2379 * then the first rx_Read will return zero octets of data.
2380 * If it does, do not treat it as an error. Correct the
2381 * length_found and continue as if the file server said
2382 * it was sending us zero octets of data.
2383 */
2384 if (fs_fetchdata_offset_bug && first_read)
2385 length_found = 0;
2386 else
2387 code = (rx_Error(rxcallp) < 0) ? rx_Error(rxcallp) : RX_PROTOCOL_ERROR;
2388 break;
2389 }
2391 iov = 0;
2392 iov_offset = 0;
2393 rbytes = temp;
2395 while (rbytes > 0) {
2396 afs_int32 len;
2398 osi_assertx(bufferp != NULL, "null cm_buf_t");
2400 len = min(tiov[iov].iov_len - iov_offset, data_length - buffer_offset);
2401 memcpy(bufferp + buffer_offset, tiov[iov].iov_base + iov_offset, len);
2402 rbytes -= len;
2403 buffer_offset += len;
2404 iov_offset += len;
2406 if (iov_offset == tiov[iov].iov_len) {
2407 iov++;
2408 iov_offset = 0;
2409 }
2410 }
2412 length_found -= temp;
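/* Sketch of the copy-out above: rx_Readv() hands back iovec entries
 * that point into Rx packet buffers, so the inner loop must memcpy
 * each fragment into the caller's buffer before the next rx_Readv()
 * call can reuse those packets. */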
2413 #else /* USE_RX_IOVEC */
2414 /* assert that there are still more buffers;
2415 * our check above for length_found being less than
2416 * data_length should ensure this.
2417 */
2418 osi_assertx(bufferp != NULL, "null cm_buf_t");
2420 /* read rbytes of data */
2421 rbytes = (afs_uint32)(length_found > data_length ? data_length : length_found);
2422 temp = rx_Read(rxcallp, bufferp, rbytes);
2423 if (temp < rbytes) {
2424 /*
2425 * If the file server returned (filesize - offset),
2426 * then the first rx_Read will return zero octets of data.
2427 * If it does, do not treat it as an error. Correct the
2428 * length_found and continue as if the file server said
2429 * it was sending us zero octets of data.
2430 */
2431 if (fs_fetchdata_offset_bug && first_read)
2432 length_found = 0;
2433 else
2434 code = (rx_Error(rxcallp) < 0) ? rx_Error(rxcallp) : RX_PROTOCOL_ERROR;
2435 break;
2436 }
2437 first_read = 0;
2439 /* and adjust counters */
2440 length_found -= temp;
2441 #endif /* USE_RX_IOVEC */
2442 }
2444 /* zero out remainder of last pages, in case we are
2445 * fetching past EOF. We were fetching an integral #
2446 * of pages, but stopped, potentially in the middle of
2447 * a page. Zero the remainder of that page, and then
2448 * all of the rest of the pages.
2449 */
2450 #ifdef USE_RX_IOVEC
2451 rbytes = data_length - buffer_offset;
2452 bufferp = datap + buffer_offset;
2453 #else /* USE_RX_IOVEC */
2455 osi_assertx((bufferp - datap) < LONG_MAX, "data >= LONG_MAX");
2456 rbytes = (long) (bufferp - datap);
2458 /* bytes left to zero */
2459 rbytes = data_length - rbytes;
2460 #endif /* USE_RX_IOVEC */
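/* Illustration: if the caller asked for data_length = 4096 bytes but
 * the server only had 1000 bytes before EOF, buffer_offset stops at
 * 1000 and the memset() below clears the remaining 3096 bytes so the
 * caller never sees stale memory. */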
2462 memset(bufferp, 0, rbytes);
2463 }
2465 if (code == 0) {
2466 if (call_was_64bit)
2467 code = EndRXAFS_FetchData64(rxcallp, &afsStatus, &callback, &volSync);
2468 else
2469 code = EndRXAFS_FetchData(rxcallp, &afsStatus, &callback, &volSync);
2470 } else {
2471 if (call_was_64bit)
2472 osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData64 skipped due to error %d", code);
2473 else
2474 osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error %d", code);
2475 }
2477 if (rxcallp)
2478 code1 = rx_EndCall(rxcallp, code);
2480 if (code1 == RXKADUNKNOWNKEY)
2481 osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");
2483 /* If we are avoiding a file server bug, ignore the error state */
2484 if (fs_fetchdata_offset_bug && first_read && length_found == 0 && code == -451) {
2485 /* Clone the current status info and clear the error state */
2486 scp_locked = cm_CloneStatus(scp, userp, scp_locked, &afsStatus, &volSync);
2487 if (scp_locked) {
2488 lock_ReleaseWrite(&scp->rw);
2489 scp_locked = 0;
2490 }
2491 code = 0;
2492 /* Prefer the error value from FetchData over rx_EndCall */
2493 } else if (code == 0 && code1 != 0)
2494 code = code1;
2495 osi_Log0(afsd_logp, "CALL FetchData DONE");
2497 } while (cm_Analyze(connp, userp, reqp, &scp->fid, 0, &volSync, NULL, NULL, code));
2499 fetchingcompleted:
2500 code = cm_MapRPCError(code, reqp);
2502 if (!scp_locked)
2503 lock_ObtainWrite(&scp->rw);
2505 if (code == 0)
2506 cm_MergeStatus(NULL, scp, &afsStatus, &volSync, userp, reqp, CM_MERGEFLAG_FETCHDATA);
2507 else
2508 InterlockedDecrement(&scp->activeRPCs);
2510 return code;
2511 }
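/* Typical use (sketch; error handling elided, scp->rw held for write
 * as the header comment requires):
 *
 *     osi_hyper_t offset;
 *     char buf[4096];
 *     offset.QuadPart = 0;
 *     code = cm_GetData(scp, &offset, buf, sizeof(buf), userp, reqp);
 */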