Windows: MergeStatus before SyncOpDone
diff --git a/src/WINNT/afsd/cm_dcache.c b/src/WINNT/afsd/cm_dcache.c
index f65e9bb..de3dea1 100644
@@ -1,21 +1,22 @@
 /*
  * Copyright 2000, International Business Machines Corporation and others.
  * All Rights Reserved.
- * 
+ *
  * This software has been released under the terms of the IBM Public
  * License.  For details, see the LICENSE file in the top-level source
  * directory or online at http://www.openafs.org/dl/license10.html
  */
 
+
+#include <afsconfig.h>
 #include <afs/param.h>
+#include <roken.h>
+
 #include <afs/stds.h>
 
-#ifndef DJGPP
 #include <windows.h>
 #include <winsock2.h>
 #include <nb30.h>
-#endif /* !DJGPP */
-#include <malloc.h>
 #include <string.h>
 #include <stdlib.h>
 #include <osi.h>
 extern void afsi_log(char *pattern, ...);
 #endif
 
-osi_mutex_t cm_bufGetMutex;
 #ifdef AFS_FREELANCE_CLIENT
 extern osi_mutex_t cm_Freelance_Lock;
 #endif
 
+#define USE_RX_IOVEC 1
+
+/* we can access connp->serverp without holding a lock because that
+   never changes since the connection is made. */
+#define SERVERHAS64BIT(connp) (!((connp)->serverp->flags & CM_SERVERFLAG_NO64BIT))
+#define SET_SERVERHASNO64BIT(connp) (cm_SetServerNo64Bit((connp)->serverp, TRUE))
+
 /* functions called back from the buffer package when reading or writing data,
  * or when holding or releasing a vnode pointer.
  */
-long cm_BufWrite(void *vfidp, osi_hyper_t *offsetp, long length, long flags,
-       cm_user_t *userp, cm_req_t *reqp)
+long cm_BufWrite(void *vscp, osi_hyper_t *offsetp, long length, long flags,
+                 cm_user_t *userp, cm_req_t *reqp)
 {
-       /* store the data back from this buffer; the buffer is locked and held,
-        * but the vnode involved isn't locked, yet.  It is held by its
-        * reference from the buffer, which won't change until the buffer is
-        * released by our caller.  Thus, we don't have to worry about holding
-        * bufp->scp.
-         */
-       long code;
-       cm_fid_t *fidp = vfidp;
-    cm_scache_t *scp;
-    long nbytes;
+    /* store the data back from this buffer; the buffer is locked and held,
+     * but the vnode involved isn't locked, yet.  It is held by its
+     * reference from the buffer, which won't change until the buffer is
+     * released by our caller.  Thus, we don't have to worry about holding
+     * bufp->scp.
+     */
+    long code, code1;
+    cm_scache_t *scp = vscp;
+    afs_int32 nbytes;
+    afs_int32 save_nbytes;
     long temp;
     AFSFetchStatus outStatus;
     AFSStoreStatus inStatus;
     osi_hyper_t thyper;
     AFSVolSync volSync;
     AFSFid tfid;
-    struct rx_call *callp;
+    struct rx_call *rxcallp;
+    struct rx_connection *rxconnp;
     osi_queueData_t *qdp;
     cm_buf_t *bufp;
-    long wbytes;
+    afs_uint32 wbytes;
     char *bufferp;
     cm_conn_t *connp;
-    long truncPos;
+    osi_hyper_t truncPos;
     cm_bulkIO_t biod;          /* bulk IO descriptor */
+    int require_64bit_ops = 0;
+    int call_was_64bit = 0;
+    int scp_locked = flags & CM_BUF_WRITE_SCP_LOCKED;
 
-    osi_assert(userp != NULL);
+    osi_assertx(userp != NULL, "null cm_user_t");
+    osi_assertx(scp != NULL, "null cm_scache_t");
 
-    /* now, the buffer may or may not be filled with good data (buf_GetNew
+    memset(&volSync, 0, sizeof(volSync));
+
+    /* now, the buffer may or may not be filled with good data (buf_GetNewLocked
      * drops lots of locks, and may indeed return a properly initialized
      * buffer, although more likely it will just return a new, empty, buffer.
      */
-       scp = cm_FindSCache(fidp);
-       if (scp == NULL)
-               return CM_ERROR_NOSUCHFILE;     /* shouldn't happen */
+    if (!scp_locked)
+        lock_ObtainWrite(&scp->rw);
+    if (scp->flags & CM_SCACHEFLAG_DELETED) {
+        if (!scp_locked)
+            lock_ReleaseWrite(&scp->rw);
+       return CM_ERROR_NOSUCHFILE;
+    }
+
+    cm_AFSFidFromFid(&tfid, &scp->fid);
 
-       cm_AFSFidFromFid(&tfid, fidp);
+    /* Serialize StoreData RPC's; for rationale see cm_scache.c */
+    (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);
 
-       lock_ObtainMutex(&scp->mx);
-        
     code = cm_SetupStoreBIOD(scp, offsetp, length, &biod, userp, reqp);
     if (code) {
-               osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
-               lock_ReleaseMutex(&scp->mx);
-               cm_ReleaseSCache(scp);
+        osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
+        cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
+        if (!scp_locked)
+            lock_ReleaseWrite(&scp->rw);
         return code;
     }
 
-       if (biod.length == 0) {
-               osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
-               lock_ReleaseMutex(&scp->mx);
-               cm_ReleaseBIOD(&biod, 1);       /* should be a NOOP */
-               cm_ReleaseSCache(scp);
+    if (biod.length == 0) {
+        osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
+        cm_ReleaseBIOD(&biod, 1, 0, 1);        /* should be a NOOP */
+        cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
+        if (!scp_locked)
+            lock_ReleaseWrite(&scp->rw);
         return 0;
-       }   
-
-       /* Serialize StoreData RPC's; for rationale see cm_scache.c */
-       (void) cm_SyncOp(scp, NULL, userp, reqp, 0, CM_SCACHESYNC_STOREDATA_EXCL);
+    }
 
-       /* prepare the output status for the store */
-       scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
+    /* prepare the output status for the store */
+    scp->mask |= CM_SCACHEMASK_CLIENTMODTIME;
     cm_StatusFromAttr(&inStatus, scp, NULL);
-    truncPos = scp->length.LowPart;
+    truncPos = scp->length;
     if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
-         && scp->truncPos.LowPart < (unsigned long) truncPos)
-        truncPos = scp->truncPos.LowPart;
+        && LargeIntegerLessThan(scp->truncPos, truncPos))
+        truncPos = scp->truncPos;
        scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
-                
-       /* compute how many bytes to write from this buffer */
+
+    /* compute how many bytes to write from this buffer */
     thyper = LargeIntegerSubtract(scp->length, biod.offset);
     if (LargeIntegerLessThanZero(thyper)) {
-               /* entire buffer is past EOF */
-               nbytes = 0;
+        /* entire buffer is past EOF */
+        nbytes = 0;
     }
     else {
-               /* otherwise write out part of buffer before EOF, but not
+        /* otherwise write out part of buffer before EOF, but not
          * more than bufferSize bytes.
          */
-               nbytes = thyper.LowPart;
-        if (nbytes > biod.length) 
+        if (LargeIntegerGreaterThan(thyper,
+                                    ConvertLongToLargeInteger(biod.length))) {
             nbytes = biod.length;
+        } else {
+            /* if thyper is less than or equal to biod.length, then we
+               can safely assume that the value fits in a long. */
+            nbytes = thyper.LowPart;
+        }
+    }
+
+    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
+                                                 ConvertLongToLargeInteger(nbytes)),
+                                 ConvertLongToLargeInteger(LONG_MAX)) ||
+         LargeIntegerGreaterThan(truncPos,
+                                 ConvertLongToLargeInteger(LONG_MAX))) {
+        require_64bit_ops = 1;
     }
 
-       lock_ReleaseMutex(&scp->mx);
-        
+    lock_ReleaseWrite(&scp->rw);
+
     /* now we're ready to do the store operation */
+    save_nbytes = nbytes;
     do {
-               code = cm_Conn(&scp->fid, userp, reqp, &connp);
-        if (code) 
+        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
+        if (code)
             continue;
-               
-               callp = rx_NewCall(connp->callp);
 
-               osi_Log3(afsd_logp, "CALL StoreData vp %x, off 0x%x, size 0x%x",
-                 (long) scp, biod.offset.LowPart, nbytes);
+    retry:
+        rxconnp = cm_GetRxConn(connp);
+        rxcallp = rx_NewCall(rxconnp);
+        rx_PutConnection(rxconnp);
+
+        if (SERVERHAS64BIT(connp)) {
+            call_was_64bit = 1;
+
+            osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData64 scp 0x%p, offset 0x%x:%08x, length 0x%x",
+                     scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);
+
+            code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
+                                          biod.offset.QuadPart,
+                                          nbytes,
+                                          truncPos.QuadPart);
+           if (code)
+               osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData64 FAILURE, code 0x%x", code);
+           else
+               osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData64 SUCCESS");
+        } else {
+            call_was_64bit = 0;
+
+            if (require_64bit_ops) {
+                osi_Log0(afsd_logp, "Skipping StartRXAFS_StoreData.  The operation requires large file support in the server.");
+                code = CM_ERROR_TOOBIG;
+            } else {
+                osi_Log4(afsd_logp, "CALL StartRXAFS_StoreData scp 0x%p, offset 0x%x:%08x, length 0x%x",
+                         scp, biod.offset.HighPart, biod.offset.LowPart, nbytes);
+
+                code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
+                                            biod.offset.LowPart, nbytes, truncPos.LowPart);
+               if (code)
+                   osi_Log1(afsd_logp, "CALL StartRXAFS_StoreData FAILURE, code 0x%x", code);
+               else
+                   osi_Log0(afsd_logp, "CALL StartRXAFS_StoreData SUCCESS");
+            }
+        }
 
-        code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
-                                    biod.offset.LowPart, nbytes, truncPos);
+        if (code == 0) {
+            afs_uint32 buf_offset = 0, bytes_copied = 0;
 
-               if (code == 0) {
             /* write the data from the list of buffers */
             qdp = NULL;
-                       while(nbytes > 0) {
-                               if (qdp == NULL)
-                                       qdp = biod.bufListEndp;
-                               else
-                                       qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
-                               osi_assert(qdp != NULL);
+            while(nbytes > 0) {
+#ifdef USE_RX_IOVEC
+                struct iovec tiov[RX_MAXIOVECS];
+                afs_int32 tnio, vlen, vbytes, iov, voffset;
+                afs_uint32 vleft;
+
+                vbytes = rx_WritevAlloc(rxcallp, tiov, &tnio, RX_MAXIOVECS, nbytes);
+                if (vbytes <= 0) {
+                    code = RX_PROTOCOL_ERROR;
+                    break;
+                }
+
+                for ( iov = voffset = vlen = 0;
+                      vlen < vbytes && iov < tnio; vlen += wbytes) {
+                    if (qdp == NULL) {
+                        qdp = biod.bufListEndp;
+                        buf_offset = offsetp->LowPart % cm_data.buf_blockSize;
+                    } else if (buf_offset == cm_data.buf_blockSize) {
+                        qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
+                        buf_offset = 0;
+                    }
+
+                    osi_assertx(qdp != NULL, "null osi_queueData_t");
+                    bufp = osi_GetQData(qdp);
+                    bufferp = bufp->datap + buf_offset;
+                    wbytes = vbytes - vlen;
+                    if (wbytes > cm_data.buf_blockSize - buf_offset)
+                        wbytes = cm_data.buf_blockSize - buf_offset;
+
+                    vleft = tiov[iov].iov_len - voffset;
+                    while (wbytes > vleft && iov < tnio) {
+                        memcpy(tiov[iov].iov_base + voffset, bufferp, vleft);
+                        bytes_copied += vleft;
+                        vlen += vleft;
+                        wbytes -= vleft;
+                        bufferp += vleft;
+                        buf_offset += vleft;
+
+                        iov++;
+                        voffset = 0;
+                        vleft = tiov[iov].iov_len;
+                    }
+
+                    if (iov < tnio) {
+                        memcpy(tiov[iov].iov_base + voffset, bufferp, wbytes);
+                        bytes_copied += wbytes;
+                        if (tiov[iov].iov_len == voffset + wbytes) {
+                            iov++;
+                            voffset = 0;
+                            vleft = (iov < tnio) ? tiov[iov].iov_len : 0;
+                        } else {
+                            voffset += wbytes;
+                            vleft -= wbytes;
+                        }
+                        bufferp += wbytes;
+                        buf_offset += wbytes;
+                    } else {
+                        voffset = vleft = 0;
+                    }
+                }
+
+                osi_assertx(iov == tnio, "incorrect iov count");
+                osi_assertx(vlen == vbytes, "bytes_copied != vbytes");
+                osi_assertx(bufp->offset.QuadPart + buf_offset == biod.offset.QuadPart + bytes_copied,
+                            "begin and end offsets don't match");
+
+                temp = rx_Writev(rxcallp, tiov, tnio, vbytes);
+                if (temp != vbytes) {
+                    osi_Log3(afsd_logp, "rx_Writev failed bp 0x%p, %d != %d", bufp, temp, vbytes);
+                    code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
+                    break;
+                }
+
+                osi_Log3(afsd_logp, "rx_Writev succeeded bp 0x%p offset 0x%x, wrote %u",
+                         bufp, buf_offset, vbytes);
+                nbytes -= vbytes;
+#else /* USE_RX_IOVEC */
+                if (qdp == NULL) {
+                    qdp = biod.bufListEndp;
+                    buf_offset = offsetp->LowPart % cm_data.buf_blockSize;
+                } else {
+                    qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
+                    buf_offset = 0;
+                }
+
+                osi_assertx(qdp != NULL, "null osi_queueData_t");
                 bufp = osi_GetQData(qdp);
-                bufferp = bufp->datap;
+                bufferp = bufp->datap + buf_offset;
                 wbytes = nbytes;
-                if (wbytes > buf_bufferSize) 
-                    wbytes = buf_bufferSize;
+                if (wbytes > cm_data.buf_blockSize - buf_offset)
+                    wbytes = cm_data.buf_blockSize - buf_offset;
 
                 /* write out wbytes of data from bufferp */
-                temp = rx_Write(callp, bufferp, wbytes);
+                temp = rx_Write(rxcallp, bufferp, wbytes);
                 if (temp != wbytes) {
-                    osi_Log2(afsd_logp, "rx_Write failed %d != %d",temp,wbytes);
-                    code = -1;
-                                       break;
-                               } else {
-                    osi_Log1(afsd_logp, "rx_Write succeeded %d",temp);
+                    osi_Log3(afsd_logp, "rx_Write failed bp 0x%p, %d != %d",bufp,temp,wbytes);
+                    code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
+                    break;
+                } else {
+                    osi_Log2(afsd_logp, "rx_Write succeeded bp 0x%p, %d",bufp,temp);
                 }
                 nbytes -= wbytes;
+#endif /* USE_RX_IOVEC */
             }  /* while more bytes to write */
-               }               /* if RPC started successfully */
-        else {
-            osi_Log1(afsd_logp, "StartRXAFS_StoreData failed (%lX)",code);
+        }      /* if RPC started successfully */
+
+        if (code == 0) {
+            if (call_was_64bit) {
+                code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
+                if (code)
+                    osi_Log2(afsd_logp, "EndRXAFS_StoreData64 FAILURE scp 0x%p code %lX", scp, code);
+               else
+                   osi_Log0(afsd_logp, "EndRXAFS_StoreData64 SUCCESS");
+            } else {
+                code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
+                if (code)
+                    osi_Log2(afsd_logp, "EndRXAFS_StoreData FAILURE scp 0x%p code %lX",scp,code);
+               else
+                   osi_Log0(afsd_logp, "EndRXAFS_StoreData SUCCESS");
+            }
         }
-               if (code == 0) {
-                       code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
-            if (code)
-                osi_Log1(afsd_logp, "EndRXAFS_StoreData failed (%lX)",code);
+
+        code1 = rx_EndCall(rxcallp, code);
+
+        if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
+            SET_SERVERHASNO64BIT(connp);
+            qdp = NULL;
+            nbytes = save_nbytes;
+            goto retry;
         }
-        code = rx_EndCall(callp, code);
-        osi_Log0(afsd_logp, "CALL StoreData DONE");
-                
-       } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
+
+        /* Prefer StoreData error over rx_EndCall error */
+        if (code1 != 0)
+            code = code1;
+    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
+
     code = cm_MapRPCError(code, reqp);
-        
+
+    if (code)
+        osi_Log2(afsd_logp, "CALL StoreData FAILURE scp 0x%p, code 0x%x", scp, code);
+    else
+        osi_Log1(afsd_logp, "CALL StoreData SUCCESS scp 0x%p", scp);
+
     /* now, clean up our state */
-    lock_ObtainMutex(&scp->mx);
+    lock_ObtainWrite(&scp->rw);
 
-       cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
+    cm_ReleaseBIOD(&biod, 1, code, 1);
 
-       if (code == 0) {
-               /* now, here's something a little tricky: in AFS 3, a dirty
-                * length can't be directly stored, instead, a dirty chunk is
-                * stored that sets the file's size (by writing and by using
-                * the truncate-first option in the store call).
-                *
+    if (code == 0) {
+        osi_hyper_t t;
+        /* now, here's something a little tricky: in AFS 3, a dirty
+         * length can't be directly stored, instead, a dirty chunk is
+         * stored that sets the file's size (by writing and by using
+         * the truncate-first option in the store call).
+         *
          * At this point, we've just finished a store, and so the trunc
-                * pos field is clean.  If the file's size at the server is at
-                * least as big as we think it should be, then we turn off the
-                * length dirty bit, since all the other dirty buffers must
-                * precede this one in the file.
+         * pos field is clean.  If the file's size at the server is at
+         * least as big as we think it should be, then we turn off the
+         * length dirty bit, since all the other dirty buffers must
+         * precede this one in the file.
          *
          * The file's desired size shouldn't be smaller than what's
-                * stored at the server now, since we just did the trunc pos
-                * store.
+         * stored at the server now, since we just did the trunc pos
+         * store.
          *
          * We have to turn off the length dirty bit as soon as we can,
-                * so that we see updates made by other machines.
+         * so that we see updates made by other machines.
          */
-               if (outStatus.Length >= scp->length.LowPart)
+
+        if (call_was_64bit) {
+            t.LowPart = outStatus.Length;
+            t.HighPart = outStatus.Length_hi;
+        } else {
+            t = ConvertLongToLargeInteger(outStatus.Length);
+        }
+
+        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
             scp->mask &= ~CM_SCACHEMASK_LENGTH;
-               cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
-       } else {
-               if (code == CM_ERROR_SPACE)
-                       scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
-               else if (code == CM_ERROR_QUOTA)
-                       scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
-       }
-    lock_ReleaseMutex(&scp->mx);
-    cm_ReleaseBIOD(&biod, 1);
-       cm_ReleaseSCache(scp);
+
+        cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, reqp, CM_MERGEFLAG_STOREDATA);
+    } else {
+        if (code == CM_ERROR_SPACE)
+            scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
+        else if (code == CM_ERROR_QUOTA)
+            scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
+    }
+    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
+
+    if (!scp_locked)
+        lock_ReleaseWrite(&scp->rw);
 
     return code;
 }
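
The rewritten cleanup path above is the change named in the subject line: the RPC result is now folded into the scache entry (cm_MergeStatus, or the CM_SCACHEFLAG_OUTOFSPACE/OVERQUOTA flags on error) while the CM_SCACHESYNC_STOREDATA_EXCL state is still held, and cm_SyncOpDone() runs only after that work, instead of before it as in the old code. A condensed outline of the new tail of cm_BufWrite(), for reference; it is not compilable on its own since it assumes the surrounding OpenAFS cache-manager declarations:

    /* condensed from the hunk above; scp, biod, code, outStatus, volSync,
     * userp, reqp and scp_locked are cm_BufWrite() locals */
    lock_ObtainWrite(&scp->rw);
    cm_ReleaseBIOD(&biod, 1, code, 1);

    if (code == 0) {
        /* merge the returned status while the StoreData serialization
         * is still in effect */
        cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, reqp,
                       CM_MERGEFLAG_STOREDATA);
    } else if (code == CM_ERROR_SPACE) {
        scp->flags |= CM_SCACHEFLAG_OUTOFSPACE;
    } else if (code == CM_ERROR_QUOTA) {
        scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
    }

    /* only now is the serialized StoreData state released */
    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);

    if (!scp_locked)
        lock_ReleaseWrite(&scp->rw);
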
@@ -227,121 +407,179 @@ long cm_StoreMini(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
     AFSStoreStatus inStatus;
     AFSVolSync volSync;
     AFSFid tfid;
-       long code;
-       long truncPos;
-       cm_conn_t *connp;
-    struct rx_call *callp;
+    long code, code1;
+    osi_hyper_t truncPos;
+    cm_conn_t *connp;
+    struct rx_call *rxcallp;
+    struct rx_connection *rxconnp;
+    int require_64bit_ops = 0;
+    int call_was_64bit = 0;
+
+    memset(&volSync, 0, sizeof(volSync));
 
-       /* Serialize StoreData RPC's; for rationale see cm_scache.c */
-       (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
+    /* Serialize StoreData RPC's; for rationale see cm_scache.c */
+    (void) cm_SyncOp(scp, NULL, userp, reqp, 0,
                      CM_SCACHESYNC_STOREDATA_EXCL);
 
-       /* prepare the output status for the store */
-       inStatus.Mask = AFS_SETMODTIME;
-       inStatus.ClientModTime = scp->clientModTime;
-       scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;
+    /* prepare the output status for the store */
+    inStatus.Mask = AFS_SETMODTIME;
+    inStatus.ClientModTime = scp->clientModTime;
+    scp->mask &= ~CM_SCACHEMASK_CLIENTMODTIME;
 
-       /* calculate truncation position */
-    truncPos = scp->length.LowPart;
+    /* calculate truncation position */
+    truncPos = scp->length;
     if ((scp->mask & CM_SCACHEMASK_TRUNCPOS)
-         && scp->truncPos.LowPart < (unsigned long) truncPos)
-        truncPos = scp->truncPos.LowPart;
-       scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
-                
-       lock_ReleaseMutex(&scp->mx);
+        && LargeIntegerLessThan(scp->truncPos, truncPos))
+        truncPos = scp->truncPos;
+    scp->mask &= ~CM_SCACHEMASK_TRUNCPOS;
+
+    if (LargeIntegerGreaterThan(truncPos,
+                                ConvertLongToLargeInteger(LONG_MAX))) {
+
+        require_64bit_ops = 1;
+    }
+
+    lock_ReleaseWrite(&scp->rw);
 
-       cm_AFSFidFromFid(&tfid, &scp->fid);
+    cm_AFSFidFromFid(&tfid, &scp->fid);
 
     /* now we're ready to do the store operation */
     do {
-               code = cm_Conn(&scp->fid, userp, reqp, &connp);
-        if (code) 
+        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
+        if (code)
             continue;
-               
-               callp = rx_NewCall(connp->callp);
 
-        code = StartRXAFS_StoreData(callp, &tfid, &inStatus,
-                                    0, 0, truncPos);
+    retry:
+        rxconnp = cm_GetRxConn(connp);
+        rxcallp = rx_NewCall(rxconnp);
+        rx_PutConnection(rxconnp);
+
+        if (SERVERHAS64BIT(connp)) {
+            call_was_64bit = 1;
+
+            code = StartRXAFS_StoreData64(rxcallp, &tfid, &inStatus,
+                                          0, 0, truncPos.QuadPart);
+        } else {
+            call_was_64bit = 0;
+
+            if (require_64bit_ops) {
+                code = CM_ERROR_TOOBIG;
+            } else {
+                code = StartRXAFS_StoreData(rxcallp, &tfid, &inStatus,
+                                            0, 0, truncPos.LowPart);
+            }
+        }
 
-               if (code == 0)
-                       code = EndRXAFS_StoreData(callp, &outStatus, &volSync);
-        code = rx_EndCall(callp, code);
-       } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
+        if (code == 0) {
+            if (call_was_64bit)
+                code = EndRXAFS_StoreData64(rxcallp, &outStatus, &volSync);
+            else
+                code = EndRXAFS_StoreData(rxcallp, &outStatus, &volSync);
+        }
+        code1 = rx_EndCall(rxcallp, code);
+
+        if ((code == RXGEN_OPCODE || code1 == RXGEN_OPCODE) && SERVERHAS64BIT(connp)) {
+            SET_SERVERHASNO64BIT(connp);
+            goto retry;
+        }
+
+        /* prefer StoreData error over rx_EndCall error */
+        if (code == 0 && code1 != 0)
+            code = code1;
+    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
     code = cm_MapRPCError(code, reqp);
-        
+
     /* now, clean up our state */
-    lock_ObtainMutex(&scp->mx);
+    lock_ObtainWrite(&scp->rw);
 
-       cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
+    if (code == 0) {
+        osi_hyper_t t;
+        /*
+         * For explanation of handling of CM_SCACHEMASK_LENGTH,
+         * see cm_BufWrite().
+         */
+        if (call_was_64bit) {
+            t.HighPart = outStatus.Length_hi;
+            t.LowPart = outStatus.Length;
+        } else {
+            t = ConvertLongToLargeInteger(outStatus.Length);
+        }
 
-       if (code == 0) {
-               /*
-                * For explanation of handling of CM_SCACHEMASK_LENGTH,
-                * see cm_BufWrite().
-                */
-               if (outStatus.Length >= scp->length.LowPart)
+        if (LargeIntegerGreaterThanOrEqualTo(t, scp->length))
             scp->mask &= ~CM_SCACHEMASK_LENGTH;
-               cm_MergeStatus(scp, &outStatus, &volSync, userp, 0);
-       }
+        cm_MergeStatus(NULL, scp, &outStatus, &volSync, userp, reqp, CM_MERGEFLAG_STOREDATA);
+    }
+    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
 
-       return code;
+    return code;
 }
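
Both cm_BufWrite() and cm_StoreMini() now decide up front whether the store can be expressed with the 32-bit RXAFS_StoreData arguments at all: if the end of the transfer or the truncation position exceeds LONG_MAX, only RXAFS_StoreData64 can carry it (otherwise the 32-bit path fails with CM_ERROR_TOOBIG), and a server that rejects the 64-bit opcode with RXGEN_OPCODE is marked with SET_SERVERHASNO64BIT and the call retried. A small standalone illustration of the size test, using plain 64-bit integers in place of osi_hyper_t (illustrative function name; INT32_MAX stands in for the Windows 32-bit LONG_MAX):

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors the require_64bit_ops tests added above; on the Windows
     * client, long is 32 bits, so LONG_MAX == INT32_MAX */
    static int store_requires_64bit(int64_t offset, int64_t nbytes,
                                    int64_t truncPos)
    {
        return (offset + nbytes > (int64_t)INT32_MAX) ||
               (truncPos > (int64_t)INT32_MAX);
    }

    int main(void)
    {
        /* a 5 GB truncation position forces RXAFS_StoreData64 */
        printf("%d\n", store_requires_64bit(0, 0, 5LL * 1024 * 1024 * 1024));
        /* a small store near the start of the file can use either RPC */
        printf("%d\n", store_requires_64bit(4096, 4096, 8192));
        return 0;
    }
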
 
 long cm_BufRead(cm_buf_t *bufp, long nbytes, long *bytesReadp, cm_user_t *userp)
 {
-       *bytesReadp = buf_bufferSize;
+    *bytesReadp = 0;
 
-       /* now return a code that means that I/O is done */
+    /* now return a code that means that I/O is done */
     return 0;
 }
 
-/* stabilize scache entry, and return with it locked so 
- * it stays stable.
+/*
+ * stabilize scache entry with CM_SCACHESYNC_SETSIZE.  This prevents new
+ * data buffers from being allocated, new data from being fetched from the
+ * file server, and new writes from being accepted from the application,
+ * but permits dirty buffers to be written to the file server.
+ *
+ * Stabilize uses cm_SyncOp to maintain the cm_scache_t in this stable state
+ * instead of holding the rwlock exclusively.  This permits background stores
+ * to be performed in parallel and in particular allow FlushFile to be
+ * implemented without violating the locking hierarchy.
  */
-long cm_BufStabilize(void *parmp, cm_user_t *userp, cm_req_t *reqp)
+long cm_BufStabilize(void *vscp, cm_user_t *userp, cm_req_t *reqp)
 {
-       cm_scache_t *scp;
+    cm_scache_t *scp = vscp;
     long code;
 
-    scp = parmp;
-        
-       lock_ObtainMutex(&scp->mx);
-    code = cm_SyncOp(scp, NULL, userp, reqp, 0, 
+    lock_ObtainWrite(&scp->rw);
+    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
                      CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
-       if (code) {
-               lock_ReleaseMutex(&scp->mx);
-        return code;
-       }
-        
-    return 0;
+    lock_ReleaseWrite(&scp->rw);
+
+    return code;
 }
 
 /* undoes the work that cm_BufStabilize does: releases lock so things can change again */
-long cm_BufUnstabilize(void *parmp, cm_user_t *userp)
+long cm_BufUnstabilize(void *vscp, cm_user_t *userp)
 {
-       cm_scache_t *scp;
-        
-    scp = parmp;
-        
-    lock_ReleaseMutex(&scp->mx);
-        
-       /* always succeeds */
+    cm_scache_t *scp = vscp;
+
+    lock_ObtainWrite(&scp->rw);
+    cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_SETSIZE);
+
+    lock_ReleaseWrite(&scp->rw);
+
+    /* always succeeds */
     return 0;
 }
 
 cm_buf_ops_t cm_bufOps = {
-       cm_BufWrite,
+    cm_BufWrite,
     cm_BufRead,
     cm_BufStabilize,
     cm_BufUnstabilize
 };
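
cm_bufOps is a table of function pointers handed to the buffer package by cm_InitDCache() just below; the buffer package then calls back into the cache manager through it when buffers need to be written, read, stabilized, or unstabilized. A minimal standalone sketch of that registration pattern (the demo_* names are illustrative, not OpenAFS APIs):

    #include <stdio.h>

    /* a cut-down stand-in for cm_buf_ops_t: the owner supplies handlers,
     * the buffer layer stores the table and calls through it later */
    typedef struct demo_buf_ops {
        long (*Writep)(void *vscp, long length);
        long (*Readp)(void *vscp, long length);
    } demo_buf_ops_t;

    static const demo_buf_ops_t *demo_registered_ops;

    static void demo_buf_Init(const demo_buf_ops_t *opsp)
    {
        demo_registered_ops = opsp;             /* remembered for callbacks */
    }

    static long demo_BufWrite(void *vscp, long length)
    {
        printf("write-back of %ld bytes requested\n", length);
        return 0;
    }

    static long demo_BufRead(void *vscp, long length)
    {
        printf("read of %ld bytes requested\n", length);
        return 0;
    }

    static const demo_buf_ops_t demo_ops = { demo_BufWrite, demo_BufRead };

    int main(void)
    {
        demo_buf_Init(&demo_ops);
        return (int)demo_registered_ops->Writep(NULL, 4096L);
    }
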
 
-int cm_InitDCache(long chunkSize, long nbuffers)
+long cm_ValidateDCache(void)
 {
-       lock_InitializeMutex(&cm_bufGetMutex, "buf_Get mutex");
-       if (nbuffers) 
-        buf_nbuffers = nbuffers;
-       return buf_Init(&cm_bufOps);
+    return buf_ValidateBuffers();
+}
+
+long cm_ShutdownDCache(void)
+{
+    return 0;
+}
+
+int cm_InitDCache(int newFile, long chunkSize, afs_uint64 nbuffers)
+{
+    return buf_Init(newFile, &cm_bufOps, nbuffers);
 }
 
 /* check to see if we have an up-to-date buffer.  The buffer must have
@@ -355,231 +593,364 @@ int cm_InitDCache(long chunkSize, long nbuffers)
  */
 int cm_HaveBuffer(cm_scache_t *scp, cm_buf_t *bufp, int isBufLocked)
 {
-       int code;
-       if (!cm_HaveCallback(scp))
-               return 0;
-       if ((bufp->cmFlags
-            & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
-               == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
-               return 1;
-               if (bufp->dataVersion == scp->dataVersion)
-               return 1;
-       if (!isBufLocked) {
-               code = lock_TryMutex(&bufp->mx);
+    int code;
+    if (!cm_HaveCallback(scp))
+        return 0;
+    if ((bufp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED)) == (CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED))
+        return 1;
+    if (bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow)
+        return 1;
+    if (bufp->offset.QuadPart >= scp->serverLength.QuadPart)
+        return 1;
+    if (!isBufLocked) {
+        code = lock_TryMutex(&bufp->mx);
         if (code == 0) {
-                       /* don't have the lock, and can't lock it, then
+            /* don't have the lock, and can't lock it, then
              * return failure.
              */
             return 0;
         }
     }
 
-       /* remember dirty flag for later */
-       code = bufp->flags & CM_BUF_DIRTY;
+    /* remember dirty flag for later */
+    code = bufp->flags & CM_BUF_DIRTY;
 
-       /* release lock if we obtained it here */
-       if (!isBufLocked) 
+    /* release lock if we obtained it here */
+    if (!isBufLocked)
         lock_ReleaseMutex(&bufp->mx);
 
-       /* if buffer was dirty, buffer is acceptable for use */
-        if (code) 
-            return 1;
-        else 
-            return 0;
+    /* if buffer was dirty, buffer is acceptable for use */
+    if (code)
+        return 1;
+    else
+        return 0;
 }
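
The buffer validity test above was widened: instead of requiring an exact dataVersion match, a buffer is acceptable when its version lies in the window [scp->bufDataVersionLow, scp->dataVersion], or when it starts at or beyond the length the server is known to have (and, as before, when it is fully fetched or dirty). A standalone restatement of the new version/offset checks, with plain integers standing in for the OpenAFS types:

    #include <stdint.h>

    /* illustrative only; afs data versions and osi_hyper_t offsets are
     * modelled with fixed-width integers */
    int buffer_is_current(uint64_t bufDataVersion,
                          uint64_t scpDataVersion,
                          uint64_t scpBufDataVersionLow,
                          int64_t bufOffset,
                          int64_t serverLength)
    {
        if (bufDataVersion <= scpDataVersion &&
            bufDataVersion >= scpBufDataVersionLow)
            return 1;                   /* version inside the valid window */
        if (bufOffset >= serverLength)
            return 1;                   /* wholly beyond what the server has */
        return 0;
    }
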
 
-/* used when deciding whether to do a prefetch or not */
-long cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, long length,
-       cm_user_t *up, cm_req_t *reqp, osi_hyper_t *realBasep)
+/*
+ * used when deciding whether to do a background fetch or not.
+ * call with scp->rw write-locked.
+ */
+afs_int32
+cm_CheckFetchRange(cm_scache_t *scp, osi_hyper_t *startBasep, osi_hyper_t *length,
+                        cm_user_t *userp, cm_req_t *reqp, osi_hyper_t *realBasep)
 {
-       osi_hyper_t toffset;
     osi_hyper_t tbase;
+    osi_hyper_t tlength;
+    osi_hyper_t tblocksize;
     long code;
     cm_buf_t *bp;
     int stop;
-        
+
     /* now scan all buffers in the range, looking for any that look like
      * they need work.
      */
-       tbase = *startBasep;
-       stop = 0;
-       lock_ObtainMutex(&scp->mx);
-    while(length > 0) {
-               /* get callback so we can do a meaningful dataVersion comparison */
-        code = cm_SyncOp(scp, NULL, up, reqp, 0,
-                         CM_SCACHESYNC_NEEDCALLBACK
-                         | CM_SCACHESYNC_GETSTATUS);
-               if (code) {
-                       scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
-                       lock_ReleaseMutex(&scp->mx);
+    tbase = *startBasep;
+    tlength = *length;
+    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
+    stop = 0;
+    while (LargeIntegerGreaterThanZero(tlength)) {
+        /* get callback so we can do a meaningful dataVersion comparison */
+        code = cm_SyncOp(scp, NULL, userp, reqp, 0,
+                         CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+        if (code)
             return code;
-        }
-                
+
         if (LargeIntegerGreaterThanOrEqualTo(tbase, scp->length)) {
-                       /* we're past the end of file */
+            /* we're past the end of file */
             break;
         }
 
-               bp = buf_Find(scp, &tbase);
-               /* We cheat slightly by not locking the bp mutex. */
+        bp = buf_Find(scp, &tbase);
+        /* We cheat slightly by not locking the bp mutex. */
         if (bp) {
-            if ((bp->cmFlags
-                             & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING)) == 0
-                 && bp->dataVersion != scp->dataVersion)
+            if ((bp->cmFlags & (CM_BUF_CMFETCHING | CM_BUF_CMSTORING | CM_BUF_CMBKGFETCH)) == 0
+                 && (bp->dataVersion < scp->bufDataVersionLow || bp->dataVersion > scp->dataVersion))
                 stop = 1;
             buf_Release(bp);
-               }
-        else 
+           bp = NULL;
+        }
+        else
             stop = 1;
 
-               /* if this buffer is essentially guaranteed to require a fetch,
+        /* if this buffer is essentially guaranteed to require a fetch,
          * break out here and return this position.
          */
-        if (stop) 
+        if (stop)
             break;
-                
-        toffset.LowPart = buf_bufferSize;
-        toffset.HighPart = 0;
-        tbase = LargeIntegerAdd(toffset, tbase);
-        length -= buf_bufferSize;
+
+        tbase = LargeIntegerAdd(tbase, tblocksize);
+        tlength = LargeIntegerSubtract(tlength,  tblocksize);
     }
-        
-    /* if we get here, either everything is fine or stop stopped us at a
+
+    /* if we get here, either everything is fine or 'stop' stopped us at a
      * particular buffer in the range that definitely needs to be fetched.
      */
     if (stop == 0) {
-               /* return non-zero code since realBasep won't be valid */
-               scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
-               code = -1;
+        /* return non-zero code since realBasep won't be valid */
+        code = -1;
     }
     else {
-               /* successfully found a page that will need fetching */
-               *realBasep = tbase;
+        /* successfully found a page that will need fetching */
+        *realBasep = tbase;
         code = 0;
     }
-    lock_ReleaseMutex(&scp->mx);
     return code;
 }
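
cm_CheckFetchRange() now takes a 64-bit length and walks the requested range one cache block at a time, stopping early at end of file and reporting the first offset whose buffer would actually need a fetch. A generic standalone version of that scan (illustrative names; a predicate callback stands in for the buf_Find/dataVersion test):

    #include <stdint.h>

    typedef int (*needs_fetch_fn)(int64_t offset, void *ctx);

    /* returns 0 and sets *realBasep to the first block that needs fetching,
     * or -1 if nothing in [base, base + length) needs work */
    int find_first_fetch_offset(int64_t base, int64_t length,
                                int64_t blockSize, int64_t fileLength,
                                needs_fetch_fn needs_fetch, void *ctx,
                                int64_t *realBasep)
    {
        int64_t offset;

        for (offset = base; offset < base + length; offset += blockSize) {
            if (offset >= fileLength)
                break;                  /* past end of file; stop looking */
            if (needs_fetch(offset, ctx)) {
                *realBasep = offset;    /* first block that must be fetched */
                return 0;
            }
        }
        return -1;                      /* realBasep not valid */
    }
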
 
-void cm_BkgStore(cm_scache_t *scp, long p1, long p2, long p3, long p4,
-       cm_user_t *userp)
+afs_int32
+cm_BkgStore(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
+           cm_user_t *userp)
 {
-       osi_hyper_t toffset;
+    osi_hyper_t toffset;
     long length;
-       cm_req_t req;
+    cm_req_t req;
+    long code = 0;
 
+    if (scp->flags & CM_SCACHEFLAG_DELETED) {
+       osi_Log4(afsd_logp, "Skipping BKG store - Deleted scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
+    } else {
        cm_InitReq(&req);
+
+       /* Retries will be performed by the BkgDaemon thread if appropriate */
        req.flags |= CM_REQ_NORETRY;
 
-    toffset.LowPart = p1;
-    toffset.HighPart = p2;
-    length = p3;
+       toffset.LowPart = p1;
+       toffset.HighPart = p2;
+       length = p3;
+
+       osi_Log4(afsd_logp, "Starting BKG store scp 0x%p, offset 0x%x:%08x, length 0x%x", scp, p2, p1, p3);
 
-       osi_Log2(afsd_logp, "Starting BKG store vp 0x%x, base 0x%x", scp, p1);
+       code = cm_BufWrite(scp, &toffset, length, /* flags */ 0, userp, &req);
 
-       cm_BufWrite(&scp->fid, &toffset, length, /* flags */ 0, userp, &req);
+       osi_Log4(afsd_logp, "Finished BKG store scp 0x%p, offset 0x%x:%08x, code 0x%x", scp, p2, p1, code);
+    }
 
-       lock_ObtainMutex(&scp->mx);
-       cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
-    lock_ReleaseMutex(&scp->mx);
+    /*
+     * Keep the following list synchronized with the
+     * error code list in cm_BkgDaemon
+     */
+    switch ( code ) {
+    case CM_ERROR_TIMEDOUT: /* or server restarting */
+    case CM_ERROR_RETRY:
+    case CM_ERROR_WOULDBLOCK:
+    case CM_ERROR_ALLBUSY:
+    case CM_ERROR_ALLDOWN:
+    case CM_ERROR_ALLOFFLINE:
+    case CM_ERROR_PARTIALWRITE:
+        break;  /* cm_BkgDaemon will re-insert the request in the queue */
+    case 0:
+    default:
+        lock_ObtainWrite(&scp->rw);
+        cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_ASYNCSTORE);
+        lock_ReleaseWrite(&scp->rw);
+    }
+    return code;
 }
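
The switch at the end of cm_BkgStore() classifies the store result: the listed transient errors leave the request on the queue so cm_BkgDaemon can re-issue it, while success or any other error completes the CM_SCACHESYNC_ASYNCSTORE synchronization. A sketch of that classification as a helper (hypothetical function name; the CM_ERROR_* constants come from the cache manager headers):

    /* keep this list in sync with the error list in cm_BkgDaemon */
    static int cm_BkgStoreIsTransient(long code)
    {
        switch (code) {
        case CM_ERROR_TIMEDOUT:         /* or server restarting */
        case CM_ERROR_RETRY:
        case CM_ERROR_WOULDBLOCK:
        case CM_ERROR_ALLBUSY:
        case CM_ERROR_ALLDOWN:
        case CM_ERROR_ALLOFFLINE:
        case CM_ERROR_PARTIALWRITE:
            return 1;                   /* leave queued for a later retry */
        default:
            return 0;                   /* done; complete ASYNCSTORE */
        }
    }
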
 
-void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base)
+/* Called with scp locked */
+void cm_ClearPrefetchFlag(long code, cm_scache_t *scp, osi_hyper_t *base, osi_hyper_t *length)
 {
-       osi_hyper_t thyper;
-
-       if (code == 0) {
-               thyper.LowPart = cm_chunkSize;
-               thyper.HighPart = 0;
-               thyper =  LargeIntegerAdd(*base, thyper);
-               thyper.LowPart &= (-cm_chunkSize);
-               if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
+    osi_hyper_t end;
+
+    if (code == 0) {
+        end =  LargeIntegerAdd(*base, *length);
+        if (LargeIntegerGreaterThan(*base, scp->prefetch.base))
             scp->prefetch.base = *base;
-               if (LargeIntegerGreaterThan(thyper, scp->prefetch.end))
-            scp->prefetch.end = thyper;
-       }
-       scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
+        if (LargeIntegerGreaterThan(end, scp->prefetch.end))
+            scp->prefetch.end = end;
+    }
+    scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
 }
 
-/* do the prefetch */
-void cm_BkgPrefetch(cm_scache_t *scp, long p1, long p2, long p3, long p4,
-       cm_user_t *userp)
+/* do the prefetch.  if the prefetch fails, return 0 (success)
+ * because there is no harm done.  */
+afs_int32
+cm_BkgPrefetch(cm_scache_t *scp, afs_uint32 p1, afs_uint32 p2, afs_uint32 p3, afs_uint32 p4,
+              cm_user_t *userp)
 {
-       long length;
+    osi_hyper_t length;
     osi_hyper_t base;
-    long code;
-    cm_buf_t *bp;
-       int cpff = 0;                   /* cleared prefetch flag */
-       cm_req_t req;
-
-       cm_InitReq(&req);
-       req.flags |= CM_REQ_NORETRY;
-        
-       base.LowPart = p1;
+    osi_hyper_t offset;
+    osi_hyper_t end;
+    osi_hyper_t fetched;
+    osi_hyper_t tblocksize;
+    afs_int32 code;
+    int rxheld = 0;
+    cm_buf_t *bp = NULL;
+    cm_req_t req;
+
+    cm_InitReq(&req);
+
+    /* Retries will be performed by the BkgDaemon thread if appropriate */
+    req.flags |= CM_REQ_NORETRY;
+
+    fetched.LowPart = 0;
+    fetched.HighPart = 0;
+    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
+    base.LowPart = p1;
     base.HighPart = p2;
-    length = p3;
-        
-       osi_Log2(afsd_logp, "Starting BKG prefetch vp 0x%x, base 0x%x", scp, p1);
+    length.LowPart = p3;
+    length.HighPart = p4;
 
-    code = buf_Get(scp, &base, &bp);
+    end = LargeIntegerAdd(base, length);
 
-       lock_ObtainMutex(&scp->mx);
+    osi_Log5(afsd_logp, "Starting BKG prefetch scp 0x%p offset 0x%x:%x length 0x%x:%x",
+             scp, p2, p1, p4, p3);
 
-    if (code || (bp->cmFlags & CM_BUF_CMFETCHING)) {
-               scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
-               lock_ReleaseMutex(&scp->mx);
-               return;
-       }
+    for ( code = 0, offset = base;
+          code == 0 && LargeIntegerLessThan(offset, end);
+          offset = LargeIntegerAdd(offset, tblocksize) )
+    {
+        if (rxheld) {
+            lock_ReleaseWrite(&scp->rw);
+            rxheld = 0;
+        }
 
-    code = cm_GetBuffer(scp, bp, &cpff, userp, &req);
-       if (!cpff) 
-        cm_ClearPrefetchFlag(code, scp, &base);
-       lock_ReleaseMutex(&scp->mx);
-    buf_Release(bp);
-    return;
+        code = buf_Get(scp, &offset, &req, &bp);
+        if (code)
+            break;
+
+        if (bp->cmFlags & CM_BUF_CMFETCHING) {
+            /* skip this buffer as another thread is already fetching it */
+            if (!rxheld) {
+                lock_ObtainWrite(&scp->rw);
+                rxheld = 1;
+            }
+            bp->cmFlags &= ~CM_BUF_CMBKGFETCH;
+            buf_Release(bp);
+            bp = NULL;
+            continue;
+        }
+
+        if (!rxheld) {
+            lock_ObtainWrite(&scp->rw);
+            rxheld = 1;
+        }
+
+        code = cm_GetBuffer(scp, bp, NULL, userp, &req);
+        if (code == 0)
+            fetched = LargeIntegerAdd(fetched, tblocksize);
+        buf_Release(bp);
+        bp->cmFlags &= ~CM_BUF_CMBKGFETCH;
+    }
+
+    if (!rxheld) {
+        lock_ObtainWrite(&scp->rw);
+        rxheld = 1;
+    }
+
+    /* Clear flag from any remaining buffers */
+    for ( ;
+          LargeIntegerLessThan(offset, end);
+          offset = LargeIntegerAdd(offset, tblocksize) )
+    {
+        bp = buf_Find(scp, &offset);
+        if (bp) {
+            bp->cmFlags &= ~CM_BUF_CMBKGFETCH;
+            buf_Release(bp);
+        }
+    }
+    cm_ClearPrefetchFlag(LargeIntegerGreaterThanZero(fetched) ? 0 : code,
+                         scp, &base, &fetched);
+
+    /* wakeup anyone who is waiting */
+    if (scp->flags & CM_SCACHEFLAG_WAITING) {
+        osi_Log1(afsd_logp, "CM BkgPrefetch Waking scp 0x%p", scp);
+        osi_Wakeup((LONG_PTR) &scp->flags);
+    }
+    lock_ReleaseWrite(&scp->rw);
+
+    osi_Log4(afsd_logp, "Ending BKG prefetch scp 0x%p code 0x%x fetched 0x%x:%x",
+             scp, code, fetched.HighPart, fetched.LowPart);
+    return code;
 }
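
cm_BkgPrefetch() now iterates over the whole requested range rather than a single buffer: it steps through the range one cache block at a time, skips blocks another thread is already fetching, stops at the first hard error, and records how much data it actually brought in before waking any waiters. A standalone sketch of that loop shape (illustrative callback types, not OpenAFS APIs):

    #include <stdint.h>

    typedef int  (*block_in_flight_fn)(int64_t offset, void *ctx);
    typedef long (*fetch_block_fn)(int64_t offset, void *ctx);

    long prefetch_range(int64_t base, int64_t length, int64_t blockSize,
                        block_in_flight_fn in_flight, fetch_block_fn fetch,
                        void *ctx, int64_t *fetchedp)
    {
        int64_t offset, end = base + length;
        long code = 0;

        *fetchedp = 0;
        for (offset = base; code == 0 && offset < end; offset += blockSize) {
            if (in_flight(offset, ctx))
                continue;               /* someone else owns this block */
            code = fetch(offset, ctx);
            if (code == 0)
                *fetchedp += blockSize; /* tally what was actually fetched */
        }
        return code;
    }
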
 
 /* a read was issued to offsetp, and we have to determine whether we should
- * do a prefetch.
+ * do a prefetch of the next chunk.
  */
-void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp,
-       cm_user_t *userp, cm_req_t *reqp)
+void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp, afs_uint32 count,
+                         cm_user_t *userp, cm_req_t *reqp)
 {
-       long code;
+    long code;
+    int  rwheld = 0;
     osi_hyper_t realBase;
     osi_hyper_t readBase;
-        
+    osi_hyper_t readLength;
+    osi_hyper_t readEnd;
+    osi_hyper_t offset;
+    osi_hyper_t tblocksize;            /* a long long temp variable */
+    cm_buf_t    *bp;
+
+    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
+
     readBase = *offsetp;
-       /* round up to chunk boundary */
-       readBase.LowPart += (cm_chunkSize-1);
-       readBase.LowPart &= (-cm_chunkSize);
+    /* round up to chunk boundary */
+    readBase.LowPart += (cm_chunkSize-1);
+    readBase.LowPart &= (-cm_chunkSize);
+
+    readLength = ConvertLongToLargeInteger(count);
 
-       lock_ObtainMutex(&scp->mx);
-       if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
+    lock_ObtainWrite(&scp->rw);
+    rwheld = 1;
+    if ((scp->flags & CM_SCACHEFLAG_PREFETCHING)
          || LargeIntegerLessThanOrEqualTo(readBase, scp->prefetch.base)) {
-               lock_ReleaseMutex(&scp->mx);
+        lock_ReleaseWrite(&scp->rw);
         return;
-       }
-       scp->flags |= CM_SCACHEFLAG_PREFETCHING;
+    }
+    scp->flags |= CM_SCACHEFLAG_PREFETCHING;
 
-       /* start the scan at the latter of the end of this read or
+    /* start the scan at the latter of the end of this read or
      * the end of the last fetched region.
      */
-       if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
+    if (LargeIntegerGreaterThan(scp->prefetch.end, readBase))
         readBase = scp->prefetch.end;
 
-    lock_ReleaseMutex(&scp->mx);
-
-    code = cm_CheckFetchRange(scp, &readBase, cm_chunkSize, userp, reqp,
-                                  &realBase);
-       if (code) 
+    code = cm_CheckFetchRange(scp, &readBase, &readLength, userp, reqp,
+                              &realBase);
+    if (code) {
+        scp->flags &= ~CM_SCACHEFLAG_PREFETCHING;
+        lock_ReleaseWrite(&scp->rw);
         return;        /* can't find something to prefetch */
+    }
+
+    readEnd = LargeIntegerAdd(realBase, readLength);
+
+    /*
+     * Mark each buffer in the range as queued for a
+     * background fetch
+     */
+    for ( offset = realBase;
+          LargeIntegerLessThan(offset, readEnd);
+          offset = LargeIntegerAdd(offset, tblocksize) )
+    {
+        if (rwheld) {
+            lock_ReleaseWrite(&scp->rw);
+            rwheld = 0;
+        }
+
+        bp = buf_Find(scp, &offset);
+        if (!bp)
+            continue;
+
+        if (!rwheld) {
+            lock_ObtainWrite(&scp->rw);
+            rwheld = 1;
+        }
+
+        bp->cmFlags |= CM_BUF_CMBKGFETCH;
+        buf_Release(bp);
+    }
+
+    if (rwheld)
+        lock_ReleaseWrite(&scp->rw);
 
-    osi_Log2(afsd_logp, "BKG Prefetch request vp 0x%x, base 0x%x",
+    osi_Log2(afsd_logp, "BKG Prefetch request scp 0x%p, base 0x%x",
              scp, realBase.LowPart);
 
-    cm_QueueBKGRequest(scp, cm_BkgPrefetch, realBase.LowPart,
-                       realBase.HighPart, cm_chunkSize, 0, userp);
+    cm_QueueBKGRequest(scp, cm_BkgPrefetch,
+                       realBase.LowPart, realBase.HighPart,
+                       readLength.LowPart, readLength.HighPart,
+                       userp);
 }
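
Two details of the prefetch request are worth calling out. First, the round-up of readBase to a chunk boundary relies on cm_chunkSize being a power of two: adding (chunkSize - 1) and masking with -chunkSize rounds an offset up to the next chunk. Second, every buffer in the chosen range is tagged CM_BUF_CMBKGFETCH before the request is queued, which is how cm_BkgPrefetch() and cm_CheckFetchRange() later recognize blocks that are already scheduled. A standalone demonstration of the rounding trick:

    #include <stdio.h>

    /* chunkSize must be a power of two; & -chunkSize is the same mask as
     * & ~(chunkSize - 1) in two's-complement arithmetic */
    static unsigned long round_up_to_chunk(unsigned long offset,
                                           unsigned long chunkSize)
    {
        return (offset + (chunkSize - 1)) & -chunkSize;
    }

    int main(void)
    {
        printf("%#lx\n", round_up_to_chunk(0x12345, 0x10000));  /* 0x20000 */
        printf("%#lx\n", round_up_to_chunk(0x20000, 0x10000));  /* 0x20000 */
        return 0;
    }
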
 
 /* scp must be locked; temporarily unlocked during processing.
@@ -594,7 +965,7 @@ void cm_ConsiderPrefetch(cm_scache_t *scp, osi_hyper_t *offsetp,
  * is being written out.
  */
 long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
-       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
+                       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
 {
     cm_buf_t *bufp;
     osi_queueData_t *qdp;
@@ -606,203 +977,219 @@ long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
     long temp;
     long code;
     long flags;                        /* flags to cm_SyncOp */
-        
-       /* clear things out */
-       biop->scp = scp;                /* don't hold */
+
+    /* clear things out */
+    biop->scp = scp;                   /* do not hold; held by caller */
     biop->offset = *inOffsetp;
     biop->length = 0;
     biop->bufListp = NULL;
     biop->bufListEndp = NULL;
-       biop->reserved = 0;
+    biop->reserved = 0;
 
-       /* reserve a chunk's worth of buffers */
-       lock_ReleaseMutex(&scp->mx);
-       buf_ReserveBuffers(cm_chunkSize / buf_bufferSize);
-       lock_ObtainMutex(&scp->mx);
+    /* reserve a chunk's worth of buffers */
+    lock_ReleaseWrite(&scp->rw);
+    buf_ReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
+    lock_ObtainWrite(&scp->rw);
 
     bufp = NULL;
-    for(temp = 0; temp < inSize; temp += buf_bufferSize, bufp = NULL) {
-               thyper.HighPart = 0;
-               thyper.LowPart = temp;
+    for (temp = 0; temp < inSize; temp += cm_data.buf_blockSize) {
+        thyper = ConvertLongToLargeInteger(temp);
         tbase = LargeIntegerAdd(*inOffsetp, thyper);
 
         bufp = buf_Find(scp, &tbase);
         if (bufp) {
-                       /* get buffer mutex and scp mutex safely */
-                       lock_ReleaseMutex(&scp->mx);
-                       lock_ObtainMutex(&bufp->mx);
-                       lock_ObtainMutex(&scp->mx);
-
-                       flags = CM_SCACHESYNC_NEEDCALLBACK
-                               | CM_SCACHESYNC_GETSTATUS
-                                | CM_SCACHESYNC_STOREDATA
-                                | CM_SCACHESYNC_BUFLOCKED;
-                       code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags); 
+            /* get buffer mutex and scp mutex safely */
+            lock_ReleaseWrite(&scp->rw);
+            lock_ObtainMutex(&bufp->mx);
+
+            /*
+             * if the buffer is actively involved in I/O
+             * we wait for the I/O to complete.
+             */
+            if (bufp->flags & (CM_BUF_WRITING|CM_BUF_READING))
+                buf_WaitIO(scp, bufp);
+
+            lock_ObtainWrite(&scp->rw);
+            flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_STOREDATA | CM_SCACHESYNC_BUFLOCKED;
+            code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
             if (code) {
-                               lock_ReleaseMutex(&bufp->mx);
+                lock_ReleaseMutex(&bufp->mx);
                 buf_Release(bufp);
-                               buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
+                bufp = NULL;
+                buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
                 return code;
             }
-                        
-                       /* if the buffer is dirty, we're done */
+
+            /* if the buffer is dirty, we're done */
             if (bufp->flags & CM_BUF_DIRTY) {
                 osi_assertx(!(bufp->flags & CM_BUF_WRITING),
                             "WRITING w/o CMSTORING in SetupStoreBIOD");
-                               bufp->flags |= CM_BUF_WRITING;
-                               break;
+                bufp->flags |= CM_BUF_WRITING;
+                break;
             }
 
-                       /* this buffer is clean, so there's no reason to process it */
-                       cm_SyncOpDone(scp, bufp, flags);
-                       lock_ReleaseMutex(&bufp->mx);
-                       buf_Release(bufp);
+            /* this buffer is clean, so there's no reason to process it */
+            cm_SyncOpDone(scp, bufp, flags);
+            lock_ReleaseMutex(&bufp->mx);
+            buf_Release(bufp);
+           bufp = NULL;
         }
     }
 
-       biop->reserved = 1;
-        
+    biop->reserved = 1;
+
     /* if we get here, if bufp is null, we didn't find any dirty buffers
-        * that weren't already being stored back, so we just quit now.
+     * that weren't already being stored back, so we just quit now.
      */
-        if (!bufp) {
-               return 0;
-       }
+    if (!bufp) {
+        return 0;
+    }
+
+    /* don't need buffer mutex any more */
+    lock_ReleaseMutex(&bufp->mx);
 
-       /* don't need buffer mutex any more */
-       lock_ReleaseMutex(&bufp->mx);
-        
-       /* put this element in the list */
+    /* put this element in the list */
     qdp = osi_QDAlloc();
     osi_SetQData(qdp, bufp);
-       /* don't have to hold bufp, since held by buf_Find above */
+    /* don't have to hold bufp, since held by buf_Find above */
     osi_QAddH((osi_queue_t **) &biop->bufListp,
               (osi_queue_t **) &biop->bufListEndp,
               &qdp->q);
-    biop->length = buf_bufferSize;
+    biop->length = cm_data.buf_blockSize;
     firstModOffset = bufp->offset;
     biop->offset = firstModOffset;
+    bufp = NULL;       /* this buffer and reference added to the queue */
 
-       /* compute the window surrounding *inOffsetp of size cm_chunkSize */
-       scanStart = *inOffsetp;
+    /* compute the window surrounding *inOffsetp of size cm_chunkSize */
+    scanStart = *inOffsetp;
     scanStart.LowPart &= (-cm_chunkSize);
-       thyper.LowPart = cm_chunkSize;
-    thyper.HighPart = 0;
-       scanEnd = LargeIntegerAdd(scanStart, thyper);
+    thyper = ConvertLongToLargeInteger(cm_chunkSize);
+    scanEnd = LargeIntegerAdd(scanStart, thyper);
 
-       flags = CM_SCACHESYNC_NEEDCALLBACK
-               | CM_SCACHESYNC_GETSTATUS
+    flags = CM_SCACHESYNC_GETSTATUS
         | CM_SCACHESYNC_STOREDATA
         | CM_SCACHESYNC_BUFLOCKED
         | CM_SCACHESYNC_NOWAIT;
 
-       /* start by looking backwards until scanStart */
-       thyper.HighPart = 0;            /* hyper version of buf_bufferSize */
-    thyper.LowPart = buf_bufferSize;
-       tbase = LargeIntegerSubtract(firstModOffset, thyper);
+    /* start by looking backwards until scanStart */
+    /* hyper version of cm_data.buf_blockSize */
+    thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
+    tbase = LargeIntegerSubtract(firstModOffset, thyper);
     while(LargeIntegerGreaterThanOrEqualTo(tbase, scanStart)) {
         /* see if we can find the buffer */
-               bufp = buf_Find(scp, &tbase);
-        if (!bufp) 
+        bufp = buf_Find(scp, &tbase);
+        if (!bufp)
             break;
 
-               /* try to lock it, and quit if we can't (simplifies locking) */
+        /* try to lock it, and quit if we can't (simplifies locking) */
+        lock_ReleaseWrite(&scp->rw);
         code = lock_TryMutex(&bufp->mx);
+        lock_ObtainWrite(&scp->rw);
         if (code == 0) {
-                       buf_Release(bufp);
+            buf_Release(bufp);
+           bufp = NULL;
             break;
         }
-                
+
         code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
         if (code) {
-                       lock_ReleaseMutex(&bufp->mx);
-                       buf_Release(bufp);
+            lock_ReleaseMutex(&bufp->mx);
+            buf_Release(bufp);
+           bufp = NULL;
             break;
         }
-                
-               if (!(bufp->flags & CM_BUF_DIRTY)) {
-                       /* buffer is clean, so we shouldn't add it */
-                       cm_SyncOpDone(scp, bufp, flags);
-                       lock_ReleaseMutex(&bufp->mx);
-                       buf_Release(bufp);
+
+        if (!(bufp->flags & CM_BUF_DIRTY)) {
+            /* buffer is clean, so we shouldn't add it */
+            cm_SyncOpDone(scp, bufp, flags);
+            lock_ReleaseMutex(&bufp->mx);
+            buf_Release(bufp);
+           bufp = NULL;
             break;
         }
 
-               /* don't need buffer mutex any more */
-               lock_ReleaseMutex(&bufp->mx);
+        /* don't need buffer mutex any more */
+        lock_ReleaseMutex(&bufp->mx);
 
         /* we have a dirty buffer ready for storing.  Add it to the tail
          * of the list, since it immediately precedes all of the disk
          * addresses we've already collected.
          */
-               qdp = osi_QDAlloc();
+        qdp = osi_QDAlloc();
         osi_SetQData(qdp, bufp);
         /* no buf_hold necessary, since we have it held from buf_Find */
         osi_QAddT((osi_queue_t **) &biop->bufListp,
                   (osi_queue_t **) &biop->bufListEndp,
                   &qdp->q);
+       bufp = NULL;            /* added to the queue */
 
-               /* update biod info describing the transfer */
+        /* update biod info describing the transfer */
         biop->offset = LargeIntegerSubtract(biop->offset, thyper);
-        biop->length += buf_bufferSize;
+        biop->length += cm_data.buf_blockSize;
 
         /* update loop pointer */
         tbase = LargeIntegerSubtract(tbase, thyper);
     }  /* while loop looking for pages preceding the one we found */
 
-       /* now, find later dirty, contiguous pages, and add them to the list */
-       thyper.HighPart = 0;            /* hyper version of buf_bufferSize */
-    thyper.LowPart = buf_bufferSize;
-       tbase = LargeIntegerAdd(firstModOffset, thyper);
+    /* now, find later dirty, contiguous pages, and add them to the list */
+    /* hyper version of cm_data.buf_blockSize */
+    thyper = ConvertLongToLargeInteger(cm_data.buf_blockSize);
+    tbase = LargeIntegerAdd(firstModOffset, thyper);
     while(LargeIntegerLessThan(tbase, scanEnd)) {
-               /* see if we can find the buffer */
-               bufp = buf_Find(scp, &tbase);
-        if (!bufp) 
+        /* see if we can find the buffer */
+        bufp = buf_Find(scp, &tbase);
+        if (!bufp)
             break;
 
-               /* try to lock it, and quit if we can't (simplifies locking) */
+        /* try to lock it, and quit if we can't (simplifies locking) */
+        lock_ReleaseWrite(&scp->rw);
         code = lock_TryMutex(&bufp->mx);
+        lock_ObtainWrite(&scp->rw);
         if (code == 0) {
-                       buf_Release(bufp);
+            buf_Release(bufp);
+           bufp = NULL;
             break;
         }
 
         code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
         if (code) {
-                       lock_ReleaseMutex(&bufp->mx);
-                       buf_Release(bufp);
+            lock_ReleaseMutex(&bufp->mx);
+            buf_Release(bufp);
+           bufp = NULL;
             break;
         }
-                
-               if (!(bufp->flags & CM_BUF_DIRTY)) {
-                       /* buffer is clean, so we shouldn't add it */
-                       cm_SyncOpDone(scp, bufp, flags);
-                       lock_ReleaseMutex(&bufp->mx);
-                       buf_Release(bufp);
+
+        if (!(bufp->flags & CM_BUF_DIRTY)) {
+            /* buffer is clean, so we shouldn't add it */
+            cm_SyncOpDone(scp, bufp, flags);
+            lock_ReleaseMutex(&bufp->mx);
+            buf_Release(bufp);
+           bufp = NULL;
             break;
         }
 
-               /* don't need buffer mutex any more */
-               lock_ReleaseMutex(&bufp->mx);
+        /* don't need buffer mutex any more */
+        lock_ReleaseMutex(&bufp->mx);
 
         /* we have a dirty buffer ready for storing.  Add it to the head
          * of the list, since it immediately follows all of the disk
          * addresses we've already collected.
          */
-               qdp = osi_QDAlloc();
+        qdp = osi_QDAlloc();
         osi_SetQData(qdp, bufp);
         /* no buf_hold necessary, since we have it held from buf_Find */
         osi_QAddH((osi_queue_t **) &biop->bufListp,
                   (osi_queue_t **) &biop->bufListEndp,
                   &qdp->q);
+       bufp = NULL;
+
+        /* update biod info describing the transfer */
+        biop->length += cm_data.buf_blockSize;
 
-               /* update biod info describing the transfer */
-        biop->length += buf_bufferSize;
-                
         /* update loop pointer */
         tbase = LargeIntegerAdd(tbase, thyper);
     }  /* while loop looking for pages following the first page we found */
-       
+
     /* finally, we're done */
     return 0;
 }
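/*
 * Illustrative sketch (editor's note, not part of this change): the two
 * loops above grow the store request outward from the first modified
 * block, appending preceding dirty blocks at the tail and following
 * dirty blocks at the head of the queue so that one StoreData RPC covers
 * a contiguous byte range.  The standalone sketch below models that walk
 * with a hypothetical dirty[] bitmap and a fixed block size; every name
 * here is illustrative only.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_BLOCKSIZE 4096

struct sketch_range {
    uint64_t offset;    /* byte offset of the first block in the store */
    uint64_t length;    /* total bytes covered by the store */
};

/* Grow block "first" into the largest contiguous dirty run inside
 * [lo, hi); dirty[i] is nonzero when block i has been modified. */
static struct sketch_range
sketch_collect(const unsigned char *dirty, size_t lo, size_t hi, size_t first)
{
    struct sketch_range r;
    size_t start = first;
    size_t end = first + 1;

    while (start > lo && dirty[start - 1])      /* scan backward */
        start--;
    while (end < hi && dirty[end])              /* scan forward */
        end++;

    r.offset = (uint64_t)start * SKETCH_BLOCKSIZE;
    r.length = (uint64_t)(end - start) * SKETCH_BLOCKSIZE;
    return r;
}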
@@ -813,11 +1200,11 @@ long cm_SetupStoreBIOD(cm_scache_t *scp, osi_hyper_t *inOffsetp, long inSize,
  * If an error is returned, we don't return any buffers.
  */
 long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
-                       cm_bulkIO_t *biop, cm_user_t *up, cm_req_t *reqp)
+                       cm_bulkIO_t *biop, cm_user_t *userp, cm_req_t *reqp)
 {
-       long code;
+    long code;
     cm_buf_t *tbp;
-    osi_hyper_t toffset;               /* a long long temp variable */
+    osi_hyper_t tblocksize;            /* a long long temp variable */
     osi_hyper_t pageBase;              /* base offset we're looking at */
     osi_queueData_t *qdp;              /* one temp queue structure */
     osi_queueData_t *tqdp;             /* another temp queue structure */
@@ -827,556 +1214,1254 @@ long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp,
     osi_hyper_t fileSize;              /* the # of bytes in the file */
     osi_queueData_t *heldBufListp;     /* we hold all buffers in this list */
     osi_queueData_t *heldBufListEndp;  /* first one */
-       int reserving;
+    int reserving;
 
-    biop->scp = scp;
+    tblocksize = ConvertLongToLargeInteger(cm_data.buf_blockSize);
+
+    biop->scp = scp;                   /* do not hold; held by caller */
     biop->offset = *offsetp;
-       /* null out the list of buffers */
+    /* null out the list of buffers */
     biop->bufListp = biop->bufListEndp = NULL;
-       biop->reserved = 0;
+    biop->reserved = 0;
 
-       /* first lookup the file's length, so we know when to stop */
-    code = cm_SyncOp(scp, NULL, up, reqp, 0, CM_SCACHESYNC_NEEDCALLBACK
-                     | CM_SCACHESYNC_GETSTATUS);
-    if (code) 
+    /* first lookup the file's length, so we know when to stop */
+    code = cm_SyncOp(scp, NULL, userp, reqp, 0,
+                     CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+    if (code)
         return code;
-        
-       /* copy out size, since it may change */
+
+    /* copy out size, since it may change */
     fileSize = scp->serverLength;
-        
-    lock_ReleaseMutex(&scp->mx);
 
-       pageBase = *offsetp;
+    lock_ReleaseWrite(&scp->rw);
+
+    pageBase = *offsetp;
     collected = pageBase.LowPart & (cm_chunkSize - 1);
     heldBufListp = NULL;
     heldBufListEndp = NULL;
 
-       /*
-        * Obtaining buffers can cause dirty buffers to be recycled, which
-        * can cause a storeback, so cannot be done while we have buffers
-        * reserved.
-        *
-        * To get around this, we get buffers twice.  Before reserving buffers,
-        * we obtain and release each one individually.  After reserving
-        * buffers, we try to obtain them again, but only by lookup, not by
-        * recycling.  If a buffer has gone away while we were waiting for
-        * the others, we just use whatever buffers we already have.
-        *
-        * On entry to this function, we are already holding a buffer, so we
-        * can't wait for reservation.  So we call buf_TryReserveBuffers()
-        * instead.  Not only that, we can't really even call buf_Get(), for
-        * the same reason.  We can't avoid that, though.  To avoid deadlock
-        * we allow only one thread to be executing the buf_Get()-buf_Release()
-        * sequence at a time.
-        */
-
-       /* first hold all buffers, since we can't hold any locks in buf_Get */
+    /*
+     * Obtaining buffers can cause dirty buffers to be recycled, which
+     * can cause a storeback, so cannot be done while we have buffers
+     * reserved.
+     *
+     * To get around this, we get buffers twice.  Before reserving buffers,
+     * we obtain and release each one individually.  After reserving
+     * buffers, we try to obtain them again, but only by lookup, not by
+     * recycling.  If a buffer has gone away while we were waiting for
+     * the others, we just use whatever buffers we already have.
+     *
+     * On entry to this function, we are already holding a buffer, so we
+     * can't wait for reservation.  So we call buf_TryReserveBuffers()
+     * instead.  Not only that, we can't really even call buf_Get(), for
+     * the same reason.  We can't avoid that, though.  To avoid deadlock
+     * we allow only one thread to be executing the buf_Get()-buf_Release()
+     * sequence at a time.
+     */
+
+    /* first hold all buffers, since we can't hold any locks in buf_Get */
     while (1) {
-               /* stop at chunk boundary */
-               if (collected >= cm_chunkSize) break;
-                
-        /* see if the next page would be past EOF */
-        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize)) break;
+        /* stop at chunk boundary */
+        if (collected >= cm_chunkSize)
+            break;
 
-               lock_ObtainMutex(&cm_bufGetMutex);
+        /* see if the next page would be past EOF */
+        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
+            break;
 
-               code = buf_Get(scp, &pageBase, &tbp);
+        code = buf_Get(scp, &pageBase, reqp, &tbp);
         if (code) {
-                       lock_ReleaseMutex(&cm_bufGetMutex);
-                       lock_ObtainMutex(&scp->mx);
-                       return code;
-               }
-                
-               buf_Release(tbp);
+            lock_ObtainWrite(&scp->rw);
+            cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+            return code;
+        }
 
-               lock_ReleaseMutex(&cm_bufGetMutex);
+        buf_Release(tbp);
+        tbp = NULL;
 
-        toffset.HighPart = 0;
-        toffset.LowPart = buf_bufferSize;
-        pageBase = LargeIntegerAdd(toffset, pageBase);
-               collected += buf_bufferSize;
+        pageBase = LargeIntegerAdd(tblocksize, pageBase);
+        collected += cm_data.buf_blockSize;
     }
 
     /* reserve a chunk's worth of buffers if possible */
-       reserving = buf_TryReserveBuffers(cm_chunkSize / buf_bufferSize);
+    reserving = buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
 
-       pageBase = *offsetp;
+    pageBase = *offsetp;
     collected = pageBase.LowPart & (cm_chunkSize - 1);
 
-       /* now hold all buffers, if they are still there */
+    /* now hold all buffers, if they are still there */
     while (1) {
-               /* stop at chunk boundary */
-               if (collected >= cm_chunkSize) 
+        /* stop at chunk boundary */
+        if (collected >= cm_chunkSize)
             break;
-                
+
         /* see if the next page would be past EOF */
-        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize)) 
+        if (LargeIntegerGreaterThanOrEqualTo(pageBase, fileSize))
             break;
 
         tbp = buf_Find(scp, &pageBase);
-               if (!tbp) 
+        if (!tbp)
             break;
 
         /* add the buffer to the list */
-               qdp = osi_QDAlloc();
+        qdp = osi_QDAlloc();
         osi_SetQData(qdp, tbp);
-        osi_QAdd((osi_queue_t **)&heldBufListp, &qdp->q);
-        if (!heldBufListEndp) heldBufListEndp = qdp;
-               /* leave tbp held (from buf_Get) */
+        osi_QAddH((osi_queue_t **)&heldBufListp,
+                 (osi_queue_t **)&heldBufListEndp,
+                 &qdp->q);
+        /* leave tbp held (from buf_Get) */
 
-               if (!reserving) 
+        if (!reserving)
             break;
 
-        collected += buf_bufferSize;
-        toffset.HighPart = 0;
-        toffset.LowPart = buf_bufferSize;
-        pageBase = LargeIntegerAdd(toffset, pageBase);
+        collected += cm_data.buf_blockSize;
+        pageBase = LargeIntegerAdd(tblocksize, pageBase);
     }
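/*
 * Illustrative sketch (editor's note, not part of this change): the long
 * comment above describes a two-pass acquisition.  Pass one may recycle
 * buffers (and therefore trigger a storeback), so it holds nothing across
 * iterations; only after buf_TryReserveBuffers() succeeds does pass two
 * re-find the same blocks by lookup alone, stopping at the first miss.
 * The helpers below (cache_get, cache_lookup, cache_release, try_reserve)
 * are hypothetical stand-ins for the buf_* package, shown only to make
 * the ordering explicit.
 */
#include <stddef.h>

struct sketch_buf;
extern struct sketch_buf *cache_get(unsigned block);      /* may recycle */
extern struct sketch_buf *cache_lookup(unsigned block);   /* lookup only */
extern void cache_release(struct sketch_buf *bp);
extern int  try_reserve(unsigned nblocks);                /* non-blocking */

/* held[] must have room for nblocks entries; returns how many were kept. */
static size_t
sketch_two_pass(unsigned first, unsigned nblocks, struct sketch_buf **held)
{
    unsigned i;
    size_t nheld = 0;
    int reserving;

    /* pass 1: make each block resident, but hold nothing across steps */
    for (i = 0; i < nblocks; i++) {
        struct sketch_buf *bp = cache_get(first + i);
        if (!bp)
            break;
        cache_release(bp);
    }

    /* reserve before holding anything; proceed degraded if we cannot */
    reserving = try_reserve(nblocks);

    /* pass 2: re-find by lookup only; anything recycled in the meantime
     * is simply skipped, just as the loop above stops at the first miss */
    for (i = 0; i < nblocks; i++) {
        struct sketch_buf *bp = cache_lookup(first + i);
        if (!bp)
            break;
        held[nheld++] = bp;             /* still held from the lookup */
        if (!reserving)
            break;
    }
    return nheld;
}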
 
     /* look at each buffer, adding it into the list if it looks idle and
-        * filled with old data.  One special case: wait for idle if it is the
-        * first buffer since we really need that one for our caller to make
-        * any progress.
+     * filled with old data.  One special case: wait for idle if it is the
+     * first buffer since we really need that one for our caller to make
+     * any progress.
      */
     isFirst = 1;
     collected = 0;             /* now count how many we'll really use */
-       for(tqdp = heldBufListEndp;
+    for (tqdp = heldBufListEndp;
         tqdp;
-           tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
-               /* get a ptr to the held buffer */
-               tbp = osi_GetQData(tqdp);
+          tqdp = (osi_queueData_t *) osi_QPrev(&tqdp->q)) {
+        /* get a ptr to the held buffer */
+        tbp = osi_GetQData(tqdp);
         pageBase = tbp->offset;
 
-               /* now lock the buffer lock */
-               lock_ObtainMutex(&tbp->mx);
-               lock_ObtainMutex(&scp->mx);
+        /* now lock the buffer lock */
+        lock_ObtainMutex(&tbp->mx);
+        lock_ObtainWrite(&scp->rw);
 
-               /* don't bother fetching over data that is already current */
-               if (tbp->dataVersion == scp->dataVersion) {
-                       /* we don't need this buffer, since it is current */
-                       lock_ReleaseMutex(&scp->mx);
+        /* don't bother fetching over data that is already current */
+        if (tbp->dataVersion <= scp->dataVersion && tbp->dataVersion >= scp->bufDataVersionLow) {
+            /* we don't need this buffer, since it is current */
+            lock_ReleaseWrite(&scp->rw);
             lock_ReleaseMutex(&tbp->mx);
             break;
         }
 
-               flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_FETCHDATA
-                       | CM_SCACHESYNC_BUFLOCKED;
-               if (!isFirst) 
+        flags = CM_SCACHESYNC_FETCHDATA | CM_SCACHESYNC_BUFLOCKED;
+        if (!isFirst)
             flags |= CM_SCACHESYNC_NOWAIT;
 
-               /* wait for the buffer to serialize, if required.  Doesn't
-                * release the scp or buffer lock(s) if NOWAIT is specified.
+        /* wait for the buffer to serialize, if required.  Doesn't
+         * release the scp or buffer lock(s) if NOWAIT is specified.
          */
-               code = cm_SyncOp(scp, tbp, up, reqp, 0, flags);
+        code = cm_SyncOp(scp, tbp, userp, reqp, 0, flags);
         if (code) {
-                       lock_ReleaseMutex(&scp->mx);
-                       lock_ReleaseMutex(&tbp->mx);
+            lock_ReleaseWrite(&scp->rw);
+            lock_ReleaseMutex(&tbp->mx);
             break;
-               }
-                
-               /* don't fetch over dirty buffers */
+        }
+
+        /* don't fetch over dirty buffers */
         if (tbp->flags & CM_BUF_DIRTY) {
-                       cm_SyncOpDone(scp, tbp, flags);
-                       lock_ReleaseMutex(&scp->mx);
+            cm_SyncOpDone(scp, tbp, flags);
+            lock_ReleaseWrite(&scp->rw);
             lock_ReleaseMutex(&tbp->mx);
             break;
-               }
+        }
 
-               /* Release locks */
-               lock_ReleaseMutex(&scp->mx);
-               lock_ReleaseMutex(&tbp->mx);
+        /* Release locks */
+        lock_ReleaseWrite(&scp->rw);
+        lock_ReleaseMutex(&tbp->mx);
 
         /* add the buffer to the list */
-               qdp = osi_QDAlloc();
+        qdp = osi_QDAlloc();
         osi_SetQData(qdp, tbp);
-        osi_QAdd((osi_queue_t **)&biop->bufListp, &qdp->q);
-        if (!biop->bufListEndp) 
-            biop->bufListEndp = qdp;
-               buf_Hold(tbp);
+        osi_QAddH((osi_queue_t **)&biop->bufListp,
+                 (osi_queue_t **)&biop->bufListEndp,
+                 &qdp->q);
+        buf_Hold(tbp);
 
-               /* from now on, a failure just stops our collection process, but
+        /* from now on, a failure just stops our collection process, but
          * we still do the I/O to whatever we've already managed to collect.
          */
         isFirst = 0;
-        collected += buf_bufferSize;
+        collected += cm_data.buf_blockSize;
     }
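/*
 * Illustrative sketch (editor's note, not part of this change): the loop
 * above keeps a held buffer only if it is stale, i.e. only if its data
 * version lies outside the window [bufDataVersionLow, dataVersion], and
 * it blocks in cm_SyncOp only for the first buffer (later buffers use
 * CM_SCACHESYNC_NOWAIT so the caller can still make progress).  The
 * currentness predicate, written out on plain 64-bit values:
 */
#include <stdint.h>

/* Nonzero when a cached block stamped with buf_dv is still usable for a
 * file whose valid versions span [dv_low, dv_current]. */
static int
sketch_buf_is_current(uint64_t buf_dv, uint64_t dv_low, uint64_t dv_current)
{
    return buf_dv >= dv_low && buf_dv <= dv_current;
}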
-        
+
     /* now, we've held in biop->bufListp all the buffers we're really
-        * interested in.  We also have holds left from heldBufListp, and we
-        * now release those holds on the buffers.
+     * interested in.  We also have holds left from heldBufListp, and we
+     * now release those holds on the buffers.
      */
-       for(qdp = heldBufListp; qdp; qdp = tqdp) {
-               tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
-               tbp = osi_GetQData(qdp);
+    for (qdp = heldBufListp; qdp; qdp = tqdp) {
+        tqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
+        tbp = osi_GetQData(qdp);
+       osi_QRemoveHT((osi_queue_t **) &heldBufListp,
+                     (osi_queue_t **) &heldBufListEndp,
+                     &qdp->q);
         osi_QDFree(qdp);
         buf_Release(tbp);
+        tbp = NULL;
     }
 
-       /* Caller expects this */
-       lock_ObtainMutex(&scp->mx);
-       /* if we got a failure setting up the first buffer, then we don't have
+    /* Caller expects this */
+    lock_ObtainWrite(&scp->rw);
+
+    /* if we got a failure setting up the first buffer, then we don't have
      * any side effects yet, and we also have failed an operation that the
      * caller requires to make any progress.  Give up now.
      */
     if (code && isFirst) {
-               buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
-               return code;
-       }
-        
+        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
+        return code;
+    }
+
     /* otherwise, we're still OK, and should just return the I/O setup we've
      * got.
      */
-       biop->length = collected;
-       biop->reserved = reserving;
+    biop->length = collected;
+    biop->reserved = reserving;
     return 0;
 }
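/*
 * Illustrative sketch (editor's note, not part of this change):
 * cm_SetupFetchBIOD and cm_ReleaseBIOD behave as a strict pair; every
 * exit path taken after a successful setup must hand the descriptor back
 * so the FETCHDATA/STOREDATA sync state and the per-buffer holds are
 * dropped.  The general shape of that pairing, with hypothetical
 * setup_io()/do_io()/release_io() stand-ins:
 */
struct sketch_io;
extern int  setup_io(struct sketch_io *io);          /* acquires holds */
extern int  do_io(struct sketch_io *io);             /* may fail */
extern void release_io(struct sketch_io *io, int code);

static int
sketch_fetch(struct sketch_io *io)
{
    int code = setup_io(io);
    if (code)
        return code;            /* nothing acquired, nothing to release */

    code = do_io(io);

    /* released on success and on failure alike */
    release_io(io, code);
    return code;
}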
 
 /* release a bulk I/O structure that was setup by cm_SetupFetchBIOD or by
  * cm_SetupStoreBIOD
  */
-void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore)
+void cm_ReleaseBIOD(cm_bulkIO_t *biop, int isStore, long code, int scp_locked)
 {
-       cm_scache_t *scp;
+    cm_scache_t *scp;          /* do not release; not held in biop */
     cm_buf_t *bufp;
     osi_queueData_t *qdp;
     osi_queueData_t *nqdp;
     int flags;
 
-       /* Give back reserved buffers */
-       if (biop->reserved)
-               buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
-        
-       flags = CM_SCACHESYNC_NEEDCALLBACK;
-    if (isStore)
-        flags |= CM_SCACHESYNC_STOREDATA;
-       else
-               flags |= CM_SCACHESYNC_FETCHDATA;
-
-       scp = biop->scp;
-    for(qdp = biop->bufListp; qdp; qdp = nqdp) {
-               /* lookup next guy first, since we're going to free this one */
-               nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
-                
-               /* extract buffer and free queue data */
-        bufp = osi_GetQData(qdp);
-        osi_QDFree(qdp);
+    /* Give back reserved buffers */
+    if (biop->reserved)
+        buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
 
-        /* now, mark I/O as done, unlock the buffer and release it */
-               lock_ObtainMutex(&bufp->mx);
-               lock_ObtainMutex(&scp->mx);
-        cm_SyncOpDone(scp, bufp, flags);
-               lock_ReleaseMutex(&scp->mx);
-                
-               /* turn off writing and wakeup users */
-        if (isStore) {
-            if (bufp->flags & CM_BUF_WAITING) {
-                               osi_Wakeup((long) bufp);
-            }
-                       bufp->flags &= ~(CM_BUF_WAITING | CM_BUF_WRITING
-                             | CM_BUF_DIRTY);
-        }
+    if (isStore)
+        flags = CM_SCACHESYNC_STOREDATA;
+    else
+        flags = CM_SCACHESYNC_FETCHDATA;
+
+    scp = biop->scp;
+    if (biop->bufListp) {
+       for(qdp = biop->bufListp; qdp; qdp = nqdp) {
+           /* lookup next guy first, since we're going to free this one */
+           nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
+
+           /* extract buffer and free queue data */
+           bufp = osi_GetQData(qdp);
+           osi_QRemoveHT((osi_queue_t **) &biop->bufListp,
+                          (osi_queue_t **) &biop->bufListEndp,
+                          &qdp->q);
+           osi_QDFree(qdp);
+
+           /* now, mark I/O as done, unlock the buffer and release it */
+            if (scp_locked)
+                lock_ReleaseWrite(&scp->rw);
+           lock_ObtainMutex(&bufp->mx);
+           lock_ObtainWrite(&scp->rw);
+           cm_SyncOpDone(scp, bufp, flags);
+
+           /* turn off writing and wakeup users */
+           if (isStore) {
+               if (bufp->flags & CM_BUF_WAITING) {
+                   osi_Log2(afsd_logp, "cm_ReleaseBIOD Waking [scp 0x%p] bp 0x%p", scp, bufp);
+                   osi_Wakeup((LONG_PTR) bufp);
+               }
+               if (code) {
+                   bufp->flags &= ~CM_BUF_WRITING;
+                    switch (code) {
+                    case CM_ERROR_NOSUCHFILE:
+                    case CM_ERROR_BADFD:
+                    case CM_ERROR_NOACCESS:
+                    case CM_ERROR_QUOTA:
+                    case CM_ERROR_SPACE:
+                    case CM_ERROR_TOOBIG:
+                    case CM_ERROR_READONLY:
+                    case CM_ERROR_NOSUCHPATH:
+                        /*
+                         * Apply the fatal error to this buffer.
+                         */
+                        bufp->flags &= ~CM_BUF_DIRTY;
+                        bufp->flags |= CM_BUF_ERROR;
+                        bufp->dirty_offset = 0;
+                        bufp->dirty_length = 0;
+                        bufp->error = code;
+                        bufp->dataVersion = CM_BUF_VERSION_BAD;
+                        bufp->dirtyCounter++;
+                        break;
+                    case CM_ERROR_TIMEDOUT:
+                    case CM_ERROR_ALLDOWN:
+                    case CM_ERROR_ALLBUSY:
+                    case CM_ERROR_ALLOFFLINE:
+                    case CM_ERROR_CLOCKSKEW:
+                    default:
+                        /* do not mark the buffer in an error state, but do
+                         * not attempt to complete the rest either.
+                         */
+                        break;
+                    }
+               } else {
+                   bufp->flags &= ~(CM_BUF_WRITING | CM_BUF_DIRTY);
+                    bufp->dirty_offset = bufp->dirty_length = 0;
+                }
+           }
 
-        lock_ReleaseMutex(&bufp->mx);
-        buf_Release(bufp);
+            if (!scp_locked)
+                lock_ReleaseWrite(&scp->rw);
+           lock_ReleaseMutex(&bufp->mx);
+           buf_Release(bufp);
+           bufp = NULL;
+       }
+    } else {
+       if (!scp_locked)
+            lock_ObtainWrite(&scp->rw);
+       cm_SyncOpDone(scp, NULL, flags);
+        if (!scp_locked)
+            lock_ReleaseWrite(&scp->rw);
     }
 
     /* clean things out */
     biop->bufListp = NULL;
     biop->bufListEndp = NULL;
-}   
+}
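/*
 * Illustrative sketch (editor's note, not part of this change): the
 * switch above divides store failures into two classes.  Permanent
 * errors (no such file, no access, quota, ...) are applied to the buffer
 * itself -- it is marked CM_BUF_ERROR, its dirty range is cleared, and
 * its data version is invalidated -- while transient errors (timeouts,
 * all servers busy/offline, clock skew) leave the buffer dirty so a
 * later store can retry.  A minimal classifier over hypothetical,
 * illustrative error codes:
 */
enum sketch_disposition {
    SKETCH_RETRY_LATER,     /* keep the buffer dirty; retry the store */
    SKETCH_FAIL_BUFFER      /* mark the buffer in error and drop the data */
};

static enum sketch_disposition
sketch_classify(int code)
{
    switch (code) {
    case -2:    /* e.g. "no such file"  (placeholder value) */
    case -3:    /* e.g. "no access"     (placeholder value) */
    case -4:    /* e.g. "over quota"    (placeholder value) */
        return SKETCH_FAIL_BUFFER;
    case -20:   /* e.g. "timed out"     (placeholder value) */
    case -21:   /* e.g. "all busy"      (placeholder value) */
    default:
        return SKETCH_RETRY_LATER;
    }
}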
+
+static int
+cm_CloneStatus(cm_scache_t *scp, cm_user_t *userp, int scp_locked,
+               AFSFetchStatus *afsStatusp, AFSVolSync *volSyncp)
+{
+    // setup the status based upon the scp data
+    afsStatusp->InterfaceVersion = 0x1;
+    switch (scp->fileType) {
+    case CM_SCACHETYPE_FILE:
+        afsStatusp->FileType = File;
+        break;
+    case CM_SCACHETYPE_DIRECTORY:
+        afsStatusp->FileType = Directory;
+        break;
+    case CM_SCACHETYPE_MOUNTPOINT:
+        afsStatusp->FileType = SymbolicLink;
+        break;
+    case CM_SCACHETYPE_SYMLINK:
+    case CM_SCACHETYPE_DFSLINK:
+        afsStatusp->FileType = SymbolicLink;
+        break;
+    default:
+        afsStatusp->FileType = -1;    /* an invalid value */
+    }
+    afsStatusp->LinkCount = scp->linkCount;
+    afsStatusp->Length = scp->length.LowPart;
+    afsStatusp->DataVersion = (afs_uint32)(scp->dataVersion & MAX_AFS_UINT32);
+    afsStatusp->Author = 0x1;
+    afsStatusp->Owner = scp->owner;
+    if (!scp_locked) {
+        lock_ObtainWrite(&scp->rw);
+        scp_locked = 1;
+    }
+    if (cm_FindACLCache(scp, userp, &afsStatusp->CallerAccess))
+        afsStatusp->CallerAccess = scp->anyAccess;
+    afsStatusp->AnonymousAccess = scp->anyAccess;
+    afsStatusp->UnixModeBits = scp->unixModeBits;
+    afsStatusp->ParentVnode = scp->parentVnode;
+    afsStatusp->ParentUnique = scp->parentUnique;
+    afsStatusp->ResidencyMask = 0;
+    afsStatusp->ClientModTime = scp->clientModTime;
+    afsStatusp->ServerModTime = scp->serverModTime;
+    afsStatusp->Group = scp->group;
+    afsStatusp->SyncCounter = 0;
+    afsStatusp->dataVersionHigh = (afs_uint32)(scp->dataVersion >> 32);
+    afsStatusp->lockCount = 0;
+    afsStatusp->Length_hi = scp->length.HighPart;
+    afsStatusp->errorCode = 0;
+
+    volSyncp->spare1 = scp->volumeCreationDate;
+
+    return scp_locked;
+}
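/*
 * Illustrative sketch (editor's note, not part of this change):
 * cm_CloneStatus takes the caller's lock state as an argument and returns
 * the possibly-changed state, acquiring scp->rw only when it needs the
 * ACL cache.  The pattern, reduced to a hypothetical lock and a guarded
 * value:
 */
extern void sketch_lock(void);
extern void sketch_unlock(void);

/* Returns the new "locked" state so the caller can balance the lock. */
static int
sketch_fill(int locked, int *out_value, const int *guarded_value)
{
    if (!locked) {
        sketch_lock();
        locked = 1;
    }
    *out_value = *guarded_value;    /* read that requires the lock */
    return locked;                  /* caller releases when it is done */
}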
 
 /* Fetch a buffer.  Called with scp locked.
  * The scp is locked on return.
  */
-long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *up,
-       cm_req_t *reqp)
+long cm_GetBuffer(cm_scache_t *scp, cm_buf_t *bufp, int *cpffp, cm_user_t *userp,
+                  cm_req_t *reqp)
 {
-       long code;
-    long nbytes;                       /* bytes in transfer */
+    long code=0, code1=0;
+    afs_uint32 nbytes;                 /* bytes in transfer */
+    afs_uint32 nbytes_hi = 0;            /* high-order 32 bits of bytes in transfer */
+    afs_uint64 length_found = 0;
     long rbytes;                       /* bytes in rx_Read call */
     long temp;
     AFSFetchStatus afsStatus;
     AFSCallBack callback;
     AFSVolSync volSync;
     char *bufferp;
-    cm_buf_t *tbufp;           /* buf we're filling */
+    afs_uint32 buffer_offset;
+    cm_buf_t *tbufp;                   /* buf we're filling */
     osi_queueData_t *qdp;              /* q element we're scanning */
     AFSFid tfid;
-    struct rx_call *callp;
+    struct rx_call *rxcallp;
+    struct rx_connection *rxconnp;
     cm_bulkIO_t biod;          /* bulk IO descriptor */
     cm_conn_t *connp;
-       int getroot;
-       long t1, t2;
+    int getroot;
+    afs_int32 t1,t2;
+    int require_64bit_ops = 0;
+    int call_was_64bit = 0;
+    int fs_fetchdata_offset_bug = 0;
+    int first_read = 1;
+    int scp_locked = 1;
+
+    memset(&volSync, 0, sizeof(volSync));
 
-    /* now, the buffer may or may not be filled with good data (buf_GetNew
+    /* now, the buffer may or may not be filled with good data (buf_GetNewLocked
      * drops lots of locks, and may indeed return a properly initialized
      * buffer, although more likely it will just return a new, empty, buffer.
      */
 
 #ifdef AFS_FREELANCE_CLIENT
 
-       // yj: if they're trying to get the /afs directory, we need to
-       // handle it differently, since it's local rather than on any
-       // server
+    // yj: if they're trying to get the /afs directory, we need to
+    // handle it differently, since it's local rather than on any
+    // server
 
-       getroot = (scp==cm_rootSCachep);
-       if (getroot)
-               osi_Log1(afsd_logp,"GetBuffer returns cm_rootSCachep=%x",cm_rootSCachep);
+    getroot = (scp==cm_data.rootSCachep);
+    if (getroot)
+        osi_Log1(afsd_logp,"GetBuffer returns cm_data.rootSCachep=%x",cm_data.rootSCachep);
 #endif
 
-       cm_AFSFidFromFid(&tfid, &scp->fid);
+    if (cm_HaveCallback(scp) && bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow) {
+        /* We already have this buffer; don't do extra work */
+        return 0;
+    }
 
-       code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, up, reqp);
-       if (code) {
-               /* couldn't even get the first page setup properly */
-               osi_Log1(afsd_logp, "SetupFetchBIOD failure code %d", code);
+    cm_AFSFidFromFid(&tfid, &scp->fid);
+
+    code = cm_SetupFetchBIOD(scp, &bufp->offset, &biod, userp, reqp);
+    if (code) {
+        /* couldn't even get the first page setup properly */
+        osi_Log1(afsd_logp, "GetBuffer: SetupFetchBIOD failure code %d", code);
         return code;
-       }
+    }
 
     /* once we get here, we have the callback in place, we know that no one
-        * is fetching the data now.  Check one last time that we still have
-        * the wrong data, and then fetch it if we're still wrong.
-        *
+     * is fetching the data now.  Check one last time that we still have
+     * the wrong data, and then fetch it if we're still wrong.
+     *
      * We can lose a race condition and end up with biod.length zero, in
-        * which case we just retry.
+     * which case we just retry.
      */
-    if (bufp->dataVersion == scp->dataVersion || biod.length == 0) {
-               osi_Log3(afsd_logp, "Bad DVs %d, %d or length 0x%x",
-                 bufp->dataVersion, scp->dataVersion, biod.length);
-               if ((bufp->dataVersion == -1
-                    || bufp->dataVersion < scp->dataVersion)
-             && LargeIntegerGreaterThanOrEqualTo(bufp->offset,
-                                                 scp->serverLength)) {
-                       if (bufp->dataVersion == -1)
-                               memset(bufp->datap, 0, buf_bufferSize);
-                       bufp->dataVersion = scp->dataVersion;
-               }
-               lock_ReleaseMutex(&scp->mx);
-               cm_ReleaseBIOD(&biod, 0);
-               lock_ObtainMutex(&scp->mx);
+    if (bufp->dataVersion <= scp->dataVersion && bufp->dataVersion >= scp->bufDataVersionLow || biod.length == 0) {
+        if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow) &&
+             LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->serverLength))
+        {
+            osi_Log4(afsd_logp, "Bad DVs 0x%x != (0x%x -> 0x%x) or length 0x%x",
+                     bufp->dataVersion, scp->bufDataVersionLow, scp->dataVersion, biod.length);
+
+            if (bufp->dataVersion == CM_BUF_VERSION_BAD)
+                memset(bufp->datap, 0, cm_data.buf_blockSize);
+            bufp->dataVersion = scp->dataVersion;
+        }
+        cm_ReleaseBIOD(&biod, 0, 0, 1);
+        return 0;
+    } else if ((bufp->dataVersion == CM_BUF_VERSION_BAD || bufp->dataVersion < scp->bufDataVersionLow)
+                && (scp->mask & CM_SCACHEMASK_TRUNCPOS) &&
+                LargeIntegerGreaterThanOrEqualTo(bufp->offset, scp->truncPos)) {
+        memset(bufp->datap, 0, cm_data.buf_blockSize);
+        bufp->dataVersion = scp->dataVersion;
+        cm_ReleaseBIOD(&biod, 0, 0, 1);
         return 0;
     }
-        
-    lock_ReleaseMutex(&scp->mx);
 
-#ifdef DISKCACHE95
-    DPRINTF("cm_GetBuffer: fetching data scpDV=%d bufDV=%d scp=%x bp=%x dcp=%x\n",
-            scp->dataVersion, bufp->dataVersion, scp, bufp, bufp->dcp);
-#endif /* DISKCACHE95 */
+    lock_ReleaseWrite(&scp->rw);
+    scp_locked = 0;
+
+    if (LargeIntegerGreaterThan(LargeIntegerAdd(biod.offset,
+                                                ConvertLongToLargeInteger(biod.length)),
+                                ConvertLongToLargeInteger(LONG_MAX))) {
+        require_64bit_ops = 1;
+    }
+
+    osi_Log2(afsd_logp, "cm_GetBuffer: fetching data scp %p bufp %p", scp, bufp);
+    osi_Log3(afsd_logp, "cm_GetBuffer: fetching data scpDV 0x%x scpDVLow 0x%x bufDV 0x%x",
+             scp->dataVersion, scp->bufDataVersionLow, bufp->dataVersion);
 
 #ifdef AFS_FREELANCE_CLIENT
 
-       // yj code
-       // if getroot then we don't need to make any calls
-       // just return fake data
-       
-        if (cm_freelanceEnabled && getroot) {
-               // setup the fake status                        
-               afsStatus.InterfaceVersion = 0x1;
-               afsStatus.FileType = 0x2;
-               afsStatus.LinkCount = scp->linkCount;
-               afsStatus.Length = cm_fakeDirSize;
-               afsStatus.DataVersion = cm_fakeDirVersion;
-               afsStatus.Author = 0x1;
-               afsStatus.Owner = 0x0;
-               afsStatus.CallerAccess = 0x9;
-               afsStatus.AnonymousAccess = 0x9;
-               afsStatus.UnixModeBits = 0x1ff;
-               afsStatus.ParentVnode = 0x1;
-               afsStatus.ParentUnique = 0x1;
-               afsStatus.ResidencyMask = 0;
-               afsStatus.ClientModTime = 0x3b49f6e2;
-               afsStatus.ServerModTime = 0x3b49f6e2;
-               afsStatus.Group = 0;
-               afsStatus.SyncCounter = 0;
-               afsStatus.dataVersionHigh = 0;
-       
-               // once we're done setting up the status info,
-               // we just fill the buffer pages with fakedata
-               // from cm_FakeRootDir. Extra pages are set to
-               // 0. 
-               
-               lock_ObtainMutex(&cm_Freelance_Lock);
-               t1 = bufp->offset.LowPart;
-               qdp = biod.bufListEndp;
-               while (qdp) {
-                       tbufp = osi_GetQData(qdp);
-                       bufferp=tbufp->datap;
-                       memset(bufferp, 0, buf_bufferSize);
-                       t2 = cm_fakeDirSize - t1;
-                       if (t2>buf_bufferSize) t2=buf_bufferSize;
-                       if (t2 > 0) {
-                               memcpy(bufferp, cm_FakeRootDir+t1, t2);
-                       } else {
-                               t2 = 0;
-                       }
-                       t1+=t2;
-                       qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
-                       
-               }
-               lock_ReleaseMutex(&cm_Freelance_Lock);
-       
-               // once we're done, we skip over the part of the
-               // code that does the ACTUAL fetching of data for
-               // real files
+    // yj code
+    // if getroot then we don't need to make any calls
+    // just return fake data
+
+    if (cm_freelanceEnabled && getroot) {
+        // setup the fake status
+        afsStatus.InterfaceVersion = 0x1;
+        afsStatus.FileType = 0x2;
+        afsStatus.LinkCount = scp->linkCount;
+        afsStatus.Length = cm_fakeDirSize;
+        afsStatus.DataVersion = (afs_uint32)(cm_data.fakeDirVersion & 0xFFFFFFFF);
+        afsStatus.Author = 0x1;
+        afsStatus.Owner = 0x0;
+        afsStatus.CallerAccess = 0x9;
+        afsStatus.AnonymousAccess = 0x9;
+        afsStatus.UnixModeBits = 0x1ff;
+        afsStatus.ParentVnode = 0x1;
+        afsStatus.ParentUnique = 0x1;
+        afsStatus.ResidencyMask = 0;
+        afsStatus.ClientModTime = (afs_uint32)FakeFreelanceModTime;
+        afsStatus.ServerModTime = (afs_uint32)FakeFreelanceModTime;
+        afsStatus.Group = 0;
+        afsStatus.SyncCounter = 0;
+        afsStatus.dataVersionHigh = (afs_uint32)(cm_data.fakeDirVersion >> 32);
+        afsStatus.lockCount = 0;
+        afsStatus.Length_hi = 0;
+        afsStatus.errorCode = 0;
+       memset(&volSync, 0, sizeof(volSync));
+
+        // once we're done setting up the status info,
+        // we just fill the buffer pages with fakedata
+        // from cm_FakeRootDir. Extra pages are set to
+        // 0.
+
+        lock_ObtainMutex(&cm_Freelance_Lock);
+        t1 = bufp->offset.LowPart;
+        qdp = biod.bufListEndp;
+        while (qdp) {
+            tbufp = osi_GetQData(qdp);
+            bufferp=tbufp->datap;
+            memset(bufferp, 0, cm_data.buf_blockSize);
+            t2 = cm_fakeDirSize - t1;
+            if (t2> (afs_int32)cm_data.buf_blockSize)
+                t2=cm_data.buf_blockSize;
+            if (t2 > 0) {
+                memcpy(bufferp, cm_FakeRootDir+t1, t2);
+            } else {
+                t2 = 0;
+            }
+            t1+=t2;
+            qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
 
-               goto fetchingcompleted;
-       }
+        }
+        lock_ReleaseMutex(&cm_Freelance_Lock);
+
+        // once we're done, we skip over the part of the
+        // code that does the ACTUAL fetching of data for
+        // real files
+
+        goto fetchingcompleted;
+    }
 
 #endif /* AFS_FREELANCE_CLIENT */
 
-       /* now make the call */
+    /*
+     * if the requested offset is greater than the file length,
+     * the file server will return zero bytes of data and the
+     * current status for the file, which we already have since
+     * we have just obtained a callback.  Instead, we can avoid
+     * the network round trip by allocating zeroed buffers and
+     * faking the status info.
+     */
+    if (biod.offset.QuadPart >= scp->length.QuadPart) {
+        osi_Log5(afsd_logp, "SKIP FetchData64 scp 0x%p, off 0x%x:%08x > length 0x%x:%08x",
+                 scp, biod.offset.HighPart, biod.offset.LowPart,
+                 scp->length.HighPart, scp->length.LowPart);
+
+        /* Clone the current status info */
+        scp_locked = cm_CloneStatus(scp, userp, scp_locked, &afsStatus, &volSync);
+
+        /* status info complete, fill pages with zeros */
+        for (qdp = biod.bufListEndp;
+             qdp;
+             qdp = (osi_queueData_t *) osi_QPrev(&qdp->q)) {
+            tbufp = osi_GetQData(qdp);
+            bufferp=tbufp->datap;
+            memset(bufferp, 0, cm_data.buf_blockSize);
+        }
+
+        /* no need to contact the file server */
+        goto fetchingcompleted;
+    }
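/*
 * Illustrative sketch (editor's note, not part of this change): when the
 * requested offset is at or beyond the known file length, the server
 * would only send zeroes plus status we already hold, so the block above
 * zero-fills the buffers and clones the cached status instead of issuing
 * the RPC.  The decision and the fill, over a flat array of hypothetical
 * fixed-size blocks:
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_BLOCKSIZE 4096

/* Zero-fills nblocks entries of blocks[] and reports whether the RPC can
 * be skipped, given the request offset and the cached file length. */
static int
sketch_serve_locally(uint64_t offset, uint64_t file_length,
                     char (*blocks)[SKETCH_BLOCKSIZE], unsigned nblocks)
{
    unsigned i;

    if (offset < file_length)
        return 0;                       /* must really fetch */

    for (i = 0; i < nblocks; i++)
        memset(blocks[i], 0, SKETCH_BLOCKSIZE);
    return 1;                           /* caller fakes the status reply */
}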
+
+    if (scp_locked) {
+        lock_ReleaseWrite(&scp->rw);
+        scp_locked = 0;
+    }
+
+    /* now make the call */
     do {
-               code = cm_Conn(&scp->fid, up, reqp, &connp);
-        if (code) 
+        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
+        if (code)
             continue;
-               
-               callp = rx_NewCall(connp->callp);
 
-               osi_Log3(afsd_logp, "CALL FetchData vp %x, off 0x%x, size 0x%x",
-                 (long) scp, biod.offset.LowPart, biod.length);
+        rxconnp = cm_GetRxConn(connp);
+        rxcallp = rx_NewCall(rxconnp);
+        rx_PutConnection(rxconnp);
+
+        nbytes = nbytes_hi = 0;
+
+        if (SERVERHAS64BIT(connp)) {
+            call_was_64bit = 1;
+
+            osi_Log4(afsd_logp, "CALL FetchData64 scp 0x%p, off 0x%x:%08x, size 0x%x",
+                     scp, biod.offset.HighPart, biod.offset.LowPart, biod.length);
+
+            code = StartRXAFS_FetchData64(rxcallp, &tfid, biod.offset.QuadPart, biod.length);
+
+            if (code == 0) {
+                temp = rx_Read32(rxcallp, &nbytes_hi);
+                if (temp == sizeof(afs_int32)) {
+                    nbytes_hi = ntohl(nbytes_hi);
+                } else {
+                    nbytes_hi = 0;
+                   code = rxcallp->error;
+                    code1 = rx_EndCall(rxcallp, code);
+                    rxcallp = NULL;
+                }
+            }
+        } else {
+            call_was_64bit = 0;
+        }
+
+        if (code == RXGEN_OPCODE || !SERVERHAS64BIT(connp)) {
+            if (require_64bit_ops) {
+                osi_Log0(afsd_logp, "Skipping FetchData.  Operation requires FetchData64");
+                code = CM_ERROR_TOOBIG;
+            } else {
+                if (!rxcallp) {
+                    rxconnp = cm_GetRxConn(connp);
+                    rxcallp = rx_NewCall(rxconnp);
+                    rx_PutConnection(rxconnp);
+                }
+
+                osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
+                         scp, biod.offset.LowPart, biod.length);
 
-        code = StartRXAFS_FetchData(callp, &tfid, biod.offset.LowPart,
-                                    biod.length);
+                code = StartRXAFS_FetchData(rxcallp, &tfid, biod.offset.LowPart,
+                                            biod.length);
 
-               /* now copy the data out of the pipe and put it in the buffer */
-               temp  = rx_Read(callp, (char *)&nbytes, 4);
-               if (temp == 4) {
-                       nbytes = ntohl(nbytes);
-            if (nbytes > biod.length) 
-                code = (callp->error < 0) ? callp->error : -1;
+                SET_SERVERHASNO64BIT(connp);
+            }
         }
-        else 
-            code = (callp->error < 0) ? callp->error : -1;
 
-               if (code == 0) {
+        if (code == 0) {
+            temp  = rx_Read32(rxcallp, &nbytes);
+            if (temp == sizeof(afs_int32)) {
+                nbytes = ntohl(nbytes);
+                FillInt64(length_found, nbytes_hi, nbytes);
+                if (length_found > biod.length) {
+                    /*
+                     * prior to 1.4.12 and 1.5.65 the file server would return
+                     * (filesize - offset) if the requested offset was greater than
+                     * the filesize.  The correct return value would have been zero.
+                     * Force a retry by returning an RX_PROTOCOL_ERROR.  If the cause
+                     * is a race between two RPCs issued by this cache manager, the
+                     * correct thing will happen the second time.
+                     */
+                    osi_Log0(afsd_logp, "cm_GetBuffer length_found > biod.length");
+                    fs_fetchdata_offset_bug = 1;
+                }
+            } else {
+                osi_Log1(afsd_logp, "cm_GetBuffer rx_Read32 returns %d != 4", temp);
+                code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
+            }
+        }
+        /* for the moment, nbytes_hi will always be 0 if code == 0
+         * because biod.length is a 32-bit quantity. */
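/*
 * Illustrative sketch (editor's note, not part of this change): the block
 * above probes the file server with the 64-bit RPC first; RXGEN_OPCODE
 * means the server predates it, so the flag tested by SERVERHAS64BIT()
 * is cleared and the call is reissued as the 32-bit FetchData -- unless
 * the request needs offsets a 32-bit call cannot express, in which case
 * it fails with CM_ERROR_TOOBIG.  The decision, with hypothetical
 * stand-ins for the two RPC starts and illustrative error values:
 */
extern int start_fetch64(void);     /* "opcode unknown" if unsupported */
extern int start_fetch32(void);
#define SKETCH_RXGEN_OPCODE (-455)  /* illustrative value only */
#define SKETCH_ERROR_TOOBIG (-99)   /* illustrative value only */

static int
sketch_start_fetch(int *server_has_64bit, int require_64bit_ops)
{
    int code = SKETCH_RXGEN_OPCODE;

    if (*server_has_64bit)
        code = start_fetch64();

    if (code == SKETCH_RXGEN_OPCODE || !*server_has_64bit) {
        if (require_64bit_ops)
            return SKETCH_ERROR_TOOBIG;     /* cannot express the offset */
        *server_has_64bit = 0;              /* remember for later calls */
        code = start_fetch32();
    }
    return code;
}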
+
+        if (code == 0) {
             qdp = biod.bufListEndp;
             if (qdp) {
-                               tbufp = osi_GetQData(qdp);
+                tbufp = osi_GetQData(qdp);
                 bufferp = tbufp->datap;
+                buffer_offset = 0;
             }
-            else 
+            else
                 bufferp = NULL;
-                       /* fill nbytes of data from the pipe into the pages.
-                        * When we stop, qdp will point at the last page we're
-                        * dealing with, and bufferp will tell us where we
-                        * stopped.  We'll need this info below when we clear
-                        * the remainder of the last page out (and potentially
+
+            /* fill length_found of data from the pipe into the pages.
+             * When we stop, qdp will point at the last page we're
+             * dealing with, and bufferp will tell us where we
+             * stopped.  We'll need this info below when we clear
+             * the remainder of the last page out (and potentially
              * clear later pages out, if we fetch past EOF).
              */
-            while(nbytes > 0) {
-                               /* assert that there are still more buffers;
-                                * our check above for nbytes being less than
-                                * biod.length should ensure this.
+            while (length_found > 0) {
+#ifdef USE_RX_IOVEC
+                struct iovec tiov[RX_MAXIOVECS];
+                afs_int32 tnio, iov, iov_offset;
+
+                temp = rx_Readv(rxcallp, tiov, &tnio, RX_MAXIOVECS, length_found);
+                osi_Log1(afsd_logp, "cm_GetBuffer rx_Readv returns %d", temp);
+                if (temp != length_found && temp < cm_data.buf_blockSize) {
+                    /*
+                     * If the file server returned (filesize - offset),
+                     * then the first rx_Read will return zero octets of data.
+                     * If it does, do not treat it as an error.  Correct the
+                     * length_found and continue as if the file server said
+                     * it was sending us zero octets of data.
+                     */
+                    if (fs_fetchdata_offset_bug && first_read)
+                        length_found = 0;
+                    else
+                        code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
+                    break;
+                }
+
+                iov = 0;
+                iov_offset = 0;
+                rbytes = temp;
+
+                while (rbytes > 0) {
+                    afs_int32 len;
+
+                    osi_assertx(bufferp != NULL, "null cm_buf_t");
+
+                    len = MIN(tiov[iov].iov_len - iov_offset, cm_data.buf_blockSize - buffer_offset);
+                    memcpy(bufferp + buffer_offset, tiov[iov].iov_base + iov_offset, len);
+                    iov_offset += len;
+                    buffer_offset += len;
+                    rbytes -= len;
+
+                    if (iov_offset == tiov[iov].iov_len) {
+                        iov++;
+                        iov_offset = 0;
+                    }
+
+                    if (buffer_offset == cm_data.buf_blockSize) {
+                        /* allow read-while-fetching.
+                        * if this is the last buffer, clear the
+                        * PREFETCHING flag, so the reader waiting for
+                        * this buffer will start a prefetch.
+                        */
+                        tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
+                        lock_ObtainWrite(&scp->rw);
+                        if (scp->flags & CM_SCACHEFLAG_WAITING) {
+                            osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
+                            osi_Wakeup((LONG_PTR) &scp->flags);
+                        }
+                        if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
+                            osi_hyper_t tlength = ConvertLongToLargeInteger(biod.length);
+                            *cpffp = 1;
+                            cm_ClearPrefetchFlag(0, scp, &biod.offset, &tlength);
+                        }
+                        lock_ReleaseWrite(&scp->rw);
+
+                        /* Advance the buffer */
+                        qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
+                        if (qdp) {
+                            tbufp = osi_GetQData(qdp);
+                            bufferp = tbufp->datap;
+                            buffer_offset = 0;
+                        }
+                        else
+                            bufferp = NULL;
+                    }
+                }
+
+                length_found -= temp;
+#else /* USE_RX_IOVEC */
+                /* assert that there are still more buffers;
+                 * our check above for length_found being less than
+                 * biod.length should ensure this.
                  */
-                               osi_assert(bufferp != NULL);
+                osi_assertx(bufferp != NULL, "null cm_buf_t");
 
-                               /* read rbytes of data */
-                rbytes = (nbytes > buf_bufferSize? buf_bufferSize : nbytes);
-                temp = rx_Read(callp, bufferp, rbytes);
+                /* read rbytes of data */
+                rbytes = (afs_uint32)(length_found > cm_data.buf_blockSize ? cm_data.buf_blockSize : length_found);
+                temp = rx_Read(rxcallp, bufferp, rbytes);
                 if (temp < rbytes) {
-                    code = (callp->error < 0) ? callp->error : -1;
+                    /*
+                     * If the file server returned (filesize - offset),
+                     * then the first rx_Read will return zero octets of data.
+                     * If it does, do not treat it as an error.  Correct the
+                     * length_found and continue as if the file server said
+                     * it was sending us zero octets of data.
+                     */
+                    if (fs_fetchdata_offset_bug && first_read)
+                        length_found = 0;
+                    else
+                        code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
                     break;
-                               }
-
-                               /* allow read-while-fetching.
-                                * if this is the last buffer, clear the
-                                * PREFETCHING flag, so the reader waiting for
-                                * this buffer will start a prefetch.
-                                */
-                               tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
-                               lock_ObtainMutex(&scp->mx);
-                               if (scp->flags & CM_SCACHEFLAG_WAITING) {
-                                       scp->flags &= ~CM_SCACHEFLAG_WAITING;
-                                       osi_Wakeup((long) &scp->flags);
-                               }
-                               if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
-                                       *cpffp = 1;
-                                       cm_ClearPrefetchFlag(0, scp, &biod.offset);
-                               }
-                               lock_ReleaseMutex(&scp->mx);
-
-                               /* and adjust counters */
-                nbytes -= temp;
-                                
+                }
+                first_read = 0;
+
+                /* allow read-while-fetching.
+                 * if this is the last buffer, clear the
+                 * PREFETCHING flag, so the reader waiting for
+                 * this buffer will start a prefetch.
+                 */
+                tbufp->cmFlags |= CM_BUF_CMFULLYFETCHED;
+                lock_ObtainWrite(&scp->rw);
+                if (scp->flags & CM_SCACHEFLAG_WAITING) {
+                    osi_Log1(afsd_logp, "CM GetBuffer Waking scp 0x%p", scp);
+                    osi_Wakeup((LONG_PTR) &scp->flags);
+                }
+                if (cpffp && !*cpffp && !osi_QPrev(&qdp->q)) {
+                    osi_hyper_t tlength = ConvertLongToLargeInteger(biod.length);
+                    *cpffp = 1;
+                    cm_ClearPrefetchFlag(0, scp, &biod.offset, &tlength);
+                }
+                lock_ReleaseWrite(&scp->rw);
+
+                /* and adjust counters */
+                length_found -= temp;
+
                 /* and move to the next buffer */
-                               if (nbytes != 0) {
+                if (length_found != 0) {
                     qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
                     if (qdp) {
-                                               tbufp = osi_GetQData(qdp);
+                        tbufp = osi_GetQData(qdp);
                         bufferp = tbufp->datap;
                     }
-                    else 
+                    else
                         bufferp = NULL;
-                               } else 
+                } else
                     bufferp += temp;
+#endif /* USE_RX_IOVEC */
             }
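/*
 * Illustrative sketch (editor's note, not part of this change): with
 * USE_RX_IOVEC the reply arrives as an array of iovecs whose boundaries
 * do not line up with the cache blocks, so the inner loop above copies
 * min(iovec remainder, block remainder) bytes at a time, advancing
 * iov_offset and buffer_offset independently.  The same walk over plain
 * memory, using the POSIX struct iovec here purely for illustration and
 * hypothetical fixed-size destination blocks; the caller must supply
 * enough blocks for the whole payload.
 */
#include <stddef.h>
#include <string.h>
#include <sys/uio.h>            /* struct iovec */

#define SKETCH_BLOCKSIZE 4096

static void
sketch_scatter_copy(const struct iovec *iov, int niov,
                    char (*blocks)[SKETCH_BLOCKSIZE])
{
    size_t iov_off = 0, blk_off = 0;
    int i = 0, b = 0;

    while (i < niov) {
        size_t src_left = iov[i].iov_len - iov_off;
        size_t dst_left = SKETCH_BLOCKSIZE - blk_off;
        size_t len = src_left < dst_left ? src_left : dst_left;

        memcpy(blocks[b] + blk_off, (char *)iov[i].iov_base + iov_off, len);
        iov_off += len;
        blk_off += len;

        if (iov_off == iov[i].iov_len) {        /* next source iovec */
            i++;
            iov_off = 0;
        }
        if (blk_off == SKETCH_BLOCKSIZE) {      /* next destination block */
            b++;
            blk_off = 0;
        }
    }
}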
 
             /* zero out remainder of last pages, in case we are
-                        * fetching past EOF.  We were fetching an integral #
-                        * of pages, but stopped, potentially in the middle of
-                        * a page.  Zero the remainder of that page, and then
-                        * all of the rest of the pages.
+             * fetching past EOF.  We were fetching an integral #
+             * of pages, but stopped, potentially in the middle of
+             * a page.  Zero the remainder of that page, and then
+             * all of the rest of the pages.
              */
-                       /* bytes fetched */
-            rbytes = bufferp - tbufp->datap;
-                       /* bytes left to zero */
-            rbytes = buf_bufferSize - rbytes;
+#ifdef USE_RX_IOVEC
+            rbytes = cm_data.buf_blockSize - buffer_offset;
+            bufferp = tbufp->datap + buffer_offset;
+#else /* USE_RX_IOVEC */
+            /* bytes fetched */
+           osi_assertx((bufferp - tbufp->datap) < LONG_MAX, "data >= LONG_MAX");
+            rbytes = (long) (bufferp - tbufp->datap);
+
+            /* bytes left to zero */
+            rbytes = cm_data.buf_blockSize - rbytes;
+#endif /* USE_RX_IOVEC */
             while(qdp) {
                 if (rbytes != 0)
-                                       memset(bufferp, 0, rbytes);
+                    memset(bufferp, 0, rbytes);
                 qdp = (osi_queueData_t *) osi_QPrev(&qdp->q);
-                               if (qdp == NULL) 
+                if (qdp == NULL)
                     break;
-                               tbufp = osi_GetQData(qdp);
+                tbufp = osi_GetQData(qdp);
                 bufferp = tbufp->datap;
-                               /* bytes to clear in this page */
-                               rbytes = buf_bufferSize;
+                /* bytes to clear in this page */
+                rbytes = cm_data.buf_blockSize;
             }
-               }
+        }
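/*
 * Illustrative sketch (editor's note, not part of this change): the fetch
 * always spans an integral number of blocks, but the server may stop
 * mid-block at EOF, so the loop above zeroes from the stop point to the
 * end of that block and then every later block.  The same cleanup over a
 * flat array of hypothetical fixed-size blocks:
 */
#include <stddef.h>
#include <string.h>

#define SKETCH_BLOCKSIZE 4096

static void
sketch_zero_tail(char (*blocks)[SKETCH_BLOCKSIZE], size_t nblocks,
                 size_t received)
{
    size_t b = received / SKETCH_BLOCKSIZE;         /* block we stopped in */
    size_t off = received % SKETCH_BLOCKSIZE;       /* offset within it */

    for (; b < nblocks; b++, off = 0)
        memset(blocks[b] + off, 0, SKETCH_BLOCKSIZE - off);
}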
 
-               if (code == 0)
-                       code = EndRXAFS_FetchData(callp, &afsStatus, &callback, &volSync);
-               else
-                       osi_Log0(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error");
-        code = rx_EndCall(callp, code);
-        if (code == RXKADUNKNOWNKEY)
+        if (code == 0) {
+            if (call_was_64bit)
+                code = EndRXAFS_FetchData64(rxcallp, &afsStatus, &callback, &volSync);
+            else
+                code = EndRXAFS_FetchData(rxcallp, &afsStatus, &callback, &volSync);
+        } else {
+            if (call_was_64bit)
+                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData64 skipped due to error %d", code);
+            else
+                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error %d", code);
+        }
+
+        if (rxcallp)
+            code1 = rx_EndCall(rxcallp, code);
+
+        if (code1 == RXKADUNKNOWNKEY)
             osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");
+
+        /* If we are avoiding a file server bug, ignore the error state */
+        if (fs_fetchdata_offset_bug && first_read && length_found == 0 && code == -451) {
+            /* Clone the current status info and clear the error state */
+            scp_locked = cm_CloneStatus(scp, userp, scp_locked, &afsStatus, &volSync);
+            if (scp_locked) {
+                lock_ReleaseWrite(&scp->rw);
+                scp_locked = 0;
+            }
+            code = 0;
+        /* Prefer the error value from FetchData over rx_EndCall */
+        } else if (code == 0 && code1 != 0)
+            code = code1;
         osi_Log0(afsd_logp, "CALL FetchData DONE");
 
-       } while (cm_Analyze(connp, up, reqp, &scp->fid, &volSync, NULL, NULL, code));
+    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
 
   fetchingcompleted:
     code = cm_MapRPCError(code, reqp);
 
-    lock_ObtainMutex(&scp->mx);
-       /* we know that no one else has changed the buffer, since we still have
-        * the fetching flag on the buffers, and we have the scp locked again.
-        * Copy in the version # into the buffer if we got code 0 back from the
-        * read.
+    if (!scp_locked)
+        lock_ObtainWrite(&scp->rw);
+
+    /* we know that no one else has changed the buffer, since we still have
+     * the fetching flag on the buffers, and we have the scp locked again.
+     * Copy in the version # into the buffer if we got code 0 back from the
+     * read.
      */
-       if (code == 0) {
-               for(qdp = biod.bufListp;
-                   qdp;
-                   qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
-                       tbufp = osi_GetQData(qdp);
-            tbufp->dataVersion = afsStatus.DataVersion;
+    if (code == 0) {
+        for(qdp = biod.bufListp;
+             qdp;
+             qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
+            tbufp = osi_GetQData(qdp);
+            tbufp->dataVersion = afsStatus.dataVersionHigh;
+            tbufp->dataVersion <<= 32;
+            tbufp->dataVersion |= afsStatus.DataVersion;
 
 #ifdef DISKCACHE95
             /* write buffer out to disk cache */
-            diskcache_Update(tbufp->dcp, tbufp->datap, buf_bufferSize,
+            diskcache_Update(tbufp->dcp, tbufp->datap, cm_data.buf_blockSize,
                               tbufp->dataVersion);
 #endif /* DISKCACHE95 */
         }
     }
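/*
 * Illustrative sketch (editor's note, not part of this change): on
 * success every buffer in the BIOD is stamped with the data version the
 * server reported, which arrives split across AFSFetchStatus's
 * dataVersionHigh and DataVersion fields and is recombined exactly as in
 * the loop above:
 */
#include <stdint.h>

static uint64_t
sketch_status_dv(uint32_t dv_high, uint32_t dv_low)
{
    uint64_t dv = dv_high;

    dv <<= 32;
    dv |= dv_low;
    return dv;
}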
 
-       /* release scatter/gather I/O structure (buffers, locks) */
-       lock_ReleaseMutex(&scp->mx);
-       cm_ReleaseBIOD(&biod, 0);
-       lock_ObtainMutex(&scp->mx);
+    /* release scatter/gather I/O structure (buffers, locks) */
+    cm_ReleaseBIOD(&biod, 0, code, 1);
+
+    if (code == 0)
+        cm_MergeStatus(NULL, scp, &afsStatus, &volSync, userp, reqp, CM_MERGEFLAG_FETCHDATA);
+
+    return code;
+}
+
+/*
+ * Similar to cm_GetBuffer but doesn't use an allocated cm_buf_t object.
+ * Instead the data is read from the file server and copied directly into
+ * a provided buffer.  Called with scp locked. The scp is locked on return.
+ */
+long cm_GetData(cm_scache_t *scp, osi_hyper_t *offsetp, char *datap, int data_length,
+                cm_user_t *userp, cm_req_t *reqp)
+{
+    long code=0, code1=0;
+    afs_uint32 nbytes;                 /* bytes in transfer */
+    afs_uint32 nbytes_hi = 0;           /* high-order 32 bits of bytes in transfer */
+    afs_uint64 length_found = 0;
+    char *bufferp = datap;
+    afs_uint32 buffer_offset = 0;
+    long rbytes;                       /* bytes in rx_Read call */
+    long temp;
+    AFSFetchStatus afsStatus;
+    AFSCallBack callback;
+    AFSVolSync volSync;
+    AFSFid tfid;
+    struct rx_call *rxcallp;
+    struct rx_connection *rxconnp;
+    cm_conn_t *connp;
+    int getroot;
+    afs_int32 t1,t2;
+    int require_64bit_ops = 0;
+    int call_was_64bit = 0;
+    int fs_fetchdata_offset_bug = 0;
+    int first_read = 1;
+    int scp_locked = 1;
+
+    memset(&volSync, 0, sizeof(volSync));
+
+    /* now, the buffer may or may not be filled with good data (buf_GetNewLocked
+     * drops lots of locks, and may indeed return a properly initialized
+     * buffer, although more likely it will just return a new, empty, buffer.
+     */
+
+#ifdef AFS_FREELANCE_CLIENT
+
+    // yj: if they're trying to get the /afs directory, we need to
+    // handle it differently, since it's local rather than on any
+    // server
+
+    getroot = (scp==cm_data.rootSCachep);
+    if (getroot)
+        osi_Log1(afsd_logp,"GetBuffer returns cm_data.rootSCachep=%x",cm_data.rootSCachep);
+#endif
+
+    cm_AFSFidFromFid(&tfid, &scp->fid);
+
+    if (LargeIntegerGreaterThan(LargeIntegerAdd(*offsetp,
+                                                ConvertLongToLargeInteger(data_length)),
+                                ConvertLongToLargeInteger(LONG_MAX))) {
+        require_64bit_ops = 1;
+    }
+
+    osi_Log2(afsd_logp, "cm_GetData: fetching data scp %p DV 0x%x", scp, scp->dataVersion);
+
+#ifdef AFS_FREELANCE_CLIENT
+
+    // yj code
+    // if getroot then we don't need to make any calls
+    // just return fake data
+
+    if (cm_freelanceEnabled && getroot) {
+        // setup the fake status
+        afsStatus.InterfaceVersion = 0x1;
+        afsStatus.FileType = 0x2;
+        afsStatus.LinkCount = scp->linkCount;
+        afsStatus.Length = cm_fakeDirSize;
+        afsStatus.DataVersion = (afs_uint32)(cm_data.fakeDirVersion & 0xFFFFFFFF);
+        afsStatus.Author = 0x1;
+        afsStatus.Owner = 0x0;
+        afsStatus.CallerAccess = 0x9;
+        afsStatus.AnonymousAccess = 0x9;
+        afsStatus.UnixModeBits = 0x1ff;
+        afsStatus.ParentVnode = 0x1;
+        afsStatus.ParentUnique = 0x1;
+        afsStatus.ResidencyMask = 0;
+        afsStatus.ClientModTime = (afs_uint32)FakeFreelanceModTime;
+        afsStatus.ServerModTime = (afs_uint32)FakeFreelanceModTime;
+        afsStatus.Group = 0;
+        afsStatus.SyncCounter = 0;
+        afsStatus.dataVersionHigh = (afs_uint32)(cm_data.fakeDirVersion >> 32);
+        afsStatus.lockCount = 0;
+        afsStatus.Length_hi = 0;
+        afsStatus.errorCode = 0;
+        memset(&volSync, 0, sizeof(volSync));
+
+        // once we're done setting up the status info,
+        // we fill the caller's buffer with fake data
+        // from cm_FakeRootDir; any bytes beyond the
+        // fake directory are set to 0.
+
+        lock_ObtainMutex(&cm_Freelance_Lock);
+        t1 = offsetp->LowPart;
+        memset(datap, 0, data_length);
+        t2 = cm_fakeDirSize - t1;
+        if (t2 > data_length)
+            t2 = data_length;
+        if (t2 > 0)
+            memcpy(datap, cm_FakeRootDir+t1, t2);
+        lock_ReleaseMutex(&cm_Freelance_Lock);
+
+        // once we're done, we skip over the part of the
+        // code that does the ACTUAL fetching of data for
+        // real files
+
+        goto fetchingcompleted;
+    }
+
+#endif /* AFS_FREELANCE_CLIENT */
+
+    if (scp_locked) {
+        lock_ReleaseWrite(&scp->rw);
+        scp_locked = 0;
+    }
+
+    /* now make the call */
+    do {
+        code = cm_ConnFromFID(&scp->fid, userp, reqp, &connp);
+        if (code)
+            continue;
+
+        rxconnp = cm_GetRxConn(connp);
+        rxcallp = rx_NewCall(rxconnp);
+        rx_PutConnection(rxconnp);
+
+        nbytes = nbytes_hi = 0;
+
+        if (SERVERHAS64BIT(connp)) {
+            call_was_64bit = 1;
+
+            osi_Log4(afsd_logp, "CALL FetchData64 scp 0x%p, off 0x%x:%08x, size 0x%x",
+                     scp, offsetp->HighPart, offsetp->LowPart, data_length);
+
+            code = StartRXAFS_FetchData64(rxcallp, &tfid, offsetp->QuadPart, data_length);
+
+            if (code == 0) {
+                temp = rx_Read32(rxcallp, &nbytes_hi);
+                if (temp == sizeof(afs_int32)) {
+                    nbytes_hi = ntohl(nbytes_hi);
+                } else {
+                    nbytes_hi = 0;
+                    code = rxcallp->error;
+                    code1 = rx_EndCall(rxcallp, code);
+                    rxcallp = NULL;
+                }
+            }
+        } else {
+            call_was_64bit = 0;
+        }
+
+        if (code == RXGEN_OPCODE || !SERVERHAS64BIT(connp)) {
+            if (require_64bit_ops) {
+                osi_Log0(afsd_logp, "Skipping FetchData.  Operation requires FetchData64");
+                code = CM_ERROR_TOOBIG;
+            } else {
+                if (!rxcallp) {
+                    rxconnp = cm_GetRxConn(connp);
+                    rxcallp = rx_NewCall(rxconnp);
+                    rx_PutConnection(rxconnp);
+                }
 
-    if (code == 0) 
-        cm_MergeStatus(scp, &afsStatus, &volSync, up, 0);
-       return code;
+                osi_Log3(afsd_logp, "CALL FetchData scp 0x%p, off 0x%x, size 0x%x",
+                         scp, offsetp->LowPart, data_length);
+
+                code = StartRXAFS_FetchData(rxcallp, &tfid, offsetp->LowPart, data_length);
+
+                SET_SERVERHASNO64BIT(connp);
+            }
+        }
+
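+        /*
+         * At this point either a FetchData64 call has been started on a
+         * 64-bit capable server, a 32-bit FetchData call has been started
+         * as a fallback (and the server flagged as lacking 64-bit support),
+         * or code holds an error such as CM_ERROR_TOOBIG because the
+         * transfer cannot be expressed with 32-bit offsets.
+         */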
+        if (code == 0) {
+            temp  = rx_Read32(rxcallp, &nbytes);
+            if (temp == sizeof(afs_int32)) {
+                nbytes = ntohl(nbytes);
+                FillInt64(length_found, nbytes_hi, nbytes);
+                if (length_found > data_length) {
+                    /*
+                     * prior to 1.4.12 and 1.5.65 the file server would return
+                     * (filesize - offset) if the requested offset was greater than
+                     * the filesize.  The correct return value would have been zero.
+                     * Force a retry by returning an RX_PROTOCOL_ERROR.  If the cause
+                     * is a race between two RPCs issues by this cache manager, the
+                     * correct thing will happen the second time.
+                     */
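+                    /*
+                     * Illustration with hypothetical numbers: a request at
+                     * offset 4096 into a 1000-byte file makes such a server
+                     * report a length of 1000 - 4096.  Read as an unsigned
+                     * count, that value is far larger than data_length,
+                     * which is the condition detected here, and the call
+                     * then delivers zero octets of data.
+                     */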
+                    osi_Log0(afsd_logp, "cm_GetData length_found > data_length");
+                    fs_fetchdata_offset_bug = 1;
+                }
+            } else {
+                osi_Log1(afsd_logp, "cm_GetData rx_Read32 returns %d != 4", temp);
+                code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
+            }
+        }
+        /* for the moment, nbytes_hi will always be 0 if code == 0,
+         * because data_length is a 32-bit quantity. */
+
+        if (code == 0) {
+            /* fill length_found of data from the pipe into the pages.
+             * When we stop, qdp will point at the last page we're
+             * dealing with, and bufferp will tell us where we
+             * stopped.  We'll need this info below when we clear
+             * the remainder of the last page out (and potentially
+             * clear later pages out, if we fetch past EOF).
+             */
+            while (length_found > 0) {
+#ifdef USE_RX_IOVEC
+                struct iovec tiov[RX_MAXIOVECS];
+                afs_int32 tnio, iov, iov_offset;
+
+                temp = rx_Readv(rxcallp, tiov, &tnio, RX_MAXIOVECS, length_found);
+                osi_Log1(afsd_logp, "cm_GetData rx_Readv returns %d", temp);
+                if (temp != length_found && temp < data_length) {
+                    /*
+                     * If the file server returned (filesize - offset),
+                     * then the first rx_Read will return zero octets of data.
+                     * If it does, do not treat it as an error.  Correct the
+                     * length_found and continue as if the file server said
+                     * it was sending us zero octets of data.
+                     */
+                    if (fs_fetchdata_offset_bug && first_read)
+                        length_found = 0;
+                    else
+                        code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
+                    break;
+                }
+
+                iov = 0;
+                iov_offset = 0;
+                rbytes = temp;
+
+                while (rbytes > 0) {
+                    afs_int32 len;
+
+                    osi_assertx(bufferp != NULL, "null cm_buf_t");
+
+                    len = MIN(tiov[iov].iov_len - iov_offset, data_length - buffer_offset);
+                    memcpy(bufferp + buffer_offset, tiov[iov].iov_base + iov_offset, len);
+                    iov_offset += len;
+                    buffer_offset += len;
+                    rbytes -= len;
+
+                    if (iov_offset == tiov[iov].iov_len) {
+                        iov++;
+                        iov_offset = 0;
+                    }
+                }
+
+                /* at least one successful read has now completed */
+                first_read = 0;
+                length_found -= temp;
+#else /* USE_RX_IOVEC */
+                /* assert that there are still more buffers;
+                 * our check above for length_found being less than
+                 * data_length should ensure this.
+                 */
+                osi_assertx(bufferp != NULL, "null cm_buf_t");
+
+                /* read rbytes of data */
+                rbytes = (afs_uint32)(length_found > data_length ? data_length : length_found);
+                temp = rx_Read(rxcallp, bufferp, rbytes);
+                if (temp < rbytes) {
+                    /*
+                     * If the file server returned (filesize - offset),
+                     * then the first rx_Read will return zero octets of data.
+                     * If it does, do not treat it as an error.  Correct the
+                     * length_found and continue as if the file server said
+                     * it was sending us zero octets of data.
+                     */
+                    if (fs_fetchdata_offset_bug && first_read)
+                        length_found = 0;
+                    else
+                        code = (rxcallp->error < 0) ? rxcallp->error : RX_PROTOCOL_ERROR;
+                    break;
+                }
+                first_read = 0;
+
+                /* and adjust the buffer pointer and counters */
+                bufferp += temp;
+                length_found -= temp;
+#endif /* USE_RX_IOVEC */
+            }
+
+            /* zero out remainder of last pages, in case we are
+             * fetching past EOF.  We were fetching an integral #
+             * of pages, but stopped, potentially in the middle of
+             * a page.  Zero the remainder of that page, and then
+             * all of the rest of the pages.
+             */
+#ifdef USE_RX_IOVEC
+            rbytes = data_length - buffer_offset;
+            bufferp = datap + buffer_offset;
+#else /* USE_RX_IOVEC */
+            /* bytes fetched */
+            osi_assertx((bufferp - datap) < LONG_MAX, "data >= LONG_MAX");
+            rbytes = (long) (bufferp - datap);
+
+            /* bytes left to zero */
+            rbytes = data_length - rbytes;
+#endif /* USE_RX_IOVEC */
+            if (rbytes != 0)
+                memset(bufferp, 0, rbytes);
+        }
+
+        if (code == 0) {
+            if (call_was_64bit)
+                code = EndRXAFS_FetchData64(rxcallp, &afsStatus, &callback, &volSync);
+            else
+                code = EndRXAFS_FetchData(rxcallp, &afsStatus, &callback, &volSync);
+        } else {
+            if (call_was_64bit)
+                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData64 skipped due to error %d", code);
+            else
+                osi_Log1(afsd_logp, "CALL EndRXAFS_FetchData skipped due to error %d", code);
+        }
+
+        if (rxcallp)
+            code1 = rx_EndCall(rxcallp, code);
+
+        if (code1 == RXKADUNKNOWNKEY)
+            osi_Log0(afsd_logp, "CALL EndCall returns RXKADUNKNOWNKEY");
+
+        /* If we are avoiding a file server bug, ignore the error state
+         * (-451 is RXGEN_CC_UNMARSHAL). */
+        if (fs_fetchdata_offset_bug && first_read && length_found == 0 && code == -451) {
+            /* Clone the current status info and clear the error state */
+            scp_locked = cm_CloneStatus(scp, userp, scp_locked, &afsStatus, &volSync);
+            if (scp_locked) {
+                lock_ReleaseWrite(&scp->rw);
+                scp_locked = 0;
+            }
+            code = 0;
+        /* Prefer the error value from FetchData over rx_EndCall */
+        } else if (code == 0 && code1 != 0)
+            code = code1;
+        osi_Log0(afsd_logp, "CALL FetchData DONE");
+
+    } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync, NULL, NULL, code));
+
+  fetchingcompleted:
+    code = cm_MapRPCError(code, reqp);
+
+    if (!scp_locked)
+        lock_ObtainWrite(&scp->rw);
+
+    if (code == 0)
+        cm_MergeStatus(NULL, scp, &afsStatus, &volSync, userp, reqp, CM_MERGEFLAG_FETCHDATA);
+
+    return code;
 }
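+
+/*
+ * Illustrative sketch: a hypothetical helper (not taken from the cache
+ * manager sources) showing one way a caller might use cm_GetData() to read
+ * the first 4KB of a file into a flat buffer.  It assumes scp->rw is
+ * already held write-locked and that valid userp and reqp structures are
+ * in hand, per the contract documented above cm_GetData().
+ */
+static long
+cm_GetDataUsageSketch(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
+{
+    char chunk[4096];           /* caller-supplied flat buffer */
+    osi_hyper_t offset;
+    long code;
+
+    /* read from the beginning of the file */
+    offset.HighPart = 0;
+    offset.LowPart = 0;
+
+    /* scp->rw is dropped around the RPC and write-locked again on return */
+    code = cm_GetData(scp, &offset, chunk, (int) sizeof(chunk), userp, reqp);
+    if (code == 0) {
+        /* chunk[] now holds up to 4KB of file data; any bytes past the
+         * end of the file have been zero-filled by cm_GetData. */
+    }
+    return code;
+}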