libafs: update uio resid in bypasscache
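
Replace the duplicated copy_page()/copy_pages() macros (one pair for
UKERNEL, one for Linux) with a single afs_bypass_copy_page() helper,
and size every copy by the destination iovec's iov_len rather than a
fixed PAGE_CACHE_SIZE. Decrement auio->uio_resid as rx_Read returns
data, return EIO (or the rx_Read error code) instead of 0 on short
reads, pass the page offset to release_full_page(), and only call
afs_ProcessFS() when the fetch succeeded. Also widen
cache_bypass_threshold to afs_size_t and drop a redundant NULL check
on avc in afs_ReadNoCache().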
diff --git a/src/afs/afs_bypasscache.c b/src/afs/afs_bypasscache.c
index b887289..2e36d00 100644
 
 
 int cache_bypass_strategy   =  NEVER_BYPASS_CACHE;
-int cache_bypass_threshold  =          AFS_CACHE_BYPASS_DISABLED; /* file size > threshold triggers bypass */
+afs_size_t cache_bypass_threshold  =   AFS_CACHE_BYPASS_DISABLED; /* file size > threshold triggers bypass */
 int cache_bypass_prefetch = 1; /* Should we do prefetching? */
 
 extern afs_rwlock_t afs_xcbhash;
@@ -270,48 +270,12 @@ done:
 #ifdef UKERNEL
 typedef void * bypass_page_t;
 
-#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio)     \
-    do { \
-       memcpy(((char *)pp) + pageoff,                 \
-              ((char *)rxiov[iovno].iov_base) + iovoff,        \
-              PAGE_CACHE_SIZE - pageoff);                      \
-       auio->uio_resid -= (PAGE_CACHE_SIZE - pageoff);         \
-    } while(0)
-
-#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio)    \
-    do { \
-       memcpy(((char *)pp) + pageoff,                          \
-              ((char *)rxiov[iovno].iov_base) + iovoff,        \
-              rxiov[iovno].iov_len - iovoff);                  \
-       auio->uio_resid -= (rxiov[iovno].iov_len - iovoff);     \
-    } while(0)
-
 #define unlock_and_release_pages(auio)
-#define release_full_page(pp)
+#define release_full_page(pp, pageoff)
+
 #else
 typedef struct page * bypass_page_t;
 
-#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio)     \
-    do { \
-        char *address;                                         \
-       address = kmap_atomic(pp, KM_USER0); \
-       memcpy(address + pageoff, \
-              (char *)(rxiov[iovno].iov_base) + iovoff,        \
-              PAGE_CACHE_SIZE - pageoff); \
-       kunmap_atomic(address, KM_USER0); \
-    } while(0)
-
-#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio)    \
-    do { \
-        char *address; \
-       address = kmap_atomic(pp, KM_USER0); \
-       memcpy(address + pageoff, \
-              (char *)(rxiov[iovno].iov_base) + iovoff,        \
-              rxiov[iovno].iov_len - iovoff); \
-       kunmap_atomic(address, KM_USER0); \
-    } while(0)
-
-
 #define unlock_and_release_pages(auio) \
     do { \
        struct iovec *ciov;     \
@@ -335,7 +299,7 @@ typedef struct page * bypass_page_t;
        } \
     } while(0)
 
-#define release_full_page(pp) \
+#define release_full_page(pp, pageoff)                 \
     do { \
        /* this is appropriate when no caller intends to unlock \
         * and release the page */ \
@@ -346,8 +310,38 @@ typedef struct page * bypass_page_t;
            afs_warn("afs_NoCacheFetchProc: page not locked!\n"); \
        put_page(pp); /* decrement refcount */ \
     } while(0)
+#endif
 
+static void
+afs_bypass_copy_page(bypass_page_t pp, int pageoff, struct iovec *rxiov,
+       int iovno, int iovoff, struct uio *auio, int curiov, int partial)
+{
+    char *address;
+    int dolen;
+
+    if (partial)
+       dolen = auio->uio_iov[curiov].iov_len - pageoff;
+    else
+       dolen = rxiov[iovno].iov_len - iovoff;
+
+#if !defined(UKERNEL)
+# if defined(KMAP_ATOMIC_TAKES_NO_KM_TYPE)
+    address = kmap_atomic(pp);
+# else
+    address = kmap_atomic(pp, KM_USER0);
+# endif
+#else
+    address = pp;
+#endif
+    memcpy(address + pageoff, (char *)(rxiov[iovno].iov_base) + iovoff, dolen);
+#if !defined(UKERNEL)
+# if defined(KMAP_ATOMIC_TAKES_NO_KM_TYPE)
+    kunmap_atomic(address);
+# else
+    kunmap_atomic(address, KM_USER0);
+# endif
 #endif
+}
 
 /* no-cache prefetch routine */
 static afs_int32
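
For readers skimming the new helper, here is a hedged userspace sketch
of the same full-versus-partial copy decision. It is illustrative
only: plain pointers stand in for the kmap_atomic()/kunmap_atomic()
page mapping, demo_copy_page() and its parameters are invented names,
and the destination iovec array plays the role of auio->uio_iov.

    #include <string.h>
    #include <sys/uio.h>

    /*
     * Toy stand-in for afs_bypass_copy_page(): "partial" selects
     * between filling out the current destination page and draining
     * the rest of the current source iovec, as in the patch.
     */
    static void
    demo_copy_page(char *page, size_t pageoff, const struct iovec *rxiov,
                   int iovno, size_t iovoff, const struct iovec *dst,
                   int curiov, int partial)
    {
        size_t dolen;

        if (partial)
            dolen = dst[curiov].iov_len - pageoff;  /* finish the page */
        else
            dolen = rxiov[iovno].iov_len - iovoff;  /* drain the iovec */

        memcpy(page + pageoff,
               (char *)rxiov[iovno].iov_base + iovoff, dolen);
    }

Folding both macros into one function also means the
KMAP_ATOMIC_TAKES_NO_KM_TYPE compatibility check is written once
rather than once per macro body.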
@@ -379,7 +373,7 @@ afs_NoCacheFetchProc(struct rx_call *acall,
        code = rx_Read(acall, (char *)&length, sizeof(afs_int32));
        COND_RE_GLOCK(locked);
        if (code != sizeof(afs_int32)) {
-           result = 0;
+           result = EIO;
            afs_warn("Preread error. code: %d instead of %d\n",
                code, (int)sizeof(afs_int32));
            unlock_and_release_pages(auio);
@@ -421,7 +415,7 @@ afs_NoCacheFetchProc(struct rx_call *acall,
        for (curpage = 0; curpage <= iovmax; curpage++) {
            pageoff = 0;
            /* properly, this should track uio_resid, not a fixed page size! */
-           while (pageoff < PAGE_CACHE_SIZE) {
+           while (pageoff < auio->uio_iov[curpage].iov_len) {
                /* If no more iovs, issue new read. */
                if (iovno >= nio) {
                    COND_GUNLOCK(locked);
@@ -429,38 +423,42 @@ afs_NoCacheFetchProc(struct rx_call *acall,
                    COND_RE_GLOCK(locked);
                    if (bytes < 0) {
                        afs_warn("afs_NoCacheFetchProc: rx_Read error. Return code was %d\n", bytes);
-                       result = 0;
+                       result = bytes;
                        unlock_and_release_pages(auio);
                        goto done;
                    } else if (bytes == 0) {
-                       result = 0;
+                       /* we failed to read the full length */
+                       result = EIO;
                        afs_warn("afs_NoCacheFetchProc: rx_Read returned zero. Aborting.\n");
                        unlock_and_release_pages(auio);
                        goto done;
                    }
-                   length -= bytes;
+                   size -= bytes;
+                   auio->uio_resid -= bytes;
                    iovno = 0;
                }
                pp = (bypass_page_t)auio->uio_iov[curpage].iov_base;
-               if (pageoff + (rxiov[iovno].iov_len - iovoff) <= PAGE_CACHE_SIZE) {
+               if (pageoff + (rxiov[iovno].iov_len - iovoff) <= auio->uio_iov[curpage].iov_len) {
                    /* Copy entire (or rest of) current iovec into current page */
                    if (pp)
-                     copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio);
+                       afs_bypass_copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage, 0);
+                   length -= (rxiov[iovno].iov_len - iovoff);
                    pageoff += rxiov[iovno].iov_len - iovoff;
                    iovno++;
                    iovoff = 0;
                } else {
                    /* Copy only what's needed to fill current page */
                    if (pp)
-                     copy_page(pp, pageoff, rxiov, iovno, iovoff, auio);
-                   iovoff += PAGE_CACHE_SIZE - pageoff;
-                   pageoff = PAGE_CACHE_SIZE;
+                       afs_bypass_copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage, 1);
+                   length -= (auio->uio_iov[curpage].iov_len - pageoff);
+                   iovoff += auio->uio_iov[curpage].iov_len - pageoff;
+                   pageoff = auio->uio_iov[curpage].iov_len;
                }
 
                /* we filled a page, or this is the last page.  conditionally release it */
-               if (pp && ((pageoff == PAGE_CACHE_SIZE && release_pages)
-                               || (length == 0 && iovno >= nio)))
-                   release_full_page(pp);
+               if (pp && ((pageoff == auio->uio_iov[curpage].iov_len &&
+                           release_pages) || (length == 0 && iovno >= nio)))
+                   release_full_page(pp, pageoff);
 
                if (length == 0 && iovno >= nio)
                    goto done;
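
As a sanity check on the bookkeeping in this loop, the following
self-contained sketch performs the same scatter copy in userspace:
bytes move from an array of source iovecs into an array of destination
iovecs while pageoff/iovoff advance and a resid counter falls.
scatter_copy() is an invented name, and it simplifies one point: the
patch decrements uio_resid as rx_Read returns data, whereas this toy
decrements as bytes land in the destination.

    #include <string.h>
    #include <sys/uio.h>

    /* Illustrative only: no rx_Read, no page locking or release. */
    static size_t
    scatter_copy(struct iovec *dst, int ndst,
                 const struct iovec *src, int nsrc)
    {
        size_t resid = 0, pageoff, iovoff = 0;
        int curpage, iovno = 0;

        for (curpage = 0; curpage < ndst; curpage++)
            resid += dst[curpage].iov_len;

        for (curpage = 0; curpage < ndst && iovno < nsrc; curpage++) {
            pageoff = 0;
            while (pageoff < dst[curpage].iov_len && iovno < nsrc) {
                size_t dolen;

                if (pageoff + (src[iovno].iov_len - iovoff)
                        <= dst[curpage].iov_len)
                    dolen = src[iovno].iov_len - iovoff;    /* whole iovec */
                else
                    dolen = dst[curpage].iov_len - pageoff; /* fill page */

                memcpy((char *)dst[curpage].iov_base + pageoff,
                       (char *)src[iovno].iov_base + iovoff, dolen);
                pageoff += dolen;
                iovoff += dolen;
                resid -= dolen;
                if (iovoff == src[iovno].iov_len) {
                    iovno++;
                    iovoff = 0;
                }
            }
        }
        return resid;  /* what uio_resid would report undelivered */
    }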
@@ -488,7 +486,7 @@ afs_ReadNoCache(struct vcache *avc,
     /* the receiver will free this */
     areq = osi_Alloc(sizeof(struct vrequest));
 
-    if (avc && avc->vc_error) {
+    if (avc->vc_error) {
        code = EIO;
        afs_warn("afs_ReadNoCache VCache Error!\n");
        goto cleanup;
@@ -553,7 +551,9 @@ afs_PrefetchNoCache(struct vcache *avc,
                    struct nocache_read_request *bparms)
 {
     struct uio *auio;
+#ifndef UKERNEL
     struct iovec *iovecp;
+#endif
     struct vrequest *areq;
     afs_int32 code = 0;
     struct rx_connection *rxconn;
@@ -572,9 +572,11 @@ afs_PrefetchNoCache(struct vcache *avc,
 
     auio = bparms->auio;
     areq = bparms->areq;
+#ifndef UKERNEL
     iovecp = auio->uio_iov;
+#endif
 
-    tcallspec = (struct tlocal1 *) osi_Alloc(sizeof(struct tlocal1));
+    tcallspec = osi_Alloc(sizeof(struct tlocal1));
     do {
        tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK /* ignored */, &rxconn);
        if (tc) {
@@ -654,7 +656,8 @@ done:
      * Copy appropriate fields into vcache
      */
 
-    afs_ProcessFS(avc, &tcallspec->OutStatus, areq);
+    if (!code)
+       afs_ProcessFS(avc, &tcallspec->OutStatus, areq);
 
     osi_Free(areq, sizeof(struct vrequest));
     osi_Free(tcallspec, sizeof(struct tlocal1));