cache-bypass explicitly reference pages involved in background i/o
author matt@linuxbox.com <matt@linuxbox.com>
Fri, 18 Jun 2010 18:27:07 +0000 (14:27 -0400)
committer Derrick Brashear <shadow@dementia.org>
Fri, 2 Jul 2010 16:55:39 +0000 (09:55 -0700)
Formerly, we assumed that any page eligible for background i/o could
be effectively pinned by lock_page.  Pursuant to concern expressed by
Simon that such pages might be eligible to be released by the kernel
after readpages returns, call get_page to increment the refcount on
each page before dispatching a background read (and matching put_page
when the i/o is completed).

Change-Id: Ib3a63e56e6b902b4eb5deb769847e7f17ce2c9ff
Reviewed-on: http://gerrit.openafs.org/2215
Reviewed-by: Derrick Brashear <shadow@dementia.org>
Tested-by: Derrick Brashear <shadow@dementia.org>

src/afs/LINUX/osi_vnodeops.c
src/afs/afs_bypasscache.c

index b640693..4beed54 100644 (file)
@@ -1771,6 +1771,13 @@ afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
                lock_page(pp);
            }
 
+            /* increment page refcount--our original design assumed
+             * that locking it would effectively pin it;  protect
+             * ourselves from the possibility that this assumption
+             * is faulty, at low cost (provided we do not fail to
+             * do the corresponding decref on the other side) */
+            get_page(pp);
+
            /* save the page for background map */
             iovecp[page_ix].iov_base = (void*) pp;
 
index 1cd4bc9..66a374a 100644 (file)
@@ -268,7 +268,7 @@ afs_TransitionToCaching(register struct vcache *avc,
  * to be unlocked.
  */
 #if defined(AFS_LINUX24_ENV)
-#define unlock_pages(auio) \
+#define unlock_and_release_pages(auio) \
     do { \
        struct iovec *ciov;     \
        struct page *pp; \
@@ -281,6 +281,7 @@ afs_TransitionToCaching(register struct vcache *avc,
        while(1) { \
            if(pp != NULL && PageLocked(pp)) \
                UnlockPage(pp); \
+            put_page(pp); /* decrement refcount */ \
            iovno++; \
            if(iovno > iovmax) \
                break; \
@@ -291,7 +292,7 @@ afs_TransitionToCaching(register struct vcache *avc,
     } while(0)
 #else
 #ifdef UKERNEL
-#define unlock_pages(auio) \
+#define unlock_and_release_pages(auio) \
         do { } while(0)
 #else
 #error AFS_CACHE_BYPASS not implemented on this platform
@@ -333,7 +334,7 @@ afs_NoCacheFetchProc(register struct rx_call *acall,
            result = 0;
            afs_warn("Preread error. code: %d instead of %d\n",
                code, sizeof(afs_int32));
-           unlock_pages(auio);
+           unlock_and_release_pages(auio);
            goto done;
        } else
            length = ntohl(length);             
@@ -342,7 +343,7 @@ afs_NoCacheFetchProc(register struct rx_call *acall,
            result = EIO;
            afs_warn("Preread error. Got length %d, which is greater than size %d\n",
                     length, size);
-           unlock_pages(auio);
+           unlock_and_release_pages(auio);
            goto done;
        }
                                        
@@ -394,12 +395,12 @@ afs_NoCacheFetchProc(register struct rx_call *acall,
            if (code < 0) {
                afs_warn("afs_NoCacheFetchProc: rx_Read error. Return code was %d\n", code);
                result = 0;
-               unlock_pages(auio);
+               unlock_and_release_pages(auio);
                goto done;
            } else if (code == 0) {
                result = 0;
                afs_warn("afs_NoCacheFetchProc: rx_Read returned zero. Aborting.\n");
-               unlock_pages(auio);
+               unlock_and_release_pages(auio);
                goto done;
            }
            length -= code;
@@ -427,13 +428,14 @@ afs_NoCacheFetchProc(register struct rx_call *acall,
                    /* this is appropriate when no caller intends to unlock
                     * and release the page */
 #ifdef AFS_LINUX24_ENV
-                   SetPageUptodate(pp);
-                   if(PageLocked(pp))
-                       UnlockPage(pp);
-                   else
-                       afs_warn("afs_NoCacheFetchProc: page not locked at iovno %d!\n", iovno);
+                    SetPageUptodate(pp);
+                    if(PageLocked(pp))
+                        UnlockPage(pp);
+                    else
+                        afs_warn("afs_NoCacheFetchProc: page not locked at iovno %d!\n", iovno);
+                    put_page(pp); /* decrement refcount */
 #ifndef AFS_KMAP_ATOMIC
-                   kunmap(pp);
+                    kunmap(pp);
 #endif
 #else
 #ifndef UKERNEL
@@ -523,7 +525,7 @@ cleanup:
      * processed, like unlocking the pages and freeing memory.
      */
 #ifdef AFS_LINUX24_ENV
-    unlock_pages(bparms->auio);
+    unlock_and_release_pages(bparms->auio);
 #else
 #ifndef UKERNEL
 #error AFS_CACHE_BYPASS not implemented on this platform
@@ -622,7 +624,7 @@ afs_PrefetchNoCache(register struct vcache *avc,
                                            bparms->length);
            } else {
                afs_warn("BYPASS: StartRXAFS_FetchData failed: %d\n", code);
-               unlock_pages(auio);
+               unlock_and_release_pages(auio);
                goto done;
            }
            if (code == 0) {
@@ -637,7 +639,7 @@ afs_PrefetchNoCache(register struct vcache *avc,
            afs_warn("BYPASS: No connection.\n");
            code = -1;
 #ifdef AFS_LINUX24_ENV
-           unlock_pages(auio);
+           unlock_and_release_pages(auio);
 #else
 #ifndef UKERNEL
 #error AFS_CACHE_BYPASS not implemented on this platform