#ifdef UKERNEL
typedef void * bypass_page_t;
-#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio) \
- do { \
- memcpy(((char *)pp) + pageoff, \
- ((char *)rxiov[iovno].iov_base) + iovoff, \
- PAGE_CACHE_SIZE - pageoff); \
- auio->uio_resid -= (PAGE_CACHE_SIZE - pageoff); \
- } while(0)
-
-#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio) \
- do { \
- memcpy(((char *)pp) + pageoff, \
- ((char *)rxiov[iovno].iov_base) + iovoff, \
- rxiov[iovno].iov_len - iovoff); \
- auio->uio_resid -= (rxiov[iovno].iov_len - iovoff); \
- } while(0)
-
#define unlock_and_release_pages(auio)
-#define release_full_page(pp)
+#define release_full_page(pp, pageoff)
+
#else
typedef struct page * bypass_page_t;
-#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio) \
- do { \
- char *address; \
- address = kmap_atomic(pp, KM_USER0); \
- memcpy(address + pageoff, \
- (char *)(rxiov[iovno].iov_base) + iovoff, \
- PAGE_CACHE_SIZE - pageoff); \
- kunmap_atomic(address, KM_USER0); \
- } while(0)
-
-#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio) \
- do { \
- char *address; \
- address = kmap_atomic(pp, KM_USER0); \
- memcpy(address + pageoff, \
- (char *)(rxiov[iovno].iov_base) + iovoff, \
- rxiov[iovno].iov_len - iovoff); \
- kunmap_atomic(address, KM_USER0); \
- } while(0)
-
-
#define unlock_and_release_pages(auio) \
do { \
struct iovec *ciov; \
} \
} while(0)
-#define release_full_page(pp) \
+#define release_full_page(pp, pageoff) \
do { \
/* this is appropriate when no caller intends to unlock \
* and release the page */ \
afs_warn("afs_NoCacheFetchProc: page not locked!\n"); \
put_page(pp); /* decrement refcount */ \
} while(0)
+#endif
+/*
+ * afs_bypass_copy_page: copy data from an rx-supplied receive iovec into
+ * a destination "page" of the caller's uio.  Consolidates the former
+ * copy_page()/copy_pages() macro pair (removed above) into one function
+ * that handles both the UKERNEL (plain buffer) and kernel (struct page,
+ * kmap_atomic) cases.
+ *
+ * pp      - destination page; bypass_page_t is void* under UKERNEL and
+ *           struct page* otherwise (see typedefs above)
+ * pageoff - byte offset into the destination page at which to start
+ * rxiov   - array of source iovecs received from rx
+ * iovno   - index of the current source iovec; iovoff is the offset
+ *           already consumed within it
+ * auio    - caller's uio; curiov indexes the destination iovec whose
+ *           length bounds the copy in the non-partial case
+ * partial - nonzero: copy the remainder of the source iovec
+ *           (rxiov[iovno].iov_len - iovoff); zero: copy only enough to
+ *           fill the destination iovec from pageoff to its end
+ *
+ * NOTE(review): no bounds cross-check between the two length formulas is
+ * done here; callers are responsible for choosing 'partial' so that dolen
+ * fits both the source iovec and the destination page -- confirm at the
+ * call sites in afs_NoCacheFetchProc.
+ */
+static void
+afs_bypass_copy_page(bypass_page_t pp, int pageoff, struct iovec *rxiov,
+	int iovno, int iovoff, struct uio *auio, int curiov, int partial)
+{
+    char *address;
+    int dolen;
+
+    if (partial)
+	dolen = rxiov[iovno].iov_len - iovoff;
+    else
+	dolen = auio->uio_iov[curiov].iov_len - pageoff;
+
+    /* Map the destination page.  Under UKERNEL the "page" is already a
+     * directly addressable buffer; in the kernel it must be temporarily
+     * mapped, and newer kernels drop the KM_USER0 slot argument. */
+#if !defined(UKERNEL)
+# if defined(KMAP_ATOMIC_TAKES_NO_KM_TYPE)
+    address = kmap_atomic(pp);
+# else
+    address = kmap_atomic(pp, KM_USER0);
+# endif
+#else
+    address = pp;
+#endif
+    memcpy(address + pageoff, (char *)(rxiov[iovno].iov_base) + iovoff, dolen);
+    /* Unmap only in the kernel case; the closing #endif below is the
+     * pre-existing context line reused by this patch. */
+#if !defined(UKERNEL)
+# if defined(KMAP_ATOMIC_TAKES_NO_KM_TYPE)
+    kunmap_atomic(address);
+# else
+    kunmap_atomic(address, KM_USER0);
+# endif
#endif
+}
/* no-cache prefetch routine */
static afs_int32
code = rx_Read(acall, (char *)&length, sizeof(afs_int32));
COND_RE_GLOCK(locked);
if (code != sizeof(afs_int32)) {
- result = 0;
+ result = EIO;
afs_warn("Preread error. code: %d instead of %d\n",
code, (int)sizeof(afs_int32));
unlock_and_release_pages(auio);
for (curpage = 0; curpage <= iovmax; curpage++) {
pageoff = 0;
/* properly, this should track uio_resid, not a fixed page size! */
- while (pageoff < PAGE_CACHE_SIZE) {
+ while (pageoff < auio->uio_iov[curpage].iov_len) {
/* If no more iovs, issue new read. */
if (iovno >= nio) {
COND_GUNLOCK(locked);
COND_RE_GLOCK(locked);
if (bytes < 0) {
afs_warn("afs_NoCacheFetchProc: rx_Read error. Return code was %d\n", bytes);
- result = 0;
+ result = bytes;
unlock_and_release_pages(auio);
goto done;
} else if (bytes == 0) {
- result = 0;
+ /* we failed to read the full length */
+ result = EIO;
afs_warn("afs_NoCacheFetchProc: rx_Read returned zero. Aborting.\n");
unlock_and_release_pages(auio);
goto done;
}
- length -= bytes;
+ size -= bytes;
iovno = 0;
}
pp = (bypass_page_t)auio->uio_iov[curpage].iov_base;
- if (pageoff + (rxiov[iovno].iov_len - iovoff) <= PAGE_CACHE_SIZE) {
+ if (pageoff + (rxiov[iovno].iov_len - iovoff) <= auio->uio_iov[curpage].iov_len) {
/* Copy entire (or rest of) current iovec into current page */
if (pp)
- copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio);
+ afs_bypass_copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage, 0);
+ length -= (rxiov[iovno].iov_len - iovoff);
pageoff += rxiov[iovno].iov_len - iovoff;
iovno++;
iovoff = 0;
} else {
/* Copy only what's needed to fill current page */
if (pp)
- copy_page(pp, pageoff, rxiov, iovno, iovoff, auio);
- iovoff += PAGE_CACHE_SIZE - pageoff;
- pageoff = PAGE_CACHE_SIZE;
+ afs_bypass_copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage, 1);
+ length -= (auio->uio_iov[curpage].iov_len - pageoff);
+ iovoff += auio->uio_iov[curpage].iov_len - pageoff;
+ pageoff = auio->uio_iov[curpage].iov_len;
}
/* we filled a page, or this is the last page. conditionally release it */
- if (pp && ((pageoff == PAGE_CACHE_SIZE && release_pages)
- || (length == 0 && iovno >= nio)))
- release_full_page(pp);
+ if (pp && ((pageoff == auio->uio_iov[curpage].iov_len &&
+ release_pages) || (length == 0 && iovno >= nio)))
+ release_full_page(pp, pageoff);
if (length == 0 && iovno >= nio)
goto done;
/* the reciever will free this */
areq = osi_Alloc(sizeof(struct vrequest));
- if (avc && avc->vc_error) {
+ if (avc->vc_error) {
code = EIO;
afs_warn("afs_ReadNoCache VCache Error!\n");
goto cleanup;
struct nocache_read_request *bparms)
{
struct uio *auio;
+#ifndef UKERNEL
struct iovec *iovecp;
+#endif
struct vrequest *areq;
afs_int32 code = 0;
struct rx_connection *rxconn;
auio = bparms->auio;
areq = bparms->areq;
+#ifndef UKERNEL
iovecp = auio->uio_iov;
+#endif
- tcallspec = (struct tlocal1 *) osi_Alloc(sizeof(struct tlocal1));
+ tcallspec = osi_Alloc(sizeof(struct tlocal1));
do {
tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK /* ignored */, &rxconn);
if (tc) {
* Copy appropriate fields into vcache
*/
- afs_ProcessFS(avc, &tcallspec->OutStatus, areq);
+ if (!code)
+ afs_ProcessFS(avc, &tcallspec->OutStatus, areq);
osi_Free(areq, sizeof(struct vrequest));
osi_Free(tcallspec, sizeof(struct tlocal1));
osi_Free(bparms, sizeof(struct nocache_read_request));
#ifndef UKERNEL
/* in UKERNEL, the "pages" are passed in */
- osi_Free(auio, sizeof(struct uio));
osi_Free(iovecp, auio->uio_iovcnt * sizeof(struct iovec));
+ osi_Free(auio, sizeof(struct uio));
#endif
return code;
}