#include "afs/kauth.h"
#include "afs/kautils.h"
#include "afs/afsutil.h"
+#include "afs/afs_bypasscache.h"
#include "rx/rx_globals.h"
#include "afsd/afsd.h"
}
+/*
+ * uafs_pread_nocache -- read len bytes at absolute offset from the open
+ * file fd, bypassing the AFS client cache.  Thin wrapper that takes the
+ * global AFS lock around the _r worker.
+ * Returns the number of bytes read, or -1 with errno set.
+ */
int
+uafs_pread_nocache(int fd, char *buf, int len, off_t offset)
+{
+ int retval;
+ AFS_GLOCK();
+ retval = uafs_pread_nocache_r(fd, buf, len, offset);
+ AFS_GUNLOCK();
+ return retval;
+}
+
+/*
+ * uafs_pread_nocache_r -- cache-bypassing positional read; GLOCK-held
+ * variant of uafs_pread_nocache().
+ *
+ * fd     -- index into afs_FileTable for an open file
+ * buf    -- destination buffer, at least len bytes
+ * len    -- number of bytes to read
+ * offset -- absolute file offset to read from
+ *
+ * Returns the number of bytes actually read (len minus the residual
+ * left in the uio), or -1 with errno set on failure.
+ */
+int
+uafs_pread_nocache_r(int fd, char *buf, int len, off_t offset)
+{
+ int code;
+ struct iovec iov[1];
+ struct usr_vnode *fileP;
+ struct nocache_read_request *bparms;
+ struct usr_uio uio;
+
+ /*
+ * Make sure this is an open file.
+ * NOTE(review): fd is not range-checked before indexing
+ * afs_FileTable -- assumes callers pass a valid descriptor; confirm.
+ */
+ fileP = afs_FileTable[fd];
+ if (fileP == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
+ /* these get freed in PrefetchNoCache, so... */
+ /* NOTE(review): afs_osi_Alloc() results are not checked for NULL;
+ * presumably this port aborts on allocation failure -- confirm. */
+ bparms = afs_osi_Alloc(sizeof(struct nocache_read_request));
+ bparms->areq = afs_osi_Alloc(sizeof(struct vrequest));
+
+ afs_InitReq(bparms->areq, get_user_struct()->u_cred);
+
+ /* auio points at stack storage: only safe if afs_PrefetchNoCache()
+ * finishes with the uio before this frame returns -- presumably the
+ * UKERNEL fetch path is synchronous; verify. */
+ bparms->auio = &uio;
+ bparms->offset = offset;
+ bparms->length = len;
+
+ /*
+ * set up the uio buffer: one iovec covering the caller's buffer
+ */
+ iov[0].iov_base = buf;
+ iov[0].iov_len = len;
+ uio.uio_iov = &iov[0];
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = offset;
+ uio.uio_segflg = 0;
+ uio.uio_fmode = FREAD;
+ uio.uio_resid = len;
+
+ /*
+ * do the read
+ */
+ code = afs_PrefetchNoCache(VTOAFS(fileP), get_user_struct()->u_cred,
+ bparms);
+
+ if (code) {
+ /* code is an AFS error number stored directly into errno */
+ errno = code;
+ return -1;
+ }
+
+ /* NOTE(review): unlike POSIX pread(), this advances the tracked file
+ * offset to the end of the transfer -- confirm this is intended. */
+ afs_FileOffsets[fd] = uio.uio_offset;
+ return (len - uio.uio_resid);
+}
+
+int
uafs_pread(int fd, char *buf, int len, off_t offset)
{
int retval;
#define BOP_FETCH 1 /* parm1 is chunk to get */
#define BOP_STORE 2 /* parm1 is chunk to store */
#define BOP_PATH 3 /* parm1 is path, parm2 is chunk to fetch */
-
-#if defined(AFS_CACHE_BYPASS)
#define BOP_FETCH_NOCACHE 4 /* parms are: vnode ptr, offset, segment ptr, addr, cred ptr */
-#endif
#ifdef AFS_DARWIN_ENV
#define BOP_MOVE 5 /* ptr1 afs_uspc_param ptr2 sname ptr3 dname */
#endif
/*... to be continued ... */
-#if defined(AFS_CACHE_BYPASS)
/* vcache (file) cachingStates bits */
#define FCSDesireBypass 0x1 /* This file should bypass the cache */
#define FCSBypass 0x2 /* This file is currently NOT being cached */
* lock vcache (it's already locked) */
#define TRANSSetManualBit 0x4 /* The Transition routine should set FCSManuallySet so that
* filename checking does not override pioctl requests */
-#endif /* AFS_CACHE_BYPASS */
#define CPSIZE 2
#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
short flockCount; /* count of flock readers, or -1 if writer */
char mvstat; /* 0->normal, 1->mt pt, 2->root. */
-#if defined(AFS_CACHE_BYPASS)
- char cachingStates; /* Caching policies for this file */
- afs_uint32 cachingTransitions; /* # of times file has flopped between caching and not */
+ char cachingStates; /* Caching policies for this file */
+ afs_uint32 cachingTransitions; /* # of times file has flopped between caching and not */
+
#if defined(AFS_LINUX24_ENV)
- off_t next_seq_offset; /* Next sequential offset (used by prefetch/readahead) */
-#else
- off_t next_seq_blk_offset; /* accounted in blocks for Solaris & IRIX */
-#endif
+ off_t next_seq_offset; /* Next sequential offset (used by prefetch/readahead) */
+#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI65_ENV)
+ off_t next_seq_blk_offset; /* accounted in blocks for Solaris & IRIX */
#endif
#if defined(AFS_SUN5_ENV)
#include <afsconfig.h>
#include "afs/param.h"
-
-#if defined(AFS_CACHE_BYPASS) && defined(AFS_LINUX24_ENV)
-
+#if defined(AFS_CACHE_BYPASS) || defined(UKERNEL)
#include "afs/afs_bypasscache.h"
/*
* afs_PrefetchNoCache, all of the pages they've been passed need
* to be unlocked.
*/
+#ifdef UKERNEL
+/*
+ * UKERNEL (user-space library) build: there is no kernel page cache, so
+ * a "page" travelling through the bypass path is just a caller-supplied
+ * memory buffer.  No mapping, locking or refcounting is required.
+ */
+typedef void * bypass_page_t;
+
+/* copy_page: fill the remainder of the current "page" (from pageoff to
+ * PAGE_CACHE_SIZE) out of the current rx iovec.
+ * copy_pages: copy the remainder of the current rx iovec (from iovoff)
+ * into the "page" at pageoff.
+ * NOTE(review): unlike the kernel variants of these macros, the UKERNEL
+ * ones also decrement auio->uio_resid -- presumably the UKERNEL caller
+ * tracks transfer progress through resid; confirm against callers. */
+#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio) \
+ do { \
+ memcpy(((char *)pp) + pageoff, \
+ ((char *)rxiov[iovno].iov_base) + iovoff, \
+ PAGE_CACHE_SIZE - pageoff); \
+ auio->uio_resid -= (PAGE_CACHE_SIZE - pageoff); \
+ } while(0)
+
+#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio) \
+ do { \
+ memcpy(((char *)pp) + pageoff, \
+ ((char *)rxiov[iovno].iov_base) + iovoff, \
+ rxiov[iovno].iov_len - iovoff); \
+ auio->uio_resid -= (rxiov[iovno].iov_len - iovoff); \
+ } while(0)
+
+/* No page locking or refcounting exists in UKERNEL: these are no-ops. */
+#define unlock_and_release_pages(auio)
+#define release_full_page(pp)
+#else
+typedef struct page * bypass_page_t;
+
+#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio) \
+ do { \
+ char *address; \
+ address = kmap_atomic(pp, KM_USER0); \
+ memcpy(address + pageoff, \
+ (char *)(rxiov[iovno].iov_base) + iovoff, \
+ PAGE_CACHE_SIZE - pageoff); \
+ kunmap_atomic(address, KM_USER0); \
+ } while(0)
+
+#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio) \
+ do { \
+ char *address; \
+ address = kmap_atomic(pp, KM_USER0); \
+ memcpy(address + pageoff, \
+ (char *)(rxiov[iovno].iov_base) + iovoff, \
+ rxiov[iovno].iov_len - iovoff); \
+ kunmap_atomic(address, KM_USER0); \
+ } while(0)
+
+
#define unlock_and_release_pages(auio) \
do { \
struct iovec *ciov; \
- struct page *pp; \
+ bypass_page_t pp; \
afs_int32 iovmax; \
afs_int32 iovno = 0; \
ciov = auio->uio_iov; \
iovmax = auio->uio_iovcnt - 1; \
- pp = (struct page*) ciov->iov_base; \
+ pp = (bypass_page_t) ciov->iov_base; \
while(1) { \
if (pp) { \
if (PageLocked(pp)) \
if(iovno > iovmax) \
break; \
ciov = (auio->uio_iov + iovno); \
- pp = (struct page*) ciov->iov_base; \
+ pp = (bypass_page_t) ciov->iov_base; \
} \
} while(0)
+#define release_full_page(pp) \
+ do { \
+ /* this is appropriate when no caller intends to unlock \
+ * and release the page */ \
+ SetPageUptodate(pp); \
+ if(PageLocked(pp)) \
+ unlock_page(pp); \
+ else \
+ afs_warn("afs_NoCacheFetchProc: page not locked!\n"); \
+ put_page(pp); /* decrement refcount */ \
+ } while(0)
+
+#endif
+
/* no-cache prefetch routine */
static afs_int32
afs_NoCacheFetchProc(struct rx_call *acall,
int moredata, iovno, iovoff, iovmax, result, locked;
struct iovec *ciov;
struct iovec *rxiov;
- int nio;
- struct page *pp;
- char *address;
+ int nio = 0;
+ bypass_page_t pp;
int curpage, bytes;
int pageoff;
rxiov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
ciov = auio->uio_iov;
- pp = (struct page*) ciov->iov_base;
+ pp = (bypass_page_t) ciov->iov_base;
iovmax = auio->uio_iovcnt - 1;
iovno = iovoff = result = 0;
for (curpage = 0; curpage <= iovmax; curpage++) {
pageoff = 0;
- while (pageoff < 4096) {
+ /* properly, this should track uio_resid, not a fixed page size! */
+ while (pageoff < PAGE_CACHE_SIZE) {
/* If no more iovs, issue new read. */
if (iovno >= nio) {
COND_GUNLOCK(locked);
length -= bytes;
iovno = 0;
}
- pp = (struct page *)auio->uio_iov[curpage].iov_base;
+ pp = (bypass_page_t)auio->uio_iov[curpage].iov_base;
if (pageoff + (rxiov[iovno].iov_len - iovoff) <= PAGE_CACHE_SIZE) {
/* Copy entire (or rest of) current iovec into current page */
- if (pp) {
- address = kmap_atomic(pp, KM_USER0);
- memcpy(address + pageoff, rxiov[iovno].iov_base + iovoff,
- rxiov[iovno].iov_len - iovoff);
- kunmap_atomic(address, KM_USER0);
- }
+ if (pp)
+ copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio);
pageoff += rxiov[iovno].iov_len - iovoff;
iovno++;
iovoff = 0;
} else {
/* Copy only what's needed to fill current page */
- if (pp) {
- address = kmap_atomic(pp, KM_USER0);
- memcpy(address + pageoff, rxiov[iovno].iov_base + iovoff,
- PAGE_CACHE_SIZE - pageoff);
- kunmap_atomic(address, KM_USER0);
- }
+ if (pp)
+ copy_page(pp, pageoff, rxiov, iovno, iovoff, auio);
iovoff += PAGE_CACHE_SIZE - pageoff;
pageoff = PAGE_CACHE_SIZE;
}
+
/* we filled a page, or this is the last page. conditionally release it */
if (pp && ((pageoff == PAGE_CACHE_SIZE && release_pages)
- || (length == 0 && iovno >= nio))) {
- /* this is appropriate when no caller intends to unlock
- * and release the page */
- SetPageUptodate(pp);
- if(PageLocked(pp))
- unlock_page(pp);
- else
- afs_warn("afs_NoCacheFetchProc: page not locked!\n");
- put_page(pp); /* decrement refcount */
- }
+ || (length == 0 && iovno >= nio)))
+ release_full_page(pp);
+
if (length == 0 && iovno >= nio)
goto done;
}
osi_Free(areq, sizeof(struct vrequest));
osi_Free(tcallspec, sizeof(struct tlocal1));
- osi_Free(iovecp, auio->uio_iovcnt * sizeof(struct iovec));
osi_Free(bparms, sizeof(struct nocache_read_request));
+#ifndef UKERNEL
+ /* in UKERNEL, the "pages" are passed in */
+ /* free iovecp BEFORE auio: the length expression reads
+ * auio->uio_iovcnt, so freeing auio first is a use-after-free */
+ osi_Free(iovecp, auio->uio_iovcnt * sizeof(struct iovec));
osi_Free(auio, sizeof(struct uio));
+#endif
return code;
}
-
-#endif /* AFS_CACHE_BYPASS && AFS_LINUX24_ENV */
+#endif