int cache_bypass_strategy = NEVER_BYPASS_CACHE;
-int cache_bypass_threshold = AFS_CACHE_BYPASS_DISABLED; /* file size > threshold triggers bypass */
+afs_size_t cache_bypass_threshold = AFS_CACHE_BYPASS_DISABLED; /* file size > threshold triggers bypass */
int cache_bypass_prefetch = 1; /* Should we do prefetching ? */
extern afs_rwlock_t afs_xcbhash;
{
afs_int32 code;
- struct vrequest treq;
int setDesire = 0;
int setManual = 0;
if (aflags & TRANSSetManualBit)
setManual = 1;
-#ifdef AFS_BOZONLOCK_ENV
- afs_BozonLock(&avc->pvnLock, avc); /* Since afs_TryToSmush will do a pvn_vptrunc */
-#else
AFS_GLOCK();
-#endif
ObtainWriteLock(&avc->lock, 925);
/*
/* cg2v, try to store any chunks not written 20071204 */
if (avc->execsOrWriters > 0) {
- code = afs_InitReq(&treq, acred);
- if (!code)
- code = afs_StoreAllSegments(avc, &treq, AFS_SYNC | AFS_LASTSTORE);
+ struct vrequest *treq = NULL;
+
+ code = afs_CreateReq(&treq, acred);
+ if (!code) {
+ code = afs_StoreAllSegments(avc, treq, AFS_SYNC | AFS_LASTSTORE);
+ afs_DestroyReq(treq);
+ }
}
-#if 0
/* also cg2v, don't dequeue the callback */
- ObtainWriteLock(&afs_xcbhash, 956);
- afs_DequeueCallback(avc);
- ReleaseWriteLock(&afs_xcbhash);
-#endif
- avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
+ /* next reference will re-stat */
+ afs_StaleVCacheFlags(avc, AFS_STALEVC_NOCB, CDirty);
/* now find the disk cache entries */
afs_TryToSmush(avc, acred, 1);
- osi_dnlc_purgedp(avc);
if (avc->linkData && !(avc->f.states & CCore)) {
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
done:
ReleaseWriteLock(&avc->lock);
-#ifdef AFS_BOZONLOCK_ENV
- afs_BozonUnlock(&avc->pvnLock, avc);
-#else
AFS_GUNLOCK();
-#endif
}
/*
if (aflags & TRANSSetManualBit)
setManual = 1;
-#ifdef AFS_BOZONLOCK_ENV
- afs_BozonLock(&avc->pvnLock, avc); /* Since afs_TryToSmush will do a pvn_vptrunc */
-#else
AFS_GLOCK();
-#endif
ObtainWriteLock(&avc->lock, 926);
/*
* Someone may have beat us to doing the transition - we had no lock
goto done;
/* Ok, we actually do need to flush */
- ObtainWriteLock(&afs_xcbhash, 957);
- afs_DequeueCallback(avc);
- avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat cache entry */
- ReleaseWriteLock(&afs_xcbhash);
+ /* next reference will re-stat cache entry */
+ afs_StaleVCacheFlags(avc, 0, CDirty);
+
/* now find the disk cache entries */
afs_TryToSmush(avc, acred, 1);
- osi_dnlc_purgedp(avc);
if (avc->linkData && !(avc->f.states & CCore)) {
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
done:
ReleaseWriteLock(&avc->lock);
-#ifdef AFS_BOZONLOCK_ENV
- afs_BozonUnlock(&avc->pvnLock, avc);
-#else
AFS_GUNLOCK();
-#endif
}
/* In the case where there's an error in afs_NoCacheFetchProc or
#ifdef UKERNEL
typedef void * bypass_page_t;
-#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
- do { \
- int dolen = auio->uio_iov[curiov].iov_len - pageoff; \
- memcpy(((char *)pp) + pageoff, \
- ((char *)rxiov[iovno].iov_base) + iovoff, dolen); \
- auio->uio_resid -= dolen; \
- } while(0)
-
-#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
- do { \
- int dolen = rxiov[iovno].iov_len - iovoff; \
- memcpy(((char *)pp) + pageoff, \
- ((char *)rxiov[iovno].iov_base) + iovoff, dolen); \
- auio->uio_resid -= dolen; \
- } while(0)
-
#define unlock_and_release_pages(auio)
#define release_full_page(pp, pageoff)
#else
typedef struct page * bypass_page_t;
-#define copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
- do { \
- char *address; \
- int dolen = auio->uio_iov[curiov].iov_len - pageoff; \
- address = kmap_atomic(pp, KM_USER0); \
- memcpy(address + pageoff, \
- (char *)(rxiov[iovno].iov_base) + iovoff, dolen); \
- kunmap_atomic(address, KM_USER0); \
- } while(0)
-
-#define copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio, curiov) \
- do { \
- char *address; \
- int dolen = rxiov[iovno].iov_len - iovoff; \
- address = kmap_atomic(pp, KM_USER0); \
- memcpy(address + pageoff, \
- (char *)(rxiov[iovno].iov_base) + iovoff, dolen); \
- kunmap_atomic(address, KM_USER0); \
- } while(0)
-
-
#define unlock_and_release_pages(auio) \
do { \
struct iovec *ciov; \
afs_warn("afs_NoCacheFetchProc: page not locked!\n"); \
put_page(pp); /* decrement refcount */ \
} while(0)
+#endif
+/*
+ * afs_bypass_copy_page: copy data from the current rx-supplied iovec into
+ * the caller's page 'pp', replacing the former copy_page/copy_pages macro
+ * pair with one function selected by 'partial':
+ *   partial != 0 -> copy only what fills the rest of the current page
+ *                   (length = auio->uio_iov[curiov].iov_len - pageoff),
+ *   partial == 0 -> copy the remainder of the current rx iovec
+ *                   (length = rxiov[iovno].iov_len - iovoff).
+ *
+ * On UKERNEL builds, pp is already a plain pointer (bypass_page_t is
+ * void *), so no mapping is needed.  On Linux kernel builds, pp is a
+ * struct page * and must be temporarily mapped with kmap_atomic; the
+ * KMAP_ATOMIC_TAKES_NO_KM_TYPE conditional selects the modern one-argument
+ * form vs. the older form that required a KM_USER0 slot argument.
+ *
+ * NOTE(review): unlike the old UKERNEL copy_page/copy_pages macros, this
+ * function does not decrement auio->uio_resid; the caller now accounts for
+ * resid itself (see the "auio->uio_resid -= bytes;" hunk) — confirm all
+ * call sites were updated accordingly.
+ */
+static void
+afs_bypass_copy_page(bypass_page_t pp, int pageoff, struct iovec *rxiov,
+ int iovno, int iovoff, struct uio *auio, int curiov, int partial)
+{
+ char *address;
+ int dolen;
+
+ /* Choose copy length: fill the page (partial) or drain the iovec. */
+ if (partial)
+ dolen = auio->uio_iov[curiov].iov_len - pageoff;
+ else
+ dolen = rxiov[iovno].iov_len - iovoff;
+
+#if !defined(UKERNEL)
+# if defined(KMAP_ATOMIC_TAKES_NO_KM_TYPE)
+ address = kmap_atomic(pp);
+# else
+ address = kmap_atomic(pp, KM_USER0);
+# endif
+#else
+ /* UKERNEL: bypass_page_t is void *, usable directly. */
+ address = pp;
#endif
+ memcpy(address + pageoff, (char *)(rxiov[iovno].iov_base) + iovoff, dolen);
+#if !defined(UKERNEL)
+# if defined(KMAP_ATOMIC_TAKES_NO_KM_TYPE)
+ kunmap_atomic(address);
+# else
+ kunmap_atomic(address, KM_USER0);
+# endif
+#endif
+}
/* no-cache prefetch routine */
static afs_int32
goto done;
}
size -= bytes;
+ auio->uio_resid -= bytes;
iovno = 0;
}
pp = (bypass_page_t)auio->uio_iov[curpage].iov_base;
if (pageoff + (rxiov[iovno].iov_len - iovoff) <= auio->uio_iov[curpage].iov_len) {
/* Copy entire (or rest of) current iovec into current page */
if (pp)
- copy_pages(pp, pageoff, rxiov, iovno, iovoff, auio, curpage);
+ afs_bypass_copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage, 0);
length -= (rxiov[iovno].iov_len - iovoff);
pageoff += rxiov[iovno].iov_len - iovoff;
iovno++;
} else {
/* Copy only what's needed to fill current page */
if (pp)
- copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage);
+ afs_bypass_copy_page(pp, pageoff, rxiov, iovno, iovoff, auio, curpage, 1);
length -= (auio->uio_iov[curpage].iov_len - pageoff);
iovoff += auio->uio_iov[curpage].iov_len - pageoff;
pageoff = auio->uio_iov[curpage].iov_len;
afs_int32 code;
afs_int32 bcnt;
struct brequest *breq;
- struct vrequest *areq;
-
- /* the reciever will free this */
- areq = osi_Alloc(sizeof(struct vrequest));
+ struct vrequest *areq = NULL;
- if (avc && avc->vc_error) {
+ if (avc->vc_error) {
code = EIO;
afs_warn("afs_ReadNoCache VCache Error!\n");
goto cleanup;
}
- if ((code = afs_InitReq(areq, acred))) {
- afs_warn("afs_ReadNoCache afs_InitReq error!\n");
- goto cleanup;
- }
AFS_GLOCK();
- code = afs_VerifyVCache(avc, areq);
+ /* the receiver will free areq */
+ code = afs_CreateReq(&areq, acred);
+ if (code) {
+ afs_warn("afs_ReadNoCache afs_CreateReq error!\n");
+ } else {
+ code = afs_VerifyVCache(avc, areq);
+ if (code) {
+ afs_warn("afs_ReadNoCache Failed to verify VCache!\n");
+ }
+ }
AFS_GUNLOCK();
if (code) {
code = afs_CheckCode(code, areq, 11); /* failed to get it */
- afs_warn("afs_ReadNoCache Failed to verify VCache!\n");
goto cleanup;
}
* processed, like unlocking the pages and freeing memory.
*/
unlock_and_release_pages(bparms->auio);
- osi_Free(areq, sizeof(struct vrequest));
+ AFS_GLOCK();
+ afs_DestroyReq(areq);
+ AFS_GUNLOCK();
osi_Free(bparms->auio->uio_iov,
bparms->auio->uio_iovcnt * sizeof(struct iovec));
osi_Free(bparms->auio, sizeof(struct uio));
iovecp = auio->uio_iov;
#endif
- tcallspec = (struct tlocal1 *) osi_Alloc(sizeof(struct tlocal1));
+ tcallspec = osi_Alloc(sizeof(struct tlocal1));
do {
tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK /* ignored */, &rxconn);
if (tc) {
if (bytes != sizeof(afs_int32)) {
length_hi = 0;
- code = rx_Error(tcall);
COND_GUNLOCK(locked);
- code = rx_EndCall(tcall, code);
+ code = rx_EndCall(tcall, RX_PROTOCOL_ERROR);
COND_RE_GLOCK(locked);
tcall = NULL;
}
} else {
afs_warn("BYPASS: StartRXAFS_FetchData failed: %d\n", code);
unlock_and_release_pages(auio);
+ afs_PutConn(tc, rxconn, SHARED_LOCK);
goto done;
}
if (code == 0) {
if (!code)
afs_ProcessFS(avc, &tcallspec->OutStatus, areq);
- osi_Free(areq, sizeof(struct vrequest));
+ afs_DestroyReq(areq);
osi_Free(tcallspec, sizeof(struct tlocal1));
osi_Free(bparms, sizeof(struct nocache_read_request));
#ifndef UKERNEL