#include <sys/adspace.h> /* for vm_att(), vm_det() */
#endif
-
+#if defined(AFS_CACHE_BYPASS)
+#include "afs/afs_bypasscache.h"
+#endif// defined(AFS_CACHE_BYPASS)
/* background request queue size */
afs_lock_t afs_xbrs; /* lock for brs */
static int brsInit = 0;
afs_rootFid.Cell = localcell;
if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
&& afs_globalVp) {
- struct vcache *tvc = afs_globalVp;
/* If we had a root fid before and it changed location we reset
* the afs_globalVp so that it will be reevaluated.
* Just decrement the reference count. This only occurs during
}
}
+#if defined(AFS_CACHE_BYPASS)
+#if 1 /* XXX Matt debugging */
+static
+#endif
+/*
+ * BPrefetchNoCache -- background-daemon handler for BOP_FETCH_NOCACHE
+ * requests: perform a prefetch that bypasses the AFS disk cache.
+ *
+ * ab->vc          - vcache of the file being prefetched
+ * ab->cred        - credentials to perform the fetch with
+ * ab->ptr_parm[0] - struct nocache_read_request describing the read
+ *                   (presumably set up by the caller that queued the
+ *                   brequest -- confirm against the enqueue site)
+ */
+void
+BPrefetchNoCache(register struct brequest *ab)
+{
+    struct vrequest treq;
+    int code;
+
+    /* afs_InitReq returns a non-zero error code on failure; an int is
+     * the right type for it (not afs_size_t, which denotes a length). */
+    if ((code = afs_InitReq(&treq, ab->cred)))
+	return;
+
+#ifndef UKERNEL
+    /* OS-specific prefetch routine */
+    afs_PrefetchNoCache(ab->vc, ab->cred, (struct nocache_read_request *) ab->ptr_parm[0]);
+#else
+#warning Cache-bypass code path not implemented in UKERNEL
+#endif
+}
+#endif /* AFS_CACHE_BYPASS */
static void
BStore(register struct brequest *ab)
if (vcp->v.v_gnode->gn_mwrcnt) {
afs_offs_t newlength =
(afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
- if (vcp->m.Length < newlength) {
+ if (vcp->f.m.Length < newlength) {
afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
__LINE__, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(vcp->m.Length),
+ ICL_HANDLE_OFFSET(vcp->f.m.Length),
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
- vcp->m.Length = newlength;
+ vcp->f.m.Length = newlength;
}
}
ReleaseWriteLock(&vcp->lock);
tb->opcode);
if (tb->opcode == BOP_FETCH)
BPrefetch(tb);
+#if defined(AFS_CACHE_BYPASS)
+ else if (tb->opcode == BOP_FETCH_NOCACHE)
+ BPrefetchNoCache(tb);
+#endif
else if (tb->opcode == BOP_STORE)
BStore(tb);
else if (tb->opcode == BOP_PATH)