#include <sys/adspace.h> /* for vm_att(), vm_det() */
#endif
-
+#if defined(AFS_CACHE_BYPASS)
+#include "afs/afs_bypasscache.h"
+#endif /* defined(AFS_CACHE_BYPASS) */
/* background request queue size */
afs_lock_t afs_xbrs; /* lock for brs */
static int brsInit = 0;
afs_int32 afs_CheckServerDaemonStarted = 0;
#ifndef DEFAULT_PROBE_INTERVAL
-#define DEFAULT_PROBE_INTERVAL 180 /* default to 3 min */
+#define DEFAULT_PROBE_INTERVAL 30 /* default to 30 seconds */
#endif
afs_int32 afs_probe_interval = DEFAULT_PROBE_INTERVAL;
afs_int32 afs_probe_all_interval = 600;
afs_int32 afs_nat_probe_interval = 60;
+afs_int32 afs_preCache = 0;
#define PROBE_WAIT() (1000 * (afs_probe_interval - ((afs_random() & 0x7fffffff) \
% (afs_probe_interval/2))))
}
afs_CheckServerDaemonStarted = 0;
}
-#define RECURSIVE_VFS_CONTEXT 1
-#if RECURSIVE_VFS_CONTEXT
+
extern int vfs_context_ref;
-#else
-#define vfs_context_ref 1
-#endif
+
void
afs_Daemon(void)
{
osi_Panic("vfs context already initialized");
while (afs_osi_ctxtp && vfs_context_ref)
afs_osi_Sleep(&afs_osi_ctxtp);
-#if RECURSIVE_VFS_CONTEXT
if (afs_osi_ctxtp && !vfs_context_ref)
vfs_context_rele(afs_osi_ctxtp);
-#endif
afs_osi_ctxtp = vfs_context_create(NULL);
afs_osi_ctxtp_initialized = 1;
#endif
afs_FlushReclaimedVcaches();
ReleaseWriteLock(&afs_xvcache);
afs_FlushActiveVcaches(1); /* keep flocks held & flush nfs writes */
+#if 0
#ifdef AFS_DISCON_ENV
afs_StoreDirtyVcaches();
#endif
+#endif
afs_CheckRXEpoch();
last1MinCheck = now;
}
afs_rootFid.Cell = localcell;
if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
&& afs_globalVp) {
- struct vcache *tvc = afs_globalVp;
/* If we had a root fid before and it changed location we reset
* the afs_globalVp so that it will be reevaluated.
* Just decrement the reference count. This only occurs during
{
register struct dcache *tdc;
register struct vcache *tvc;
- afs_size_t offset, len;
+ afs_size_t offset, len, abyte, totallen = 0;
struct vrequest treq;
AFS_STATCNT(BPrefetch);
if ((len = afs_InitReq(&treq, ab->cred)))
return;
+ abyte = ab->size_parm[0];
tvc = ab->vc;
- tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
- if (tdc) {
- afs_PutDCache(tdc);
- }
+ do {
+ tdc = afs_GetDCache(tvc, abyte, &treq, &offset, &len, 1);
+ if (tdc) {
+ afs_PutDCache(tdc);
+ }
+ abyte+=len;
+ totallen += len;
+ } while ((totallen < afs_preCache) && tdc && (len > 0));
/* now, dude may be waiting for us to clear DFFetchReq bit; do so. Can't
* use tdc from GetDCache since afs_GetDCache may fail, but someone may
* be waiting for our wakeup anyway.
}
}
+#if defined(AFS_CACHE_BYPASS)
+#if 1 /* XXX Matt debugging */
+static
+#endif
+void
+BPrefetchNoCache(register struct brequest *ab)
+{
+ struct vrequest treq;
+ afs_size_t len;
+
+ if ((len = afs_InitReq(&treq, ab->cred)))
+ return;
+
+#ifndef UKERNEL
+ /* OS-specific prefetch routine */
+ afs_PrefetchNoCache(ab->vc, ab->cred, (struct nocache_read_request *) ab->ptr_parm[0]);
+#else
+#warning Cache-bypass code path not implemented in UKERNEL
+#endif
+}
+#endif
static void
BStore(register struct brequest *ab)
if (vcp->v.v_gnode->gn_mwrcnt) {
afs_offs_t newlength =
(afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
- if (vcp->m.Length < newlength) {
+ if (vcp->f.m.Length < newlength) {
afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
__LINE__, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(vcp->m.Length),
+ ICL_HANDLE_OFFSET(vcp->f.m.Length),
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
- vcp->m.Length = newlength;
+ vcp->f.m.Length = newlength;
}
}
ReleaseWriteLock(&vcp->lock);
tb->opcode);
if (tb->opcode == BOP_FETCH)
BPrefetch(tb);
+#if defined(AFS_CACHE_BYPASS)
+ else if (tb->opcode == BOP_FETCH_NOCACHE)
+ BPrefetchNoCache(tb);
+#endif
else if (tb->opcode == BOP_STORE)
BStore(tb);
else if (tb->opcode == BOP_PATH)