/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
#include <afsconfig.h>
#include "afs/param.h"
-RCSID
- ("$Header$");
#ifdef AFS_AIX51_ENV
#define __FULL_PROTO
#include <sys/adspace.h> /* for vm_att(), vm_det() */
#endif
-
+#if defined(AFS_CACHE_BYPASS)
+#include "afs/afs_bypasscache.h"
+#endif /* AFS_CACHE_BYPASS */
/* background request queue size */
afs_lock_t afs_xbrs; /* lock for brs */
static int brsInit = 0;
struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler;
static int afs_brs_count = 0; /* request counter, to service reqs in order */
-static int rxepoch_checked = 0;
-#define afs_CheckRXEpoch() {if (rxepoch_checked == 0 && rxkad_EpochWasSet) { \
- rxepoch_checked = 1; afs_GCUserData(/* force flag */ 1); } }
-
/* PAG garbage collection */
/* We induce a compile error if param.h does not define AFS_GCPAGS */
afs_int32 afs_gcpags = AFS_GCPAGS;
afs_int32 afs_probe_interval = DEFAULT_PROBE_INTERVAL;
afs_int32 afs_probe_all_interval = 600;
afs_int32 afs_nat_probe_interval = 60;
+afs_int32 afs_preCache = 0;
#define PROBE_WAIT() (1000 * (afs_probe_interval - ((afs_random() & 0x7fffffff) \
% (afs_probe_interval/2))))
last10MinCheck = lastCheck = osi_Time();
while (1) {
if (afs_termState == AFSOP_STOP_CS) {
- afs_termState = AFSOP_STOP_BKG;
+ afs_termState = AFSOP_STOP_TRUNCDAEMON;
afs_osi_Wakeup(&afs_termState);
break;
}
}
/* shutdown check. */
if (afs_termState == AFSOP_STOP_CS) {
- afs_termState = AFSOP_STOP_BKG;
+ afs_termState = AFSOP_STOP_TRUNCDAEMON;
afs_osi_Wakeup(&afs_termState);
break;
}
}
afs_CheckServerDaemonStarted = 0;
}
-#define RECURSIVE_VFS_CONTEXT 1
-#if RECURSIVE_VFS_CONTEXT
+
extern int vfs_context_ref;
-#else
-#define vfs_context_ref 1
-#endif
+
+/* This function always holds the GLOCK whilst it is running. The caller
+ * gets the GLOCK before invoking it, and afs_osi_Sleep drops the GLOCK
+ * whilst we are sleeping, and regains it when we're woken up.
+ */
void
afs_Daemon(void)
{
struct afs_exporter *exporter;
afs_int32 now;
afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
- afs_int32 last1MinCheck;
+ afs_int32 last1MinCheck, last5MinCheck;
afs_uint32 lastCBSlotBump;
char cs_warned = 0;
AFS_STATCNT(afs_Daemon);
- last1MinCheck = last3MinCheck = last60MinCheck = last10MinCheck =
- lastNMinCheck = 0;
afs_rootFid.Fid.Volume = 0;
while (afs_initState < 101)
osi_Panic("vfs context already initialized");
while (afs_osi_ctxtp && vfs_context_ref)
afs_osi_Sleep(&afs_osi_ctxtp);
-#if RECURSIVE_VFS_CONTEXT
if (afs_osi_ctxtp && !vfs_context_ref)
vfs_context_rele(afs_osi_ctxtp);
-#endif
afs_osi_ctxtp = vfs_context_create(NULL);
afs_osi_ctxtp_initialized = 1;
#endif
last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
+ last5MinCheck = now - 150 + ((afs_random() & 0x7fffffff) % 300);
lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
/* start off with afs_initState >= 101 (basic init done) */
if (afs_nfsexporter)
afs_FlushActiveVcaches(0); /* flush NFS writes */
afs_FlushVCBs(1); /* flush queued callbacks */
+
afs_MaybeWakeupTruncateDaemon(); /* free cache space if have too */
rx_CheckPackets(); /* Does RX need more packets? */
afs_FlushReclaimedVcaches();
ReleaseWriteLock(&afs_xvcache);
afs_FlushActiveVcaches(1); /* keep flocks held & flush nfs writes */
-#ifdef AFS_DISCON_ENV
+#if 0
afs_StoreDirtyVcaches();
#endif
- afs_CheckRXEpoch();
last1MinCheck = now;
}
* tickets */
last3MinCheck = now;
}
+
+ if (afsd_dynamic_vcaches && (last5MinCheck + 300 < now)) {
+ /* start with trying to drop us back to our base usage */
+ int anumber = VCACHE_FREE + (afs_vcount - afs_cacheStats);
+
+ if (anumber > 0) {
+ ObtainWriteLock(&afs_xvcache, 734);
+ afs_ShakeLooseVCaches(anumber);
+ ReleaseWriteLock(&afs_xvcache);
+ }
+ last5MinCheck = now;
+ }
+
if (!afs_CheckServerDaemonStarted) {
/* Do the check here if the correct afsd is not installed. */
if (!cs_warned) {
cs_warned = 1;
- printf("Please install afsd with check server daemon.\n");
+ afs_warn("Please install afsd with check server daemon.\n");
}
if (lastNMinCheck + afs_probe_interval < now) {
/* only check down servers */
/* 18285 is because we're trying to divide evenly into 128, that is,
* CBSlotLen, while staying just under 20 seconds. If CBSlotLen
- * changes, should probably change this interval, too.
+ * changes, should probably change this interval, too.
* Some of the preceding actions may take quite some time, so we
* might not want to wait the entire interval */
now = 18285 - (osi_Time() - now);
if (afs_CheckServerDaemonStarted)
afs_termState = AFSOP_STOP_CS;
else
- afs_termState = AFSOP_STOP_BKG;
+ afs_termState = AFSOP_STOP_TRUNCDAEMON;
afs_osi_Wakeup(&afs_termState);
return;
}
afs_rootFid.Cell = localcell;
if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
&& afs_globalVp) {
- struct vcache *tvc = afs_globalVp;
/* If we had a root fid before and it changed location we reset
* the afs_globalVp so that it will be reevaluated.
* Just decrement the reference count. This only occurs during
*/
#ifdef AFS_LINUX20_ENV
{
- struct vrequest treq;
+ struct vrequest *treq = NULL;
struct vattr vattr;
cred_t *credp;
struct dentry *dp;
struct vcache *vcp;
-
+
afs_rootFid.Fid.Volume = volid;
afs_rootFid.Fid.Vnode = 1;
afs_rootFid.Fid.Unique = 1;
-
+
credp = crref();
- if (afs_InitReq(&treq, credp))
+ if (afs_CreateReq(&treq, credp))
goto out;
- vcp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
+ vcp = afs_GetVCache(&afs_rootFid, treq, NULL, NULL);
if (!vcp)
goto out;
afs_getattr(vcp, &vattr, credp);
afs_fill_inode(AFSTOV(vcp), &vattr);
-
+
dp = d_find_alias(AFSTOV(afs_globalVp));
-
+
#if defined(AFS_LINUX24_ENV)
+#if defined(HAVE_DCACHE_LOCK)
spin_lock(&dcache_lock);
+#else
+ spin_lock(&AFSTOV(vcp)->i_lock);
+#endif
#if defined(AFS_LINUX26_ENV)
spin_lock(&dp->d_lock);
#endif
#endif
+#if defined(D_ALIAS_IS_HLIST)
+ hlist_del_init(&dp->d_alias);
+ hlist_add_head(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
+#else
list_del_init(&dp->d_alias);
list_add(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
+#endif
dp->d_inode = AFSTOV(vcp);
#if defined(AFS_LINUX24_ENV)
#if defined(AFS_LINUX26_ENV)
spin_unlock(&dp->d_lock);
#endif
+#if defined(HAVE_DCACHE_LOCK)
spin_unlock(&dcache_lock);
+#else
+ spin_unlock(&AFSTOV(vcp)->i_lock);
+#endif
#endif
dput(dp);
-
+
AFS_FAST_RELE(afs_globalVp);
afs_globalVp = vcp;
out:
crfree(credp);
+ afs_DestroyReq(treq);
}
#else
#ifdef AFS_DARWIN80_ENV
/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
static void
-BPath(register struct brequest *ab)
+BPath(struct brequest *ab)
{
- register struct dcache *tdc = NULL;
+ struct dcache *tdc = NULL;
struct vcache *tvc = NULL;
struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
struct dentry *dp = NULL;
#endif
afs_size_t offset, len;
- struct vrequest treq;
+ struct vrequest *treq = NULL;
afs_int32 code;
AFS_STATCNT(BPath);
- if ((code = afs_InitReq(&treq, ab->cred)))
+ if ((code = afs_CreateReq(&treq, ab->cred))) {
return;
+ }
AFS_GUNLOCK();
#ifdef AFS_LINUX22_ENV
code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &dp);
#endif
AFS_GLOCK();
osi_FreeLargeSpace((char *)ab->ptr_parm[0]); /* free path name buffer here */
- if (code)
+ if (code) {
+ afs_DestroyReq(treq);
return;
+ }
/* now path may not have been in afs, so check that before calling our cache manager */
if (!tvn || !IsAfsVnode(tvn)) {
/* release it and give up */
AFS_RELE(tvn);
#endif
}
+ afs_DestroyReq(treq);
return;
}
tvc = VTOAFS(tvn);
/* here we know its an afs vnode, so we can get the data for the chunk */
- tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
+ tdc = afs_GetDCache(tvc, ab->size_parm[0], treq, &offset, &len, 1);
if (tdc) {
afs_PutDCache(tdc);
}
#else
AFS_RELE(tvn);
#endif
+ afs_DestroyReq(treq);
}
/* size_parm 0 to the fetch is the chunk number,
* size_parm 1 is true iff we should release the dcache entry here.
*/
static void
-BPrefetch(register struct brequest *ab)
+BPrefetch(struct brequest *ab)
{
- register struct dcache *tdc;
- register struct vcache *tvc;
- afs_size_t offset, len;
- struct vrequest treq;
+ struct dcache *tdc;
+ struct vcache *tvc;
+ afs_size_t offset, len, abyte, totallen = 0;
+ struct vrequest *treq = NULL;
+ int code;
AFS_STATCNT(BPrefetch);
- if ((len = afs_InitReq(&treq, ab->cred)))
+ if ((code = afs_CreateReq(&treq, ab->cred)))
return;
+ abyte = ab->size_parm[0];
tvc = ab->vc;
- tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
- if (tdc) {
- afs_PutDCache(tdc);
- }
+ do {
+ tdc = afs_GetDCache(tvc, abyte, treq, &offset, &len, 1);
+ if (tdc) {
+ afs_PutDCache(tdc);
+ }
+ abyte+=len;
+ totallen += len;
+ } while ((totallen < afs_preCache) && tdc && (len > 0));
/* now, dude may be waiting for us to clear DFFetchReq bit; do so. Can't
* use tdc from GetDCache since afs_GetDCache may fail, but someone may
* be waiting for our wakeup anyway.
if (ab->size_parm[1]) {
afs_PutDCache(tdc); /* put this one back, too */
}
+ afs_DestroyReq(treq);
}
#if defined(AFS_CACHE_BYPASS)
/*
 * Background prefetch for a file being read in cache-bypass mode (data
 * is fetched for the reader without populating the AFS disk cache).
 *
 * ab->vc          - vcache of the file to prefetch
 * ab->ptr_parm[0] - nocache_read_request describing the read
 * ab->cred        - credentials to issue the fetch with
 *
 * Runs in a background daemon; errors are silently dropped since the
 * prefetch is strictly best-effort (the foreground reader will fetch
 * on demand if we fail here).
 */
static void
BPrefetchNoCache(struct brequest *ab)
{
    struct vrequest *treq = NULL;

    /* Build a vrequest from the queued credentials; give up quietly if
     * we cannot (e.g. no tokens). */
    if (afs_CreateReq(&treq, ab->cred))
	return;

#ifndef UKERNEL
    /* OS-specific prefetch routine */
    afs_PrefetchNoCache(ab->vc, ab->cred,
			(struct nocache_read_request *) ab->ptr_parm[0]);
#endif
    afs_DestroyReq(treq);
}
#endif /* AFS_CACHE_BYPASS */
static void
-BStore(register struct brequest *ab)
+BStore(struct brequest *ab)
{
- register struct vcache *tvc;
- register afs_int32 code;
- struct vrequest treq;
+ struct vcache *tvc;
+ afs_int32 code;
+ struct vrequest *treq = NULL;
#if defined(AFS_SGI_ENV)
struct cred *tmpcred;
#endif
AFS_STATCNT(BStore);
- if ((code = afs_InitReq(&treq, ab->cred)))
+ if ((code = afs_CreateReq(&treq, ab->cred)))
return;
- code = 0;
tvc = ab->vc;
#if defined(AFS_SGI_ENV)
/*
AFS_RWLOCK((vnode_t *) tvc, 1);
#endif
ObtainWriteLock(&tvc->lock, 209);
- code = afs_StoreOnLastReference(tvc, &treq);
+ code = afs_StoreOnLastReference(tvc, treq);
ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
OSI_SET_CURRENT_CRED(tmpcred);
#endif
/* now set final return code, and wakeup anyone waiting */
if ((ab->flags & BUVALID) == 0) {
- ab->code = afs_CheckCode(code, &treq, 43); /* set final code, since treq doesn't go across processes */
+
+ /* To explain code_raw/code_checkcode:
+ * Anyone that's waiting won't have our treq, so they won't be able to
+ * call afs_CheckCode themselves on the return code we provide here.
+ * But if we give back only the afs_CheckCode value, they won't know
+ * what the "raw" value was. So give back both values, so the waiter
+ * can know the "raw" value for interpreting the value internally, as
+ * well as the afs_CheckCode value to give to the OS. */
+ ab->code_raw = code;
+ ab->code_checkcode = afs_CheckCode(code, treq, 430);
+
+ ab->flags |= BUVALID;
+ if (ab->flags & BUWAIT) {
+ ab->flags &= ~BUWAIT;
+ afs_osi_Wakeup(ab);
+ }
+ }
+ afs_DestroyReq(treq);
+}
+
+static void
+BPartialStore(struct brequest *ab)
+{
+ struct vcache *tvc;
+ afs_int32 code;
+ struct vrequest *treq = NULL;
+ int locked, shared_locked = 0;
+
+ AFS_STATCNT(BStore);
+ if ((code = afs_CreateReq(&treq, ab->cred)))
+ return;
+ tvc = ab->vc;
+ locked = tvc->lock.excl_locked? 1:0;
+ if (!locked)
+ ObtainWriteLock(&tvc->lock, 1209);
+ else if (!(tvc->lock.excl_locked & WRITE_LOCK)) {
+ shared_locked = 1;
+ ConvertSToRLock(&tvc->lock);
+ }
+ code = afs_StoreAllSegments(tvc, treq, AFS_ASYNC);
+ if (!locked)
+ ReleaseWriteLock(&tvc->lock);
+ else if (shared_locked)
+ ConvertSToRLock(&tvc->lock);
+ /* now set final return code, and wakeup anyone waiting */
+ if ((ab->flags & BUVALID) == 0) {
+ /* set final code, since treq doesn't go across processes */
+ ab->code_raw = code;
+ ab->code_checkcode = afs_CheckCode(code, treq, 43);
ab->flags |= BUVALID;
if (ab->flags & BUWAIT) {
ab->flags &= ~BUWAIT;
afs_osi_Wakeup(ab);
}
}
+ afs_DestroyReq(treq);
}
/* release a held request buffer */
void
-afs_BRelease(register struct brequest *ab)
+afs_BRelease(struct brequest *ab)
{
AFS_STATCNT(afs_BRelease);
- MObtainWriteLock(&afs_xbrs, 294);
+ ObtainWriteLock(&afs_xbrs, 294);
if (--ab->refCount <= 0) {
ab->flags = 0;
}
if (afs_brsWaiters)
afs_osi_Wakeup(&afs_brsWaiters);
- MReleaseWriteLock(&afs_xbrs);
+ ReleaseWriteLock(&afs_xbrs);
}
/* return true if bkg fetch daemons are all busy */
}
struct brequest *
-afs_BQueue(register short aopcode, register struct vcache *avc,
- afs_int32 dontwait, afs_int32 ause, struct AFS_UCRED *acred,
- afs_size_t asparm0, afs_size_t asparm1, void *apparm0)
+afs_BQueue(short aopcode, struct vcache *avc,
+ afs_int32 dontwait, afs_int32 ause, afs_ucred_t *acred,
+ afs_size_t asparm0, afs_size_t asparm1, void *apparm0,
+ void *apparm1, void *apparm2)
{
- register int i;
- register struct brequest *tb;
+ int i;
+ struct brequest *tb;
AFS_STATCNT(afs_BQueue);
- MObtainWriteLock(&afs_xbrs, 296);
+ ObtainWriteLock(&afs_xbrs, 296);
while (1) {
tb = afs_brs;
for (i = 0; i < NBRS; i++, tb++) {
tb->opcode = aopcode;
tb->vc = avc;
tb->cred = acred;
- crhold(tb->cred);
+ if (tb->cred) {
+ crhold(tb->cred);
+ }
if (avc) {
- VN_HOLD(AFSTOV(avc));
+ AFS_FAST_HOLD(avc);
}
tb->refCount = ause + 1;
tb->size_parm[0] = asparm0;
tb->size_parm[1] = asparm1;
tb->ptr_parm[0] = apparm0;
+ tb->ptr_parm[1] = apparm1;
+ tb->ptr_parm[2] = apparm2;
tb->flags = 0;
- tb->code = 0;
+ tb->code_raw = tb->code_checkcode = 0;
tb->ts = afs_brs_count++;
/* if daemons are waiting for work, wake them up */
if (afs_brsDaemons > 0) {
afs_osi_Wakeup(&afs_brsDaemons);
}
- MReleaseWriteLock(&afs_xbrs);
+ ReleaseWriteLock(&afs_xbrs);
return tb;
}
if (dontwait) {
- MReleaseWriteLock(&afs_xbrs);
+ ReleaseWriteLock(&afs_xbrs);
return NULL;
}
/* no free buffers, sleep a while */
afs_brsWaiters++;
- MReleaseWriteLock(&afs_xbrs);
+ ReleaseWriteLock(&afs_xbrs);
afs_osi_Sleep(&afs_brsWaiters);
- MObtainWriteLock(&afs_xbrs, 301);
+ ObtainWriteLock(&afs_xbrs, 301);
afs_brsWaiters--;
}
}
#ifdef AFS_AIX41_ENV
-/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
+/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
* The modifications here will work for either a UP or MP machine.
*/
struct buf *afs_asyncbuf = (struct buf *)0;
afs_int32 afs_biodcnt = 0;
/* in implementing this, I assumed that all external linked lists were
- * null-terminated.
+ * null-terminated.
*
* Several places in this code traverse a linked list. The algorithm
* used here is probably unfamiliar to most people. Careful examination
*
* This function obtains, and returns, a pointer to a buffer for
* processing by a daemon. It sleeps until such a buffer is available.
- * The source of buffers for it is the list afs_asyncbuf (see also
+ * The source of buffers for it is the list afs_asyncbuf (see also
* afs_gn_strategy). This function may be invoked concurrently by
* several processes, that is, several instances of the same daemon.
* afs_gn_strategy, which adds buffers to the list, runs at interrupt
*
* Since AIX 4.1 can wake just one process at a time, the separate sleep
* addresses have been removed.
- * Note that the kernel_lock is held until the e_sleep_thread() occurs.
+ * Note that the kernel_lock is held until the e_sleep_thread() occurs.
* The afs_asyncbuf_lock is primarily used to serialize access between
* process and interrupts.
*/
/* ??? Does the forward pointer of the returned buffer need to be NULL?
*/
- /* Disable interrupts from the strategy function, and save the
+ /* Disable interrupts from the strategy function, and save the
* prior priority level and lock access to the afs_asyncbuf.
*/
AFS_GUNLOCK();
/* For the convenience of other code, replace the gnodes in
* the b_vp field of bp and the other buffers on the b_work
- * chain with the corresponding vnodes.
+ * chain with the corresponding vnodes.
*
* ??? what happens to the gnodes? They're not just cut loose,
* are they?
limit_sigs(&sigbits, &osigbits); /* and already masked */
}
/* Main body starts here -- this is an intentional infinite loop, and
- * should NEVER exit
+ * should NEVER exit
*
- * Now, the loop will exit if get_bioreq() returns NULL, indicating
+ * Now, the loop will exit if get_bioreq() returns NULL, indicating
* that we've been interrupted.
*/
while (1) {
if (vcp->v.v_gnode->gn_mwrcnt) {
afs_offs_t newlength =
(afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
- if (vcp->m.Length < newlength) {
+ if (vcp->f.m.Length < newlength) {
afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
__LINE__, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(vcp->m.Length),
+ ICL_HANDLE_OFFSET(vcp->f.m.Length),
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
- vcp->m.Length = newlength;
+ vcp->f.m.Length = newlength;
}
}
ReleaseWriteLock(&vcp->lock);
}
/* If the buffer represents a protection violation, rather than
- * an actual request for I/O, no special action need be taken.
+ * an actual request for I/O, no special action need be taken.
*/
if (bp->b_flags & B_PFPROT) {
iodone(bp); /* Notify all users of the buffer that we're done */
* buffer may be linked with other buffers via the b_work field.
* See also afs_gn_strategy. For each buffer in the chain (including
* bp) notify all users of the buffer that the daemon is finished
- * using it by calling iodone.
+ * using it by calling iodone.
* assumes iodone can modify the b_work field.
*/
for (tbp1 = bp;;) {
int afs_nbrs = 0;
+static_inline void
+afs_BackgroundDaemon_once(void)
+{
+ LOCK_INIT(&afs_xbrs, "afs_xbrs");
+ memset(afs_brs, 0, sizeof(afs_brs));
+ brsInit = 1;
+#if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
+ /*
+ * steal the first daemon for doing delayed DSlot flushing
+ * (see afs_GetDownDSlot)
+ */
+ AFS_GUNLOCK();
+ afs_sgidaemon();
+ exit(CLD_EXITED, 0);
+#endif
+}
+
+static_inline void
+brequest_release(struct brequest *tb)
+{
+ if (tb->vc) {
+ AFS_RELE(AFSTOV(tb->vc)); /* MUST call vnode layer or could lose vnodes */
+ tb->vc = NULL;
+ }
+ if (tb->cred) {
+ crfree(tb->cred);
+ tb->cred = (afs_ucred_t *)0;
+ }
+ afs_BRelease(tb); /* this grabs and releases afs_xbrs lock */
+}
+
+#ifdef AFS_NEW_BKG
+int
+afs_BackgroundDaemon(struct afs_uspc_param *uspc, void *param1, void *param2)
+#else
void
afs_BackgroundDaemon(void)
+#endif
{
struct brequest *tb;
int i, foundAny;
AFS_STATCNT(afs_BackgroundDaemon);
/* initialize subsystem */
- if (brsInit == 0) {
- LOCK_INIT(&afs_xbrs, "afs_xbrs");
- memset((char *)afs_brs, 0, sizeof(afs_brs));
- brsInit = 1;
-#if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
- /*
- * steal the first daemon for doing delayed DSlot flushing
- * (see afs_GetDownDSlot)
- */
- AFS_GUNLOCK();
- afs_sgidaemon();
- return;
+ if (brsInit == 0)
+ /* Irix with "short stack" exits */
+ afs_BackgroundDaemon_once();
+
+#ifdef AFS_NEW_BKG
+ /* If it's a re-entering syscall, complete the request and release */
+ if (uspc->ts > -1) {
+ tb = afs_brs;
+ for (i = 0; i < NBRS; i++, tb++) {
+ if (tb->ts == uspc->ts) {
+ /* copy the userspace status back in */
+ ((struct afs_uspc_param *) tb->ptr_parm[0])->retval =
+ uspc->retval;
+ /* mark it valid and notify our caller */
+ tb->flags |= BUVALID;
+ if (tb->flags & BUWAIT) {
+ tb->flags &= ~BUWAIT;
+ afs_osi_Wakeup(tb);
+ }
+ brequest_release(tb);
+ break;
+ }
+ }
+ } else {
+ afs_osi_MaskUserLoop();
#endif
+ /* Otherwise it's a new one */
+ afs_nbrs++;
+#ifdef AFS_NEW_BKG
}
- afs_nbrs++;
+#endif
- MObtainWriteLock(&afs_xbrs, 302);
+ ObtainWriteLock(&afs_xbrs, 302);
while (1) {
int min_ts = 0;
struct brequest *min_tb = NULL;
if (afs_termState == AFSOP_STOP_BKG) {
if (--afs_nbrs <= 0)
- afs_termState = AFSOP_STOP_TRUNCDAEMON;
- MReleaseWriteLock(&afs_xbrs);
+ afs_termState = AFSOP_STOP_RXCALLBACK;
+ ReleaseWriteLock(&afs_xbrs);
afs_osi_Wakeup(&afs_termState);
+#ifdef AFS_NEW_BKG
+ return -2;
+#else
return;
+#endif
}
/* find a request */
if ((tb = min_tb)) {
/* claim and process this request */
tb->flags |= BSTARTED;
- MReleaseWriteLock(&afs_xbrs);
+ ReleaseWriteLock(&afs_xbrs);
foundAny = 1;
afs_Trace1(afs_iclSetp, CM_TRACE_BKG1, ICL_TYPE_INT32,
tb->opcode);
if (tb->opcode == BOP_FETCH)
BPrefetch(tb);
+#if defined(AFS_CACHE_BYPASS)
+ else if (tb->opcode == BOP_FETCH_NOCACHE)
+ BPrefetchNoCache(tb);
+#endif
else if (tb->opcode == BOP_STORE)
BStore(tb);
else if (tb->opcode == BOP_PATH)
BPath(tb);
+#ifdef AFS_DARWIN80_ENV
+ else if (tb->opcode == BOP_MOVE) {
+ memcpy(uspc, (struct afs_uspc_param *) tb->ptr_parm[0],
+ sizeof(struct afs_uspc_param));
+ uspc->ts = tb->ts;
+ /* string lengths capped in move vop; copy NUL tho */
+ memcpy(param1, (char *)tb->ptr_parm[1],
+ strlen(tb->ptr_parm[1])+1);
+ memcpy(param2, (char *)tb->ptr_parm[2],
+ strlen(tb->ptr_parm[2])+1);
+ return 0;
+ }
+#endif
+ else if (tb->opcode == BOP_PARTIAL_STORE)
+ BPartialStore(tb);
else
panic("background bop");
- if (tb->vc) {
- AFS_RELE(AFSTOV(tb->vc)); /* MUST call vnode layer or could lose vnodes */
- tb->vc = NULL;
- }
- if (tb->cred) {
- crfree(tb->cred);
- tb->cred = (struct AFS_UCRED *)0;
- }
- afs_BRelease(tb); /* this grabs and releases afs_xbrs lock */
- MObtainWriteLock(&afs_xbrs, 305);
+ brequest_release(tb);
+ ObtainWriteLock(&afs_xbrs, 305);
}
if (!foundAny) {
/* wait for new request */
afs_brsDaemons++;
- MReleaseWriteLock(&afs_xbrs);
+ ReleaseWriteLock(&afs_xbrs);
afs_osi_Sleep(&afs_brsDaemons);
- MObtainWriteLock(&afs_xbrs, 307);
+ ObtainWriteLock(&afs_xbrs, 307);
afs_brsDaemons--;
}
}
+#ifdef AFS_NEW_BKG
+ return -2;
+#endif
}
AFS_STATCNT(shutdown_daemons);
if (afs_cold_shutdown) {
afs_brsDaemons = brsInit = 0;
- rxepoch_checked = afs_nbrs = 0;
- memset((char *)afs_brs, 0, sizeof(afs_brs));
- memset((char *)&afs_xbrs, 0, sizeof(afs_lock_t));
+ afs_nbrs = 0;
+ memset(afs_brs, 0, sizeof(afs_brs));
+ memset(&afs_xbrs, 0, sizeof(afs_lock_t));
afs_brsWaiters = 0;
#ifdef AFS_AIX41_ENV
lock_free(&afs_asyncbuf_lock);
SPUNLOCK(afs_sgibklock, s);
AFS_GLOCK();
tdc->dflags &= ~DFEntryMod;
- afs_WriteDCache(tdc, 1);
+ osi_Assert(afs_WriteDCache(tdc, 1) == 0);
AFS_GUNLOCK();
s = SPLOCK(afs_sgibklock);
}