/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
#if defined(AFS_CACHE_BYPASS)
#include "afs/afs_bypasscache.h"
-#endif// defined(AFS_CACHE_BYPASS)
+#endif /* AFS_CACHE_BYPASS */
/* background request queue size */
afs_lock_t afs_xbrs; /* lock for brs */
static int brsInit = 0;
last10MinCheck = lastCheck = osi_Time();
while (1) {
if (afs_termState == AFSOP_STOP_CS) {
- afs_termState = AFSOP_STOP_BKG;
+ afs_termState = AFSOP_STOP_TRUNCDAEMON;
afs_osi_Wakeup(&afs_termState);
break;
}
}
/* shutdown check. */
if (afs_termState == AFSOP_STOP_CS) {
- afs_termState = AFSOP_STOP_BKG;
+ afs_termState = AFSOP_STOP_TRUNCDAEMON;
afs_osi_Wakeup(&afs_termState);
break;
}
if (afs_nfsexporter)
afs_FlushActiveVcaches(0); /* flush NFS writes */
afs_FlushVCBs(1); /* flush queued callbacks */
+
afs_MaybeWakeupTruncateDaemon(); /* free cache space if we have to */
rx_CheckPackets(); /* Does RX need more packets? */
if (afsd_dynamic_vcaches && (last5MinCheck + 300 < now)) {
/* start with trying to drop us back to our base usage */
- int anumber;
- if (afs_maxvcount <= afs_cacheStats)
- anumber = VCACHE_FREE;
- else
- anumber = VCACHE_FREE + (afs_maxvcount - afs_cacheStats);
-
- ObtainWriteLock(&afs_xvcache, 734);
- afs_ShakeLooseVCaches(anumber);
- ReleaseWriteLock(&afs_xvcache);
+ int anumber = VCACHE_FREE + (afs_vcount - afs_cacheStats);
+
+ if (anumber > 0) {
+ ObtainWriteLock(&afs_xvcache, 734);
+ afs_ShakeLooseVCaches(anumber);
+ ReleaseWriteLock(&afs_xvcache);
+ }
last5MinCheck = now;
}
/* 18285 is because we're trying to divide evenly into 128, that is,
* CBSlotLen, while staying just under 20 seconds. If CBSlotLen
- * changes, should probably change this interval, too.
+ * changes, should probably change this interval, too.
* Some of the preceding actions may take quite some time, so we
* might not want to wait the entire interval */
now = 18285 - (osi_Time() - now);
if (afs_CheckServerDaemonStarted)
afs_termState = AFSOP_STOP_CS;
else
- afs_termState = AFSOP_STOP_BKG;
+ afs_termState = AFSOP_STOP_TRUNCDAEMON;
afs_osi_Wakeup(&afs_termState);
return;
}
cred_t *credp;
struct dentry *dp;
struct vcache *vcp;
-
+
afs_rootFid.Fid.Volume = volid;
afs_rootFid.Fid.Vnode = 1;
afs_rootFid.Fid.Unique = 1;
-
+
credp = crref();
if (afs_InitReq(&treq, credp))
goto out;
goto out;
afs_getattr(vcp, &vattr, credp);
afs_fill_inode(AFSTOV(vcp), &vattr);
-
+
dp = d_find_alias(AFSTOV(afs_globalVp));
-
+
#if defined(AFS_LINUX24_ENV)
+#if defined(HAVE_DCACHE_LOCK)
spin_lock(&dcache_lock);
+#else
+ spin_lock(&AFSTOV(vcp)->i_lock);
+#endif
#if defined(AFS_LINUX26_ENV)
spin_lock(&dp->d_lock);
#endif
#endif
+#if defined(D_ALIAS_IS_HLIST)
+ hlist_del_init(&dp->d_alias);
+ hlist_add_head(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
+#else
list_del_init(&dp->d_alias);
list_add(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
+#endif
dp->d_inode = AFSTOV(vcp);
#if defined(AFS_LINUX24_ENV)
#if defined(AFS_LINUX26_ENV)
spin_unlock(&dp->d_lock);
#endif
+#if defined(HAVE_DCACHE_LOCK)
spin_unlock(&dcache_lock);
+#else
+ spin_unlock(&AFSTOV(vcp)->i_lock);
+#endif
#endif
dput(dp);
-
+
AFS_FAST_RELE(afs_globalVp);
afs_globalVp = vcp;
out:
/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
static void
-BPath(register struct brequest *ab)
+BPath(struct brequest *ab)
{
- register struct dcache *tdc = NULL;
+ struct dcache *tdc = NULL;
struct vcache *tvc = NULL;
struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
* size_parm 1 is true iff we should release the dcache entry here.
*/
static void
-BPrefetch(register struct brequest *ab)
+BPrefetch(struct brequest *ab)
{
- register struct dcache *tdc;
- register struct vcache *tvc;
+ struct dcache *tdc;
+ struct vcache *tvc;
afs_size_t offset, len, abyte, totallen = 0;
struct vrequest treq;
if (tdc) {
afs_PutDCache(tdc);
}
- abyte+=len;
+ abyte+=len;
totallen += len;
} while ((totallen < afs_preCache) && tdc && (len > 0));
/* now, dude may be waiting for us to clear DFFetchReq bit; do so. Can't
#if defined(AFS_CACHE_BYPASS)
static void
-BPrefetchNoCache(register struct brequest *ab)
+BPrefetchNoCache(struct brequest *ab)
{
struct vrequest treq;
afs_size_t len;
-
+
if ((len = afs_InitReq(&treq, ab->cred)))
return;
#endif
static void
-BStore(register struct brequest *ab)
+BStore(struct brequest *ab)
{
- register struct vcache *tvc;
- register afs_int32 code;
+ struct vcache *tvc;
+ afs_int32 code;
struct vrequest treq;
#if defined(AFS_SGI_ENV)
struct cred *tmpcred;
/* release a held request buffer */
void
-afs_BRelease(register struct brequest *ab)
+afs_BRelease(struct brequest *ab)
{
AFS_STATCNT(afs_BRelease);
}
struct brequest *
-afs_BQueue(register short aopcode, register struct vcache *avc,
+afs_BQueue(short aopcode, struct vcache *avc,
afs_int32 dontwait, afs_int32 ause, afs_ucred_t *acred,
afs_size_t asparm0, afs_size_t asparm1, void *apparm0,
void *apparm1, void *apparm2)
{
- register int i;
- register struct brequest *tb;
+ int i;
+ struct brequest *tb;
AFS_STATCNT(afs_BQueue);
ObtainWriteLock(&afs_xbrs, 296);
}
#ifdef AFS_AIX41_ENV
-/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
+/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
* The modifications here will work for either a UP or MP machine.
*/
struct buf *afs_asyncbuf = (struct buf *)0;
afs_int32 afs_biodcnt = 0;
/* in implementing this, I assumed that all external linked lists were
- * null-terminated.
+ * null-terminated.
*
* Several places in this code traverse a linked list. The algorithm
* used here is probably unfamiliar to most people. Careful examination
*
* This function obtains, and returns, a pointer to a buffer for
* processing by a daemon. It sleeps until such a buffer is available.
- * The source of buffers for it is the list afs_asyncbuf (see also
+ * The source of buffers for it is the list afs_asyncbuf (see also
* afs_gn_strategy). This function may be invoked concurrently by
* several processes, that is, several instances of the same daemon.
* afs_gn_strategy, which adds buffers to the list, runs at interrupt
*
* Since AIX 4.1 can wake just one process at a time, the separate sleep
* addresses have been removed.
- * Note that the kernel_lock is held until the e_sleep_thread() occurs.
+ * Note that the kernel_lock is held until the e_sleep_thread() occurs.
* The afs_asyncbuf_lock is primarily used to serialize access between
* process and interrupts.
*/
/* ??? Does the forward pointer of the returned buffer need to be NULL?
*/
- /* Disable interrupts from the strategy function, and save the
+ /* Disable interrupts from the strategy function, and save the
* prior priority level and lock access to the afs_asyncbuf.
*/
AFS_GUNLOCK();
/* For the convenience of other code, replace the gnodes in
* the b_vp field of bp and the other buffers on the b_work
- * chain with the corresponding vnodes.
+ * chain with the corresponding vnodes.
*
* ??? what happens to the gnodes? They're not just cut loose,
* are they?
limit_sigs(&sigbits, &osigbits); /* and already masked */
}
/* Main body starts here -- this is an intentional infinite loop, and
- * should NEVER exit
+ * should NEVER exit
*
- * Now, the loop will exit if get_bioreq() returns NULL, indicating
+ * Now, the loop will exit if get_bioreq() returns NULL, indicating
* that we've been interrupted.
*/
while (1) {
ReleaseWriteLock(&vcp->lock);
}
/* If the buffer represents a protection violation, rather than
- * an actual request for I/O, no special action need be taken.
+ * an actual request for I/O, no special action need be taken.
*/
if (bp->b_flags & B_PFPROT) {
iodone(bp); /* Notify all users of the buffer that we're done */
* buffer may be linked with other buffers via the b_work field.
* See also afs_gn_strategy. For each buffer in the chain (including
* bp) notify all users of the buffer that the daemon is finished
- * using it by calling iodone.
+ * using it by calling iodone.
* assumes iodone can modify the b_work field.
*/
for (tbp1 = bp;;) {
afs_BRelease(tb); /* this grabs and releases afs_xbrs lock */
}
-#ifdef AFS_DARWIN80_ENV
+#ifdef AFS_NEW_BKG
int
afs_BackgroundDaemon(struct afs_uspc_param *uspc, void *param1, void *param2)
#else
/* Irix with "short stack" exits */
afs_BackgroundDaemon_once();
-#ifdef AFS_DARWIN80_ENV
+#ifdef AFS_NEW_BKG
/* If it's a re-entering syscall, complete the request and release */
if (uspc->ts > -1) {
tb = afs_brs;
#endif
/* Otherwise it's a new one */
afs_nbrs++;
-#ifdef AFS_DARWIN80_ENV
+#ifdef AFS_NEW_BKG
}
#endif
if (afs_termState == AFSOP_STOP_BKG) {
if (--afs_nbrs <= 0)
- afs_termState = AFSOP_STOP_TRUNCDAEMON;
+ afs_termState = AFSOP_STOP_RXCALLBACK;
ReleaseWriteLock(&afs_xbrs);
afs_osi_Wakeup(&afs_termState);
-#ifdef AFS_DARWIN80_ENV
+#ifdef AFS_NEW_BKG
return -2;
#else
return;
tb->opcode);
if (tb->opcode == BOP_FETCH)
BPrefetch(tb);
-#if defined(AFS_CACHE_BYPASS)
+#if defined(AFS_CACHE_BYPASS)
else if (tb->opcode == BOP_FETCH_NOCACHE)
BPrefetchNoCache(tb);
-#endif
+#endif
else if (tb->opcode == BOP_STORE)
BStore(tb);
else if (tb->opcode == BOP_PATH)
afs_brsDaemons--;
}
}
-#ifdef AFS_DARWIN80_ENV
+#ifdef AFS_NEW_BKG
return -2;
#endif
}
SPUNLOCK(afs_sgibklock, s);
AFS_GLOCK();
tdc->dflags &= ~DFEntryMod;
- afs_WriteDCache(tdc, 1);
+ osi_Assert(afs_WriteDCache(tdc, 1) == 0);
AFS_GUNLOCK();
s = SPLOCK(afs_sgibklock);
}