* afs_FlushActiveVcaches
* afs_VerifyVCache2
* afs_WriteVCache
+ * afs_WriteVCacheDiscon
* afs_SimpleVStat
* afs_ProcessFS
* TellALittleWhiteLie
* afs_GetVCache
* afs_LookupVCache
* afs_GetRootVCache
+ * afs_UpdateStatus
* afs_FetchStatus
* afs_StuffVcache
* afs_PutVCache
#include <afsconfig.h>
#include "afs/param.h"
-RCSID
- ("$Header$");
-
-#include "afs/sysincludes.h" /*Standard vendor system headers */
-#include "afsincludes.h" /*AFS-based standard headers */
+#include "afs/sysincludes.h" /*Standard vendor system headers */
+#include "afsincludes.h" /*AFS-based standard headers */
#include "afs/afs_stats.h"
#include "afs/afs_cbqueue.h"
#include "afs/afs_osidnlc.h"
-#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
afs_int32 afs_vcount = 0; /* number of vcache in use now */
-#endif /* AFS_OSF_ENV */
#ifdef AFS_SGI_ENV
int afsvnumbers = 0;
#endif /* AFS_SGI_ENV */
/* Exported variables */
+afs_rwlock_t afs_xvcdirty; /*Lock: discon vcache dirty list mgmt */
afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
+afs_rwlock_t afs_xvreclaim; /*Lock: entries reclaimed, not on free list */
afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
#if !defined(AFS_LINUX22_ENV)
static struct vcache *freeVCList; /*Free list for stat cache entries */
+struct vcache *ReclaimedVCList; /*Reclaimed list for stat entries */
static struct vcache *Initial_freeVCList; /*Initial list for above */
#endif
struct afs_q VLRU; /*vcache LRU */
afs_int32 afs_bulkStatsLost;
int afs_norefpanic = 0;
+
+/* Disk-backed vcache definitions
+ * Both protected by xvcache */
+static int afs_nextVcacheSlot = 0;
+static struct afs_slotlist *afs_freeSlotList = NULL;
+
/* Forward declarations */
-static afs_int32 afs_QueueVCB(struct vcache *avc);
+static afs_int32 afs_QueueVCB(struct vcache *avc, int *slept);
-/*
- * afs_HashCBRFid
- *
+/*!
* Generate an index into the hash table for a given Fid.
+ * \param fid The fid to hash.
+ * \return The hash value.
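+ *
+ * For example, a fid with Volume 536870915, Vnode 2, Unique 1 hashes
+ * to (536870915 + 2 + 1) % CBRSIZE.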
*/
static int
afs_HashCBRFid(struct AFSFid *fid)
return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
}
-/*
- * afs_InsertHashCBR
- *
+/*!
* Insert a CBR entry into the hash table.
* Must be called with afs_xvcb held.
+ * \param cbr The CBR entry to insert.
*/
static void
afs_InsertHashCBR(struct afs_cbr *cbr)
afs_cbrHashT[slot] = cbr;
}
-/*
- * afs_FlushVCache
- *
- * Description:
- * Flush the given vcache entry.
+/*!
*
- * Parameters:
- * avc : Pointer to vcache entry to flush.
- * slept : Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
+ * Flush the given vcache entry.
*
* Environment:
* afs_xvcache lock must be held for writing upon entry to
* LOCK: afs_FlushVCache afs_xvcache W
* REFCNT: vcache ref count must be zero on entry except for osf1
* RACE: lock is dropped and reobtained, permitting race in caller
+ *
+ * \param avc Pointer to vcache entry to flush.
+ * \param slept Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
+ *
*/
-
int
afs_FlushVCache(struct vcache *avc, int *slept)
{ /*afs_FlushVCache */
*slept = 0;
AFS_STATCNT(afs_FlushVCache);
afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, avc->states);
-#ifdef AFS_OSF_ENV
- AFS_GUNLOCK();
- VN_LOCK(AFSTOV(avc));
- AFS_GLOCK();
-#endif
+ ICL_TYPE_INT32, avc->f.states);
code = osi_VM_FlushVCache(avc, slept);
if (code)
goto bad;
- if (avc->states & CVFlushed) {
+ if (avc->f.states & CVFlushed) {
code = EBUSY;
goto bad;
}
refpanic("LRU vs. Free inconsistency");
}
#endif
- avc->states |= CVFlushed;
+ avc->f.states |= CVFlushed;
/* pull the entry out of the lruq and put it on the free list */
QRemove(&avc->vlruq);
- avc->vlruq.prev = avc->vlruq.next = (struct afs_q *)0;
/* keep track of # of files that we bulk stat'd, but never used
* before they got recycled.
*/
- if (avc->states & CBulkStat)
+ if (avc->f.states & CBulkStat)
afs_bulkStatsLost++;
vcachegen++;
/* remove entry from the hash chain */
- i = VCHash(&avc->fid);
+ i = VCHash(&avc->f.fid);
uvc = &afs_vhashT[i];
for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
if (avc == wvc) {
AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
}
#endif
+#ifdef AFS_SUN510_ENV
+ /* As we use private vnodes, cleanup is up to us */
+ vn_reinit(AFSTOV(avc));
+#endif
afs_FreeAllAxs(&(avc->Access));
-
- /* we can't really give back callbacks on RO files, since the
- * server only tracks them on a per-volume basis, and we don't
- * know whether we still have some other files from the same
- * volume. */
- if ((avc->states & CRO) == 0 && avc->callback) {
- afs_QueueVCB(avc);
- }
+ if (!afs_shuttingdown)
+ afs_QueueVCB(avc, slept);
ObtainWriteLock(&afs_xcbhash, 460);
afs_DequeueCallback(avc); /* remove it from queued callbacks list */
- avc->states &= ~(CStatd | CUnique);
+ avc->f.states &= ~(CStatd | CUnique);
ReleaseWriteLock(&afs_xcbhash);
- if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
+ if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(avc); /* if it (could be) a directory */
else
osi_dnlc_purgevp(avc);
* optimistic synchronization algorithm
*/
afs_allZaps++;
- if (avc->fid.Fid.Vnode & 1)
+ if (avc->f.fid.Fid.Vnode & 1)
afs_oddZaps++;
else
afs_evenZaps++;
-#if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
+ afs_vcount--;
+#if !defined(AFS_LINUX22_ENV)
/* put the entry in the free list */
avc->nextfree = freeVCList;
freeVCList = avc;
if (avc->vlruq.prev || avc->vlruq.next) {
refpanic("LRU vs. Free inconsistency");
}
- avc->states |= CVFlushed;
+ avc->f.states |= CVFlushed;
#else
/* This should put it back on the vnode free list since usecount is 1 */
- afs_vcount--;
vSetType(avc, VREG);
if (VREFCOUNT_GT(avc,0)) {
-#if defined(AFS_OSF_ENV)
- VN_UNLOCK(AFSTOV(avc));
-#endif
AFS_RELE(AFSTOV(avc));
+ afs_stats_cmperf.vcacheXAllocs--;
} else {
if (afs_norefpanic) {
- printf("flush vc refcnt < 1");
+ afs_warn("flush vc refcnt < 1");
afs_norefpanic++;
-#if defined(AFS_OSF_ENV)
- (void)vgone(avc, VX_NOSLEEP, NULL);
- AFS_GLOCK();
- VN_UNLOCK(AFSTOV(avc));
-#endif
} else
osi_Panic("flush vc refcnt < 1");
}
-#endif /* AFS_OSF_ENV */
+#endif /* AFS_LINUX22_ENV */
return 0;
bad:
-#ifdef AFS_OSF_ENV
- VN_UNLOCK(AFSTOV(avc));
-#endif
return code;
-
} /*afs_FlushVCache */
#ifndef AFS_SGI_ENV
-/*
- * afs_InactiveVCache
+/*!
+ * The core of the inactive vnode op for all but IRIX.
*
- * The core of the inactive vnode op for all but IRIX.
+ * \param avc
+ * \param acred
*/
void
-afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
+afs_InactiveVCache(struct vcache *avc, afs_ucred_t *acred)
{
AFS_STATCNT(afs_inactive);
- if (avc->states & CDirty) {
+ if (avc->f.states & CDirty) {
/* we can't keep trying to push back dirty data forever. Give up. */
afs_InvalidateAllSegments(avc); /* turns off dirty bit */
}
- avc->states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
- avc->states &= ~CDirty; /* Turn it off */
- if (avc->states & CUnlinked) {
+ avc->f.states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
+ avc->f.states &= ~CDirty; /* Turn it off */
+ if (avc->f.states & CUnlinked) {
if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
- avc->states |= CUnlinkedDel;
+ avc->f.states |= CUnlinkedDel;
return;
}
afs_remunlink(avc, 1); /* ignore any return code */
}
#endif
-/*
- * afs_AllocCBR
- *
- * Description: allocate a callback return structure from the
+/*!
+ * Allocate a callback return structure from the
* free list and return it.
*
- * Env: The alloc and free routines are both called with the afs_xvcb lock
+ * Environment: The alloc and free routines are both called with the afs_xvcb lock
* held, so we don't have to worry about blocking in osi_Alloc.
+ *
+ * \return The allocated afs_cbr.
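+ *
+ * A sketch of the usual pattern (afs_xvcb held), as in afs_QueueVCB:
+ * \code
+ *     tcbp = afs_AllocCBR();
+ *     tcbp->fid = avc->f.fid.Fid;
+ *     afs_InsertHashCBR(tcbp);
+ * \endcode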
*/
static struct afs_cbr *afs_cbrSpace = 0;
+/* if alloc limit below changes, fix me! */
+static struct afs_cbr *afs_cbrHeads[16];
struct afs_cbr *
afs_AllocCBR(void)
{
- register struct afs_cbr *tsp;
+ struct afs_cbr *tsp;
int i;
while (!afs_cbrSpace) {
- if (afs_stats_cmperf.CallBackAlloced >= 2) {
- /* don't allocate more than 2 * AFS_NCBRS for now */
+ if (afs_stats_cmperf.CallBackAlloced >= sizeof(afs_cbrHeads)/sizeof(afs_cbrHeads[0])) {
+ /* don't allocate more than 16 * AFS_NCBRS for now */
afs_FlushVCBs(0);
afs_stats_cmperf.CallBackFlushes++;
} else {
/* try allocating */
- tsp =
- (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
- sizeof(struct afs_cbr));
+ tsp = afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
+ osi_Assert(tsp != NULL);
for (i = 0; i < AFS_NCBRS - 1; i++) {
tsp[i].next = &tsp[i + 1];
}
tsp[AFS_NCBRS - 1].next = 0;
afs_cbrSpace = tsp;
+ afs_cbrHeads[afs_stats_cmperf.CallBackAlloced] = tsp;
afs_stats_cmperf.CallBackAlloced++;
}
}
return tsp;
}
-/*
- * afs_FreeCBR
+/*!
+ * Free a callback return structure, removing it from all lists.
*
- * Description: free a callback return structure, removing it from all lists.
+ * Environment: the xvcb lock is held over these calls.
*
- * Parameters:
- * asp -- the address of the structure to free.
+ * \param asp The address of the structure to free.
*
- * Environment: the xvcb lock is held over these calls.
+ * \return 0
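+ *
+ * For example, draining a server's queued callbacks (xvcb held):
+ * \code
+ *     while (tsp->cbrs)
+ *         afs_FreeCBR(tsp->cbrs);
+ * \endcode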
*/
int
-afs_FreeCBR(register struct afs_cbr *asp)
+afs_FreeCBR(struct afs_cbr *asp)
{
*(asp->pprev) = asp->next;
if (asp->next)
return 0;
}
-/*
- * afs_FlushVCBs
- *
- * Description: flush all queued callbacks to all servers.
- *
- * Parameters: none.
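+/*!
+ * Server-loop helper: issue GiveUpAllCallBacks to every connection in
+ * parallel via multi_Rx, then drain the CBR lists of the servers that
+ * answered.
+ */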
+static void
+FlushAllVCBs(int nconns, struct rx_connection **rxconns,
+ struct afs_conn **conns)
+{
+ afs_int32 *results;
+ afs_int32 i;
+
+ results = afs_osi_Alloc(nconns * sizeof (afs_int32));
+ osi_Assert(results != NULL);
+
+ AFS_GUNLOCK();
+ multi_Rx(rxconns,nconns)
+ {
+ multi_RXAFS_GiveUpAllCallBacks();
+ results[multi_i] = multi_error;
+ } multi_End;
+ AFS_GLOCK();
+
+ /*
+ * Freeing the CBR will unlink it from the server's CBR list
+ * do it here, not in the loop, because a dynamic CBR will call
+ * into the memory management routines.
+ */
+ for ( i = 0 ; i < nconns ; i++ ) {
+ if (results[i] == 0) {
+ /* Unchain all of them */
+ while (conns[i]->parent->srvr->server->cbrs)
+ afs_FreeCBR(conns[i]->parent->srvr->server->cbrs);
+ }
+ }
+ afs_osi_Free(results, nconns * sizeof(afs_int32));
+}
+
+/*!
+ * Flush all queued callbacks to all servers.
*
* Environment: holds xvcb lock over RPC to guard against race conditions
* when a new callback is granted for the same file later on.
+ *
+ * \return 0 for success.
*/
afs_int32
afs_FlushVCBs(afs_int32 lockit)
struct server *tsp;
int i;
struct vrequest treq;
- struct conn *tc;
+ struct afs_conn *tc;
int safety1, safety2, safety3;
XSTATS_DECLS;
+
+ if (AFS_IS_DISCONNECTED)
+ return ENETDOWN;
+
if ((code = afs_InitReq(&treq, afs_osi_credp)))
return code;
treq.flags |= O_NONBLOCK;
tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
+ osi_Assert(tfids != NULL);
if (lockit)
- MObtainWriteLock(&afs_xvcb, 273);
+ ObtainWriteLock(&afs_xvcb, 273);
+ /*
+ * Shutting down.
+ * First, attempt a multi across everything, all addresses
+ * for all servers we know of.
+ */
+
+ if (lockit == 2)
+ afs_LoopServers(AFS_LS_ALL, NULL, 0, FlushAllVCBs, NULL);
+
ObtainReadLock(&afs_xserver);
for (i = 0; i < NSERVERS; i++) {
for (safety1 = 0, tsp = afs_servers[i];
tcount = 0; /* number found so far */
for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
+ struct rx_connection *rxconn;
/* if buffer is full, or we've queued all we're going
* to from this server, we should flush out the
* callbacks.
cbArray.AFSCBs_val = callBacks;
memset(&callBacks[0], 0, sizeof(callBacks[0]));
callBacks[0].CallBackType = CB_EXCLUSIVE;
- for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
+ for (safety3 = 0; safety3 < AFS_MAXHOSTS * 2; safety3++) {
tc = afs_ConnByHost(tsp, tsp->cell->fsport,
tsp->cell->cellNum, &treq, 0,
- SHARED_LOCK);
+ SHARED_LOCK, 0, &rxconn);
if (tc) {
XSTATS_START_TIME
(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
RX_AFS_GUNLOCK();
code =
- RXAFS_GiveUpCallBacks(tc->id, &fidArray,
+ RXAFS_GiveUpCallBacks(rxconn, &fidArray,
&cbArray);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
if (!afs_Analyze
- (tc, code, 0, &treq,
+ (tc, rxconn, code, 0, &treq,
AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
tsp->cell)) {
break;
ReleaseReadLock(&afs_xserver);
if (lockit)
- MReleaseWriteLock(&afs_xvcb);
+ ReleaseWriteLock(&afs_xvcb);
afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
return 0;
}
-/*
- * afs_QueueVCB
- *
- * Description:
- * Queue a callback on the given fid.
- *
- * Parameters:
- * avc: vcache entry
+/*!
+ * Queue a callback on the given fid.
*
* Environment:
* Locks the xvcb lock.
* Called when the xvcache lock is already held.
+ * RACE: afs_xvcache may be dropped and reacquired
+ *
+ * \param avc vcache entry
+ * \param slept Set to 1 if we dropped afs_xvcache
+ * \return 1 if queued, 0 otherwise
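+ *
+ * Typical call, from afs_FlushVCache with afs_xvcache write-held:
+ * \code
+ *     if (!afs_shuttingdown)
+ *         afs_QueueVCB(avc, slept);
+ * \endcode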
*/
static afs_int32
-afs_QueueVCB(struct vcache *avc)
+afs_QueueVCB(struct vcache *avc, int *slept)
{
+ int queued = 0;
struct server *tsp;
struct afs_cbr *tcbp;
+ int reacquire = 0;
AFS_STATCNT(afs_QueueVCB);
+
+ ObtainWriteLock(&afs_xvcb, 274);
+
+ /* we can't really give back callbacks on RO files, since the
+ * server only tracks them on a per-volume basis, and we don't
+ * know whether we still have some other files from the same
+ * volume. */
+ if (!((avc->f.states & CRO) == 0 && avc->callback)) {
+ goto done;
+ }
+
/* The callback is really just a struct server ptr. */
tsp = (struct server *)(avc->callback);
+ if (!afs_cbrSpace) {
+ /* If we don't have CBR space, AllocCBR may block or hit the net for
+ * clearing up CBRs. Hitting the net may involve a fileserver
+ * needing to contact us, so we must drop xvcache so we don't block
+ * those requests from going through. */
+ reacquire = *slept = 1;
+ ReleaseWriteLock(&afs_xvcache);
+ }
+
/* we now have a pointer to the server, so we just allocate
* a queue entry and queue it.
*/
- MObtainWriteLock(&afs_xvcb, 274);
tcbp = afs_AllocCBR();
- tcbp->fid = avc->fid.Fid;
+ tcbp->fid = avc->f.fid.Fid;
tcbp->next = tsp->cbrs;
if (tsp->cbrs)
tcbp->pprev = &tsp->cbrs;
afs_InsertHashCBR(tcbp);
+ queued = 1;
+ done:
/* now release locks and return */
- MReleaseWriteLock(&afs_xvcb);
- return 0;
+ ReleaseWriteLock(&afs_xvcb);
+
+ if (reacquire) {
+ /* make sure this is after dropping xvcb, for locking order */
+ ObtainWriteLock(&afs_xvcache, 279);
+ }
+ return queued;
}
-/*
- * afs_RemoveVCB
- *
- * Description:
- * Remove a queued callback for a given Fid.
- *
- * Parameters:
- * afid: The fid we want cleansed of queued callbacks.
+/*!
+ * Remove a queued callback for a given Fid.
*
* Environment:
* Locks xvcb and xserver locks.
* Typically called with xdcache, xvcache and/or individual vcache
* entries locked.
+ *
+ * \param afid The fid we want cleansed of queued callbacks.
+ *
*/
void
struct afs_cbr *cbr, *ncbr;
AFS_STATCNT(afs_RemoveVCB);
- MObtainWriteLock(&afs_xvcb, 275);
+ ObtainWriteLock(&afs_xvcb, 275);
slot = afs_HashCBRFid(&afid->Fid);
ncbr = afs_cbrHashT[slot];
}
}
- MReleaseWriteLock(&afs_xvcb);
+ ReleaseWriteLock(&afs_xvcb);
}
-/*
- * afs_NewVCache
- *
- * Description:
- * This routine is responsible for allocating a new cache entry
- * from the free list. It formats the cache entry and inserts it
- * into the appropriate hash tables. It must be called with
- * afs_xvcache write-locked so as to prevent several processes from
- * trying to create a new cache entry simultaneously.
- *
- * Parameters:
- * afid : The file id of the file whose cache entry is being
- * created.
- */
-/* LOCK: afs_NewVCache afs_xvcache W */
-struct vcache *
-afs_NewVCache(struct VenusFid *afid, struct server *serverp)
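+/*!
+ * Walk ReclaimedVCList, flushing each entry and waking any waiters
+ * blocked on its CVInit (or, on Darwin, CDeadVnode) state.
+ */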
+void
+afs_FlushReclaimedVcaches(void)
{
+#if !defined(AFS_LINUX22_ENV)
struct vcache *tvc;
- afs_int32 i, j;
- afs_int32 anumber = VCACHE_FREE;
-#ifdef AFS_AIX_ENV
- struct gnode *gnodepnt;
-#endif
-#ifdef AFS_OSF_ENV
- struct vcache *nvc;
-#endif /* AFS_OSF_ENV */
- struct afs_q *tq, *uq;
int code, fv_slept;
+ struct vcache *tmpReclaimedVCList = NULL;
- AFS_STATCNT(afs_NewVCache);
-#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
-#if defined(AFS_OSF30_ENV) || defined(AFS_LINUX22_ENV)
- if (afs_vcount >= afs_maxvcount)
-#else
- /*
- * If we are using > 33 % of the total system vnodes for AFS vcache
- * entries or we are using the maximum number of vcache entries,
- * then free some. (if our usage is > 33% we should free some, if
- * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
- * we _must_ free some -- no choice).
- */
- if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount))
+ ObtainWriteLock(&afs_xvreclaim, 76);
+ while (ReclaimedVCList) {
+ tvc = ReclaimedVCList; /* take from free list */
+ ReclaimedVCList = tvc->nextfree;
+ tvc->nextfree = NULL;
+ code = afs_FlushVCache(tvc, &fv_slept);
+ if (code) {
+	    /* The flush failed; we must not leak the entry. Stash it on
+	     * a temporary list and splice that back onto ReclaimedVCList
+	     * once the scan is done. A smarter retry policy would be
+	     * better here. */
+ tvc->nextfree = tmpReclaimedVCList;
+ tmpReclaimedVCList = tvc;
+ /* printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code); */
+ }
+ if (tvc->f.states & (CVInit
+#ifdef AFS_DARWIN80_ENV
+ | CDeadVnode
#endif
- {
- int i;
- char *panicstr;
-
- i = 0;
- for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
- tvc = QTOV(tq);
- uq = QPrev(tq);
- if (tvc->states & CVFlushed) {
- refpanic("CVFlushed on VLRU");
- } else if (i++ > afs_maxvcount) {
- refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
- } else if (QNext(uq) != tq) {
- refpanic("VLRU inconsistent");
- } else if (!VREFCOUNT_GT(tvc,0)) {
- refpanic("refcnt 0 on VLRU");
- }
+ )) {
+ tvc->f.states &= ~(CVInit
+#ifdef AFS_DARWIN80_ENV
+ | CDeadVnode
+#endif
+ );
+ afs_osi_Wakeup(&tvc->f.states);
+ }
+ }
+ if (tmpReclaimedVCList)
+ ReclaimedVCList = tmpReclaimedVCList;
-#if defined(AFS_LINUX22_ENV)
- if (tvc != afs_globalVp && VREFCOUNT(tvc) > 1 && tvc->opens == 0) {
- struct dentry *dentry;
- struct list_head *cur, *head;
- AFS_FAST_HOLD(tvc);
- AFS_GUNLOCK();
-#if defined(AFS_LINUX24_ENV)
- spin_lock(&dcache_lock);
+ ReleaseWriteLock(&afs_xvreclaim);
#endif
- head = &(AFSTOV(tvc))->i_dentry;
+}
-restart:
- cur = head;
- while ((cur = cur->next) != head) {
- dentry = list_entry(cur, struct dentry, d_alias);
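+/*!
+ * Finish initializing a vcache after its vnode is attached. A non-zero
+ * seq marks the entry CBulkFetching for bulk status fetches.
+ */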
+void
+afs_PostPopulateVCache(struct vcache *avc, struct VenusFid *afid, int seq)
+{
+ /*
+ * The proper value for mvstat (for root fids) is setup by the caller.
+ */
+ avc->mvstat = 0;
+ if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
+ avc->mvstat = 2;
- if (d_unhashed(dentry))
- continue;
+ if (afs_globalVFS == 0)
+ osi_Panic("afs globalvfs");
- dget_locked(dentry);
+ osi_PostPopulateVCache(avc);
-#if defined(AFS_LINUX24_ENV)
- spin_unlock(&dcache_lock);
-#endif
- if (d_invalidate(dentry) == -EBUSY) {
- dput(dentry);
- /* perhaps lock and try to continue? (use cur as head?) */
- goto inuse;
- }
- dput(dentry);
-#if defined(AFS_LINUX24_ENV)
- spin_lock(&dcache_lock);
-#endif
- goto restart;
- }
-#if defined(AFS_LINUX24_ENV)
- spin_unlock(&dcache_lock);
-#endif
- inuse:
- AFS_GLOCK();
- AFS_FAST_RELE(tvc);
- }
-#endif
+ avc->dchint = NULL;
+ osi_dnlc_purgedp(avc); /* this may be overkill */
+ memset(&(avc->callsort), 0, sizeof(struct afs_q));
+ avc->slocks = NULL;
+ avc->f.states &=~ CVInit;
+ if (seq) {
+ avc->f.states |= CBulkFetching;
+ avc->f.m.Length = seq;
+ }
+ afs_osi_Wakeup(&avc->f.states);
+}
- if (VREFCOUNT_GT(tvc,0) && !VREFCOUNT_GT(tvc,1) &&
- tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
- code = afs_FlushVCache(tvc, &fv_slept);
- if (code == 0) {
- anumber--;
- }
- if (fv_slept) {
- uq = VLRU.prev;
- i = 0;
- continue; /* start over - may have raced. */
- }
- }
- if (tq == uq)
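+/*!
+ * Walk the VLRU trying to evict up to anumber vcache entries,
+ * restarting the scan whenever an eviction drops the lock.
+ *
+ * \param anumber Maximum number of entries to free.
+ * \return 0
+ */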
+int
+afs_ShakeLooseVCaches(afs_int32 anumber)
+{
+ afs_int32 i, loop;
+ struct vcache *tvc;
+ struct afs_q *tq, *uq;
+ int fv_slept, defersleep = 0;
+ afs_int32 target = anumber;
+
+ i = 0;
+ loop = 0;
+ for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
+ tvc = QTOV(tq);
+ uq = QPrev(tq);
+ if (tvc->f.states & CVFlushed) {
+ refpanic("CVFlushed on VLRU");
+ /* In the other path, this was 2 * afs_cacheStats */
+ } else if (!afsd_dynamic_vcaches && i++ > afs_maxvcount) {
+ refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
+ } else if (QNext(uq) != tq) {
+ refpanic("VLRU inconsistent");
+ } else if (tvc->f.states & CVInit) {
+ continue;
+ }
+
+ fv_slept = 0;
+ if (osi_TryEvictVCache(tvc, &fv_slept, defersleep))
+ anumber--;
+
+ if (fv_slept) {
+ if (loop++ > 100)
break;
+ uq = VLRU.prev;
+ i = 0;
+ continue; /* start over - may have raced. */
}
- if (anumber == VCACHE_FREE) {
- printf("afs_NewVCache: warning none freed, using %d of %d\n",
- afs_vcount, afs_maxvcount);
- if (afs_vcount >= afs_maxvcount) {
- printf("afs_NewVCache - none freed\n");
- return NULL;
+ if (tq == uq) {
+ if (anumber && !defersleep) {
+ defersleep = 1;
+ tq = VLRU.prev;
+ continue;
}
+ break;
}
}
+ if (!afsd_dynamic_vcaches && anumber == target) {
+ afs_warn("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
+ afs_vcount, afs_maxvcount);
+ }
-#if defined(AFS_LINUX22_ENV)
+ return 0;
+}
+
+/* Allocate a new vcache and assign it a disk slot number, reusing a
+ * slot from the free list when one is available. */
+
+static struct vcache *
+afs_AllocVCache(void)
{
- struct inode *ip;
+ struct vcache *tvc;
- AFS_GUNLOCK();
- ip = new_inode(afs_globalVFS);
- if (!ip)
- osi_Panic("afs_NewVCache: no more inodes");
- AFS_GLOCK();
-#if defined(STRUCT_SUPER_HAS_ALLOC_INODE)
- tvc = VTOAFS(ip);
-#else
- tvc = afs_osi_Alloc(sizeof(struct vcache));
- ip->u.generic_ip = tvc;
- tvc->v = ip;
-#endif
-}
-#else
- AFS_GUNLOCK();
- if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
- /* What should we do ???? */
- osi_Panic("afs_NewVCache: no more vnodes");
- }
- AFS_GLOCK();
+ tvc = osi_NewVnode();
- tvc = nvc;
- tvc->nextfree = NULL;
-#endif
afs_vcount++;
-#else /* AFS_OSF_ENV */
- /* pull out a free cache entry */
- if (!freeVCList) {
- i = 0;
- for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
- tvc = QTOV(tq);
- uq = QPrev(tq);
- if (tvc->states & CVFlushed) {
- refpanic("CVFlushed on VLRU");
- } else if (i++ > 2 * afs_cacheStats) { /* even allowing for a few xallocs... */
- refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
- } else if (QNext(uq) != tq) {
- refpanic("VLRU inconsistent");
- } else if (tvc->states & CVInit) {
- continue;
- }
+ /* track the peak */
+ if (afsd_dynamic_vcaches && afs_maxvcount < afs_vcount) {
+ afs_maxvcount = afs_vcount;
+ /*printf("peak vnodes: %d\n", afs_maxvcount);*/
+ }
- if (!VREFCOUNT_GT(tvc,0)
-#if defined(AFS_DARWIN_ENV) && !defined(UKERNEL) && !defined(AFS_DARWIN80_ENV)
- || ((VREFCOUNT(tvc) == 1) &&
- (UBCINFOEXISTS(AFSTOV(tvc))))
-#endif
- && tvc->opens == 0 && (tvc->states & CUnlinkedDel) == 0) {
-#if defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
-#ifdef AFS_DARWIN80_ENV
- vnode_t tvp = AFSTOV(tvc);
- /* VREFCOUNT_GT only sees usecounts, not iocounts */
- /* so this may fail to actually recycle the vnode now */
- /* must call vnode_get to avoid races. */
- if (vnode_get(tvp) == 0) {
- fv_slept=1;
- /* must release lock, since vnode_put will immediately
- reclaim if there are no other users */
- ReleaseWriteLock(&afs_xvcache);
- AFS_GUNLOCK();
- vnode_recycle(tvp);
- vnode_put(tvp);
- AFS_GLOCK();
- ObtainWriteLock(&afs_xvcache, 336);
- }
- /* we can't use the vnode_recycle return value to figure
- * this out, since the iocount we have to hold makes it
- * always "fail" */
- if (AFSTOV(tvc) == tvp)
- code = EBUSY;
- else
- code = 0;
-#else
- /*
- * vgone() reclaims the vnode, which calls afs_FlushVCache(),
- * then it puts the vnode on the free list.
- * If we don't do this we end up with a cleaned vnode that's
- * not on the free list.
- * XXX assume FreeBSD is the same for now.
- */
- AFS_GUNLOCK();
- vgone(AFSTOV(tvc));
- fv_slept = 0;
- code = 0;
- AFS_GLOCK();
-#endif
-#else
- code = afs_FlushVCache(tvc, &fv_slept);
+ afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
+
+ /* If we create a new inode, we either give it a new slot number,
+ * or if one's available, use a slot number from the slot free list
+ */
+ if (afs_freeSlotList != NULL) {
+ struct afs_slotlist *tmp;
+
+ tvc->diskSlot = afs_freeSlotList->slot;
+ tmp = afs_freeSlotList;
+ afs_freeSlotList = tmp->next;
+ afs_osi_Free(tmp, sizeof(struct afs_slotlist));
+ } else {
+ tvc->diskSlot = afs_nextVcacheSlot++;
+ }
+
+ return tvc;
+}
+
+/* Pre-populate a newly allocated vcache. On platforms where the actual
+ * vnode is attached to the vcache, this function is called before
+ * attachment; therefore it cannot perform any actions on the vnode
+ * itself. */
+
+static void
+afs_PrePopulateVCache(struct vcache *avc, struct VenusFid *afid,
+ struct server *serverp) {
+
+ afs_uint32 slot;
+ slot = avc->diskSlot;
+
+ osi_PrePopulateVCache(avc);
+
+ avc->diskSlot = slot;
+ QZero(&avc->metadirty);
+
+ AFS_RWLOCK_INIT(&avc->lock, "vcache lock");
+
+ avc->mvid = NULL;
+ avc->linkData = NULL;
+ avc->cbExpires = 0;
+ avc->opens = 0;
+ avc->execsOrWriters = 0;
+ avc->flockCount = 0;
+ avc->f.states = CVInit;
+ avc->last_looker = 0;
+ avc->f.fid = *afid;
+ avc->asynchrony = -1;
+ avc->vc_error = 0;
+
+ hzero(avc->mapDV);
+ avc->f.truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
+ hzero(avc->f.m.DataVersion); /* in case we copy it into flushDV */
+ avc->Access = NULL;
+ avc->callback = serverp; /* to minimize chance that clear
+ * request is lost */
+
+#if defined(AFS_CACHE_BYPASS)
+ avc->cachingStates = 0;
+ avc->cachingTransitions = 0;
#endif
- if (code == 0) {
- anumber--;
- }
- if (fv_slept) {
- uq = VLRU.prev;
- i = 0;
- continue; /* start over - may have raced. */
- }
+}
+
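+/*!
+ * Flush every vcache in the hash table, restarting the scan from the
+ * top whenever a flush drops afs_xvcache.
+ */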
+void
+afs_FlushAllVCaches(void)
+{
+ int i;
+ struct vcache *tvc, *nvc;
+
+ ObtainWriteLock(&afs_xvcache, 867);
+
+ retry:
+ for (i = 0; i < VCSIZE; i++) {
+ for (tvc = afs_vhashT[i]; tvc; tvc = nvc) {
+ int slept;
+
+ nvc = tvc->hnext;
+ if (afs_FlushVCache(tvc, &slept)) {
+ afs_warn("Failed to flush vcache 0x%lx\n", (unsigned long)(uintptrsz)tvc);
}
- if (tq == uq)
- break;
+ if (slept) {
+ goto retry;
+ }
}
}
- if (!freeVCList) {
- /* none free, making one is better than a panic */
- afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
- tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
-#if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
- tvc->v = NULL; /* important to clean this, or use memset 0 */
-#endif
-#ifdef KERNEL_HAVE_PIN
- pin((char *)tvc, sizeof(struct vcache)); /* XXX */
-#endif
-#if defined(AFS_SGI_ENV)
- {
- char name[METER_NAMSZ];
- memset(tvc, 0, sizeof(struct vcache));
- tvc->v.v_number = ++afsvnumbers;
- tvc->vc_rwlockid = OSI_NO_LOCKID;
- initnsema(&tvc->vc_rwlock, 1,
- makesname(name, "vrw", tvc->v.v_number));
-#ifndef AFS_SGI53_ENV
- initnsema(&tvc->v.v_sync, 0,
- makesname(name, "vsy", tvc->v.v_number));
-#endif
-#ifndef AFS_SGI62_ENV
- initnlock(&tvc->v.v_lock,
- makesname(name, "vlk", tvc->v.v_number));
-#endif
+
+ ReleaseWriteLock(&afs_xvcache);
+}
+
+/*!
+ * This routine is responsible for allocating a new cache entry
+ * from the free list. It formats the cache entry and inserts it
+ * into the appropriate hash tables. It must be called with
+ * afs_xvcache write-locked so as to prevent several processes from
+ * trying to create a new cache entry simultaneously.
+ *
+ * LOCK: afs_NewVCache afs_xvcache W
+ *
+ * \param afid The file id of the file whose cache entry is being created.
+ * \param serverp The server granting our callback, if any; stored in avc->callback.
+ * \param seq Bulk-stat sequence number; non-zero marks the entry CBulkFetching.
+ *
+ * \return The new vcache struct.
+ */
+
+static_inline struct vcache *
+afs_NewVCache_int(struct VenusFid *afid, struct server *serverp, int seq)
+{
+ struct vcache *tvc;
+ afs_int32 i, j;
+ afs_int32 anumber = VCACHE_FREE;
+
+ AFS_STATCNT(afs_NewVCache);
+
+ afs_FlushReclaimedVcaches();
+
+#if defined(AFS_LINUX22_ENV)
+ if(!afsd_dynamic_vcaches && afs_vcount >= afs_maxvcount) {
+ afs_ShakeLooseVCaches(anumber);
+ if (afs_vcount >= afs_maxvcount) {
+ afs_warn("afs_NewVCache - none freed\n");
+ return NULL;
}
-#endif /* AFS_SGI_ENV */
+ }
+ tvc = afs_AllocVCache();
+#else /* AFS_LINUX22_ENV */
+ /* pull out a free cache entry */
+ if (!freeVCList) {
+ afs_ShakeLooseVCaches(anumber);
+ }
+
+ if (!freeVCList) {
+ tvc = afs_AllocVCache();
} else {
tvc = freeVCList; /* take from free list */
freeVCList = tvc->nextfree;
tvc->nextfree = NULL;
- }
-#endif /* AFS_OSF_ENV */
+ afs_vcount++; /* balanced by FlushVCache */
+ } /* end of if (!freeVCList) */
+
+#endif /* AFS_LINUX22_ENV */
#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
if (tvc->v)
panic("afs_NewVCache(): free vcache with vnode attached");
#endif
-#if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
- memset((char *)tvc, 0, sizeof(struct vcache));
-#else
- tvc->uncred = 0;
-#endif
+ /* Populate the vcache with as much as we can. */
+ afs_PrePopulateVCache(tvc, afid, serverp);
- RWLOCK_INIT(&tvc->lock, "vcache lock");
-#if defined(AFS_SUN5_ENV)
- RWLOCK_INIT(&tvc->vlock, "vcache vlock");
-#endif /* defined(AFS_SUN5_ENV) */
-
- tvc->parentVnode = 0;
- tvc->mvid = NULL;
- tvc->linkData = NULL;
- tvc->cbExpires = 0;
- tvc->opens = 0;
- tvc->execsOrWriters = 0;
- tvc->flockCount = 0;
- tvc->anyAccess = 0;
- tvc->states = CVInit;
- tvc->last_looker = 0;
- tvc->fid = *afid;
- tvc->asynchrony = -1;
- tvc->vc_error = 0;
-#ifdef AFS_TEXT_ENV
- tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
-#endif
- hzero(tvc->mapDV);
- tvc->truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
- hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
- tvc->Access = NULL;
- tvc->callback = serverp; /* to minimize chance that clear
- * request is lost */
+ /* Thread the vcache onto the VLRU */
i = VCHash(afid);
j = VCHashV(afid);
tvc->hnext = afs_vhashT[i];
afs_vhashT[i] = tvc;
QAdd(&afs_vhashTV[j], &tvc->vhashq);
-
+
if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
refpanic("NewVCache VLRU inconsistent");
}
refpanic("NewVCache VLRU inconsistent4");
}
vcachegen++;
- /* it should now be safe to drop the xvcache lock */
-#ifdef AFS_OBSD_ENV
- ReleaseWriteLock(&afs_xvcache);
- AFS_GUNLOCK();
- afs_nbsd_getnewvnode(tvc); /* includes one refcount */
- AFS_GLOCK();
- ObtainWriteLock(&afs_xvcache,337);
- lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
-#endif
-#ifdef AFS_DARWIN_ENV
- ReleaseWriteLock(&afs_xvcache);
- AFS_GUNLOCK();
- afs_darwin_getnewvnode(tvc); /* includes one refcount */
- AFS_GLOCK();
- ObtainWriteLock(&afs_xvcache,338);
-#ifdef AFS_DARWIN80_ENV
- LOCKINIT(tvc->rwlock);
-#else
- lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
-#endif
-#endif
-#ifdef AFS_FBSD_ENV
- {
- struct vnode *vp;
- ReleaseWriteLock(&afs_xvcache);
- AFS_GUNLOCK();
-#if defined(AFS_FBSD60_ENV)
- if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
-#elif defined(AFS_FBSD50_ENV)
- if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
-#else
- if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
-#endif
- panic("afs getnewvnode"); /* can't happen */
- AFS_GLOCK();
- ObtainWriteLock(&afs_xvcache,339);
- if (tvc->v != NULL) {
- /* I'd like to know if this ever happens...
- * We don't drop global for the rest of this function,
- * so if we do lose the race, the other thread should
- * have found the same vnode and finished initializing
- * the vcache entry. Is it conceivable that this vcache
- * entry could be recycled during this interval? If so,
- * then there probably needs to be some sort of additional
- * mutual exclusion (an Embryonic flag would suffice).
- * -GAW */
- printf("afs_NewVCache: lost the race\n");
- return (tvc);
- }
- tvc->v = vp;
- tvc->v->v_data = tvc;
- lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
- }
-#endif
-
-#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
+ /* it should now be safe to drop the xvcache lock - so attach an inode
+ * to this vcache, where necessary */
+ osi_AttachVnode(tvc, seq);
+
+ /* Get a reference count to hold this vcache for the VLRUQ. Note that
+ * we have to do this after attaching the vnode, because the reference
+ * count may be held in the vnode itself */
+
+#if defined(AFS_LINUX22_ENV)
/* Hold it for the LRU (should make count 2) */
- VN_HOLD(AFSTOV(tvc));
-#else /* AFS_OSF_ENV */
-#if !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
+ AFS_FAST_HOLD(tvc);
+#elif !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
VREFCOUNT_SET(tvc, 1); /* us */
-#endif /* AFS_XBSD_ENV */
-#endif /* AFS_OSF_ENV */
-#ifdef AFS_AIX32_ENV
- LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
- tvc->vmh = tvc->segid = NULL;
- tvc->credp = NULL;
-#endif
-#ifdef AFS_BOZONLOCK_ENV
-#if defined(AFS_SUN5_ENV)
- rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
-
-#if defined(AFS_SUN55_ENV)
- /* This is required if the kaio (kernel aynchronous io)
- ** module is installed. Inside the kernel, the function
- ** check_vp( common/os/aio.c) checks to see if the kernel has
- ** to provide asynchronous io for this vnode. This
- ** function extracts the device number by following the
- ** v_data field of the vnode. If we do not set this field
- ** then the system panics. The value of the v_data field
- ** is not really important for AFS vnodes because the kernel
- ** does not do asynchronous io for regular files. Hence,
- ** for the time being, we fill up the v_data field with the
- ** vnode pointer itself. */
- tvc->v.v_data = (char *)tvc;
-#endif /* AFS_SUN55_ENV */
-#endif
- afs_BozonInit(&tvc->pvnLock, tvc);
-#endif
-
- /* initialize vnode data, note vrefCount is v.v_count */
-#ifdef AFS_AIX_ENV
- /* Don't forget to free the gnode space */
- tvc->v.v_gnode = gnodepnt =
- (struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
- memset((char *)gnodepnt, 0, sizeof(struct gnode));
-#endif
-#ifdef AFS_SGI64_ENV
- memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
- bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
-#ifdef AFS_SGI65_ENV
- vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
- vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
-#else
- bhv_head_init(&(tvc->v.v_bh));
- bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
-#endif
-#ifdef AFS_SGI65_ENV
- tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
-#ifdef VNODE_TRACING
- tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
-#endif
- init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
- tvc->v.v_number);
- init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
- init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
-#endif
- vnode_pcache_init(&tvc->v);
-#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
- /* Above define is never true execpt in SGI test kernels. */
- init_bitlock(&(tvc->v.v_flag, VLOCK, "vnode", tvc->v.v_number);
-#endif
-#ifdef INTR_KTHREADS
- AFS_VN_INIT_BUF_LOCK(&(tvc->v));
#endif
-#else
- SetAfsVnode(AFSTOV(tvc));
-#endif /* AFS_SGI64_ENV */
- /*
- * The proper value for mvstat (for root fids) is setup by the caller.
- */
- tvc->mvstat = 0;
- if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
- tvc->mvstat = 2;
- if (afs_globalVFS == 0)
- osi_Panic("afs globalvfs");
-#if !defined(AFS_LINUX22_ENV)
- vSetVfsp(tvc, afs_globalVFS);
+
+#if defined (AFS_FBSD_ENV)
+ if (tvc->f.states & CVInit)
#endif
- vSetType(tvc, VREG);
-#ifdef AFS_AIX_ENV
- tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes; /* link off vfs */
- tvc->v.v_vfsprev = NULL;
- afs_globalVFS->vfs_vnodes = &tvc->v;
- if (tvc->v.v_vfsnext != NULL)
- tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
- tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
- gnodepnt->gn_vnode = &tvc->v;
-#endif
-#if defined(AFS_DUX40_ENV)
- insmntque(tvc, afs_globalVFS, &afs_ubcops);
-#else
-#ifdef AFS_OSF_ENV
- /* Is this needed??? */
- insmntque(tvc, afs_globalVFS);
-#endif /* AFS_OSF_ENV */
-#endif /* AFS_DUX40_ENV */
-#if defined(AFS_SGI_ENV)
- VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
- osi_Assert((tvc->v.v_flag & VINACT) == 0);
- tvc->v.v_flag = 0;
- osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
- osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
- osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
- osi_Assert(tvc->v.v_filocks == NULL);
-#if !defined(AFS_SGI65_ENV)
- osi_Assert(tvc->v.v_filocksem == NULL);
-#endif
- osi_Assert(tvc->cred == NULL);
-#ifdef AFS_SGI64_ENV
- vnode_pcache_reinit(&tvc->v);
- tvc->v.v_rdev = NODEV;
-#endif
- vn_initlist((struct vnlist *)&tvc->v);
- tvc->lastr = 0;
-#endif /* AFS_SGI_ENV */
- tvc->dchint = NULL;
- osi_dnlc_purgedp(tvc); /* this may be overkill */
- memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
- tvc->slocks = NULL;
- tvc->states &=~ CVInit;
- afs_osi_Wakeup(&tvc->states);
+ afs_PostPopulateVCache(tvc, afid, seq);
return tvc;
-
} /*afs_NewVCache */
-/*
- * afs_FlushActiveVcaches
+struct vcache *
+afs_NewVCache(struct VenusFid *afid, struct server *serverp)
+{
+ return afs_NewVCache_int(afid, serverp, 0);
+}
+
+struct vcache *
+afs_NewBulkVCache(struct VenusFid *afid, struct server *serverp, int seq)
+{
+ return afs_NewVCache_int(afid, serverp, seq);
+}
+
+/*!
+ * Walk the vcache hash table: send ExtendLock keep-alives for entries
+ * holding flocks (when doflocks), store back "core" files, and finish
+ * deferred unlinks.
*
- * Description:
- * ???
+ * LOCK: afs_FlushActiveVcaches afs_xvcache N
*
- * Parameters:
- * doflocks : Do we handle flocks?
+ * \param doflocks : Do we handle flocks?
*/
-/* LOCK: afs_FlushActiveVcaches afs_xvcache N */
void
-afs_FlushActiveVcaches(register afs_int32 doflocks)
+afs_FlushActiveVcaches(afs_int32 doflocks)
{
- register struct vcache *tvc;
- register int i;
- register struct conn *tc;
- register afs_int32 code;
- register struct AFS_UCRED *cred = NULL;
+ struct vcache *tvc;
+ int i;
+ struct afs_conn *tc;
+ afs_int32 code;
+ afs_ucred_t *cred = NULL;
struct vrequest treq, ureq;
struct AFSVolSync tsync;
int didCore;
ObtainReadLock(&afs_xvcache);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
- if (tvc->states & CVInit) continue;
+ if (tvc->f.states & CVInit) continue;
#ifdef AFS_DARWIN80_ENV
- if (tvc->states & CDeadVnode &&
- (tvc->states & (CCore|CUnlinkedDel) ||
+ if (tvc->f.states & CDeadVnode &&
+ (tvc->f.states & (CCore|CUnlinkedDel) ||
tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
#endif
if (doflocks && tvc->flockCount != 0) {
+ struct rx_connection *rxconn;
/* if this entry has an flock, send a keep-alive call out */
osi_vnhold(tvc, 0);
ReleaseReadLock(&afs_xvcache);
afs_InitReq(&treq, afs_osi_credp);
treq.flags |= O_NONBLOCK;
- tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
+ tc = afs_Conn(&tvc->f.fid, &treq, SHARED_LOCK, &rxconn);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
RX_AFS_GUNLOCK();
code =
- RXAFS_ExtendLock(tc->id,
- (struct AFSFid *)&tvc->fid.Fid,
+ RXAFS_ExtendLock(rxconn,
+ (struct AFSFid *)&tvc->f.fid.Fid,
&tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze
- (tc, code, &tvc->fid, &treq,
+ (tc, rxconn, code, &tvc->f.fid, &treq,
AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
ReleaseWriteLock(&tvc->lock);
#endif
}
didCore = 0;
- if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
+ if ((tvc->f.states & CCore) || (tvc->f.states & CUnlinkedDel)) {
/*
* Don't let it evaporate in case someone else is in
* this code. Also, drop the afs_xvcache lock while
AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
ObtainWriteLock(&tvc->lock, 52);
- if (tvc->states & CCore) {
- tvc->states &= ~CCore;
+ if (tvc->f.states & CCore) {
+ tvc->f.states &= ~CCore;
/* XXXX Find better place-holder for cred XXXX */
- cred = (struct AFS_UCRED *)tvc->linkData;
+ cred = (afs_ucred_t *)tvc->linkData;
tvc->linkData = NULL; /* XXX */
afs_InitReq(&ureq, cred);
afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
osi_FlushText(tvc);
didCore = 1;
if (code && code != VNOVNODE) {
- afs_StoreWarn(code, tvc->fid.Fid.Volume,
+ afs_StoreWarn(code, tvc->f.fid.Fid.Volume,
/* /dev/console */ 1);
}
- } else if (tvc->states & CUnlinkedDel) {
+ } else if (tvc->f.states & CUnlinkedDel) {
/*
* Ignore errors
*/
-/*
- * afs_VerifyVCache
- *
- * Description:
- * Make sure a cache entry is up-to-date status-wise.
+/*!
+ * Make sure a cache entry is up-to-date status-wise.
*
* NOTE: everywhere that calls this can potentially be sped up
* by checking CStatd first, and avoiding doing the InitReq
* Anymore, the only places that call this KNOW already that the
* vcache is not up-to-date, so we don't screw around.
*
- * Parameters:
- * avc : Ptr to vcache entry to verify.
- * areq : ???
+ * \param avc Pointer to vcache entry to verify.
+ * \param areq Ptr to the associated vrequest.
+ *
+ * \return 0 for success or other error codes.
*/
int
afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
{
- register struct vcache *tvc;
+ struct vcache *tvc;
AFS_STATCNT(afs_VerifyVCache);
-#if defined(AFS_OSF_ENV)
- ObtainReadLock(&avc->lock);
- if (afs_IsWired(avc)) {
- ReleaseReadLock(&avc->lock);
- return 0;
- }
- ReleaseReadLock(&avc->lock);
-#endif /* AFS_OSF_ENV */
/* otherwise we must fetch the status info */
ObtainWriteLock(&avc->lock, 53);
- if (avc->states & CStatd) {
+ if (avc->f.states & CStatd) {
ReleaseWriteLock(&avc->lock);
return 0;
}
ObtainWriteLock(&afs_xcbhash, 461);
- avc->states &= ~(CStatd | CUnique);
+ avc->f.states &= ~(CStatd | CUnique);
avc->callback = NULL;
afs_DequeueCallback(avc);
ReleaseWriteLock(&afs_xcbhash);
* it's possible that the contents of this directory, or this
* file's name have changed, thus invalidating the dnlc contents.
*/
- if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
+ if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(avc);
else
osi_dnlc_purgevp(avc);
/* fetch the status info */
- tvc = afs_GetVCache(&avc->fid, areq, NULL, avc);
+ tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
if (!tvc)
return ENOENT;
/* Put it back; caller has already incremented vrefCount */
} /*afs_VerifyVCache */
-/*
- * afs_SimpleVStat
+/*!
+ * Simple copy of stat info into cache.
*
- * Description:
- * Simple copy of stat info into cache.
- *
- * Parameters:
- * avc : Ptr to vcache entry involved.
- * astat : Ptr to stat info to copy.
+ * Callers: as of 1992-04-29, only called by WriteVCache
*
- * Environment:
- * Nothing interesting.
+ * \param avc Ptr to vcache entry involved.
+ * \param astat Ptr to stat info to copy.
*
- * Callers: as of 1992-04-29, only called by WriteVCache
*/
static void
-afs_SimpleVStat(register struct vcache *avc,
- register struct AFSFetchStatus *astat, struct vrequest *areq)
+afs_SimpleVStat(struct vcache *avc,
+ struct AFSFetchStatus *astat, struct vrequest *areq)
{
afs_size_t length;
AFS_STATCNT(afs_SimpleVStat);
-#ifdef AFS_SGI_ENV
- if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
- && !AFS_VN_MAPPED((vnode_t *) avc)) {
-#else
- if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
-#endif
#ifdef AFS_64BIT_CLIENT
FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
length = astat->Length;
#endif /* AFS_64BIT_CLIENT */
+
#if defined(AFS_SGI_ENV)
+ if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
+ && !AFS_VN_MAPPED((vnode_t *) avc)) {
osi_Assert((valusema(&avc->vc_rwlock) <= 0)
&& (OSI_GET_LOCKID() == avc->vc_rwlockid));
- if (length < avc->m.Length) {
+ if (length < avc->f.m.Length) {
vnode_t *vp = (vnode_t *) avc;
osi_Assert(WriteLocked(&avc->lock));
AFS_GLOCK();
ObtainWriteLock(&avc->lock, 67);
}
+ }
#endif
- /* if writing the file, don't fetch over this value */
+
+ if (!afs_DirtyPages(avc)) {
+ /* if actively writing the file, don't fetch over this value */
afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
- avc->m.Length = length;
- avc->m.Date = astat->ClientModTime;
+ avc->f.m.Length = length;
+ avc->f.m.Date = astat->ClientModTime;
}
- avc->m.Owner = astat->Owner;
- avc->m.Group = astat->Group;
- avc->m.Mode = astat->UnixModeBits;
+ avc->f.m.Owner = astat->Owner;
+ avc->f.m.Group = astat->Group;
+ avc->f.m.Mode = astat->UnixModeBits;
if (vType(avc) == VREG) {
- avc->m.Mode |= S_IFREG;
+ avc->f.m.Mode |= S_IFREG;
} else if (vType(avc) == VDIR) {
- avc->m.Mode |= S_IFDIR;
+ avc->f.m.Mode |= S_IFDIR;
} else if (vType(avc) == VLNK) {
- avc->m.Mode |= S_IFLNK;
- if ((avc->m.Mode & 0111) == 0)
+ avc->f.m.Mode |= S_IFLNK;
+ if ((avc->f.m.Mode & 0111) == 0)
avc->mvstat = 1;
}
- if (avc->states & CForeign) {
+ if (avc->f.states & CForeign) {
struct axscache *ac;
- avc->anyAccess = astat->AnonymousAccess;
+ avc->f.anyAccess = astat->AnonymousAccess;
#ifdef badidea
if ((astat->CallerAccess & ~astat->AnonymousAccess))
/* USED TO SAY :
afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
}
-
} /*afs_SimpleVStat */
-/*
- * afs_WriteVCache
+/*!
+ * Store the status info *only* back to the server for a
+ * fid/vrequest.
*
- * Description:
- * Store the status info *only* back to the server for a
- * fid/vrequest.
+ * Environment: Must be called with a shared lock held on the vnode.
*
- * Parameters:
- * avc : Ptr to the vcache entry.
- * astatus : Ptr to the status info to store.
- * areq : Ptr to the associated vrequest.
+ * \param avc Ptr to the vcache entry.
+ * \param astatus Ptr to the status info to store.
+ * \param areq Ptr to the associated vrequest.
*
- * Environment:
- * Must be called with a shared lock held on the vnode.
+ * \return Operation status.
*/
int
-afs_WriteVCache(register struct vcache *avc,
- register struct AFSStoreStatus *astatus,
+afs_WriteVCache(struct vcache *avc,
+ struct AFSStoreStatus *astatus,
struct vrequest *areq)
{
afs_int32 code;
- struct conn *tc;
+ struct afs_conn *tc;
struct AFSFetchStatus OutStatus;
struct AFSVolSync tsync;
+ struct rx_connection *rxconn;
XSTATS_DECLS;
AFS_STATCNT(afs_WriteVCache);
afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
-
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
do {
- tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
RX_AFS_GUNLOCK();
code =
- RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
+ RXAFS_StoreStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
astatus, &OutStatus, &tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze
- (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
+ (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
SHARED_LOCK, NULL));
UpgradeSToWLock(&avc->lock, 20);
* it thought we were doing this after fetching new status
* over a file being written.
*/
- avc->m.Date = OutStatus.ClientModTime;
+ avc->f.m.Date = OutStatus.ClientModTime;
} else {
/* failure, set up to check with server next time */
ObtainWriteLock(&afs_xcbhash, 462);
afs_DequeueCallback(avc);
- avc->states &= ~(CStatd | CUnique); /* turn off stat valid flag */
+ avc->f.states &= ~(CStatd | CUnique); /* turn off stat valid flag */
ReleaseWriteLock(&afs_xcbhash);
- if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
+ if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(avc); /* if it (could be) a directory */
}
ConvertWToSLock(&avc->lock);
- return code;
+ return code;
+
+} /*afs_WriteVCache */
+
+/*!
+ * Store status info only locally, set the proper disconnection flags
+ * and add to dirty list.
+ *
+ * \param avc The vcache to be written locally.
+ * \param astatus Get attr fields from local store.
+ * \param attrs Only the va_size field is used.
+ *
+ * \note Must be called with a shared lock on the vnode
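+ *
+ * A minimal caller sketch (hypothetical local variables), storing a
+ * mode change while disconnected:
+ * \code
+ *     struct AFSStoreStatus astatus;
+ *     struct vattr attrs;
+ *     astatus.Mask = AFS_SETMODE;
+ *     astatus.UnixModeBits = 0644;
+ *     attrs.va_size = 0;
+ *     code = afs_WriteVCacheDiscon(avc, &astatus, &attrs);
+ * \endcode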
+ */
+int
+afs_WriteVCacheDiscon(struct vcache *avc,
+ struct AFSStoreStatus *astatus,
+ struct vattr *attrs)
+{
+ afs_int32 code = 0;
+ afs_int32 flags = 0;
+
+ UpgradeSToWLock(&avc->lock, 700);
+
+    if (!astatus->Mask) {
+	/* Nothing to store; drop back to the shared lock before bailing. */
+	ConvertWToSLock(&avc->lock);
+	return code;
+    } else {
+
+ /* Set attributes. */
+ if (astatus->Mask & AFS_SETMODTIME) {
+ avc->f.m.Date = astatus->ClientModTime;
+ flags |= VDisconSetTime;
+ }
+
+ if (astatus->Mask & AFS_SETOWNER) {
+ /* printf("Not allowed yet. \n"); */
+ /*avc->f.m.Owner = astatus->Owner;*/
+ }
+
+ if (astatus->Mask & AFS_SETGROUP) {
+ /* printf("Not allowed yet. \n"); */
+ /*avc->f.m.Group = astatus->Group;*/
+ }
+
+ if (astatus->Mask & AFS_SETMODE) {
+ avc->f.m.Mode = astatus->UnixModeBits;
+
+#if 0 /* XXX: Leaving this out, so it doesn't mess up the file type flag.*/
+
+ if (vType(avc) == VREG) {
+ avc->f.m.Mode |= S_IFREG;
+ } else if (vType(avc) == VDIR) {
+ avc->f.m.Mode |= S_IFDIR;
+ } else if (vType(avc) == VLNK) {
+ avc->f.m.Mode |= S_IFLNK;
+ if ((avc->f.m.Mode & 0111) == 0)
+ avc->mvstat = 1;
+ }
+#endif
+ flags |= VDisconSetMode;
+	} /* if (astatus->Mask & AFS_SETMODE) */
+
+    } /* else */
+
+ if (attrs->va_size > 0) {
+ /* XXX: Do I need more checks? */
+ /* Truncation operation. */
+ flags |= VDisconTrunc;
+ }
+
+ if (flags)
+ afs_DisconAddDirty(avc, flags, 1);
+
+ /* XXX: How about the rest of the fields? */
+
+ ConvertWToSLock(&avc->lock);
-} /*afs_WriteVCache */
+ return code;
+}
-/*
- * afs_ProcessFS
- *
- * Description:
- * Copy astat block into vcache info
+/*!
+ * Copy astat block into vcache info
*
- * Parameters:
- * avc : Ptr to vcache entry.
- * astat : Ptr to stat block to copy in.
- * areq : Ptr to associated request.
+ * \note This code may get dataversion and length out of sync if the file has
+ * been modified. This is less than ideal. I haven't thought about it sufficiently
+ * to be certain that it is adequate.
*
- * Environment:
- * Must be called under a write lock
+ * \note Environment: Must be called under a write lock
*
- * Note: this code may get dataversion and length out of sync if the file has
- * been modified. This is less than ideal. I haven't thought about
- * it sufficiently to be certain that it is adequate.
+ * \param avc Ptr to vcache entry.
+ * \param astat Ptr to stat block to copy in.
+ * \param areq Ptr to associated request.
*/
void
-afs_ProcessFS(register struct vcache *avc,
- register struct AFSFetchStatus *astat, struct vrequest *areq)
+afs_ProcessFS(struct vcache *avc,
+ struct AFSFetchStatus *astat, struct vrequest *areq)
{
afs_size_t length;
AFS_STATCNT(afs_ProcessFS);
* values.
*/
afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
- avc->m.Length = length;
- avc->m.Date = astat->ClientModTime;
- }
- hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
- avc->m.Owner = astat->Owner;
- avc->m.Mode = astat->UnixModeBits;
- avc->m.Group = astat->Group;
- avc->m.LinkCount = astat->LinkCount;
+ avc->f.m.Length = length;
+ avc->f.m.Date = astat->ClientModTime;
+ }
+ hset64(avc->f.m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
+ avc->f.m.Owner = astat->Owner;
+ avc->f.m.Mode = astat->UnixModeBits;
+ avc->f.m.Group = astat->Group;
+ avc->f.m.LinkCount = astat->LinkCount;
if (astat->FileType == File) {
vSetType(avc, VREG);
- avc->m.Mode |= S_IFREG;
+ avc->f.m.Mode |= S_IFREG;
} else if (astat->FileType == Directory) {
vSetType(avc, VDIR);
- avc->m.Mode |= S_IFDIR;
+ avc->f.m.Mode |= S_IFDIR;
} else if (astat->FileType == SymbolicLink) {
- if (afs_fakestat_enable && (avc->m.Mode & 0111) == 0) {
+ if (afs_fakestat_enable && (avc->f.m.Mode & 0111) == 0) {
vSetType(avc, VDIR);
- avc->m.Mode |= S_IFDIR;
+ avc->f.m.Mode |= S_IFDIR;
} else {
vSetType(avc, VLNK);
- avc->m.Mode |= S_IFLNK;
+ avc->f.m.Mode |= S_IFLNK;
}
- if ((avc->m.Mode & 0111) == 0) {
+ if ((avc->f.m.Mode & 0111) == 0) {
avc->mvstat = 1;
}
}
- avc->anyAccess = astat->AnonymousAccess;
+ avc->f.anyAccess = astat->AnonymousAccess;
#ifdef badidea
if ((astat->CallerAccess & ~astat->AnonymousAccess))
/* USED TO SAY :
} /*afs_ProcessFS */
+/*!
+ * Look up a name in a directory on the server, returning the fid and
+ * status of the target.
+ *
+ * \param afid
+ * \param areq Request to be passed on.
+ * \param name Name of the entry to look up in the directory afid.
+ * \param OutStatus Fetch status.
+ * \param CallBackp
+ * \param serverp
+ * \param tsyncp
+ *
+ * \return Success status of operation.
+ */
int
-afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
+afs_RemoteLookup(struct VenusFid *afid, struct vrequest *areq,
char *name, struct VenusFid *nfid,
struct AFSFetchStatus *OutStatusp,
struct AFSCallBack *CallBackp, struct server **serverp,
struct AFSVolSync *tsyncp)
{
afs_int32 code;
- afs_uint32 start;
- register struct conn *tc;
+ struct afs_conn *tc;
+ struct rx_connection *rxconn;
struct AFSFetchStatus OutDirStatus;
XSTATS_DECLS;
if (!name)
name = ""; /* XXX */
do {
- tc = afs_Conn(afid, areq, SHARED_LOCK);
+ tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
if (tc) {
if (serverp)
- *serverp = tc->srvr->server;
- start = osi_Time();
+ *serverp = tc->parent->srvr->server;
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
RX_AFS_GUNLOCK();
code =
- RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
+ RXAFS_Lookup(rxconn, (struct AFSFid *)&afid->Fid, name,
(struct AFSFid *)&nfid->Fid, OutStatusp,
&OutDirStatus, CallBackp, tsyncp);
RX_AFS_GLOCK();
} else
code = -1;
} while (afs_Analyze
- (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
+ (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
NULL));
return code;
}
-/*
+/*!
* afs_GetVCache
*
- * Description:
- * Given a file id and a vrequest structure, fetch the status
- * information associated with the file.
+ * Given a file id and a vrequest structure, fetch the status
+ * information associated with the file.
*
- * Parameters:
- * afid : File ID.
- * areq : Ptr to associated vrequest structure, specifying the
- * user whose authentication tokens will be used.
- * avc : caller may already have a vcache for this file, which is
- * already held.
+ * \param afid File ID.
+ * \param areq Ptr to associated vrequest structure, specifying the
+ * user whose authentication tokens will be used.
+ * \param avc Caller may already have a vcache for this file, which is
+ * already held.
*
- * Environment:
+ * \note Environment:
* The cache entry is returned with an increased vrefCount field.
* The entry must be discarded by calling afs_PutVCache when you
* are through using the pointer to the cache entry.
* of a parent dir cache entry, given a file (to check its access
* control list). It also allows renames to be handled easily by
* locking directories in a constant order.
- * NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
+ *
+ * \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
+ *
+ * \note Might have a vcache structure already, which must
+ * already be held by the caller
*/
- /* might have a vcache structure already, which must
- * already be held by the caller */
-
struct vcache *
-afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
+afs_GetVCache(struct VenusFid *afid, struct vrequest *areq,
afs_int32 * cached, struct vcache *avc)
{
afs_int32 code, newvcache = 0;
- register struct vcache *tvc;
+ struct vcache *tvc;
struct volume *tvp;
afs_int32 retry;
goto loop;
#endif
}
-
if (tvc) {
if (cached)
*cached = 1;
- osi_Assert((tvc->states & CVInit) == 0);
- if (tvc->states & CStatd) {
+ osi_Assert((tvc->f.states & CVInit) == 0);
+ /* If we are in readdir, return the vnode even if not statd */
+ if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
ReleaseSharedLock(&afs_xvcache);
return tvc;
}
newvcache = 1;
ConvertWToSLock(&afs_xvcache);
- if (!tvc)
+ if (tvc == NULL)
{
ReleaseSharedLock(&afs_xvcache);
return NULL;
ObtainWriteLock(&tvc->lock, 54);
- if (tvc->states & CStatd) {
- ReleaseWriteLock(&tvc->lock);
- return tvc;
- }
-#if defined(AFS_OSF_ENV)
- if (afs_IsWired(tvc)) {
+ if (tvc->f.states & CStatd) {
ReleaseWriteLock(&tvc->lock);
return tvc;
}
-#endif /* AFS_OSF_ENV */
#ifdef AFS_DARWIN80_ENV
/* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
What about ubc? */
if (!iheldthelock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
/* this is messy. we can call fsync which will try to reobtain this */
- if (VTOAFS(vp) == tvc)
+ if (VTOAFS(vp) == tvc)
ReleaseWriteLock(&tvc->lock);
if (UBCINFOEXISTS(vp)) {
vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
}
- if (VTOAFS(vp) == tvc)
+ if (VTOAFS(vp) == tvc)
ObtainWriteLock(&tvc->lock, 954);
if (!iheldthelock)
VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
+#elif defined(AFS_FBSD80_ENV)
+ iheldthelock = VOP_ISLOCKED(vp);
+ if (!iheldthelock) {
+ /* nosleep/sleep lock order reversal */
+ int glocked = ISAFS_GLOCK();
+ if (glocked)
+ AFS_GUNLOCK();
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+ if (glocked)
+ AFS_GLOCK();
+ }
+ vinvalbuf(vp, V_SAVE, PINOD, 0); /* changed late in 8.0-CURRENT */
+ if (!iheldthelock)
+ VOP_UNLOCK(vp, 0);
#elif defined(AFS_FBSD60_ENV)
iheldthelock = VOP_ISLOCKED(vp, curthread);
if (!iheldthelock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
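+ /* vinvalbuf may sleep inside the VFS, so the AFS global lock is
+ * dropped across the call - presumably to let other AFS threads
+ * make progress while the buffers are flushed. */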
+ AFS_GUNLOCK();
vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
+ AFS_GLOCK();
if (!iheldthelock)
VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
-#elif defined(AFS_FBSD50_ENV)
+#elif defined(AFS_FBSD_ENV)
iheldthelock = VOP_ISLOCKED(vp, curthread);
if (!iheldthelock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
if (!iheldthelock)
VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
-#elif defined(AFS_FBSD40_ENV)
- iheldthelock = VOP_ISLOCKED(vp, curproc);
- if (!iheldthelock)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
- vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
- if (!iheldthelock)
- VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
#elif defined(AFS_OBSD_ENV)
iheldthelock = VOP_ISLOCKED(vp, curproc);
if (!iheldthelock)
uvm_vnp_uncache(vp);
if (!iheldthelock)
VOP_UNLOCK(vp, 0, curproc);
+#elif defined(AFS_NBSD40_ENV)
+ iheldthelock = VOP_ISLOCKED(vp);
+ if (!iheldthelock) {
+ VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
+ }
+ uvm_vnp_uncache(vp);
+ if (!iheldthelock)
+ VOP_UNLOCK(vp, 0);
#endif
}
#endif
#endif
ObtainWriteLock(&afs_xcbhash, 464);
- tvc->states &= ~CUnique;
+ tvc->f.states &= ~CUnique;
tvc->callback = 0;
afs_DequeueCallback(tvc);
ReleaseWriteLock(&afs_xcbhash);
if (tvp) {
if ((tvp->states & VForeign)) {
if (newvcache)
- tvc->states |= CForeign;
+ tvc->f.states |= CForeign;
if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
&& (tvp->rootUnique == afid->Fid.Unique)) {
tvc->mvstat = 2;
}
}
if (tvp->states & VRO)
- tvc->states |= CRO;
+ tvc->f.states |= CRO;
if (tvp->states & VBackup)
- tvc->states |= CBackup;
+ tvc->f.states |= CBackup;
/* now copy ".." entry back out of volume structure, if necessary */
if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
if (!tvc->mvid)
if (afs_DynrootNewVnode(tvc, &OutStatus)) {
afs_ProcessFS(tvc, &OutStatus, areq);
- tvc->states |= CStatd | CUnique;
+ tvc->f.states |= CStatd | CUnique;
+ tvc->f.parent.vnode = OutStatus.ParentVnode;
+ tvc->f.parent.unique = OutStatus.ParentUnique;
code = 0;
} else {
- code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
+
+ if (AFS_IS_DISCONNECTED) {
+ /* Nothing to do otherwise...*/
+ code = ENETDOWN;
+ /* printf("Network is down in afs_GetCache"); */
+ } else
+ code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
+
+ /* For the NFS translator's benefit, make sure
+ * non-directory vnodes always have their parent FID set
+ * correctly, even when created as a result of decoding an
+ * NFS filehandle. It would be nice to also do this for
+ * directories, but we can't because the fileserver fills
+ * in the FID of the directory itself instead of that of
+ * its parent.
+ */
+ if (!code && OutStatus.FileType != Directory &&
+ !tvc->f.parent.vnode) {
+ tvc->f.parent.vnode = OutStatus.ParentVnode;
+ tvc->f.parent.unique = OutStatus.ParentUnique;
+ /* XXX - SXW - It's conceivable we should mark ourselves
+ * as dirty again here, in case we've been raced
+ * out of the FetchStatus call.
+ */
+ }
}
}
+/*!
+ * Look up a vcache by fid. Look inside the cache first; if it is not
+ * there, look up the file on the server and then get a fresh cache
+ * entry for it.
+ *
+ * \param afid
+ * \param areq
+ * \param cached Is element cached? If NULL, don't answer.
+ * \param adp
+ * \param aname
+ *
+ * \return The found element or NULL.
+ */
struct vcache *
afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
afs_int32 * cached, struct vcache *adp, char *aname)
{
afs_int32 code, now, newvcache = 0;
struct VenusFid nfid;
- register struct vcache *tvc;
+ struct vcache *tvc;
struct volume *tvp;
struct AFSFetchStatus OutStatus;
struct AFSCallBack CallBack;
}
ObtainReadLock(&tvc->lock);
- if (tvc->states & CStatd) {
+ if (tvc->f.states & CStatd) {
if (cached) {
*cached = 1;
}
ReleaseReadLock(&tvc->lock);
return tvc;
}
- tvc->states &= ~CUnique;
+ tvc->f.states &= ~CUnique;
ReleaseReadLock(&tvc->lock);
afs_PutVCache(tvc);
nfid = *afid;
now = osi_Time();
origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
- code =
- afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus, &CallBack,
- &serverp, &tsync);
+
+ if (AFS_IS_DISCONNECTED) {
+ /* printf("Network is down in afs_LookupVcache\n"); */
+ code = ENETDOWN;
+ } else
+ code =
+ afs_RemoteLookup(&adp->f.fid, areq, aname, &nfid, &OutStatus,
+ &CallBack, &serverp, &tsync);
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
loop2:
if (tvp) {
if ((tvp->states & VForeign)) {
if (newvcache)
- tvc->states |= CForeign;
+ tvc->f.states |= CForeign;
if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
&& (tvp->rootUnique == afid->Fid.Unique))
tvc->mvstat = 2;
}
if (tvp->states & VRO)
- tvc->states |= CRO;
+ tvc->f.states |= CRO;
if (tvp->states & VBackup)
- tvc->states |= CBackup;
+ tvc->f.states |= CBackup;
/* now copy ".." entry back out of volume structure, if necessary */
if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
if (!tvc->mvid)
if (code) {
ObtainWriteLock(&afs_xcbhash, 465);
afs_DequeueCallback(tvc);
- tvc->states &= ~(CStatd | CUnique);
+ tvc->f.states &= ~(CStatd | CUnique);
ReleaseWriteLock(&afs_xcbhash);
- if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
+ if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
if (tvp)
afs_PutVolume(tvp, READ_LOCK);
if (CallBack.ExpirationTime) {
tvc->callback = serverp;
tvc->cbExpires = CallBack.ExpirationTime + now;
- tvc->states |= CStatd | CUnique;
- tvc->states &= ~CBulkFetching;
+ tvc->f.states |= CStatd | CUnique;
+ tvc->f.states &= ~CBulkFetching;
afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
- } else if (tvc->states & CRO) {
+ } else if (tvc->f.states & CRO) {
/* adapt gives us an hour. */
tvc->cbExpires = 3600 + osi_Time();
- /*XXX*/ tvc->states |= CStatd | CUnique;
- tvc->states &= ~CBulkFetching;
+ /*XXX*/ tvc->f.states |= CStatd | CUnique;
+ tvc->f.states &= ~CBulkFetching;
afs_QueueCallback(tvc, CBHash(3600), tvp);
} else {
tvc->callback = NULL;
afs_DequeueCallback(tvc);
- tvc->states &= ~(CStatd | CUnique);
- if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
+ tvc->f.states &= ~(CStatd | CUnique);
+ if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
}
} else {
afs_DequeueCallback(tvc);
- tvc->states &= ~CStatd;
- tvc->states &= ~CUnique;
+ tvc->f.states &= ~CStatd;
+ tvc->f.states &= ~CUnique;
tvc->callback = NULL;
- if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
+ if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
}
ReleaseWriteLock(&afs_xcbhash);
afs_int32 getNewFid = 0;
afs_uint32 start;
struct VenusFid nfid;
- register struct vcache *tvc;
+ struct vcache *tvc;
struct server *serverp = 0;
struct AFSFetchStatus OutStatus;
struct AFSCallBack CallBack;
struct AFSVolSync tsync;
int origCBs = 0;
-#ifdef AFS_OSF_ENV
- int vg;
+#ifdef AFS_DARWIN80_ENV
+ vnode_t tvp;
#endif
start = osi_Time();
ObtainSharedLock(&afs_xvcache, 7);
i = VCHash(afid);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
- if (!FidCmp(&(tvc->fid), afid)) {
- if (tvc->states & CVInit) {
+ if (!FidCmp(&(tvc->f.fid), afid)) {
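+ /* Another thread is still initialising this entry; drop the
+ * lock, wait for its broadcast on f.states, and rescan. */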
+ if (tvc->f.states & CVInit) {
ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
+ afs_osi_Sleep(&tvc->f.states);
goto rootvc_loop;
}
-#ifdef AFS_OSF_ENV
- /* Grab this vnode, possibly reactivating from the free list */
- /* for the present (95.05.25) everything on the hash table is
- * definitively NOT in the free list -- at least until afs_reclaim
- * can be safely implemented */
- AFS_GUNLOCK();
- vg = vget(AFSTOV(tvc)); /* this bumps ref count */
- AFS_GLOCK();
- if (vg)
- continue;
-#endif /* AFS_OSF_ENV */
#ifdef AFS_DARWIN80_ENV
- if (tvc->states & CDeadVnode) {
+ if (tvc->f.states & CDeadVnode) {
ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
+ afs_osi_Sleep(&tvc->f.states);
goto rootvc_loop;
- }
- if (vnode_get(AFSTOV(tvc))) /* this bumps ref count */
- continue;
+ }
+ tvp = AFSTOV(tvc);
+ if (vnode_get(tvp)) /* this bumps ref count */
+ continue;
+ if (vnode_ref(tvp)) {
+ AFS_GUNLOCK();
+ /* AFSTOV(tvc) may be NULL */
+ vnode_put(tvp);
+ AFS_GLOCK();
+ continue;
+ }
#endif
break;
}
}
- if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
+ if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
/* Mount point no longer stat'd or unknown. FID may have changed. */
-#ifdef AFS_OSF_ENV
- if (tvc)
- AFS_RELE(AFSTOV(tvc));
-#endif
getNewFid = 1;
ReleaseSharedLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
if (tvc) {
AFS_GUNLOCK();
vnode_put(AFSTOV(tvc));
+ vnode_rele(AFSTOV(tvc));
AFS_GLOCK();
}
#endif
if (cached)
*cached = 1;
afs_stats_cmperf.vcacheHits++;
-#ifdef AFS_OSF_ENV
+#if defined(AFS_DARWIN80_ENV)
/* we already bumped the ref count in the for loop above */
-#else /* AFS_OSF_ENV */
+#else /* AFS_DARWIN80_ENV */
osi_vnhold(tvc, 0);
#endif
UpgradeSToWLock(&afs_xvcache, 24);
ReleaseWriteLock(&afs_xvcache);
- if (tvc->states & CStatd) {
+ if (tvc->f.states & CStatd) {
return tvc;
} else {
ObtainReadLock(&tvc->lock);
- tvc->states &= ~CUnique;
+ tvc->f.states &= ~CUnique;
tvc->callback = NULL; /* redundant, perhaps */
ReleaseReadLock(&tvc->lock);
}
afs_FreeAllAxs(&(tvc->Access));
if (newvcache)
- tvc->states |= CForeign;
+ tvc->f.states |= CForeign;
if (tvolp->states & VRO)
- tvc->states |= CRO;
+ tvc->f.states |= CRO;
if (tvolp->states & VBackup)
- tvc->states |= CBackup;
+ tvc->f.states |= CBackup;
/* now copy ".." entry back out of volume structure, if necessary */
if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
&& (tvolp->rootUnique == afid->Fid.Unique)) {
ObtainWriteLock(&afs_xcbhash, 467);
afs_DequeueCallback(tvc);
tvc->callback = NULL;
- tvc->states &= ~(CStatd | CUnique);
+ tvc->f.states &= ~(CStatd | CUnique);
ReleaseWriteLock(&afs_xcbhash);
- if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
+ if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
ReleaseWriteLock(&tvc->lock);
afs_PutVCache(tvc);
ObtainWriteLock(&afs_xcbhash, 468);
if (origCBs == afs_allCBs) {
- tvc->states |= CTruth;
+ tvc->f.states |= CTruth;
tvc->callback = serverp;
if (CallBack.ExpirationTime != 0) {
tvc->cbExpires = CallBack.ExpirationTime + start;
- tvc->states |= CStatd;
- tvc->states &= ~CBulkFetching;
+ tvc->f.states |= CStatd;
+ tvc->f.states &= ~CBulkFetching;
afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
- } else if (tvc->states & CRO) {
+ } else if (tvc->f.states & CRO) {
/* adapt gives us an hour. */
tvc->cbExpires = 3600 + osi_Time();
- /*XXX*/ tvc->states |= CStatd;
- tvc->states &= ~CBulkFetching;
+ /*XXX*/ tvc->f.states |= CStatd;
+ tvc->f.states &= ~CBulkFetching;
afs_QueueCallback(tvc, CBHash(3600), tvolp);
}
} else {
afs_DequeueCallback(tvc);
tvc->callback = NULL;
- tvc->states &= ~(CStatd | CUnique);
- if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
+ tvc->f.states &= ~(CStatd | CUnique);
+ if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
}
ReleaseWriteLock(&afs_xcbhash);
}
+/*!
+ * Update callback status and (sometimes) attributes of a vnode.
+ * Called after doing a fetch status RPC. Whilst disconnected, attributes
+ * shouldn't be written to the vcache here.
+ *
+ * \param avc
+ * \param afid
+ * \param areq
+ * \param Outsp Server status after rpc call.
+ * \param acb Callback for this vnode.
+ *
+ * \note The vcache must be write locked.
+ */
+void
+afs_UpdateStatus(struct vcache *avc, struct VenusFid *afid,
+ struct vrequest *areq, struct AFSFetchStatus *Outsp,
+ struct AFSCallBack *acb, afs_uint32 start)
+{
+ struct volume *volp;
+
+ if (!AFS_IN_SYNC)
+ /* Don't write status to the vcache if we're resyncing after a disconnection. */
+ afs_ProcessFS(avc, Outsp, areq);
+
+ volp = afs_GetVolume(afid, areq, READ_LOCK);
+ ObtainWriteLock(&afs_xcbhash, 469);
+ avc->f.states |= CTruth;
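+ /* avc->callback may have been cleared by a callback break that
+ * raced with this RPC; in that case the status we fetched can no
+ * longer be trusted as current. */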
+ if (avc->callback /* check for race */ ) {
+ if (acb->ExpirationTime != 0) {
+ avc->cbExpires = acb->ExpirationTime + start;
+ avc->f.states |= CStatd;
+ avc->f.states &= ~CBulkFetching;
+ afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
+ } else if (avc->f.states & CRO) {
+ /* ordinary callback on a read-only volume -- AFS 3.2 style */
+ avc->cbExpires = 3600 + start;
+ avc->f.states |= CStatd;
+ avc->f.states &= ~CBulkFetching;
+ afs_QueueCallback(avc, CBHash(3600), volp);
+ } else {
+ afs_DequeueCallback(avc);
+ avc->callback = NULL;
+ avc->f.states &= ~(CStatd | CUnique);
+ if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
+ osi_dnlc_purgedp(avc); /* if it (could be) a directory */
+ }
+ } else {
+ afs_DequeueCallback(avc);
+ avc->callback = NULL;
+ avc->f.states &= ~(CStatd | CUnique);
+ if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
+ osi_dnlc_purgedp(avc); /* if it (could be) a directory */
+ }
+ ReleaseWriteLock(&afs_xcbhash);
+ if (volp)
+ afs_PutVolume(volp, READ_LOCK);
+}
+
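+/*!
+ * Warn that a server returned an invalid AFSFetchStatus, identifying
+ * the server by address.
+ *
+ * \param tc The connection to the server that sent the bad status.
+ */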
+void
+afs_BadFetchStatus(struct afs_conn *tc)
+{
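+ /* sa_ip is kept in network order; after ntohl the byte shifts
+ * below print the address as a dotted quad. */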
+ int addr = ntohl(tc->parent->srvr->sa_ip);
+ afs_warn("afs: Invalid AFSFetchStatus from server %u.%u.%u.%u\n",
+ (addr >> 24) & 0xff, (addr >> 16) & 0xff, (addr >> 8) & 0xff,
+ (addr) & 0xff);
+ afs_warn("afs: This suggests the server may be sending bad data that "
+ "can lead to availability issues or data corruption. The "
+ "issue has been avoided for now, but it may not always be "
+ "detectable. Please upgrade the server if possible.\n");
+}
+
+/**
+ * Check if a given AFSFetchStatus structure is sane.
+ *
+ * @param[in] tc The server from which we received the status
+ * @param[in] status The status we received
+ *
+ * @return whether the given structure is valid or not
+ * @retval 0 the structure is fine
+ * @retval nonzero the structure looks like garbage; act as if we received
+ * the returned error code from the server
+ */
+int
+afs_CheckFetchStatus(struct afs_conn *tc, struct AFSFetchStatus *status)
+{
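+ /* A sane reply has no error code, speaks InterfaceVersion 1,
+ * reports a known file type, and carries a non-zero parent
+ * vnode/unique pair; anything else is treated as garbage. */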
+ if (status->errorCode ||
+ status->InterfaceVersion != 1 ||
+ !(status->FileType > Invalid && status->FileType <= SymbolicLink) ||
+ status->ParentVnode == 0 || status->ParentUnique == 0) {
+
+ afs_warn("afs: FetchStatus ec %u iv %u ft %u pv %u pu %u\n",
+ (unsigned)status->errorCode, (unsigned)status->InterfaceVersion,
+ (unsigned)status->FileType, (unsigned)status->ParentVnode,
+ (unsigned)status->ParentUnique);
+ afs_BadFetchStatus(tc);
+
+ return VBUSY;
+ }
+ return 0;
+}
-/*
- * must be called with avc write-locked
- * don't absolutely have to invalidate the hint unless the dv has
- * changed, but be sure to get it right else there will be consistency bugs.
+/*!
+ * Must be called with avc write-locked. We don't absolutely have to
+ * invalidate the hint unless the dv has changed, but be sure to get it
+ * right, or there will be consistency bugs.
*/
{
int code;
afs_uint32 start = 0;
- register struct conn *tc;
+ struct afs_conn *tc;
struct AFSCallBack CallBack;
struct AFSVolSync tsync;
- struct volume *volp;
+ struct rx_connection *rxconn;
XSTATS_DECLS;
do {
- tc = afs_Conn(afid, areq, SHARED_LOCK);
+ tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
avc->dchint = NULL; /* invalidate hints */
if (tc) {
- avc->callback = tc->srvr->server;
+ avc->callback = tc->parent->srvr->server;
start = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
RX_AFS_GUNLOCK();
code =
- RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
+ RXAFS_FetchStatus(rxconn, (struct AFSFid *)&afid->Fid, Outsp,
&CallBack, &tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
+ if (code == 0) {
+ code = afs_CheckFetchStatus(tc, Outsp);
+ }
+
} else
code = -1;
} while (afs_Analyze
- (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
+ (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
SHARED_LOCK, NULL));
if (!code) {
- afs_ProcessFS(avc, Outsp, areq);
- volp = afs_GetVolume(afid, areq, READ_LOCK);
- ObtainWriteLock(&afs_xcbhash, 469);
- avc->states |= CTruth;
- if (avc->callback /* check for race */ ) {
- if (CallBack.ExpirationTime != 0) {
- avc->cbExpires = CallBack.ExpirationTime + start;
- avc->states |= CStatd;
- avc->states &= ~CBulkFetching;
- afs_QueueCallback(avc, CBHash(CallBack.ExpirationTime), volp);
- } else if (avc->states & CRO) { /* ordinary callback on a read-only volume -- AFS 3.2 style */
- avc->cbExpires = 3600 + start;
- avc->states |= CStatd;
- avc->states &= ~CBulkFetching;
- afs_QueueCallback(avc, CBHash(3600), volp);
- } else {
- afs_DequeueCallback(avc);
- avc->callback = NULL;
- avc->states &= ~(CStatd | CUnique);
- if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
- }
- } else {
- afs_DequeueCallback(avc);
- avc->callback = NULL;
- avc->states &= ~(CStatd | CUnique);
- if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
- }
- ReleaseWriteLock(&afs_xcbhash);
- if (volp)
- afs_PutVolume(volp, READ_LOCK);
+ afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
} else {
/* used to undo the local callback, but that's too extreme.
* There are plenty of good reasons that fetchstatus might return
* Nothing interesting.
*/
void
-afs_StuffVcache(register struct VenusFid *afid,
+afs_StuffVcache(struct VenusFid *afid,
struct AFSFetchStatus *OutStatus,
- struct AFSCallBack *CallBack, register struct conn *tc,
+ struct AFSCallBack *CallBack, struct afs_conn *tc,
struct vrequest *areq)
{
- register afs_int32 code, i, newvcache = 0;
- register struct vcache *tvc;
+ afs_int32 code, i, newvcache = 0;
+ struct vcache *tvc;
struct AFSVolSync tsync;
struct volume *tvp;
struct axscache *ac;
ReleaseSharedLock(&afs_xvcache);
ObtainWriteLock(&tvc->lock, 58);
- tvc->states &= ~CStatd;
- if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
+ tvc->f.states &= ~CStatd;
+ if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
/* Is it always appropriate to throw away all the access rights? */
tvp = afs_GetVolume(afid, areq, READ_LOCK);
if (tvp) {
if (newvcache && (tvp->states & VForeign))
- tvc->states |= CForeign;
+ tvc->f.states |= CForeign;
if (tvp->states & VRO)
- tvc->states |= CRO;
+ tvc->f.states |= CRO;
if (tvp->states & VBackup)
- tvc->states |= CBackup;
+ tvc->f.states |= CBackup;
/*
* Now, copy ".." entry back out of volume structure, if
* necessary
ObtainWriteLock(&afs_xcbhash, 470);
if (CallBack->ExpirationTime != 0) {
tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
- tvc->states |= CStatd;
- tvc->states &= ~CBulkFetching;
+ tvc->f.states |= CStatd;
+ tvc->f.states &= ~CBulkFetching;
afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
- } else if (tvc->states & CRO) {
+ } else if (tvc->f.states & CRO) {
/* old-fashioned AFS 3.2 style */
tvc->cbExpires = 3600 + osi_Time();
- /*XXX*/ tvc->states |= CStatd;
- tvc->states &= ~CBulkFetching;
+ /*XXX*/ tvc->f.states |= CStatd;
+ tvc->f.states &= ~CBulkFetching;
afs_QueueCallback(tvc, CBHash(3600), tvp);
} else {
afs_DequeueCallback(tvc);
tvc->callback = NULL;
- tvc->states &= ~(CStatd | CUnique);
- if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
+ tvc->f.states &= ~(CStatd | CUnique);
+ if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
}
ReleaseWriteLock(&afs_xcbhash);
} /*afs_StuffVcache */
#endif
-/*
- * afs_PutVCache
- *
- * Description:
- * Decrements the reference count on a cache entry.
+/*!
+ * Decrements the reference count on a cache entry.
*
- * Parameters:
- * avc : Pointer to the cache entry to decrement.
+ * \param avc Pointer to the cache entry to decrement.
*
- * Environment:
- * Nothing interesting.
+ * \note Environment: Nothing interesting.
*/
void
-afs_PutVCache(register struct vcache *avc)
+afs_PutVCache(struct vcache *avc)
{
AFS_STATCNT(afs_PutVCache);
#ifdef AFS_DARWIN80_ENV
} /*afs_PutVCache */
-static void findvc_sleep(struct vcache *avc, int flag) {
+/*!
+ * Reset a vcache entry, so local contents are ignored, and the
+ * server will be reconsulted next time the vcache is used.
+ *
+ * \param avc Pointer to the cache entry to reset
+ * \param acred
+ *
+ * \note avc must be write locked on entry
+ */
+void
+afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred)
+{
+ ObtainWriteLock(&afs_xcbhash, 456);
+ afs_DequeueCallback(avc);
+ avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
+ ReleaseWriteLock(&afs_xcbhash);
+ /* now find the disk cache entries */
+ afs_TryToSmush(avc, acred, 1);
+ osi_dnlc_purgedp(avc);
+ if (avc->linkData && !(avc->f.states & CCore)) {
+ afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
+ avc->linkData = NULL;
+ }
+}
+
+/*!
+ * Sleep when searching for a vcache. Releases all the pending locks,
+ * sleeps, then reobtains the previously released locks.
+ *
+ * \param avc The vcache entry to wait on.
+ * \param flag Determines which locks to release and reobtain.
+ */
+static void
+findvc_sleep(struct vcache *avc, int flag)
+{
if (flag & IS_SLOCK) {
ReleaseSharedLock(&afs_xvcache);
} else {
ReleaseReadLock(&afs_xvcache);
}
}
- afs_osi_Sleep(&avc->states);
+ afs_osi_Sleep(&avc->f.states);
if (flag & IS_SLOCK) {
ObtainSharedLock(&afs_xvcache, 341);
} else {
}
}
}
-/*
- * afs_FindVCache
+
+/*!
+ * Add a reference on an existing vcache entry.
*
- * Description:
- * Find a vcache entry given a fid.
+ * \param tvc Pointer to the vcache.
*
- * Parameters:
- * afid : Pointer to the fid whose cache entry we desire.
- * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
- * unlock the vnode, and try again.
- * flags: bit 1 to specify whether to compute hit statistics. Not
- * set if FindVCache is called as part of internal bookkeeping.
+ * \note Environment: Must be called with at least one reference from
+ * elsewhere on the vcache, even if that reference will be dropped.
+ * The global lock is required.
*
- * Environment:
- * Must be called with the afs_xvcache lock at least held at
- * the read level. In order to do the VLRU adjustment, the xvcache lock
- * must be shared-- we upgrade it here.
+ * \return 0 on success, -1 on failure.
+ */
+
+int
+afs_RefVCache(struct vcache *tvc)
+{
+#ifdef AFS_DARWIN80_ENV
+ vnode_t tvp;
+#endif
+
+ /* AFS_STATCNT(afs_RefVCache); */
+
+#ifdef AFS_DARWIN80_ENV
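+ /* On Darwin 8.0+ a long-lived reference needs both an iocount
+ * (vnode_get) and a usecount (vnode_ref); if either call fails
+ * the vnode is being reclaimed, so undo and report failure. */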
+ tvp = AFSTOV(tvc);
+ if (vnode_get(tvp))
+ return -1;
+ if (vnode_ref(tvp)) {
+ AFS_GUNLOCK();
+ /* AFSTOV(tvc) may be NULL */
+ vnode_put(tvp);
+ AFS_GLOCK();
+ return -1;
+ }
+#else
+ osi_vnhold(tvc, 0);
+#endif
+ return 0;
+} /*afs_RefVCache */
+
+/*!
+ * Find a vcache entry given a fid.
+ *
+ * \param afid Pointer to the fid whose cache entry we desire.
+ * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
+ * unlock the vnode, and try again.
+ * \param flag Bit 1 to specify whether to compute hit statistics. Not
+ * set if FindVCache is called as part of internal bookkeeping.
+ *
+ * \note Environment: Must be called with the afs_xvcache lock at least held at
+ * the read level. In order to do the VLRU adjustment, the xvcache lock
+ * must be shared-- we upgrade it here.
*/
struct vcache *
afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
{
- register struct vcache *tvc;
+ struct vcache *tvc;
afs_int32 i;
-#if defined( AFS_OSF_ENV)
- int vg;
+#ifdef AFS_DARWIN80_ENV
+ struct vcache *deadvc = NULL, *livevc = NULL;
+ vnode_t tvp;
#endif
AFS_STATCNT(afs_FindVCache);
i = VCHash(afid);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
if (FidMatches(afid, tvc)) {
- if (tvc->states & CVInit) {
+ if (tvc->f.states & CVInit) {
findvc_sleep(tvc, flag);
goto findloop;
- }
-#ifdef AFS_OSF_ENV
- /* Grab this vnode, possibly reactivating from the free list */
- AFS_GUNLOCK();
- vg = vget(AFSTOV(tvc));
- AFS_GLOCK();
- if (vg)
- continue;
-#endif /* AFS_OSF_ENV */
+ }
#ifdef AFS_DARWIN80_ENV
- if (tvc->states & CDeadVnode) {
- findvc_sleep(tvc, flag);
+ if (tvc->f.states & CDeadVnode) {
+ findvc_sleep(tvc, flag);
goto findloop;
}
- if (vnode_get(AFSTOV(tvc)))
- continue;
#endif
break;
}
if (tvc) {
if (retry)
*retry = 0;
-#if !defined(AFS_OSF_ENV)
- osi_vnhold(tvc, retry); /* already held, above */
- if (retry && *retry)
- return 0;
-#endif
-#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
- tvc->states |= CUBCinit;
+#if defined(AFS_DARWIN80_ENV)
+ tvp = AFSTOV(tvc);
+ if (vnode_get(tvp))
+ tvp = NULL;
+ if (tvp && vnode_ref(tvp)) {
+ AFS_GUNLOCK();
+ /* AFSTOV(tvc) may be NULL */
+ vnode_put(tvp);
+ AFS_GLOCK();
+ tvp = NULL;
+ }
+ if (!tvp) {
+ tvc = NULL;
+ return tvc;
+ }
+#elif defined(AFS_DARWIN_ENV)
+ tvc->f.states |= CUBCinit;
AFS_GUNLOCK();
if (UBCINFOMISSING(AFSTOV(tvc)) ||
UBCINFORECLAIMED(AFSTOV(tvc))) {
ubc_info_init(AFSTOV(tvc));
}
AFS_GLOCK();
- tvc->states &= ~CUBCinit;
+ tvc->f.states &= ~CUBCinit;
+#else
+ osi_vnhold(tvc, retry); /* already held, above */
+ if (retry && *retry)
+ return 0;
#endif
/*
- * only move to front of vlru if we have proper vcache locking
+ * only move to front of vlru if we have proper vcache locking
return tvc;
} /*afs_FindVCache */
-/*
- * afs_NFSFindVCache
- *
- * Description:
- * Find a vcache entry given a fid. Does a wildcard match on what we
- * have for the fid. If more than one entry, don't return anything.
+/*!
+ * Find a vcache entry given a fid. Does a wildcard match on what we
+ * have for the fid. If more than one entry, don't return anything.
*
- * Parameters:
- * avcp : Fill in pointer if we found one and only one.
- * afid : Pointer to the fid whose cache entry we desire.
- * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
- * unlock the vnode, and try again.
- * flags: bit 1 to specify whether to compute hit statistics. Not
- * set if FindVCache is called as part of internal bookkeeping.
+ * \param avcp Fill in pointer if we found one and only one.
+ * \param afid Pointer to the fid whose cache entry we desire.
*
- * Environment:
- * Must be called with the afs_xvcache lock at least held at
- * the read level. In order to do the VLRU adjustment, the xvcache lock
- * must be shared-- we upgrade it here.
+ * \note Environment: Must be called with the afs_xvcache lock at least held at
+ * the read level. In order to do the VLRU adjustment, the xvcache lock
+ * must be shared-- we upgrade it here.
*
- * Return value:
- * number of matches found.
+ * \return Number of matches found.
*/
int afs_duplicate_nfs_fids = 0;
afs_int32
afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
{
- register struct vcache *tvc;
+ struct vcache *tvc;
afs_int32 i;
afs_int32 count = 0;
struct vcache *found_tvc = NULL;
-#ifdef AFS_OSF_ENV
- int vg;
+#ifdef AFS_DARWIN80_ENV
+ vnode_t tvp;
#endif
AFS_STATCNT(afs_FindVCache);
i = VCHash(afid);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
/* Match only on what we have.... */
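+ /* The NFS translator only recovers the low 16 bits of the vnode
+ * and 24 bits of the uniquifier from a filehandle, hence the
+ * masked comparison. */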
- if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
- && (tvc->fid.Fid.Volume == afid->Fid.Volume)
- && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
- && (tvc->fid.Cell == afid->Cell)) {
- if (tvc->states & CVInit) {
- int lock;
+ if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
+ && (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
+ && ((tvc->f.fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
+ && (tvc->f.fid.Cell == afid->Cell)) {
+ if (tvc->f.states & CVInit) {
ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
+ afs_osi_Sleep(&tvc->f.states);
goto loop;
}
-#ifdef AFS_OSF_ENV
- /* Grab this vnode, possibly reactivating from the free list */
- AFS_GUNLOCK();
- vg = vget(AFSTOV(tvc));
- AFS_GLOCK();
- if (vg) {
- /* This vnode no longer exists. */
- continue;
- }
-#endif /* AFS_OSF_ENV */
#ifdef AFS_DARWIN80_ENV
- if (tvc->states & CDeadVnode) {
+ if (tvc->f.states & CDeadVnode) {
ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
+ afs_osi_Sleep(&tvc->f.states);
goto loop;
- }
- if (vnode_get(AFSTOV(tvc))) {
- /* This vnode no longer exists. */
- continue;
- }
+ }
+ tvp = AFSTOV(tvc);
+ if (vnode_get(tvp)) {
+ /* This vnode no longer exists. */
+ continue;
+ }
+ if (vnode_ref(tvp)) {
+ /* This vnode no longer exists. */
+ AFS_GUNLOCK();
+ /* AFSTOV(tvc) may be NULL */
+ vnode_put(tvp);
+ AFS_GLOCK();
+ continue;
+ }
#endif /* AFS_DARWIN80_ENV */
count++;
if (found_tvc) {
/* Duplicates */
-#ifdef AFS_OSF_ENV
- /* Drop our reference counts. */
- vrele(AFSTOV(tvc));
- vrele(AFSTOV(found_tvc));
-#endif
afs_duplicate_nfs_fids++;
ReleaseSharedLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
tvc = found_tvc;
/* should I have a read lock on the vnode here? */
if (tvc) {
+#ifndef AFS_DARWIN80_ENV
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
afs_int32 retry = 0;
osi_vnhold(tvc, &retry);
goto loop;
}
#else
-#if !defined(AFS_OSF_ENV)
osi_vnhold(tvc, (int *)0); /* already held, above */
#endif
#endif
-/*
- * afs_vcacheInit
- *
+/*!
* Initialize vcache related variables
+ *
+ * \param astatSize
*/
void
afs_vcacheInit(int astatSize)
{
- register struct vcache *tvp;
+#if !defined(AFS_LINUX22_ENV)
+ struct vcache *tvp;
+#endif
int i;
-#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
if (!afs_maxvcount) {
-#if defined(AFS_LINUX22_ENV)
afs_maxvcount = astatSize; /* no particular limit on linux? */
-#elif defined(AFS_OSF30_ENV)
- afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
-#else
- afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
-#endif
- if (astatSize < afs_maxvcount) {
- afs_maxvcount = astatSize;
- }
}
-#else /* AFS_OSF_ENV */
+#if !defined(AFS_LINUX22_ENV)
freeVCList = NULL;
#endif
- RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
+ AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
LOCK_INIT(&afs_xvcb, "afs_xvcb");
-#if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
+#if !defined(AFS_LINUX22_ENV)
/* Allocate and thread the struct vcache entries */
- tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
- memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);
+ tvp = afs_osi_Alloc(astatSize * sizeof(struct vcache));
+ osi_Assert(tvp != NULL);
+ memset(tvp, 0, sizeof(struct vcache) * astatSize);
Initial_freeVCList = tvp;
freeVCList = &(tvp[0]);
tvp[i].nextfree = &(tvp[i + 1]);
}
tvp[astatSize - 1].nextfree = NULL;
-#ifdef KERNEL_HAVE_PIN
+# ifdef KERNEL_HAVE_PIN
pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
-#endif
+# endif
#endif
#if defined(AFS_SGI_ENV)
QInit(&afs_vhashTV[i]);
}
-/*
- * shutdown_vcache
- *
+/*!
+ * Shutdown vcache.
*/
void
shutdown_vcache(void)
{
int i;
- struct afs_cbr *tsp, *nsp;
+ struct afs_cbr *tsp;
/*
- * XXX We may potentially miss some of the vcaches because if when there're no
- * free vcache entries and all the vcache entries are active ones then we allocate
- * an additional one - admittedly we almost never had that occur.
+ * XXX We may potentially miss some of the vcaches because, when there
+ * are no free vcache entries and all the vcache entries are active, we
+ * allocate an additional one - admittedly that almost never occurs.
*/
{
- register struct afs_q *tq, *uq;
- register struct vcache *tvc;
+ struct afs_q *tq, *uq = NULL;
+ struct vcache *tvc;
for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
tvc = QTOV(tq);
uq = QPrev(tq);
tvc->linkData = 0;
}
- afs_FreeAllAxs(&(tvc->Access));
+ if (tvc->Access)
+ afs_FreeAllAxs(&(tvc->Access));
}
afs_vhashT[i] = 0;
}
/*
* Free any leftover callback queue
*/
- for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
- nsp = tsp->next;
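+ /* CBR entries are allocated in blocks of AFS_NCBRS; afs_cbrHeads
+ * records the start of each block so whole blocks can be freed. */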
+ for (i = 0; i < afs_stats_cmperf.CallBackAlloced; i++) {
+ tsp = afs_cbrHeads[i];
+ afs_cbrHeads[i] = 0;
afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
}
afs_cbrSpace = 0;
-#ifdef KERNEL_HAVE_PIN
- unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
-#endif
-#if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
+#if !defined(AFS_LINUX22_ENV)
afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
-#endif
-#if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
+# ifdef KERNEL_HAVE_PIN
+ unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
+# endif
+
freeVCList = Initial_freeVCList = 0;
#endif
- RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
+
+ AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
LOCK_INIT(&afs_xvcb, "afs_xvcb");
QInit(&VLRU);
for(i = 0; i < VCSIZE; ++i)
QInit(&afs_vhashTV[i]);
}
+
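+/*!
+ * Give up all callbacks, e.g. before dropping into disconnected mode:
+ * queue a callback return for every vcache that still holds one, then
+ * flush the queue to the servers.
+ */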
+void
+afs_DisconGiveUpCallbacks(void)
+{
+ int i;
+ struct vcache *tvc;
+ int nq=0;
+
+ ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
+
+ retry:
+ /* Walk every hash chain of vcaches, with each entry coming out as tvc */
+ for (i = 0; i < VCSIZE; i++) {
+ for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
+ int slept = 0;
+ if (afs_QueueVCB(tvc, &slept)) {
+ tvc->callback = NULL;
+ nq++;
+ }
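+ /* afs_QueueVCB dropped and reobtained afs_xvcache; the hash
+ * chains may have changed under us, so restart the walk. */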
+ if (slept) {
+ goto retry;
+ }
+ }
+ }
+
+ ReleaseWriteLock(&afs_xvcache);
+
+ afs_FlushVCBs(2);
+}
+
+/*!
+ * Clear the Statd flag from all vcaches.
+ *
+ * This function removes the Statd flag from all vcaches. It's used by
+ * disconnected mode to tidy up during reconnection.
+ */
+void
+afs_ClearAllStatdFlag(void)
+{
+ int i;
+ struct vcache *tvc;
+
+ ObtainWriteLock(&afs_xvcache, 715);
+
+ for (i = 0; i < VCSIZE; i++) {
+ for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
+ tvc->f.states &= ~(CStatd|CUnique);
+ }
+ }
+ ReleaseWriteLock(&afs_xvcache);
+}