/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#include <afs/param.h>
-#include <roken.h>
-
#include <afs/stds.h>
+#include <roken.h>
+
#include <windows.h>
#include <winsock2.h>
#include <nb30.h>
#include "afsd.h"
#include "cm_btree.h"
+#include <afs/unified_afs.h>
/*extern void afsi_log(char *pattern, ...);*/
/*
 * Make scp the most-recently-used entry by moving it to the head of
 * the statcache LRU queue.  Entries flagged CM_SCACHEFLAG_DELETED are
 * not kept on the LRU queue, so they must not be re-queued here.
 *
 * Caller must hold cm_scacheLock write-locked.
 */
void cm_AdjustScacheLRU(cm_scache_t *scp)
{
    lock_AssertWrite(&cm_scacheLock);
    if (!(scp->flags & CM_SCACHEFLAG_DELETED)) {
        /* remove from wherever it currently sits, then re-insert at the head */
        osi_QRemoveHT((osi_queue_t **) &cm_data.scacheLRUFirstp, (osi_queue_t **) &cm_data.scacheLRULastp, &scp->q);
        osi_QAddH((osi_queue_t **) &cm_data.scacheLRUFirstp, (osi_queue_t **) &cm_data.scacheLRULastp, &scp->q);
    }
}
/* call with cm_scacheLock write-locked and scp rw held */
cm_scache_t **lscpp;
cm_scache_t *tscp;
int i;
-
+
lock_AssertWrite(&cm_scacheLock);
lock_AssertWrite(&scp->rw);
if (scp->flags & CM_SCACHEFLAG_INHASH) {
if (tscp == scp) {
*lscpp = scp->nextp;
scp->nextp = NULL;
- scp->flags &= ~CM_SCACHEFLAG_INHASH;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_INHASH);
break;
}
}
LARGE_INTEGER start, end;
if (!dirlock && !lock_TryWrite(&scp->dirlock)) {
- /*
+ /*
* We are not holding the dirlock and obtaining it
* requires that we drop the scp->rw. As a result
- * we will leave the dirBplus tree intact but
+ * we will leave the dirBplus tree intact but
* invalidate the version number so that whatever
* operation is currently active can safely complete
- * but the contents will be ignored on the next
+ * but the contents will be ignored on the next
* directory operation.
*/
scp->dirDataVersion = CM_SCACHE_VERSION_BAD;
scp->dirBplus = NULL;
scp->dirDataVersion = CM_SCACHE_VERSION_BAD;
QueryPerformanceCounter(&end);
-
- if (!dirlock)
+
+ if (!dirlock)
lock_ReleaseWrite(&scp->dirlock);
bplus_free_time += (end.QuadPart - start.QuadPart);
/* called with cm_scacheLock and scp write-locked; recycles an existing scp. */
long cm_RecycleSCache(cm_scache_t *scp, afs_int32 flags)
{
+ cm_fid_t fid;
+ afs_uint32 fileType;
+ int callback;
+
+ lock_AssertWrite(&cm_scacheLock);
+ lock_AssertWrite(&scp->rw);
+
if (scp->refCount != 0) {
return -1;
}
return -1;
}
+ if (scp->redirBufCount != 0) {
+ return -1;
+ }
+
+ fid = scp->fid;
+ fileType = scp->fileType;
+ callback = scp->cbExpires ? 1 : 0;
+
cm_RemoveSCacheFromHashTable(scp);
-#if 0
- if (flags & CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS) {
- osi_queueData_t *qdp;
- cm_buf_t *bufp;
+ if (scp->fileType == CM_SCACHETYPE_DIRECTORY &&
+ !cm_accessPerFileCheck) {
+ cm_volume_t *volp = cm_GetVolumeByFID(&scp->fid);
- while(qdp = scp->bufWritesp) {
- bufp = osi_GetQData(qdp);
- osi_QRemove((osi_queue_t **) &scp->bufWritesp, &qdp->q);
- osi_QDFree(qdp);
- if (bufp) {
- lock_ObtainMutex(&bufp->mx);
- bufp->cmFlags &= ~CM_BUF_CMSTORING;
- bufp->flags &= ~CM_BUF_DIRTY;
- bufp->dirty_offset = 0;
- bufp->dirty_length = 0;
- bufp->flags |= CM_BUF_ERROR;
- bufp->error = VNOVNODE;
- bufp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
- bufp->dirtyCounter++;
- if (bufp->flags & CM_BUF_WAITING) {
- osi_Log2(afsd_logp, "CM RecycleSCache Waking [scp 0x%x] bufp 0x%x", scp, bufp);
- osi_Wakeup((long) &bufp);
- }
- lock_ReleaseMutex(&bufp->mx);
- buf_Release(bufp);
- }
- }
- while(qdp = scp->bufReadsp) {
- bufp = osi_GetQData(qdp);
- osi_QRemove((osi_queue_t **) &scp->bufReadsp, &qdp->q);
- osi_QDFree(qdp);
- if (bufp) {
- lock_ObtainMutex(&bufp->mx);
- bufp->cmFlags &= ~CM_BUF_CMFETCHING;
- bufp->flags &= ~CM_BUF_DIRTY;
- bufp->dirty_offset = 0;
- bufp->dirty_length = 0;
- bufp->flags |= CM_BUF_ERROR;
- bufp->error = VNOVNODE;
- bufp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
- bufp->dirtyCounter++;
- if (bufp->flags & CM_BUF_WAITING) {
- osi_Log2(afsd_logp, "CM RecycleSCache Waking [scp 0x%x] bufp 0x%x", scp, bufp);
- osi_Wakeup((long) &bufp);
- }
- lock_ReleaseMutex(&bufp->mx);
- buf_Release(bufp);
- }
+ if (volp) {
+ if (!(volp->flags & CM_VOLUMEFLAG_DFS_VOLUME))
+ cm_EAccesClearParentEntries(&fid);
+
+ cm_PutVolume(volp);
}
- buf_CleanDirtyBuffers(scp);
- } else {
- /* look for things that shouldn't still be set */
- osi_assertx(scp->bufWritesp == NULL, "non-null cm_scache_t bufWritesp");
- osi_assertx(scp->bufReadsp == NULL, "non-null cm_scache_t bufReadsp");
}
-#endif
/* invalidate so next merge works fine;
* also initialize some flags */
scp->fileType = 0;
- scp->flags &= ~(CM_SCACHEFLAG_STATD
- | CM_SCACHEFLAG_DELETED
+ _InterlockedAnd(&scp->flags,
+ ~( CM_SCACHEFLAG_DELETED
| CM_SCACHEFLAG_RO
| CM_SCACHEFLAG_PURERO
| CM_SCACHEFLAG_OVERQUOTA
| CM_SCACHEFLAG_OUTOFSPACE
- | CM_SCACHEFLAG_EACCESS);
+ | CM_SCACHEFLAG_ASYNCSTORING));
scp->serverModTime = 0;
scp->dataVersion = CM_SCACHE_VERSION_BAD;
scp->bufDataVersionLow = CM_SCACHE_VERSION_BAD;
scp->cbServerp = NULL;
}
scp->cbExpires = 0;
+ scp->cbIssued = 0;
scp->volumeCreationDate = 0;
scp->fid.vnode = 0;
scp->mask = 0;
/* discard symlink info */
+ scp->mpDataVersion = CM_SCACHE_VERSION_BAD;
scp->mountPointStringp[0] = '\0';
memset(&scp->mountRootFid, 0, sizeof(cm_fid_t));
memset(&scp->dotdotFid, 0, sizeof(cm_fid_t));
cm_FreeAllACLEnts(scp);
cm_ResetSCacheDirectory(scp, 0);
+
+ if (RDR_Initialized && callback) {
+ /*
+ * We drop the cm_scacheLock because it may be required to
+ * satisfy an ioctl request from the redirector. It should
+ * be safe to hold the scp->rw lock here because at this
+ * point (a) the object has just been recycled so the fid
+ * is nul and there are no requests that could possibly
+ * be issued by the redirector that would depend upon it.
+ */
+ lock_ReleaseWrite(&cm_scacheLock);
+ RDR_InvalidateObject( fid.cell, fid.volume, fid.vnode,
+ fid.unique, fid.hash,
+ fileType, AFS_INVALIDATE_EXPIRED);
+ lock_ObtainWrite(&cm_scacheLock);
+ }
+
return 0;
}
-/*
+/*
* called with cm_scacheLock write-locked; find a vnode to recycle.
* Can allocate a new one if desperate, or if below quota (cm_data.maxSCaches).
- * returns scp->mx held.
+ * returns scp->rw write-locked.
*/
-cm_scache_t *cm_GetNewSCache(void)
+cm_scache_t *
+cm_GetNewSCache(afs_uint32 locked)
{
- cm_scache_t *scp;
- int retry = 0;
+ cm_scache_t *scp = NULL;
+ cm_scache_t *scp_prev = NULL;
+ cm_scache_t *scp_next = NULL;
+ int attempt = 0;
- lock_AssertWrite(&cm_scacheLock);
-#if 0
- /* first pass - look for deleted objects */
- for ( scp = cm_data.scacheLRULastp;
- scp;
- scp = (cm_scache_t *) osi_QPrev(&scp->q))
- {
- osi_assertx(scp >= cm_data.scacheBaseAddress && scp < (cm_scache_t *)cm_data.scacheHashTablep,
- "invalid cm_scache_t address");
-
- if (scp->refCount == 0) {
- if (scp->flags & CM_SCACHEFLAG_DELETED) {
- if (!lock_TryWrite(&scp->rw))
- continue;
-
- osi_Log1(afsd_logp, "GetNewSCache attempting to recycle deleted scp 0x%x", scp);
- if (!cm_RecycleSCache(scp, CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS)) {
-
- /* we found an entry, so return it */
- /* now remove from the LRU queue and put it back at the
- * head of the LRU queue.
- */
- cm_AdjustScacheLRU(scp);
-
- /* and we're done */
- return scp;
- }
- lock_ReleaseWrite(&scp->rw);
- osi_Log1(afsd_logp, "GetNewSCache recycled failed scp 0x%x", scp);
- } else if (!(scp->flags & CM_SCACHEFLAG_INHASH)) {
- if (!lock_TryWrite(&scp->rw))
- continue;
-
- /* we found an entry, so return it */
- /* now remove from the LRU queue and put it back at the
- * head of the LRU queue.
- */
- cm_AdjustScacheLRU(scp);
-
- /* and we're done */
- return scp;
- }
- }
- }
- osi_Log0(afsd_logp, "GetNewSCache no deleted or recycled entries available for reuse");
-#endif
+ if (locked)
+ lock_AssertWrite(&cm_scacheLock);
+ else
+ lock_ObtainWrite(&cm_scacheLock);
if (cm_data.currentSCaches >= cm_data.maxSCaches) {
/* There were no deleted scache objects that we could use. Try to find
* one that simply hasn't been used in a while.
*/
- for ( scp = cm_data.scacheLRULastp;
- scp;
- scp = (cm_scache_t *) osi_QPrev(&scp->q))
- {
- /* It is possible for the refCount to be zero and for there still
- * to be outstanding dirty buffers. If there are dirty buffers,
- * we must not recycle the scp. */
- if (scp->refCount == 0 && scp->bufReadsp == NULL && scp->bufWritesp == NULL) {
- if (!buf_DirtyBuffersExist(&scp->fid)) {
- if (!lock_TryWrite(&scp->rw))
- continue;
-
- if (!cm_RecycleSCache(scp, 0)) {
- /* we found an entry, so return it */
- /* now remove from the LRU queue and put it back at the
- * head of the LRU queue.
- */
- cm_AdjustScacheLRU(scp);
-
- /* and we're done */
- return scp;
+ for (attempt = 0 ; attempt < 128; attempt++) {
+ afs_uint32 count = 0;
+
+ for ( scp = cm_data.scacheLRULastp;
+ scp;
+ scp = (cm_scache_t *) osi_QPrev(&scp->q))
+ {
+ /*
+ * We save the prev and next pointers in the
+ * LRU because we are going to drop the cm_scacheLock and
+ * the order of the list could change out from beneath us.
+ * If both changed, it means that this entry has been moved
+ * within the LRU and it should no longer be recycled.
+ */
+ scp_prev = (cm_scache_t *) osi_QPrev(&scp->q);
+ scp_next = (cm_scache_t *) osi_QNext(&scp->q);
+ count++;
+
+ /* It is possible for the refCount to be zero and for there still
+ * to be outstanding dirty buffers. If there are dirty buffers,
+ * we must not recycle the scp.
+ *
+ * If the object is in use by the redirector, then avoid recycling
+ * it unless we have to.
+ */
+ if (scp->refCount == 0 && scp->bufReadsp == NULL && scp->bufWritesp == NULL) {
+ afs_uint32 buf_dirty = 0;
+ afs_uint32 buf_rdr = 0;
+
+ lock_ReleaseWrite(&cm_scacheLock);
+ buf_dirty = buf_DirtyBuffersExist(&scp->fid);
+ if (!buf_dirty)
+ buf_rdr = buf_RDRBuffersExist(&scp->fid);
+
+ if (!buf_dirty && !buf_rdr) {
+ cm_fid_t fid;
+ afs_uint32 fileType;
+ int success;
+
+ success = lock_TryWrite(&scp->rw);
+
+ lock_ObtainWrite(&cm_scacheLock);
+ if (scp_prev != (cm_scache_t *) osi_QPrev(&scp->q) &&
+ scp_next != (cm_scache_t *) osi_QNext(&scp->q))
+ {
+ osi_Log1(afsd_logp, "GetNewSCache scp 0x%p; LRU order changed", scp);
+ if (success)
+ lock_ReleaseWrite(&scp->rw);
+ break;
+ } else if (!success) {
+ osi_Log1(afsd_logp, "GetNewSCache failed to obtain lock scp 0x%p", scp);
+ continue;
+ }
+
+ /* Found a likely candidate. Save type and fid in case we succeed */
+ fid = scp->fid;
+ fileType = scp->fileType;
+
+ if (!cm_RecycleSCache(scp, 0)) {
+ /* we found an entry, so return it.
+ * remove from the LRU queue and put it back at the
+ * head of the LRU queue.
+ */
+ cm_AdjustScacheLRU(scp);
+
+ /* and we're done - SUCCESS */
+ osi_assertx(!(scp->flags & CM_SCACHEFLAG_INHASH), "CM_SCACHEFLAG_INHASH set");
+ goto done;
+ }
+ lock_ReleaseWrite(&scp->rw);
+ } else {
+ if (buf_rdr)
+ osi_Log1(afsd_logp,"GetNewSCache redirector is holding extents scp 0x%p", scp);
+ else
+ osi_Log1(afsd_logp, "GetNewSCache dirty buffers scp 0x%p", scp);
+
+ lock_ObtainWrite(&cm_scacheLock);
+ if (scp_prev != (cm_scache_t *) osi_QPrev(&scp->q) &&
+ scp_next != (cm_scache_t *) osi_QNext(&scp->q))
+ {
+ osi_Log1(afsd_logp, "GetNewSCache scp 0x%p; LRU order changed", scp);
+ break;
+ }
}
- lock_ReleaseWrite(&scp->rw);
- } else {
- osi_Log1(afsd_logp,"GetNewSCache dirty buffers exist scp 0x%x", scp);
}
- }
- }
- osi_Log1(afsd_logp, "GetNewSCache all scache entries in use (retry = %d)", retry);
+ } /* for */
- return NULL;
+ osi_Log2(afsd_logp, "GetNewSCache all scache entries in use (attempt = %d, count = %u)", attempt, count);
+ if (scp == NULL) {
+ /*
+ * The entire LRU queue was walked and no available cm_scache_t was
+ * found. Drop the cm_scacheLock and sleep for a moment to give a
+ * chance for cm_scache_t objects to be released.
+ */
+ lock_ReleaseWrite(&cm_scacheLock);
+ Sleep(50);
+ lock_ObtainWrite(&cm_scacheLock);
+ }
+ }
+ /* FAILURE */
+ scp = NULL;
+ goto done;
}
-
+
/* if we get here, we should allocate a new scache entry. We either are below
* quota or we have a leak and need to allocate a new one to avoid panicing.
*/
- scp = cm_data.scacheBaseAddress + cm_data.currentSCaches;
+ scp = cm_data.scacheBaseAddress + InterlockedIncrement(&cm_data.currentSCaches) - 1;
osi_assertx(scp >= cm_data.scacheBaseAddress && scp < (cm_scache_t *)cm_data.scacheHashTablep,
"invalid cm_scache_t address");
memset(scp, 0, sizeof(cm_scache_t));
#ifdef USE_BPLUS
lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock", LOCK_HIERARCHY_SCACHE_DIRLOCK);
#endif
+ lock_InitializeMutex(&scp->redirMx, "cm_scache_t redirMx", LOCK_HIERARCHY_SCACHE_REDIRMX);
scp->serverLock = -1;
+ scp->dataVersion = CM_SCACHE_VERSION_BAD;
+ scp->bufDataVersionLow = CM_SCACHE_VERSION_BAD;
+ scp->lockDataVersion = CM_SCACHE_VERSION_BAD;
+ scp->mpDataVersion = CM_SCACHE_VERSION_BAD;
/* and put it in the LRU queue */
- osi_QAdd((osi_queue_t **) &cm_data.scacheLRUFirstp, &scp->q);
- if (!cm_data.scacheLRULastp)
- cm_data.scacheLRULastp = scp;
- cm_data.currentSCaches++;
+ osi_QAddH((osi_queue_t **) &cm_data.scacheLRUFirstp, (osi_queue_t **)&cm_data.scacheLRULastp, &scp->q);
cm_dnlcPurgedp(scp); /* make doubly sure that this is not in dnlc */
- cm_dnlcPurgevp(scp);
+ cm_dnlcPurgevp(scp);
scp->allNextp = cm_data.allSCachesp;
cm_data.allSCachesp = scp;
+
+ done:
+ if (!locked)
+ lock_ReleaseWrite(&cm_scacheLock);
+
return scp;
-}
+}
void cm_SetFid(cm_fid_t *fidp, afs_uint32 cell, afs_uint32 volume, afs_uint32 vnode, afs_uint32 unique)
{
fidp->volume = volume;
fidp->vnode = vnode;
fidp->unique = unique;
- fidp->hash = ((cell & 0xF) << 28) | ((volume & 0x3F) << 22) | ((vnode & 0x7FF) << 11) | (unique & 0x7FF);
+ CM_FID_GEN_HASH(fidp);
}
/* like strcmp, only for fids */
return 1;
if (ap->vnode != bp->vnode)
return 1;
- if (ap->volume != bp->volume)
+ if (ap->volume != bp->volume)
return 1;
- if (ap->unique != bp->unique)
+ if (ap->unique != bp->unique)
return 1;
- if (ap->cell != bp->cell)
+ if (ap->cell != bp->cell)
return 1;
return 0;
}
cm_data.fakeSCache.magic = CM_SCACHE_MAGIC;
cm_data.fakeSCache.cbServerp = (struct cm_server *)(-1);
cm_data.fakeSCache.cbExpires = (time_t)-1;
+ cm_data.fakeSCache.cbExpires = time(NULL);
/* can leave clientModTime at 0 */
cm_data.fakeSCache.fileType = CM_SCACHETYPE_FILE;
cm_data.fakeSCache.unixModeBits = 0777;
lock_InitializeRWLock(&cm_data.fakeSCache.rw, "cm_scache_t rw", LOCK_HIERARCHY_SCACHE);
lock_InitializeRWLock(&cm_data.fakeSCache.bufCreateLock, "cm_scache_t bufCreateLock", LOCK_HIERARCHY_SCACHE_BUFCREATE);
lock_InitializeRWLock(&cm_data.fakeSCache.dirlock, "cm_scache_t dirlock", LOCK_HIERARCHY_SCACHE_DIRLOCK);
+ lock_InitializeMutex(&cm_data.fakeSCache.redirMx, "cm_scache_t redirMx", LOCK_HIERARCHY_SCACHE_REDIRMX);
}
long
return -17;
}
- for ( scp = cm_data.scacheLRUFirstp, lscp = NULL, i = 0;
+ for ( scp = cm_data.scacheLRUFirstp, lscp = NULL, i = 0;
scp;
lscp = scp, scp = (cm_scache_t *) osi_QNext(&scp->q), i++ ) {
if (scp->magic != CM_SCACHE_MAGIC) {
cm_GiveUpAllCallbacksAllServersMulti(TRUE);
- /*
+ /*
* After this call all servers are marked down.
* Do not clear the callbacks, instead change the
* expiration time so that the callbacks will be expired
* when the servers are marked back up. However, we
- * want the callbacks to be preserved as long as the
+ * want the callbacks to be preserved as long as the
* servers are down. That way if the machine resumes
* without network, the stat cache item will still be
* considered valid.
long
cm_ShutdownSCache(void)
{
- cm_scache_t * scp;
+ cm_scache_t * scp, * nextp;
+
+ cm_GiveUpAllCallbacksAllServersMulti(FALSE);
lock_ObtainWrite(&cm_scacheLock);
for ( scp = cm_data.allSCachesp; scp;
- scp = scp->allNextp ) {
+ scp = nextp ) {
+ nextp = scp->allNextp;
+ lock_ReleaseWrite(&cm_scacheLock);
+#ifdef USE_BPLUS
+ lock_ObtainWrite(&scp->dirlock);
+#endif
+ lock_ObtainWrite(&scp->rw);
+ lock_ObtainWrite(&cm_scacheLock);
+
if (scp->randomACLp) {
- lock_ReleaseWrite(&cm_scacheLock);
- lock_ObtainWrite(&scp->rw);
- lock_ObtainWrite(&cm_scacheLock);
cm_FreeAllACLEnts(scp);
- lock_ReleaseWrite(&scp->rw);
}
if (scp->cbServerp) {
scp->cbServerp = NULL;
}
scp->cbExpires = 0;
- scp->flags &= ~CM_SCACHEFLAG_CALLBACK;
+ scp->cbIssued = 0;
+ lock_ReleaseWrite(&scp->rw);
#ifdef USE_BPLUS
if (scp->dirBplus)
freeBtree(scp->dirBplus);
scp->dirBplus = NULL;
scp->dirDataVersion = CM_SCACHE_VERSION_BAD;
+ lock_ReleaseWrite(&scp->dirlock);
lock_FinalizeRWLock(&scp->dirlock);
#endif
lock_FinalizeRWLock(&scp->rw);
lock_FinalizeRWLock(&scp->bufCreateLock);
+ lock_FinalizeMutex(&scp->redirMx);
}
lock_ReleaseWrite(&cm_scacheLock);
- cm_GiveUpAllCallbacksAllServersMulti(FALSE);
-
return cm_dnlcShutdown();
}
void cm_InitSCache(int newFile, long maxSCaches)
{
static osi_once_t once;
-
+
if (osi_Once(&once)) {
lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock", LOCK_HIERARCHY_SCACHE_GLOBAL);
if ( newFile ) {
#endif
scp->cbServerp = NULL;
scp->cbExpires = 0;
+ scp->cbIssued = 0;
scp->volumeCreationDate = 0;
scp->fileLocksH = NULL;
scp->fileLocksT = NULL;
scp->openShares = 0;
scp->openExcls = 0;
scp->waitCount = 0;
+ scp->activeRPCs = 0;
#ifdef USE_BPLUS
scp->dirBplus = NULL;
scp->dirDataVersion = CM_SCACHE_VERSION_BAD;
#endif
scp->waitQueueT = NULL;
- scp->flags &= ~CM_SCACHEFLAG_WAITING;
+ _InterlockedAnd(&scp->flags, ~(CM_SCACHEFLAG_WAITING | CM_SCACHEFLAG_RDR_IN_USE));
+
+ scp->redirBufCount = 0;
+ scp->redirQueueT = NULL;
+ scp->redirQueueH = NULL;
+ lock_InitializeMutex(&scp->redirMx, "cm_scache_t redirMx", LOCK_HIERARCHY_SCACHE_REDIRMX);
}
}
cm_allFileLocks = NULL;
}
#ifdef DEBUG_REFCOUNT
-long cm_GetSCacheDbg(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp,
+long cm_GetSCacheDbg(cm_fid_t *fidp, cm_fid_t *parentFidp, cm_scache_t **outScpp, cm_user_t *userp,
cm_req_t *reqp, char * file, long line)
#else
-long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp,
+long cm_GetSCache(cm_fid_t *fidp, cm_fid_t *parentFidp, cm_scache_t **outScpp, cm_user_t *userp,
cm_req_t *reqp)
#endif
{
long hash;
cm_scache_t *scp = NULL;
+ cm_scache_t *newScp = NULL;
long code;
cm_volume_t *volp = NULL;
cm_cell_t *cellp;
int special = 0; // yj: boolean variable to test if file is on root.afs
int isRoot = 0;
extern cm_fid_t cm_rootFid;
-
+ afs_int32 refCount;
+
hash = CM_SCACHE_HASH(fidp);
-
+
if (fidp->cell == 0)
return CM_ERROR_INVAL;
#ifdef AFS_FREELANCE_CLIENT
- special = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
+ special = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
fidp->volume==AFS_FAKE_ROOT_VOL_ID &&
!(fidp->vnode==0x1 && fidp->unique==0x1));
- isRoot = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
+ isRoot = (fidp->cell==AFS_FAKE_ROOT_CELL_ID &&
fidp->volume==AFS_FAKE_ROOT_VOL_ID &&
fidp->vnode==0x1 && fidp->unique==0x1);
#endif
// yj: check if we have the scp, if so, we don't need
// to do anything else
- lock_ObtainWrite(&cm_scacheLock);
+ lock_ObtainRead(&cm_scacheLock);
for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
if (cm_FidCmp(fidp, &scp->fid) == 0) {
#ifdef DEBUG_REFCOUNT
osi_Log1(afsd_logp,"cm_GetSCache (1) scp 0x%p", scp);
#endif
#ifdef AFS_FREELANCE_CLIENT
- if (cm_freelanceEnabled && special &&
+ if (cm_freelanceEnabled && special &&
cm_data.fakeDirVersion != scp->dataVersion)
break;
#endif
+ if (parentFidp && scp->parentVnode == 0) {
+ scp->parentVnode = parentFidp->vnode;
+ scp->parentUnique = parentFidp->unique;
+ }
cm_HoldSCacheNoLock(scp);
*outScpp = scp;
+ lock_ConvertRToW(&cm_scacheLock);
cm_AdjustScacheLRU(scp);
lock_ReleaseWrite(&cm_scacheLock);
return 0;
}
}
+ lock_ReleaseRead(&cm_scacheLock);
// yj: when we get here, it means we don't have an scp
// so we need to either load it or fake it, depending
if (cm_freelanceEnabled && isRoot) {
osi_Log0(afsd_logp,"cm_GetSCache Freelance and isRoot");
/* freelance: if we are trying to get the root scp for the first
- * time, we will just put in a place holder entry.
+ * time, we will just put in a place holder entry.
*/
volp = NULL;
}
-
+
if (cm_freelanceEnabled && special) {
- lock_ReleaseWrite(&cm_scacheLock);
osi_Log0(afsd_logp,"cm_GetSCache Freelance and special");
if (cm_getLocalMountPointChange()) {
cm_reInitLocalMountPoints();
}
- lock_ObtainWrite(&cm_scacheLock);
if (scp == NULL) {
- scp = cm_GetNewSCache(); /* returns scp->rw held */
+ scp = cm_GetNewSCache(FALSE); /* returns scp->rw held */
if (scp == NULL) {
osi_Log0(afsd_logp,"cm_GetSCache unable to obtain *new* scache entry");
- lock_ReleaseWrite(&cm_scacheLock);
return CM_ERROR_WOULDBLOCK;
}
} else {
- lock_ReleaseWrite(&cm_scacheLock);
lock_ObtainWrite(&scp->rw);
- lock_ObtainWrite(&cm_scacheLock);
}
scp->fid = *fidp;
- scp->dotdotFid.cell=AFS_FAKE_ROOT_CELL_ID;
- scp->dotdotFid.volume=AFS_FAKE_ROOT_VOL_ID;
- scp->dotdotFid.unique=1;
- scp->dotdotFid.vnode=1;
- scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
+ cm_SetFid(&scp->dotdotFid,AFS_FAKE_ROOT_CELL_ID,AFS_FAKE_ROOT_VOL_ID,1,1);
+ if (parentFidp) {
+ scp->parentVnode = parentFidp->vnode;
+ scp->parentUnique = parentFidp->unique;
+ }
+ _InterlockedOr(&scp->flags, (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO));
+ lock_ObtainWrite(&cm_scacheLock);
if (!(scp->flags & CM_SCACHEFLAG_INHASH)) {
scp->nextp = cm_data.scacheHashTablep[hash];
cm_data.scacheHashTablep[hash] = scp;
- scp->flags |= CM_SCACHEFLAG_INHASH;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_INHASH);
}
- scp->refCount = 1;
- osi_Log1(afsd_logp,"cm_GetSCache (freelance) sets refCount to 1 scp 0x%x", scp);
+ refCount = InterlockedIncrement(&scp->refCount);
+ osi_Log2(afsd_logp,"cm_GetSCache (freelance) sets refCount to 1 scp 0x%p refCount %d", scp, refCount);
+ lock_ReleaseWrite(&cm_scacheLock);
/* must be called after the scp->fid is set */
cm_FreelanceFetchMountPointString(scp);
cm_FreelanceFetchFileType(scp);
-
+
scp->length.LowPart = (DWORD)strlen(scp->mountPointStringp)+4;
scp->length.HighPart = 0;
scp->owner=0x0;
scp->lockDataVersion=CM_SCACHE_VERSION_BAD; /* no lock yet */
scp->fsLockCount=0;
lock_ReleaseWrite(&scp->rw);
- lock_ReleaseWrite(&cm_scacheLock);
*outScpp = scp;
#ifdef DEBUG_REFCOUNT
afsi_log("%s:%d cm_GetSCache (2) scp 0x%p ref %d", file, line, scp, scp->refCount);
// end of yj code
#endif /* AFS_FREELANCE_CLIENT */
+ /* we don't have the fid, recycle something */
+ newScp = cm_GetNewSCache(FALSE); /* returns scp->rw held */
+ if (newScp == NULL) {
+ osi_Log0(afsd_logp,"cm_GetNewSCache unable to obtain *new* scache entry");
+ return CM_ERROR_WOULDBLOCK;
+ }
+#ifdef DEBUG_REFCOUNT
+ afsi_log("%s:%d cm_GetNewSCache returns scp 0x%p flags 0x%x", file, line, newScp, newScp->flags);
+#endif
+ osi_Log2(afsd_logp,"cm_GetNewSCache returns scp 0x%p flags 0x%x", newScp, newScp->flags);
+
/* otherwise, we need to find the volume */
if (!cm_freelanceEnabled || !isRoot) {
- lock_ReleaseWrite(&cm_scacheLock); /* for perf. reasons */
cellp = cm_FindCellByID(fidp->cell, 0);
- if (!cellp)
+ if (!cellp) {
+ /* put back newScp so it can be reused */
+ lock_ObtainWrite(&cm_scacheLock);
+ newScp->flags |= CM_SCACHEFLAG_DELETED;
+ cm_AdjustScacheLRU(newScp);
+ lock_ReleaseWrite(&newScp->rw);
+ lock_ReleaseWrite(&cm_scacheLock);
return CM_ERROR_NOSUCHCELL;
+ }
code = cm_FindVolumeByID(cellp, fidp->volume, userp, reqp, CM_GETVOL_FLAG_CREATE, &volp);
- if (code)
+ if (code) {
+ /* put back newScp so it can be reused */
+ lock_ObtainWrite(&cm_scacheLock);
+ newScp->flags |= CM_SCACHEFLAG_DELETED;
+ cm_AdjustScacheLRU(newScp);
+ lock_ReleaseWrite(&newScp->rw);
+ lock_ReleaseWrite(&cm_scacheLock);
return code;
- lock_ObtainWrite(&cm_scacheLock);
+ }
}
-
- /* otherwise, we have the volume, now reverify that the scp doesn't
- * exist, and proceed.
+
+ /*
+ * otherwise, we have the volume, now reverify that the scp doesn't
+ * exist, and proceed. make sure that we hold the cm_scacheLock
+ * write-locked until the scp is put into the hash table in order
+ * to avoid a race.
*/
+ lock_ObtainWrite(&cm_scacheLock);
for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
if (cm_FidCmp(fidp, &scp->fid) == 0) {
#ifdef DEBUG_REFCOUNT
afsi_log("%s:%d cm_GetSCache (3) scp 0x%p ref %d", file, line, scp, scp->refCount);
osi_Log1(afsd_logp,"cm_GetSCache (3) scp 0x%p", scp);
#endif
+ if (parentFidp && scp->parentVnode == 0) {
+ scp->parentVnode = parentFidp->vnode;
+ scp->parentUnique = parentFidp->unique;
+ }
+ if (volp)
+ cm_PutVolume(volp);
cm_HoldSCacheNoLock(scp);
cm_AdjustScacheLRU(scp);
+
+ /* put back newScp so it can be reused */
+ newScp->flags |= CM_SCACHEFLAG_DELETED;
+ cm_AdjustScacheLRU(newScp);
+ lock_ReleaseWrite(&newScp->rw);
lock_ReleaseWrite(&cm_scacheLock);
- if (volp)
- cm_PutVolume(volp);
+
*outScpp = scp;
return 0;
}
}
-
- /* now, if we don't have the fid, recycle something */
- scp = cm_GetNewSCache(); /* returns scp->rw held */
- if (scp == NULL) {
- osi_Log0(afsd_logp,"cm_GetNewSCache unable to obtain *new* scache entry");
- lock_ReleaseWrite(&cm_scacheLock);
- if (volp)
- cm_PutVolume(volp);
- return CM_ERROR_WOULDBLOCK;
- }
-#ifdef DEBUG_REFCOUNT
- afsi_log("%s:%d cm_GetNewSCache returns scp 0x%p flags 0x%x", file, line, scp, scp->flags);
-#endif
- osi_Log2(afsd_logp,"cm_GetNewSCache returns scp 0x%p flags 0x%x", scp, scp->flags);
-
- osi_assertx(!(scp->flags & CM_SCACHEFLAG_INHASH), "CM_SCACHEFLAG_INHASH set");
+ scp = newScp;
scp->fid = *fidp;
if (!cm_freelanceEnabled || !isRoot) {
- /* if this scache entry represents a volume root then we need
- * to copy the dotdotFipd from the volume structure where the
+ /* if this scache entry represents a volume root then we need
+ * to copy the dotdotFid from the volume structure where the
* "master" copy is stored (defect 11489)
*/
if (volp->vol[ROVOL].ID == fidp->volume) {
- scp->flags |= (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO);
+ _InterlockedOr(&scp->flags, (CM_SCACHEFLAG_PURERO | CM_SCACHEFLAG_RO));
if (scp->fid.vnode == 1 && scp->fid.unique == 1)
scp->dotdotFid = cm_VolumeStateByType(volp, ROVOL)->dotdotFid;
} else if (volp->vol[BACKVOL].ID == fidp->volume) {
- scp->flags |= CM_SCACHEFLAG_RO;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_RO);
if (scp->fid.vnode == 1 && scp->fid.unique == 1)
scp->dotdotFid = cm_VolumeStateByType(volp, BACKVOL)->dotdotFid;
} else {
scp->dotdotFid = cm_VolumeStateByType(volp, RWVOL)->dotdotFid;
}
}
+ if (parentFidp) {
+ scp->parentVnode = parentFidp->vnode;
+ scp->parentUnique = parentFidp->unique;
+ }
if (volp)
cm_PutVolume(volp);
+
scp->nextp = cm_data.scacheHashTablep[hash];
cm_data.scacheHashTablep[hash] = scp;
- scp->flags |= CM_SCACHEFLAG_INHASH;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_INHASH);
+ refCount = InterlockedIncrement(&scp->refCount);
+ lock_ReleaseWrite(&cm_scacheLock);
lock_ReleaseWrite(&scp->rw);
- scp->refCount = 1;
#ifdef DEBUG_REFCOUNT
- afsi_log("%s:%d cm_GetSCache sets refCount to 1 scp 0x%x", file, line, scp);
+ afsi_log("%s:%d cm_GetSCache sets refCount to 1 scp 0x%p refCount %d", file, line, scp, refCount);
#endif
- osi_Log1(afsd_logp,"cm_GetSCache sets refCount to 1 scp 0x%x", scp);
+ osi_Log2(afsd_logp,"cm_GetSCache sets refCount to 1 scp 0x%p refCount %d", scp, refCount);
- /* XXX - The following fields in the cm_scache are
+ /* XXX - The following fields in the cm_scache are
* uninitialized:
* fileType
* parentVnode
* parentUnique
*/
-
+
/* now we have a held scache entry; just return it */
*outScpp = scp;
#ifdef DEBUG_REFCOUNT
afsi_log("%s:%d cm_GetSCache (4) scp 0x%p ref %d", file, line, scp, scp->refCount);
osi_Log1(afsd_logp,"cm_GetSCache (4) scp 0x%p", scp);
#endif
- lock_ReleaseWrite(&cm_scacheLock);
return 0;
}
-/* Returns a held reference to the scache's parent
+/* Returns a held reference to the scache's parent
* if it exists */
cm_scache_t * cm_FindSCacheParent(cm_scache_t * scp)
{
cm_fid_t parent_fid;
cm_scache_t * pscp = NULL;
+ if (scp->parentVnode == 0)
+ return NULL;
+
lock_ObtainWrite(&cm_scacheLock);
cm_SetFid(&parent_fid, scp->fid.cell, scp->fid.volume, scp->parentVnode, scp->parentUnique);
/* synchronize a fetch, store, read, write, fetch status or store status.
* Called with scache mutex held, and returns with it held, but temporarily
* drops it during the fetch.
- *
+ *
* At most one flag can be on in flags, if this is an RPC request.
*
* Also, if we're fetching or storing data, we must ensure that we have a buffer.
* possibly resulting in a bogus truncation. The simplest way to avoid this
* is to serialize all StoreData RPC's. This is the reason we defined
* CM_SCACHESYNC_STOREDATA_EXCL and CM_SCACHEFLAG_DATASTORING.
+ *
+ * CM_SCACHESYNC_BULKREAD is used to permit synchronization of multiple bulk
+ * readers which may be requesting overlapping ranges.
*/
long cm_SyncOp(cm_scache_t *scp, cm_buf_t *bufp, cm_user_t *userp, cm_req_t *reqp,
afs_uint32 rights, afs_uint32 flags)
afs_uint32 sleep_buf_cmflags = 0;
afs_uint32 sleep_scp_bufs = 0;
int wakeupCycle;
+ afs_int32 waitCount;
+ afs_int32 waitRequests;
lock_AssertWrite(&scp->rw);
if ((flags & CM_SCACHESYNC_FORCECB) || !cm_HaveCallback(scp)) {
osi_Log1(afsd_logp, "CM SyncOp getting callback on scp 0x%p",
scp);
- if (bufLocked)
+
+ if (cm_EAccesFindEntry(userp, &scp->fid))
+ return CM_ERROR_NOACCESS;
+
+ if (bufLocked)
lock_ReleaseMutex(&bufp->mx);
code = cm_GetCallback(scp, userp, reqp, (flags & CM_SCACHESYNC_FORCECB)?1:0);
if (bufLocked) {
lock_ObtainMutex(&bufp->mx);
lock_ObtainWrite(&scp->rw);
}
- if (code)
+ if (code)
return code;
flags &= ~CM_SCACHESYNC_FORCECB; /* only force once */
continue;
if ((rights & (PRSFS_WRITE|PRSFS_DELETE)) && (scp->flags & CM_SCACHEFLAG_RO))
return CM_ERROR_READONLY;
- if (cm_HaveAccessRights(scp, userp, rights, &outRights)) {
- if (~outRights & rights)
+ if (cm_HaveAccessRights(scp, userp, reqp, rights, &outRights)) {
+ if (~outRights & rights)
return CM_ERROR_NOACCESS;
}
else {
lock_ObtainMutex(&bufp->mx);
lock_ObtainWrite(&scp->rw);
}
- if (code)
+ if (code)
return code;
continue;
}
}
+ if (flags & CM_SCACHESYNC_BULKREAD) {
+ /* Don't allow concurrent fiddling with lock lists */
+ if (scp->flags & CM_SCACHEFLAG_BULKREADING) {
+ osi_Log1(afsd_logp, "CM SyncOp scp 0x%p is BULKREADING want BULKREAD", scp);
+ goto sleep;
+ }
+ }
+
/* if we get here, we're happy */
break;
sleep:
- /* first check if we're not supposed to wait: fail
+ /* first check if we're not supposed to wait: fail
* in this case, returning with everything still locked.
*/
- if (flags & CM_SCACHESYNC_NOWAIT)
+ if (flags & CM_SCACHESYNC_NOWAIT)
return CM_ERROR_WOULDBLOCK;
/* These are used for minidump debugging */
/* wait here, then try again */
osi_Log1(afsd_logp, "CM SyncOp sleeping scp 0x%p", scp);
- if ( scp->flags & CM_SCACHEFLAG_WAITING ) {
- scp->waitCount++;
- scp->waitRequests++;
- osi_Log3(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING already set for 0x%p; %d threads; %d requests",
- scp, scp->waitCount, scp->waitRequests);
+
+ waitCount = InterlockedIncrement(&scp->waitCount);
+ waitRequests = InterlockedIncrement(&scp->waitRequests);
+ if (waitCount > 1) {
+ osi_Log3(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING already set for 0x%p; %d threads; %d requests",
+ scp, waitCount, waitRequests);
} else {
osi_Log1(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING set for 0x%p", scp);
- scp->flags |= CM_SCACHEFLAG_WAITING;
- scp->waitCount = scp->waitRequests = 1;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_WAITING);
}
cm_SyncOpAddToWaitQueue(scp, flags, bufp);
wakeupCycle = 0;
do {
- if (bufLocked)
+ if (bufLocked)
lock_ReleaseMutex(&bufp->mx);
osi_SleepW((LONG_PTR) &scp->flags, &scp->rw);
- if (bufLocked)
+ if (bufLocked)
lock_ObtainMutex(&bufp->mx);
lock_ObtainWrite(&scp->rw);
} while (!cm_SyncOpCheckContinue(scp, flags, bufp));
cm_UpdateServerPriority();
- scp->waitCount--;
- osi_Log3(afsd_logp, "CM SyncOp woke! scp 0x%p; still waiting %d threads of %d requests",
- scp, scp->waitCount, scp->waitRequests);
- if (scp->waitCount == 0) {
+ waitCount = InterlockedDecrement(&scp->waitCount);
+ osi_Log3(afsd_logp, "CM SyncOp woke! scp 0x%p; still waiting %d threads of %d requests",
+ scp, waitCount, scp->waitRequests);
+ if (waitCount == 0) {
osi_Log1(afsd_logp, "CM SyncOp CM_SCACHEFLAG_WAITING reset for 0x%p", scp);
- scp->flags &= ~CM_SCACHEFLAG_WAITING;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_WAITING);
scp->waitRequests = 0;
}
} /* big while loop */
-
+
/* now, update the recorded state for RPC-type calls */
if (flags & CM_SCACHESYNC_FETCHSTATUS)
- scp->flags |= CM_SCACHEFLAG_FETCHING;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_FETCHING);
if (flags & CM_SCACHESYNC_STORESTATUS)
- scp->flags |= CM_SCACHEFLAG_STORING;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_STORING);
if (flags & CM_SCACHESYNC_SETSIZE)
- scp->flags |= CM_SCACHEFLAG_SIZESETTING;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_SIZESETTING);
if (flags & CM_SCACHESYNC_STORESIZE)
- scp->flags |= CM_SCACHEFLAG_SIZESTORING;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_SIZESTORING);
if (flags & CM_SCACHESYNC_GETCALLBACK)
- scp->flags |= CM_SCACHEFLAG_GETCALLBACK;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_GETCALLBACK);
if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
- scp->flags |= CM_SCACHEFLAG_DATASTORING;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_DATASTORING);
if (flags & CM_SCACHESYNC_ASYNCSTORE)
- scp->flags |= CM_SCACHEFLAG_ASYNCSTORING;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_ASYNCSTORING);
if (flags & CM_SCACHESYNC_LOCK)
- scp->flags |= CM_SCACHEFLAG_LOCKING;
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_LOCKING);
+ if (flags & CM_SCACHESYNC_BULKREAD)
+ _InterlockedOr(&scp->flags, CM_SCACHEFLAG_BULKREADING);
/* now update the buffer pointer */
if (bufp && (flags & CM_SCACHESYNC_FETCHDATA)) {
osi_SetQData(qdp, bufp);
buf_Hold(bufp);
- bufp->cmFlags |= CM_BUF_CMFETCHING;
+ _InterlockedOr(&bufp->cmFlags, CM_BUF_CMFETCHING);
osi_QAdd((osi_queue_t **) &scp->bufReadsp, &qdp->q);
}
qdp = osi_QDAlloc();
osi_SetQData(qdp, bufp);
buf_Hold(bufp);
- bufp->cmFlags |= CM_BUF_CMSTORING;
+ _InterlockedOr(&bufp->cmFlags, CM_BUF_CMSTORING);
osi_QAdd((osi_queue_t **) &scp->bufWritesp, &qdp->q);
}
if (bufp && (flags & CM_SCACHESYNC_WRITE)) {
/* mark the buffer as being written to. */
- bufp->cmFlags |= CM_BUF_CMWRITING;
+ _InterlockedOr(&bufp->cmFlags, CM_BUF_CMWRITING);
}
return 0;
/* now, update the recorded state for RPC-type calls */
if (flags & CM_SCACHESYNC_FETCHSTATUS)
- scp->flags &= ~CM_SCACHEFLAG_FETCHING;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_FETCHING);
if (flags & CM_SCACHESYNC_STORESTATUS)
- scp->flags &= ~CM_SCACHEFLAG_STORING;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_STORING);
if (flags & CM_SCACHESYNC_SETSIZE)
- scp->flags &= ~CM_SCACHEFLAG_SIZESETTING;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_SIZESETTING);
if (flags & CM_SCACHESYNC_STORESIZE)
- scp->flags &= ~CM_SCACHEFLAG_SIZESTORING;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_SIZESTORING);
if (flags & CM_SCACHESYNC_GETCALLBACK)
- scp->flags &= ~CM_SCACHEFLAG_GETCALLBACK;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_GETCALLBACK);
if (flags & CM_SCACHESYNC_STOREDATA_EXCL)
- scp->flags &= ~CM_SCACHEFLAG_DATASTORING;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_DATASTORING);
if (flags & CM_SCACHESYNC_ASYNCSTORE)
- scp->flags &= ~CM_SCACHEFLAG_ASYNCSTORING;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_ASYNCSTORING);
if (flags & CM_SCACHESYNC_LOCK)
- scp->flags &= ~CM_SCACHEFLAG_LOCKING;
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_LOCKING);
+ if (flags & CM_SCACHESYNC_BULKREAD)
+ _InterlockedAnd(&scp->flags, ~CM_SCACHEFLAG_BULKREADING);
/* now update the buffer pointer */
if (bufp && (flags & CM_SCACHESYNC_FETCHDATA)) {
/* ensure that the buffer is in the I/O list */
for (qdp = scp->bufReadsp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
tbufp = osi_GetQData(qdp);
- if (tbufp == bufp)
+ if (tbufp == bufp)
break;
}
if (qdp) {
osi_QDFree(qdp);
release = 1;
}
- bufp->cmFlags &= ~(CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED);
+ _InterlockedAnd(&bufp->cmFlags, ~(CM_BUF_CMFETCHING | CM_BUF_CMFULLYFETCHED));
if (bufp->flags & CM_BUF_WAITING) {
osi_Log2(afsd_logp, "CM SyncOpDone FetchData Waking [scp 0x%p] bufp 0x%p", scp, bufp);
osi_Wakeup((LONG_PTR) &bufp);
/* ensure that the buffer is in the I/O list */
for (qdp = scp->bufWritesp; qdp; qdp = (osi_queueData_t *) osi_QNext(&qdp->q)) {
tbufp = osi_GetQData(qdp);
- if (tbufp == bufp)
+ if (tbufp == bufp)
break;
}
if (qdp) {
osi_QDFree(qdp);
release = 1;
}
- bufp->cmFlags &= ~CM_BUF_CMSTORING;
+ _InterlockedAnd(&bufp->cmFlags, ~CM_BUF_CMSTORING);
if (bufp->flags & CM_BUF_WAITING) {
osi_Log2(afsd_logp, "CM SyncOpDone StoreData Waking [scp 0x%p] bufp 0x%p", scp, bufp);
osi_Wakeup((LONG_PTR) &bufp);
if (bufp && (flags & CM_SCACHESYNC_WRITE)) {
osi_assertx(bufp->cmFlags & CM_BUF_CMWRITING, "!CM_BUF_CMWRITING");
- bufp->cmFlags &= ~CM_BUF_CMWRITING;
+ _InterlockedAnd(&bufp->cmFlags, ~CM_BUF_CMWRITING);
}
/* and wakeup anyone who is waiting */
- if (scp->flags & CM_SCACHEFLAG_WAITING) {
+ if ((scp->flags & CM_SCACHEFLAG_WAITING) ||
+ !osi_QIsEmpty(&scp->waitQueueH)) {
osi_Log3(afsd_logp, "CM SyncOpDone 0x%x Waking scp 0x%p bufp 0x%p", flags, scp, bufp);
osi_Wakeup((LONG_PTR) &scp->flags);
}
-}
+}
+
+/*
+ * dv_diff - return the magnitude of the difference between two 64-bit
+ * data versions, truncated to 32 bits.
+ *
+ * Unsigned arithmetic is used deliberately: if dv2 > dv1, (dv1 - dv2)
+ * wraps to a value far above 0x7FFFFFFF, so the first branch returns
+ * (dv2 - dv1) instead.  For versions within 2^31 of each other this
+ * yields the absolute difference |dv1 - dv2|.
+ *
+ * NOTE(review): when the true difference exceeds 32 bits the result is
+ * truncated by the (afs_uint32) casts -- presumably callers only compare
+ * the result against small RPC counts, so this is harmless; confirm.
+ */
+static afs_uint32
+dv_diff(afs_uint64 dv1, afs_uint64 dv2)
+{
+ if ( dv1 - dv2 > 0x7FFFFFFF )
+ return (afs_uint32)(dv2 - dv1);
+ else
+ return (afs_uint32)(dv1 - dv2);
+}
+
+/*
+ * cm_IsStatusValid - sanity-check an AFSFetchStatus structure received
+ * from a file server before merging it into the local status cache.
+ *
+ * Returns 1 if the status looks valid, 0 otherwise.  A status is
+ * rejected when its InterfaceVersion is not 1 or its FileType falls
+ * outside the range (0, SymbolicLink] -- i.e. it is neither a file,
+ * directory, nor symlink.
+ */
+long
+cm_IsStatusValid(AFSFetchStatus *statusp)
+{
+ if (statusp->InterfaceVersion != 0x1 ||
+ !(statusp->FileType > 0 && statusp->FileType <= SymbolicLink)) {
+ return 0;
+ }
+
+ return 1;
+}
/* merge in a response from an RPC. The scp must be locked, and the callback
* is optional.
* handled after the callback breaking is done, but only one of whose calls
* started before that, can cause old info to be merged from the first call.
*/
-void cm_MergeStatus(cm_scache_t *dscp,
- cm_scache_t *scp, AFSFetchStatus *statusp,
+long cm_MergeStatus(cm_scache_t *dscp,
+ cm_scache_t *scp, AFSFetchStatus *statusp,
AFSVolSync *volsyncp,
cm_user_t *userp, cm_req_t *reqp, afs_uint32 flags)
{
afs_uint64 dataVersion;
struct cm_volume *volp = NULL;
struct cm_cell *cellp = NULL;
+ int rdr_invalidate = 0;
+ afs_uint32 activeRPCs;
+
+ lock_AssertWrite(&scp->rw);
+
+ activeRPCs = 1 + InterlockedDecrement(&scp->activeRPCs);
// yj: i want to create some fake status for the /afs directory and the
// entries under that directory
}
#endif /* AFS_FREELANCE_CLIENT */
- if (statusp->errorCode != 0) {
- scp->flags |= CM_SCACHEFLAG_EACCESS;
- osi_Log2(afsd_logp, "Merge, Failure scp %x code 0x%x", scp, statusp->errorCode);
+ if (!cm_IsStatusValid(statusp)) {
+ osi_Log3(afsd_logp, "Merge: Bad Status scp 0x%p Invalid InterfaceVersion %d FileType %d",
+ scp, statusp->InterfaceVersion, statusp->FileType);
+ return CM_ERROR_INVAL;
+ }
- scp->fileType = 0; /* unknown */
+ if (statusp->errorCode != 0) {
+ switch (statusp->errorCode) {
+ case EACCES:
+ case UAEACCES:
+ case EPERM:
+ case UAEPERM:
+ cm_EAccesAddEntry(userp, &scp->fid, &dscp->fid);
+ }
+ osi_Log2(afsd_logp, "Merge, Failure scp 0x%p code 0x%x", scp, statusp->errorCode);
+
+ if (scp->fid.vnode & 0x1)
+ scp->fileType = CM_SCACHETYPE_DIRECTORY;
+ else
+ scp->fileType = CM_SCACHETYPE_UNKNOWN;
scp->serverModTime = 0;
scp->clientModTime = 0;
scp->bufDataVersionLow = CM_SCACHE_VERSION_BAD;
scp->fsLockCount = 0;
- if (dscp) {
+ if (dscp && dscp != scp) {
scp->parentVnode = dscp->fid.vnode;
scp->parentUnique = dscp->fid.unique;
} else {
scp->parentVnode = 0;
scp->parentUnique = 0;
}
- goto done;
- } else {
- scp->flags &= ~CM_SCACHEFLAG_EACCESS;
+
+ if (RDR_Initialized)
+ rdr_invalidate = 1;
}
dataVersion = statusp->dataVersionHigh;
dataVersion <<= 32;
dataVersion |= statusp->DataVersion;
- if (!(flags & CM_MERGEFLAG_FORCE) &&
+ if (!(flags & CM_MERGEFLAG_FORCE) &&
dataVersion < scp->dataVersion &&
scp->dataVersion != CM_SCACHE_VERSION_BAD) {
scp->cbServerp->addr.sin_addr.s_addr,
volp ? volp->namep : "(unknown)");
}
- osi_Log3(afsd_logp, "Bad merge, scp %x, scp dv %d, RPC dv %d",
+
+ osi_Log3(afsd_logp, "Bad merge, scp 0x%p, scp dv %d, RPC dv %d",
scp, scp->dataVersion, dataVersion);
/* we have a number of data fetch/store operations running
* concurrently, and we can tell which one executed last at the
*/
if (!(scp->flags & CM_SCACHEFLAG_RO))
goto done;
- }
+ }
- if (cm_readonlyVolumeVersioning)
+ /*
+ * The first field of the volsync parameter is supposed to be the
+ * volume creation date. Unfortunately, pre-OpenAFS 1.4.11 and 1.6.0
+ * file servers do not populate the VolSync structure for BulkStat and
+ * InlineBulkStat RPCs. As a result, the volume creation date is not
+ * trustworthy when status is obtained via [Inline]BulkStatus RPCs.
+ * If cm_readonlyVolumeVersioning is set, it is assumed that all file
+ * servers populate the VolSync structure at all times.
+ */
+ if (cm_readonlyVolumeVersioning || !(flags & CM_MERGEFLAG_BULKSTAT))
scp->volumeCreationDate = volsyncp->spare1; /* volume creation date */
+ else
+ scp->volumeCreationDate = 0;
scp->serverModTime = statusp->ServerModTime;
scp->fileType = CM_SCACHETYPE_MOUNTPOINT;
else
scp->fileType = CM_SCACHETYPE_SYMLINK;
- }
+ }
else {
- osi_Log2(afsd_logp, "Merge, Invalid File Type (%d), scp %x", statusp->FileType, scp);
+ osi_Log2(afsd_logp, "Merge, Invalid File Type (%d), scp 0x%p", statusp->FileType, scp);
scp->fileType = CM_SCACHETYPE_INVALID; /* invalid */
}
/* and other stuff */
scp->parentVnode = statusp->ParentVnode;
scp->parentUnique = statusp->ParentUnique;
- scp->fsLockCount = statusp->lockCount;
+
+ /* -1 is a write lock; any positive values are read locks */
+ scp->fsLockCount = (afs_int32)statusp->lockCount;
/* and merge in the private acl cache info, if this is more than the public
* info; merge in the public stuff in any case.
cm_AddACLCache(scp, userp, statusp->CallerAccess);
}
- if (scp->dataVersion != 0 &&
- (!(flags & (CM_MERGEFLAG_DIROP|CM_MERGEFLAG_STOREDATA)) && dataVersion != scp->dataVersion ||
- (flags & (CM_MERGEFLAG_DIROP|CM_MERGEFLAG_STOREDATA)) && dataVersion - scp->dataVersion > 1)) {
- /*
+ if (dataVersion != 0 && scp->dataVersion != CM_SCACHE_VERSION_BAD &&
+ (!(flags & (CM_MERGEFLAG_DIROP|CM_MERGEFLAG_STOREDATA)) && (dataVersion != scp->dataVersion) ||
+ (flags & (CM_MERGEFLAG_DIROP|CM_MERGEFLAG_STOREDATA)) &&
+ (dv_diff(dataVersion, scp->dataVersion) > activeRPCs))) {
+ /*
* We now know that all of the data buffers that we have associated
* with this scp are invalid. Subsequent operations will go faster
* if the buffers are removed from the hash tables.
*
- * We do not remove directory buffers if the dataVersion delta is 1 because
+ * We do not remove directory buffers if the dataVersion delta is 'activeRPCs' because
* those version numbers will be updated as part of the directory operation.
*
* We do not remove storedata buffers because they will still be valid.
for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=nextBp)
{
nextBp = bp->fileHashp;
- /*
+ /*
* if the buffer belongs to this stat cache entry
* and the buffer mutex can be obtained, check the
* reference count and if it is zero, remove the buffer
*/
if (cm_FidCmp(&scp->fid, &bp->fid) == 0 &&
lock_TryMutex(&bp->mx)) {
- if (bp->refCount == 0 &&
- !(bp->flags & CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)) {
+ if (bp->refCount == 0 &&
+ !(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)) &&
+ !(bp->qFlags & CM_BUF_QREDIR)) {
prevBp = bp->fileHashBackp;
bp->fileHashBackp = bp->fileHashp = NULL;
if (prevBp)
j = BUF_HASH(&bp->fid, &bp->offset);
lbpp = &(cm_data.buf_scacheHashTablepp[j]);
- for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
- if (tbp == bp)
+ for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = tbp->hashp) {
+ if (tbp == bp)
break;
}
+ /* we better find it */
+ osi_assertx(tbp != NULL, "cm_MergeStatus: buf_scacheHashTablepp table screwup");
+
*lbpp = bp->hashp; /* hash out */
bp->hashp = NULL;
- bp->qFlags &= ~CM_BUF_QINHASH;
+ _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINHASH);
}
lock_ReleaseMutex(&bp->mx);
}
lock_ReleaseWrite(&buf_globalLock);
}
- /*
- * If the dataVersion has changed, the mountPointStringp must be cleared
- * in order to force a re-evaluation by cm_HandleLink(). The Windows CM
- * does not update a mountpoint or symlink by altering the contents of
- * the file data; but the Unix CM does.
- */
- if (scp->dataVersion != dataVersion)
- scp->mountPointStringp[0] = '\0';
+ if (scp->dataVersion != dataVersion && !(flags & CM_MERGEFLAG_FETCHDATA)) {
+ osi_Log5(afsd_logp, "cm_MergeStatus data version change scp 0x%p cell %u vol %u vn %u uniq %u",
+ scp, scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique);
- /* We maintain a range of buffer dataVersion values which are considered
+ osi_Log4(afsd_logp, ".... oldDV 0x%x:%x -> newDV 0x%x:%x",
+ (afs_uint32)((scp->dataVersion >> 32) & 0xFFFFFFFF),
+ (afs_uint32)(scp->dataVersion & 0xFFFFFFFF),
+ (afs_uint32)((dataVersion >> 32) & 0xFFFFFFFF),
+ (afs_uint32)(dataVersion & 0xFFFFFFFF));
+ }
+
+ /* We maintain a range of buffer dataVersion values which are considered
* valid. This avoids the need to update the dataVersion on each buffer
- * object during an uncontested storeData operation. As a result this
+ * object during an uncontested storeData operation. As a result this
* merge status no longer has performance characteristics derived from
* the size of the file.
+ *
+ * For directory buffers, only current dataVersion values are up to date.
*/
- if (((flags & CM_MERGEFLAG_STOREDATA) && dataVersion - scp->dataVersion > 1) ||
- (!(flags & CM_MERGEFLAG_STOREDATA) && scp->dataVersion != dataVersion) ||
- scp->bufDataVersionLow == 0)
+ if (((flags & (CM_MERGEFLAG_STOREDATA|CM_MERGEFLAG_DIROP)) && (dv_diff(dataVersion, scp->dataVersion) > activeRPCs)) ||
+ (!(flags & (CM_MERGEFLAG_STOREDATA|CM_MERGEFLAG_DIROP)) && (scp->dataVersion != dataVersion)) ||
+ scp->bufDataVersionLow == CM_SCACHE_VERSION_BAD ||
+ scp->fileType == CM_SCACHETYPE_DIRECTORY ||
+ flags & CM_MERGEFLAG_CACHE_BYPASS) {
scp->bufDataVersionLow = dataVersion;
-
+ }
+
+ if (RDR_Initialized) {
+ /*
+ * The redirector maintains its own cached status information which
+ * must be updated when a DV change occurs that is not the result
+ * of a redirector initiated data change.
+ *
+ * If the current old DV is BAD, send a DV change notification.
+ *
+ * If the DV has changed and request was not initiated by the
+ * redirector, send a DV change notification.
+ *
+ * If the request was initiated by the redirector, send a notification
+ * for store and directory operations that result in a DV change greater
+ * than the number of active RPCs or any other operation that results
+ * in an unexpected DV change such as FetchStatus.
+ */
+
+ if (scp->dataVersion == CM_SCACHE_VERSION_BAD && dataVersion != 0) {
+ rdr_invalidate = 1;
+ } else if (!(reqp->flags & CM_REQ_SOURCE_REDIR) && scp->dataVersion != dataVersion) {
+ rdr_invalidate = 1;
+ } else if (reqp->flags & CM_REQ_SOURCE_REDIR) {
+ if (!(flags & (CM_MERGEFLAG_DIROP|CM_MERGEFLAG_STOREDATA)) &&
+ (dv_diff(dataVersion, scp->dataVersion) > activeRPCs - 1)) {
+ rdr_invalidate = 1;
+ } else if ((flags & (CM_MERGEFLAG_DIROP|CM_MERGEFLAG_STOREDATA)) &&
+ dv_diff(dataVersion, scp->dataVersion) > activeRPCs) {
+ rdr_invalidate = 1;
+ }
+ }
+ }
scp->dataVersion = dataVersion;
- /*
+ /*
* If someone is waiting for status information, we can wake them up
- * now even though the entity that issued the FetchStatus may not
+ * now even though the entity that issued the FetchStatus may not
* have completed yet.
*/
cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_FETCHSTATUS);
lock_ReleaseWrite(&volp->rw);
}
}
+
+ /* Remove cached EACCES / EPERM errors if the file is a directory */
+ if (scp->fileType == CM_SCACHETYPE_DIRECTORY &&
+ !(volp && (volp->flags & CM_VOLUMEFLAG_DFS_VOLUME)) &&
+ !cm_accessPerFileCheck)
+ {
+ cm_EAccesClearParentEntries(&scp->fid);
+ }
+
done:
if (volp)
cm_PutVolume(volp);
+ /*
+ * The scache rw lock cannot be held across the invalidation.
+ * Doing so can result in deadlocks with other threads processing
+ * requests initiated by the afs redirector.
+ */
+ if (rdr_invalidate) {
+ lock_ReleaseWrite(&scp->rw);
+ RDR_InvalidateObject(scp->fid.cell, scp->fid.volume, scp->fid.vnode,
+ scp->fid.unique, scp->fid.hash,
+ scp->fileType, AFS_INVALIDATE_DATA_VERSION);
+ lock_ObtainWrite(&scp->rw);
+ }
+
+ return 0;
}
/* note that our stat cache info is incorrect, so force us eventually
scp->cbServerp = NULL;
}
scp->cbExpires = 0;
- scp->volumeCreationDate = 0;
- scp->flags &= ~(CM_SCACHEFLAG_CALLBACK | CM_SCACHEFLAG_LOCAL);
+ scp->cbIssued = 0;
+ _InterlockedAnd(&scp->flags, ~(CM_SCACHEFLAG_LOCAL | CM_SCACHEFLAG_RDR_IN_USE));
cm_dnlcPurgedp(scp);
cm_dnlcPurgevp(scp);
cm_FreeAllACLEnts(scp);
if (scp->fileType == CM_SCACHETYPE_DFSLINK)
cm_VolStatus_Invalidate_DFS_Mapping(scp);
-
- /* Force mount points and symlinks to be re-evaluated */
- scp->mountPointStringp[0] = '\0';
}
void cm_AFSFidFromFid(AFSFid *afsFidp, cm_fid_t *fidp)
afsFidp->Volume = fidp->volume;
afsFidp->Vnode = fidp->vnode;
afsFidp->Unique = fidp->unique;
-}
+}
#ifdef DEBUG_REFCOUNT
void cm_HoldSCacheNoLockDbg(cm_scache_t *scp, char * file, long line)
#else
void cm_HoldSCacheNoLock(cm_scache_t *scp)
#endif
-{
+{
afs_int32 refCount;
osi_assertx(scp != NULL, "null cm_scache_t");
long lockstate;
lockstate = lock_GetRWLockState(&cm_scacheLock);
- if (lockstate != OSI_RWLOCK_WRITEHELD)
+ if (lockstate != OSI_RWLOCK_WRITEHELD)
lock_ReleaseRead(&cm_scacheLock);
else
lock_ReleaseWrite(&cm_scacheLock);
if (refCount == 0 && deleted) {
lock_ObtainWrite(&cm_scacheLock);
cm_RecycleSCache(scp, 0);
- if (lockstate != OSI_RWLOCK_WRITEHELD)
+ if (lockstate != OSI_RWLOCK_WRITEHELD)
lock_ConvertWToR(&cm_scacheLock);
} else {
- if (lockstate != OSI_RWLOCK_WRITEHELD)
+ if (lockstate != OSI_RWLOCK_WRITEHELD)
lock_ObtainRead(&cm_scacheLock);
else
lock_ObtainWrite(&cm_scacheLock);
#else
void cm_ReleaseSCache(cm_scache_t *scp)
#endif
-{
+{
afs_int32 refCount;
osi_assertx(scp != NULL, "null cm_scache_t");
{
long hash;
cm_scache_t *scp;
-
+
hash = CM_SCACHE_HASH(fidp);
-
+
osi_assertx(fidp->cell != 0, "unassigned cell value");
lock_ObtainWrite(&cm_scacheLock);
return 0;
}
-/* dump all scp's that have reference count > 0 to a file.
- * cookie is used to identify this batch for easy parsing,
- * and it a string provided by a caller
+/* dump all scp's that have reference count > 0 to a file.
+ * cookie is used to identify this batch for easy parsing,
+ * and it is a string provided by a caller
*/
int cm_DumpSCache(FILE *outputFile, char *cookie, int lock)
{
osi_queue_t *q;
char output[2048];
int i;
-
+
if (lock)
lock_ObtainRead(&cm_scacheLock);
-
+
sprintf(output, "%s - dumping all scache - cm_data.currentSCaches=%d, cm_data.maxSCaches=%d\r\n", cookie, cm_data.currentSCaches, cm_data.maxSCaches);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
-
- for (scp = cm_data.allSCachesp; scp; scp = scp->allNextp)
+
+ for (scp = cm_data.allSCachesp; scp; scp = scp->allNextp)
{
time_t t;
char *srvStr = NULL;
if (scp->cbServerp) {
if (!((scp->cbServerp->flags & CM_SERVERFLAG_UUID) &&
UuidToString((UUID *)&scp->cbServerp->uuid, &srvStr) == RPC_S_OK)) {
- afs_asprintf(&srvStr, "%.0I", scp->cbServerp->addr.sin_addr.s_addr);
+ srvStr = malloc(16); /* enough for 255.255.255.255 */
+ if (srvStr != NULL)
+ afs_inet_ntoa_r(scp->cbServerp->addr.sin_addr.s_addr, srvStr);
srvStrRpc = FALSE;
}
}
}
sprintf(output,
"%s scp=0x%p, fid (cell=%d, volume=%d, vnode=%d, unique=%d) type=%d dv=%I64d len=0x%I64x "
- "mp='%s' Locks (server=0x%x shared=%d excl=%d clnt=%d) fsLockCount=%d linkCount=%d anyAccess=0x%x "
+ "mpDV=%I64d mp='%s' Locks (server=0x%x shared=%d excl=%d clnt=%d) fsLockCount=%d linkCount=%d anyAccess=0x%x "
"flags=0x%x cbServer='%s' cbExpires='%s' volumeCreationDate='%s' refCount=%u\r\n",
cookie, scp, scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique,
- scp->fileType, scp->dataVersion, scp->length.QuadPart, scp->mountPointStringp,
+ scp->fileType, scp->dataVersion, scp->length.QuadPart, scp->mpDataVersion, scp->mountPointStringp,
scp->serverLock, scp->sharedLocks, scp->exclusiveLocks, scp->clientLocks, scp->fsLockCount,
scp->linkCount, scp->anyAccess, scp->flags, srvStr ? srvStr : "<none>", cbt ? cbt : "<none>",
cdrot ? cdrot : "<none>", scp->refCount);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
for (q = scp->fileLocksH; q; q = osi_QNext(q)) {
- cm_file_lock_t * lockp = (cm_file_lock_t *)((char *) q - offsetof(cm_file_lock_t, fileq));
+ cm_file_lock_t * lockp = fileq_to_cm_file_lock_t(q);
sprintf(output, " %s lockp=0x%p scp=0x%p, cm_userp=0x%p offset=0x%I64x len=0x%08I64x type=0x%x "
"key=0x%I64x flags=0x%x update=0x%I64u\r\n",
cookie, lockp, lockp->scp, lockp->userp, lockp->range.offset, lockp->range.length,
lockp->lockType, lockp->key, lockp->flags, (afs_uint64)lockp->lastUpdate);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
- }
+ }
sprintf(output, " %s - done dumping scp locks\r\n", cookie);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
if (cdrot)
free(cdrot);
}
-
+
sprintf(output, "%s - Done dumping all scache.\r\n", cookie);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
sprintf(output, "%s - dumping cm_data.scacheHashTable - cm_data.scacheHashTableSize=%d\r\n",
cookie, cm_data.scacheHashTableSize);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
-
+
for (i = 0; i < cm_data.scacheHashTableSize; i++)
{
- for(scp = cm_data.scacheHashTablep[i]; scp; scp=scp->nextp)
+ for(scp = cm_data.scacheHashTablep[i]; scp; scp=scp->nextp)
{
- sprintf(output, "%s scp=0x%p, hash=%d, fid (cell=%d, volume=%d, vnode=%d, unique=%d)\r\n",
+ sprintf(output, "%s scp=0x%p, hash=%d, fid (cell=%d, volume=%d, vnode=%d, unique=%d)\r\n",
cookie, scp, i, scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
}
for (q = cm_allFileLocks; q; q = osi_QNext(q)) {
cm_file_lock_t * lockp = (cm_file_lock_t *)q;
- sprintf(output, "%s filelockp=0x%p scp=0x%p, cm_userp=0x%p offset=0x%I64x len=0x%08I64x type=0x%x key=0x%I64x flags=0x%x update=0x%I64u\r\n",
- cookie, lockp, lockp->scp, lockp->userp, lockp->range.offset, lockp->range.length,
+ sprintf(output, "%s filelockp=0x%p scp=0x%p, cm_userp=0x%p offset=0x%I64x len=0x%08I64x type=0x%x key=0x%I64x flags=0x%x update=0x%I64u\r\n",
+ cookie, lockp, lockp->scp, lockp->userp, lockp->range.offset, lockp->range.length,
lockp->lockType, lockp->key, lockp->flags, (afs_uint64)lockp->lastUpdate);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
- }
+ }
sprintf(output, "%s - done dumping all file locks\r\n", cookie);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
if (lock)
- lock_ReleaseRead(&cm_scacheLock);
- return (0);
+ lock_ReleaseRead(&cm_scacheLock);
+ return (0);
}