/* Nonzero when dcache initialization failed (e.g. memory cache too large
 * for available memory) and file access through the cache is disabled. */
int dcacheDisabled = 0;
/* Dispatch table of cache operations for a UFS (on-disk) cache.
 * Old SGI MIPSpro compilers without C99 support cannot parse designated
 * initializers, so a positional initializer list is retained for that
 * platform; all other builds use the self-documenting designated form. */
struct afs_cacheOps afs_UfsCacheOps = {
+#if defined(AFS_SGI_ENV) && !defined(__c99)
/* NOTE(review): positional branch initializes only a subset of members;
 * assumes the remaining members default to zero and the member order here
 * matches the struct afs_cacheOps declaration — verify against afs.h. */
osi_UFSOpen,
osi_UFSTruncate,
afs_osi_Read,
afs_UFSGetDSlot,
afs_UFSGetVolSlot,
afs_UFSHandleLink,
+#else
+ .open = osi_UFSOpen,
+ .truncate = osi_UFSTruncate,
+ .fread = afs_osi_Read,
+ .fwrite = afs_osi_Write,
+ .close = osi_UFSClose,
+ .vread = afs_UFSRead,
+ .vwrite = afs_UFSWrite,
+ .GetDSlot = afs_UFSGetDSlot,
+ .GetVolSlot = afs_UFSGetVolSlot,
+ .HandleLink = afs_UFSHandleLink,
+#endif
};
/* Dispatch table of cache operations for an in-memory cache; mirrors the
 * structure of afs_UfsCacheOps with memory-backed implementations.
 * The positional branch exists only for pre-C99 SGI compilers that lack
 * designated-initializer support. */
struct afs_cacheOps afs_MemCacheOps = {
+#if (defined(AFS_SGI_ENV) && !defined(__c99))
/* NOTE(review): positional branch initializes only a subset of members;
 * assumes remaining members are zero/unused on this platform — confirm
 * against the struct afs_cacheOps declaration. */
afs_MemCacheOpen,
afs_MemCacheTruncate,
afs_MemReadBlk,
afs_MemGetDSlot,
afs_MemGetVolSlot,
afs_MemHandleLink,
+#else
+ .open = afs_MemCacheOpen,
+ .truncate = afs_MemCacheTruncate,
+ .fread = afs_MemReadBlk,
+ .fwrite = afs_MemWriteBlk,
+ .close = afs_MemCacheClose,
+ .vread = afs_MemRead,
+ .vwrite = afs_MemWrite,
+ .GetDSlot = afs_MemGetDSlot,
+ .GetVolSlot = afs_MemGetVolSlot,
+ .HandleLink = afs_MemHandleLink,
+#endif
};
int cacheDiskType; /*Type of backing disk for cache */
afs_TruncateDaemonRunning = 1;
while (1) {
cb_lowat = PERCENT((CM_DCACHESPACEFREEPCT - CM_DCACHEEXTRAPCT), afs_cacheBlocks);
- MObtainWriteLock(&afs_xdcache, 266);
+ ObtainWriteLock(&afs_xdcache, 266);
if (afs_CacheTooFull) {
int space_needed, slots_needed;
/* if we get woken up, we should try to clean something out */
if (!afs_CacheIsTooFull())
afs_CacheTooFull = 0;
} /* end of cache cleanup */
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
/*
* This is a defensive check to try to avoid starving threads
afs_stats_AddTo(CTD_stats.CTD_sleepTime, CTD_tmpTime);
}
if (afs_termState == AFSOP_STOP_TRUNCDAEMON) {
-#ifdef AFS_AFSDB_ENV
afs_termState = AFSOP_STOP_AFSDB;
-#else
- afs_termState = AFSOP_STOP_RXEVENT;
-#endif
afs_osi_Wakeup(&afs_termState);
break;
}
afs_size_t tchunkoffset = 0;
afid = &tdc->f.fid;
/* xdcache is lower than the xvcache lock */
- MReleaseWriteLock(&afs_xdcache);
- MObtainReadLock(&afs_xvcache);
+ ReleaseWriteLock(&afs_xdcache);
+ ObtainReadLock(&afs_xvcache);
tvc = afs_FindVCache(afid, 0, 0 /* no stats, no vlru */ );
- MReleaseReadLock(&afs_xvcache);
- MObtainWriteLock(&afs_xdcache, 527);
+ ReleaseReadLock(&afs_xvcache);
+ ObtainWriteLock(&afs_xdcache, 527);
skip = 0;
if (tdc->refCount > 1)
skip = 1;
if (!skip && (chunkFlags & IFAnyPages)) {
int code;
- MReleaseWriteLock(&afs_xdcache);
- MObtainWriteLock(&tvc->vlock, 543);
+ ReleaseWriteLock(&afs_xdcache);
+ ObtainWriteLock(&tvc->vlock, 543);
if (tvc->multiPage) {
skip = 1;
goto endmultipage;
tvc->vstates |= VPageCleaning;
/* block getting new pages */
tvc->activeV++;
- MReleaseWriteLock(&tvc->vlock);
+ ReleaseWriteLock(&tvc->vlock);
/* One last recheck */
- MObtainWriteLock(&afs_xdcache, 333);
+ ObtainWriteLock(&afs_xdcache, 333);
chunkFlags = afs_indexFlags[tdc->index];
if (tdc->refCount > 1 || (chunkFlags & IFDataMod)
|| (osi_Active(tvc) && (tvc->f.states & CDCLock)
&& (chunkFlags & IFAnyPages))) {
skip = 1;
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
goto endputpage;
}
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
code = osi_VM_GetDownD(tvc, tdc);
- MObtainWriteLock(&afs_xdcache, 269);
+ ObtainWriteLock(&afs_xdcache, 269);
/* we actually removed all pages, clean and dirty */
if (code == 0) {
afs_indexFlags[tdc->index] &=
~(IFDirtyPages | IFAnyPages);
} else
skip = 1;
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
endputpage:
- MObtainWriteLock(&tvc->vlock, 544);
+ ObtainWriteLock(&tvc->vlock, 544);
if (--tvc->activeV == 0
&& (tvc->vstates & VRevokeWait)) {
tvc->vstates &= ~VRevokeWait;
afs_osi_Wakeup((char *)&tvc->vstates);
}
endmultipage:
- MReleaseWriteLock(&tvc->vlock);
+ ReleaseWriteLock(&tvc->vlock);
} else
#endif /* AFS_SUN5_ENV */
{
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
}
afs_PutVCache(tvc); /*XXX was AFS_FAST_RELE?*/
- MObtainWriteLock(&afs_xdcache, 528);
+ ObtainWriteLock(&afs_xdcache, 528);
if (afs_indexFlags[tdc->index] &
(IFDataMod | IFDirtyPages | IFAnyPages))
skip = 1;
AFS_STATCNT(afs_FreeDiscardedDCache);
- MObtainWriteLock(&afs_xdcache, 510);
+ ObtainWriteLock(&afs_xdcache, 510);
if (!afs_blocksDiscarded) {
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
return;
}
afs_stats_cmperf.cacheBlocksDiscarded = afs_blocksDiscarded;
/* We can lock because we just took it off the free list */
ObtainWriteLock(&tdc->lock, 626);
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
/*
* Truncate the element to reclaim its space
/*
* Free the element we just truncated
*/
- MObtainWriteLock(&afs_xdcache, 511);
+ ObtainWriteLock(&afs_xdcache, 511);
afs_indexFlags[tdc->index] &= ~IFDiscarded;
afs_FreeDCache(tdc);
tdc->f.states &= ~(DRO|DBackup|DRW);
ReleaseWriteLock(&tdc->lock);
afs_PutDCache(tdc);
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
}
/*!
* Both pvnLock and lock are write held.
*/
void
-afs_TryToSmush(register struct vcache *avc, struct AFS_UCRED *acred, int sync)
+afs_TryToSmush(register struct vcache *avc, afs_ucred_t *acred, int sync)
{
register struct dcache *tdc;
register int index;
* Get the hash chain containing all dce's for this fid
*/
i = DVHash(&avc->f.fid);
- MObtainWriteLock(&afs_xdcache, 277);
+ ObtainWriteLock(&afs_xdcache, 277);
for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
i = afs_dvnextTbl[index]; /* next pointer this hash table */
if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
}
ReleaseWriteLock(&avc->vlock);
#endif
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
/*
* It's treated like a callback so that when we do lookups we'll
* invalidate the unique bit if any
totalChunks, (totalLength + 1));
*/
i = DVHash(&avc->f.fid);
- MObtainWriteLock(&afs_xdcache, 1001);
+ ObtainWriteLock(&afs_xdcache, 1001);
for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
i = afs_dvnextTbl[index];
if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
afs_PutDCache(tdc);
}
}
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
/*printf("Missing %d chunks\n", totalChunks);*/
* after write-locking the dcache.
*/
i = DCHash(&avc->f.fid, chunk);
- MObtainWriteLock(&afs_xdcache, 278);
+ ObtainWriteLock(&afs_xdcache, 278);
for (index = afs_dchashTbl[i]; index != NULLIDX;) {
if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetDSlot(index, NULL);
if (index != NULLIDX) {
hset(afs_indexTimes[tdc->index], afs_indexCounter);
hadd32(afs_indexCounter, 1);
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
return tdc;
}
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
return NULL;
} /*afs_FindDCache */
*
* \return The new dcache.
*/
-struct dcache *afs_AllocDCache(struct vcache *avc,
- afs_int32 chunk,
- afs_int32 lock,
- struct VenusFid *ashFid)
+struct dcache *
+afs_AllocDCache(struct vcache *avc, afs_int32 chunk, afs_int32 lock,
+ struct VenusFid *ashFid)
{
struct dcache *tdc = NULL;
afs_uint32 size = 0;
* entries from the free list, and thereby assuming them to be not
* referenced and not locked.
*/
- MObtainReadLock(&afs_xdcache);
+ ObtainReadLock(&afs_xdcache);
dcLocked = (0 == NBObtainSharedLock(&tdc->lock, 601));
if (dcLocked && (tdc->index != NULLIDX)
tdc->refCount++;
ReleaseWriteLock(&tdc->tlock);
- MReleaseReadLock(&afs_xdcache);
+ ReleaseReadLock(&afs_xdcache);
shortcut = 1;
if (hsame(tdc->f.versionNo, avc->f.m.DataVersion)
&& !(tdc->dflags & DFFetching)) {
afs_stats_cmperf.dcacheHits++;
- MObtainWriteLock(&afs_xdcache, 559);
+ ObtainWriteLock(&afs_xdcache, 559);
QRemove(&tdc->lruq);
QAdd(&afs_DLRU, &tdc->lruq);
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
/* Locks held:
* avc->lock(R) if setLocks && !slowPass
} else {
if (dcLocked)
ReleaseSharedLock(&tdc->lock);
- MReleaseReadLock(&afs_xdcache);
+ ReleaseReadLock(&afs_xdcache);
}
if (!shortcut)
/* check to make sure our space is fine */
afs_MaybeWakeupTruncateDaemon();
- MObtainWriteLock(&afs_xdcache, 280);
+ ObtainWriteLock(&afs_xdcache, 280);
us = NULLIDX;
for (index = afs_dchashTbl[i]; index != NULLIDX;) {
if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
afs_dcnextTbl[index] = afs_dchashTbl[i];
afs_dchashTbl[i] = index;
}
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
ObtainSharedLock(&tdc->lock, 606);
break; /* leaving refCount high for caller */
}
#endif
osi_Panic("getdcache");
}
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
/*
* Locks held:
* avc->lock(R) if setLocks
tdc->dflags = DFEntryMod;
tdc->mflags = 0;
afs_MaybeWakeupTruncateDaemon();
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
ConvertWToSLock(&tdc->lock);
}
}
#endif /* AFS_SGI_ENV */
if (AFS_CHUNKTOBASE(chunk) + adjustsize >= avc->f.m.Length &&
#else /* defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV) */
-#if defined(AFS_SUN5_ENV) || defined(AFS_OSF_ENV)
+#if defined(AFS_SUN5_ENV)
if ((doAdjustSize || (AFS_CHUNKTOBASE(chunk) >= avc->f.m.Length)) &&
#else
if (AFS_CHUNKTOBASE(chunk) >= avc->f.m.Length &&
/* Fix up LRU info */
if (tdc) {
- MObtainWriteLock(&afs_xdcache, 602);
+ ObtainWriteLock(&afs_xdcache, 602);
hset(afs_indexTimes[tdc->index], afs_indexCounter);
hadd32(afs_indexCounter, 1);
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
/* return the data */
if (vType(avc) == VDIR)
* holding afs_xdcache. So we enter xdcache, get a reference
* for every dcache entry, and exit xdcache.
*/
- MObtainWriteLock(&afs_xdcache, 283);
+ ObtainWriteLock(&afs_xdcache, 283);
QInit(&DirtyQ);
for (i = 0; i < afs_cacheFiles; i++) {
tdc = afs_indexTable[i];
QAdd(&DirtyQ, &tdc->dirty);
}
}
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
/*
* Now, for each dcache entry we found, check if it's dirty.
/* Now that we have the write lock, double-check */
if (wrLock && (tdc->dflags & DFEntryMod)) {
tdc->dflags &= ~DFEntryMod;
- MObtainWriteLock(&afs_xdcache, 620);
+ ObtainWriteLock(&afs_xdcache, 620);
afs_WriteDCache(tdc, 1);
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
touchedit = 1;
}
if (wrLock)
afs_PutDCache(tdc);
}
- MObtainWriteLock(&afs_xdcache, 617);
+ ObtainWriteLock(&afs_xdcache, 617);
if (!touchedit && (cacheDiskType != AFS_FCACHE_TYPE_MEM)) {
/* Touch the file to make sure that the mtime on the file is kept
* up-to-date to avoid losing cached files on cold starts because
theader.version = AFS_CI_VERSION;
afs_osi_Write(afs_cacheInodep, 0, &theader, sizeof(theader));
}
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
}
/*
if (index >= afs_cacheFiles)
return EINVAL;
- MObtainWriteLock(&afs_xdcache, 282);
+ ObtainWriteLock(&afs_xdcache, 282);
tdc = afs_GetDSlot(index, NULL);
ReleaseReadLock(&tdc->tlock);
- MReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&afs_xdcache);
ObtainWriteLock(&tdc->lock, 621);
- MObtainWriteLock(&afs_xdcache, 622);
+ ObtainWriteLock(&afs_xdcache, 622);
if (afile) {
code = afs_LookupInodeByPath(afile, &tdc->f.inode.ufs, NULL);
if (code) {
}
} else {
/* Add any other 'complex' inode types here ... */
-#if defined(UKERNEL) || !defined(LINUX_USE_FH)
+#if !defined(LINUX_USE_FH) && !defined(AFS_CACHE_VNODE_PATH)
tdc->f.inode.ufs = ainode;
#else
osi_Panic("Can't init cache with inode numbers when complex inodes are "
/* ablocks is reported in 1K blocks */
code = afs_InitMemCache(afiles, AFS_FIRSTCSIZE, aflags);
if (code != 0) {
- printf("afsd: memory cache too large for available memory.\n");
- printf("afsd: AFS files cannot be accessed.\n\n");
+ afs_warn("afsd: memory cache too large for available memory.\n");
+ afs_warn("afsd: AFS files cannot be accessed.\n\n");
dcacheDisabled = 1;
afiles = ablocks = 0;
} else
- printf("Memory cache: Allocating %d dcache entries...",
+ afs_warn("Memory cache: Allocating %d dcache entries...",
aDentries);
} else {
cacheDiskType = AFS_FCACHE_TYPE_UFS;
/* Allocate and zero the pointer array to the dcache entries */
afs_indexTable = (struct dcache **)
afs_osi_Alloc(sizeof(struct dcache *) * afiles);
- memset((char *)afs_indexTable, 0, sizeof(struct dcache *) * afiles);
+ memset(afs_indexTable, 0, sizeof(struct dcache *) * afiles);
afs_indexTimes =
(afs_hyper_t *) afs_osi_Alloc(afiles * sizeof(afs_hyper_t));
- memset((char *)afs_indexTimes, 0, afiles * sizeof(afs_hyper_t));
+ memset(afs_indexTimes, 0, afiles * sizeof(afs_hyper_t));
afs_indexUnique =
(afs_int32 *) afs_osi_Alloc(afiles * sizeof(afs_uint32));
- memset((char *)afs_indexUnique, 0, afiles * sizeof(afs_uint32));
+ memset(afs_indexUnique, 0, afiles * sizeof(afs_uint32));
afs_indexFlags = (u_char *) afs_osi_Alloc(afiles * sizeof(u_char));
- memset((char *)afs_indexFlags, 0, afiles * sizeof(char));
+ memset(afs_indexFlags, 0, afiles * sizeof(char));
/* Allocate and thread the struct dcache entries themselves */
tdp = afs_Initial_freeDSList =
(struct dcache *)afs_osi_Alloc(aDentries * sizeof(struct dcache));
- memset((char *)tdp, 0, aDentries * sizeof(struct dcache));
+ memset(tdp, 0, aDentries * sizeof(struct dcache));
#ifdef KERNEL_HAVE_PIN
pin((char *)afs_indexTable, sizeof(struct dcache *) * afiles); /* XXX */
pin((char *)afs_indexTimes, sizeof(afs_hyper_t) * afiles); /* XXX */
{
int i;
+#ifdef AFS_CACHE_VNODE_PATH
+ if (cacheDiskType != AFS_FCACHE_TYPE_MEM) {
+ struct dcache *tdc;
+ for (i = 0; i < afs_cacheFiles; i++) {
+ tdc = afs_indexTable[i];
+ if (tdc) {
+ afs_osi_FreeStr(tdc->f.inode.ufs);
+ }
+ }
+ }
+#endif
+
afs_osi_Free(afs_dvnextTbl, afs_cacheFiles * sizeof(afs_int32));
afs_osi_Free(afs_dcnextTbl, afs_cacheFiles * sizeof(afs_int32));
afs_osi_Free(afs_indexTable, afs_cacheFiles * sizeof(struct dcache *));
struct dcache *
afs_ObtainDCacheForWriting(struct vcache *avc, afs_size_t filePos,
afs_size_t len, struct vrequest *areq,
- int noLock) {
+ int noLock)
+{
struct dcache *tdc = NULL;
afs_size_t offset;
* \note The vcache entry must be write locked.
* \note The dcache entry must be read locked.
*/
-int afs_MakeShadowDir(struct vcache *avc, struct dcache *adc)
+int
+afs_MakeShadowDir(struct vcache *avc, struct dcache *adc)
{
int i, code, ret_code = 0, written, trans_size;
struct dcache *new_dc = NULL;
/* Alloc a 4k block. */
data = (char *) afs_osi_Alloc(4096);
if (!data) {
- printf("afs_MakeShadowDir: could not alloc data\n");
+ afs_warn("afs_MakeShadowDir: could not alloc data\n");
ret_code = ENOMEM;
goto done;
}
*
* \note avc must be write locked.
*/
-void afs_DeleteShadowDir(struct vcache *avc)
+void
+afs_DeleteShadowDir(struct vcache *avc)
{
struct dcache *tdc;
struct VenusFid shadow_fid;
* \param alen The new length of the file
*
*/
-void afs_PopulateDCache(struct vcache *avc, afs_size_t apos, struct vrequest *areq) {
+void
+afs_PopulateDCache(struct vcache *avc, afs_size_t apos, struct vrequest *areq)
+{
struct dcache *tdc;
afs_size_t len, offset;
afs_int32 start, end;