return 1;
/* This should be replaced with some sort of user configurable function */
- if (avc->states & CRO) {
+ if (avc->f.states & CRO) {
return 2;
- } else if (avc->states & CBackup) {
+ } else if (avc->f.states & CBackup) {
return 1;
} else {
/* RW */
afs_uint32 maxVictimPtr; /* where it is */
int discard;
int curbucket;
+#if defined(AFS_FBSD80_ENV) && !defined(UKERNEL)
int vfslocked;
+#endif
#if defined(AFS_FBSD80_ENV) && !defined(UKERNEL)
vfslocked = VFS_LOCK_GIANT(afs_globalVFS);
if (((phase & 1) == 0) && osi_Active(tvc))
skip = 1;
if (((phase & 1) == 1) && osi_Active(tvc)
- && (tvc->states & CDCLock)
+ && (tvc->f.states & CDCLock)
&& (chunkFlags & IFAnyPages))
skip = 1;
if (chunkFlags & IFDataMod)
MObtainWriteLock(&afs_xdcache, 333);
chunkFlags = afs_indexFlags[tdc->index];
if (tdc->refCount > 1 || (chunkFlags & IFDataMod)
- || (osi_Active(tvc) && (tvc->states & CDCLock)
+ || (osi_Active(tvc) && (tvc->f.states & CDCLock)
&& (chunkFlags & IFAnyPages))) {
skip = 1;
MReleaseWriteLock(&afs_xdcache);
register int i;
AFS_STATCNT(afs_TryToSmush);
afs_Trace2(afs_iclSetp, CM_TRACE_TRYTOSMUSH, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
sync = 1; /* XX Temp testing XX */
#if defined(AFS_SUN5_ENV)
/*
* Get the hash chain containing all dce's for this fid
*/
- i = DVHash(&avc->fid);
+ i = DVHash(&avc->f.fid);
MObtainWriteLock(&afs_xdcache, 277);
for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
i = afs_dvnextTbl[index]; /* next pointer this hash table */
- if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
+ if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
int releaseTlock = 1;
tdc = afs_GetDSlot(index, NULL);
- if (!FidCmp(&tdc->f.fid, &avc->fid)) {
+ if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
if (sync) {
if ((afs_indexFlags[index] & IFDataMod) == 0
&& tdc->refCount == 1) {
afs_uint32 totalChunks = 0;
struct dcache *tdc;
- totalLength = avc->m.Length;
- if (avc->truncPos < totalLength)
- totalLength = avc->truncPos;
+ totalLength = avc->f.m.Length;
+ if (avc->f.truncPos < totalLength)
+ totalLength = avc->f.truncPos;
/* Length is 0, no chunk missing. */
if (totalLength == 0)
totalLength--;
totalChunks = (AFS_CHUNK(totalLength) + 1);
+ /* If we're a directory, we only ever have one chunk, regardless of
+ * the size of the dir.
+ */
+ if (avc->f.fid.Fid.Vnode & 1 || vType(avc) == VDIR)
+ totalChunks = 1;
+
/*
printf("Should have %d chunks for %u bytes\n",
totalChunks, (totalLength + 1));
*/
- i = DVHash(&avc->fid);
+ i = DVHash(&avc->f.fid);
MObtainWriteLock(&afs_xdcache, 1001);
for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
i = afs_dvnextTbl[index];
- if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
+ if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetDSlot(index, NULL);
- if (!FidCmp(&tdc->f.fid, &avc->fid)) {
+ if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
totalChunks--;
}
ReleaseReadLock(&tdc->tlock);
* Hash on the [fid, chunk] and get the corresponding dcache index
* after write-locking the dcache.
*/
- i = DCHash(&avc->fid, chunk);
+ i = DCHash(&avc->f.fid, chunk);
MObtainWriteLock(&afs_xdcache, 278);
for (index = afs_dchashTbl[i]; index != NULLIDX;) {
- if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
+ if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetDSlot(index, NULL);
ReleaseReadLock(&tdc->tlock);
- if (!FidCmp(&tdc->f.fid, &avc->fid) && chunk == tdc->f.chunk) {
+ if (!FidCmp(&tdc->f.fid, &avc->f.fid) && chunk == tdc->f.chunk) {
break; /* leaving refCount high for caller */
}
afs_PutDCache(tdc);
#endif /* AFS_NOSTATS */
afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
- ICL_TYPE_FID, &(avc->fid), ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_INT32, alen);
+ ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, alen);
tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
while (alen > 0) {
tlen = (alen > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : alen);
}
}
afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
- ICL_TYPE_FID, &(avc->fid), ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_INT32, alen);
+ ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, alen);
osi_FreeLargeSpace(tbuffer);
return 0;
* We do not do this for AFS file servers because they sometimes
* return large negative numbers as the transfer size.
*/
- if (avc->states & CForeign) {
+ if (avc->f.states & CForeign) {
moredata = length & 0x80000000;
length &= ~0x80000000;
} else {
tdc->f.fid = *ashFid;
else
/* Use normal vcache's fid otherwise. */
- tdc->f.fid = avc->fid;
- if (avc->states & CRO)
+ tdc->f.fid = avc->f.fid;
+ if (avc->f.states & CRO)
tdc->f.states = DRO;
- else if (avc->states & CBackup)
+ else if (avc->f.states & CBackup)
tdc->f.states = DBackup;
else
tdc->f.states = DRW;
updateV2DC(int lockVc, struct vcache *v, struct dcache *d, int src)
{
if (!lockVc || 0 == NBObtainWriteLock(&v->lock, src)) {
- if (hsame(v->m.DataVersion, d->f.versionNo) && v->callback)
+ if (hsame(v->f.m.DataVersion, d->f.versionNo) && v->callback)
v->dchint = d;
if (lockVc)
ReleaseWriteLock(&v->lock);
struct tlocal1 *tsmall = 0;
register struct dcache *tdc;
register struct osi_file *file;
- register struct conn *tc;
+ register struct afs_conn *tc;
int downDCount = 0;
struct server *newCallback = NULL;
char setNewCallback;
* Determine the chunk number and offset within the chunk corresponding
* to the desired byte.
*/
- if (avc->fid.Fid.Vnode & 1) { /* if (vType(avc) == VDIR) */
+ if (avc->f.fid.Fid.Vnode & 1) { /* if (vType(avc) == VDIR) */
chunk = 0;
} else {
chunk = AFS_CHUNK(abyte);
dcLocked = (0 == NBObtainSharedLock(&tdc->lock, 601));
if (dcLocked && (tdc->index != NULLIDX)
- && !FidCmp(&tdc->f.fid, &avc->fid) && chunk == tdc->f.chunk
+ && !FidCmp(&tdc->f.fid, &avc->f.fid) && chunk == tdc->f.chunk
&& !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
/* got the right one. It might not be the right version, and it
* might be fetching, but it's the right dcache entry.
MReleaseReadLock(&afs_xdcache);
shortcut = 1;
- if (hsame(tdc->f.versionNo, avc->m.DataVersion)
+ if (hsame(tdc->f.versionNo, avc->f.m.DataVersion)
&& !(tdc->dflags & DFFetching)) {
afs_stats_cmperf.dcacheHits++;
* avc->lock(W) if !setLocks || slowPass
*/
- i = DCHash(&avc->fid, chunk);
+ i = DCHash(&avc->f.fid, chunk);
/* check to make sure our space is fine */
afs_MaybeWakeupTruncateDaemon();
MObtainWriteLock(&afs_xdcache, 280);
us = NULLIDX;
for (index = afs_dchashTbl[i]; index != NULLIDX;) {
- if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
+ if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetDSlot(index, NULL);
ReleaseReadLock(&tdc->tlock);
/*
* avc->lock(W) if !setLocks || slowPass
* afs_xdcache(W)
*/
- if (!FidCmp(&tdc->f.fid, &avc->fid) && chunk == tdc->f.chunk) {
+ if (!FidCmp(&tdc->f.fid, &avc->f.fid) && chunk == tdc->f.chunk) {
/* Move it up in the beginning of the list */
if (afs_dchashTbl[i] != index) {
afs_dcnextTbl[us] = afs_dcnextTbl[index];
if (afs_discardDCList == NULLIDX && afs_freeDCList == NULLIDX) {
while (1) {
if (!setLocks)
- avc->states |= CDCLock;
+ avc->f.states |= CDCLock;
/* just need slots */
afs_GetDownD(5, (int *)0, afs_DCGetBucket(avc));
if (!setLocks)
- avc->states &= ~CDCLock;
+ avc->f.states &= ~CDCLock;
if (afs_discardDCList != NULLIDX
|| afs_freeDCList != NULLIDX)
break;
*/
afs_dcnextTbl[tdc->index] = afs_dchashTbl[i];
afs_dchashTbl[i] = tdc->index;
- i = DVHash(&avc->fid);
+ i = DVHash(&avc->f.fid);
afs_dvnextTbl[tdc->index] = afs_dvhashTbl[i];
afs_dvhashTbl[i] = tdc->index;
tdc->dflags = DFEntryMod;
afs_Trace4(afs_iclSetp, CM_TRACE_GETDCACHE2, ICL_TYPE_POINTER, avc,
ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32,
hgetlo(tdc->f.versionNo), ICL_TYPE_INT32,
- hgetlo(avc->m.DataVersion));
+ hgetlo(avc->f.m.DataVersion));
/*
* Here we have the entry in tdc, with its refCount incremented.
* Note: we don't use the S-lock on avc; it costs concurrency when
ICL_TYPE_INT32, aflags, ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(abyte), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(Position));
- if ((aflags & 4) && (hiszero(avc->m.DataVersion)))
+ if ((aflags & 4) && (hiszero(avc->f.m.DataVersion)))
doAdjustSize = 1;
- if ((AFS_CHUNKTOBASE(chunk) >= avc->m.Length) ||
+ if ((AFS_CHUNKTOBASE(chunk) >= avc->f.m.Length) ||
((aflags & 4) && (abyte == Position) && (tlen >= size)))
overWriteWholeChunk = 1;
if (doAdjustSize || overWriteWholeChunk) {
if (doAdjustSize)
adjustsize = 4096;
#endif /* AFS_SGI_ENV */
- if (AFS_CHUNKTOBASE(chunk) + adjustsize >= avc->m.Length &&
+ if (AFS_CHUNKTOBASE(chunk) + adjustsize >= avc->f.m.Length &&
#else /* defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV) */
#if defined(AFS_SUN5_ENV) || defined(AFS_OSF_ENV)
- if ((doAdjustSize || (AFS_CHUNKTOBASE(chunk) >= avc->m.Length)) &&
+ if ((doAdjustSize || (AFS_CHUNKTOBASE(chunk) >= avc->f.m.Length)) &&
#else
- if (AFS_CHUNKTOBASE(chunk) >= avc->m.Length &&
+ if (AFS_CHUNKTOBASE(chunk) >= avc->f.m.Length &&
#endif
#endif /* defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV) */
- !hsame(avc->m.DataVersion, tdc->f.versionNo))
+ !hsame(avc->f.m.DataVersion, tdc->f.versionNo))
doReallyAdjustSize = 1;
if (doReallyAdjustSize || overWriteWholeChunk) {
afs_CFileTruncate(file, 0);
afs_CFileClose(file);
afs_AdjustSize(tdc, 0);
- hset(tdc->f.versionNo, avc->m.DataVersion);
+ hset(tdc->f.versionNo, avc->f.m.DataVersion);
tdc->dflags |= DFEntryMod;
ConvertWToSLock(&tdc->lock);
* avc->lock(W) if !setLocks || slowPass
* tdc->lock(S)
*/
- if (!hsame(avc->m.DataVersion, tdc->f.versionNo) && !overWriteWholeChunk) {
+ if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) && !overWriteWholeChunk) {
/*
* Version number mismatch.
*/
* flush. Clearly, at least, we don't have to flush the file more
* often than it changes
*/
- if (hcmp(avc->flushDV, avc->m.DataVersion) < 0) {
+ if (hcmp(avc->flushDV, avc->f.m.DataVersion) < 0) {
/*
* By here, the cache entry is always write-locked. We can
* deadlock if we call osi_Flush with the cache entry locked...
*/
/* Watch for standard race condition around osi_FlushText */
- if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
updateV2DC(setLocks, avc, tdc, 569); /* set hint */
afs_stats_cmperf.dcacheHits++;
ConvertWToSLock(&tdc->lock);
}
/* Do not fetch data beyond truncPos. */
- maxGoodLength = avc->m.Length;
- if (avc->truncPos < maxGoodLength)
- maxGoodLength = avc->truncPos;
+ maxGoodLength = avc->f.m.Length;
+ if (avc->f.truncPos < maxGoodLength)
+ maxGoodLength = avc->f.truncPos;
Position = AFS_CHUNKBASE(abyte);
if (vType(avc) == VDIR) {
- size = avc->m.Length;
+ size = avc->f.m.Length;
if (size > tdc->f.chunkBytes) {
/* pre-reserve space for file */
afs_AdjustSize(tdc, size);
#else
file = afs_CFileOpen(tdc->f.inode);
#endif
- afs_RemoveVCB(&avc->fid);
+ afs_RemoveVCB(&avc->f.fid);
tdc->f.states |= DWriting;
tdc->dflags |= DFFetching;
tdc->validPos = Position; /* which is AFS_CHUNKBASE(abyte) */
* Remember if we are doing the reading from a replicated volume,
* and how many times we've zipped around the fetch/analyze loop.
*/
- fromReplica = (avc->states & CRO) ? 1 : 0;
+ fromReplica = (avc->f.states & CRO) ? 1 : 0;
numFetchLoops = 0;
accP = &(afs_stats_cmfullperf.accessinf);
if (fromReplica)
#endif /* AFS_NOSTATS */
/* this is a cache miss */
afs_Trace4(afs_iclSetp, CM_TRACE_FETCHPROC, ICL_TYPE_POINTER, avc,
- ICL_TYPE_FID, &(avc->fid), ICL_TYPE_OFFSET,
+ ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(Position), ICL_TYPE_INT32, size);
if (size)
* tdc->lock(W)
*/
- tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
if (tc) {
afs_int32 length_hi, length, bytes;
#ifndef AFS_NOSTATS
RX_AFS_GUNLOCK();
code =
StartRXAFS_FetchData64(tcall,
- (struct AFSFid *)&avc->fid.
+ (struct AFSFid *)&avc->f.fid.
Fid, Position, tsize);
if (code != 0) {
RX_AFS_GLOCK();
tcall = rx_NewCall(tc->id);
code =
StartRXAFS_FetchData(tcall, (struct AFSFid *)
- &avc->fid.Fid, pos,
+ &avc->f.fid.Fid, pos,
size);
RX_AFS_GLOCK();
}
RX_AFS_GUNLOCK();
code =
StartRXAFS_FetchData(tcall,
- (struct AFSFid *)&avc->fid.Fid,
+ (struct AFSFid *)&avc->f.fid.Fid,
Position, size);
RX_AFS_GLOCK();
if (code == 0) {
if (!setLocks || slowPass) {
ObtainWriteLock(&afs_xcbhash, 453);
afs_DequeueCallback(avc);
- avc->states &= ~(CStatd | CUnique);
+ avc->f.states &= ~(CStatd | CUnique);
avc->callback = NULL;
ReleaseWriteLock(&afs_xcbhash);
- if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
+ if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
osi_dnlc_purgedp(avc);
} else {
/* Something lost. Forget about performance, and go
}
} while (afs_Analyze
- (tc, code, &avc->fid, areq,
+ (tc, code, &avc->f.fid, areq,
AFS_STATS_FS_RPCIDX_FETCHDATA, SHARED_LOCK, NULL));
/*
if (!afs_IsDynroot(avc)) {
ObtainWriteLock(&afs_xcbhash, 454);
afs_DequeueCallback(avc);
- avc->states &= ~(CStatd | CUnique);
+ avc->f.states &= ~(CStatd | CUnique);
ReleaseWriteLock(&afs_xcbhash);
- if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
+ if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
osi_dnlc_purgedp(avc);
/*
* Locks held:
/*
* See if this was a reference to a file in the local cell.
*/
- if (afs_IsPrimaryCellNum(avc->fid.Cell))
+ if (afs_IsPrimaryCellNum(avc->f.fid.Cell))
afs_stats_cmperf.dlocalAccesses++;
else
afs_stats_cmperf.dremoteAccesses++;
*/
afs_hyper_t currentDV, statusDV;
- hset(currentDV, avc->m.DataVersion);
+ hset(currentDV, avc->f.m.DataVersion);
if (setNewCallback && avc->callback != newCallback)
doVcacheUpdate = 1;
hset64(statusDV, tsmall->OutStatus.dataVersionHigh,
tsmall->OutStatus.DataVersion);
- if (setVcacheStatus && avc->m.Length != tsmall->OutStatus.Length)
+ if (setVcacheStatus && avc->f.m.Length != tsmall->OutStatus.Length)
doVcacheUpdate = 1;
if (setVcacheStatus && !hsame(currentDV, statusDV))
doVcacheUpdate = 1;
if (doVcacheUpdate) {
ObtainWriteLock(&avc->lock, 615);
- if (!hsame(avc->m.DataVersion, currentDV)) {
+ if (!hsame(avc->f.m.DataVersion, currentDV)) {
/* We lose. Someone will beat us to it. */
doVcacheUpdate = 0;
ReleaseWriteLock(&avc->lock);
* I think this is redundant now because this sort of thing
* is already being handled by the higher-level code.
*/
- if ((avc->states & CSafeStore) == 0) {
+ if ((avc->f.states & CSafeStore) == 0) {
tb->code = 0;
tb->flags |= BUVALID;
if (tb->flags & BUWAIT) {
tdc->f.fh_type = osi_get_fh(filevp, &tdc->f.fh, &max_len);
#else
tdc->f.inode = VTOI(filevp->d_inode)->i_number;
+ dput(filevp);
#endif
#else
tdc->f.inode = afs_vnodeToInumber(filevp);
#if defined(LINUX_USE_FH)
tfile = osi_UFSOpen_fh(&tdc->f.fh, tdc->f.fh_type);
#else
- tfile = osi_UFSOpen(ainode);
+ tfile = osi_UFSOpen(tdc->f.inode);
#endif
code = afs_osi_Stat(tfile, &tstat);
if (code)
}
+/*!
+ * Get a dcache ready for writing, respecting the current cache size limits
+ *
+ * len is required because afs_GetDCache with flag == 4 expects the length
+ * field to be filled. It decides from this whether it's necessary to fetch
+ * data into the chunk before writing or not (when the whole chunk is
+ * overwritten!).
+ *
+ * \param avc The vcache to fetch a dcache for
+ * \param filePos The start of the section to be written
+ * \param len The length of the section to be written
+ * \param areq
+ * \param noLock
+ *
+ * \return If successful, a reference counted dcache with tdc->lock held. Lock
+ * must be released and afs_PutDCache() called to free dcache.
+ * NULL on failure
+ *
+ * \note avc->lock must be held on entry. Function may release and reobtain
+ * avc->lock and GLOCK.
+ */
+
+struct dcache *
+afs_ObtainDCacheForWriting(struct vcache *avc, afs_size_t filePos,
+ afs_size_t len, struct vrequest *areq,
+ int noLock) {
+ struct dcache *tdc = NULL;
+ afs_size_t offset;
+
+ /* read the cached info */
+ if (noLock) {
+ tdc = afs_FindDCache(avc, filePos);
+ if (tdc)
+ ObtainWriteLock(&tdc->lock, 657);
+ } else if (afs_blocksUsed >
+ PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
+ tdc = afs_FindDCache(avc, filePos);
+ if (tdc) {
+ ObtainWriteLock(&tdc->lock, 658);
+ if (!hsame(tdc->f.versionNo, avc->f.m.DataVersion)
+ || (tdc->dflags & DFFetching)) {
+ ReleaseWriteLock(&tdc->lock);
+ afs_PutDCache(tdc);
+ tdc = NULL;
+ }
+ }
+ if (!tdc) {
+ afs_MaybeWakeupTruncateDaemon();
+ while (afs_blocksUsed >
+ PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
+ ReleaseWriteLock(&avc->lock);
+ if (afs_blocksUsed - afs_blocksDiscarded >
+ PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
+ afs_WaitForCacheDrain = 1;
+ afs_osi_Sleep(&afs_WaitForCacheDrain);
+ }
+ afs_MaybeFreeDiscardedDCache();
+ afs_MaybeWakeupTruncateDaemon();
+ ObtainWriteLock(&avc->lock, 509);
+ }
+ avc->f.states |= CDirty;
+ tdc = afs_GetDCache(avc, filePos, areq, &offset, &len, 4);
+ if (tdc)
+ ObtainWriteLock(&tdc->lock, 659);
+ }
+ } else {
+ tdc = afs_GetDCache(avc, filePos, areq, &offset, &len, 4);
+ if (tdc)
+ ObtainWriteLock(&tdc->lock, 660);
+ }
+ if (tdc) {
+ if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
+ afs_stats_cmperf.cacheCurrDirtyChunks++;
+ afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
+ }
+ if (!(tdc->f.states & DWriting)) {
+ /* don't mark entry as mod if we don't have to */
+ tdc->f.states |= DWriting;
+ tdc->dflags |= DFEntryMod;
+ }
+ }
+ return tdc;
+}
+
#if defined(AFS_DISCON_ENV)
/*!
- * Make a shadow copy of a dir's dcaches. It's used for disconnected
+ * Make a shadow copy of a dir's dcache. It's used for disconnected
* operations like remove/create/rename to keep the original directory data.
* On reconnection, we can diff the original data with the server and get the
* server changes and with the local data to get the local changes.
*
* \param avc The dir vnode.
+ * \param adc The dir dcache.
*
* \return 0 for success.
*
- * \note The only lock allowed to be set is the dir's vcache entry, and it
- * must be set in write mode.
* \note The vcache entry must be write locked.
+ * \note The dcache entry must be read locked.
*/
-int afs_MakeShadowDir(struct vcache *avc)
+int afs_MakeShadowDir(struct vcache *avc, struct dcache *adc)
{
- int j, i, index, code, ret_code = 0, offset, trans_size, block;
- struct dcache *tdc, *new_dc = NULL;
+ int i, code, ret_code = 0, written, trans_size;
+ struct dcache *new_dc = NULL;
struct osi_file *tfile_src, *tfile_dst;
struct VenusFid shadow_fid;
char *data;
if (vType(avc) != VDIR)
return ENOTDIR;
+ if (avc->f.shadow.vnode || avc->f.shadow.unique)
+ return EEXIST;
+
/* Generate a fid for the shadow dir. */
- shadow_fid.Cell = avc->fid.Cell;
- shadow_fid.Fid.Volume = avc->fid.Fid.Volume;
+ shadow_fid.Cell = avc->f.fid.Cell;
+ shadow_fid.Fid.Volume = avc->f.fid.Fid.Volume;
afs_GenShadowFid(&shadow_fid);
- /* For each dcache, do copy it into a new fresh one. */
- i = DVHash(&avc->fid);
- for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
- /* Making sure that this isn't going to get locked twice. */
- if (!lock_held) {
- /* XXX: Moved it from outside of the loop.
- * Maybe it's not quite okay because of the use of
- * dvhashTbl (once) in the for statement.
- */
- ObtainWriteLock(&afs_xdcache, 716);
- lock_held = 1;
- }
-
- i = afs_dvnextTbl[index];
- if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
- tdc = afs_GetDSlot(index, NULL);
-
- ReleaseReadLock(&tdc->tlock);
-
- if (!FidCmp(&tdc->f.fid, &avc->fid)) {
-
- /* Got a dir's dcache. */
- lock_held = 0;
+ ObtainWriteLock(&afs_xdcache, 716);
- /* Get a fresh dcache. */
- new_dc = afs_AllocDCache(avc, 0, 0, &shadow_fid);
+ /* Get a fresh dcache. */
+ new_dc = afs_AllocDCache(avc, 0, 0, &shadow_fid);
- /* Unlock hash for now. Don't need it during operations on the
- * dcache. Oh, and we can't use it because of the locking
- * hierarchy...
- */
- /* XXX: So much for lock ierarchy, the afs_AllocDCache doesn't
- * respect it.
- */
- //ReleaseWriteLock(&afs_xdcache);
-
- ObtainReadLock(&tdc->lock);
-
- /* Set up the new fid. */
- /* Copy interesting data from original dir dcache. */
- new_dc->mflags = tdc->mflags;
- new_dc->dflags = tdc->dflags;
- new_dc->f.modTime = tdc->f.modTime;
- new_dc->f.versionNo = tdc->f.versionNo;
- new_dc->f.states = tdc->f.states;
- new_dc->f.chunk= tdc->f.chunk;
- new_dc->f.chunkBytes = tdc->f.chunkBytes;
-
- /*
- * Now add to the two hash chains - note that i is still set
- * from the above DCHash call.
- */
- //ObtainWriteLock(&afs_xdcache, 713);
+ ObtainReadLock(&adc->mflock);
- j = DCHash(&shadow_fid, 0);
- afs_dcnextTbl[new_dc->index] = afs_dchashTbl[j];
- afs_dchashTbl[j] = new_dc->index;
+ /* Set up the new fid. */
+ /* Copy interesting data from original dir dcache. */
+ new_dc->mflags = adc->mflags;
+ new_dc->dflags = adc->dflags;
+ new_dc->f.modTime = adc->f.modTime;
+ new_dc->f.versionNo = adc->f.versionNo;
+ new_dc->f.states = adc->f.states;
+ new_dc->f.chunk= adc->f.chunk;
+ new_dc->f.chunkBytes = adc->f.chunkBytes;
- j = DVHash(&shadow_fid);
- afs_dvnextTbl[new_dc->index] = afs_dvhashTbl[j];
- afs_dvhashTbl[j] = new_dc->index;
- afs_MaybeWakeupTruncateDaemon();
+ ReleaseReadLock(&adc->mflock);
+
+ /* Now add to the two hash chains */
+ i = DCHash(&shadow_fid, 0);
+ afs_dcnextTbl[new_dc->index] = afs_dchashTbl[i];
+ afs_dchashTbl[i] = new_dc->index;
- ReleaseWriteLock(&afs_xdcache);
+ i = DVHash(&shadow_fid);
+ afs_dvnextTbl[new_dc->index] = afs_dvhashTbl[i];
+ afs_dvhashTbl[i] = new_dc->index;
- /* Alloc a 4k block. */
- data = (char *) afs_osi_Alloc(4096);
- if (!data) {
- printf("afs_MakeShadowDir: could not alloc data\n");
- ret_code = ENOMEM;
- goto done;
- }
+ ReleaseWriteLock(&afs_xdcache);
- /* Open the files. */
- tfile_src = afs_CFileOpen(tdc->f.inode);
- tfile_dst = afs_CFileOpen(new_dc->f.inode);
+ /* Alloc a 4k block. */
+ data = (char *) afs_osi_Alloc(4096);
+ if (!data) {
+ printf("afs_MakeShadowDir: could not alloc data\n");
+ ret_code = ENOMEM;
+ goto done;
+ }
- /* Init no of blocks to be read and offset. */
- block = (tdc->f.chunkBytes / 4096);
- offset = 0;
+ /* Open the files. */
+ tfile_src = afs_CFileOpen(adc->f.inode);
+ tfile_dst = afs_CFileOpen(new_dc->f.inode);
- /* And now copy dir dcache data into this dcache,
- * 4k at a time.
- */
- while (block >= 0) {
-
- /* Last chunk might have less bytes to transfer. */
- if (!block) {
- /* Last block. */
- trans_size = (tdc->f.chunkBytes % 4096);
- if (!trans_size)
- /* An exact no of 4k blocks. */
- break;
- } else
- trans_size = 4096;
-
- /* Read a chunk from the dcache. */
- code = afs_CFileRead(tfile_src, offset, data, trans_size);
- if (code < trans_size) {
- /* Can't access file, stop doing stuff and return error. */
- ret_code = EIO;
- break;
- }
+ /* And now copy dir dcache data into this dcache,
+ * 4k at a time.
+ */
+ written = 0;
+ while (written < adc->f.chunkBytes) {
+ trans_size = adc->f.chunkBytes - written;
+ if (trans_size > 4096)
+ trans_size = 4096;
+
+ /* Read a chunk from the dcache. */
+ code = afs_CFileRead(tfile_src, written, data, trans_size);
+ if (code < trans_size) {
+ ret_code = EIO;
+ break;
+ }
- /* Write it to the new dcache. */
- code = afs_CFileWrite(tfile_dst, offset, data, trans_size);
- if (code < trans_size) {
- ret_code = EIO;
- break;
- }
+ /* Write it to the new dcache. */
+ code = afs_CFileWrite(tfile_dst, written, data, trans_size);
+ if (code < trans_size) {
+ ret_code = EIO;
+ break;
+ }
- block--;
- offset += 4096;
- } /* while (block) */
+ written+=trans_size;
+ }
- afs_CFileClose(tfile_dst);
- afs_CFileClose(tfile_src);
+ afs_CFileClose(tfile_dst);
+ afs_CFileClose(tfile_src);
- afs_osi_Free(data, 4096);
+ afs_osi_Free(data, 4096);
- ReleaseWriteLock(&new_dc->lock);
- ReleaseReadLock(&tdc->lock);
-
- afs_PutDCache(new_dc);
- } /* if dcache fid match */
- afs_PutDCache(tdc);
- } /* if unuiquifier match */
- }
-done:
- if (lock_held)
- ReleaseWriteLock(&afs_xdcache);
+ ReleaseWriteLock(&new_dc->lock);
+ afs_PutDCache(new_dc);
if (!ret_code) {
- if (!avc->ddirty_flags) {
- ObtainWriteLock(&afs_DDirtyVCListLock, 763);
- AFS_DISCON_ADD_DIRTY(avc, 1);
- ReleaseWriteLock(&afs_DDirtyVCListLock);
- }
- avc->shVnode = shadow_fid.Fid.Vnode;
- avc->shUnique = shadow_fid.Fid.Unique;
- avc->ddirty_flags |= VDisconShadowed;
+ ObtainWriteLock(&afs_xvcache, 763);
+ ObtainWriteLock(&afs_disconDirtyLock, 765);
+ QAdd(&afs_disconShadow, &avc->shadowq);
+ osi_vnhold(avc, 0);
+ ReleaseWriteLock(&afs_disconDirtyLock);
+ ReleaseWriteLock(&afs_xvcache);
+
+ avc->f.shadow.vnode = shadow_fid.Fid.Vnode;
+ avc->f.shadow.unique = shadow_fid.Fid.Unique;
}
+done:
return ret_code;
}
struct dcache *tdc;
struct VenusFid shadow_fid;
- shadow_fid.Cell = avc->fid.Cell;
- shadow_fid.Fid.Volume = avc->fid.Fid.Volume;
- shadow_fid.Fid.Vnode = avc->shVnode;
- shadow_fid.Fid.Unique = avc->shUnique;
+ shadow_fid.Cell = avc->f.fid.Cell;
+ shadow_fid.Fid.Volume = avc->f.fid.Fid.Volume;
+ shadow_fid.Fid.Vnode = avc->f.shadow.vnode;
+ shadow_fid.Fid.Unique = avc->f.shadow.unique;
tdc = afs_FindDCacheByFid(&shadow_fid);
if (tdc) {
afs_DiscardDCache(tdc);
afs_PutDCache(tdc);
}
- /* Remove shadowed dir flag. */
- avc->ddirty_flags &= ~VDisconShadowed;
+ avc->f.shadow.vnode = avc->f.shadow.unique = 0;
+ ObtainWriteLock(&afs_disconDirtyLock, 708);
+ QRemove(&avc->shadowq);
+ ReleaseWriteLock(&afs_disconDirtyLock);
+}
+
+/*!
+ * Populate a dcache with empty chunks up to a given file size,
+ * used before extending a file in order to avoid 'holes' which
+ * we can't access in disconnected mode.
+ *
+ * \param avc The vcache which is being extended (locked)
+ * \param apos The new length of the file
+ * \param areq The vrequest passed to afs_GetDCache for each chunk
+ *
+ */
+void afs_PopulateDCache(struct vcache *avc, afs_size_t apos, struct vrequest *areq) {
+    struct dcache *tdc;
+    afs_size_t len, offset;
+    afs_int32 start, end;
+
+    /* We're doing this to deal with the situation where we extend
+     * by writing after lseek()ing past the end of the file. If that
+     * extension skips chunks, then those chunks won't be created, and
+     * GetDCache will assume that they have to be fetched from the server.
+     * So, for each chunk between the current file position, and the new
+     * length we GetDCache for that chunk.
+     */
+
+    /* Nothing to do if the write lands in chunk 0 or doesn't extend
+     * the file at all. */
+    if (AFS_CHUNK(apos) == 0 || apos <= avc->f.m.Length)
+	return;
+
+    if (avc->f.m.Length == 0)
+	start = 0;
+    else
+	start = AFS_CHUNK(avc->f.m.Length)+1;
+
+    end = AFS_CHUNK(apos);
+
+    /* Materialise every intermediate chunk; flag 4 tells GetDCache a
+     * write of 'len' bytes is coming, so no server fetch is needed. */
+    while (start<end) {
+	len = AFS_CHUNKTOSIZE(start);
+	tdc = afs_GetDCache(avc, AFS_CHUNKTOBASE(start), areq, &offset, &len, 4);
+	if (tdc)
+	    afs_PutDCache(tdc);
+	start++;
+    }
}
+
#endif