2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * --------------------- Required definitions ---------------------
13 #include <afsconfig.h>
14 #include "afs/param.h"
17 #include "afs/sysincludes.h" /*Standard vendor system headers */
18 #include "afsincludes.h" /*AFS-based standard headers */
19 #include "afs/afs_stats.h" /* statistics */
20 #include "afs/afs_cbqueue.h"
21 #include "afs/afs_osidnlc.h"
/* Global stamp counter, zero-initialized at module load.
 * NOTE(review): no consumers are visible in this chunk of the file —
 * confirm its users (likely store-op stamping) before relying on this. */
23 afs_uint32 afs_stampValue = 0;
29 * Send a truncation request to a FileServer.
35 * We're write-locked upon entry.
/*
 * afs_StoreMini
 *
 * Send a truncation request to the fileserver by issuing a zero-byte
 * StoreData RPC: length 0 is stored at offset f.m.Length with file
 * length 'tlen', so the server adopts the new file length without any
 * data transfer.  Used when the client changed the file length but has
 * no dirty data to carry it (see caller in afs_StoreAllSegments).
 *
 * avc  : vcache entry for the file; caller holds it write-locked.
 * areq : request structure for connection selection / error analysis.
 *
 * NOTE(review): several lines of this function are not visible in this
 * chunk (e.g. declarations of 'tc'/'code', l1/l2 setup, retry loop).
 */
39 afs_StoreMini(struct vcache *avc, struct vrequest *areq)
42 struct AFSStoreStatus InStatus;
43 struct AFSFetchStatus OutStatus;
44 struct AFSVolSync tsync;
46 struct rx_call *tcall;
47 struct rx_connection *rxconn;
48 afs_size_t tlen, xlen = 0;
50 AFS_STATCNT(afs_StoreMini);
51 afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
52 ICL_TYPE_INT32, avc->f.m.Length);
/* Effective server-side file length: the smaller of the current length
 * and any pending truncation position. */
53 tlen = avc->f.m.Length;
54 if (avc->f.truncPos < tlen)
55 tlen = avc->f.truncPos;
/* The truncation/extension is being pushed to the server now, so clear
 * the pending markers on the vcache. */
56 avc->f.truncPos = AFS_NOTRUNC;
57 avc->f.states &= ~CExtendedFile;
58 memset(&InStatus, 0, sizeof(InStatus));
61 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
63 #ifdef AFS_64BIT_CLIENT
67 tcall = rx_NewCall(rxconn);
69 /* Set the client mod time since we always want the file
70 * to have the client's mod time and not the server's one
71 * (to avoid problems with make, etc.) It almost always
72 * works fine with standard afs because the server/client
73 * times are in sync and more importantly this storemini
74 * it's a special call that would typically be followed by
75 * the proper store-data or store-status calls.
77 InStatus.Mask = AFS_SETMODTIME;
78 InStatus.ClientModTime = avc->f.m.Date;
79 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
80 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64, ICL_TYPE_FID,
81 &avc->f.fid.Fid, ICL_TYPE_OFFSET,
82 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
83 ICL_HANDLE_OFFSET(xlen), ICL_TYPE_OFFSET,
84 ICL_HANDLE_OFFSET(tlen));
/* Prefer the 64-bit RPC; fall back to the 32-bit variant for servers
 * that have been observed not to support it. */
86 #ifdef AFS_64BIT_CLIENT
87 if (!afs_serverHasNo64Bit(tc)) {
89 StartRXAFS_StoreData64(tcall,
90 (struct AFSFid *)&avc->f.fid.Fid,
91 &InStatus, avc->f.m.Length,
92 (afs_size_t) 0, tlen);
/* 32-bit fallback: refuse values that don't fit in a signed 32-bit
 * offset/length.  NOTE(review): the l1/l2 arguments below are computed
 * in lines not visible here — presumably the 32-bit narrowings of
 * f.m.Length and tlen; confirm against the full source. */
97 if ((avc->f.m.Length > 0x7fffffff) ||
98 (tlen > 0x7fffffff) ||
99 ((0x7fffffff - tlen) < avc->f.m.Length)) {
104 StartRXAFS_StoreData(tcall,
105 (struct AFSFid *)&avc->f.fid.Fid,
106 &InStatus, l1, 0, l2);
108 #else /* AFS_64BIT_CLIENT */
110 StartRXAFS_StoreData(tcall, (struct AFSFid *)&avc->f.fid.Fid,
111 &InStatus, avc->f.m.Length, 0, tlen);
112 #endif /* AFS_64BIT_CLIENT */
114 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
116 #ifdef AFS_64BIT_CLIENT
119 code = rx_EndCall(tcall, code);
/* RXGEN_OPCODE from a 64-bit call means the server lacks the 64-bit
 * interface; remember that so future calls use the 32-bit path. */
122 #ifdef AFS_64BIT_CLIENT
123 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
124 afs_serverSetNo64Bit(tc);
127 #endif /* AFS_64BIT_CLIENT */
131 (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STOREDATA,
/* On success, fold the returned status into the vcache. */
135 afs_ProcessFS(avc, &OutStatus, areq);
141 * afs_StoreAllSegments
144 * Stores all modified segments back to server
147 * avc : Pointer to vcache entry.
148 * areq : Pointer to request structure.
151 * Called with avc write-locked.
/* Number of dirty chunks gathered and stored per pass of
 * afs_StoreAllSegments.  HP-UX uses a smaller batch (3) than other
 * platforms (64).  NOTE(review): the #else/#endif lines of this
 * conditional are not visible in this chunk. */
153 #if defined (AFS_HPUX_ENV)
154 int NCHUNKSATONCE = 3;
156 int NCHUNKSATONCE = 64;
/*
 * afs_StoreAllSegments
 *
 * Store all modified (IFDataMod) chunks of 'avc' back to the
 * fileserver, in batches of NCHUNKSATONCE contiguous chunks, then
 * relabel the surviving dcache entries with the new DataVersion.
 * Called with avc write-locked; temporarily downgrades to a shared
 * lock around the store loop and re-upgrades afterwards.
 *
 * NOTE(review): interior lines of this function are missing from this
 * chunk (declarations of tdc/index/hash/code/sync, the do{} opener,
 * several closing braces), so phase comments below describe only what
 * the visible lines show.
 */
162 afs_StoreAllSegments(struct vcache *avc, struct vrequest *areq,
168 afs_int32 origCBs, foreign = 0;
170 afs_hyper_t newDV, oldDV; /* DV when we start, and finish, respectively */
171 struct dcache **dcList;
172 unsigned int i, j, minj, moredata, high, off;
173 afs_size_t maxStoredLength; /* highest offset we've written to server. */
174 int safety, marineronce = 0;
176 AFS_STATCNT(afs_StoreAllSegments);
178 hash = DVHash(&avc->f.fid);
179 foreign = (avc->f.states & CForeign);
180 dcList = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
181 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
182 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
183 #if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
184 /* In the aix vm implementation we need to do the vm_writep even
185 * on the memcache case since that's where we adjust the file's size
186 * and finish flushing partial vm pages.
188 if ((cacheDiskType != AFS_FCACHE_TYPE_MEM) ||
189 (sync & AFS_VMSYNC_INVAL) || (sync & AFS_VMSYNC) ||
190 (sync & AFS_LASTSTORE))
191 #endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
193 /* If we're not diskless, reading a file may stress the VM
194 * system enough to cause a pageout, and this vnode would be
195 * locked when the pageout occurs. We can prevent this problem
196 * by making sure all dirty pages are already flushed. We don't
197 * do this when diskless because reading a diskless (i.e.
198 * memory-resident) chunk doesn't require using new VM, and we
199 * also don't want to dump more dirty data into a diskless cache,
200 * since they're smaller, and we might exceed its available
203 #if defined(AFS_SUN5_ENV)
204 if (sync & AFS_VMSYNC_INVAL) /* invalidate VM pages */
205 osi_VM_TryToSmush(avc, CRED(), 1);
208 osi_VM_StoreAllSegments(avc);
210 if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
211 /* This will probably make someone sad ... */
212 /*printf("Net down in afs_StoreSegments\n");*/
/* Snapshot the DataVersion before and after markers.
217 * Can't do this earlier because osi_VM_StoreAllSegments drops locks
218 * and can indirectly do some stores that increase the DV.
220 hset(oldDV, avc->f.m.DataVersion);
221 hset(newDV, avc->f.m.DataVersion);
223 ConvertWToSLock(&avc->lock);
226 * Subsequent code expects a sorted list, and it expects all the
227 * chunks in the list to be contiguous, so we need a sort and a
228 * while loop in here, too - but this will work for a first pass...
229 * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
230 * bin sort, I guess. Chunk numbers start with 0
232 * - Have to get a write lock on xdcache because GetDSlot might need it (if
233 * the chunk doesn't have a dcache struct).
234 * This seems like overkill in most cases.
235 * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
236 * xdcache, then relock xdcache and try to use index. It is done
237 * a lot elsewhere in the CM, but I'm not buying that argument.
238 * - should be able to check IFDataMod without doing the GetDSlot (just
239 * hold afs_xdcache). That way, it's easy to do this without the
240 * writelock on afs_xdcache, and we save unnecessary disk
241 * operations. I don't think that works, 'cuz the next pointers
244 origCBs = afs_allCBs;
/* Phase 1: walk the DV hash chain and bin-sort this file's dirty
 * chunks in [minj, minj+NCHUNKSATONCE) into dcList slots. */
250 memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
254 /* lock and start over from beginning of hash chain
255 * in order to avoid a race condition. */
256 ObtainWriteLock(&afs_xdcache, 284);
257 index = afs_dvhashTbl[hash];
259 for (j = 0; index != NULLIDX;) {
260 if ((afs_indexFlags[index] & IFDataMod)
261 && (afs_indexUnique[index] == avc->f.fid.Fid.Unique)) {
262 tdc = afs_GetValidDSlot(index); /* refcount+1. */
264 ReleaseWriteLock(&afs_xdcache);
268 ReleaseReadLock(&tdc->tlock);
269 if (!FidCmp(&tdc->f.fid, &avc->f.fid) && tdc->f.chunk >= minj) {
270 off = tdc->f.chunk - minj;
271 if (off < NCHUNKSATONCE) {
273 osi_Panic("dclist slot already in use!");
274 if (afs_mariner && !marineronce) {
275 /* first chunk only */
276 afs_MarinerLog("store$Storing", avc);
283 /* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
284 * can't grab here, due to lock ordering with afs_xdcache.
285 * So, disable this shortcut for now. -- kolya 2001-10-13
287 /* shortcut: big win for little files */
288 /* tlen -= tdc->f.chunkBytes;
295 if (j == NCHUNKSATONCE)
302 index = afs_dvnextTbl[index];
304 ReleaseWriteLock(&afs_xdcache);
/* Phase 2: store the gathered batch. */
306 /* this guy writes chunks, puts back dcache structs, and bumps newDV */
307 /* "moredata" just says "there are more dirty chunks yet to come".
311 afs_CacheStoreVCache(dcList, avc, areq, sync,
312 minj, high, moredata,
313 &newDV, &maxStoredLength);
314 /* Release any zero-length dcache entries in our interval
315 * that we locked but didn't store back above.
317 for (j = 0; j <= high; j++) {
320 osi_Assert(tdc->f.chunkBytes == 0);
321 ReleaseSharedLock(&tdc->lock);
/* Advance to the next window of chunk numbers and repeat while there
 * is more dirty data and no error. */
327 minj += NCHUNKSATONCE;
328 } while (!code && moredata);
331 UpgradeSToWLock(&avc->lock, 29);
333 /* send a trivial truncation store if did nothing else */
336 * Call StoreMini if we haven't written enough data to extend the
337 * file at the fileserver to the client's notion of the file length.
339 if ((avc->f.truncPos != AFS_NOTRUNC)
340 || ((avc->f.states & CExtendedFile)
341 && (maxStoredLength < avc->f.m.Length))) {
342 code = afs_StoreMini(avc, areq);
344 hadd32(newDV, 1); /* just bumped here, too */
346 avc->f.states &= ~CExtendedFile;
/* Phase 3: second pass over the hash chain to relabel stored chunks.
350 * Finally, turn off DWriting, turn on DFEntryMod,
351 * update f.versionNo.
352 * A lot of this could be integrated into the loop above
363 NCHUNKSATONCE * sizeof(struct dcache *));
365 /* overkill, but it gets the lock in case GetDSlot needs it */
366 ObtainWriteLock(&afs_xdcache, 285);
368 for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
369 index != NULLIDX && safety < afs_cacheFiles + 2;
370 index = afs_dvnextTbl[index]) {
372 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
373 tdc = afs_GetValidDSlot(index);
375 /* This is okay; since manipulating the dcaches at this
376 * point is best-effort. We only get a dcache here to
377 * increment the dv and turn off DWriting. If we were
378 * supposed to do that for a dcache, but could not
379 * due to an I/O error, it just means the dv won't
380 * be updated so we won't be able to use that cached
381 * chunk in the future. That's inefficient, but not
385 ReleaseReadLock(&tdc->tlock);
387 if (!FidCmp(&tdc->f.fid, &avc->f.fid)
388 && tdc->f.chunk >= minj) {
389 off = tdc->f.chunk - minj;
390 if (off < NCHUNKSATONCE) {
391 /* this is the file, and the correct chunk range */
392 if (j >= NCHUNKSATONCE)
394 ("Too many dcache entries in range\n");
399 if (j == NCHUNKSATONCE)
407 ReleaseWriteLock(&afs_xdcache);
409 for (i = 0; i < j; i++) {
410 /* Iterate over the dcache entries we collected above */
412 ObtainSharedLock(&tdc->lock, 677);
414 /* was code here to clear IFDataMod, but it should only be done
415 * in storedcache and storealldcache.
417 /* Only increase DV if we had up-to-date data to start with.
418 * Otherwise, we could be falsely upgrading an old chunk
419 * (that we never read) into one labelled with the current
420 * DV #. Also note that we check that no intervening stores
421 * occurred, otherwise we might mislabel cache information
422 * for a chunk that we didn't store this time
424 /* Don't update the version number if it's not yet set. */
425 if (!hsame(tdc->f.versionNo, h_unset)
426 && hcmp(tdc->f.versionNo, oldDV) >= 0) {
428 if ((!(afs_dvhack || foreign)
429 && hsame(avc->f.m.DataVersion, newDV))
430 || ((afs_dvhack || foreign)
431 && (origCBs == afs_allCBs))) {
432 /* no error, this is the DV */
434 UpgradeSToWLock(&tdc->lock, 678);
435 hset(tdc->f.versionNo, avc->f.m.DataVersion);
436 tdc->dflags |= DFEntryMod;
437 /* DWriting may not have gotten cleared above, if all
438 * we did was a StoreMini */
439 tdc->f.states &= ~DWriting;
440 ConvertWToSLock(&tdc->lock);
444 ReleaseSharedLock(&tdc->lock);
448 minj += NCHUNKSATONCE;
/* Error cleanup:
455 * Invalidate chunks after an error for ccores files since
456 * afs_inactive won't be called for these and they won't be
457 * invalidated. Also discard data if it's a permanent error from the
460 if (areq->permWriteError || (avc->f.states & CCore)) {
461 afs_InvalidateAllSegments(avc);
464 afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
465 ICL_TYPE_INT32, avc->f.m.Length, ICL_TYPE_INT32, code);
466 /* would like a Trace5, but it doesn't exist... */
467 afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
468 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
469 avc->lock.excl_locked);
470 afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
471 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
472 avc->lock.readers_reading, ICL_TYPE_INT32,
473 avc->lock.num_waiting);
476 * Finally, if updated DataVersion matches newDV, we did all of the
477 * stores. If mapDV indicates that the page cache was flushed up
478 * to when we started the store, then we can relabel them as flushed
479 * as recently as newDV.
480 * Turn off CDirty bit because the stored data is now in sync with server.
482 if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
483 if ((!(afs_dvhack || foreign) && hsame(avc->f.m.DataVersion, newDV))
484 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
485 hset(avc->mapDV, newDV);
486 avc->f.states &= ~CDirty;
489 osi_FreeLargeSpace(dcList);
491 /* If not the final write a temporary error is ok. */
492 if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
497 } /*afs_StoreAllSegments (new 03/02/94) */
501 * afs_InvalidateAllSegments
504 * Invalidates all chunks for a given file
507 * avc : Pointer to vcache entry.
510 * For example, called after an error has been detected. Called
511 * with avc write-locked, and afs_xdcache unheld.
/*
 * afs_InvalidateAllSegments
 *
 * Discard every cached chunk of 'avc': clear dirty/extension state on
 * the vcache, strip IFDataMod/IFAnyPages from this file's dcache
 * entries, and (in a second pass collected into dcList) release them.
 * Called with avc write-locked and afs_xdcache unheld, typically after
 * a fatal store error.
 *
 * NOTE(review): interior lines (declarations of tdc/index/hash, some
 * closing braces, the actual invalidation call per entry) are missing
 * from this chunk.
 */
515 afs_InvalidateAllSegments(struct vcache *avc)
520 struct dcache **dcList;
521 int i, dcListMax, dcListCount;
523 AFS_STATCNT(afs_InvalidateAllSegments);
524 afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
525 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
526 hash = DVHash(&avc->f.fid);
527 avc->f.truncPos = AFS_NOTRUNC; /* don't truncate later */
528 avc->f.states &= ~CExtendedFile; /* not any more */
529 afs_StaleVCacheFlags(avc, 0, CDirty);
530 /* Blow away pages; for now, only for Solaris */
531 #if (defined(AFS_SUN5_ENV))
532 if (WriteLocked(&avc->lock))
533 osi_ReleaseVM(avc, (afs_ucred_t *)0);
/* Pass 1: count this file's dcache entries so we know how big a
 * dcList to allocate.
536 * Block out others from screwing with this table; is a read lock
539 ObtainWriteLock(&afs_xdcache, 286);
542 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
543 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
544 tdc = afs_GetValidDSlot(index);
546 /* In the case of fatal errors during stores, we MUST
547 * invalidate all of the relevant chunks. Otherwise, the chunks
548 * will be left with the 'new' data that was never successfully
549 * written to the server, but the DV in the dcache is still the
550 * old DV. So, we may indefinitely serve data to applications
551 * that is not actually in the file on the fileserver. If we
552 * cannot afs_GetValidDSlot the appropriate entries, currently
553 * there is no way to ensure the dcache is invalidated. So for
554 * now, to avoid risking serving bad data from the cache, panic
556 osi_Panic("afs_InvalidateAllSegments tdc count");
558 ReleaseReadLock(&tdc->tlock);
559 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
563 index = afs_dvnextTbl[index];
566 dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
/* Pass 2: clear the modified/pages flags and collect the entries. */
569 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
570 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
571 tdc = afs_GetValidDSlot(index);
573 /* We cannot proceed after getting this error; we risk serving
574 * incorrect data to applications. So panic instead. See the
575 * above comment next to the previous afs_GetValidDSlot call
577 osi_Panic("afs_InvalidateAllSegments tdc store");
579 ReleaseReadLock(&tdc->tlock);
580 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
581 /* same file? we'll zap it */
582 if (afs_indexFlags[index] & IFDataMod) {
583 afs_stats_cmperf.cacheCurrDirtyChunks--;
584 /* don't write it back */
585 afs_indexFlags[index] &= ~IFDataMod;
587 afs_indexFlags[index] &= ~IFAnyPages;
588 if (dcListCount < dcListMax)
589 dcList[dcListCount++] = tdc;
596 index = afs_dvnextTbl[index];
598 ReleaseWriteLock(&afs_xdcache);
/* Pass 3: with xdcache dropped, take each entry's own lock and finish
 * the invalidation (directory entries get extra handling). */
600 for (i = 0; i < dcListCount; i++) {
603 ObtainWriteLock(&tdc->lock, 679);
605 if (vType(avc) == VDIR)
607 ReleaseWriteLock(&tdc->lock);
611 osi_Free(dcList, dcListMax * sizeof(struct dcache *));
618 * Extend a cache file
620 * \param avc pointer to vcache to extend data for
621 * \param alen Length to extend file to
624 * \note avc must be write locked. May release and reobtain avc and GLOCK
/*
 * afs_ExtendSegments
 *
 * Extend the cached file to length 'alen' by writing zero-filled pages
 * into the cache, one chunk at a time, advancing avc->f.m.Length as
 * each chunk is filled.  avc must be write-locked; may release and
 * reobtain locks (per the header comment above).
 *
 * NOTE(review): declarations of code/towrite/tdc/zeros and some loop
 * braces are in lines not visible in this chunk.
 */
627 afs_ExtendSegments(struct vcache *avc, afs_size_t alen, struct vrequest *areq)
629 afs_size_t offset, toAdd;
630 struct osi_file *tfile;
/* One page of zeros, reused for every cache-file write below. */
635 zeros = afs_osi_Alloc(AFS_PAGESIZE);
638 memset(zeros, 0, AFS_PAGESIZE);
640 while (avc->f.m.Length < alen) {
641 tdc = afs_ObtainDCacheForWriting(avc, avc->f.m.Length, alen - avc->f.m.Length, areq, 0);
/* Bytes still needed, clamped so we never write past the end of the
 * current chunk. */
647 toAdd = alen - avc->f.m.Length;
649 offset = avc->f.m.Length - AFS_CHUNKTOBASE(tdc->f.chunk);
650 if (offset + toAdd > AFS_CHUNKTOSIZE(tdc->f.chunk)) {
651 toAdd = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;
653 tfile = afs_CFileOpen(&tdc->f.inode);
/* Write zeros page-by-page until the chunk's valid data reaches the
 * new end position. */
655 while(tdc->validPos < avc->f.m.Length + toAdd) {
658 towrite = (avc->f.m.Length + toAdd) - tdc->validPos;
659 if (towrite > AFS_PAGESIZE) towrite = AFS_PAGESIZE;
661 code = afs_CFileWrite(tfile,
662 tdc->validPos - AFS_CHUNKTOBASE(tdc->f.chunk),
664 tdc->validPos += towrite;
666 afs_CFileClose(tfile);
667 afs_AdjustSize(tdc, offset + toAdd );
668 avc->f.m.Length += toAdd;
669 ReleaseWriteLock(&tdc->lock);
673 afs_osi_Free(zeros, AFS_PAGESIZE);
678 * afs_TruncateAllSegments
681 * Truncate a cache file.
684 * avc : Ptr to vcache entry to truncate.
685 * alen : Number of bytes to make the file.
686 * areq : Ptr to request structure.
689 * Called with avc write-locked; in VFS40 systems, pvnLock is also
/*
 * afs_TruncateAllSegments
 *
 * Truncate the cached file to 'alen' bytes: flush/purge VM pages past
 * the new EOF, then shrink every cached chunk that extends beyond it.
 * If alen >= current length this is an extension, handled by just
 * setting the length and CExtendedFile (the eventual store pushes the
 * new length to the server).  Called with avc write-locked.
 *
 * NOTE(review): interior lines (declarations of code/index/dcCount/
 * dcPos/newSize/tdc, several braces, and the function's tail beyond
 * the visible range) are missing from this chunk.
 */
693 afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
694 struct vrequest *areq, afs_ucred_t *acred)
702 struct dcache **tdcArray = NULL;
704 AFS_STATCNT(afs_TruncateAllSegments);
/* Truncation counts as a modification: stamp the client mod time. */
705 avc->f.m.Date = osi_Time();
706 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
707 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
708 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
709 if (alen >= avc->f.m.Length) {
711 * Special speedup since Sun's vm extends the file this way;
712 * we've never written to the file thus we can just set the new
713 * length and avoid the needless calls below.
714 * Also used for ftruncate calls which can extend the file.
715 * To completely minimize the possible extra StoreMini RPC, we really
716 * should keep the ExtendedPos as well and clear this flag if we
717 * truncate below that value before we store the file back.
719 avc->f.states |= CExtendedFile;
720 avc->f.m.Length = alen;
723 #if (defined(AFS_SUN5_ENV))
725 /* Zero unused portion of last page */
726 osi_VM_PreTruncate(avc, alen, acred);
/* Solaris: bump activeV to hold off new getpage activity while we
 * drop avc->lock for the VM truncate below. */
730 #if (defined(AFS_SUN5_ENV))
731 ObtainWriteLock(&avc->vlock, 546);
732 avc->activeV++; /* Block new getpages */
733 ReleaseWriteLock(&avc->vlock);
736 ReleaseWriteLock(&avc->lock);
739 /* Flush pages beyond end-of-file. */
740 osi_VM_Truncate(avc, alen, acred);
743 ObtainWriteLock(&avc->lock, 79);
745 avc->f.m.Length = alen;
/* Remember the lowest truncation point so the server store truncates
 * far enough. */
747 if (alen < avc->f.truncPos)
748 avc->f.truncPos = alen;
749 code = DVHash(&avc->f.fid);
/* Pass 1: count this file's dcache entries on the DV hash chain. */
751 /* block out others from screwing with this table */
752 ObtainWriteLock(&afs_xdcache, 287);
755 for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
756 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
757 tdc = afs_GetValidDSlot(index);
759 ReleaseWriteLock(&afs_xdcache);
763 ReleaseReadLock(&tdc->tlock);
764 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
768 index = afs_dvnextTbl[index];
771 /* Now allocate space where we can save those dcache entries, and
772 * do a second pass over them.. Since we're holding xdcache, it
773 * shouldn't be changing.
775 tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
778 for (index = afs_dvhashTbl[code]; index != NULLIDX; index = afs_dvnextTbl[index]) {
779 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
780 tdc = afs_GetValidDSlot(index);
782 /* make sure we put back all of the tdcArray members before
784 /* remember, the last valid tdc is at dcPos-1, so start at
785 * dcPos-1, not at dcPos itself. */
786 for (dcPos = dcPos - 1; dcPos >= 0; dcPos--) {
787 tdc = tdcArray[dcPos];
793 ReleaseReadLock(&tdc->tlock);
794 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
795 /* same file, and modified, we'll store it back */
796 if (dcPos < dcCount) {
797 tdcArray[dcPos++] = tdc;
807 ReleaseWriteLock(&afs_xdcache);
809 /* Now we loop over the array of dcache entries and truncate them */
810 for (index = 0; index < dcPos; index++) {
811 struct osi_file *tfile;
813 tdc = tdcArray[index];
/* New size of this chunk = bytes of the chunk that remain below alen
 * (negative/oversized cases are filtered by the check below). */
815 newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
818 ObtainSharedLock(&tdc->lock, 672);
819 if (newSize < tdc->f.chunkBytes && newSize < MAX_AFS_UINT32) {
820 UpgradeSToWLock(&tdc->lock, 673);
821 tdc->f.states |= DWriting;
822 tfile = afs_CFileOpen(&tdc->f.inode);
824 afs_CFileTruncate(tfile, (afs_int32)newSize);
825 afs_CFileClose(tfile);
826 afs_AdjustSize(tdc, (afs_int32)newSize);
827 if (alen < tdc->validPos) {
828 if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
831 tdc->validPos = alen;
833 ConvertWToSLock(&tdc->lock);
835 ReleaseSharedLock(&tdc->lock);
843 osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
/* Solaris: release the getpage hold taken above and wake any revoke
 * waiter. */
845 #if (defined(AFS_SUN5_ENV))
846 ObtainWriteLock(&avc->vlock, 547);
847 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
848 avc->vstates &= ~VRevokeWait;
849 afs_osi_Wakeup((char *)&avc->vstates);
851 ReleaseWriteLock(&avc->vlock);