2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * --------------------- Required definitions ---------------------
13 #include <afsconfig.h>
14 #include "afs/param.h"
17 #include "afs/sysincludes.h" /*Standard vendor system headers */
18 #include "afsincludes.h" /*AFS-based standard headers */
19 #include "afs/afs_stats.h" /* statistics */
20 #include "afs/afs_cbqueue.h"
21 #include "afs/afs_osidnlc.h"
/* Global stamp counter, initialized to zero.
 * NOTE(review): its use is not visible in this extraction — confirm against
 * the rest of the file before documenting its semantics further. */
23 afs_uint32 afs_stampValue = 0;
29 * Send a truncation request to a FileServer.
35 * We're write-locked upon entry.
/*
 * afs_StoreMini
 *
 * Push the file's length/truncation to the fileserver by issuing a
 * StoreData RPC that carries no data bytes.  Per the header comment above,
 * the vcache is write-locked on entry.
 *
 * NOTE(review): this extraction is missing interleaved lines (e.g. the
 * declarations of tc/code, the do/while retry loop around afs_Conn, and
 * several braces).  Treat the control flow below as a partial view of the
 * upstream function.
 */
39 afs_StoreMini(struct vcache *avc, struct vrequest *areq)
42 struct AFSStoreStatus InStatus;
43 struct AFSFetchStatus OutStatus;
44 struct AFSVolSync tsync;
46 struct rx_call *tcall;
47 struct rx_connection *rxconn;
48 afs_size_t tlen, xlen = 0;
50 AFS_STATCNT(afs_StoreMini);
51 afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
52 ICL_TYPE_INT32, avc->f.m.Length);
/* Target length is the smaller of the current length and any pending
 * truncation position. */
53 tlen = avc->f.m.Length;
54 if (avc->f.truncPos < tlen)
55 tlen = avc->f.truncPos;
/* The truncation is being sent to the server now; clear the pending state. */
56 avc->f.truncPos = AFS_NOTRUNC;
57 avc->f.states &= ~CExtendedFile;
60 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
62 #ifdef AFS_64BIT_CLIENT
66 tcall = rx_NewCall(rxconn);
68 /* Set the client mod time since we always want the file
69 * to have the client's mod time and not the server's one
70 * (to avoid problems with make, etc.) It almost always
71 * works fine with standard afs because the server/client
72 * times are in sync and more importantly this storemini
73 * is a special call that would typically be followed by
74 * the proper store-data or store-status calls.
 */
76 InStatus.Mask = AFS_SETMODTIME;
77 InStatus.ClientModTime = avc->f.m.Date;
78 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
79 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64, ICL_TYPE_FID,
80 &avc->f.fid.Fid, ICL_TYPE_OFFSET,
81 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
82 ICL_HANDLE_OFFSET(xlen), ICL_TYPE_OFFSET,
83 ICL_HANDLE_OFFSET(tlen));
/* Use the 64-bit StoreData variant unless this server has previously
 * rejected the 64-bit opcode. */
85 #ifdef AFS_64BIT_CLIENT
86 if (!afs_serverHasNo64Bit(tc)) {
88 StartRXAFS_StoreData64(tcall,
89 (struct AFSFid *)&avc->f.fid.Fid,
90 &InStatus, avc->f.m.Length,
91 (afs_size_t) 0, tlen);
/* 32-bit fallback: reject lengths that cannot be represented in 31 bits. */
96 if ((avc->f.m.Length > 0x7fffffff) ||
97 (tlen > 0x7fffffff) ||
98 ((0x7fffffff - tlen) < avc->f.m.Length)) {
103 StartRXAFS_StoreData(tcall,
104 (struct AFSFid *)&avc->f.fid.Fid,
105 &InStatus, l1, 0, l2);
107 #else /* AFS_64BIT_CLIENT */
109 StartRXAFS_StoreData(tcall, (struct AFSFid *)&avc->f.fid.Fid,
110 &InStatus, avc->f.m.Length, 0, tlen);
111 #endif /* AFS_64BIT_CLIENT */
113 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
115 #ifdef AFS_64BIT_CLIENT
118 code = rx_EndCall(tcall, code);
/* If the server rejected the 64-bit opcode, remember that so the retry
 * (and future calls) use the 32-bit RPC instead. */
121 #ifdef AFS_64BIT_CLIENT
122 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
123 afs_serverSetNo64Bit(tc);
126 #endif /* AFS_64BIT_CLIENT */
130 (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STOREDATA,
/* On success, merge the fileserver's returned status into the vcache. */
134 afs_ProcessFS(avc, &OutStatus, areq);
140 * afs_StoreAllSegments
143 * Stores all modified segments back to server
146 * avc : Pointer to vcache entry.
147 * areq : Pointer to request structure.
150 * Called with avc write-locked.
/* Maximum number of dirty chunks collected per pass of the store loop in
 * afs_StoreAllSegments (dcList has NCHUNKSATONCE slots).  HP-UX uses a
 * smaller batch.
 * NOTE(review): the #else/#endif lines of this conditional are missing from
 * this extraction. */
152 #if defined (AFS_HPUX_ENV)
153 int NCHUNKSATONCE = 3;
155 int NCHUNKSATONCE = 64;
/*
 * afs_StoreAllSegments
 *
 * Store all modified (IFDataMod) chunks of avc back to the fileserver, in
 * batches of NCHUNKSATONCE, then relabel the surviving dcache entries with
 * the new DataVersion.  Called with avc write-locked (see header comment).
 *
 * NOTE(review): this extraction is missing interleaved lines (declarations
 * of tdc/code/hash/index, the outer do { } loop, and many braces).  The
 * sequence below is a partial view of the upstream function.
 */
161 afs_StoreAllSegments(struct vcache *avc, struct vrequest *areq,
167 afs_int32 origCBs, foreign = 0;
169 afs_hyper_t newDV, oldDV; /* DV when we start, and finish, respectively */
170 struct dcache **dcList;
171 unsigned int i, j, minj, moredata, high, off;
172 afs_size_t maxStoredLength; /* highest offset we've written to server. */
173 int safety, marineronce = 0;
175 AFS_STATCNT(afs_StoreAllSegments);
/* Remember the DV we started with; newDV is bumped as chunks are stored. */
177 hset(oldDV, avc->f.m.DataVersion);
178 hset(newDV, avc->f.m.DataVersion);
179 hash = DVHash(&avc->f.fid);
180 foreign = (avc->f.states & CForeign);
181 dcList = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
182 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
183 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
184 #if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
185 /* In the aix vm implementation we need to do the vm_writep even
186 * on the memcache case since that's where we adjust the file's size
187 * and finish flushing partial vm pages.
 */
189 if ((cacheDiskType != AFS_FCACHE_TYPE_MEM) ||
190 (sync & AFS_VMSYNC_INVAL) || (sync & AFS_VMSYNC) ||
191 (sync & AFS_LASTSTORE))
192 #endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
194 /* If we're not diskless, reading a file may stress the VM
195 * system enough to cause a pageout, and this vnode would be
196 * locked when the pageout occurs. We can prevent this problem
197 * by making sure all dirty pages are already flushed. We don't
198 * do this when diskless because reading a diskless (i.e.
199 * memory-resident) chunk doesn't require using new VM, and we
200 * also don't want to dump more dirty data into a diskless cache,
201 * since they're smaller, and we might exceed its available
 */
204 #if defined(AFS_SUN5_ENV)
205 if (sync & AFS_VMSYNC_INVAL) /* invalidate VM pages */
206 osi_VM_TryToSmush(avc, CRED(), 1);
209 osi_VM_StoreAllSegments(avc);
211 if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
212 /* This will probably make someone sad ... */
213 /*printf("Net down in afs_StoreSegments\n");*/
216 ConvertWToSLock(&avc->lock);
/*
219 * Subsequent code expects a sorted list, and it expects all the
220 * chunks in the list to be contiguous, so we need a sort and a
221 * while loop in here, too - but this will work for a first pass...
222 * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
223 * bin sort, I guess. Chunk numbers start with 0
225 * - Have to get a write lock on xdcache because GetDSlot might need it (if
226 * the chunk doesn't have a dcache struct).
227 * This seems like overkill in most cases.
228 * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
229 * xdcache, then relock xdcache and try to use index. It is done
230 * a lot elsewhere in the CM, but I'm not buying that argument.
231 * - should be able to check IFDataMod without doing the GetDSlot (just
232 * hold afs_xdcache). That way, it's easy to do this without the
233 * writelock on afs_xdcache, and we save unneccessary disk
234 * operations. I don't think that works, 'cuz the next pointers
 */
/* Snapshot callback counter so we can detect intervening callbacks below. */
237 origCBs = afs_allCBs;
243 memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
247 /* lock and start over from beginning of hash chain
248 * in order to avoid a race condition. */
249 ObtainWriteLock(&afs_xdcache, 284);
250 index = afs_dvhashTbl[hash];
/* Pass 1: collect up to NCHUNKSATONCE dirty chunks for this file into
 * dcList, binned by chunk number relative to minj. */
252 for (j = 0; index != NULLIDX;) {
253 if ((afs_indexFlags[index] & IFDataMod)
254 && (afs_indexUnique[index] == avc->f.fid.Fid.Unique)) {
255 tdc = afs_GetValidDSlot(index); /* refcount+1. */
257 ReleaseWriteLock(&afs_xdcache);
261 ReleaseReadLock(&tdc->tlock);
262 if (!FidCmp(&tdc->f.fid, &avc->f.fid) && tdc->f.chunk >= minj) {
263 off = tdc->f.chunk - minj;
264 if (off < NCHUNKSATONCE) {
266 osi_Panic("dclist slot already in use!");
267 if (afs_mariner && !marineronce) {
268 /* first chunk only */
269 afs_MarinerLog("store$Storing", avc);
276 /* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
277 * can't grab here, due to lock ordering with afs_xdcache.
278 * So, disable this shortcut for now. -- kolya 2001-10-13
280 /* shortcut: big win for little files */
281 /* tlen -= tdc->f.chunkBytes;
288 if (j == NCHUNKSATONCE)
295 index = afs_dvnextTbl[index];
297 ReleaseWriteLock(&afs_xdcache);
299 /* this guy writes chunks, puts back dcache structs, and bumps newDV */
300 /* "moredata" just says "there are more dirty chunks yet to come".
304 afs_CacheStoreVCache(dcList, avc, areq, sync,
305 minj, high, moredata,
306 &newDV, &maxStoredLength);
307 /* Release any zero-length dcache entries in our interval
308 * that we locked but didn't store back above.
310 for (j = 0; j <= high; j++) {
313 osi_Assert(tdc->f.chunkBytes == 0);
314 ReleaseSharedLock(&tdc->lock);
/* Advance to the next batch of chunk numbers. */
320 minj += NCHUNKSATONCE;
321 } while (!code && moredata);
324 UpgradeSToWLock(&avc->lock, 29);
326 /* send a trivial truncation store if did nothing else */
/*
329 * Call StoreMini if we haven't written enough data to extend the
330 * file at the fileserver to the client's notion of the file length.
 */
332 if ((avc->f.truncPos != AFS_NOTRUNC)
333 || ((avc->f.states & CExtendedFile)
334 && (maxStoredLength < avc->f.m.Length))) {
335 code = afs_StoreMini(avc, areq);
337 hadd32(newDV, 1); /* just bumped here, too */
339 avc->f.states &= ~CExtendedFile;
/*
343 * Finally, turn off DWriting, turn on DFEntryMod,
344 * update f.versionNo.
345 * A lot of this could be integrated into the loop above
 */
356 NCHUNKSATONCE * sizeof(struct dcache *));
358 /* overkill, but it gets the lock in case GetDSlot needs it */
359 ObtainWriteLock(&afs_xdcache, 285);
/* Pass 2: re-walk the hash chain and collect the chunks we just stored so
 * their versionNo can be relabeled.  "safety" bounds the walk. */
361 for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
362 index != NULLIDX && safety < afs_cacheFiles + 2;
363 index = afs_dvnextTbl[index]) {
365 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
366 tdc = afs_GetValidDSlot(index);
368 /* This is okay; since manipulating the dcaches at this
369 * point is best-effort. We only get a dcache here to
370 * increment the dv and turn off DWriting. If we were
371 * supposed to do that for a dcache, but could not
372 * due to an I/O error, it just means the dv won't
373 * be updated so we won't be able to use that cached
374 * chunk in the future. That's inefficient, but not
378 ReleaseReadLock(&tdc->tlock);
380 if (!FidCmp(&tdc->f.fid, &avc->f.fid)
381 && tdc->f.chunk >= minj) {
382 off = tdc->f.chunk - minj;
383 if (off < NCHUNKSATONCE) {
384 /* this is the file, and the correct chunk range */
385 if (j >= NCHUNKSATONCE)
387 ("Too many dcache entries in range\n");
392 if (j == NCHUNKSATONCE)
400 ReleaseWriteLock(&afs_xdcache);
402 for (i = 0; i < j; i++) {
403 /* Iterate over the dcache entries we collected above */
405 ObtainSharedLock(&tdc->lock, 677);
407 /* was code here to clear IFDataMod, but it should only be done
408 * in storedcache and storealldcache.
410 /* Only increase DV if we had up-to-date data to start with.
411 * Otherwise, we could be falsely upgrading an old chunk
412 * (that we never read) into one labelled with the current
413 * DV #. Also note that we check that no intervening stores
414 * occurred, otherwise we might mislabel cache information
415 * for a chunk that we didn't store this time
417 /* Don't update the version number if it's not yet set. */
418 if (!hsame(tdc->f.versionNo, h_unset)
419 && hcmp(tdc->f.versionNo, oldDV) >= 0) {
421 if ((!(afs_dvhack || foreign)
422 && hsame(avc->f.m.DataVersion, newDV))
423 || ((afs_dvhack || foreign)
424 && (origCBs == afs_allCBs))) {
425 /* no error, this is the DV */
427 UpgradeSToWLock(&tdc->lock, 678);
428 hset(tdc->f.versionNo, avc->f.m.DataVersion);
429 tdc->dflags |= DFEntryMod;
430 /* DWriting may not have gotten cleared above, if all
431 * we did was a StoreMini */
432 tdc->f.states &= ~DWriting;
433 ConvertWToSLock(&tdc->lock);
437 ReleaseSharedLock(&tdc->lock);
441 minj += NCHUNKSATONCE;
/*
448 * Invalidate chunks after an error for ccores files since
449 * afs_inactive won't be called for these and they won't be
450 * invalidated. Also discard data if it's a permanent error from the
 */
453 if (areq->permWriteError || (avc->f.states & CCore)) {
454 afs_InvalidateAllSegments(avc);
457 afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
458 ICL_TYPE_INT32, avc->f.m.Length, ICL_TYPE_INT32, code);
459 /* would like a Trace5, but it doesn't exist... */
460 afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
461 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
462 avc->lock.excl_locked);
463 afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
464 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
465 avc->lock.readers_reading, ICL_TYPE_INT32,
466 avc->lock.num_waiting);
/*
469 * Finally, if updated DataVersion matches newDV, we did all of the
470 * stores. If mapDV indicates that the page cache was flushed up
471 * to when we started the store, then we can relabel them as flushed
472 * as recently as newDV.
473 * Turn off CDirty bit because the stored data is now in sync with server.
 */
475 if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
476 if ((!(afs_dvhack || foreign) && hsame(avc->f.m.DataVersion, newDV))
477 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
478 hset(avc->mapDV, newDV);
479 avc->f.states &= ~CDirty;
482 osi_FreeLargeSpace(dcList);
484 /* If not the final write a temporary error is ok. */
485 if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
490 } /*afs_StoreAllSegments (new 03/02/94) */
494 * afs_InvalidateAllSegments
497 * Invalidates all chunks for a given file
500 * avc : Pointer to vcache entry.
503 * For example, called after an error has been detected. Called
504 * with avc write-locked, and afs_xdcache unheld.
/*
 * afs_InvalidateAllSegments
 *
 * Discard every cached chunk for avc (e.g. after a store error), clear its
 * callback/status state, and release the VM pages where supported.  Per the
 * header comment above, called with avc write-locked and afs_xdcache unheld.
 *
 * NOTE(review): this extraction is missing interleaved lines (declarations
 * of tdc/index/hash, loop braces, and the dcListMax/dcListCount counting
 * statements).  Treat the sequence below as a partial view.
 */
508 afs_InvalidateAllSegments(struct vcache *avc)
513 struct dcache **dcList;
514 int i, dcListMax, dcListCount;
516 AFS_STATCNT(afs_InvalidateAllSegments);
517 afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
518 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
519 hash = DVHash(&avc->f.fid);
520 avc->f.truncPos = AFS_NOTRUNC; /* don't truncate later */
521 avc->f.states &= ~CExtendedFile; /* not any more */
522 ObtainWriteLock(&afs_xcbhash, 459);
523 afs_DequeueCallback(avc);
524 avc->f.states &= ~(CStatd | CDirty); /* mark status information as bad, too */
525 ReleaseWriteLock(&afs_xcbhash);
526 if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
527 osi_dnlc_purgedp(avc);
528 /* Blow away pages; for now, only for Solaris */
529 #if (defined(AFS_SUN5_ENV))
530 if (WriteLocked(&avc->lock))
531 osi_ReleaseVM(avc, (afs_ucred_t *)0);
/*
534 * Block out others from screwing with this table; is a read lock
 */
537 ObtainWriteLock(&afs_xdcache, 286);
/* Pass 1: count the chunks belonging to this file. */
540 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
541 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
542 tdc = afs_GetValidDSlot(index);
544 /* In the case of fatal errors during stores, we MUST
545 * invalidate all of the relevant chunks. Otherwise, the chunks
546 * will be left with the 'new' data that was never successfully
547 * written to the server, but the DV in the dcache is still the
548 * old DV. So, we may indefinitely serve applications data
549 * that is not actually in the file on the fileserver. If we
550 * cannot afs_GetValidDSlot the appropriate entries, currently
551 * there is no way to ensure the dcache is invalidated. So for
552 * now, to avoid risking serving bad data from the cache, panic
554 osi_Panic("afs_InvalidateAllSegments tdc count");
556 ReleaseReadLock(&tdc->tlock);
557 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
561 index = afs_dvnextTbl[index];
564 dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
/* Pass 2: clear the dirty/page flags and collect the chunks to zap. */
567 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
568 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
569 tdc = afs_GetValidDSlot(index);
571 /* We cannot proceed after getting this error; we risk serving
572 * incorrect data to applications. So panic instead. See the
573 * above comment next to the previous afs_GetValidDSlot call
575 osi_Panic("afs_InvalidateAllSegments tdc store");
577 ReleaseReadLock(&tdc->tlock);
578 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
579 /* same file? we'll zap it */
580 if (afs_indexFlags[index] & IFDataMod) {
581 afs_stats_cmperf.cacheCurrDirtyChunks--;
582 /* don't write it back */
583 afs_indexFlags[index] &= ~IFDataMod;
585 afs_indexFlags[index] &= ~IFAnyPages;
586 if (dcListCount < dcListMax)
587 dcList[dcListCount++] = tdc;
594 index = afs_dvnextTbl[index];
596 ReleaseWriteLock(&afs_xdcache);
/* Pass 3: with xdcache released, invalidate the collected entries. */
598 for (i = 0; i < dcListCount; i++) {
601 ObtainWriteLock(&tdc->lock, 679);
603 if (vType(avc) == VDIR)
605 ReleaseWriteLock(&tdc->lock);
609 osi_Free(dcList, dcListMax * sizeof(struct dcache *));
616 * Extend a cache file
618 * \param avc pointer to vcache to extend data for
619 * \param alen Length to extend file to
622 * \note avc must be write locked. May release and reobtain avc and GLOCK
/*
 * afs_ExtendSegments
 *
 * Grow the cached file to length alen by writing pages of zeroes into the
 * cache chunks, one chunk per outer-loop iteration.  Per the doxygen
 * comment above, avc must be write-locked.
 *
 * NOTE(review): this extraction is missing interleaved lines (declarations
 * of tdc/code/zeros/towrite, error checks, and closing braces).  Treat the
 * sequence below as a partial view of the upstream function.
 */
625 afs_ExtendSegments(struct vcache *avc, afs_size_t alen, struct vrequest *areq)
627 afs_size_t offset, toAdd;
628 struct osi_file *tfile;
/* One page of zeroes, reused for every write below. */
633 zeros = afs_osi_Alloc(AFS_PAGESIZE);
636 memset(zeros, 0, AFS_PAGESIZE);
638 while (avc->f.m.Length < alen) {
639 tdc = afs_ObtainDCacheForWriting(avc, avc->f.m.Length, alen - avc->f.m.Length, areq, 0);
645 toAdd = alen - avc->f.m.Length;
/* Offset of the current end-of-file within this chunk. */
647 offset = avc->f.m.Length - AFS_CHUNKTOBASE(tdc->f.chunk);
/* Clamp this iteration's extension to the end of the chunk. */
648 if (offset + toAdd > AFS_CHUNKTOSIZE(tdc->f.chunk)) {
649 toAdd = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;
651 tfile = afs_CFileOpen(&tdc->f.inode);
/* Write zeroes a page at a time until validPos reaches the new length. */
652 while(tdc->validPos < avc->f.m.Length + toAdd) {
655 towrite = (avc->f.m.Length + toAdd) - tdc->validPos;
656 if (towrite > AFS_PAGESIZE) towrite = AFS_PAGESIZE;
658 code = afs_CFileWrite(tfile,
659 tdc->validPos - AFS_CHUNKTOBASE(tdc->f.chunk),
661 tdc->validPos += towrite;
663 afs_CFileClose(tfile);
664 afs_AdjustSize(tdc, offset + toAdd );
665 avc->f.m.Length += toAdd;
666 ReleaseWriteLock(&tdc->lock);
670 afs_osi_Free(zeros, AFS_PAGESIZE);
675 * afs_TruncateAllSegments
678 * Truncate a cache file.
681 * avc : Ptr to vcache entry to truncate.
682 * alen : Number of bytes to make the file.
683 * areq : Ptr to request structure.
686 * Called with avc write-locked; in VFS40 systems, pvnLock is also
/*
 * afs_TruncateAllSegments
 *
 * Truncate the locally cached file to alen bytes: fast-path a pure extension
 * by just bumping the length, otherwise flush/trim VM pages and shrink every
 * cached chunk past the new length.  Per the header comment above, called
 * with avc write-locked.
 *
 * NOTE(review): this extraction is missing interleaved lines (declarations
 * of tdc/index/code/dcCount/dcPos/newSize, braces, early returns) AND the
 * function's tail is cut off at the end of this chunk — compare against the
 * upstream afs_segments.c before relying on the flow shown here.
 */
690 afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
691 struct vrequest *areq, afs_ucred_t *acred)
699 struct dcache **tdcArray = NULL;
701 AFS_STATCNT(afs_TruncateAllSegments);
702 avc->f.m.Date = osi_Time();
703 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
704 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
705 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
/* Fast path: "truncating" to >= current length is an extension. */
706 if (alen >= avc->f.m.Length) {
/*
708 * Special speedup since Sun's vm extends the file this way;
709 * we've never written to the file thus we can just set the new
710 * length and avoid the needless calls below.
711 * Also used for ftruncate calls which can extend the file.
712 * To completely minimize the possible extra StoreMini RPC, we really
713 * should keep the ExtendedPos as well and clear this flag if we
714 * truncate below that value before we store the file back.
 */
716 avc->f.states |= CExtendedFile;
717 avc->f.m.Length = alen;
720 #if (defined(AFS_SUN5_ENV))
722 /* Zero unused portion of last page */
723 osi_VM_PreTruncate(avc, alen, acred);
727 #if (defined(AFS_SUN5_ENV))
728 ObtainWriteLock(&avc->vlock, 546);
729 avc->activeV++; /* Block new getpages */
730 ReleaseWriteLock(&avc->vlock);
/* Drop avc->lock across the VM truncate; reacquired below. */
733 ReleaseWriteLock(&avc->lock);
736 /* Flush pages beyond end-of-file. */
737 osi_VM_Truncate(avc, alen, acred);
740 ObtainWriteLock(&avc->lock, 79);
742 avc->f.m.Length = alen;
744 if (alen < avc->f.truncPos)
745 avc->f.truncPos = alen;
746 code = DVHash(&avc->f.fid);
748 /* block out others from screwing with this table */
749 ObtainWriteLock(&afs_xdcache, 287);
/* Pass 1: count this file's chunks so tdcArray can be sized. */
752 for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
753 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
754 tdc = afs_GetValidDSlot(index);
756 ReleaseWriteLock(&afs_xdcache);
760 ReleaseReadLock(&tdc->tlock);
761 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
765 index = afs_dvnextTbl[index];
768 /* Now allocate space where we can save those dcache entries, and
769 * do a second pass over them.. Since we're holding xdcache, it
770 * shouldn't be changing.
 */
772 tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
/* Pass 2: collect the chunks into tdcArray. */
775 for (index = afs_dvhashTbl[code]; index != NULLIDX; index = afs_dvnextTbl[index]) {
776 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
777 tdc = afs_GetValidDSlot(index);
779 /* make sure we put back all of the tdcArray members before
781 /* remember, the last valid tdc is at dcPos-1, so start at
782 * dcPos-1, not at dcPos itself. */
783 for (dcPos = dcPos - 1; dcPos >= 0; dcPos--) {
784 tdc = tdcArray[dcPos];
790 ReleaseReadLock(&tdc->tlock);
791 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
792 /* same file, and modified, we'll store it back */
793 if (dcPos < dcCount) {
794 tdcArray[dcPos++] = tdc;
804 ReleaseWriteLock(&afs_xdcache);
806 /* Now we loop over the array of dcache entries and truncate them */
807 for (index = 0; index < dcPos; index++) {
808 struct osi_file *tfile;
810 tdc = tdcArray[index];
/* New size of this chunk's data; negative/large values are filtered by the
 * bounds check below. */
812 newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
815 ObtainSharedLock(&tdc->lock, 672);
816 if (newSize < tdc->f.chunkBytes && newSize < MAX_AFS_UINT32) {
817 UpgradeSToWLock(&tdc->lock, 673);
818 tdc->f.states |= DWriting;
819 tfile = afs_CFileOpen(&tdc->f.inode);
820 afs_CFileTruncate(tfile, (afs_int32)newSize);
821 afs_CFileClose(tfile);
822 afs_AdjustSize(tdc, (afs_int32)newSize);
823 if (alen < tdc->validPos) {
824 if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
827 tdc->validPos = alen;
829 ConvertWToSLock(&tdc->lock);
831 ReleaseSharedLock(&tdc->lock);
839 osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
841 #if (defined(AFS_SUN5_ENV))
/* Unblock getpages and wake any revoke waiters (see activeV++ above). */
842 ObtainWriteLock(&avc->vlock, 547);
843 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
844 avc->vstates &= ~VRevokeWait;
845 afs_osi_Wakeup((char *)&avc->vstates);
847 ReleaseWriteLock(&avc->vlock);