2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * --------------------- Required definitions ---------------------
13 #include <afsconfig.h>
14 #include "afs/param.h"
17 #include "afs/sysincludes.h" /*Standard vendor system headers */
18 #include "afsincludes.h" /*AFS-based standard headers */
19 #include "afs/afs_stats.h" /* statistics */
20 #include "afs/afs_cbqueue.h"
21 #include "afs/afs_osidnlc.h"
/* Monotonically advanced stamp used by the cache manager when tagging
 * store operations; defined here, referenced elsewhere in the CM. */
23 afs_uint32 afs_stampValue = 0;
29 * Send a truncation request to a FileServer.
35 * We're write-locked upon entry.
/*
 * afs_StoreMini
 *
 * Send a zero-byte StoreData RPC so the fileserver adopts the client's
 * notion of the file length (effects a truncation/extension without
 * shipping any file data).
 *
 * Parameters:
 *    avc  : vcache entry for the file; caller holds the write lock.
 *    areq : request structure describing the calling user.
 *
 * NOTE(review): this listing is an incomplete excerpt — the declarations
 * of tc/code, the afs_Conn retry loop, XSTATS bookkeeping, and several
 * closing braces are not visible here.
 */
39 afs_StoreMini(struct vcache *avc, struct vrequest *areq)
42 struct AFSStoreStatus InStatus;
43 struct AFSFetchStatus OutStatus;
44 struct AFSVolSync tsync;
46 struct rx_call *tcall;
47 struct rx_connection *rxconn;
48 afs_size_t tlen, xlen = 0;
50 AFS_STATCNT(afs_StoreMini);
51 afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
52 ICL_TYPE_INT32, avc->f.m.Length);
/* Target length: the smaller of the cached length and any pending
 * truncation position; the pending truncation is consumed here. */
53 tlen = avc->f.m.Length;
54 if (avc->f.truncPos < tlen)
55 tlen = avc->f.truncPos;
56 avc->f.truncPos = AFS_NOTRUNC;
57 avc->f.states &= ~CExtendedFile;
58 memset(&InStatus, 0, sizeof(InStatus));
61 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
63 #ifdef AFS_64BIT_CLIENT
67 tcall = rx_NewCall(rxconn);
69 /* Set the client mod time since we always want the file
70 * to have the client's mod time and not the server's one
71 * (to avoid problems with make, etc.) It almost always
72 * works fine with standard afs because the server/client
73 * times are in sync and more importantly this storemini
74 * it's a special call that would typically be followed by
75 * the proper store-data or store-status calls.
77 InStatus.Mask = AFS_SETMODTIME;
78 InStatus.ClientModTime = avc->f.m.Date;
79 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
80 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64, ICL_TYPE_FID,
81 &avc->f.fid.Fid, ICL_TYPE_OFFSET,
82 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
83 ICL_HANDLE_OFFSET(xlen), ICL_TYPE_OFFSET,
84 ICL_HANDLE_OFFSET(tlen));
/* Prefer the 64-bit StoreData RPC; fall back to the 32-bit variant
 * when the server is known to lack it, or refuse when the offsets
 * cannot be represented in 31 bits. */
86 #ifdef AFS_64BIT_CLIENT
87 if (!afs_serverHasNo64Bit(tc)) {
89 StartRXAFS_StoreData64(tcall,
90 (struct AFSFid *)&avc->f.fid.Fid,
91 &InStatus, avc->f.m.Length,
92 (afs_size_t) 0, tlen);
97 if ((avc->f.m.Length > 0x7fffffff) ||
98 (tlen > 0x7fffffff) ||
99 ((0x7fffffff - tlen) < avc->f.m.Length)) {
104 StartRXAFS_StoreData(tcall,
105 (struct AFSFid *)&avc->f.fid.Fid,
106 &InStatus, l1, 0, l2);
108 #else /* AFS_64BIT_CLIENT */
110 StartRXAFS_StoreData(tcall, (struct AFSFid *)&avc->f.fid.Fid,
111 &InStatus, avc->f.m.Length, 0, tlen);
112 #endif /* AFS_64BIT_CLIENT */
114 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
116 #ifdef AFS_64BIT_CLIENT
119 code = rx_EndCall(tcall, code);
/* RXGEN_OPCODE means the server doesn't know the 64-bit opcode;
 * remember that so future calls go straight to the 32-bit RPC. */
122 #ifdef AFS_64BIT_CLIENT
123 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
124 afs_serverSetNo64Bit(tc);
127 #endif /* AFS_64BIT_CLIENT */
131 (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STOREDATA,
/* Merge the fileserver's returned status back into the vcache. */
135 afs_ProcessFS(avc, &OutStatus, areq);
141 * afs_StoreAllSegments
144 * Stores all modified segments back to server
147 * avc : Pointer to vcache entry.
148 * areq : Pointer to request structure.
151 * Called with avc write-locked.
/* Number of dirty chunks processed per batch in afs_StoreAllSegments;
 * kept small on HP-UX (presumably for kernel stack/memory limits —
 * TODO confirm).  NOTE(review): the #else/#endif lines of this
 * conditional are not visible in this excerpt. */
153 #if defined (AFS_HPUX_ENV)
154 int NCHUNKSATONCE = 3;
156 int NCHUNKSATONCE = 64;
/*
 * afs_StoreAllSegments
 *
 * Store every dirty (IFDataMod) chunk of a file back to the fileserver,
 * in batches of NCHUNKSATONCE, then relabel surviving dcache entries
 * with the new data version.  Called with avc write-locked.
 *
 * NOTE(review): this listing is an incomplete excerpt — many lines
 * (local declarations such as code/tdc/hash/index, the do/while batch
 * loop header, and various braces) are missing from view.
 */
162 afs_StoreAllSegments(struct vcache *avc, struct vrequest *areq,
168 afs_int32 origCBs, foreign = 0;
170 afs_hyper_t newDV, oldDV; /* DV when we start, and finish, respectively */
171 struct dcache **dcList;
172 unsigned int i, j, minj, moredata, high, off;
173 afs_size_t maxStoredLength; /* highest offset we've written to server. */
174 int safety, marineronce = 0;
176 AFS_STATCNT(afs_StoreAllSegments);
178 hash = DVHash(&avc->f.fid);
179 foreign = (avc->f.states & CForeign);
180 dcList = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
181 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
182 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
183 #if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
184 /* In the aix vm implementation we need to do the vm_writep even
185 * on the memcache case since that's where we adjust the file's size
186 * and finish flushing partial vm pages.
188 if ((cacheDiskType != AFS_FCACHE_TYPE_MEM) ||
189 (sync & AFS_VMSYNC_INVAL) || (sync & AFS_VMSYNC) ||
190 (sync & AFS_LASTSTORE))
191 #endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
193 /* If we're not diskless, reading a file may stress the VM
194 * system enough to cause a pageout, and this vnode would be
195 * locked when the pageout occurs. We can prevent this problem
196 * by making sure all dirty pages are already flushed. We don't
197 * do this when diskless because reading a diskless (i.e.
198 * memory-resident) chunk doesn't require using new VM, and we
199 * also don't want to dump more dirty data into a diskless cache,
200 * since they're smaller, and we might exceed its available
203 #if defined(AFS_SUN5_ENV)
204 if (sync & AFS_VMSYNC_INVAL) /* invalidate VM pages */
205 osi_VM_TryToSmush(avc, CRED(), 1);
208 osi_VM_StoreAllSegments(avc);
/* Disconnected mode: we cannot reach the fileserver at all. */
210 if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
211 /* This will probably make someone sad ... */
212 /*printf("Net down in afs_StoreSegments\n");*/
/* Snapshot the data version AFTER the VM flush above, since that
 * flush can drop locks and indirectly bump the DV. */
217 * Can't do this earlier because osi_VM_StoreAllSegments drops locks
218 * and can indirectly do some stores that increase the DV.
220 hset(oldDV, avc->f.m.DataVersion);
221 hset(newDV, avc->f.m.DataVersion);
223 ConvertWToSLock(&avc->lock);
226 * Subsequent code expects a sorted list, and it expects all the
227 * chunks in the list to be contiguous, so we need a sort and a
228 * while loop in here, too - but this will work for a first pass...
229 * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
230 * bin sort, I guess. Chunk numbers start with 0
232 * - Have to get a write lock on xdcache because GetDSlot might need it (if
233 * the chunk doesn't have a dcache struct).
234 * This seems like overkill in most cases.
235 * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
236 * xdcache, then relock xdcache and try to use index. It is done
237 * a lot elsewhere in the CM, but I'm not buying that argument.
238 * - should be able to check IFDataMod without doing the GetDSlot (just
239 * hold afs_xdcache). That way, it's easy to do this without the
240 * writelock on afs_xdcache, and we save unnecessary disk
241 * operations. I don't think that works, 'cuz the next pointers
/* Remember the callback counter so we can later detect whether any
 * callbacks arrived while we were storing (used for the DV check). */
244 origCBs = afs_allCBs;
/* --- Phase 1: batch loop.  Collect up to NCHUNKSATONCE dirty chunks
 * for this file (bin-sorted by chunk number into dcList) and hand
 * them to afs_CacheStoreVCache. --- */
250 memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
254 /* lock and start over from beginning of hash chain
255 * in order to avoid a race condition. */
256 ObtainWriteLock(&afs_xdcache, 284);
257 index = afs_dvhashTbl[hash];
259 for (j = 0; index != NULLIDX;) {
260 if ((afs_indexFlags[index] & IFDataMod)
261 && (afs_indexUnique[index] == avc->f.fid.Fid.Unique)) {
262 tdc = afs_GetValidDSlot(index); /* refcount+1. */
264 ReleaseWriteLock(&afs_xdcache);
268 ReleaseReadLock(&tdc->tlock);
269 if (!FidCmp(&tdc->f.fid, &avc->f.fid) && tdc->f.chunk >= minj) {
270 off = tdc->f.chunk - minj;
271 if (off < NCHUNKSATONCE) {
273 osi_Panic("dclist slot already in use!");
274 if (afs_mariner && !marineronce) {
275 /* first chunk only */
276 afs_MarinerLog("store$Storing", avc);
283 /* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
284 * can't grab here, due to lock ordering with afs_xdcache.
285 * So, disable this shortcut for now. -- kolya 2001-10-13
287 /* shortcut: big win for little files */
288 /* tlen -= tdc->f.chunkBytes;
295 if (j == NCHUNKSATONCE)
302 index = afs_dvnextTbl[index];
304 ReleaseWriteLock(&afs_xdcache);
306 /* this guy writes chunks, puts back dcache structs, and bumps newDV */
307 /* "moredata" just says "there are more dirty chunks yet to come".
311 afs_CacheStoreVCache(dcList, avc, areq, sync,
312 minj, high, moredata,
313 &newDV, &maxStoredLength);
314 /* Release any zero-length dcache entries in our interval
315 * that we locked but didn't store back above.
317 for (j = 0; j <= high; j++) {
320 osi_Assert(tdc->f.chunkBytes == 0);
321 ReleaseSharedLock(&tdc->lock);
/* Advance the batch window and loop while more dirty chunks remain. */
327 minj += NCHUNKSATONCE;
328 } while (!code && moredata);
331 UpgradeSToWLock(&avc->lock, 29);
333 /* send a trivial truncation store if did nothing else */
336 * Call StoreMini if we haven't written enough data to extend the
337 * file at the fileserver to the client's notion of the file length.
339 if ((avc->f.truncPos != AFS_NOTRUNC)
340 || ((avc->f.states & CExtendedFile)
341 && (maxStoredLength < avc->f.m.Length))) {
342 code = afs_StoreMini(avc, areq);
344 hadd32(newDV, 1); /* just bumped here, too */
346 avc->f.states &= ~CExtendedFile;
350 * Finally, turn off DWriting, turn on DFEntryMod,
351 * update f.versionNo.
352 * A lot of this could be integrated into the loop above
/* --- Phase 2: second pass over the same hash chain to relabel the
 * chunks we stored with the new data version. --- */
363 NCHUNKSATONCE * sizeof(struct dcache *));
365 /* overkill, but it gets the lock in case GetDSlot needs it */
366 ObtainWriteLock(&afs_xdcache, 285);
368 for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
369 index != NULLIDX && safety < afs_cacheFiles + 2;
370 index = afs_dvnextTbl[index]) {
372 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
373 tdc = afs_GetValidDSlot(index);
375 /* This is okay; since manipulating the dcaches at this
376 * point is best-effort. We only get a dcache here to
377 * increment the dv and turn off DWriting. If we were
378 * supposed to do that for a dcache, but could not
379 * due to an I/O error, it just means the dv won't
380 * be updated so we won't be able to use that cached
381 * chunk in the future. That's inefficient, but not
385 ReleaseReadLock(&tdc->tlock);
387 if (!FidCmp(&tdc->f.fid, &avc->f.fid)
388 && tdc->f.chunk >= minj) {
389 off = tdc->f.chunk - minj;
390 if (off < NCHUNKSATONCE) {
391 /* this is the file, and the correct chunk range */
392 if (j >= NCHUNKSATONCE)
394 ("Too many dcache entries in range\n");
399 if (j == NCHUNKSATONCE)
407 ReleaseWriteLock(&afs_xdcache);
409 for (i = 0; i < j; i++) {
410 /* Iterate over the dcache entries we collected above */
412 ObtainSharedLock(&tdc->lock, 677);
414 /* was code here to clear IFDataMod, but it should only be done
415 * in storedcache and storealldcache.
417 /* Only increase DV if we had up-to-date data to start with.
418 * Otherwise, we could be falsely upgrading an old chunk
419 * (that we never read) into one labelled with the current
420 * DV #. Also note that we check that no intervening stores
421 * occurred, otherwise we might mislabel cache information
422 * for a chunk that we didn't store this time
424 /* Don't update the version number if it's not yet set. */
425 if (!hsame(tdc->f.versionNo, h_unset)
426 && hcmp(tdc->f.versionNo, oldDV) >= 0) {
428 if ((!(afs_dvhack || foreign)
429 && hsame(avc->f.m.DataVersion, newDV))
430 || ((afs_dvhack || foreign)
431 && (origCBs == afs_allCBs))) {
432 /* no error, this is the DV */
434 UpgradeSToWLock(&tdc->lock, 678);
435 hset(tdc->f.versionNo, avc->f.m.DataVersion);
436 tdc->dflags |= DFEntryMod;
437 /* DWriting may not have gotten cleared above, if all
438 * we did was a StoreMini */
439 tdc->f.states &= ~DWriting;
440 ConvertWToSLock(&tdc->lock);
444 ReleaseSharedLock(&tdc->lock);
448 minj += NCHUNKSATONCE;
/* --- Cleanup: on permanent errors (or CCore files, which never see
 * afs_inactive) throw away the now-untrustworthy cached chunks. --- */
455 * Invalidate chunks after an error for ccores files since
456 * afs_inactive won't be called for these and they won't be
457 * invalidated. Also discard data if it's a permanent error from the
460 if (areq->permWriteError || (avc->f.states & CCore)) {
461 afs_InvalidateAllSegments(avc);
464 afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
465 ICL_TYPE_INT32, avc->f.m.Length, ICL_TYPE_INT32, code);
466 /* would like a Trace5, but it doesn't exist... */
467 afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
468 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
469 avc->lock.excl_locked);
470 afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
471 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
472 avc->lock.readers_reading, ICL_TYPE_INT32,
473 avc->lock.num_waiting);
476 * Finally, if updated DataVersion matches newDV, we did all of the
477 * stores. If mapDV indicates that the page cache was flushed up
478 * to when we started the store, then we can relabel them as flushed
479 * as recently as newDV.
480 * Turn off CDirty bit because the stored data is now in sync with server.
482 if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
483 if ((!(afs_dvhack || foreign) && hsame(avc->f.m.DataVersion, newDV))
484 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
485 hset(avc->mapDV, newDV);
486 avc->f.states &= ~CDirty;
489 osi_FreeLargeSpace(dcList);
491 /* If not the final write a temporary error is ok. */
492 if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
497 } /*afs_StoreAllSegments (new 03/02/94) */
501 * afs_InvalidateAllSegments
504 * Invalidates all chunks for a given file
507 * avc : Pointer to vcache entry.
510 * For example, called after an error has been detected. Called
511 * with avc write-locked, and afs_xdcache unheld.
/*
 * afs_InvalidateAllSegments
 *
 * Discard every cached chunk of a file (e.g. after a store error), so
 * stale "new" data that never reached the fileserver cannot be served.
 * Also clears pending truncation state and callback/status info.
 * Called with avc write-locked and afs_xdcache unheld.
 *
 * NOTE(review): this listing is an incomplete excerpt — declarations of
 * hash/index/tdc, the dcListMax counting statement, and several braces
 * are not visible here.
 */
515 afs_InvalidateAllSegments(struct vcache *avc)
520 struct dcache **dcList;
521 int i, dcListMax, dcListCount;
523 AFS_STATCNT(afs_InvalidateAllSegments);
524 afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
525 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
526 hash = DVHash(&avc->f.fid);
527 avc->f.truncPos = AFS_NOTRUNC; /* don't truncate later */
528 avc->f.states &= ~CExtendedFile; /* not any more */
529 ObtainWriteLock(&afs_xcbhash, 459);
530 afs_DequeueCallback(avc);
531 avc->f.states &= ~(CStatd | CDirty); /* mark status information as bad, too */
532 ReleaseWriteLock(&afs_xcbhash);
533 if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
534 osi_dnlc_purgedp(avc);
535 /* Blow away pages; for now, only for Solaris */
536 #if (defined(AFS_SUN5_ENV))
537 if (WriteLocked(&avc->lock))
538 osi_ReleaseVM(avc, (afs_ucred_t *)0);
541 * Block out others from screwing with this table; is a read lock
544 ObtainWriteLock(&afs_xdcache, 286);
/* First pass: count this file's chunks so we can size dcList. */
547 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
548 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
549 tdc = afs_GetValidDSlot(index);
551 /* In the case of fatal errors during stores, we MUST
552 * invalidate all of the relevant chunks. Otherwise, the chunks
553 * will be left with the 'new' data that was never successfully
554 * written to the server, but the DV in the dcache is still the
555 * old DV. So, we may indefinitely serve data to applications
556 * that is not actually in the file on the fileserver. If we
557 * cannot afs_GetValidDSlot the appropriate entries, currently
558 * there is no way to ensure the dcache is invalidated. So for
559 * now, to avoid risking serving bad data from the cache, panic
561 osi_Panic("afs_InvalidateAllSegments tdc count");
563 ReleaseReadLock(&tdc->tlock);
564 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
568 index = afs_dvnextTbl[index];
571 dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
/* Second pass: clear dirty/page flags and collect the chunks. */
574 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
575 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
576 tdc = afs_GetValidDSlot(index);
578 /* We cannot proceed after getting this error; we risk serving
579 * incorrect data to applications. So panic instead. See the
580 * above comment next to the previous afs_GetValidDSlot call
582 osi_Panic("afs_InvalidateAllSegments tdc store");
584 ReleaseReadLock(&tdc->tlock);
585 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
586 /* same file? we'll zap it */
587 if (afs_indexFlags[index] & IFDataMod) {
588 afs_stats_cmperf.cacheCurrDirtyChunks--;
589 /* don't write it back */
590 afs_indexFlags[index] &= ~IFDataMod;
592 afs_indexFlags[index] &= ~IFAnyPages;
593 if (dcListCount < dcListMax)
594 dcList[dcListCount++] = tdc;
601 index = afs_dvnextTbl[index];
603 ReleaseWriteLock(&afs_xdcache);
/* Finally, with afs_xdcache dropped, invalidate each collected chunk. */
605 for (i = 0; i < dcListCount; i++) {
608 ObtainWriteLock(&tdc->lock, 679);
610 if (vType(avc) == VDIR)
612 ReleaseWriteLock(&tdc->lock);
616 osi_Free(dcList, dcListMax * sizeof(struct dcache *));
623 * Extend a cache file
625 * \param avc pointer to vcache to extend data for
626 * \param alen Length to extend file to
629 * \note avc must be write locked. May release and reobtain avc and GLOCK
/*
 * afs_ExtendSegments
 *
 * Extend a file in the cache to length 'alen' by writing zero-filled
 * pages into the relevant chunk(s), chunk by chunk.
 *
 * avc  : vcache to extend (write-locked by caller; may be released and
 *        reobtained along with GLOCK per the header comment above).
 * alen : target length.
 * areq : request structure for the calling user.
 *
 * NOTE(review): incomplete excerpt — declarations of tdc/code/towrite/
 * zeros, error-handling branches, and loop braces are not visible.
 */
632 afs_ExtendSegments(struct vcache *avc, afs_size_t alen, struct vrequest *areq)
634 afs_size_t offset, toAdd;
635 struct osi_file *tfile;
640 zeros = afs_osi_Alloc(AFS_PAGESIZE);
643 memset(zeros, 0, AFS_PAGESIZE);
/* Loop one chunk at a time until the cached length reaches alen. */
645 while (avc->f.m.Length < alen) {
646 tdc = afs_ObtainDCacheForWriting(avc, avc->f.m.Length, alen - avc->f.m.Length, areq, 0);
652 toAdd = alen - avc->f.m.Length;
/* Clamp this round's extension to what fits in the current chunk. */
654 offset = avc->f.m.Length - AFS_CHUNKTOBASE(tdc->f.chunk);
655 if (offset + toAdd > AFS_CHUNKTOSIZE(tdc->f.chunk)) {
656 toAdd = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;
658 tfile = afs_CFileOpen(&tdc->f.inode);
/* Append zero pages until the chunk's valid data covers the new end. */
659 while(tdc->validPos < avc->f.m.Length + toAdd) {
662 towrite = (avc->f.m.Length + toAdd) - tdc->validPos;
663 if (towrite > AFS_PAGESIZE) towrite = AFS_PAGESIZE;
665 code = afs_CFileWrite(tfile,
666 tdc->validPos - AFS_CHUNKTOBASE(tdc->f.chunk),
668 tdc->validPos += towrite;
670 afs_CFileClose(tfile);
671 afs_AdjustSize(tdc, offset + toAdd );
672 avc->f.m.Length += toAdd;
673 ReleaseWriteLock(&tdc->lock);
677 afs_osi_Free(zeros, AFS_PAGESIZE);
682 * afs_TruncateAllSegments
685 * Truncate a cache file.
688 * avc : Ptr to vcache entry to truncate.
689 * alen : Number of bytes to make the file.
690 * areq : Ptr to request structure.
693 * Called with avc write-locked; in VFS40 systems, pvnLock is also
/*
 * afs_TruncateAllSegments
 *
 * Truncate a cached file to 'alen' bytes: record the truncation intent
 * (f.truncPos) for the later store, flush/trim VM pages, and shrink
 * every affected dcache chunk on disk.  Extension requests are handled
 * cheaply by just raising the length and setting CExtendedFile.
 * Called with avc write-locked.
 *
 * NOTE(review): incomplete excerpt — declarations of code/index/tdc/
 * dcCount/dcPos/newSize, several braces, and the function's tail
 * (beyond the Solaris vlock release) are not visible here.
 */
697 afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
698 struct vrequest *areq, afs_ucred_t *acred)
706 struct dcache **tdcArray = NULL;
708 AFS_STATCNT(afs_TruncateAllSegments);
709 avc->f.m.Date = osi_Time();
710 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
711 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
712 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
713 if (alen >= avc->f.m.Length) {
715 * Special speedup since Sun's vm extends the file this way;
716 * we've never written to the file thus we can just set the new
717 * length and avoid the needless calls below.
718 * Also used for ftruncate calls which can extend the file.
719 * To completely minimize the possible extra StoreMini RPC, we really
720 * should keep the ExtendedPos as well and clear this flag if we
721 * truncate below that value before we store the file back.
723 avc->f.states |= CExtendedFile;
724 avc->f.m.Length = alen;
727 #if (defined(AFS_SUN5_ENV))
729 /* Zero unused portion of last page */
730 osi_VM_PreTruncate(avc, alen, acred);
/* Block new getpages on Solaris while we drop avc->lock for the
 * VM truncate below. */
734 #if (defined(AFS_SUN5_ENV))
735 ObtainWriteLock(&avc->vlock, 546);
736 avc->activeV++; /* Block new getpages */
737 ReleaseWriteLock(&avc->vlock);
740 ReleaseWriteLock(&avc->lock);
743 /* Flush pages beyond end-of-file. */
744 osi_VM_Truncate(avc, alen, acred);
747 ObtainWriteLock(&avc->lock, 79);
749 avc->f.m.Length = alen;
/* Record the lowest truncation point seen; the storeback path uses
 * f.truncPos to tell the fileserver about it. */
751 if (alen < avc->f.truncPos)
752 avc->f.truncPos = alen;
753 code = DVHash(&avc->f.fid);
755 /* block out others from screwing with this table */
756 ObtainWriteLock(&afs_xdcache, 287);
/* First pass over the hash chain: count this file's chunks. */
759 for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
760 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
761 tdc = afs_GetValidDSlot(index);
763 ReleaseWriteLock(&afs_xdcache);
767 ReleaseReadLock(&tdc->tlock);
768 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
772 index = afs_dvnextTbl[index];
775 /* Now allocate space where we can save those dcache entries, and
776 * do a second pass over them.. Since we're holding xdcache, it
777 * shouldn't be changing.
779 tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
782 for (index = afs_dvhashTbl[code]; index != NULLIDX; index = afs_dvnextTbl[index]) {
783 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
784 tdc = afs_GetValidDSlot(index);
786 /* make sure we put back all of the tdcArray members before
788 /* remember, the last valid tdc is at dcPos-1, so start at
789 * dcPos-1, not at dcPos itself. */
790 for (dcPos = dcPos - 1; dcPos >= 0; dcPos--) {
791 tdc = tdcArray[dcPos];
797 ReleaseReadLock(&tdc->tlock);
798 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
799 /* same file, and modified, we'll store it back */
800 if (dcPos < dcCount) {
801 tdcArray[dcPos++] = tdc;
811 ReleaseWriteLock(&afs_xdcache);
813 /* Now we loop over the array of dcache entries and truncate them */
814 for (index = 0; index < dcPos; index++) {
815 struct osi_file *tfile;
817 tdc = tdcArray[index];
819 newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
822 ObtainSharedLock(&tdc->lock, 672);
/* Only chunks that extend past the new length need trimming. */
823 if (newSize < tdc->f.chunkBytes && newSize < MAX_AFS_UINT32) {
824 UpgradeSToWLock(&tdc->lock, 673);
825 tdc->f.states |= DWriting;
826 tfile = afs_CFileOpen(&tdc->f.inode);
827 afs_CFileTruncate(tfile, (afs_int32)newSize);
828 afs_CFileClose(tfile);
829 afs_AdjustSize(tdc, (afs_int32)newSize);
830 if (alen < tdc->validPos) {
831 if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
834 tdc->validPos = alen;
836 ConvertWToSLock(&tdc->lock);
838 ReleaseSharedLock(&tdc->lock);
846 osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
/* Unblock getpages (Solaris) and wake anyone waiting on a revoke. */
848 #if (defined(AFS_SUN5_ENV))
849 ObtainWriteLock(&avc->vlock, 547);
850 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
851 avc->vstates &= ~VRevokeWait;
852 afs_osi_Wakeup((char *)&avc->vstates);
854 ReleaseWriteLock(&avc->vlock);