2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * --------------------- Required definitions ---------------------
13 #include <afsconfig.h>
14 #include "afs/param.h"
17 #include "afs/sysincludes.h" /*Standard vendor system headers */
18 #include "afsincludes.h" /*AFS-based standard headers */
19 #include "afs/afs_stats.h" /* statistics */
20 #include "afs/afs_cbqueue.h"
21 #include "afs/afs_osidnlc.h"
/* File-wide stamp counter; initialized to 0 here. Its consumers are not
 * visible in this fragment — presumably used to stamp store operations;
 * TODO(review): confirm against the full file. */
23 afs_uint32 afs_stampValue = 0;
29 * Send a truncation request to a FileServer.
35 * We're write-locked upon entry.
/*
 * afs_StoreMini: issue a (no-payload) StoreData RPC whose purpose is to set
 * the file's length on the fileserver (truncate, or record an extension)
 * and push the client's modification time.
 *
 * NOTE(review): this extract is missing lines (declarations of tc/code,
 * the connection retry loop, afs_Analyze, XSTATS bookkeeping, braces).
 * Comments below describe only the visible code.
 */
39 afs_StoreMini(struct vcache *avc, struct vrequest *areq)
42 struct AFSStoreStatus InStatus;
43 struct AFSFetchStatus OutStatus;
44 struct AFSVolSync tsync;
46 struct rx_call *tcall;
47 struct rx_connection *rxconn;
48 afs_size_t tlen, xlen = 0;
50 AFS_STATCNT(afs_StoreMini);
51 afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
52 ICL_TYPE_INT32, avc->f.m.Length);
/* Target length = min(current client length, pending truncation point). */
53 tlen = avc->f.m.Length;
54 if (avc->f.truncPos < tlen)
55 tlen = avc->f.truncPos;
/* The truncation/extension is being pushed now, so clear the pending state. */
56 avc->f.truncPos = AFS_NOTRUNC;
57 avc->f.states &= ~CExtendedFile;
60 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
62 #ifdef AFS_64BIT_CLIENT
66 tcall = rx_NewCall(rxconn);
68 /* Set the client mod time since we always want the file
69 * to have the client's mod time and not the server's one
70 * (to avoid problems with make, etc.) It almost always
71 * works fine with standard afs because the server/client
72 * times are in sync and more importantly this storemini
73 * it's a special call that would typically be followed by
74 * the proper store-data or store-status calls.
76 InStatus.Mask = AFS_SETMODTIME;
77 InStatus.ClientModTime = avc->f.m.Date;
78 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
79 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64, ICL_TYPE_FID,
80 &avc->f.fid.Fid, ICL_TYPE_OFFSET,
81 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
82 ICL_HANDLE_OFFSET(xlen), ICL_TYPE_OFFSET,
83 ICL_HANDLE_OFFSET(tlen));
85 #ifdef AFS_64BIT_CLIENT
/* Prefer the 64-bit RPC unless this server is already known to lack it. */
86 if (!afs_serverHasNo64Bit(tc)) {
88 StartRXAFS_StoreData64(tcall,
89 (struct AFSFid *)&avc->f.fid.Fid,
90 &InStatus, avc->f.m.Length,
91 (afs_size_t) 0, tlen);
/* 32-bit fallback: refuse if either length, or their difference check,
 * would not fit in a signed 32-bit quantity. */
96 if ((avc->f.m.Length > 0x7fffffff) ||
97 (tlen > 0x7fffffff) ||
98 ((0x7fffffff - tlen) < avc->f.m.Length)) {
/* l1/l2 are 32-bit copies of the lengths — declared on lines missing
 * from this extract. */
103 StartRXAFS_StoreData(tcall,
104 (struct AFSFid *)&avc->f.fid.Fid,
105 &InStatus, l1, 0, l2);
107 #else /* AFS_64BIT_CLIENT */
109 StartRXAFS_StoreData(tcall, (struct AFSFid *)&avc->f.fid.Fid,
110 &InStatus, avc->f.m.Length, 0, tlen);
111 #endif /* AFS_64BIT_CLIENT */
113 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
115 #ifdef AFS_64BIT_CLIENT
118 code = rx_EndCall(tcall, code);
121 #ifdef AFS_64BIT_CLIENT
/* Server doesn't implement the 64-bit opcode: remember that, so the retry
 * (loop structure not visible here) uses the 32-bit variant. */
122 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
123 afs_serverSetNo64Bit(tc);
126 #endif /* AFS_64BIT_CLIENT */
/* Continuation of the afs_Analyze() retry predicate; the call's first
 * line is missing from this extract. */
130 (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STOREDATA,
/* On success, merge the fetched status back into the vcache. */
134 afs_ProcessFS(avc, &OutStatus, areq);
140 * afs_StoreAllSegments
143 * Stores all modified segments back to server
146 * avc : Pointer to vcache entry.
147 * areq : Pointer to request structure.
150 * Called with avc write-locked.
/* Batch size: number of dirty chunks gathered and stored per pass of
 * afs_StoreAllSegments. Smaller on HP-UX — presumably to limit kernel
 * memory/stack pressure; TODO(review): confirm rationale. The #else/#endif
 * lines are missing from this extract. */
152 #if defined (AFS_HPUX_ENV)
153 int NCHUNKSATONCE = 3;
155 int NCHUNKSATONCE = 64;
/*
 * afs_StoreAllSegments: write every dirty (IFDataMod) chunk of avc back to
 * the fileserver, in batches of NCHUNKSATONCE, then relabel the surviving
 * dcache entries with the new data version.
 *
 * NOTE(review): many lines are missing from this extract (trailing
 * parameters of the signature, several declarations, loop braces, the
 * do/while opener, error paths). Comments describe only visible code.
 */
161 afs_StoreAllSegments(struct vcache *avc, struct vrequest *areq,
167 afs_int32 origCBs, foreign = 0;
169 afs_hyper_t newDV, oldDV; /* DV when we start, and finish, respectively */
170 struct dcache **dcList;
171 unsigned int i, j, minj, moredata, high, off;
172 afs_size_t maxStoredLength; /* highest offset we've written to server. */
173 int safety, marineronce = 0;
175 AFS_STATCNT(afs_StoreAllSegments);
177 hash = DVHash(&avc->f.fid);
178 foreign = (avc->f.states & CForeign);
179 dcList = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
180 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
181 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
182 #if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
183 /* In the aix vm implementation we need to do the vm_writep even
184 * on the memcache case since that's where we adjust the file's size
185 * and finish flushing partial vm pages.
187 if ((cacheDiskType != AFS_FCACHE_TYPE_MEM) ||
188 (sync & AFS_VMSYNC_INVAL) || (sync & AFS_VMSYNC) ||
189 (sync & AFS_LASTSTORE))
190 #endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
192 /* If we're not diskless, reading a file may stress the VM
193 * system enough to cause a pageout, and this vnode would be
194 * locked when the pageout occurs. We can prevent this problem
195 * by making sure all dirty pages are already flushed. We don't
196 * do this when diskless because reading a diskless (i.e.
197 * memory-resident) chunk doesn't require using new VM, and we
198 * also don't want to dump more dirty data into a diskless cache,
199 * since they're smaller, and we might exceed its available
202 #if defined(AFS_SUN5_ENV)
203 if (sync & AFS_VMSYNC_INVAL) /* invalidate VM pages */
204 osi_VM_TryToSmush(avc, CRED(), 1);
/* Flush dirty VM pages into the cache before we scan for dirty chunks. */
207 osi_VM_StoreAllSegments(avc);
209 if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
210 /* This will probably make someone sad ... */
211 /*printf("Net down in afs_StoreSegments\n");*/
216 * Can't do this earlier because osi_VM_StoreAllSegments drops locks
217 * and can indirectly do some stores that increase the DV.
/* Snapshot the DV; newDV is bumped as stores complete, oldDV is the
 * baseline used later to decide which chunks may be relabeled. */
219 hset(oldDV, avc->f.m.DataVersion);
220 hset(newDV, avc->f.m.DataVersion);
222 ConvertWToSLock(&avc->lock);
225 * Subsequent code expects a sorted list, and it expects all the
226 * chunks in the list to be contiguous, so we need a sort and a
227 * while loop in here, too - but this will work for a first pass...
228 * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
229 * bin sort, I guess. Chunk numbers start with 0
231 * - Have to get a write lock on xdcache because GetDSlot might need it (if
232 * the chunk doesn't have a dcache struct).
233 * This seems like overkill in most cases.
234 * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
235 * xdcache, then relock xdcache and try to use index. It is done
236 * a lot elsewhere in the CM, but I'm not buying that argument.
237 * - should be able to check IFDataMod without doing the GetDSlot (just
238 * hold afs_xdcache). That way, it's easy to do this without the
239 * writelock on afs_xdcache, and we save unnecessary disk
240 * operations. I don't think that works, 'cuz the next pointers
/* Remember the callback count so we can detect intervening callbacks. */
243 origCBs = afs_allCBs;
/* ---- Batch loop: gather up to NCHUNKSATONCE dirty chunks in the window
 * [minj, minj+NCHUNKSATONCE), store them, then slide the window. The
 * do { ... } opener is on a line missing from this extract. ---- */
249 memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
253 /* lock and start over from beginning of hash chain
254 * in order to avoid a race condition. */
255 ObtainWriteLock(&afs_xdcache, 284);
256 index = afs_dvhashTbl[hash];
258 for (j = 0; index != NULLIDX;) {
259 if ((afs_indexFlags[index] & IFDataMod)
260 && (afs_indexUnique[index] == avc->f.fid.Fid.Unique)) {
261 tdc = afs_GetValidDSlot(index); /* refcount+1. */
/* Error path for a failed GetDSlot — intervening lines missing. */
263 ReleaseWriteLock(&afs_xdcache);
267 ReleaseReadLock(&tdc->tlock);
/* Keep only chunks of this file that fall inside the current window. */
268 if (!FidCmp(&tdc->f.fid, &avc->f.fid) && tdc->f.chunk >= minj) {
269 off = tdc->f.chunk - minj;
270 if (off < NCHUNKSATONCE) {
272 osi_Panic("dclist slot already in use!");
273 if (afs_mariner && !marineronce) {
274 /* first chunk only */
275 afs_MarinerLog("store$Storing", avc);
282 /* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
283 * can't grab here, due to lock ordering with afs_xdcache.
284 * So, disable this shortcut for now. -- kolya 2001-10-13
286 /* shortcut: big win for little files */
287 /* tlen -= tdc->f.chunkBytes;
/* Batch full — stop gathering; "moredata" handling is on missing lines. */
294 if (j == NCHUNKSATONCE)
301 index = afs_dvnextTbl[index];
303 ReleaseWriteLock(&afs_xdcache);
305 /* this guy writes chunks, puts back dcache structs, and bumps newDV */
306 /* "moredata" just says "there are more dirty chunks yet to come".
310 afs_CacheStoreVCache(dcList, avc, areq, sync,
311 minj, high, moredata,
312 &newDV, &maxStoredLength);
313 /* Release any zero-length dcache entries in our interval
314 * that we locked but didn't store back above.
316 for (j = 0; j <= high; j++) {
319 osi_Assert(tdc->f.chunkBytes == 0);
320 ReleaseSharedLock(&tdc->lock);
/* Slide the chunk window and loop while there is more dirty data. */
326 minj += NCHUNKSATONCE;
327 } while (!code && moredata);
330 UpgradeSToWLock(&avc->lock, 29);
332 /* send a trivial truncation store if did nothing else */
335 * Call StoreMini if we haven't written enough data to extend the
336 * file at the fileserver to the client's notion of the file length.
338 if ((avc->f.truncPos != AFS_NOTRUNC)
339 || ((avc->f.states & CExtendedFile)
340 && (maxStoredLength < avc->f.m.Length))) {
341 code = afs_StoreMini(avc, areq);
343 hadd32(newDV, 1); /* just bumped here, too */
345 avc->f.states &= ~CExtendedFile;
349 * Finally, turn off DWriting, turn on DFEntryMod,
350 * update f.versionNo.
351 * A lot of this could be integrated into the loop above
/* ---- Relabel pass: re-walk the hash chain in the same windows and mark
 * stored chunks with the new DV. Several lines (loop opener, memset
 * target) are missing from this extract. ---- */
362 NCHUNKSATONCE * sizeof(struct dcache *));
364 /* overkill, but it gets the lock in case GetDSlot needs it */
365 ObtainWriteLock(&afs_xdcache, 285);
367 for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
368 index != NULLIDX && safety < afs_cacheFiles + 2;
369 index = afs_dvnextTbl[index]) {
371 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
372 tdc = afs_GetValidDSlot(index);
374 /* This is okay; since manipulating the dcaches at this
375 * point is best-effort. We only get a dcache here to
376 * increment the dv and turn off DWriting. If we were
377 * supposed to do that for a dcache, but could not
378 * due to an I/O error, it just means the dv won't
379 * be updated so we don't be able to use that cached
380 * chunk in the future. That's inefficient, but not
384 ReleaseReadLock(&tdc->tlock);
386 if (!FidCmp(&tdc->f.fid, &avc->f.fid)
387 && tdc->f.chunk >= minj) {
388 off = tdc->f.chunk - minj;
389 if (off < NCHUNKSATONCE) {
390 /* this is the file, and the correct chunk range */
391 if (j >= NCHUNKSATONCE)
393 ("Too many dcache entries in range\n");
398 if (j == NCHUNKSATONCE)
406 ReleaseWriteLock(&afs_xdcache);
408 for (i = 0; i < j; i++) {
409 /* Iterate over the dcache entries we collected above */
411 ObtainSharedLock(&tdc->lock, 677);
413 /* was code here to clear IFDataMod, but it should only be done
414 * in storedcache and storealldcache.
416 /* Only increase DV if we had up-to-date data to start with.
417 * Otherwise, we could be falsely upgrading an old chunk
418 * (that we never read) into one labelled with the current
419 * DV #. Also note that we check that no intervening stores
420 * occurred, otherwise we might mislabel cache information
421 * for a chunk that we didn't store this time
423 /* Don't update the version number if it's not yet set. */
424 if (!hsame(tdc->f.versionNo, h_unset)
425 && hcmp(tdc->f.versionNo, oldDV) >= 0) {
/* "foreign"/afs_dvhack servers don't bump DV predictably, so for them
 * we instead require that no callbacks arrived during the store. */
427 if ((!(afs_dvhack || foreign)
428 && hsame(avc->f.m.DataVersion, newDV))
429 || ((afs_dvhack || foreign)
430 && (origCBs == afs_allCBs))) {
431 /* no error, this is the DV */
433 UpgradeSToWLock(&tdc->lock, 678);
434 hset(tdc->f.versionNo, avc->f.m.DataVersion);
435 tdc->dflags |= DFEntryMod;
436 /* DWriting may not have gotten cleared above, if all
437 * we did was a StoreMini */
438 tdc->f.states &= ~DWriting;
439 ConvertWToSLock(&tdc->lock);
443 ReleaseSharedLock(&tdc->lock);
447 minj += NCHUNKSATONCE;
454 * Invalidate chunks after an error for ccores files since
455 * afs_inactive won't be called for these and they won't be
456 * invalidated. Also discard data if it's a permanent error from the
459 if (areq->permWriteError || (avc->f.states & CCore)) {
460 afs_InvalidateAllSegments(avc);
463 afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
464 ICL_TYPE_INT32, avc->f.m.Length, ICL_TYPE_INT32, code);
465 /* would like a Trace5, but it doesn't exist... */
466 afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
467 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
468 avc->lock.excl_locked);
469 afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
470 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
471 avc->lock.readers_reading, ICL_TYPE_INT32,
472 avc->lock.num_waiting);
475 * Finally, if updated DataVersion matches newDV, we did all of the
476 * stores. If mapDV indicates that the page cache was flushed up
477 * to when we started the store, then we can relabel them as flushed
478 * as recently as newDV.
479 * Turn off CDirty bit because the stored data is now in sync with server.
481 if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
482 if ((!(afs_dvhack || foreign) && hsame(avc->f.m.DataVersion, newDV))
483 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
484 hset(avc->mapDV, newDV);
485 avc->f.states &= ~CDirty;
488 osi_FreeLargeSpace(dcList);
490 /* If not the final write a temporary error is ok. */
491 if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
496 } /*afs_StoreAllSegments (new 03/02/94) */
500 * afs_InvalidateAllSegments
503 * Invalidates all chunks for a given file
506 * avc : Pointer to vcache entry.
509 * For example, called after an error has been detected. Called
510 * with avc write-locked, and afs_xdcache unheld.
/*
 * Two-pass scheme over the DV hash chain: first pass counts matching
 * chunks (to size dcList), second pass clears IFDataMod/IFAnyPages and
 * collects the entries, which are then truncated/released after dropping
 * afs_xdcache. NOTE(review): this extract is missing lines (declarations,
 * the counting increment, osi_ReleaseVM bracket, the per-entry cleanup in
 * the final loop); comments describe only visible code.
 */
514 afs_InvalidateAllSegments(struct vcache *avc)
519 struct dcache **dcList;
520 int i, dcListMax, dcListCount;
522 AFS_STATCNT(afs_InvalidateAllSegments);
523 afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
524 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
525 hash = DVHash(&avc->f.fid);
526 avc->f.truncPos = AFS_NOTRUNC; /* don't truncate later */
527 avc->f.states &= ~CExtendedFile; /* not any more */
528 ObtainWriteLock(&afs_xcbhash, 459);
529 afs_DequeueCallback(avc);
530 avc->f.states &= ~(CStatd | CDirty); /* mark status information as bad, too */
531 ReleaseWriteLock(&afs_xcbhash);
/* Directories (and odd-numbered vnodes, which are directories in AFS-3 FID
 * convention — TODO(review) confirm) must also leave the DNLC. */
532 if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
533 osi_dnlc_purgedp(avc);
534 /* Blow away pages; for now, only for Solaris */
535 #if (defined(AFS_SUN5_ENV))
536 if (WriteLocked(&avc->lock))
537 osi_ReleaseVM(avc, (afs_ucred_t *)0);
540 * Block out others from screwing with this table; is a read lock
543 ObtainWriteLock(&afs_xdcache, 286);
/* Pass 1: count this file's chunks so dcList can be sized. */
546 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
547 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
548 tdc = afs_GetValidDSlot(index);
550 /* In the case of fatal errors during stores, we MUST
551 * invalidate all of the relevant chunks. Otherwise, the chunks
552 * will be left with the 'new' data that was never successfully
553 * written to the server, but the DV in the dcache is still the
554 * old DV. So, we may indefinitely serve data to applications
555 * that is not actually in the file on the fileserver. If we
556 * cannot afs_GetValidDSlot the appropriate entries, currently
557 * there is no way to ensure the dcache is invalidated. So for
558 * now, to avoid risking serving bad data from the cache, panic
560 osi_Panic("afs_InvalidateAllSegments tdc count");
562 ReleaseReadLock(&tdc->tlock);
563 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
567 index = afs_dvnextTbl[index];
570 dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
/* Pass 2: clear dirty/page flags and collect entries for cleanup. */
573 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
574 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
575 tdc = afs_GetValidDSlot(index);
577 /* We cannot proceed after getting this error; we risk serving
578 * incorrect data to applications. So panic instead. See the
579 * above comment next to the previous afs_GetValidDSlot call
581 osi_Panic("afs_InvalidateAllSegments tdc store");
583 ReleaseReadLock(&tdc->tlock);
584 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
585 /* same file? we'll zap it */
586 if (afs_indexFlags[index] & IFDataMod) {
587 afs_stats_cmperf.cacheCurrDirtyChunks--;
588 /* don't write it back */
589 afs_indexFlags[index] &= ~IFDataMod;
591 afs_indexFlags[index] &= ~IFAnyPages;
592 if (dcListCount < dcListMax)
593 dcList[dcListCount++] = tdc;
600 index = afs_dvnextTbl[index];
602 ReleaseWriteLock(&afs_xdcache);
/* Cleanup: with afs_xdcache dropped, truncate/release each collected
 * entry (truncation calls are on lines missing from this extract). */
604 for (i = 0; i < dcListCount; i++) {
607 ObtainWriteLock(&tdc->lock, 679);
609 if (vType(avc) == VDIR)
611 ReleaseWriteLock(&tdc->lock);
615 osi_Free(dcList, dcListMax * sizeof(struct dcache *));
622 * Extend a cache file
624 * \param avc pointer to vcache to extend data for
625 * \param alen Length to extend file to
628 * \note avc must be write locked. May release and reobtain avc and GLOCK
/*
 * Grows the file to alen by appending zero-filled pages, one cache chunk
 * at a time. NOTE(review): lines are missing from this extract (code/
 * towrite declarations, error checks, loop braces); comments describe
 * only visible code.
 */
631 afs_ExtendSegments(struct vcache *avc, afs_size_t alen, struct vrequest *areq)
633 afs_size_t offset, toAdd;
634 struct osi_file *tfile;
/* One page of zeros, reused for every write below. */
639 zeros = afs_osi_Alloc(AFS_PAGESIZE);
642 memset(zeros, 0, AFS_PAGESIZE);
644 while (avc->f.m.Length < alen) {
645 tdc = afs_ObtainDCacheForWriting(avc, avc->f.m.Length, alen - avc->f.m.Length, areq, 0);
/* Clamp this iteration's growth to what fits in the current chunk. */
651 toAdd = alen - avc->f.m.Length;
653 offset = avc->f.m.Length - AFS_CHUNKTOBASE(tdc->f.chunk);
654 if (offset + toAdd > AFS_CHUNKTOSIZE(tdc->f.chunk)) {
655 toAdd = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;
657 tfile = afs_CFileOpen(&tdc->f.inode);
/* Zero-fill from the chunk's current valid position up to the target. */
658 while(tdc->validPos < avc->f.m.Length + toAdd) {
661 towrite = (avc->f.m.Length + toAdd) - tdc->validPos;
662 if (towrite > AFS_PAGESIZE) towrite = AFS_PAGESIZE;
664 code = afs_CFileWrite(tfile,
665 tdc->validPos - AFS_CHUNKTOBASE(tdc->f.chunk),
667 tdc->validPos += towrite;
669 afs_CFileClose(tfile);
670 afs_AdjustSize(tdc, offset + toAdd );
671 avc->f.m.Length += toAdd;
672 ReleaseWriteLock(&tdc->lock);
676 afs_osi_Free(zeros, AFS_PAGESIZE);
681 * afs_TruncateAllSegments
684 * Truncate a cache file.
687 * avc : Ptr to vcache entry to truncate.
688 * alen : Number of bytes to make the file.
689 * areq : Ptr to request structure.
692 * Called with avc write-locked; in VFS40 systems, pvnLock is also
/*
 * Shrinks (or, via the CExtendedFile fast path, logically extends) the
 * cached file to alen: flushes VM pages past EOF, then truncates every
 * cached chunk that extends beyond the new length. NOTE(review): this
 * extract is missing lines throughout AND the function continues past the
 * end of the visible text — comments describe only the visible code.
 */
696 afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
697 struct vrequest *areq, afs_ucred_t *acred)
705 struct dcache **tdcArray = NULL;
707 AFS_STATCNT(afs_TruncateAllSegments);
708 avc->f.m.Date = osi_Time();
709 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
710 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
711 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
712 if (alen >= avc->f.m.Length) {
714 * Special speedup since Sun's vm extends the file this way;
715 * we've never written to the file thus we can just set the new
716 * length and avoid the needless calls below.
717 * Also used for ftruncate calls which can extend the file.
718 * To completely minimize the possible extra StoreMini RPC, we really
719 * should keep the ExtendedPos as well and clear this flag if we
720 * truncate below that value before we store the file back.
722 avc->f.states |= CExtendedFile;
723 avc->f.m.Length = alen;
726 #if (defined(AFS_SUN5_ENV))
728 /* Zero unused portion of last page */
729 osi_VM_PreTruncate(avc, alen, acred);
733 #if (defined(AFS_SUN5_ENV))
734 ObtainWriteLock(&avc->vlock, 546);
735 avc->activeV++; /* Block new getpages */
736 ReleaseWriteLock(&avc->vlock);
/* Drop avc->lock around the VM truncate, which may sleep/page. */
739 ReleaseWriteLock(&avc->lock);
742 /* Flush pages beyond end-of-file. */
743 osi_VM_Truncate(avc, alen, acred);
746 ObtainWriteLock(&avc->lock, 79);
748 avc->f.m.Length = alen;
750 if (alen < avc->f.truncPos)
751 avc->f.truncPos = alen;
/* Reuses 'code' as the DV hash bucket index here — TODO(review): odd but
 * matches the visible declarations. */
752 code = DVHash(&avc->f.fid);
754 /* block out others from screwing with this table */
755 ObtainWriteLock(&afs_xdcache, 287);
/* Pass 1: count this file's chunks to size tdcArray. */
758 for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
759 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
760 tdc = afs_GetValidDSlot(index);
762 ReleaseWriteLock(&afs_xdcache);
766 ReleaseReadLock(&tdc->tlock);
767 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
771 index = afs_dvnextTbl[index];
774 /* Now allocate space where we can save those dcache entries, and
775 * do a second pass over them.. Since we're holding xdcache, it
776 * shouldn't be changing.
778 tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
/* Pass 2: collect matching entries into tdcArray. */
781 for (index = afs_dvhashTbl[code]; index != NULLIDX; index = afs_dvnextTbl[index]) {
782 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
783 tdc = afs_GetValidDSlot(index);
785 /* make sure we put back all of the tdcArray members before
787 /* remember, the last valid tdc is at dcPos-1, so start at
788 * dcPos-1, not at dcPos itself. */
789 for (dcPos = dcPos - 1; dcPos >= 0; dcPos--) {
790 tdc = tdcArray[dcPos];
796 ReleaseReadLock(&tdc->tlock);
797 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
798 /* same file, and modified, we'll store it back */
799 if (dcPos < dcCount) {
800 tdcArray[dcPos++] = tdc;
810 ReleaseWriteLock(&afs_xdcache);
812 /* Now we loop over the array of dcache entries and truncate them */
813 for (index = 0; index < dcPos; index++) {
814 struct osi_file *tfile;
816 tdc = tdcArray[index];
/* Bytes of this chunk that survive the truncation (negative means the
 * whole chunk is past the new EOF — handling on missing lines). */
818 newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
821 ObtainSharedLock(&tdc->lock, 672);
822 if (newSize < tdc->f.chunkBytes && newSize < MAX_AFS_UINT32) {
823 UpgradeSToWLock(&tdc->lock, 673);
824 tdc->f.states |= DWriting;
825 tfile = afs_CFileOpen(&tdc->f.inode);
826 afs_CFileTruncate(tfile, (afs_int32)newSize);
827 afs_CFileClose(tfile);
828 afs_AdjustSize(tdc, (afs_int32)newSize);
829 if (alen < tdc->validPos) {
830 if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
833 tdc->validPos = alen;
835 ConvertWToSLock(&tdc->lock);
837 ReleaseSharedLock(&tdc->lock);
845 osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
847 #if (defined(AFS_SUN5_ENV))
/* Re-enable getpages blocked above and wake any revoke waiters. */
848 ObtainWriteLock(&avc->vlock, 547);
849 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
850 avc->vstates &= ~VRevokeWait;
851 afs_osi_Wakeup((char *)&avc->vstates);
853 ReleaseWriteLock(&avc->vlock);