2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * --------------------- Required definitions ---------------------
13 #include <afsconfig.h>
14 #include "afs/param.h"
17 #include "afs/sysincludes.h" /*Standard vendor system headers */
18 #include "afsincludes.h" /*AFS-based standard headers */
19 #include "afs/afs_stats.h" /* statistics */
20 #include "afs/afs_cbqueue.h"
21 #include "afs/afs_osidnlc.h"
/* File-scope stamp value, initialized to zero.  Its readers/updaters are
 * not visible in this chunk -- NOTE(review): confirm usage elsewhere in
 * the file before relying on its semantics. */
23 afs_uint32 afs_stampValue = 0;
29 * Send a truncation request to a FileServer.
35 * We're write-locked upon entry.
/*
 * afs_StoreMini -- issue a data-less StoreData RPC whose only effect is to
 * set the file's length (truncate or extend) and its client mod time on
 * the fileserver, without shipping any file data.
 *
 * avc  - vcache entry for the file; caller holds the write lock on entry.
 * areq - request structure used for connection selection / error handling.
 *
 * NOTE(review): this is an elided view of the function -- the declarations
 * of 'code'/'tc', the retry loop, and several closing braces are missing
 * here, so the comments below describe only the visible lines.
 */
39 afs_StoreMini(struct vcache *avc, struct vrequest *areq)
42 struct AFSStoreStatus InStatus;
43 struct AFSFetchStatus OutStatus;
44 struct AFSVolSync tsync;
46 struct rx_call *tcall;
47 struct rx_connection *rxconn;
48 afs_size_t tlen, xlen = 0;
50 AFS_STATCNT(afs_StoreMini);
51 afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
52 ICL_TYPE_INT32, avc->f.m.Length);
/* Target length: current file length, clamped down to any pending
 * truncation position. */
53 tlen = avc->f.m.Length;
54 if (avc->f.truncPos < tlen)
55 tlen = avc->f.truncPos;
/* This RPC handles the pending truncate/extend, so clear that state now. */
56 avc->f.truncPos = AFS_NOTRUNC;
57 avc->f.states &= ~CExtendedFile;
58 memset(&InStatus, 0, sizeof(InStatus));
61 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
63 #ifdef AFS_64BIT_CLIENT
67 tcall = rx_NewCall(rxconn);
69 /* Set the client mod time since we always want the file
70 * to have the client's mod time and not the server's one
71 * (to avoid problems with make, etc.) It almost always
72 * works fine with standard afs because the server/client
73 * times are in sync and, more importantly, this storemini
74 * is a special call that would typically be followed by
75 * the proper store-data or store-status calls.
77 InStatus.Mask = AFS_SETMODTIME;
78 InStatus.ClientModTime = avc->f.m.Date;
79 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
80 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64, ICL_TYPE_FID,
81 &avc->f.fid.Fid, ICL_TYPE_OFFSET,
82 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
83 ICL_HANDLE_OFFSET(xlen), ICL_TYPE_OFFSET,
84 ICL_HANDLE_OFFSET(tlen));
/* Prefer the 64-bit StoreData RPC unless this server is already known to
 * lack 64-bit support. */
86 #ifdef AFS_64BIT_CLIENT
87 if (!afs_serverHasNo64Bit(tc)) {
89 StartRXAFS_StoreData64(tcall,
90 (struct AFSFid *)&avc->f.fid.Fid,
91 &InStatus, avc->f.m.Length,
92 (afs_size_t) 0, tlen);
/* 32-bit fallback: reject lengths that do not fit in a signed 32-bit
 * offset; the third test guards the length+tlen sum against overflow. */
97 if ((avc->f.m.Length > 0x7fffffff) ||
98 (tlen > 0x7fffffff) ||
99 ((0x7fffffff - tlen) < avc->f.m.Length)) {
104 StartRXAFS_StoreData(tcall,
105 (struct AFSFid *)&avc->f.fid.Fid,
106 &InStatus, l1, 0, l2);
108 #else /* AFS_64BIT_CLIENT */
110 StartRXAFS_StoreData(tcall, (struct AFSFid *)&avc->f.fid.Fid,
111 &InStatus, avc->f.m.Length, 0, tlen);
112 #endif /* AFS_64BIT_CLIENT */
114 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
116 #ifdef AFS_64BIT_CLIENT
119 code = rx_EndCall(tcall, code);
/* RXGEN_OPCODE from the 64-bit attempt means the server does not implement
 * that op: remember it and presumably fall back to the 32-bit RPC --
 * NOTE(review): the retry path itself is elided from this view. */
122 #ifdef AFS_64BIT_CLIENT
123 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
124 afs_serverSetNo64Bit(tc);
127 #endif /* AFS_64BIT_CLIENT */
131 (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STOREDATA,
/* On success, merge the returned fetch status into the vcache. */
135 afs_ProcessFS(avc, &OutStatus, areq);
141 * afs_StoreAllSegments
144 * Stores all modified segments back to server
147 * avc : Pointer to vcache entry.
148 * areq : Pointer to request structure.
151 * Called with avc write-locked.
/* Number of dirty chunks handed to the fileserver per batch in
 * afs_StoreAllSegments; HP-UX uses a smaller batch.
 * NOTE(review): the matching #else/#endif lines are elided in this view. */
153 #if defined (AFS_HPUX_ENV)
154 int NCHUNKSATONCE = 3;
156 int NCHUNKSATONCE = 64;
/*
 * afs_StoreAllSegments -- store every modified (IFDataMod) cache chunk of
 * 'avc' back to the fileserver in batches of NCHUNKSATONCE, then relabel
 * the stored chunks with the resulting data version.
 *
 * avc  - vcache entry; caller holds it write-locked.
 * areq - request structure.
 * sync - flags: AFS_VMSYNC / AFS_VMSYNC_INVAL / AFS_LASTSTORE.
 *
 * NOTE(review): elided view -- several declarations (code, tdc, hash,
 * index, dcCount, bytes, ...) and closing braces are missing; the comments
 * below describe only the visible lines.
 */
162 afs_StoreAllSegments(struct vcache *avc, struct vrequest *areq,
168 afs_int32 origCBs, foreign = 0;
170 afs_hyper_t newDV, oldDV; /* DV when we start, and finish, respectively */
171 struct dcache **dcList;
172 unsigned int i, j, minj, moredata, high, off;
173 afs_size_t maxStoredLength; /* highest offset we've written to server. */
174 int safety, marineronce = 0;
176 AFS_STATCNT(afs_StoreAllSegments);
178 hash = DVHash(&avc->f.fid);
179 foreign = (avc->f.states & CForeign);
/* Scratch array for the batch of dcache pointers; freed at the end. */
180 dcList = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
181 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
182 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
183 #if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
184 /* In the aix vm implementation we need to do the vm_writep even
185 * on the memcache case since that's we adjust the file's size
186 * and finish flushing partial vm pages.
188 if ((cacheDiskType != AFS_FCACHE_TYPE_MEM) ||
189 (sync & AFS_VMSYNC_INVAL) || (sync & AFS_VMSYNC) ||
190 (sync & AFS_LASTSTORE))
191 #endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
193 /* If we're not diskless, reading a file may stress the VM
194 * system enough to cause a pageout, and this vnode would be
195 * locked when the pageout occurs. We can prevent this problem
196 * by making sure all dirty pages are already flushed. We don't
197 * do this when diskless because reading a diskless (i.e.
198 * memory-resident) chunk doesn't require using new VM, and we
199 * also don't want to dump more dirty data into a diskless cache,
200 * since they're smaller, and we might exceed its available
203 #if defined(AFS_SUN5_ENV)
204 if (sync & AFS_VMSYNC_INVAL) /* invalidate VM pages */
205 osi_VM_TryToSmush(avc, CRED(), 1);
208 osi_VM_StoreAllSegments(avc);
/* Disconnected mode: cannot reach the fileserver right now. */
210 if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
211 /* This will probably make someone sad ... */
212 /*printf("Net down in afs_StoreSegments\n");*/
217 * Can't do this earlier because osi_VM_StoreAllSegments drops locks
218 * and can indirectly do some stores that increase the DV.
220 hset(oldDV, avc->f.m.DataVersion);
221 hset(newDV, avc->f.m.DataVersion);
223 ConvertWToSLock(&avc->lock);
226 * Subsequent code expects a sorted list, and it expects all the
227 * chunks in the list to be contiguous, so we need a sort and a
228 * while loop in here, too - but this will work for a first pass...
229 * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
230 * bin sort, I guess. Chunk numbers start with 0
232 * - Have to get a write lock on xdcache because GetDSlot might need it (if
233 * the chunk doesn't have a dcache struct).
234 * This seems like overkill in most cases.
235 * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
236 * xdcache, then relock xdcache and try to use index. It is done
237 * a lot elsewhere in the CM, but I'm not buying that argument.
238 * - should be able to check IFDataMod without doing the GetDSlot (just
239 * hold afs_xdcache). That way, it's easy to do this without the
240 * writelock on afs_xdcache, and we save unneccessary disk
241 * operations. I don't think that works, 'cuz the next pointers
/* Snapshot the global callback counter so we can later detect whether any
 * callback activity intervened during the store (used for the foreign /
 * afs_dvhack relabel test below). */
244 origCBs = afs_allCBs;
250 memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
254 /* lock and start over from beginning of hash chain
255 * in order to avoid a race condition. */
256 ObtainWriteLock(&afs_xdcache, 284);
257 index = afs_dvhashTbl[hash];
/* Pass 1: walk the DV hash chain collecting this file's dirty chunks that
 * fall in the current batch window [minj, minj+NCHUNKSATONCE). */
259 for (j = 0; index != NULLIDX;) {
260 if ((afs_indexFlags[index] & IFDataMod)
261 && (afs_indexUnique[index] == avc->f.fid.Fid.Unique)) {
262 tdc = afs_GetValidDSlot(index); /* refcount+1. */
264 ReleaseWriteLock(&afs_xdcache);
268 ReleaseReadLock(&tdc->tlock);
269 if (!FidCmp(&tdc->f.fid, &avc->f.fid) && tdc->f.chunk >= minj) {
270 off = tdc->f.chunk - minj;
271 if (off < NCHUNKSATONCE) {
273 osi_Panic("dclist slot already in use!");
274 if (afs_mariner && !marineronce) {
275 /* first chunk only */
276 afs_MarinerLog("store$Storing", avc);
283 /* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
284 * can't grab here, due to lock ordering with afs_xdcache.
285 * So, disable this shortcut for now. -- kolya 2001-10-13
287 /* shortcut: big win for little files */
288 /* tlen -= tdc->f.chunkBytes;
295 if (j == NCHUNKSATONCE)
302 index = afs_dvnextTbl[index];
304 ReleaseWriteLock(&afs_xdcache);
306 /* this guy writes chunks, puts back dcache structs, and bumps newDV */
307 /* "moredata" just says "there are more dirty chunks yet to come".
311 afs_CacheStoreVCache(dcList, avc, areq, sync,
312 minj, high, moredata,
313 &newDV, &maxStoredLength);
314 /* Release any zero-length dcache entries in our interval
315 * that we locked but didn't store back above.
317 for (j = 0; j <= high; j++) {
320 osi_Assert(tdc->f.chunkBytes == 0);
321 ReleaseSharedLock(&tdc->lock);
/* Advance the batch window and repeat while there are more dirty chunks
 * and no error has occurred. */
327 minj += NCHUNKSATONCE;
328 } while (!code && moredata);
331 UpgradeSToWLock(&avc->lock, 29);
333 /* send a trivial truncation store if did nothing else */
336 * Call StoreMini if we haven't written enough data to extend the
337 * file at the fileserver to the client's notion of the file length.
339 if ((avc->f.truncPos != AFS_NOTRUNC)
340 || ((avc->f.states & CExtendedFile)
341 && (maxStoredLength < avc->f.m.Length))) {
342 code = afs_StoreMini(avc, areq);
344 hadd32(newDV, 1); /* just bumped here, too */
346 avc->f.states &= ~CExtendedFile;
350 * Finally, turn off DWriting, turn on DFEntryMod,
351 * update f.versionNo.
352 * A lot of this could be integrated into the loop above
363 NCHUNKSATONCE * sizeof(struct dcache *));
365 /* overkill, but it gets the lock in case GetDSlot needs it */
366 ObtainWriteLock(&afs_xdcache, 285);
/* Pass 2: re-walk the hash chain to collect the stored chunks so they can
 * be relabelled with the new data version below.  'safety' bounds the walk
 * against a corrupted chain. */
368 for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
369 index != NULLIDX && safety < afs_cacheFiles + 2;
370 index = afs_dvnextTbl[index]) {
372 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
373 tdc = afs_GetValidDSlot(index);
375 /* This is okay; since manipulating the dcaches at this
376 * point is best-effort. We only get a dcache here to
377 * increment the dv and turn off DWriting. If we were
378 * supposed to do that for a dcache, but could not
379 * due to an I/O error, it just means the dv won't
380 * be updated so we don't be able to use that cached
381 * chunk in the future. That's inefficient, but not
385 ReleaseReadLock(&tdc->tlock);
387 if (!FidCmp(&tdc->f.fid, &avc->f.fid)
388 && tdc->f.chunk >= minj) {
389 off = tdc->f.chunk - minj;
390 if (off < NCHUNKSATONCE) {
391 /* this is the file, and the correct chunk range */
392 if (j >= NCHUNKSATONCE)
394 ("Too many dcache entries in range\n");
399 if (j == NCHUNKSATONCE)
407 ReleaseWriteLock(&afs_xdcache);
409 for (i = 0; i < j; i++) {
410 /* Iterate over the dcache entries we collected above */
412 ObtainSharedLock(&tdc->lock, 677);
414 /* was code here to clear IFDataMod, but it should only be done
415 * in storedcache and storealldcache.
417 /* Only increase DV if we had up-to-date data to start with.
418 * Otherwise, we could be falsely upgrading an old chunk
419 * (that we never read) into one labelled with the current
420 * DV #. Also note that we check that no intervening stores
421 * occurred, otherwise we might mislabel cache information
422 * for a chunk that we didn't store this time
424 /* Don't update the version number if it's not yet set. */
425 if (!hsame(tdc->f.versionNo, h_unset)
426 && hcmp(tdc->f.versionNo, oldDV) >= 0) {
428 if ((!(afs_dvhack || foreign)
429 && hsame(avc->f.m.DataVersion, newDV))
430 || ((afs_dvhack || foreign)
431 && (origCBs == afs_allCBs))) {
432 /* no error, this is the DV */
434 UpgradeSToWLock(&tdc->lock, 678);
435 hset(tdc->f.versionNo, avc->f.m.DataVersion);
/* Mark the dcache metadata dirty so the new versionNo is written out. */
436 tdc->dflags |= DFEntryMod;
437 /* DWriting may not have gotten cleared above, if all
438 * we did was a StoreMini */
439 tdc->f.states &= ~DWriting;
440 ConvertWToSLock(&tdc->lock);
444 ReleaseSharedLock(&tdc->lock);
448 minj += NCHUNKSATONCE;
455 * Invalidate chunks after an error for ccores files since
456 * afs_inactive won't be called for these and they won't be
457 * invalidated. Also discard data if it's a permanent error from the
460 if (areq->permWriteError || (avc->f.states & CCore)) {
461 afs_InvalidateAllSegments(avc);
464 afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
465 ICL_TYPE_INT32, avc->f.m.Length, ICL_TYPE_INT32, code);
466 /* would like a Trace5, but it doesn't exist... */
467 afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
468 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
469 avc->lock.excl_locked);
470 afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
471 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
472 avc->lock.readers_reading, ICL_TYPE_INT32,
473 avc->lock.num_waiting);
476 * Finally, if updated DataVersion matches newDV, we did all of the
477 * stores. If mapDV indicates that the page cache was flushed up
478 * to when we started the store, then we can relabel them as flushed
479 * as recently as newDV.
480 * Turn off CDirty bit because the stored data is now in sync with server.
482 if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
483 if ((!(afs_dvhack || foreign) && hsame(avc->f.m.DataVersion, newDV))
484 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
485 hset(avc->mapDV, newDV);
486 avc->f.states &= ~CDirty;
489 osi_FreeLargeSpace(dcList);
491 /* If not the final write a temporary error is ok. */
492 if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
497 } /*afs_StoreAllSegments (new 03/02/94) */
/*
 * afs_InvalidateAllSegments_once -- one attempt at discarding every cache
 * chunk for 'avc': clears any pending truncation/extension state, stales
 * the vcache (dropping CDirty), then walks the DV hash chain twice -- once
 * to count matching entries, once to collect them -- and resets each
 * matching dcache entry.
 *
 * NOTE(review): elided view -- declarations (code, tdc, hash, index, bp),
 * error paths and several braces are not visible here.
 */
500 afs_InvalidateAllSegments_once(struct vcache *avc)
505 struct dcache **dcList = NULL;
506 int i, dcListMax, dcListCount = 0;
508 AFS_STATCNT(afs_InvalidateAllSegments);
509 afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
510 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
511 hash = DVHash(&avc->f.fid);
512 avc->f.truncPos = AFS_NOTRUNC; /* don't truncate later */
513 avc->f.states &= ~CExtendedFile; /* not any more */
514 afs_StaleVCacheFlags(avc, 0, CDirty);
515 /* Blow away pages; for now, only for Solaris */
516 #if (defined(AFS_SUN5_ENV))
517 if (WriteLocked(&avc->lock))
518 osi_ReleaseVM(avc, (afs_ucred_t *)0);
521 * Block out others from screwing with this table; is a read lock
524 ObtainWriteLock(&afs_xdcache, 286);
/* Pass 1: count this file's entries so we can size dcList. */
527 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
528 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
529 tdc = afs_GetValidDSlot(index);
533 ReleaseReadLock(&tdc->tlock);
534 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
538 index = afs_dvnextTbl[index];
541 dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
/* Pass 2: collect matching entries and clear their dirty/page flags. */
543 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
544 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
545 tdc = afs_GetValidDSlot(index);
549 ReleaseReadLock(&tdc->tlock);
550 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
551 /* same file? we'll zap it */
552 if (afs_indexFlags[index] & IFDataMod) {
/* This chunk was dirty but is being discarded, not written back;
 * keep the dirty-chunk statistic accurate. */
553 afs_stats_cmperf.cacheCurrDirtyChunks--;
554 /* don't write it back */
555 afs_indexFlags[index] &= ~IFDataMod;
557 afs_indexFlags[index] &= ~IFAnyPages;
558 if (dcListCount < dcListMax)
559 dcList[dcListCount++] = tdc;
566 index = afs_dvnextTbl[index];
568 ReleaseWriteLock(&afs_xdcache);
/* Reset each collected dcache entry outside of afs_xdcache. */
570 for (i = 0; i < dcListCount; i++) {
573 ObtainWriteLock(&tdc->lock, 679);
575 if (vType(avc) == VDIR)
577 ReleaseWriteLock(&tdc->lock);
581 osi_Free(dcList, dcListMax * sizeof(struct dcache *));
586 ReleaseWriteLock(&afs_xdcache);
589 for (i = 0; i < dcListCount; i++) {
595 osi_Free(dcList, dcListMax * sizeof(struct dcache *));
602 * afs_InvalidateAllSegments
605 * Invalidates all chunks for a given file
608 * avc : Pointer to vcache entry.
611 * For example, called after an error has been detected. Called
612 * with avc write-locked, and afs_xdcache unheld.
/*
 * afs_InvalidateAllSegments -- invalidate all cache chunks for 'avc',
 * retrying forever on failure: a failed invalidation would leave stale
 * dirty chunks that could later be served as if valid.  Retries are
 * queued to a background daemon (see the long comment below).
 *
 * Called with avc write-locked and afs_xdcache unheld.
 *
 * NOTE(review): elided view -- declarations ('code', 'bp'), the retry
 * loop structure and several braces are not visible here.
 */
616 afs_InvalidateAllSegments(struct vcache *avc)
619 afs_uint32 last_warn;
621 code = afs_InvalidateAllSegments_once(avc);
623 /* Success; nothing more to do. */
628 * If afs_InvalidateAllSegments_once failed, we cannot simply return an
629 * error to our caller. This function is called when we encounter a fatal
630 * error during stores, in which case we MUST invalidate all chunks for the
631 * given file. If we fail to invalidate some chunks, they will be left with
632 * the 'new' dirty/written data that was never successfully stored on the
633 * server, but the DV in the dcache is still the old DV. So, if its left
634 * alone, we may indefinitely serve data to applications that is not
635 * actually in the file on the fileserver.
637 * So to make sure we never serve userspace bad data after such a failure,
638 * we must keep trying to invalidate the dcaches for the given file. (Note
639 * that we cannot simply set a flag on the vcache to retry the invalidate
640 * later on, because the vcache may go away, but the 'bad' dcaches could
641 * remain.) We do this below, via background daemon requests because in
642 * some scenarios we can always get I/O errors on accessing the cache if we
643 * access via a user pid. (e.g. on LINUX, this can happen if the pid has a
644 * pending SIGKILL.) Doing this via background daemon ops should avoid
648 last_warn = osi_Time();
649 afs_warn("afs: Failed to invalidate cache chunks for fid %d.%d.%d.%d; our "
650 "local disk cache may be throwing errors. We must invalidate "
651 "these chunks to avoid possibly serving incorrect data, so we'll "
652 "retry until we succeed. If AFS access seems to hang, this may "
654 avc->f.fid.Cell, avc->f.fid.Fid.Volume, avc->f.fid.Fid.Vnode,
655 avc->f.fid.Fid.Unique);
/* Rate-limit the repeat warning; the 'now < last_warn' test also copes
 * with the clock stepping backwards. */
658 static const afs_uint32 warn_int = 60*60; /* warn once every hour */
659 afs_uint32 now = osi_Time();
662 if (now < last_warn || now - last_warn > warn_int) {
664 afs_warn("afs: Still trying to invalidate cache chunks for fid "
665 "%d.%d.%d.%d. We will retry until we succeed; if AFS "
666 "access seems to hang, this may be why.\n",
667 avc->f.fid.Cell, avc->f.fid.Fid.Volume,
668 avc->f.fid.Fid.Vnode, avc->f.fid.Fid.Unique);
671 /* Wait 10 seconds between attempts. */
672 afs_osi_Wait(1000 * 10, NULL, 0);
675 * Ask a background daemon to do this request for us. Note that _we_ hold
676 * the write lock on 'avc', while the background daemon does the work. This
677 * is a little weird, but it helps avoid any issues with lock ordering
678 * or if our caller does not expect avc->lock to be dropped while
681 bp = afs_BQueue(BOP_INVALIDATE_SEGMENTS, avc, 0, 1, NULL, 0, 0, NULL,
/* Wait for the background request to complete (BUVALID set). */
683 while ((bp->flags & BUVALID) == 0) {
694 * Extend a cache file
696 * \param avc pointer to vcache to extend data for
697 * \param alen Length to extend file to
700 * \note avc must be write locked. May release and reobtain avc and GLOCK
/*
 * afs_ExtendSegments -- grow the cached file to length 'alen' by appending
 * zero-filled pages, chunk by chunk.  avc must be write-locked (per the
 * doc comment above this function).
 *
 * NOTE(review): elided view -- declarations ('code', 'tdc', 'zeros',
 * 'towrite'), error handling and loop braces are not all visible here.
 */
703 afs_ExtendSegments(struct vcache *avc, afs_size_t alen, struct vrequest *areq)
705 afs_size_t offset, toAdd;
706 struct osi_file *tfile;
/* One page of zeros, reused as the write source for all padding. */
711 zeros = afs_osi_Alloc(AFS_PAGESIZE);
714 memset(zeros, 0, AFS_PAGESIZE);
/* Extend one chunk at a time until the file reaches the target length. */
716 while (avc->f.m.Length < alen) {
717 tdc = afs_ObtainDCacheForWriting(avc, avc->f.m.Length, alen - avc->f.m.Length, areq, 0);
/* How much of the remaining gap fits in this chunk. */
723 toAdd = alen - avc->f.m.Length;
725 offset = avc->f.m.Length - AFS_CHUNKTOBASE(tdc->f.chunk);
726 if (offset + toAdd > AFS_CHUNKTOSIZE(tdc->f.chunk)) {
727 toAdd = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;
729 tfile = afs_CFileOpen(&tdc->f.inode);
/* Append zero pages until validPos covers the new extent of this chunk. */
731 while(tdc->validPos < avc->f.m.Length + toAdd) {
734 towrite = (avc->f.m.Length + toAdd) - tdc->validPos;
735 if (towrite > AFS_PAGESIZE) towrite = AFS_PAGESIZE;
737 code = afs_CFileWrite(tfile,
738 tdc->validPos - AFS_CHUNKTOBASE(tdc->f.chunk),
740 tdc->validPos += towrite;
742 afs_CFileClose(tfile);
743 afs_AdjustSize(tdc, offset + toAdd );
744 avc->f.m.Length += toAdd;
745 ReleaseWriteLock(&tdc->lock);
749 afs_osi_Free(zeros, AFS_PAGESIZE);
754 * afs_TruncateAllSegments
757 * Truncate a cache file.
760 * avc : Ptr to vcache entry to truncate.
761 * alen : Number of bytes to make the file.
762 * areq : Ptr to request structure.
765 * Called with avc write-locked; in VFS40 systems, pvnLock is also
769 afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
770 struct vrequest *areq, afs_ucred_t *acred)
778 struct dcache **tdcArray = NULL;
780 AFS_STATCNT(afs_TruncateAllSegments);
781 avc->f.m.Date = osi_Time();
782 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
783 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
784 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
785 if (alen >= avc->f.m.Length) {
787 * Special speedup since Sun's vm extends the file this way;
788 * we've never written to the file thus we can just set the new
789 * length and avoid the needless calls below.
790 * Also used for ftruncate calls which can extend the file.
791 * To completely minimize the possible extra StoreMini RPC, we really
792 * should keep the ExtendedPos as well and clear this flag if we
793 * truncate below that value before we store the file back.
795 avc->f.states |= CExtendedFile;
796 avc->f.m.Length = alen;
799 #if (defined(AFS_SUN5_ENV))
801 /* Zero unused portion of last page */
802 osi_VM_PreTruncate(avc, alen, acred);
806 #if (defined(AFS_SUN5_ENV))
807 ObtainWriteLock(&avc->vlock, 546);
808 avc->activeV++; /* Block new getpages */
809 ReleaseWriteLock(&avc->vlock);
812 ReleaseWriteLock(&avc->lock);
815 /* Flush pages beyond end-of-file. */
816 osi_VM_Truncate(avc, alen, acred);
819 ObtainWriteLock(&avc->lock, 79);
821 avc->f.m.Length = alen;
823 if (alen < avc->f.truncPos)
824 avc->f.truncPos = alen;
825 code = DVHash(&avc->f.fid);
827 /* block out others from screwing with this table */
828 ObtainWriteLock(&afs_xdcache, 287);
831 for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
832 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
833 tdc = afs_GetValidDSlot(index);
835 ReleaseWriteLock(&afs_xdcache);
839 ReleaseReadLock(&tdc->tlock);
840 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
844 index = afs_dvnextTbl[index];
847 /* Now allocate space where we can save those dcache entries, and
848 * do a second pass over them.. Since we're holding xdcache, it
849 * shouldn't be changing.
851 tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
854 for (index = afs_dvhashTbl[code]; index != NULLIDX; index = afs_dvnextTbl[index]) {
855 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
856 tdc = afs_GetValidDSlot(index);
858 /* make sure we put back all of the tdcArray members before
860 /* remember, the last valid tdc is at dcPos-1, so start at
861 * dcPos-1, not at dcPos itself. */
862 for (dcPos = dcPos - 1; dcPos >= 0; dcPos--) {
863 tdc = tdcArray[dcPos];
869 ReleaseReadLock(&tdc->tlock);
870 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
871 /* same file, and modified, we'll store it back */
872 if (dcPos < dcCount) {
873 tdcArray[dcPos++] = tdc;
883 ReleaseWriteLock(&afs_xdcache);
885 /* Now we loop over the array of dcache entries and truncate them */
886 for (index = 0; index < dcPos; index++) {
887 struct osi_file *tfile;
889 tdc = tdcArray[index];
891 newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
894 ObtainSharedLock(&tdc->lock, 672);
895 if (newSize < tdc->f.chunkBytes && newSize < MAX_AFS_UINT32) {
896 UpgradeSToWLock(&tdc->lock, 673);
897 tdc->f.states |= DWriting;
898 tfile = afs_CFileOpen(&tdc->f.inode);
900 afs_CFileTruncate(tfile, (afs_int32)newSize);
901 afs_CFileClose(tfile);
902 afs_AdjustSize(tdc, (afs_int32)newSize);
903 if (alen < tdc->validPos) {
904 if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
907 tdc->validPos = alen;
909 ConvertWToSLock(&tdc->lock);
911 ReleaseSharedLock(&tdc->lock);
919 osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
921 #if (defined(AFS_SUN5_ENV))
922 ObtainWriteLock(&avc->vlock, 547);
923 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
924 avc->vstates &= ~VRevokeWait;
925 afs_osi_Wakeup((char *)&avc->vstates);
927 ReleaseWriteLock(&avc->vlock);