2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * --------------------- Required definitions ---------------------
13 #include <afsconfig.h>
14 #include "afs/param.h"
19 #include "afs/sysincludes.h" /*Standard vendor system headers */
20 #include "afsincludes.h" /*AFS-based standard headers */
21 #include "afs/afs_stats.h" /* statistics */
22 #include "afs/afs_cbqueue.h"
23 #include "afs/afs_osidnlc.h"
/* File-scope counter; its use is not visible in this excerpt.
 * NOTE(review): presumably a monotonically bumped stamp for store
 * operations — confirm against the full file before relying on this. */
25 afs_uint32 afs_stampValue = 0;
31 * Send a truncation request to a FileServer.
37 * We're write-locked upon entry.
/*
 * afs_StoreMini
 *
 * Send a zero-length StoreData RPC to the fileserver, used to apply a
 * pending truncation (avc->truncPos) and/or extend the file to the
 * client's notion of its length without shipping any data.
 *
 * Parameters:
 *   avc  : vcache entry for the file (caller holds the write lock).
 *   areq : request structure for connection/credential selection.
 *
 * NOTE(review): this excerpt is elided — intermediate lines (return type,
 * loop/brace structure, error-retry logic) are missing, so the control
 * flow annotated below is partial.
 */
41 afs_StoreMini(register struct vcache *avc, struct vrequest *areq)
43 register struct conn *tc;
44 struct AFSStoreStatus InStatus;
45 struct AFSFetchStatus OutStatus;
46 struct AFSVolSync tsync;
47 register afs_int32 code;
48 register struct rx_call *tcall;
49 afs_size_t tlen, xlen = 0;
51 AFS_STATCNT(afs_StoreMini);
52 afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
53 ICL_TYPE_INT32, avc->m.Length);
/* Apply any pending truncation, then clear the truncate/extend state
 * on the vcache since this RPC will settle the server-side length. */
55 if (avc->truncPos < tlen)
57 avc->truncPos = AFS_NOTRUNC;
58 avc->states &= ~CExtendedFile;
/* Pick a server connection and open the Rx call for the store. */
61 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
65 tcall = rx_NewCall(tc->id);
67 /* Set the client mod time since we always want the file
68 * to have the client's mod time and not the server's one
69 * (to avoid problems with make, etc.) It almost always
70 * works fine with standard afs because the server/client
71 * times are in sync and more importantly this storemini
72 * it's a special call that would typically be followed by
73 * the proper store-data or store-status calls.
75 InStatus.Mask = AFS_SETMODTIME;
76 InStatus.ClientModTime = avc->m.Date;
77 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
78 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64, ICL_TYPE_FID,
79 &avc->fid.Fid, ICL_TYPE_OFFSET,
80 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
81 ICL_HANDLE_OFFSET(xlen), ICL_TYPE_OFFSET,
82 ICL_HANDLE_OFFSET(tlen));
/* Start the (zero-byte) StoreData: use the 64-bit variant unless this
 * server is already known not to support it. */
84 #ifdef AFS_64BIT_CLIENT
85 if (!afs_serverHasNo64Bit(tc)) {
87 StartRXAFS_StoreData64(tcall,
88 (struct AFSFid *)&avc->fid.Fid,
89 &InStatus, avc->m.Length,
90 (afs_size_t) 0, tlen);
96 StartRXAFS_StoreData(tcall,
97 (struct AFSFid *)&avc->fid.Fid,
98 &InStatus, l1, 0, l2);
100 #else /* AFS_64BIT_CLIENT */
102 StartRXAFS_StoreData(tcall, (struct AFSFid *)&avc->fid.Fid,
103 &InStatus, avc->m.Length, 0, tlen);
104 #endif /* AFS_64BIT_CLIENT */
/* Finish the RPC; rx_EndCall folds the call's own error into code. */
106 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
108 code = rx_EndCall(tcall, code);
/* RXGEN_OPCODE from a 64-bit start means the server lacks the 64-bit
 * RPC — remember that so we fall back to 32-bit next time. */
111 #ifdef AFS_64BIT_CLIENT
112 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
113 afs_serverSetNo64Bit(tc);
116 #endif /* AFS_64BIT_CLIENT */
/* NOTE(review): line below is the tail of an elided afs_Analyze()
 * retry-loop call — the macro/function name is missing from this excerpt. */
120 (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_STOREDATA,
/* On success, merge the returned status; on (elided) error paths the
 * cached segments are invalidated. */
124 afs_ProcessFS(avc, &OutStatus, areq);
127 afs_InvalidateAllSegments(avc);
/* Diagnostic counter: chunks found missing during store-back
 * (incremented in afs_StoreAllSegments' elided error path — confirm). */
133 unsigned int storeallmissing = 0;
/* Minimum of two values; used below to clamp the store length to truncPos. */
134 #define lmin(a,b) (((a) < (b)) ? (a) : (b))
136 * afs_StoreAllSegments
139 * Stores all modified segments back to server
142 * avc : Pointer to vcache entry.
143 * areq : Pointer to request structure.
146 * Called with avc write-locked.
/* Number of dirty chunks stored back per RPC batch; smaller on HP-UX
 * (presumably for kernel stack/memory limits — not stated here). */
148 #if defined (AFS_HPUX_ENV)
149 int NCHUNKSATONCE = 3;
151 int NCHUNKSATONCE = 64;
/*
 * afs_StoreAllSegments
 *
 * Write every dirty (IFDataMod) cache chunk of a file back to the
 * fileserver, in batches of NCHUNKSATONCE contiguous chunks per
 * StoreData RPC, then relabel the surviving chunks with the new data
 * version and clear the vcache's dirty state.
 *
 * Parameters:
 *   avc  : vcache entry (caller holds the write lock; it is converted
 *          to a shared lock around the RPCs and restored before return).
 *   areq : request structure.
 *   (additional 'sync' flags parameter is referenced below but its
 *   declaration line is elided from this excerpt)
 *
 * NOTE(review): this excerpt is heavily elided — do-while braces,
 * afs_Analyze retry loops, and several declarations are missing, so the
 * phase comments below describe the visible skeleton only.
 */
157 afs_StoreAllSegments(register struct vcache *avc, struct vrequest *areq,
160 register struct dcache *tdc;
161 register afs_int32 code = 0;
162 register afs_int32 index;
163 register afs_int32 origCBs, foreign = 0;
165 afs_hyper_t newDV, oldDV; /* DV when we start, and finish, respectively */
166 struct dcache **dcList, **dclist;
167 unsigned int i, j, minj, moredata, high, off;
169 afs_size_t maxStoredLength; /* highest offset we've written to server. */
172 struct afs_stats_xferData *xferP; /* Ptr to this op's xfer struct */
173 osi_timeval_t xferStartTime, /*FS xfer start time */
174 xferStopTime; /*FS xfer stop time */
175 afs_size_t bytesToXfer; /* # bytes to xfer */
176 afs_size_t bytesXferred; /* # bytes actually xferred */
177 #endif /* AFS_NOSTATS */
/* --- Setup: record starting DV, locate the DV hash chain, and grab a
 * scratch array for collecting dcache pointers. --- */
180 AFS_STATCNT(afs_StoreAllSegments);
182 hset(oldDV, avc->m.DataVersion);
183 hset(newDV, avc->m.DataVersion);
184 hash = DVHash(&avc->fid);
185 foreign = (avc->states & CForeign);
186 dcList = (struct dcache **)osi_AllocLargeSpace(AFS_LRALLOCSIZ);
187 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
188 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
189 #if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
190 /* In the aix vm implementation we need to do the vm_writep even
191 * on the memcache case since that's where we adjust the file's size
192 * and finish flushing partial vm pages.
194 if (cacheDiskType != AFS_FCACHE_TYPE_MEM)
195 #endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
197 /* If we're not diskless, reading a file may stress the VM
198 * system enough to cause a pageout, and this vnode would be
199 * locked when the pageout occurs. We can prevent this problem
200 * by making sure all dirty pages are already flushed. We don't
201 * do this when diskless because reading a diskless (i.e.
202 * memory-resident) chunk doesn't require using new VM, and we
203 * also don't want to dump more dirty data into a diskless cache,
204 * since they're smaller, and we might exceed its available
/* --- Flush (or invalidate, on Solaris with AFS_VMSYNC_INVAL) dirty VM
 * pages before walking the cache, then drop to a shared vcache lock. --- */
207 #if defined(AFS_SUN5_ENV)
208 if (sync & AFS_VMSYNC_INVAL) /* invalidate VM pages */
209 osi_VM_TryToSmush(avc, CRED(), 1);
212 osi_VM_StoreAllSegments(avc);
215 ConvertWToSLock(&avc->lock);
218 * Subsequent code expects a sorted list, and it expects all the
219 * chunks in the list to be contiguous, so we need a sort and a
220 * while loop in here, too - but this will work for a first pass...
221 * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
222 * bin sort, I guess. Chunk numbers start with 0
224 * - Have to get a write lock on xdcache because GetDSlot might need it (if
225 * the chunk doesn't have a dcache struct).
226 * This seems like overkill in most cases.
227 * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
228 * xdcache, then relock xdcache and try to use index. It is done
229 * a lot elsewhere in the CM, but I'm not buying that argument.
230 * - should be able to check IFDataMod without doing the GetDSlot (just
231 * hold afs_xdcache). That way, it's easy to do this without the
232 * writelock on afs_xdcache, and we save unnecessary disk
233 * operations. I don't think that works, 'cuz the next pointers
/* Snapshot the global callback counter so we can later detect whether
 * any callback broke (i.e. another client stored) during our stores. */
236 origCBs = afs_allCBs;
239 tlen = avc->m.Length;
/* --- Collection pass (per batch): walk the DV hash chain under
 * afs_xdcache(W) and bin dirty chunks [minj, minj+NCHUNKSATONCE) into
 * dcList by chunk offset. --- */
243 memset((char *)dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
247 /* lock and start over from beginning of hash chain
248 * in order to avoid a race condition. */
249 MObtainWriteLock(&afs_xdcache, 284);
250 index = afs_dvhashTbl[hash];
252 for (j = 0; index != NULLIDX;) {
253 if ((afs_indexFlags[index] & IFDataMod)
254 && (afs_indexUnique[index] == avc->fid.Fid.Unique)) {
255 tdc = afs_GetDSlot(index, 0); /* refcount+1. */
256 ReleaseReadLock(&tdc->tlock);
257 if (!FidCmp(&tdc->f.fid, &avc->fid) && tdc->f.chunk >= minj) {
258 off = tdc->f.chunk - minj;
259 if (off < NCHUNKSATONCE) {
/* Two dirty dcaches for the same chunk would be a cache corruption. */
261 osi_Panic("dclist slot already in use!");
266 /* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
267 * can't grab here, due to lock ordering with afs_xdcache.
268 * So, disable this shortcut for now. -- kolya 2001-10-13
270 /* shortcut: big win for little files */
271 /* tlen -= tdc->f.chunkBytes;
278 if (j == NCHUNKSATONCE)
285 index = afs_dvnextTbl[index];
287 MReleaseWriteLock(&afs_xdcache);
289 /* this guy writes chunks, puts back dcache structs, and bumps newDV */
290 /* "moredata" just says "there are more dirty chunks yet to come".
/* --- Store pass: scan the collected chunks, lock each shared, and when
 * a contiguous run ends, ship the whole run in one StoreData RPC. --- */
294 static afs_uint32 lp1 = 10000, lp2 = 10000;
296 struct AFSStoreStatus InStatus;
297 struct AFSFetchStatus OutStatus;
299 afs_size_t base, bytes;
302 unsigned int first = 0;
305 struct osi_file *tfile;
306 struct rx_call *tcall;
308 for (bytes = 0, j = 0; !code && j <= high; j++) {
310 ObtainSharedLock(&(dcList[j]->lock), 629);
313 bytes += dcList[j]->f.chunkBytes;
/* Short chunk in the middle of a run: account for the zero padding
 * (sbytes) needed to keep the server-side offsets contiguous. */
314 if ((dcList[j]->f.chunkBytes < afs_OtherCSize)
315 && (dcList[j]->f.chunk - minj < high)
317 int sbytes = afs_OtherCSize - dcList[j]->f.chunkBytes;
/* End of a contiguous run (last slot, or a hole follows): do the RPC. */
321 if (bytes && (j == high || !dcList[j + 1])) {
322 /* base = AFS_CHUNKTOBASE(dcList[first]->f.chunk); */
323 base = AFS_CHUNKTOBASE(first + minj);
326 * take a list of dcache structs and send them all off to the server
327 * the list must be in order, and the chunks contiguous.
328 * Note - there is no locking done by this code currently. For
329 * safety's sake, xdcache could be locked over the entire call.
330 * However, that pretty well ties up all the threads. Meantime, all
331 * the chunks _MUST_ have their refcounts bumped.
332 * The writes done before a store back will clear setuid-ness
334 * We can permit CacheStoreProc to wake up the user process IFF we
335 * are doing the last RPC for this close, ie, storing back the last
336 * set of contiguous chunks of a file.
339 dclist = &dcList[first];
340 nchunks = 1 + j - first;
341 nomore = !(moredata || (j != high));
342 InStatus.ClientModTime = avc->m.Date;
343 InStatus.Mask = AFS_SETMODTIME;
344 if (sync & AFS_SYNC) {
345 InStatus.Mask |= AFS_FSYNC;
/* File length sent to the server is clamped by any pending truncation. */
347 tlen = lmin(avc->m.Length, avc->truncPos);
348 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
349 ICL_TYPE_FID, &avc->fid.Fid, ICL_TYPE_OFFSET,
350 ICL_HANDLE_OFFSET(base), ICL_TYPE_OFFSET,
351 ICL_HANDLE_OFFSET(bytes), ICL_TYPE_OFFSET,
352 ICL_HANDLE_OFFSET(tlen));
356 tc = afs_Conn(&avc->fid, areq, 0);
360 tcall = rx_NewCall(tcall = rx_NewCall(tc->id);
361 #ifdef AFS_64BIT_CLIENT
362 if (!afs_serverHasNo64Bit(tc)) {
364 StartRXAFS_StoreData64(tcall,
/* >4GB length cannot be expressed to a 32-bit-only server. */
370 if (tlen > 0xFFFFFFFF) {
373 afs_int32 t1, t2, t3;
378 StartRXAFS_StoreData(tcall,
385 #else /* AFS_64BIT_CLIENT */
387 StartRXAFS_StoreData(tcall,
388 (struct AFSFid *)&avc->
389 fid.Fid, &InStatus, base,
391 #endif /* AFS_64BIT_CLIENT */
398 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
/* Truncation is carried by tlen in this RPC; clear the pending mark. */
399 avc->truncPos = AFS_NOTRUNC;
/* --- Per-chunk data shipment within the run. --- */
401 for (i = 0; i < nchunks && !code; i++) {
404 afs_warn("afs: missing dcache!\n");
406 continue; /* panic? */
408 afs_Trace4(afs_iclSetp, CM_TRACE_STOREALL2,
409 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32,
410 tdc->f.chunk, ICL_TYPE_INT32,
411 tdc->index, ICL_TYPE_INT32,
/* Decide whether the storing thread may wake the closing process
 * early, based on per-file or default asynchrony settings. */
415 if (avc->asynchrony == -1) {
416 if (afs_defaultAsynchrony >
418 shouldwake = &nomore;
420 } else if ((afs_uint32) avc->asynchrony >=
422 shouldwake = &nomore;
425 tfile = afs_CFileOpen(tdc->f.inode);
/* --- Stats-enabled path: time the transfer and tally bytes. --- */
428 &(afs_stats_cmfullperf.rpc.
430 [AFS_STATS_FS_XFERIDX_STOREDATA]);
431 osi_GetuTime(&xferStartTime);
434 afs_CacheStoreProc(tcall, tfile,
435 tdc->f.chunkBytes, avc,
436 shouldwake, &bytesToXfer,
439 osi_GetuTime(&xferStopTime);
442 (xferP->numSuccesses)++;
443 afs_stats_XferSumBytes
444 [AFS_STATS_FS_XFERIDX_STOREDATA] +=
447 (afs_stats_XferSumBytes
448 [AFS_STATS_FS_XFERIDX_STOREDATA] >> 10);
449 afs_stats_XferSumBytes
450 [AFS_STATS_FS_XFERIDX_STOREDATA] &= 0x3FF;
451 if (bytesXferred < xferP->minBytes)
452 xferP->minBytes = bytesXferred;
453 if (bytesXferred > xferP->maxBytes)
454 xferP->maxBytes = bytesXferred;
457 * Tally the size of the object. Note: we tally the actual size,
458 * NOT the number of bytes that made it out over the wire.
460 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0)
462 else if (bytesToXfer <=
463 AFS_STATS_MAXBYTES_BUCKET1)
465 else if (bytesToXfer <=
466 AFS_STATS_MAXBYTES_BUCKET2)
468 else if (bytesToXfer <=
469 AFS_STATS_MAXBYTES_BUCKET3)
471 else if (bytesToXfer <=
472 AFS_STATS_MAXBYTES_BUCKET4)
474 else if (bytesToXfer <=
475 AFS_STATS_MAXBYTES_BUCKET5)
477 else if (bytesToXfer <=
478 AFS_STATS_MAXBYTES_BUCKET6)
480 else if (bytesToXfer <=
481 AFS_STATS_MAXBYTES_BUCKET7)
486 afs_stats_GetDiff(elapsedTime, xferStartTime,
488 afs_stats_AddTo((xferP->sumTime),
490 afs_stats_SquareAddTo((xferP->sqrTime),
492 if (afs_stats_TimeLessThan
493 (elapsedTime, (xferP->minTime))) {
494 afs_stats_TimeAssign((xferP->minTime),
497 if (afs_stats_TimeGreaterThan
498 (elapsedTime, (xferP->maxTime))) {
499 afs_stats_TimeAssign((xferP->maxTime),
/* --- Stats-disabled path: same store, byte counts go to dummies. --- */
505 afs_CacheStoreProc(tcall, tfile,
506 tdc->f.chunkBytes, avc,
507 shouldwake, &lp1, &lp2);
508 #endif /* AFS_NOSTATS */
509 afs_CFileClose(tfile);
/* Pad a short interior chunk with zeroes so the next chunk lands at
 * its expected offset in the single StoreData stream. */
510 if ((tdc->f.chunkBytes < afs_OtherCSize)
511 && (i < (nchunks - 1)) && code == 0) {
512 int bsent, tlen, sbytes =
513 afs_OtherCSize - tdc->f.chunkBytes;
515 osi_AllocLargeSpace(AFS_LRALLOCSIZ);
520 AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ :
522 memset(tbuffer, 0, tlen);
524 bsent = rx_Write(tcall, tbuffer, tlen);
528 code = -33; /* XXX */
533 osi_FreeLargeSpace(tbuffer);
535 stored += tdc->f.chunkBytes;
537 /* ideally, I'd like to unlock the dcache and turn
538 * off the writing bit here, but that would
539 * require being able to retry StoreAllSegments in
540 * the event of a failure. It only really matters
541 * if user can't read from a 'locked' dcache or
542 * one which has the writing bit turned on. */
/* --- End of run: finish the RPC and collect the returned status. --- */
545 struct AFSVolSync tsync;
548 EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
553 doProcessFS = 1; /* Flag to run afs_ProcessFS() later on */
558 code2 = rx_EndCall(tcall, code);
563 #ifdef AFS_64BIT_CLIENT
564 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
565 afs_serverSetNo64Bit(tc);
568 #endif /* AFS_64BIT_CLIENT */
/* NOTE(review): tail of an elided afs_Analyze() retry-loop call. */
570 (tc, code, &avc->fid, areq,
571 AFS_STATS_FS_RPCIDX_STOREDATA, SHARED_LOCK,
574 /* put back all remaining locked dcache entries */
575 for (i = 0; i < nchunks; i++) {
/* On success, clear the dirty index flags for each stored chunk.
 * (See LOCKXXX note: done here without afs_xdcache(W).) */
578 if (afs_indexFlags[tdc->index] & IFDataMod) {
580 * LOCKXXX -- should hold afs_xdcache(W) when
581 * modifying afs_indexFlags.
583 afs_indexFlags[tdc->index] &= ~IFDataMod;
584 afs_stats_cmperf.cacheCurrDirtyChunks--;
585 afs_indexFlags[tdc->index] &= ~IFDirtyPages;
586 if (sync & AFS_VMSYNC_INVAL) {
587 /* since we have invalidated all the pages of this
588 ** vnode by calling osi_VM_TryToSmush, we can
589 ** safely mark this dcache entry as not having
590 ** any pages. This vnode now becomes eligible for
591 ** reclamation by getDownD.
593 afs_indexFlags[tdc->index] &= ~IFAnyPages;
597 UpgradeSToWLock(&tdc->lock, 628);
598 tdc->f.states &= ~DWriting; /* correct? */
599 tdc->dflags |= DFEntryMod;
600 ReleaseWriteLock(&tdc->lock);
602 /* Mark the entry as released */
607 /* Now copy out return params */
608 UpgradeSToWLock(&avc->lock, 28); /* keep out others for a while */
609 afs_ProcessFS(avc, &OutStatus, areq);
610 /* Keep last (max) size of file on server to see if
611 * we need to call afs_StoreMini to extend the file.
614 maxStoredLength = OutStatus.Length;
615 ConvertWToSLock(&avc->lock);
/* Error path: drop locks/refs on the chunks we never got to store. */
620 for (j++; j <= high; j++) {
622 ReleaseSharedLock(&(dcList[j]->lock));
623 afs_PutDCache(dcList[j]);
624 /* Releasing entry */
630 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALLDCDONE,
631 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
636 /* Release any zero-length dcache entries in our interval
637 * that we locked but didn't store back above.
639 for (j = 0; j <= high; j++) {
642 osi_Assert(tdc->f.chunkBytes == 0);
643 ReleaseSharedLock(&tdc->lock);
/* Advance to the next batch window of NCHUNKSATONCE chunks. */
649 minj += NCHUNKSATONCE;
650 } while (!code && moredata);
652 UpgradeSToWLock(&avc->lock, 29);
654 /* send a trivial truncation store if did nothing else */
657 * Call StoreMini if we haven't written enough data to extend the
658 * file at the fileserver to the client's notion of the file length.
660 if ((avc->truncPos != AFS_NOTRUNC) || ((avc->states & CExtendedFile)
661 && (maxStoredLength <
663 code = afs_StoreMini(avc, areq);
665 hadd32(newDV, 1); /* just bumped here, too */
667 avc->states &= ~CExtendedFile;
671 * Finally, turn off DWriting, turn on DFEntryMod,
672 * update f.versionNo.
673 * A lot of this could be integrated into the loop above
/* --- Relabel pass: walk the hash chain again and stamp stored chunks
 * with the new data version, batch by batch. --- */
683 memset((char *)dcList, 0,
684 NCHUNKSATONCE * sizeof(struct dcache *));
686 /* overkill, but it gets the lock in case GetDSlot needs it */
687 MObtainWriteLock(&afs_xdcache, 285);
689 for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
690 index != NULLIDX && safety < afs_cacheFiles + 2;) {
692 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
693 tdc = afs_GetDSlot(index, 0);
694 ReleaseReadLock(&tdc->tlock);
696 if (!FidCmp(&tdc->f.fid, &avc->fid)
697 && tdc->f.chunk >= minj) {
698 off = tdc->f.chunk - minj;
699 if (off < NCHUNKSATONCE) {
700 /* this is the file, and the correct chunk range */
701 if (j >= NCHUNKSATONCE)
703 ("Too many dcache entries in range\n");
708 if (j == NCHUNKSATONCE)
716 index = afs_dvnextTbl[index];
718 MReleaseWriteLock(&afs_xdcache);
720 for (i = 0; i < j; i++) {
721 /* Iterate over the dcache entries we collected above */
723 ObtainSharedLock(&tdc->lock, 677);
725 /* was code here to clear IFDataMod, but it should only be done
726 * in storedcache and storealldcache.
728 /* Only increase DV if we had up-to-date data to start with.
729 * Otherwise, we could be falsely upgrading an old chunk
730 * (that we never read) into one labelled with the current
731 * DV #. Also note that we check that no intervening stores
732 * occurred, otherwise we might mislabel cache information
733 * for a chunk that we didn't store this time
735 /* Don't update the version number if it's not yet set. */
736 if (!hsame(tdc->f.versionNo, h_unset)
737 && hcmp(tdc->f.versionNo, oldDV) >= 0) {
739 if ((!(afs_dvhack || foreign)
740 && hsame(avc->m.DataVersion, newDV))
741 || ((afs_dvhack || foreign)
742 && (origCBs == afs_allCBs))) {
743 /* no error, this is the DV */
745 UpgradeSToWLock(&tdc->lock, 678);
746 hset(tdc->f.versionNo, avc->m.DataVersion);
747 tdc->dflags |= DFEntryMod;
748 ConvertWToSLock(&tdc->lock);
752 ReleaseSharedLock(&tdc->lock);
756 minj += NCHUNKSATONCE;
763 * Invalidate chunks after an error for ccores files since
764 * afs_inactive won't be called for these and they won't be
765 * invalidated. Also discard data if it's a permanent error from the
768 if (areq->permWriteError || (avc->states & (CCore1 | CCore))) {
769 afs_InvalidateAllSegments(avc);
772 afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
773 ICL_TYPE_INT32, avc->m.Length, ICL_TYPE_INT32, code);
774 /* would like a Trace5, but it doesn't exist... */
775 afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
776 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
777 avc->lock.excl_locked);
778 afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
779 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
780 avc->lock.readers_reading, ICL_TYPE_INT32,
781 avc->lock.num_waiting);
784 * Finally, if updated DataVersion matches newDV, we did all of the
785 * stores. If mapDV indicates that the page cache was flushed up
786 * to when we started the store, then we can relabel them as flushed
787 * as recently as newDV.
788 * Turn off CDirty bit because the stored data is now in sync with server.
790 if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
791 if ((!(afs_dvhack || foreign) && hsame(avc->m.DataVersion, newDV))
792 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
793 hset(avc->mapDV, newDV);
794 avc->states &= ~CDirty;
797 osi_FreeLargeSpace(dcList);
799 /* If not the final write a temporary error is ok. */
800 if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
805 } /*afs_StoreAllSegments (new 03/02/94) */
809 * afs_InvalidateAllSegments
812 * Invalidates all chunks for a given file
815 * avc : Pointer to vcache entry.
818 * For example, called after an error has been detected. Called
819 * with avc write-locked, and afs_xdcache unheld.
/*
 * afs_InvalidateAllSegments
 *
 * Discard every cached chunk for a file: clear dirty/page flags,
 * drop the callback and CStatd/CDirty state, and zap the dcache
 * entries on the file's DV hash chain.
 *
 * Parameters:
 *   avc : vcache entry; caller holds the write lock, and afs_xdcache
 *         must NOT be held (it is acquired here).
 *
 * Typically called after an error has been detected (per the elided
 * header comment above this excerpt).
 *
 * NOTE(review): this excerpt is elided — some declarations, braces and
 * the counting logic for dcListMax are missing.
 */
823 afs_InvalidateAllSegments(struct vcache *avc)
828 struct dcache **dcList;
829 int i, dcListMax, dcListCount;
831 AFS_STATCNT(afs_InvalidateAllSegments);
832 afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
833 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
834 hash = DVHash(&avc->fid);
835 avc->truncPos = AFS_NOTRUNC; /* don't truncate later */
836 avc->states &= ~CExtendedFile; /* not any more */
/* Drop the server callback and mark cached status/data as stale. */
837 ObtainWriteLock(&afs_xcbhash, 459);
838 afs_DequeueCallback(avc);
839 avc->states &= ~(CStatd | CDirty); /* mark status information as bad, too */
840 ReleaseWriteLock(&afs_xcbhash);
/* Directories (odd vnode numbers, or VDIR) also leave the name cache. */
841 if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
842 osi_dnlc_purgedp(avc);
843 /* Blow away pages; for now, only for Solaris */
844 #if (defined(AFS_SUN5_ENV))
845 if (WriteLocked(&avc->lock))
846 osi_ReleaseVM(avc, (struct AFS_UCRED *)0);
849 * Block out others from screwing with this table; is a read lock
/* --- First pass under afs_xdcache(W): count this file's chunks so we
 * can size the dcList array (count update elided in this excerpt). --- */
852 MObtainWriteLock(&afs_xdcache, 286);
855 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
856 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
857 tdc = afs_GetDSlot(index, 0);
858 ReleaseReadLock(&tdc->tlock);
859 if (!FidCmp(&tdc->f.fid, &avc->fid))
863 index = afs_dvnextTbl[index];
866 dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
/* --- Second pass: clear index flags and collect the dcache entries
 * so their contents can be zapped after the table lock is dropped. --- */
869 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
870 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
871 tdc = afs_GetDSlot(index, 0);
872 ReleaseReadLock(&tdc->tlock);
873 if (!FidCmp(&tdc->f.fid, &avc->fid)) {
874 /* same file? we'll zap it */
875 if (afs_indexFlags[index] & IFDataMod) {
876 afs_stats_cmperf.cacheCurrDirtyChunks--;
877 /* don't write it back */
878 afs_indexFlags[index] &= ~IFDataMod;
880 afs_indexFlags[index] &= ~IFAnyPages;
881 if (dcListCount < dcListMax)
882 dcList[dcListCount++] = tdc;
889 index = afs_dvnextTbl[index];
891 MReleaseWriteLock(&afs_xdcache);
/* --- Zap pass: with the table unlocked, write-lock each entry and
 * discard it (discard call itself elided in this excerpt). --- */
893 for (i = 0; i < dcListCount; i++) {
896 ObtainWriteLock(&tdc->lock, 679);
898 if (vType(avc) == VDIR)
900 ReleaseWriteLock(&tdc->lock);
904 osi_Free(dcList, dcListMax * sizeof(struct dcache *));
911 * afs_TruncateAllSegments
914 * Truncate a cache file.
917 * avc : Ptr to vcache entry to truncate.
918 * alen : Number of bytes to make the file.
919 * areq : Ptr to request structure.
922 * Called with avc write-locked; in VFS40 systems, pvnLock is also
/*
 * afs_TruncateAllSegments
 *
 * Truncate the locally cached copy of a file to 'alen' bytes: flush
 * VM pages beyond the new EOF, then shrink every cached chunk that
 * extends past the new length. The server is NOT contacted here —
 * truncPos records the pending truncation for a later store.
 *
 * Parameters:
 *   avc   : vcache entry to truncate (caller holds the write lock).
 *   alen  : new file length in bytes.
 *   areq  : request structure.
 *   acred : credentials used for the VM page operations.
 *
 * NOTE(review): this excerpt is elided (return type, some declarations,
 * braces, and the function's tail beyond this view are missing).
 */
926 afs_TruncateAllSegments(register struct vcache *avc, afs_size_t alen,
927 struct vrequest *areq, struct AFS_UCRED *acred)
929 register struct dcache *tdc;
930 register afs_int32 code;
931 register afs_int32 index;
935 struct dcache **tdcArray;
937 AFS_STATCNT(afs_TruncateAllSegments);
938 avc->m.Date = osi_Time();
939 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
940 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
941 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
/* Extending (or equal-length) "truncate": just record the new length
 * and mark the file extended; no cached data needs touching. */
942 if (alen >= avc->m.Length) {
944 * Special speedup since Sun's vm extends the file this way;
945 * we've never written to the file thus we can just set the new
946 * length and avoid the needless calls below.
947 * Also used for ftruncate calls which can extend the file.
948 * To completely minimize the possible extra StoreMini RPC, we really
949 * should keep the ExtendedPos as well and clear this flag if we
950 * truncate below that value before we store the file back.
952 avc->states |= CExtendedFile;
953 avc->m.Length = alen;
956 #if (defined(AFS_SUN5_ENV))
958 /* Zero unused portion of last page */
959 osi_VM_PreTruncate(avc, alen, acred);
/* Solaris: block new getpage activity while we truncate (activeV is
 * decremented and waiters woken at the bottom of this function). */
963 #if (defined(AFS_SUN5_ENV))
964 ObtainWriteLock(&avc->vlock, 546);
965 avc->activeV++; /* Block new getpages */
966 ReleaseWriteLock(&avc->vlock);
/* Drop the vcache lock around the VM truncate (lock-order requirement,
 * presumably — the reacquire is at lock id 79 below). */
969 ReleaseWriteLock(&avc->lock);
972 /* Flush pages beyond end-of-file. */
973 osi_VM_Truncate(avc, alen, acred);
976 ObtainWriteLock(&avc->lock, 79);
978 avc->m.Length = alen;
/* Remember the lowest pending truncation point for the next store. */
980 if (alen < avc->truncPos)
981 avc->truncPos = alen;
982 code = DVHash(&avc->fid);
984 /* block out others from screwing with this table */
985 MObtainWriteLock(&afs_xdcache, 287);
/* --- First pass: count this file's chunks on the DV hash chain
 * (the count update itself is elided in this excerpt). --- */
988 for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
989 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
990 tdc = afs_GetDSlot(index, 0);
991 ReleaseReadLock(&tdc->tlock);
992 if (!FidCmp(&tdc->f.fid, &avc->fid))
996 index = afs_dvnextTbl[index];
999 /* Now allocate space where we can save those dcache entries, and
1000 * do a second pass over them.. Since we're holding xdcache, it
1001 * shouldn't be changing.
1003 tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
1006 for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
1007 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
1008 tdc = afs_GetDSlot(index, 0);
1009 ReleaseReadLock(&tdc->tlock);
1010 if (!FidCmp(&tdc->f.fid, &avc->fid)) {
1011 /* same file, and modified, we'll store it back */
1012 if (dcPos < dcCount) {
1013 tdcArray[dcPos++] = tdc;
1021 index = afs_dvnextTbl[index];
1024 MReleaseWriteLock(&afs_xdcache);
1026 /* Now we loop over the array of dcache entries and truncate them */
1027 for (index = 0; index < dcPos; index++) {
1028 struct osi_file *tfile;
1030 tdc = tdcArray[index];
/* Bytes of this chunk that survive: new EOF minus the chunk's base. */
1032 newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
1035 ObtainSharedLock(&tdc->lock, 672);
1036 if (newSize < tdc->f.chunkBytes) {
1037 UpgradeSToWLock(&tdc->lock, 673);
1038 tfile = afs_CFileOpen(tdc->f.inode);
1039 afs_CFileTruncate(tfile, newSize);
1040 afs_CFileClose(tfile);
1041 afs_AdjustSize(tdc, newSize);
1042 if (alen < tdc->validPos) {
1043 if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
1046 tdc->validPos = alen;
1048 ConvertWToSLock(&tdc->lock);
1050 ReleaseSharedLock(&tdc->lock);
1054 osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
/* Solaris: re-enable getpages and wake anyone waiting on a revoke. */
1056 #if (defined(AFS_SUN5_ENV))
1057 ObtainWriteLock(&avc->vlock, 547);
1058 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
1059 avc->vstates &= ~VRevokeWait;
1060 afs_osi_Wakeup((char *)&avc->vstates);
1062 ReleaseWriteLock(&avc->vlock);