2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * --------------------- Required definitions ---------------------
13 #include "../afs/param.h" /*Should be always first*/
14 #include "../afs/sysincludes.h" /*Standard vendor system headers*/
15 #include "../afs/afsincludes.h" /*AFS-based standard headers*/
16 #include "../afs/afs_stats.h" /* statistics */
17 #include "../afs/afs_cbqueue.h"
18 #include "../afs/afs_osidnlc.h"
22 /* Imported variables */
23 extern afs_rwlock_t afs_xserver;
24 extern afs_rwlock_t afs_xdcache;
25 extern afs_rwlock_t afs_xcbhash;
26 extern afs_lock_t afs_ftf;
27 extern struct server *afs_servers[NSERVERS];
28 extern afs_int32 afs_dhashsize;
29 extern afs_int32 *afs_dvhashTbl;
30 extern unsigned char *afs_indexFlags; /*(only one) Is there data there?*/
31 extern int cacheDiskType;
/* Monotonic stamp used by the cache manager; definition visible here, users elsewhere. */
33 afs_uint32 afs_stampValue=0;
39 * Send a truncation request to a FileServer.
45 * We're write-locked upon entry.
/*
 * afs_StoreMini
 *
 * Issue a zero-length StoreData RPC so the FileServer applies the pending
 * truncation length (avc->truncPos) and the client's modification time,
 * without shipping any chunk data.  Per the header comment above, the
 * caller holds the vcache write lock on entry.
 *
 * NOTE(review): this extract omits several lines of the original file
 * (e.g. the declaration/initialization of tlen, loop braces, and error
 * paths).  Comments below describe only the statements visible here;
 * confirm control flow against the full source before editing.
 */
48 int afs_StoreMini(avc, areq)
49 register struct vcache *avc;
50 struct vrequest *areq;
53 register struct conn *tc;
54 struct AFSStoreStatus InStatus;
55 struct AFSFetchStatus OutStatus;
56 struct AFSVolSync tsync;
57 register afs_int32 code;
58 register struct rx_call *tcall;
62 AFS_STATCNT(afs_StoreMini);
63 afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
64 ICL_TYPE_INT32, avc->m.Length);
/* Clamp the length sent to the server to any pending truncation, then
 * clear the local truncation/extension state — the RPC below will make
 * the server agree.  (tlen's initialization is not visible in this
 * extract; presumably it starts at avc->m.Length — TODO confirm.) */
66 if (avc->truncPos < tlen) tlen = avc->truncPos;
67 avc->truncPos = AFS_NOTRUNC;
68 avc->states &= ~CExtendedFile;
/* Get a server connection; afs_Analyze() below decides whether to retry. */
71 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
73 #ifdef RX_ENABLE_LOCKS
75 #endif /* RX_ENABLE_LOCKS */
76 tcall = rx_NewCall(tc->id);
77 #ifdef RX_ENABLE_LOCKS
79 #endif /* RX_ENABLE_LOCKS */
80 /* Set the client mod time since we always want the file
81 * to have the client's mod time and not the server's one
82 * (to avoid problems with make, etc.) It almost always
83 * works fine with standard afs because the server/client
84 * times are in sync and more importantly this storemini
85 * it's a special call that would typically be followed by
86 * the proper store-data or store-status calls.
88 InStatus.Mask = AFS_SETMODTIME;
89 InStatus.ClientModTime = avc->m.Date;
90 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
91 #ifdef RX_ENABLE_LOCKS
93 #endif /* RX_ENABLE_LOCKS */
/* StoreData with zero bytes of payload: offset avc->m.Length, length 0,
 * file length tlen — this is what performs the truncation/extension. */
94 code = StartRXAFS_StoreData(tcall,
95 (struct AFSFid *)&avc->fid.Fid,
96 &InStatus, avc->m.Length, 0, tlen);
98 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
100 code = rx_EndCall(tcall, code);
101 #ifdef RX_ENABLE_LOCKS
103 #endif /* RX_ENABLE_LOCKS */
/* afs_Analyze() interprets the error and retries with another server
 * when appropriate (standard cache-manager RPC retry idiom). */
108 (afs_Analyze(tc, code, &avc->fid, areq,
109 AFS_STATS_FS_RPCIDX_STOREDATA,
110 SHARED_LOCK, (struct cell *)0));
/* On success, merge the returned status into the vcache. */
113 afs_ProcessFS(avc, &OutStatus, areq);
/* NOTE(review): presumably the failure path — the guarding conditional is
 * not visible in this extract; confirm against the full source. */
117 afs_InvalidateAllSegments(avc, 1);
/* Counter of "missing dcache" events seen by afs_StoreAllSegments (debug aid). */
123 unsigned int storeallmissing = 0;
/* Minimum of two values; classic double-evaluation macro — do not pass
 * expressions with side effects. */
124 #define lmin(a,b) (((a) < (b)) ? (a) : (b))
126 * afs_StoreAllSegments
129 * Stores all modified segments back to server
132 * avc : Pointer to vcache entry.
133 * areq : Pointer to request structure.
136 * Called with avc write-locked.
/* Number of dirty chunks batched per StoreData RPC; smaller on platforms
 * with tighter kernel stack/resource limits.
 * NOTE(review): the #else separating the two definitions (original line
 * 140) is missing from this extract. */
138 #if defined (AFS_HPUX_ENV) || defined(AFS_ULTRIX_ENV)
139 int NCHUNKSATONCE = 3;
141 int NCHUNKSATONCE = 64 ;
/*
 * afs_StoreAllSegments
 *
 * Write every dirty (IFDataMod) cache chunk of avc back to the FileServer,
 * batching up to NCHUNKSATONCE contiguous chunks per StoreData RPC, then
 * relabel the stored chunks with the new DataVersion.  Called with avc
 * write-locked (see header comment above); the lock is converted to shared
 * around the RPCs and upgraded back afterwards.
 *
 * NOTE(review): this extract omits a large number of original lines
 * (declarations such as hash/tlen/first/stored/nomore/shouldwake/safety,
 * most braces, #else branches, and several statements).  Comments below
 * describe only what is visible; verify against the full source before
 * any functional change.
 */
146 afs_StoreAllSegments(avc, areq, sync)
147 register struct vcache *avc;
148 struct vrequest *areq;
151 { /*afs_StoreAllSegments*/
152 register struct dcache *tdc;
153 register afs_int32 code=0;
154 register afs_int32 index;
155 register afs_int32 origCBs, foreign=0;
157 afs_hyper_t newDV, oldDV; /* DV when we start, and finish, respectively */
158 struct dcache **dcList, **dclist;
159 unsigned int i, j, minj, maxj, moredata, high, off;
162 int maxStoredLength; /* highest offset we've written to server. */
164 struct afs_stats_xferData *xferP; /* Ptr to this op's xfer struct */
165 osi_timeval_t xferStartTime, /*FS xfer start time*/
166 xferStopTime; /*FS xfer stop time*/
167 afs_int32 bytesToXfer; /* # bytes to xfer*/
168 afs_int32 bytesXferred; /* # bytes actually xferred*/
169 #endif /* AFS_NOSTATS */
172 AFS_STATCNT(afs_StoreAllSegments);
/* Remember the DV at entry (oldDV) and track the DV we expect after all
 * stores (newDV) so the relabel pass below can tell whether intervening
 * activity invalidated our bookkeeping. */
174 hset(oldDV, avc->m.DataVersion);
175 hset(newDV, avc->m.DataVersion);
176 hash = DVHash(&avc->fid);
177 foreign = (avc->states & CForeign);
/* Scratch array of dcache pointers for one batch of chunks. */
178 dcList = (struct dcache **) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
179 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
180 ICL_TYPE_INT32, avc->m.Length);
181 #ifndef AFS_AIX32_ENV
182 /* In the aix vm implementation we need to do the vm_writep even
183 * on the memcache case since that's where we adjust the file's size
184 * and finish flushing partial vm pages.
186 if (cacheDiskType != AFS_FCACHE_TYPE_MEM)
187 #endif /* AFS_AIX32_ENV */
189 /* If we're not diskless, reading a file may stress the VM
190 * system enough to cause a pageout, and this vnode would be
191 * locked when the pageout occurs. We can prevent this problem
192 * by making sure all dirty pages are already flushed. We don't
193 * do this when diskless because reading a diskless (i.e.
194 * memory-resident) chunk doesn't require using new VM, and we
195 * also don't want to dump more dirty data into a diskless cache,
196 * since they're smaller, and we might exceed its available
199 #if defined(AFS_SUN5_ENV)
200 if ( sync & AFS_VMSYNC_INVAL) /* invalidate VM pages */
201 osi_VM_TryToSmush(avc, CRED() , 1 );
204 osi_VM_StoreAllSegments(avc);
/* Drop to a shared lock for the (long-running) network stores. */
207 ConvertWToSLock(&avc->lock);
210 * Subsequent code expects a sorted list, and it expects all the
211 * chunks in the list to be contiguous, so we need a sort and a
212 * while loop in here, too - but this will work for a first pass...
213 * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
214 * bin sort, I guess. Chunk numbers start with 0
216 * - Have to get a write lock on xdcache because GetDSlot might need it (if
217 * the chunk doesn't have a dcache struct).
218 * This seems like overkill in most cases.
219 * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
220 * xdcache, then relock xdcache and try to use index. It is done
221 * a lot elsewhere in the CM, but I'm not buying that argument.
222 * - should be able to check IFDataMod without doing the GetDSlot (just
223 * hold afs_xdcache). That way, it's easy to do this without the
224 * writelock on afs_xdcache, and we save unnecessary disk
225 * operations. I don't think that works, 'cuz the next pointers
228 origCBs = afs_allCBs;
232 tlen = avc->m.Length;
/* ---- Outer batching loop (do/while at original line 551): gather up to
 * NCHUNKSATONCE dirty chunks with chunk numbers in [minj, minj+NCHUNKSATONCE)
 * into dcList, store them, then advance minj. ---- */
236 bzero ((char *)dcList, NCHUNKSATONCE * sizeof(struct dcache *));
240 /* lock and start over from beginning of hash chain
241 * in order to avoid a race condition. */
242 MObtainWriteLock(&afs_xdcache,284);
243 index = afs_dvhashTbl[hash];
245 for(j=0; index != NULLIDX;) {
246 if ((afs_indexFlags[index] & IFDataMod) &&
247 (afs_indexUnique[index] == avc->fid.Fid.Unique)) {
248 tdc = afs_GetDSlot(index, 0); /* refcount+1. */
249 if (!FidCmp( &tdc->f.fid, &avc->fid ) && tdc->f.chunk >= minj ) {
/* Bin-sort: slot the chunk into dcList by its offset within this batch. */
251 off = tdc->f.chunk - minj;
252 if (off < NCHUNKSATONCE) {
254 osi_Panic("dclist slot already in use!");
258 tlen -= tdc->f.chunkBytes; /* shortcut: big win for little files */
265 lockedPutDCache(tdc);
266 if (j == NCHUNKSATONCE)
270 lockedPutDCache(tdc);
273 index = afs_dvnextTbl[index];
276 MReleaseWriteLock(&afs_xdcache);
277 /* this guy writes chunks, puts back dcache structs, and bumps newDV */
278 /* "moredata" just says "there are more dirty chunks yet to come".
281 static afs_uint32 lp1 = 10000, lp2 = 10000;
282 struct AFSStoreStatus InStatus;
283 afs_uint32 base, bytes, nchunks;
288 struct osi_file * tfile;
289 struct rx_call * tcall;
290 extern int afs_defaultAsynchrony;
/* Walk the gathered batch; accumulate contiguous runs into one RPC. */
293 for (bytes = 0, j = 0; !code && j<=high; j++) {
297 bytes += dcList[j]->f.chunkBytes;
/* A short chunk in the middle of a run must be padded out to
 * afs_OtherCSize so the byte offsets sent to the server stay contiguous
 * (the actual zero-padding write happens at original line 451 below). */
298 if ((dcList[j]->f.chunkBytes < afs_OtherCSize)
299 && (dcList[j]->f.chunk - minj < high)
301 int sbytes = afs_OtherCSize - dcList[j]->f.chunkBytes;
/* End of a contiguous run (last slot, or a hole follows): ship it. */
308 if (bytes && (j==high || !dcList[j+1])) {
309 /* base = AFS_CHUNKTOBASE(dcList[first]->f.chunk); */
310 base = AFS_CHUNKTOBASE(first + minj) ;
313 * take a list of dcache structs and send them all off to the server
314 * the list must be in order, and the chunks contiguous.
315 * Note - there is no locking done by this code currently. For
316 * safety's sake, xdcache could be locked over the entire call.
317 * However, that pretty well ties up all the threads. Meantime, all
318 * the chunks _MUST_ have their refcounts bumped.
319 * The writes done before a store back will clear setuid-ness
321 * We can permit CacheStoreProc to wake up the user process IFF we
322 * are doing the last RPC for this close, ie, storing back the last
323 * set of contiguous chunks of a file.
326 dclist = &dcList[first];
328 nomore = !(moredata || (j!=high));
329 InStatus.ClientModTime = avc->m.Date;
330 InStatus.Mask = AFS_SETMODTIME;
331 if (sync & AFS_SYNC) {
332 InStatus.Mask |= AFS_FSYNC;
334 tlen = lmin(avc->m.Length, avc->truncPos);
/* Retry loop via afs_Analyze (do/while at original line 508). */
338 tc = afs_Conn(&avc->fid, areq);
340 #ifdef RX_ENABLE_LOCKS
342 #endif /* RX_ENABLE_LOCKS */
343 tcall = rx_NewCall(tc->id);
344 code = StartRXAFS_StoreData(tcall, (struct AFSFid *) &avc->fid.Fid,
345 &InStatus, base, bytes, tlen);
346 #ifdef RX_ENABLE_LOCKS
348 #endif /* RX_ENABLE_LOCKS */
354 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
/* The file length we just sent subsumes any pending truncation. */
355 avc->truncPos = AFS_NOTRUNC;
/* Stream each chunk in the run over the open call. */
357 for (i = 0; i<nchunks && !code;i++) {
360 afs_warn("afs: missing dcache!\n");
362 continue; /* panic? */
/* Decide whether CacheStoreProc may wake the closing process early:
 * only when the remaining unsent bytes fit under the asynchrony
 * threshold (per-vnode override, else afs_defaultAsynchrony). */
366 if (avc->asynchrony == -1) {
367 if (afs_defaultAsynchrony > (bytes-stored)) {
368 shouldwake = &nomore;
370 } else if ((afs_uint32)avc->asynchrony >= (bytes-stored)) {
371 shouldwake = &nomore;
374 tfile = afs_CFileOpen(tdc->f.inode);
/* Stats-enabled build: time and byte-count the transfer. */
376 xferP = &(afs_stats_cmfullperf.rpc.fsXferTimes[AFS_STATS_FS_XFERIDX_STOREDATA]);
377 osi_GetuTime(&xferStartTime);
379 code = afs_CacheStoreProc(tcall, tfile, tdc->f.chunkBytes,
380 avc, shouldwake, &bytesToXfer,
383 osi_GetuTime(&xferStopTime);
386 (xferP->numSuccesses)++;
/* sumBytes is kept in KB; the residue below 1K carries over in
 * afs_stats_XferSumBytes to avoid losing it to the shift. */
387 afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_STOREDATA] += bytesXferred;
388 (xferP->sumBytes) += (afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_STOREDATA] >> 10);
389 afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_STOREDATA] &= 0x3FF;
390 if (bytesXferred < xferP->minBytes)
391 xferP->minBytes = bytesXferred;
392 if (bytesXferred > xferP->maxBytes)
393 xferP->maxBytes = bytesXferred;
396 * Tally the size of the object. Note: we tally the actual size,
397 * NOT the number of bytes that made it out over the wire.
399 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0)
402 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET1)
405 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET2)
408 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET3)
411 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET4)
414 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET5)
417 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET6)
420 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET7)
425 afs_stats_GetDiff(elapsedTime, xferStartTime, xferStopTime);
426 afs_stats_AddTo((xferP->sumTime), elapsedTime);
427 afs_stats_SquareAddTo((xferP->sqrTime), elapsedTime);
428 if (afs_stats_TimeLessThan(elapsedTime, (xferP->minTime))) {
429 afs_stats_TimeAssign((xferP->minTime), elapsedTime);
431 if (afs_stats_TimeGreaterThan(elapsedTime, (xferP->maxTime))) {
432 afs_stats_TimeAssign((xferP->maxTime), elapsedTime);
/* AFS_NOSTATS build: same store, throwaway byte counters. */
436 code = afs_CacheStoreProc(tcall, tfile, tdc->f.chunkBytes, avc,
437 shouldwake, &lp1, &lp2);
438 #endif /* AFS_NOSTATS */
439 afs_CFileClose(tfile);
/* Pad a short interior chunk with zeros up to afs_OtherCSize so the
 * stream matches the byte count promised in StartRXAFS_StoreData. */
440 if ((tdc->f.chunkBytes < afs_OtherCSize) &&
442 int bsent, tlen, tlen1=0, sbytes = afs_OtherCSize - tdc->f.chunkBytes;
443 char *tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
446 tlen = (sbytes > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : sbytes);
447 bzero(tbuffer, tlen);
448 #ifdef RX_ENABLE_LOCKS
450 #endif /* RX_ENABLE_LOCKS */
451 bsent = rx_Write(tcall, tbuffer, tlen);
452 #ifdef RX_ENABLE_LOCKS
454 #endif /* RX_ENABLE_LOCKS */
457 code = -33; /* XXX */
462 osi_FreeLargeSpace(tbuffer);
464 stored += tdc->f.chunkBytes;
466 /* ideally, I'd like to unlock the dcache and turn
467 * off the writing bit here, but that would
468 * require being able to retry StoreAllSegments in
469 * the event of a failure. It only really matters
470 * if user can't read from a 'locked' dcache or
471 * one which has the writing bit turned on. */
474 struct AFSFetchStatus OutStatus;
475 struct AFSVolSync tsync;
476 #ifdef RX_ENABLE_LOCKS
478 #endif /* RX_ENABLE_LOCKS */
479 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
480 #ifdef RX_ENABLE_LOCKS
482 #endif /* RX_ENABLE_LOCKS */
486 /* Now copy out return params */
487 UpgradeSToWLock(&avc->lock,28); /* keep out others for a while */
488 if (!code) { /* must wait til RPC completes to be sure of this info */
489 afs_ProcessFS(avc, &OutStatus, areq);
490 /* Keep last (max) size of file on server to see if
491 * we need to call afs_StoreMini to extend the file.
494 maxStoredLength = OutStatus.Length;
497 ConvertWToSLock(&avc->lock);
500 #ifdef RX_ENABLE_LOCKS
502 #endif /* RX_ENABLE_LOCKS */
503 code = rx_EndCall(tcall, code, avc, base);
504 #ifdef RX_ENABLE_LOCKS
506 #endif /* RX_ENABLE_LOCKS */
/* afs_Analyze returns nonzero to retry this run on another server. */
508 } while (afs_Analyze(tc, code, &avc->fid, areq,
509 AFS_STATS_FS_RPCIDX_STOREDATA,
510 SHARED_LOCK, (struct cell *)0));
512 /* put back all remaining locked dcache entries */
513 for (i=0; i<nchunks; i++) {
/* Stored successfully: chunk is no longer dirty. */
516 if (afs_indexFlags[tdc->index] & IFDataMod) {
517 afs_indexFlags[tdc->index] &= ~IFDataMod;
518 afs_stats_cmperf.cacheCurrDirtyChunks--;
519 afs_indexFlags[tdc->index] &= ~IFDirtyPages;
520 if ( sync & AFS_VMSYNC_INVAL )
522 /* since we have invalidated all the pages of this
523 ** vnode by calling osi_VM_TryToSmush, we can
524 ** safely mark this dcache entry as not having
525 ** any pages. This vnode now becomes eligible for
526 ** reclamation by getDownD.
528 afs_indexFlags[tdc->index] &= ~IFAnyPages;
532 tdc->f.states &= ~DWriting; /* correct?*/
533 tdc->flags |= DFEntryMod;
534 lockedPutDCache(tdc);
/* On error, release the refs on the chunks we never got to. */
538 for (j++; j<=high; j++)
540 lockedPutDCache(dcList[j]);
543 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALLDCDONE,
544 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
550 minj += NCHUNKSATONCE;
551 } while ( !code && moredata );
553 UpgradeSToWLock(&avc->lock,29);
555 /* send a trivial truncation store if did nothing else */
558 * Call StoreMini if we haven't written enough data to extend the
559 * file at the fileserver to the client's notion of the file length.
561 if ((avc->truncPos != AFS_NOTRUNC) ||
562 ((avc->states & CExtendedFile) && (maxStoredLength < avc->m.Length))) {
563 code = afs_StoreMini(avc, areq);
565 hadd32(newDV, 1); /* just bumped here, too */
567 avc->states &= ~CExtendedFile;
571 * Finally, turn off DWriting, turn on DFEntryMod,
572 * update f.versionNo.
573 * A lot of this could be integrated into the loop above
578 MObtainWriteLock(&afs_xdcache,285); /* overkill, but it gets the
579 * lock in case GetDSlot needs it */
/* safety guard caps the hash-chain walk at afs_cacheFiles+2 iterations,
 * defending against a corrupted/cyclic dvnext chain. */
580 for(safety = 0, index = afs_dvhashTbl[hash];
581 index != NULLIDX && safety < afs_cacheFiles+2;) {
583 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
584 tdc = afs_GetDSlot(index, 0);
586 if (!FidCmp(&tdc->f.fid, &avc->fid)) {
587 /* this is the file */
588 /* was code here to clear IFDataMod, but it should only be done
589 * in storedcache and storealldcache.
591 /* Only increase DV if we had up-to-date data to start with.
592 * Otherwise, we could be falsely upgrading an old chunk
593 * (that we never read) into one labelled with the current
594 * DV #. Also note that we check that no intervening stores
595 * occurred, otherwise we might mislabel cache information
596 * for a chunk that we didn't store this time
598 /* Don't update the version number if it's not yet set. */
599 if (code == 0 && (!hsame(tdc->f.versionNo, h_unset))
600 && (hcmp(tdc->f.versionNo, oldDV) >= 0)) {
601 if ((!(afs_dvhack || foreign) && hsame(avc->m.DataVersion, newDV))
602 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs)) ) {
603 /* no error, this is the DV */
604 hset(tdc->f.versionNo, avc->m.DataVersion);
605 tdc->flags |= DFEntryMod;
609 lockedPutDCache(tdc);
611 index = afs_dvnextTbl[index];
613 MReleaseWriteLock(&afs_xdcache);
618 * Invalidate chunks after an error for ccores files since
619 * afs_inactive won't be called for these and they won't be
620 * invalidated. Also discard data if it's a permanent error from the
623 if (areq->permWriteError || (avc->states & (CCore1 | CCore))) {
624 afs_InvalidateAllSegments(avc, 1);
627 afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
628 ICL_TYPE_INT32, avc->m.Length, ICL_TYPE_INT32, code);
629 /* would like a Trace5, but it doesn't exist...*/
630 afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
631 ICL_TYPE_INT32, avc->lock.wait_states,
632 ICL_TYPE_INT32, avc->lock.excl_locked);
633 afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
634 ICL_TYPE_INT32, avc->lock.wait_states,
635 ICL_TYPE_INT32, avc->lock.readers_reading,
636 ICL_TYPE_INT32, avc->lock.num_waiting );
639 * Finally, if updated DataVersion matches newDV, we did all of the
640 * stores. If mapDV indicates that the page cache was flushed up
641 * to when we started the store, then we can relabel them as flushed
642 * as recently as newDV.
643 * Turn off CDirty bit because the stored data is now in sync with server.
645 if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
646 if ((!(afs_dvhack || foreign) && hsame(avc->m.DataVersion, newDV))
647 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs)) ) {
648 hset(avc->mapDV, newDV);
649 avc->states &= ~CDirty;
652 osi_FreeLargeSpace(dcList);
654 /* If not the final write a temporary error is ok. */
655 if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
660 } /*afs_StoreAllSegments (new 03/02/94)*/
664 * afs_InvalidateAllSegments
667 * Invalidates all chunks for a given file
670 * avc : Pointer to vcache entry.
671 * asetLock : If true, we are to set the afs_xdcache lock; otherwise,
672 * the caller has already done it.
675 * For example, called after an error has been detected. Called
676 * with avc write-locked.
/*
 * afs_InvalidateAllSegments
 *
 * Discard every cached chunk of avc without writing dirty data back:
 * clear pending truncation/extension, drop the callback and CStatd/CDirty,
 * purge the DNLC for directories, release VM pages (Solaris), and strip
 * IFDataMod/IFAnyPages from each matching dcache slot.  Per the header
 * comment above: called with avc write-locked (e.g. after an error);
 * asetLock says whether this routine takes afs_xdcache itself.
 *
 * NOTE(review): parameter declarations and several locals/braces are
 * missing from this extract; comments describe only the visible lines.
 */
679 afs_InvalidateAllSegments(avc, asetLock)
683 { /*afs_InvalidateAllSegments*/
689 AFS_STATCNT(afs_InvalidateAllSegments);
690 afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
691 ICL_TYPE_INT32, avc->m.Length);
692 hash = DVHash(&avc->fid);
693 avc->truncPos = AFS_NOTRUNC; /* don't truncate later */
694 avc->states &= ~CExtendedFile; /* not any more */
/* Drop the server callback under afs_xcbhash so status must be refetched. */
695 ObtainWriteLock(&afs_xcbhash, 459);
696 afs_DequeueCallback(avc);
697 avc->states &= ~(CStatd|CDirty); /* mark status information as bad, too */
698 ReleaseWriteLock(&afs_xcbhash);
/* Directories (odd vnode numbers, or VDIR) may have stale name-cache
 * entries pointing at them — purge the DNLC. */
699 if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
700 osi_dnlc_purgedp(avc);
701 /* Blow away pages; for now, only for Solaris */
702 #if (defined(AFS_SUN5_ENV))
703 if (WriteLocked(&avc->lock))
704 osi_ReleaseVM(avc, (struct AFS_UCRED *)0);
707 * Block out others from screwing with this table; is a read lock
/* Walk the DV hash chain, stripping dirty/page flags from this file's
 * chunks so they are neither written back nor kept. */
710 if (asetLock) MObtainWriteLock(&afs_xdcache,286);
711 for(index = afs_dvhashTbl[hash]; index != NULLIDX;) {
712 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
713 tdc = afs_GetDSlot(index, 0);
714 if (!FidCmp(&tdc->f.fid, &avc->fid)) {
715 /* same file? we'll zap it */
716 if (afs_indexFlags[index] & IFDataMod) {
717 afs_stats_cmperf.cacheCurrDirtyChunks--;
718 /* don't write it back */
719 afs_indexFlags[index] &= ~IFDataMod;
721 afs_indexFlags[index] &= ~IFAnyPages;
723 if (vType(avc) == VDIR) {
727 lockedPutDCache(tdc);
729 index = afs_dvnextTbl[index];
731 if (asetLock) MReleaseWriteLock(&afs_xdcache);
734 } /*afs_InvalidateAllSegments*/
738 * afs_TruncateAllSegments
741 * Truncate a cache file.
744 * avc : Ptr to vcache entry to truncate.
745 * alen : Number of bytes to make the file.
746 * areq : Ptr to request structure.
749 * Called with avc write-locked; in VFS40 systems, pvnLock is also
/*
 * afs_TruncateAllSegments
 *
 * Truncate (or logically extend) the cached file to alen bytes: flush VM
 * pages past EOF, then shrink every cached chunk whose data extends past
 * the new length.  Extension is cheap — just set the new length and the
 * CExtendedFile flag; a later afs_StoreMini makes the server agree.
 * Per the header comment above: called with avc write-locked.
 *
 * NOTE(review): this extract omits several lines (locals such as newSize,
 * some #else/#endif branches, and return statements); comments describe
 * only the visible code.
 */
752 afs_TruncateAllSegments(avc, alen, areq, acred)
754 register struct vcache *avc;
755 struct vrequest *areq;
756 struct AFS_UCRED *acred;
757 { /*afs_TruncateAllSegments*/
759 register struct dcache *tdc;
760 register afs_int32 code;
761 register afs_int32 index;
764 AFS_STATCNT(afs_TruncateAllSegments);
765 avc->m.Date = osi_Time();
/* Extension case: nothing cached needs trimming. */
766 if (alen >= avc->m.Length) {
768 * Special speedup since Sun's vm extends the file this way;
769 * we've never written to the file thus we can just set the new
770 * length and avoid the needless calls below.
771 * Also used for ftruncate calls which can extend the file.
772 * To completely minimize the possible extra StoreMini RPC, we really
773 * should keep the ExtendedPos as well and clear this flag if we
774 * truncate below that value before we store the file back.
776 avc->states |= CExtendedFile;
777 avc->m.Length = alen;
778 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL1, ICL_TYPE_POINTER, avc,
779 ICL_TYPE_INT32, avc->m.Length, ICL_TYPE_INT32, alen);
/* Shrinking case from here down. */
783 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL2, ICL_TYPE_POINTER, avc,
784 ICL_TYPE_INT32, avc->m.Length, ICL_TYPE_INT32, alen);
786 #if (defined(AFS_SUN5_ENV))
788 /* Zero unused portion of last page */
789 osi_VM_PreTruncate(avc, alen, acred);
/* Solaris: bump activeV to hold off new getpage activity while the
 * vcache lock is dropped around osi_VM_Truncate below. */
793 #if (defined(AFS_SUN5_ENV))
794 ObtainWriteLock(&avc->vlock, 546);
795 avc->activeV++; /* Block new getpages */
796 ReleaseWriteLock(&avc->vlock);
/* Drop the vcache lock so the VM flush can't deadlock against pageout. */
799 ReleaseWriteLock(&avc->lock);
802 /* Flush pages beyond end-of-file. */
803 osi_VM_Truncate(avc, alen, acred);
806 ObtainWriteLock(&avc->lock,79);
808 avc->m.Length = alen;
/* Remember the smallest truncation requested so the store path sends it. */
810 if (alen < avc->truncPos) avc->truncPos = alen;
/* NB: 'code' is reused here as the DV hash bucket, not an error code. */
811 code = DVHash(&avc->fid);
812 /* block out others from screwing with this table */
813 MObtainWriteLock(&afs_xdcache,287);
814 for(index = afs_dvhashTbl[code]; index != NULLIDX;) {
815 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
816 tdc = afs_GetDSlot(index, 0);
817 if (!FidCmp(&tdc->f.fid, &avc->fid)) {
818 /* same file, and modified, we'll store it back */
/* Bytes of this chunk that survive the truncation (clamped at 0). */
819 newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
820 if (newSize < 0) newSize = 0;
821 if (newSize < tdc->f.chunkBytes) {
822 register struct osi_file *tfile;
823 tfile = afs_CFileOpen(tdc->f.inode);
824 afs_CFileTruncate(tfile, newSize);
825 afs_CFileClose(tfile);
826 afs_AdjustSize(tdc, newSize);
829 lockedPutDCache(tdc);
831 index = afs_dvnextTbl[index];
/* Solaris: release the getpage hold and wake any revoke waiter. */
833 #if (defined(AFS_SUN5_ENV))
834 ObtainWriteLock(&avc->vlock, 547);
835 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
836 avc->vstates &= ~VRevokeWait;
837 afs_osi_Wakeup((char *)&avc->vstates);
839 ReleaseWriteLock(&avc->vlock);
841 MReleaseWriteLock(&afs_xdcache);
844 } /*afs_TruncateAllSegments*/