2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * --------------------- Required definitions ---------------------
13 #include <afsconfig.h>
14 #include "afs/param.h"
17 #include "afs/sysincludes.h" /*Standard vendor system headers */
18 #include "afsincludes.h" /*AFS-based standard headers */
19 #include "afs/afs_stats.h" /* statistics */
20 #include "afs/afs_cbqueue.h"
21 #include "afs/afs_osidnlc.h"
/* File-scope counter, zero-initialized at module load.
 * NOTE(review): no use of afs_stampValue is visible in this excerpt;
 * presumably a stamp/sequence source for the cache manager — confirm
 * against the rest of the file before relying on its semantics. */
23 afs_uint32 afs_stampValue = 0;
29 * Send a truncation request to a FileServer.
35 * We're write-locked upon entry.
/*
 * afs_StoreMini
 *
 * Issue a zero-byte StoreData RPC to the fileserver so that the server's
 * notion of the file length is set to min(m.Length, truncPos) — i.e. a
 * truncation/extension "store" that moves no data.  Called with the vcache
 * write-locked (per the header comment above this function).
 *
 * avc  : vcache whose server-side length should be adjusted (write-locked).
 * areq : request structure used for connection selection and error handling.
 *
 * NOTE(review): this excerpt is missing lines (retry loop, declarations of
 * l1/l2, error-path braces); comments below describe only what is visible.
 */
39 afs_StoreMini(register struct vcache *avc, struct vrequest *areq)
41 register struct afs_conn *tc;
42 struct AFSStoreStatus InStatus;
43 struct AFSFetchStatus OutStatus;
44 struct AFSVolSync tsync;
45 register afs_int32 code;
46 register struct rx_call *tcall;
47 afs_size_t tlen, xlen = 0;
49 AFS_STATCNT(afs_StoreMini);
50 afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
51 ICL_TYPE_INT32, avc->f.m.Length);
/* Target length = min(cached length, pending truncate position). */
52 tlen = avc->f.m.Length;
53 if (avc->f.truncPos < tlen)
54 tlen = avc->f.truncPos;
/* Consume the pending truncation and the "extended" hint before the RPC. */
55 avc->f.truncPos = AFS_NOTRUNC;
56 avc->f.states &= ~CExtendedFile;
59 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
63 tcall = rx_NewCall(tc->id);
65 /* Set the client mod time since we always want the file
66 * to have the client's mod time and not the server's one
67 * (to avoid problems with make, etc.) It almost always
68 * works fine with standard afs because them server/client
69 * times are in sync and more importantly this storemini
70 * it's a special call that would typically be followed by
71 * the proper store-data or store-status calls.
73 InStatus.Mask = AFS_SETMODTIME;
74 InStatus.ClientModTime = avc->f.m.Date;
75 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
76 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64, ICL_TYPE_FID,
77 &avc->f.fid.Fid, ICL_TYPE_OFFSET,
78 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
79 ICL_HANDLE_OFFSET(xlen), ICL_TYPE_OFFSET,
80 ICL_HANDLE_OFFSET(tlen));
/* 64-bit-capable servers get the 64-bit variant; otherwise fall back to
 * the 32-bit RPC, guarding against values that do not fit in 31 bits. */
82 #ifdef AFS_64BIT_CLIENT
83 if (!afs_serverHasNo64Bit(tc)) {
85 StartRXAFS_StoreData64(tcall,
86 (struct AFSFid *)&avc->f.fid.Fid,
87 &InStatus, avc->f.m.Length,
88 (afs_size_t) 0, tlen);
/* 32-bit path: refuse lengths that overflow a signed 32-bit range.
 * NOTE(review): the EFBIG/error assignment lines are elided here. */
93 if ((avc->f.m.Length > 0x7fffffff) ||
94 (tlen > 0x7fffffff) ||
95 ((0x7fffffff - tlen) < avc->f.m.Length))
/* l1/l2 are the 32-bit copies of Length/tlen — declarations elided
 * from this excerpt. */
98 StartRXAFS_StoreData(tcall,
99 (struct AFSFid *)&avc->f.fid.Fid,
100 &InStatus, l1, 0, l2);
102 #else /* AFS_64BIT_CLIENT */
104 StartRXAFS_StoreData(tcall, (struct AFSFid *)&avc->f.fid.Fid,
105 &InStatus, avc->f.m.Length, 0, tlen);
106 #endif /* AFS_64BIT_CLIENT */
/* Finish the RPC; rx_EndCall merges the transport result into code. */
108 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
110 code = rx_EndCall(tcall, code);
/* RXGEN_OPCODE from a 64-bit call means the server lacks the 64-bit
 * opcode: remember that (the retry itself is in elided code). */
113 #ifdef AFS_64BIT_CLIENT
114 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
115 afs_serverSetNo64Bit(tc);
118 #endif /* AFS_64BIT_CLIENT */
/* Fragment of the afs_Analyze() retry-loop condition (call site elided). */
122 (tc, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STOREDATA,
/* On success merge the returned status; on failure the cached segments
 * are no longer trustworthy and are invalidated. */
126 afs_ProcessFS(avc, &OutStatus, areq);
129 afs_InvalidateAllSegments(avc);
/* Diagnostic counter: number of times a dcache entry expected during a
 * store-back was found missing (incremented in afs_StoreAllSegments'
 * "missing dcache" path — the increment itself is elided here). */
135 unsigned int storeallmissing = 0;
/* Classic min-of-two macro; arguments may be evaluated twice, so callers
 * must not pass expressions with side effects. */
136 #define lmin(a,b) (((a) < (b)) ? (a) : (b))
138 * afs_StoreAllSegments
141 * Stores all modified segments back to server
144 * avc : Pointer to vcache entry.
145 * areq : Pointer to request structure.
148 * Called with avc write-locked.
/* Maximum number of contiguous dirty chunks shipped to the fileserver in a
 * single StoreData RPC batch.  HP-UX gets a much smaller batch (presumably
 * for kernel-stack/resource reasons — the original rationale is not visible
 * here).  NOTE(review): the #else/#endif lines are elided in this excerpt. */
150 #if defined (AFS_HPUX_ENV)
151 int NCHUNKSATONCE = 3;
153 int NCHUNKSATONCE = 64;
/*
 * afs_StoreAllSegments
 *
 * Store every modified (IFDataMod) cache chunk of a file back to the
 * fileserver, in batches of up to NCHUNKSATONCE contiguous chunks per
 * StoreData RPC, then relabel the surviving chunks with the new data
 * version.  Called with avc write-locked (per the header comment above).
 *
 * avc  : vcache entry whose dirty chunks are to be stored.
 * areq : request structure (connection selection / error analysis).
 * sync : flag word — AFS_SYNC, AFS_VMSYNC_INVAL, AFS_LASTSTORE bits are
 *        consulted in the visible code.
 *
 * NOTE(review): many lines are elided from this excerpt (the do/while
 * batch loop header, several declarations — hash, nchunks, moredata,
 * shouldwake, stored, safety, doProcessFS — and most closing braces).
 * Comments below annotate only the visible code.
 */
159 afs_StoreAllSegments(register struct vcache *avc, struct vrequest *areq,
162 register struct dcache *tdc;
163 register afs_int32 code = 0;
164 register afs_int32 index;
165 register afs_int32 origCBs, foreign = 0;
167 afs_hyper_t newDV, oldDV; /* DV when we start, and finish, respectively */
168 struct dcache **dcList, **dclist;
169 unsigned int i, j, minj, moredata, high, off;
171 afs_size_t maxStoredLength; /* highest offset we've written to server. */
174 struct afs_stats_xferData *xferP; /* Ptr to this op's xfer struct */
175 osi_timeval_t xferStartTime, /*FS xfer start time */
176 xferStopTime; /*FS xfer stop time */
177 afs_size_t bytesToXfer; /* # bytes to xfer */
178 afs_size_t bytesXferred; /* # bytes actually xferred */
179 #endif /* AFS_NOSTATS */
182 AFS_STATCNT(afs_StoreAllSegments);
/* Snapshot the data version before we start; newDV is bumped once per
 * successful StoreData so we can tell whether we did all the stores. */
184 hset(oldDV, avc->f.m.DataVersion);
185 hset(newDV, avc->f.m.DataVersion);
186 hash = DVHash(&avc->f.fid);
187 foreign = (avc->f.states & CForeign);
188 dcList = (struct dcache **)osi_AllocLargeSpace(AFS_LRALLOCSIZ);
189 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
190 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
191 #if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
192 /* In the aix vm implementation we need to do the vm_writep even
193 * on the memcache case since that's we adjust the file's size
194 * and finish flushing partial vm pages.
196 if ((cacheDiskType != AFS_FCACHE_TYPE_MEM) || (sync & AFS_LASTSTORE))
197 #endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
199 /* If we're not diskless, reading a file may stress the VM
200 * system enough to cause a pageout, and this vnode would be
201 * locked when the pageout occurs. We can prevent this problem
202 * by making sure all dirty pages are already flushed. We don't
203 * do this when diskless because reading a diskless (i.e.
204 * memory-resident) chunk doesn't require using new VM, and we
205 * also don't want to dump more dirty data into a diskless cache,
206 * since they're smaller, and we might exceed its available
/* Flush (or on Solaris optionally smush/invalidate) dirty VM pages into
 * the cache before we start shipping chunks to the server. */
209 #if defined(AFS_SUN5_ENV)
210 if (sync & AFS_VMSYNC_INVAL) /* invalidate VM pages */
211 osi_VM_TryToSmush(avc, CRED(), 1);
214 osi_VM_StoreAllSegments(avc);
/* Disconnected operation: bail out early (the stored error-return line
 * is elided from this excerpt). */
216 if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
217 /* This will probably make someone sad ... */
218 /*printf("Net down in afs_StoreSegments\n");*/
/* Drop to a shared lock for the (long) store phase. */
221 ConvertWToSLock(&avc->lock);
224 * Subsequent code expects a sorted list, and it expects all the
225 * chunks in the list to be contiguous, so we need a sort and a
226 * while loop in here, too - but this will work for a first pass...
227 * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
228 * bin sort, I guess. Chunk numbers start with 0
230 * - Have to get a write lock on xdcache because GetDSlot might need it (if
231 * the chunk doesn't have a dcache struct).
232 * This seems like overkill in most cases.
233 * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
234 * xdcache, then relock xdcache and try to use index. It is done
235 * a lot elsewhere in the CM, but I'm not buying that argument.
236 * - should be able to check IFDataMod without doing the GetDSlot (just
237 * hold afs_xdcache). That way, it's easy to do this without the
238 * writelock on afs_xdcache, and we save unneccessary disk
239 * operations. I don't think that works, 'cuz the next pointers
/* Remember the callback counter; used later (with CForeign/afs_dvhack)
 * to detect intervening stores by other clients. */
242 origCBs = afs_allCBs;
245 tlen = avc->f.m.Length;
/* --- Phase 1 (per batch): collect up to NCHUNKSATONCE dirty chunks with
 * chunk numbers in [minj, minj+NCHUNKSATONCE) into dcList (bin sort by
 * chunk-number offset).  The enclosing batch loop header is elided. --- */
249 memset((char *)dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
253 /* lock and start over from beginning of hash chain
254 * in order to avoid a race condition. */
255 MObtainWriteLock(&afs_xdcache, 284);
256 index = afs_dvhashTbl[hash];
258 for (j = 0; index != NULLIDX;) {
259 if ((afs_indexFlags[index] & IFDataMod)
260 && (afs_indexUnique[index] == avc->f.fid.Fid.Unique)) {
261 tdc = afs_GetDSlot(index, 0); /* refcount+1. */
262 ReleaseReadLock(&tdc->tlock);
263 if (!FidCmp(&tdc->f.fid, &avc->f.fid) && tdc->f.chunk >= minj) {
264 off = tdc->f.chunk - minj;
265 if (off < NCHUNKSATONCE) {
267 osi_Panic("dclist slot already in use!");
272 /* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
273 * can't grab here, due to lock ordering with afs_xdcache.
274 * So, disable this shortcut for now. -- kolya 2001-10-13
276 /* shortcut: big win for little files */
277 /* tlen -= tdc->f.chunkBytes;
284 if (j == NCHUNKSATONCE)
291 index = afs_dvnextTbl[index];
293 MReleaseWriteLock(&afs_xdcache);
295 /* this guy writes chunks, puts back dcache structs, and bumps newDV */
296 /* "moredata" just says "there are more dirty chunks yet to come".
/* --- Phase 2 (per batch): lock the collected chunks, find contiguous
 * runs, and ship each run in one StoreData RPC. --- */
300 static afs_uint32 lp1 = 10000, lp2 = 10000;
302 struct AFSStoreStatus InStatus;
303 struct AFSFetchStatus OutStatus;
305 afs_size_t base, bytes;
308 unsigned int first = 0;
311 struct osi_file *tfile;
312 struct rx_call *tcall;
314 for (bytes = 0, j = 0; !code && j <= high; j++) {
316 ObtainSharedLock(&(dcList[j]->lock), 629);
319 bytes += dcList[j]->f.chunkBytes;
/* Short interior chunk: account for the zero-padding that will be sent
 * to keep the run contiguous (padding itself happens in Phase 3). */
320 if ((dcList[j]->f.chunkBytes < afs_OtherCSize)
321 && (dcList[j]->f.chunk - minj < high)
323 int sbytes = afs_OtherCSize - dcList[j]->f.chunkBytes;
/* End of a contiguous run (last slot, or a hole follows): do the RPC. */
327 if (bytes && (j == high || !dcList[j + 1])) {
328 /* base = AFS_CHUNKTOBASE(dcList[first]->f.chunk); */
329 base = AFS_CHUNKTOBASE(first + minj);
332 * take a list of dcache structs and send them all off to the server
333 * the list must be in order, and the chunks contiguous.
334 * Note - there is no locking done by this code currently. For
335 * safety's sake, xdcache could be locked over the entire call.
336 * However, that pretty well ties up all the threads. Meantime, all
337 * the chunks _MUST_ have their refcounts bumped.
338 * The writes done before a store back will clear setuid-ness
340 * We can permit CacheStoreProc to wake up the user process IFF we
341 * are doing the last RPC for this close, ie, storing back the last
342 * set of contiguous chunks of a file.
345 dclist = &dcList[first];
346 nchunks = 1 + j - first;
347 nomore = !(moredata || (j != high));
348 InStatus.ClientModTime = avc->f.m.Date;
349 InStatus.Mask = AFS_SETMODTIME;
350 if (sync & AFS_SYNC) {
351 InStatus.Mask |= AFS_FSYNC;
353 tlen = lmin(avc->f.m.Length, avc->f.truncPos);
354 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
355 ICL_TYPE_FID, &avc->f.fid.Fid, ICL_TYPE_OFFSET,
356 ICL_HANDLE_OFFSET(base), ICL_TYPE_OFFSET,
357 ICL_HANDLE_OFFSET(bytes), ICL_TYPE_OFFSET,
358 ICL_HANDLE_OFFSET(tlen));
362 tc = afs_Conn(&avc->f.fid, areq, 0);
366 tcall = rx_NewCall(tc->id);
/* Start the RPC: 64-bit variant when the server supports it, otherwise
 * split offsets into 32-bit halves (t1/t2/t3; assignments elided). */
367 #ifdef AFS_64BIT_CLIENT
368 if (!afs_serverHasNo64Bit(tc)) {
370 StartRXAFS_StoreData64(tcall,
376 if (tlen > 0xFFFFFFFF) {
379 afs_int32 t1, t2, t3;
384 StartRXAFS_StoreData(tcall,
391 #else /* AFS_64BIT_CLIENT */
393 StartRXAFS_StoreData(tcall,
394 (struct AFSFid *)&avc->
395 f.fid.Fid, &InStatus, base,
397 #endif /* AFS_64BIT_CLIENT */
404 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
405 avc->f.truncPos = AFS_NOTRUNC;
/* --- Phase 3: stream each chunk's bytes down the open call. --- */
407 for (i = 0; i < nchunks && !code; i++) {
410 afs_warn("afs: missing dcache!\n");
412 continue; /* panic? */
414 afs_Trace4(afs_iclSetp, CM_TRACE_STOREALL2,
415 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32,
416 tdc->f.chunk, ICL_TYPE_INT32,
417 tdc->index, ICL_TYPE_INT32,
418 afs_inode2trace(&tdc->f.inode));
/* Asynchrony policy decides whether the user process may be woken
 * before the store completes (shouldwake -> &nomore). */
421 if (avc->asynchrony == -1) {
422 if (afs_defaultAsynchrony >
424 shouldwake = &nomore;
426 } else if ((afs_uint32) avc->asynchrony >=
428 shouldwake = &nomore;
431 tfile = afs_CFileOpen(&tdc->f.inode);
/* Stats-enabled build: time the transfer and maintain the xfer
 * histogram/min/max counters. */
434 &(afs_stats_cmfullperf.rpc.
436 [AFS_STATS_FS_XFERIDX_STOREDATA]);
437 osi_GetuTime(&xferStartTime);
440 afs_CacheStoreProc(tcall, tfile,
441 tdc->f.chunkBytes, avc,
442 shouldwake, &bytesToXfer,
445 osi_GetuTime(&xferStopTime);
448 (xferP->numSuccesses)++;
449 afs_stats_XferSumBytes
450 [AFS_STATS_FS_XFERIDX_STOREDATA] +=
453 (afs_stats_XferSumBytes
454 [AFS_STATS_FS_XFERIDX_STOREDATA] >> 10);
455 afs_stats_XferSumBytes
456 [AFS_STATS_FS_XFERIDX_STOREDATA] &= 0x3FF;
457 if (bytesXferred < xferP->minBytes)
458 xferP->minBytes = bytesXferred;
459 if (bytesXferred > xferP->maxBytes)
460 xferP->maxBytes = bytesXferred;
463 * Tally the size of the object. Note: we tally the actual size,
464 * NOT the number of bytes that made it out over the wire.
466 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0)
468 else if (bytesToXfer <=
469 AFS_STATS_MAXBYTES_BUCKET1)
471 else if (bytesToXfer <=
472 AFS_STATS_MAXBYTES_BUCKET2)
474 else if (bytesToXfer <=
475 AFS_STATS_MAXBYTES_BUCKET3)
477 else if (bytesToXfer <=
478 AFS_STATS_MAXBYTES_BUCKET4)
480 else if (bytesToXfer <=
481 AFS_STATS_MAXBYTES_BUCKET5)
483 else if (bytesToXfer <=
484 AFS_STATS_MAXBYTES_BUCKET6)
486 else if (bytesToXfer <=
487 AFS_STATS_MAXBYTES_BUCKET7)
492 afs_stats_GetDiff(elapsedTime, xferStartTime,
494 afs_stats_AddTo((xferP->sumTime),
496 afs_stats_SquareAddTo((xferP->sqrTime),
498 if (afs_stats_TimeLessThan
499 (elapsedTime, (xferP->minTime))) {
500 afs_stats_TimeAssign((xferP->minTime),
503 if (afs_stats_TimeGreaterThan
504 (elapsedTime, (xferP->maxTime))) {
505 afs_stats_TimeAssign((xferP->maxTime),
/* Non-stats build: same store call with dummy byte counters. */
511 afs_CacheStoreProc(tcall, tfile,
512 tdc->f.chunkBytes, avc,
513 shouldwake, &lp1, &lp2);
514 #endif /* AFS_NOSTATS */
515 afs_CFileClose(tfile);
/* Interior chunk shorter than afs_OtherCSize: zero-pad it on the wire
 * so the next chunk starts at its expected offset. */
516 if ((tdc->f.chunkBytes < afs_OtherCSize)
517 && (i < (nchunks - 1)) && code == 0) {
518 int bsent, tlen, sbytes =
519 afs_OtherCSize - tdc->f.chunkBytes;
521 osi_AllocLargeSpace(AFS_LRALLOCSIZ);
526 AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ :
528 memset(tbuffer, 0, tlen);
530 bsent = rx_Write(tcall, tbuffer, tlen);
534 code = -33; /* XXX */
539 osi_FreeLargeSpace(tbuffer);
541 stored += tdc->f.chunkBytes;
543 /* ideally, I'd like to unlock the dcache and turn
544 * off the writing bit here, but that would
545 * require being able to retry StoreAllSegments in
546 * the event of a failure. It only really matters
547 * if user can't read from a 'locked' dcache or
548 * one which has the writing bit turned on. */
/* --- Phase 4: end the RPC and analyze the result (retry loop parts
 * elided). --- */
551 struct AFSVolSync tsync;
554 EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
559 doProcessFS = 1; /* Flag to run afs_ProcessFS() later on */
564 code2 = rx_EndCall(tcall, code);
569 #ifdef AFS_64BIT_CLIENT
570 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
571 afs_serverSetNo64Bit(tc);
574 #endif /* AFS_64BIT_CLIENT */
576 (tc, code, &avc->f.fid, areq,
577 AFS_STATS_FS_RPCIDX_STOREDATA, SHARED_LOCK,
580 /* put back all remaining locked dcache entries */
581 for (i = 0; i < nchunks; i++) {
/* On success, clear the dirty bookkeeping for each stored chunk.
 * (LOCKXXX below is the original author's known lock-order caveat.) */
584 if (afs_indexFlags[tdc->index] & IFDataMod) {
586 * LOCKXXX -- should hold afs_xdcache(W) when
587 * modifying afs_indexFlags.
589 afs_indexFlags[tdc->index] &= ~IFDataMod;
590 afs_stats_cmperf.cacheCurrDirtyChunks--;
591 afs_indexFlags[tdc->index] &= ~IFDirtyPages;
592 if (sync & AFS_VMSYNC_INVAL) {
593 /* since we have invalidated all the pages of this
594 ** vnode by calling osi_VM_TryToSmush, we can
595 ** safely mark this dcache entry as not having
596 ** any pages. This vnode now becomes eligible for
597 ** reclamation by getDownD.
599 afs_indexFlags[tdc->index] &= ~IFAnyPages;
603 UpgradeSToWLock(&tdc->lock, 628);
604 tdc->f.states &= ~DWriting; /* correct? */
605 tdc->dflags |= DFEntryMod;
606 ReleaseWriteLock(&tdc->lock);
608 /* Mark the entry as released */
613 /* Now copy out return params */
614 UpgradeSToWLock(&avc->lock, 28); /* keep out others for a while */
615 afs_ProcessFS(avc, &OutStatus, areq);
616 /* Keep last (max) size of file on server to see if
617 * we need to call afs_StoreMini to extend the file.
620 maxStoredLength = OutStatus.Length;
621 ConvertWToSLock(&avc->lock);
/* Error path: release the batch entries we never got to store. */
626 for (j++; j <= high; j++) {
628 ReleaseSharedLock(&(dcList[j]->lock));
629 afs_PutDCache(dcList[j]);
630 /* Releasing entry */
636 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALLDCDONE,
637 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
642 /* Release any zero-length dcache entries in our interval
643 * that we locked but didn't store back above.
645 for (j = 0; j <= high; j++) {
648 osi_Assert(tdc->f.chunkBytes == 0);
649 ReleaseSharedLock(&tdc->lock);
/* Advance to the next window of chunk numbers; loop while dirty chunks
 * remain and no error occurred. */
655 minj += NCHUNKSATONCE;
656 } while (!code && moredata);
658 UpgradeSToWLock(&avc->lock, 29);
660 /* send a trivial truncation store if did nothing else */
663 * Call StoreMini if we haven't written enough data to extend the
664 * file at the fileserver to the client's notion of the file length.
666 if ((avc->f.truncPos != AFS_NOTRUNC)
667 || ((avc->f.states & CExtendedFile)
668 && (maxStoredLength < avc->f.m.Length))) {
669 code = afs_StoreMini(avc, areq);
671 hadd32(newDV, 1); /* just bumped here, too */
673 avc->f.states &= ~CExtendedFile;
677 * Finally, turn off DWriting, turn on DFEntryMod,
678 * update f.versionNo.
679 * A lot of this could be integrated into the loop above
/* --- Phase 5: second pass over the hash chain to relabel chunks with
 * the new DataVersion (enclosing loop header elided). --- */
689 memset((char *)dcList, 0,
690 NCHUNKSATONCE * sizeof(struct dcache *));
692 /* overkill, but it gets the lock in case GetDSlot needs it */
693 MObtainWriteLock(&afs_xdcache, 285);
695 for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
696 index != NULLIDX && safety < afs_cacheFiles + 2;) {
698 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
699 tdc = afs_GetDSlot(index, 0);
700 ReleaseReadLock(&tdc->tlock);
702 if (!FidCmp(&tdc->f.fid, &avc->f.fid)
703 && tdc->f.chunk >= minj) {
704 off = tdc->f.chunk - minj;
705 if (off < NCHUNKSATONCE) {
706 /* this is the file, and the correct chunk range */
707 if (j >= NCHUNKSATONCE)
709 ("Too many dcache entries in range\n");
714 if (j == NCHUNKSATONCE)
722 index = afs_dvnextTbl[index];
724 MReleaseWriteLock(&afs_xdcache);
726 for (i = 0; i < j; i++) {
727 /* Iterate over the dcache entries we collected above */
729 ObtainSharedLock(&tdc->lock, 677);
731 /* was code here to clear IFDataMod, but it should only be done
732 * in storedcache and storealldcache.
734 /* Only increase DV if we had up-to-date data to start with.
735 * Otherwise, we could be falsely upgrading an old chunk
736 * (that we never read) into one labelled with the current
737 * DV #. Also note that we check that no intervening stores
738 * occurred, otherwise we might mislabel cache information
739 * for a chunk that we didn't store this time
741 /* Don't update the version number if it's not yet set. */
742 if (!hsame(tdc->f.versionNo, h_unset)
743 && hcmp(tdc->f.versionNo, oldDV) >= 0) {
745 if ((!(afs_dvhack || foreign)
746 && hsame(avc->f.m.DataVersion, newDV))
747 || ((afs_dvhack || foreign)
748 && (origCBs == afs_allCBs))) {
749 /* no error, this is the DV */
751 UpgradeSToWLock(&tdc->lock, 678);
752 hset(tdc->f.versionNo, avc->f.m.DataVersion);
753 tdc->dflags |= DFEntryMod;
754 ConvertWToSLock(&tdc->lock);
758 ReleaseSharedLock(&tdc->lock);
762 minj += NCHUNKSATONCE;
769 * Invalidate chunks after an error for ccores files since
770 * afs_inactive won't be called for these and they won't be
771 * invalidated. Also discard data if it's a permanent error from the
774 if (areq->permWriteError || (avc->f.states & (CCore1 | CCore))) {
775 afs_InvalidateAllSegments(avc);
778 afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
779 ICL_TYPE_INT32, avc->f.m.Length, ICL_TYPE_INT32, code);
780 /* would like a Trace5, but it doesn't exist... */
781 afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
782 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
783 avc->lock.excl_locked);
784 afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
785 ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
786 avc->lock.readers_reading, ICL_TYPE_INT32,
787 avc->lock.num_waiting);
790 * Finally, if updated DataVersion matches newDV, we did all of the
791 * stores. If mapDV indicates that the page cache was flushed up
792 * to when we started the store, then we can relabel them as flushed
793 * as recently as newDV.
794 * Turn off CDirty bit because the stored data is now in sync with server.
796 if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
797 if ((!(afs_dvhack || foreign) && hsame(avc->f.m.DataVersion, newDV))
798 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
799 hset(avc->mapDV, newDV);
800 avc->f.states &= ~CDirty;
803 osi_FreeLargeSpace(dcList);
805 /* If not the final write a temporary error is ok. */
806 if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
811 } /*afs_StoreAllSegments (new 03/02/94) */
815 * afs_InvalidateAllSegments
818 * Invalidates all chunks for a given file
821 * avc : Pointer to vcache entry.
824 * For example, called after an error has been detected. Called
825 * with avc write-locked, and afs_xdcache unheld.
/*
 * afs_InvalidateAllSegments
 *
 * Discard every cache chunk of a file: clear dirty bookkeeping, mark the
 * vcache status bad (CStatd/CDirty off), drop its callback, and zap each
 * dcache entry.  Typically called after an error (per the header comment
 * above); caller holds avc write-locked, afs_xdcache unheld.
 *
 * avc : vcache whose cached segments are invalidated.
 *
 * NOTE(review): declarations (tdc, index, hash) and several closing
 * braces are elided from this excerpt; comments cover visible code only.
 */
829 afs_InvalidateAllSegments(struct vcache *avc)
834 struct dcache **dcList;
835 int i, dcListMax, dcListCount;
837 AFS_STATCNT(afs_InvalidateAllSegments);
838 afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
839 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
840 hash = DVHash(&avc->f.fid);
841 avc->f.truncPos = AFS_NOTRUNC; /* don't truncate later */
842 avc->f.states &= ~CExtendedFile; /* not any more */
/* Drop the server callback and mark cached status/data untrustworthy. */
843 ObtainWriteLock(&afs_xcbhash, 459);
844 afs_DequeueCallback(avc);
845 avc->f.states &= ~(CStatd | CDirty); /* mark status information as bad, too */
846 ReleaseWriteLock(&afs_xcbhash);
/* Directories (odd vnode numbers, or VDIR) get purged from the name cache. */
847 if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
848 osi_dnlc_purgedp(avc);
849 /* Blow away pages; for now, only for Solaris */
850 #if (defined(AFS_SUN5_ENV))
851 if (WriteLocked(&avc->lock))
852 osi_ReleaseVM(avc, (struct AFS_UCRED *)0);
855 * Block out others from screwing with this table; is a read lock
858 MObtainWriteLock(&afs_xdcache, 286);
/* First pass: count matching entries (count increment elided) so we can
 * size the dcList array. */
861 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
862 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
863 tdc = afs_GetDSlot(index, 0);
864 ReleaseReadLock(&tdc->tlock);
865 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
869 index = afs_dvnextTbl[index];
872 dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
/* Second pass: clear per-chunk flags and collect entries for zapping. */
875 for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
876 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
877 tdc = afs_GetDSlot(index, 0);
878 ReleaseReadLock(&tdc->tlock);
879 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
880 /* same file? we'll zap it */
881 if (afs_indexFlags[index] & IFDataMod) {
882 afs_stats_cmperf.cacheCurrDirtyChunks--;
883 /* don't write it back */
884 afs_indexFlags[index] &= ~IFDataMod;
886 afs_indexFlags[index] &= ~IFAnyPages;
887 if (dcListCount < dcListMax)
888 dcList[dcListCount++] = tdc;
895 index = afs_dvnextTbl[index];
897 MReleaseWriteLock(&afs_xdcache);
/* Third pass (xdcache dropped): lock each collected entry and zap it
 * (the actual zap call is elided from this excerpt). */
899 for (i = 0; i < dcListCount; i++) {
902 ObtainWriteLock(&tdc->lock, 679);
904 if (vType(avc) == VDIR)
906 ReleaseWriteLock(&tdc->lock);
910 osi_Free(dcList, dcListMax * sizeof(struct dcache *));
917 * Extend a cache file
919 * \param avc pointer to vcache to extend data for
920 * \param alen Length to extend file to
923 * \note avc must be write locked. May release and reobtain avc and GLOCK
/*
 * afs_ExtendSegments
 *
 * Extend a file's cached data with zero bytes until its length reaches
 * alen, chunk by chunk, writing AFS_PAGESIZE-sized zero buffers into each
 * chunk's cache file.  Per the header comment above: avc must be write
 * locked; may release and reobtain avc and GLOCK.
 *
 * avc  : vcache to extend.
 * alen : target length.
 * areq : request structure passed to afs_ObtainDCacheForWriting.
 *
 * NOTE(review): declarations of tdc, code, towrite, zeros and the
 * error-break lines are elided from this excerpt.
 */
926 afs_ExtendSegments(struct vcache *avc, afs_size_t alen, struct vrequest *areq) {
927 afs_size_t offset, toAdd;
928 struct osi_file *tfile;
/* One page of zeros, reused for every write below. */
933 zeros = (void *) afs_osi_Alloc(AFS_PAGESIZE);
936 memset(zeros, 0, AFS_PAGESIZE);
938 while (avc->f.m.Length < alen) {
939 tdc = afs_ObtainDCacheForWriting(avc, avc->f.m.Length, alen - avc->f.m.Length, areq, 0);
/* toAdd = bytes still needed, clipped to what fits in this chunk. */
945 toAdd = alen - avc->f.m.Length;
947 offset = avc->f.m.Length - AFS_CHUNKTOBASE(tdc->f.chunk);
948 if (offset + toAdd > AFS_CHUNKTOSIZE(tdc->f.chunk)) {
949 toAdd = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;
951 tfile = afs_CFileOpen(&tdc->f.inode);
/* Write zeros a page at a time until the chunk's validPos covers the
 * newly extended region. */
952 while(tdc->validPos < avc->f.m.Length + toAdd) {
955 towrite = (avc->f.m.Length + toAdd) - tdc->validPos;
956 if (towrite > AFS_PAGESIZE) towrite = AFS_PAGESIZE;
958 code = afs_CFileWrite(tfile,
959 tdc->validPos - AFS_CHUNKTOBASE(tdc->f.chunk),
961 tdc->validPos += towrite;
963 afs_CFileClose(tfile);
/* Account the new chunk size and advance the cached file length. */
964 afs_AdjustSize(tdc, offset + toAdd );
965 avc->f.m.Length += toAdd;
966 ReleaseWriteLock(&tdc->lock);
970 afs_osi_Free(zeros, AFS_PAGESIZE);
975 * afs_TruncateAllSegments
978 * Truncate a cache file.
981 * avc : Ptr to vcache entry to truncate.
982 * alen : Number of bytes to make the file.
983 * areq : Ptr to request structure.
986 * Called with avc write-locked; in VFS40 systems, pvnLock is also
990 afs_TruncateAllSegments(register struct vcache *avc, afs_size_t alen,
991 struct vrequest *areq, struct AFS_UCRED *acred)
993 register struct dcache *tdc;
994 register afs_int32 code;
995 register afs_int32 index;
999 struct dcache **tdcArray;
1001 AFS_STATCNT(afs_TruncateAllSegments);
1002 avc->f.m.Date = osi_Time();
1003 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
1004 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1005 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
1006 if (alen >= avc->f.m.Length) {
1008 * Special speedup since Sun's vm extends the file this way;
1009 * we've never written to the file thus we can just set the new
1010 * length and avoid the needless calls below.
1011 * Also used for ftruncate calls which can extend the file.
1012 * To completely minimize the possible extra StoreMini RPC, we really
1013 * should keep the ExtendedPos as well and clear this flag if we
1014 * truncate below that value before we store the file back.
1016 avc->f.states |= CExtendedFile;
1017 avc->f.m.Length = alen;
1020 #if (defined(AFS_SUN5_ENV))
1022 /* Zero unused portion of last page */
1023 osi_VM_PreTruncate(avc, alen, acred);
1027 #if (defined(AFS_SUN5_ENV))
1028 ObtainWriteLock(&avc->vlock, 546);
1029 avc->activeV++; /* Block new getpages */
1030 ReleaseWriteLock(&avc->vlock);
1033 ReleaseWriteLock(&avc->lock);
1036 /* Flush pages beyond end-of-file. */
1037 osi_VM_Truncate(avc, alen, acred);
1040 ObtainWriteLock(&avc->lock, 79);
1042 avc->f.m.Length = alen;
1044 if (alen < avc->f.truncPos)
1045 avc->f.truncPos = alen;
1046 code = DVHash(&avc->f.fid);
1048 /* block out others from screwing with this table */
1049 MObtainWriteLock(&afs_xdcache, 287);
1052 for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
1053 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
1054 tdc = afs_GetDSlot(index, 0);
1055 ReleaseReadLock(&tdc->tlock);
1056 if (!FidCmp(&tdc->f.fid, &avc->f.fid))
1060 index = afs_dvnextTbl[index];
1063 /* Now allocate space where we can save those dcache entries, and
1064 * do a second pass over them.. Since we're holding xdcache, it
1065 * shouldn't be changing.
1067 tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
1070 for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
1071 if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
1072 tdc = afs_GetDSlot(index, 0);
1073 ReleaseReadLock(&tdc->tlock);
1074 if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
1075 /* same file, and modified, we'll store it back */
1076 if (dcPos < dcCount) {
1077 tdcArray[dcPos++] = tdc;
1085 index = afs_dvnextTbl[index];
1088 MReleaseWriteLock(&afs_xdcache);
1090 /* Now we loop over the array of dcache entries and truncate them */
1091 for (index = 0; index < dcPos; index++) {
1092 struct osi_file *tfile;
1094 tdc = tdcArray[index];
1096 newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
1099 ObtainSharedLock(&tdc->lock, 672);
1100 if (newSize < tdc->f.chunkBytes) {
1101 UpgradeSToWLock(&tdc->lock, 673);
1102 tfile = afs_CFileOpen(&tdc->f.inode);
1103 afs_CFileTruncate(tfile, newSize);
1104 afs_CFileClose(tfile);
1105 afs_AdjustSize(tdc, newSize);
1106 if (alen < tdc->validPos) {
1107 if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
1110 tdc->validPos = alen;
1112 ConvertWToSLock(&tdc->lock);
1114 ReleaseSharedLock(&tdc->lock);
1118 osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
1120 #if (defined(AFS_SUN5_ENV))
1121 ObtainWriteLock(&avc->vlock, 547);
1122 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
1123 avc->vstates &= ~VRevokeWait;
1124 afs_osi_Wakeup((char *)&avc->vstates);
1126 ReleaseWriteLock(&avc->vlock);