 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html

/* --------------------- Required definitions --------------------- */
#include <afsconfig.h>
#include "../afs/param.h"

#include "../afs/sysincludes.h"   /* Standard vendor system headers */
#include "../afs/afsincludes.h"   /* AFS-based standard headers */
#include "../afs/afs_stats.h"     /* statistics */
#include "../afs/afs_cbqueue.h"
#include "../afs/afs_osidnlc.h"

/* Imported variables */
extern afs_rwlock_t afs_xserver;
extern afs_rwlock_t afs_xdcache;
extern afs_rwlock_t afs_xcbhash;
extern afs_lock_t afs_ftf;
extern struct server *afs_servers[NSERVERS];
extern afs_int32 afs_dhashsize;
extern afs_int32 *afs_dvhashTbl;
extern unsigned char *afs_indexFlags;   /* (only one) Is there data there? */
extern int cacheDiskType;

afs_uint32 afs_stampValue = 0;
 * Send a truncation request to a FileServer.
 * We're write-locked upon entry.
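 *
 * (Added note, not from the original header: the "mini" store is simply a
 * StoreData RPC that ships no file data; zero bytes are transferred and only
 * the desired file length goes across, so the fileserver truncates or
 * extends the file to that length without the client writing back chunks.)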
int afs_StoreMini(avc, areq)
    register struct vcache *avc;
    struct vrequest *areq;
{
    register struct conn *tc;
    struct AFSStoreStatus InStatus;
    struct AFSFetchStatus OutStatus;
    struct AFSVolSync tsync;
    register afs_int32 code;
    register struct rx_call *tcall;
    afs_size_t tlen, base = 0;

    AFS_STATCNT(afs_StoreMini);
    afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->m.Length);
    tlen = avc->m.Length;
    if (avc->truncPos < tlen) tlen = avc->truncPos;
    avc->truncPos = AFS_NOTRUNC;
    avc->states &= ~CExtendedFile;
    tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
    tcall = rx_NewCall(tc->id);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
    /* Set the client mod time since we always want the file
     * to have the client's mod time and not the server's one
     * (to avoid problems with make, etc.)  It almost always
     * works fine with standard AFS because the server and client
     * times are in sync and, more importantly, this StoreMini
     * is a special call that would typically be followed by
     * the proper store-data or store-status calls.
     */
    InStatus.Mask = AFS_SETMODTIME;
    InStatus.ClientModTime = avc->m.Date;
    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
    afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
               ICL_TYPE_FID, &avc->fid.Fid,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(base),
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(tlen),
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
#ifdef AFS_64BIT_CLIENT
    if (!afs_serverHasNo64Bit(tc)) {
        code = StartRXAFS_StoreData64(tcall,
                                      (struct AFSFid *)&avc->fid.Fid,
                                      &InStatus, avc->m.Length,
                                      0, tlen);
    } else {
        afs_int32 l1, l2;
        l1 = avc->m.Length;
        l2 = tlen;
        code = StartRXAFS_StoreData(tcall,
                                    (struct AFSFid *)&avc->fid.Fid,
                                    &InStatus, l1, 0, l2);
    }
#else /* AFS_64BIT_CLIENT */
    code = StartRXAFS_StoreData(tcall,
                                (struct AFSFid *)&avc->fid.Fid,
                                &InStatus, avc->m.Length, 0, tlen);
#endif /* AFS_64BIT_CLIENT */
    code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
#ifdef AFS_64BIT_CLIENT
    if (code == RXGEN_OPCODE) {
        afs_serverSetNo64Bit(tc);
        code = rx_EndCall(tcall, code);
#endif /* AFS_64BIT_CLIENT */
    code = rx_EndCall(tcall, code);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
        (afs_Analyze(tc, code, &avc->fid, areq,
                     AFS_STATS_FS_RPCIDX_STOREDATA,
                     SHARED_LOCK, (struct cell *)0));

    afs_ProcessFS(avc, &OutStatus, areq);

    afs_InvalidateAllSegments(avc);

unsigned int storeallmissing = 0;
#define lmin(a,b) (((a) < (b)) ? (a) : (b))
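/*
 * Illustrative note (added, not in the original source): lmin() is a plain
 * textual macro, so each argument is evaluated twice.  The callers below only
 * pass simple lvalues such as avc->m.Length and avc->truncPos, which is why
 * the double evaluation is harmless here.  example_lmin() is a made-up name.
 */
#if 0   /* example only, never compiled */
static afs_size_t
example_lmin(afs_size_t length, afs_size_t truncPos)
{
    /* pick the smaller of the cached file length and the pending truncate
     * position, just as afs_StoreAllSegments does below */
    return lmin(length, truncPos);
}
#endif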
 * afs_StoreAllSegments
 * Stores all modified segments back to server
 * avc  : Pointer to vcache entry.
 * areq : Pointer to request structure.
 * Called with avc write-locked.
 */
#if defined (AFS_HPUX_ENV) || defined(AFS_ULTRIX_ENV)
int NCHUNKSATONCE = 3;
#else
int NCHUNKSATONCE = 64;
#endif
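/*
 * Note (added commentary, not in the original source): NCHUNKSATONCE bounds
 * how many contiguous dirty chunks afs_StoreAllSegments gathers into dcList[]
 * and pushes to the fileserver in one StoreData call per pass of its
 * do/while loop; the smaller value on HP-UX and Ultrix presumably just keeps
 * the per-pass footprint down on those ports.
 */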
afs_StoreAllSegments(avc, areq, sync)
    register struct vcache *avc;
    struct vrequest *areq;
    int sync;
{ /*afs_StoreAllSegments*/
    register struct dcache *tdc;
    register afs_int32 code = 0;
    register afs_int32 index;
    register afs_int32 origCBs, foreign = 0;
    afs_hyper_t newDV, oldDV;   /* DV when we start, and finish, respectively */
    struct dcache **dcList, **dclist;
    unsigned int i, j, minj, maxj, moredata, high, off;
    afs_size_t maxStoredLength; /* highest offset we've written to server. */
#ifndef AFS_NOSTATS
    struct afs_stats_xferData *xferP;   /* Ptr to this op's xfer struct */
    osi_timeval_t xferStartTime,        /* FS xfer start time */
                  xferStopTime;         /* FS xfer stop time */
    afs_size_t bytesToXfer;             /* # bytes to xfer */
    afs_size_t bytesXferred;            /* # bytes actually xferred */
#endif /* AFS_NOSTATS */
    AFS_STATCNT(afs_StoreAllSegments);

    hset(oldDV, avc->m.DataVersion);
    hset(newDV, avc->m.DataVersion);
    hash = DVHash(&avc->fid);
    foreign = (avc->states & CForeign);
    dcList = (struct dcache **) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
#if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
    /* In the AIX vm implementation we need to do the vm_writep even
     * in the memcache case since that's where we adjust the file's size
     * and finish flushing partial vm pages.
     */
    if (cacheDiskType != AFS_FCACHE_TYPE_MEM)
#endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
        /* If we're not diskless, reading a file may stress the VM
         * system enough to cause a pageout, and this vnode would be
         * locked when the pageout occurs.  We can prevent this problem
         * by making sure all dirty pages are already flushed.  We don't
         * do this when diskless because reading a diskless (i.e.
         * memory-resident) chunk doesn't require using new VM, and we
         * also don't want to dump more dirty data into a diskless cache,
         * since they're smaller, and we might exceed its available
         * space.
         */
#if defined(AFS_SUN5_ENV)
        if (sync & AFS_VMSYNC_INVAL)    /* invalidate VM pages */
            osi_VM_TryToSmush(avc, CRED(), 1);
        osi_VM_StoreAllSegments(avc);

    ConvertWToSLock(&avc->lock);
     * Subsequent code expects a sorted list, and it expects all the
     * chunks in the list to be contiguous, so we need a sort and a
     * while loop in here, too - but this will work for a first pass...
     * 92.10.05 - OK, there's a sort in here now.  It's kind of a modified
     *            bin sort, I guess.  Chunk numbers start with 0
     * - Have to get a write lock on xdcache because GetDSlot might need it (if
     *   the chunk doesn't have a dcache struct).
     *   This seems like overkill in most cases.
     * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
     *   xdcache, then relock xdcache and try to use index.  It is done
     *   a lot elsewhere in the CM, but I'm not buying that argument.
     * - should be able to check IFDataMod without doing the GetDSlot (just
     *   hold afs_xdcache).  That way, it's easy to do this without the
     *   writelock on afs_xdcache, and we save unnecessary disk
     *   operations.  I don't think that works, 'cuz the next pointers
    origCBs = afs_allCBs;

    tlen = avc->m.Length;

    memset((char *)dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));

        /* lock and start over from beginning of hash chain
         * in order to avoid a race condition. */
        MObtainWriteLock(&afs_xdcache, 284);
        index = afs_dvhashTbl[hash];
        for (j = 0; index != NULLIDX;) {
            if ((afs_indexFlags[index] & IFDataMod) &&
                (afs_indexUnique[index] == avc->fid.Fid.Unique)) {
                tdc = afs_GetDSlot(index, 0);   /* refcount+1. */
                ReleaseReadLock(&tdc->tlock);
                if (!FidCmp(&tdc->f.fid, &avc->fid) && tdc->f.chunk >= minj) {
                    off = tdc->f.chunk - minj;
                    if (off < NCHUNKSATONCE) {
                        if (dcList[off])
                            osi_Panic("dclist slot already in use!");
                        /* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
                         * can't grab here, due to lock ordering with afs_xdcache.
                         * So, disable this shortcut for now.  -- kolya 2001-10-13
                         */
                        /* shortcut: big win for little files */
                        /* tlen -= tdc->f.chunkBytes;
            if (j == NCHUNKSATONCE)
            index = afs_dvnextTbl[index];
        MReleaseWriteLock(&afs_xdcache);
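/*
 * Illustrative sketch (added, not in the original source): the hash-chain
 * walk above is the "modified bin sort" mentioned earlier.  Every dirty
 * chunk whose number falls in the current window [minj, minj+NCHUNKSATONCE)
 * is dropped into dcList[] at slot (chunk - minj), so the list comes out
 * sorted by chunk number and unused slots stay NULL; chunks beyond the
 * window just flag "moredata" for a later pass.  The helper below is a
 * made-up name.
 */
#if 0   /* example only, never compiled */
static void
example_bucket_chunk(struct dcache **dcList, struct dcache *tdc,
                     afs_int32 minj, int *highp, int *jp, int *moredatap)
{
    afs_int32 off = tdc->f.chunk - minj;    /* slot within this window */

    if (off >= 0 && off < NCHUNKSATONCE) {
        if (dcList[off])
            osi_Panic("dclist slot already in use!");
        dcList[off] = tdc;                  /* keeps the list sorted */
        if (off > *highp)
            *highp = off;                   /* highest slot used so far */
        (*jp)++;                            /* chunks gathered this pass */
    } else {
        *moredatap = 1;                     /* belongs to a later window */
    }
}
#endif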
        /* this guy writes chunks, puts back dcache structs, and bumps newDV */
        /* "moredata" just says "there are more dirty chunks yet to come".
         */
            static afs_uint32 lp1 = 10000, lp2 = 10000;
            struct AFSStoreStatus InStatus;
            afs_size_t base, bytes;
            struct osi_file *tfile;
            struct rx_call *tcall;
            extern int afs_defaultAsynchrony;

            for (bytes = 0, j = 0; !code && j <= high; j++) {
                ObtainSharedLock(&(dcList[j]->lock), 629);

                bytes += dcList[j]->f.chunkBytes;
                if ((dcList[j]->f.chunkBytes < afs_OtherCSize)
                    && (dcList[j]->f.chunk - minj < high)
                    int sbytes = afs_OtherCSize - dcList[j]->f.chunkBytes;

                if (bytes && (j == high || !dcList[j + 1])) {
                    /* base = AFS_CHUNKTOBASE(dcList[first]->f.chunk); */
                    base = AFS_CHUNKTOBASE(first + minj);
                    /*
                     * take a list of dcache structs and send them all off to the server.
                     * The list must be in order, and the chunks contiguous.
                     * Note - there is no locking done by this code currently.  For
                     * safety's sake, xdcache could be locked over the entire call.
                     * However, that pretty well ties up all the threads.  Meantime, all
                     * the chunks _MUST_ have their refcounts bumped.
                     * The writes done before a store back will clear setuid-ness
                     * We can permit CacheStoreProc to wake up the user process IFF we
                     * are doing the last RPC for this close, i.e., storing back the last
                     * set of contiguous chunks of a file.
                     */
                    dclist = &dcList[first];
                    nomore = !(moredata || (j != high));
                    InStatus.ClientModTime = avc->m.Date;
                    InStatus.Mask = AFS_SETMODTIME;
                    if (sync & AFS_SYNC) {
                        InStatus.Mask |= AFS_FSYNC;
                    tlen = lmin(avc->m.Length, avc->truncPos);
                    afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
                               ICL_TYPE_FID, &avc->fid.Fid,
                               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(base),
                               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(bytes),
                               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(tlen));
                    tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                        tcall = rx_NewCall(tc->id);
#ifdef AFS_64BIT_CLIENT
                        if (!afs_serverHasNo64Bit(tc)) {
                            code = StartRXAFS_StoreData64(tcall,
                                        (struct AFSFid *) &avc->fid.Fid,
                                        &InStatus, base, bytes, tlen);
                            if (tlen > 0xFFFFFFFF) {
                                afs_int32 t1, t2, t3;
                                code = StartRXAFS_StoreData(tcall,
                                            (struct AFSFid *) &avc->fid.Fid,
                                            &InStatus, t1, t2, t3);
#else /* AFS_64BIT_CLIENT */
                        code = StartRXAFS_StoreData(tcall, (struct AFSFid *) &avc->fid.Fid,
                                                    &InStatus, base, bytes, tlen);
#endif /* AFS_64BIT_CLIENT */
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
                        avc->truncPos = AFS_NOTRUNC;
                        for (i = 0; i < nchunks && !code; i++) {
                                afs_warn("afs: missing dcache!\n");
                                continue;       /* panic? */
                            afs_Trace4(afs_iclSetp, CM_TRACE_STOREALL2,
                                       ICL_TYPE_POINTER, avc,
                                       ICL_TYPE_INT32, tdc->f.chunk,
                                       ICL_TYPE_INT32, tdc->index,
                                       ICL_TYPE_INT32, tdc->f.inode);
                            if (avc->asynchrony == -1) {
                                if (afs_defaultAsynchrony > (bytes - stored)) {
                                    shouldwake = &nomore;
                            } else if ((afs_uint32) avc->asynchrony >= (bytes - stored)) {
                                shouldwake = &nomore;
                            tfile = afs_CFileOpen(tdc->f.inode);
#ifndef AFS_NOSTATS
                            xferP = &(afs_stats_cmfullperf.rpc.fsXferTimes[AFS_STATS_FS_XFERIDX_STOREDATA]);
                            osi_GetuTime(&xferStartTime);

                            code = afs_CacheStoreProc(tcall, tfile, tdc->f.chunkBytes,
                                                      avc, shouldwake, &bytesToXfer,
                                                      &bytesXferred);

                            osi_GetuTime(&xferStopTime);

                                (xferP->numSuccesses)++;
                                afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_STOREDATA] += bytesXferred;
                                (xferP->sumBytes) += (afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_STOREDATA] >> 10);
                                afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_STOREDATA] &= 0x3FF;
                                if (bytesXferred < xferP->minBytes)
                                    xferP->minBytes = bytesXferred;
                                if (bytesXferred > xferP->maxBytes)
                                    xferP->maxBytes = bytesXferred;

                                /*
                                 * Tally the size of the object.  Note: we tally the actual size,
                                 * NOT the number of bytes that made it out over the wire.
                                 */
                                if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0)
                                if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET1)
                                if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET2)
                                if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET3)
                                if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET4)
                                if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET5)
                                if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET6)
                                if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET7)

                                afs_stats_GetDiff(elapsedTime, xferStartTime, xferStopTime);
                                afs_stats_AddTo((xferP->sumTime), elapsedTime);
                                afs_stats_SquareAddTo((xferP->sqrTime), elapsedTime);
                                if (afs_stats_TimeLessThan(elapsedTime, (xferP->minTime))) {
                                    afs_stats_TimeAssign((xferP->minTime), elapsedTime);
                                if (afs_stats_TimeGreaterThan(elapsedTime, (xferP->maxTime))) {
                                    afs_stats_TimeAssign((xferP->maxTime), elapsedTime);
#else /* AFS_NOSTATS */
                            code = afs_CacheStoreProc(tcall, tfile, tdc->f.chunkBytes,
                                                      avc, shouldwake, &lp1, &lp2);
#endif /* AFS_NOSTATS */
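/*
 * Illustrative sketch (added, not in the original source): the statistics
 * update above banks transferred bytes in whole kilobytes.
 * afs_stats_XferSumBytes[] carries the running byte count, everything above
 * 1K is shifted into xferP->sumBytes, and only the sub-1K remainder (the low
 * 10 bits) is carried forward.  The helper below is a made-up name.
 */
#if 0   /* example only, never compiled */
static void
example_tally_kb(afs_int32 *carryp, afs_int32 *sumKBp, afs_int32 bytesXferred)
{
    *carryp += bytesXferred;        /* running byte total, may exceed 1K */
    *sumKBp += (*carryp >> 10);     /* bank the whole kilobytes */
    *carryp &= 0x3FF;               /* keep only the sub-1K remainder */
}
#endif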
                            afs_CFileClose(tfile);
#ifdef AFS_64BIT_CLIENT
                            if (code == RXGEN_OPCODE) {
                                afs_serverSetNo64Bit(tc);
#endif /* AFS_64BIT_CLIENT */
                            if ((tdc->f.chunkBytes < afs_OtherCSize) &&
                                int bsent, tlen, tlen1 = 0, sbytes = afs_OtherCSize - tdc->f.chunkBytes;
                                char *tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);

                                    tlen = (sbytes > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : sbytes);
                                    memset(tbuffer, 0, tlen);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                                    bsent = rx_Write(tcall, tbuffer, tlen);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                                        code = -33;     /* XXX */
                                osi_FreeLargeSpace(tbuffer);
                            stored += tdc->f.chunkBytes;
                            /* ideally, I'd like to unlock the dcache and turn
                             * off the writing bit here, but that would
                             * require being able to retry StoreAllSegments in
                             * the event of a failure.  It only really matters
                             * if user can't read from a 'locked' dcache or
                             * one which has the writing bit turned on. */
                struct AFSFetchStatus OutStatus;
                struct AFSVolSync tsync;
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */

                /* Now copy out return params */
                UpgradeSToWLock(&avc->lock, 28);        /* keep out others for a while */
                if (!code) {    /* must wait til RPC completes to be sure of this info */
                    afs_ProcessFS(avc, &OutStatus, areq);
                    /* Keep last (max) size of file on server to see if
                     * we need to call afs_StoreMini to extend the file.
                     */
                    maxStoredLength = OutStatus.Length;
                ConvertWToSLock(&avc->lock);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                code = rx_EndCall(tcall, code);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
        } while (afs_Analyze(tc, code, &avc->fid, areq,
                             AFS_STATS_FS_RPCIDX_STOREDATA,
                             SHARED_LOCK, (struct cell *)0));
        /* put back all remaining locked dcache entries */
        for (i = 0; i < nchunks; i++) {
            if (afs_indexFlags[tdc->index] & IFDataMod) {
                /*
                 * LOCKXXX -- should hold afs_xdcache(W) when
                 * modifying afs_indexFlags.
                 */
                afs_indexFlags[tdc->index] &= ~IFDataMod;
                afs_stats_cmperf.cacheCurrDirtyChunks--;
                afs_indexFlags[tdc->index] &= ~IFDirtyPages;
                if (sync & AFS_VMSYNC_INVAL)
                    /* since we have invalidated all the pages of this
                     ** vnode by calling osi_VM_TryToSmush, we can
                     ** safely mark this dcache entry as not having
                     ** any pages.  This vnode now becomes eligible for
                     ** reclamation by getDownD.
                     */
                    afs_indexFlags[tdc->index] &= ~IFAnyPages;

            UpgradeSToWLock(&tdc->lock, 628);
            tdc->f.states &= ~DWriting;     /* correct? */
            tdc->dflags |= DFEntryMod;
            ReleaseWriteLock(&tdc->lock);
            /* Mark the entry as released */

        for (j++; j <= high; j++) {
            ReleaseSharedLock(&(dcList[j]->lock));
            afs_PutDCache(dcList[j]);
            /* Releasing entry */

        afs_Trace2(afs_iclSetp, CM_TRACE_STOREALLDCDONE,
                   ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);

        /* Release any zero-length dcache entries in our interval
         * that we locked but didn't store back above.
         */
        for (j = 0; j <= high; j++) {
            osi_Assert(tdc->f.chunkBytes == 0);
            ReleaseSharedLock(&tdc->lock);

        minj += NCHUNKSATONCE;
    } while (!code && moredata);
    UpgradeSToWLock(&avc->lock, 29);

    /* send a trivial truncation store if we did nothing else */
    /*
     * Call StoreMini if we haven't written enough data to extend the
     * file at the fileserver to the client's notion of the file length.
     */
    if ((avc->truncPos != AFS_NOTRUNC) ||
        ((avc->states & CExtendedFile) && (maxStoredLength < avc->m.Length))) {
        code = afs_StoreMini(avc, areq);
        hadd32(newDV, 1);       /* just bumped here, too */
    avc->states &= ~CExtendedFile;
    /*
     * Finally, turn off DWriting, turn on DFEntryMod,
     * update f.versionNo.
     * A lot of this could be integrated into the loop above
     */
        memset((char *)dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));

        /* overkill, but it gets the lock in case GetDSlot needs it */
        MObtainWriteLock(&afs_xdcache, 285);

        for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
             index != NULLIDX && safety < afs_cacheFiles + 2;) {
            if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
                tdc = afs_GetDSlot(index, 0);
                ReleaseReadLock(&tdc->tlock);
                if (!FidCmp(&tdc->f.fid, &avc->fid) && tdc->f.chunk >= minj) {
                    off = tdc->f.chunk - minj;
                    if (off < NCHUNKSATONCE) {
                        /* this is the file, and the correct chunk range */
                        if (j >= NCHUNKSATONCE)
                            osi_Panic("Too many dcache entries in range\n");
            if (j == NCHUNKSATONCE)
            index = afs_dvnextTbl[index];
        MReleaseWriteLock(&afs_xdcache);
        for (i = 0; i < j; i++) {
            /* Iterate over the dcache entries we collected above */
            ObtainSharedLock(&tdc->lock, 677);

            /* was code here to clear IFDataMod, but it should only be done
             * in storedcache and storealldcache.
             */
            /* Only increase DV if we had up-to-date data to start with.
             * Otherwise, we could be falsely upgrading an old chunk
             * (that we never read) into one labelled with the current
             * DV #.  Also note that we check that no intervening stores
             * occurred, otherwise we might mislabel cache information
             * for a chunk that we didn't store this time
             */
            /* Don't update the version number if it's not yet set. */
            if (!hsame(tdc->f.versionNo, h_unset) &&
                hcmp(tdc->f.versionNo, oldDV) >= 0) {

                if ((!(afs_dvhack || foreign) && hsame(avc->m.DataVersion, newDV))
                    || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
                    /* no error, this is the DV */
                    UpgradeSToWLock(&tdc->lock, 678);
                    hset(tdc->f.versionNo, avc->m.DataVersion);
                    tdc->dflags |= DFEntryMod;
                    ConvertWToSLock(&tdc->lock);

            ReleaseSharedLock(&tdc->lock);

        minj += NCHUNKSATONCE;
    /*
     * Invalidate chunks after an error for ccores files since
     * afs_inactive won't be called for these and they won't be
     * invalidated.  Also discard data if it's a permanent error from the
     * fileserver.
     */
    if (areq->permWriteError || (avc->states & (CCore1 | CCore))) {
        afs_InvalidateAllSegments(avc);
    afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->m.Length, ICL_TYPE_INT32, code);
    /* would like a Trace5, but it doesn't exist... */
    afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->lock.wait_states,
               ICL_TYPE_INT32, avc->lock.excl_locked);
    afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->lock.wait_states,
               ICL_TYPE_INT32, avc->lock.readers_reading,
               ICL_TYPE_INT32, avc->lock.num_waiting);
    /*
     * Finally, if updated DataVersion matches newDV, we did all of the
     * stores.  If mapDV indicates that the page cache was flushed up
     * to when we started the store, then we can relabel them as flushed
     * as recently as newDV.
     * Turn off CDirty bit because the stored data is now in sync with server.
     */
    if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
        if ((!(afs_dvhack || foreign) && hsame(avc->m.DataVersion, newDV))
            || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
            hset(avc->mapDV, newDV);
            avc->states &= ~CDirty;

    osi_FreeLargeSpace(dcList);
    /* If this is not the final write, a temporary error is OK. */
    if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))

} /*afs_StoreAllSegments (new 03/02/94)*/
 * afs_InvalidateAllSegments
 * Invalidates all chunks for a given file
 * avc : Pointer to vcache entry.
 * For example, called after an error has been detected.  Called
 * with avc write-locked, and afs_xdcache unheld.
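 *
 * (Added note, not in the original header: like afs_TruncateAllSegments
 * below, this routine makes two passes over the DV hash chain while holding
 * afs_xdcache, first counting the matching dcache entries and then
 * collecting them into a locally allocated array, so the per-entry work can
 * be done after afs_xdcache has been dropped.)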
afs_InvalidateAllSegments(avc)
{ /*afs_InvalidateAllSegments*/
    struct dcache **dcList;
    int i, dcListMax, dcListCount;

    AFS_STATCNT(afs_InvalidateAllSegments);
    afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
    hash = DVHash(&avc->fid);
    avc->truncPos = AFS_NOTRUNC;        /* don't truncate later */
    avc->states &= ~CExtendedFile;      /* not any more */
    ObtainWriteLock(&afs_xcbhash, 459);
    afs_DequeueCallback(avc);
    avc->states &= ~(CStatd | CDirty);  /* mark status information as bad, too */
    ReleaseWriteLock(&afs_xcbhash);
    if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
        osi_dnlc_purgedp(avc);

    /* Blow away pages; for now, only for Solaris */
#if (defined(AFS_SUN5_ENV))
    if (WriteLocked(&avc->lock))
        osi_ReleaseVM(avc, (struct AFS_UCRED *)0);
     * Block out others from screwing with this table; is a read lock
    MObtainWriteLock(&afs_xdcache, 286);

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
        if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
            tdc = afs_GetDSlot(index, 0);
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->fid))
        index = afs_dvnextTbl[index];
    dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
        if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
            tdc = afs_GetDSlot(index, 0);
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->fid)) {
                /* same file? we'll zap it */
                if (afs_indexFlags[index] & IFDataMod) {
                    afs_stats_cmperf.cacheCurrDirtyChunks--;
                    /* don't write it back */
                    afs_indexFlags[index] &= ~IFDataMod;
                afs_indexFlags[index] &= ~IFAnyPages;
                if (dcListCount < dcListMax)
                    dcList[dcListCount++] = tdc;
        index = afs_dvnextTbl[index];
    MReleaseWriteLock(&afs_xdcache);

    for (i = 0; i < dcListCount; i++) {
        ObtainWriteLock(&tdc->lock, 679);
        if (vType(avc) == VDIR)
        ReleaseWriteLock(&tdc->lock);
    osi_Free(dcList, dcListMax * sizeof(struct dcache *));
} /*afs_InvalidateAllSegments*/

 * afs_TruncateAllSegments
 * Truncate a cache file.
 * avc  : Ptr to vcache entry to truncate.
 * alen : Number of bytes to make the file.
 * areq : Ptr to request structure.
 * Called with avc write-locked; in VFS40 systems, pvnLock is also
afs_TruncateAllSegments(avc, alen, areq, acred)
    afs_size_t alen;
    register struct vcache *avc;
    struct vrequest *areq;
    struct AFS_UCRED *acred;
{ /*afs_TruncateAllSegments*/
    register struct dcache *tdc;
    register afs_int32 code;
    register afs_int32 index;
    struct dcache **tdcArray;

    AFS_STATCNT(afs_TruncateAllSegments);
    avc->m.Date = osi_Time();
    afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
    if (alen >= avc->m.Length) {
        /*
         * Special speedup since Sun's vm extends the file this way;
         * we've never written to the file, thus we can just set the new
         * length and avoid the needless calls below.
         * Also used for ftruncate calls which can extend the file.
         * To completely minimize the possible extra StoreMini RPC, we really
         * should keep the ExtendedPos as well and clear this flag if we
         * truncate below that value before we store the file back.
         */
        avc->states |= CExtendedFile;
        avc->m.Length = alen;
#if (defined(AFS_SUN5_ENV))
    /* Zero unused portion of last page */
    osi_VM_PreTruncate(avc, alen, acred);

#if (defined(AFS_SUN5_ENV))
    ObtainWriteLock(&avc->vlock, 546);
    avc->activeV++;     /* Block new getpages */
    ReleaseWriteLock(&avc->vlock);

    ReleaseWriteLock(&avc->lock);

    /* Flush pages beyond end-of-file. */
    osi_VM_Truncate(avc, alen, acred);

    ObtainWriteLock(&avc->lock, 79);

    avc->m.Length = alen;

    if (alen < avc->truncPos) avc->truncPos = alen;
    code = DVHash(&avc->fid);

    /* block out others from screwing with this table */
    MObtainWriteLock(&afs_xdcache, 287);

    for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
        if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
            tdc = afs_GetDSlot(index, 0);
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->fid))
        index = afs_dvnextTbl[index];
    /* Now allocate space where we can save those dcache entries, and
     * do a second pass over them.  Since we're holding xdcache, it
     * shouldn't be changing.
     */
    tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
    for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
        if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
            tdc = afs_GetDSlot(index, 0);
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->fid)) {
                /* same file; remember it so we can truncate it below */
                if (dcPos < dcCount) {
                    tdcArray[dcPos++] = tdc;
        index = afs_dvnextTbl[index];
    MReleaseWriteLock(&afs_xdcache);
    /* Now we loop over the array of dcache entries and truncate them */
    for (index = 0; index < dcPos; index++) {
        struct osi_file *tfile;

        tdc = tdcArray[index];

        newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
        if (newSize < 0) newSize = 0;
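        /* Worked example (added commentary, not in the original; assumes
         * 64K chunks purely for illustration): truncating to alen = 100000
         * gives chunk 0 a newSize of 100000, which is at least as large as
         * its chunkBytes, so it is left alone; chunk 1 gets 100000 - 65536 =
         * 34464 and is trimmed below; every later chunk goes negative and is
         * clamped to 0 here. */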
        ObtainSharedLock(&tdc->lock, 672);
        if (newSize < tdc->f.chunkBytes) {
            UpgradeSToWLock(&tdc->lock, 673);
            tfile = afs_CFileOpen(tdc->f.inode);
            afs_CFileTruncate(tfile, newSize);
            afs_CFileClose(tfile);
            afs_AdjustSize(tdc, newSize);
            ConvertWToSLock(&tdc->lock);
        ReleaseSharedLock(&tdc->lock);

    osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
#if (defined(AFS_SUN5_ENV))
    ObtainWriteLock(&avc->vlock, 547);
    if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
        avc->vstates &= ~VRevokeWait;
        afs_osi_Wakeup((char *)&avc->vstates);
    ReleaseWriteLock(&avc->vlock);

} /*afs_TruncateAllSegments*/