2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * --------------------- Required definitions ---------------------
13 #include <afsconfig.h>
14 #include "afs/param.h"
18 #include "afs/sysincludes.h" /*Standard vendor system headers*/
19 #include "afsincludes.h" /*AFS-based standard headers*/
20 #include "afs/afs_stats.h" /* statistics */
21 #include "afs/afs_cbqueue.h"
22 #include "afs/afs_osidnlc.h"
/* Global stamp counter.  NOTE(review): no consumer of this variable is
 * visible in this fragment -- confirm its use against the full file. */
24 afs_uint32 afs_stampValue=0;
30 * Send a truncation request to a FileServer.
36 * We're write-locked upon entry.
/*
 * afs_StoreMini
 *
 * Issue a data-less StoreData RPC so the FileServer adjusts the file's
 * length (a "truncation store") without shipping any chunk data.
 * We're write-locked on avc upon entry.
 *
 * NOTE(review): this listing is a line-sampled fragment of the original
 * file; braces and some statements (e.g. the initial assignment of tlen,
 * the retry do/while) fall in the gaps, so the text below is incomplete.
 *
 * avc  : vcache entry whose length is being stored back.
 * areq : request context used by afs_Conn / afs_Analyze.
 * Returns an AFS error code; 0 on success.
 */
39 int afs_StoreMini(register struct vcache *avc, struct vrequest *areq)
41 register struct conn *tc;
42 struct AFSStoreStatus InStatus;
43 struct AFSFetchStatus OutStatus;
44 struct AFSVolSync tsync;
45 register afs_int32 code;
46 register struct rx_call *tcall;
47 afs_size_t tlen, xlen = 0;
50 AFS_STATCNT(afs_StoreMini);
51 afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
52 ICL_TYPE_INT32, avc->m.Length);
/* Clamp the length we store to any pending truncation, then consume
 * the truncation request and the extended-file flag. */
54 if (avc->truncPos < tlen) tlen = avc->truncPos;
55 avc->truncPos = AFS_NOTRUNC;
56 avc->states &= ~CExtendedFile;
/* Pick a fileserver connection and open an Rx call for the RPC. */
59 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
63 tcall = rx_NewCall(tc->id);
65 /* Set the client mod time since we always want the file
66 * to have the client's mod time and not the server's one
67 * (to avoid problems with make, etc.) It almost always
68 * works fine with standard afs because them server/client
69 * times are in sync and more importantly this storemini
70 * it's a special call that would typically be followed by
71 * the proper store-data or store-status calls.
73 InStatus.Mask = AFS_SETMODTIME;
74 InStatus.ClientModTime = avc->m.Date;
75 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
76 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
77 ICL_TYPE_FID, &avc->fid.Fid,
78 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
79 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xlen),
80 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(tlen));
/* Prefer the 64-bit StoreData variant; servers recorded as lacking the
 * 64-bit interface get the 32-bit RPC (the l1/l2 truncated-offset setup
 * on line 94 is computed in elided lines). */
82 #ifdef AFS_64BIT_CLIENT
83 if (!afs_serverHasNo64Bit(tc)) {
84 code = StartRXAFS_StoreData64(tcall,
85 (struct AFSFid *)&avc->fid.Fid,
86 &InStatus, avc->m.Length,
87 (afs_size_t) 0, tlen);
92 code = StartRXAFS_StoreData(tcall,
93 (struct AFSFid *)&avc->fid.Fid,
94 &InStatus, l1, 0, l2);
96 #else /* AFS_64BIT_CLIENT */
97 code = StartRXAFS_StoreData(tcall,
98 (struct AFSFid *)&avc->fid.Fid,
99 &InStatus, avc->m.Length, 0, tlen);
100 #endif /* AFS_64BIT_CLIENT */
102 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
/* RXGEN_OPCODE from the 64-bit start means this server has no 64-bit
 * interface: remember that so later calls use the 32-bit path. */
103 #ifdef AFS_64BIT_CLIENT
104 if (code == RXGEN_OPCODE) {
105 afs_serverSetNo64Bit(tc);
106 code = rx_EndCall(tcall, code);
109 #endif /* AFS_64BIT_CLIENT */
111 code = rx_EndCall(tcall, code);
/* afs_Analyze decides whether to retry the RPC on another connection
 * (loop structure around this call is elided in this fragment). */
117 (afs_Analyze(tc, code, &avc->fid, areq,
118 AFS_STATS_FS_RPCIDX_STOREDATA,
/* On success merge the returned status into the vcache ... */
122 afs_ProcessFS(avc, &OutStatus, areq);
/* ... presumably on failure: discard cached segments (the surrounding
 * conditional is elided) -- confirm against the full source. */
126 afs_InvalidateAllSegments(avc);
/* Counter of dirty chunks for which no dcache entry could be located
 * during store-back.  NOTE(review): the incrementing code is in elided
 * lines -- confirm in the full file. */
132 unsigned int storeallmissing = 0;
/* Integer minimum; used by afs_StoreAllSegments to clamp the stored
 * file length to a pending truncation position. */
133 #define lmin(a,b) (((a) < (b)) ? (a) : (b))
135  * afs_StoreAllSegments
138  * Stores all modified segments back to server
141  *	avc : Pointer to vcache entry.
142  *	areq : Pointer to request structure.
145  *	Called with avc write-locked.
/* Number of contiguous chunks stored per RPC batch; kept small on
 * HP-UX/Ultrix.  NOTE(review): the matching #else/#endif lines are
 * elided from this fragment. */
147 #if	defined (AFS_HPUX_ENV) || defined(AFS_ULTRIX_ENV)
148 int NCHUNKSATONCE = 3;
150 int NCHUNKSATONCE = 64 ;
/*
 * afs_StoreAllSegments
 *
 * Write every modified (IFDataMod) cached chunk of avc back to the
 * fileserver, in batches of up to NCHUNKSATONCE contiguous chunks per
 * StoreData RPC, then relabel surviving dcache entries with the new
 * data version.  Called with avc write-locked.
 *
 * NOTE(review): this listing is a line-sampled fragment -- many braces,
 * the do/while heads, and several statements are in elided lines, so
 * the text below is not compilable as-is.  Comments describe only what
 * the visible lines establish.
 *
 * avc  : vcache entry to store back.
 * areq : request context for afs_Conn / afs_Analyze.
 * sync : flag word (AFS_SYNC, AFS_VMSYNC_INVAL, AFS_LASTSTORE visible).
 * Returns an AFS error code; 0 on success.
 */
155 int afs_StoreAllSegments(register struct vcache *avc, struct vrequest *areq,
158 register struct dcache *tdc;
159 register afs_int32 code=0;
160 register afs_int32 index;
161 register afs_int32 origCBs, foreign=0;
163 afs_hyper_t newDV, oldDV; /* DV when we start, and finish, respectively */
164 struct dcache **dcList, **dclist;
165 unsigned int i, j, minj, moredata, high, off;
167 afs_size_t maxStoredLength; /* highest offset we've written to server. */
/* Transfer-statistics bookkeeping (compiled out under AFS_NOSTATS). */
170 struct afs_stats_xferData *xferP; /* Ptr to this op's xfer struct */
171 osi_timeval_t xferStartTime, /*FS xfer start time*/
172 xferStopTime; /*FS xfer stop time*/
173 afs_size_t bytesToXfer; /* # bytes to xfer*/
174 afs_size_t bytesXferred; /* # bytes actually xferred*/
175 #endif /* AFS_NOSTATS */
178 AFS_STATCNT(afs_StoreAllSegments);
/* Snapshot the data version before we start; newDV is bumped as each
 * store completes (bump sites are in elided lines). */
180 hset(oldDV, avc->m.DataVersion);
181 hset(newDV, avc->m.DataVersion);
182 hash = DVHash(&avc->fid);
183 foreign = (avc->states & CForeign);
184 dcList = (struct dcache **) osi_AllocLargeSpace(AFS_LRALLOCSIZ);
185 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
186 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
187 #if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
188 /* In the aix vm implementation we need to do the vm_writep even
189 * on the memcache case since that's we adjust the file's size
190 * and finish flushing partial vm pages.
192 if (cacheDiskType != AFS_FCACHE_TYPE_MEM)
193 #endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
195 /* If we're not diskless, reading a file may stress the VM
196 * system enough to cause a pageout, and this vnode would be
197 * locked when the pageout occurs. We can prevent this problem
198 * by making sure all dirty pages are already flushed. We don't
199 * do this when diskless because reading a diskless (i.e.
200 * memory-resident) chunk doesn't require using new VM, and we
201 * also don't want to dump more dirty data into a diskless cache,
202 * since they're smaller, and we might exceed its available
205 #if defined(AFS_SUN5_ENV)
206 if ( sync & AFS_VMSYNC_INVAL) /* invalidate VM pages */
207 osi_VM_TryToSmush(avc, CRED() , 1 );
210 osi_VM_StoreAllSegments(avc);
/* Drop to a shared lock on avc while the (long) store runs. */
213 ConvertWToSLock(&avc->lock);
216 * Subsequent code expects a sorted list, and it expects all the
217 * chunks in the list to be contiguous, so we need a sort and a
218 * while loop in here, too - but this will work for a first pass...
219 * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
220 * bin sort, I guess. Chunk numbers start with 0
222 * - Have to get a write lock on xdcache because GetDSlot might need it (if
223 * the chunk doesn't have a dcache struct).
224 * This seems like overkill in most cases.
225 * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
226 * xdcache, then relock xdcache and try to use index. It is done
227 * a lot elsewhere in the CM, but I'm not buying that argument.
228 * - should be able to check IFDataMod without doing the GetDSlot (just
229 * hold afs_xdcache). That way, it's easy to do this without the
230 * writelock on afs_xdcache, and we save unneccessary disk
231 * operations. I don't think that works, 'cuz the next pointers
234 origCBs = afs_allCBs;
238 tlen = avc->m.Length;
/* --- Collection pass: bin dirty chunks [minj, minj+NCHUNKSATONCE)
 * into dcList slots, indexed by chunk offset within the window. --- */
242 memset((char *)dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
246 /* lock and start over from beginning of hash chain
247 * in order to avoid a race condition. */
248 MObtainWriteLock(&afs_xdcache,284);
249 index = afs_dvhashTbl[hash];
251 for(j=0; index != NULLIDX;) {
252 if ((afs_indexFlags[index] & IFDataMod) &&
253 (afs_indexUnique[index] == avc->fid.Fid.Unique)) {
254 tdc = afs_GetDSlot(index, 0); /* refcount+1. */
255 ReleaseReadLock(&tdc->tlock);
256 if (!FidCmp( &tdc->f.fid, &avc->fid ) && tdc->f.chunk >= minj ) {
257 off = tdc->f.chunk - minj;
258 if (off < NCHUNKSATONCE) {
260 osi_Panic("dclist slot already in use!");
265 /* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
266 * can't grab here, due to lock ordering with afs_xdcache.
267 * So, disable this shortcut for now. -- kolya 2001-10-13
269 /* shortcut: big win for little files */
270 /* tlen -= tdc->f.chunkBytes;
278 if (j == NCHUNKSATONCE)
285 index = afs_dvnextTbl[index];
287 MReleaseWriteLock(&afs_xdcache);
289 /* this guy writes chunks, puts back dcache structs, and bumps newDV */
290 /* "moredata" just says "there are more dirty chunks yet to come".
/* --- Store pass: push each contiguous run in dcList via one RPC. --- */
294 static afs_uint32 lp1 = 10000, lp2 = 10000;
296 struct AFSStoreStatus InStatus;
297 struct AFSFetchStatus OutStatus;
299 afs_size_t base, bytes;
305 struct osi_file * tfile;
306 struct rx_call * tcall;
/* Lock each collected chunk and accumulate its byte count; short
 * chunks before the end of the window get zero-fill padding (sbytes),
 * computed here and written in elided code. */
309 for (bytes = 0, j = 0; !code && j<=high; j++) {
311 ObtainSharedLock(&(dcList[j]->lock), 629);
314 bytes += dcList[j]->f.chunkBytes;
315 if ((dcList[j]->f.chunkBytes < afs_OtherCSize)
316 && (dcList[j]->f.chunk - minj < high)
318 int sbytes = afs_OtherCSize - dcList[j]->f.chunkBytes;
/* End of a contiguous run: issue a StoreData covering it. */
322 if (bytes && (j==high || !dcList[j+1])) {
323 /* base = AFS_CHUNKTOBASE(dcList[first]->f.chunk); */
324 base = AFS_CHUNKTOBASE(first + minj) ;
327 * take a list of dcache structs and send them all off to the server
328 * the list must be in order, and the chunks contiguous.
329 * Note - there is no locking done by this code currently. For
330 * safety's sake, xdcache could be locked over the entire call.
331 * However, that pretty well ties up all the threads. Meantime, all
332 * the chunks _MUST_ have their refcounts bumped.
333 * The writes done before a store back will clear setuid-ness
335 * We can permit CacheStoreProc to wake up the user process IFF we
336 * are doing the last RPC for this close, ie, storing back the last
337 * set of contiguous chunks of a file.
340 dclist = &dcList[first];
342 nomore = !(moredata || (j!=high));
343 InStatus.ClientModTime = avc->m.Date;
344 InStatus.Mask = AFS_SETMODTIME;
345 if (sync & AFS_SYNC) {
346 InStatus.Mask |= AFS_FSYNC;
/* Stored length = min(current length, pending truncation). */
348 tlen = lmin(avc->m.Length, avc->truncPos);
349 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
350 ICL_TYPE_FID, &avc->fid.Fid,
351 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(base),
352 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(bytes),
353 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(tlen));
357 tc = afs_Conn(&avc->fid, areq, 0);
361 tcall = rx_NewCall(tc->id);
/* 64-bit StoreData if the server supports it; otherwise truncate the
 * three offset arguments to 32 bits (t1/t2/t3 set in elided lines). */
362 #ifdef AFS_64BIT_CLIENT
363 if (!afs_serverHasNo64Bit(tc)) {
364 code = StartRXAFS_StoreData64(tcall,
365 (struct AFSFid *) &avc->fid.Fid,
366 &InStatus, base, bytes, tlen);
368 if (tlen > 0xFFFFFFFF) {
371 afs_int32 t1, t2, t3;
375 code = StartRXAFS_StoreData(tcall,
376 (struct AFSFid *) &avc->fid.Fid,
377 &InStatus, t1, t2, t3);
380 #else /* AFS_64BIT_CLIENT */
381 code = StartRXAFS_StoreData(tcall, (struct AFSFid *) &avc->fid.Fid,
382 &InStatus, base, bytes, tlen);
383 #endif /* AFS_64BIT_CLIENT */
390 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
391 avc->truncPos = AFS_NOTRUNC;
/* Ship each chunk's data down the open call. */
393 for (i = 0; i<nchunks && !code;i++) {
396 afs_warn("afs: missing dcache!\n");
398 continue; /* panic? */
400 afs_Trace4(afs_iclSetp, CM_TRACE_STOREALL2,
401 ICL_TYPE_POINTER, avc,
402 ICL_TYPE_INT32, tdc->f.chunk,
403 ICL_TYPE_INT32, tdc->index,
404 ICL_TYPE_INT32, tdc->f.inode);
/* Decide whether CacheStoreProc may wake the closing process early:
 * allowed once the remaining bytes fit under the asynchrony limit. */
407 if (avc->asynchrony == -1) {
408 if (afs_defaultAsynchrony > (bytes-stored)) {
409 shouldwake = &nomore;
411 } else if ((afs_uint32)avc->asynchrony >= (bytes-stored)) {
412 shouldwake = &nomore;
415 tfile = afs_CFileOpen(tdc->f.inode);
/* Timed/instrumented transfer (non-AFS_NOSTATS build). */
417 xferP = &(afs_stats_cmfullperf.rpc.fsXferTimes[AFS_STATS_FS_XFERIDX_STOREDATA]);
418 osi_GetuTime(&xferStartTime);
420 code = afs_CacheStoreProc(tcall, tfile, tdc->f.chunkBytes,
421 avc, shouldwake, &bytesToXfer,
424 osi_GetuTime(&xferStopTime);
427 (xferP->numSuccesses)++;
/* sumBytes is kept in KiB; the low 10 bits carry over in the
 * afs_stats_XferSumBytes accumulator. */
428 afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_STOREDATA] += bytesXferred;
429 (xferP->sumBytes) += (afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_STOREDATA] >> 10);
430 afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_STOREDATA] &= 0x3FF;
431 if (bytesXferred < xferP->minBytes)
432 xferP->minBytes = bytesXferred;
433 if (bytesXferred > xferP->maxBytes)
434 xferP->maxBytes = bytesXferred;
437 * Tally the size of the object. Note: we tally the actual size,
438 * NOT the number of bytes that made it out over the wire.
/* Histogram bucket selection; bucket-increment lines are elided. */
440 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0)
443 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET1)
446 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET2)
449 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET3)
452 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET4)
455 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET5)
458 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET6)
461 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET7)
466 afs_stats_GetDiff(elapsedTime, xferStartTime, xferStopTime);
467 afs_stats_AddTo((xferP->sumTime), elapsedTime);
468 afs_stats_SquareAddTo((xferP->sqrTime), elapsedTime);
469 if (afs_stats_TimeLessThan(elapsedTime, (xferP->minTime))) {
470 afs_stats_TimeAssign((xferP->minTime), elapsedTime);
472 if (afs_stats_TimeGreaterThan(elapsedTime, (xferP->maxTime))) {
473 afs_stats_TimeAssign((xferP->maxTime), elapsedTime);
/* AFS_NOSTATS build: plain transfer, dummy byte-count outputs. */
477 code = afs_CacheStoreProc(tcall, tfile, tdc->f.chunkBytes,
478 avc, shouldwake, &lp1, &lp2);
479 #endif /* AFS_NOSTATS */
480 afs_CFileClose(tfile);
481 #ifdef AFS_64BIT_CLIENT
482 if (code == RXGEN_OPCODE) {
483 afs_serverSetNo64Bit(tc);
486 #endif /* AFS_64BIT_CLIENT */
/* Pad a short interior chunk with zeroes up to afs_OtherCSize. */
487 if ((tdc->f.chunkBytes < afs_OtherCSize) &&
489 int bsent, tlen, sbytes = afs_OtherCSize - tdc->f.chunkBytes;
490 char *tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
493 tlen = (sbytes > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : sbytes);
494 memset(tbuffer, 0, tlen);
496 bsent = rx_Write(tcall, tbuffer, tlen);
500 code = -33; /* XXX */
505 osi_FreeLargeSpace(tbuffer);
507 stored += tdc->f.chunkBytes;
509 /* ideally, I'd like to unlock the dcache and turn
510 * off the writing bit here, but that would
511 * require being able to retry StoreAllSegments in
512 * the event of a failure. It only really matters
513 * if user can't read from a 'locked' dcache or
514 * one which has the writing bit turned on. */
517 struct AFSVolSync tsync;
519 code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
523 if (!code) doProcessFS = 1; /* Flag to run afs_ProcessFS() later on */
528 code2 = rx_EndCall(tcall, code);
530 if (code2) code = code2;
532 } while (afs_Analyze(tc, code, &avc->fid, areq,
533 AFS_STATS_FS_RPCIDX_STOREDATA,
536 /* put back all remaining locked dcache entries */
537 for (i=0; i<nchunks; i++) {
/* On a clean store, clear the dirty flags on the index entry. */
540 if (afs_indexFlags[tdc->index] & IFDataMod) {
542 * LOCKXXX -- should hold afs_xdcache(W) when
543 * modifying afs_indexFlags.
545 afs_indexFlags[tdc->index] &= ~IFDataMod;
546 afs_stats_cmperf.cacheCurrDirtyChunks--;
547 afs_indexFlags[tdc->index] &= ~IFDirtyPages;
548 if ( sync & AFS_VMSYNC_INVAL )
550 /* since we have invalidated all the pages of this
551 ** vnode by calling osi_VM_TryToSmush, we can
552 ** safely mark this dcache entry as not having
553 ** any pages. This vnode now becomes eligible for
554 ** reclamation by getDownD.
556 afs_indexFlags[tdc->index] &= ~IFAnyPages;
560 UpgradeSToWLock(&tdc->lock, 628);
561 tdc->f.states &= ~DWriting; /* correct?*/
562 tdc->dflags |= DFEntryMod;
563 ReleaseWriteLock(&tdc->lock);
565 /* Mark the entry as released */
570 /* Now copy out return params */
571 UpgradeSToWLock(&avc->lock,28); /* keep out others for a while */
572 afs_ProcessFS(avc, &OutStatus, areq);
573 /* Keep last (max) size of file on server to see if
574 * we need to call afs_StoreMini to extend the file.
577 maxStoredLength = OutStatus.Length;
578 ConvertWToSLock(&avc->lock);
/* After an error, unlock/release the chunks we never got to. */
583 for (j++; j<=high; j++) {
585 ReleaseSharedLock(&(dcList[j]->lock));
586 afs_PutDCache(dcList[j]);
587 /* Releasing entry */
593 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALLDCDONE,
594 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
599 /* Release any zero-length dcache entries in our interval
600 * that we locked but didn't store back above.
602 for (j = 0; j<=high; j++) {
605 osi_Assert(tdc->f.chunkBytes == 0);
606 ReleaseSharedLock(&tdc->lock);
/* Advance the chunk window and loop while more dirty chunks remain. */
612 minj += NCHUNKSATONCE;
613 } while ( !code && moredata );
615 UpgradeSToWLock(&avc->lock,29);
617 /* send a trivial truncation store if did nothing else */
620 * Call StoreMini if we haven't written enough data to extend the
621 * file at the fileserver to the client's notion of the file length.
623 if ((avc->truncPos != AFS_NOTRUNC) ||
624 ((avc->states & CExtendedFile) && (maxStoredLength < avc->m.Length))) {
625 code = afs_StoreMini(avc, areq);
627 hadd32(newDV, 1); /* just bumped here, too */
629 avc->states &= ~CExtendedFile;
633 * Finally, turn off DWriting, turn on DFEntryMod,
634 * update f.versionNo.
635 * A lot of this could be integrated into the loop above
/* --- Relabel pass: walk the hash chain again and collect this file's
 * chunks so their versionNo can be bumped to the new DV. --- */
645 memset((char *)dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
647 /* overkill, but it gets the lock in case GetDSlot needs it */
648 MObtainWriteLock(&afs_xdcache,285);
650 for(j = 0, safety = 0, index = afs_dvhashTbl[hash];
651 index != NULLIDX && safety < afs_cacheFiles+2;) {
653 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
654 tdc = afs_GetDSlot(index, 0);
655 ReleaseReadLock(&tdc->tlock);
657 if (!FidCmp(&tdc->f.fid, &avc->fid) && tdc->f.chunk >= minj) {
658 off = tdc->f.chunk - minj;
659 if (off < NCHUNKSATONCE) {
660 /* this is the file, and the correct chunk range */
661 if (j >= NCHUNKSATONCE)
662 osi_Panic("Too many dcache entries in range\n");
667 if (j == NCHUNKSATONCE)
675 index = afs_dvnextTbl[index];
677 MReleaseWriteLock(&afs_xdcache);
679 for (i=0; i<j; i++) {
680 /* Iterate over the dcache entries we collected above */
682 ObtainSharedLock(&tdc->lock, 677);
684 /* was code here to clear IFDataMod, but it should only be done
685 * in storedcache and storealldcache.
687 /* Only increase DV if we had up-to-date data to start with.
688 * Otherwise, we could be falsely upgrading an old chunk
689 * (that we never read) into one labelled with the current
690 * DV #. Also note that we check that no intervening stores
691 * occurred, otherwise we might mislabel cache information
692 * for a chunk that we didn't store this time
694 /* Don't update the version number if it's not yet set. */
695 if (!hsame(tdc->f.versionNo, h_unset) &&
696 hcmp(tdc->f.versionNo, oldDV) >= 0) {
698 if ((!(afs_dvhack || foreign) && hsame(avc->m.DataVersion, newDV))
699 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs)) ) {
700 /* no error, this is the DV */
702 UpgradeSToWLock(&tdc->lock, 678);
703 hset(tdc->f.versionNo, avc->m.DataVersion);
704 tdc->dflags |= DFEntryMod;
705 ConvertWToSLock(&tdc->lock);
709 ReleaseSharedLock(&tdc->lock);
713 minj += NCHUNKSATONCE;
720 * Invalidate chunks after an error for ccores files since
721 * afs_inactive won't be called for these and they won't be
722 * invalidated. Also discard data if it's a permanent error from the
725 if (areq->permWriteError || (avc->states & (CCore1 | CCore))) {
726 afs_InvalidateAllSegments(avc);
729 afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
730 ICL_TYPE_INT32, avc->m.Length, ICL_TYPE_INT32, code);
731 /* would like a Trace5, but it doesn't exist...*/
732 afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
733 ICL_TYPE_INT32, avc->lock.wait_states,
734 ICL_TYPE_INT32, avc->lock.excl_locked);
735 afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
736 ICL_TYPE_INT32, avc->lock.wait_states,
737 ICL_TYPE_INT32, avc->lock.readers_reading,
738 ICL_TYPE_INT32, avc->lock.num_waiting );
741 * Finally, if updated DataVersion matches newDV, we did all of the
742 * stores. If mapDV indicates that the page cache was flushed up
743 * to when we started the store, then we can relabel them as flushed
744 * as recently as newDV.
745 * Turn off CDirty bit because the stored data is now in sync with server.
747 if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
748 if ((!(afs_dvhack || foreign) && hsame(avc->m.DataVersion, newDV))
749 || ((afs_dvhack || foreign) && (origCBs == afs_allCBs)) ) {
750 hset(avc->mapDV, newDV);
751 avc->states &= ~CDirty;
754 osi_FreeLargeSpace(dcList);
756 /* If not the final write a temporary error is ok. */
757 if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
762 } /*afs_StoreAllSegments (new 03/02/94)*/
766 * afs_InvalidateAllSegments
769 * Invalidates all chunks for a given file
772 * avc : Pointer to vcache entry.
775 * For example, called after an error has been detected. Called
776 * with avc write-locked, and afs_xdcache unheld.
/*
 * afs_InvalidateAllSegments
 *
 * Discard every cached chunk of avc: clear its dirty/modified state,
 * drop its callback, and reset each matching dcache entry.  Called,
 * for example, after an error has been detected; avc is write-locked
 * and afs_xdcache unheld on entry.
 *
 * NOTE(review): line-sampled fragment -- braces, the dcListMax counting
 * statement, and the per-entry reset inside the final loop are elided.
 */
779 int afs_InvalidateAllSegments(struct vcache *avc)
784 struct dcache **dcList;
785 int i, dcListMax, dcListCount;
787 AFS_STATCNT(afs_InvalidateAllSegments);
788 afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
789 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
790 hash = DVHash(&avc->fid);
791 avc->truncPos = AFS_NOTRUNC; /* don't truncate later */
792 avc->states &= ~CExtendedFile; /* not any more */
793 ObtainWriteLock(&afs_xcbhash, 459);
794 afs_DequeueCallback(avc);
795 avc->states &= ~(CStatd|CDirty); /* mark status information as bad, too */
796 ReleaseWriteLock(&afs_xcbhash);
/* Purge name-cache entries for directories (odd vnode = directory). */
797 if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
798 osi_dnlc_purgedp(avc);
799 /* Blow away pages; for now, only for Solaris */
800 #if (defined(AFS_SUN5_ENV))
801 if (WriteLocked(&avc->lock))
802 osi_ReleaseVM(avc, (struct AFS_UCRED *)0);
805 * Block out others from screwing with this table; is a read lock
808 MObtainWriteLock(&afs_xdcache,286);
/* First pass: count this file's dcache entries (count accumulation is
 * in elided lines) so dcList can be sized. */
811 for(index = afs_dvhashTbl[hash]; index != NULLIDX;) {
812 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
813 tdc = afs_GetDSlot(index, 0);
814 ReleaseReadLock(&tdc->tlock);
815 if (!FidCmp(&tdc->f.fid, &avc->fid))
819 index = afs_dvnextTbl[index];
822 dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
/* Second pass: clear dirty/page flags and collect the entries. */
825 for(index = afs_dvhashTbl[hash]; index != NULLIDX;) {
826 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
827 tdc = afs_GetDSlot(index, 0);
828 ReleaseReadLock(&tdc->tlock);
829 if (!FidCmp(&tdc->f.fid, &avc->fid)) {
830 /* same file? we'll zap it */
831 if (afs_indexFlags[index] & IFDataMod) {
832 afs_stats_cmperf.cacheCurrDirtyChunks--;
833 /* don't write it back */
834 afs_indexFlags[index] &= ~IFDataMod;
836 afs_indexFlags[index] &= ~IFAnyPages;
837 if (dcListCount < dcListMax)
838 dcList[dcListCount++] = tdc;
845 index = afs_dvnextTbl[index];
847 MReleaseWriteLock(&afs_xdcache);
/* Reset each collected entry under its own lock (reset body elided). */
849 for (i=0; i<dcListCount; i++) {
852 ObtainWriteLock(&tdc->lock, 679);
854 if (vType(avc) == VDIR)
856 ReleaseWriteLock(&tdc->lock);
860 osi_Free(dcList, dcListMax * sizeof(struct dcache *));
867 * afs_TruncateAllSegments
870 * Truncate a cache file.
873 * avc : Ptr to vcache entry to truncate.
874 * alen : Number of bytes to make the file.
875 * areq : Ptr to request structure.
878 * Called with avc write-locked; in VFS40 systems, pvnLock is also
/*
 * afs_TruncateAllSegments
 *
 * Truncate the cached file to alen bytes: shrink (or logically extend)
 * the vcache length, flush/trim VM pages, and truncate every cached
 * chunk that extends past the new length.  Called with avc
 * write-locked; in VFS40 systems, pvnLock is also held.
 *
 * avc   : vcache entry to truncate.
 * alen  : new file length in bytes.
 * areq  : request structure.
 * acred : credentials for the VM operations.
 *
 * NOTE(review): line-sampled fragment -- the function's tail (after the
 * Solaris vlock release) and several interior lines are elided.
 */
881 int afs_TruncateAllSegments(register struct vcache *avc, afs_size_t alen,
882 struct vrequest *areq, struct AFS_UCRED *acred)
884 register struct dcache *tdc;
885 register afs_int32 code;
886 register afs_int32 index;
890 struct dcache **tdcArray;
892 AFS_STATCNT(afs_TruncateAllSegments);
893 avc->m.Date = osi_Time();
894 afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
895 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
896 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
/* Extension (or no-op) case: just record the new length and flag the
 * file as extended so a later StoreMini can fix it up at the server. */
897 if (alen >= avc->m.Length) {
899 * Special speedup since Sun's vm extends the file this way;
900 * we've never written to the file thus we can just set the new
901 * length and avoid the needless calls below.
902 * Also used for ftruncate calls which can extend the file.
903 * To completely minimize the possible extra StoreMini RPC, we really
904 * should keep the ExtendedPos as well and clear this flag if we
905 * truncate below that value before we store the file back.
907 avc->states |= CExtendedFile;
908 avc->m.Length = alen;
912 #if (defined(AFS_SUN5_ENV))
914 /* Zero unused portion of last page */
915 osi_VM_PreTruncate(avc, alen, acred);
919 #if (defined(AFS_SUN5_ENV))
920 ObtainWriteLock(&avc->vlock, 546);
921 avc->activeV++; /* Block new getpages */
922 ReleaseWriteLock(&avc->vlock);
/* Drop avc->lock across the VM truncate, then reacquire it. */
925 ReleaseWriteLock(&avc->lock);
928 /* Flush pages beyond end-of-file. */
929 osi_VM_Truncate(avc, alen, acred);
932 ObtainWriteLock(&avc->lock,79);
934 avc->m.Length = alen;
936 if (alen < avc->truncPos) avc->truncPos = alen;
/* NOTE(review): 'code' here holds the DV hash bucket, not an error. */
937 code = DVHash(&avc->fid);
939 /* block out others from screwing with this table */
940 MObtainWriteLock(&afs_xdcache,287);
/* First pass: count this file's dcache entries (dcCount accumulation
 * is in elided lines). */
943 for(index = afs_dvhashTbl[code]; index != NULLIDX;) {
944 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
945 tdc = afs_GetDSlot(index, 0);
946 ReleaseReadLock(&tdc->tlock);
947 if (!FidCmp(&tdc->f.fid, &avc->fid))
951 index = afs_dvnextTbl[index];
954 /* Now allocate space where we can save those dcache entries, and
955 * do a second pass over them.. Since we're holding xdcache, it
956 * shouldn't be changing.
958 tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
961 for(index = afs_dvhashTbl[code]; index != NULLIDX;) {
962 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
963 tdc = afs_GetDSlot(index, 0);
964 ReleaseReadLock(&tdc->tlock);
965 if (!FidCmp(&tdc->f.fid, &avc->fid)) {
966 /* same file, and modified, we'll store it back */
967 if (dcPos < dcCount) {
968 tdcArray[dcPos++] = tdc;
976 index = afs_dvnextTbl[index];
979 MReleaseWriteLock(&afs_xdcache);
981 /* Now we loop over the array of dcache entries and truncate them */
982 for (index = 0; index < dcPos; index++) {
983 struct osi_file *tfile;
985 tdc = tdcArray[index];
/* Bytes of this chunk that survive the truncation. */
987 newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
988 if (newSize < 0) newSize = 0;
989 ObtainSharedLock(&tdc->lock, 672);
990 if (newSize < tdc->f.chunkBytes) {
991 UpgradeSToWLock(&tdc->lock, 673);
992 tfile = afs_CFileOpen(tdc->f.inode);
993 afs_CFileTruncate(tfile, newSize);
994 afs_CFileClose(tfile);
995 afs_AdjustSize(tdc, newSize);
996 ConvertWToSLock(&tdc->lock);
998 ReleaseSharedLock(&tdc->lock);
1002 osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
/* Solaris: re-enable getpages and wake any waiter blocked on revoke. */
1004 #if (defined(AFS_SUN5_ENV))
1005 ObtainWriteLock(&avc->vlock, 547);
1006 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
1007 avc->vstates &= ~VRevokeWait;
1008 afs_osi_Wakeup((char *)&avc->vstates);
1010 ReleaseWriteLock(&avc->vlock);