2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "afs/param.h"
26 #include "afs/sysincludes.h" /* Standard vendor system headers */
27 #include "afsincludes.h" /* Afs-based standard headers */
28 #include "afs/afs_stats.h" /* statistics */
29 #include "afs/afs_cbqueue.h"
30 #include "afs/nfsclient.h"
31 #include "afs/afs_osidnlc.h"
34 extern unsigned char *afs_indexFlags;
36 /* Called by all write-on-close routines: regular afs_close,
37 * store via background daemon and store via the
38 * afs_FlushActiveVCaches routine (when CCORE is on).
39 * avc->lock must be write-locked.
/*
 * afs_StoreOnLastReference -- push this vcache's dirty data back to the
 * fileserver on a write-close.
 *
 * Called by all write-on-close paths: regular afs_close, store via the
 * background daemon, and afs_FlushActiveVCaches (when CCORE is on).
 * Caller must hold avc->lock write-locked.
 *
 * NOTE(review): this is a sampled excerpt of the original file; several
 * original lines (declarations, closing braces, #else/#endif directives)
 * are not visible here.  Comments describe only the visible logic.
 */
42 afs_StoreOnLastReference(register struct vcache *avc,
43 register struct vrequest *treq)
47 AFS_STATCNT(afs_StoreOnLastReference);
48 /* if CCore flag is set, we clear it and do the extra decrement
49 * ourselves now. If we're called by the CCore clearer, the CCore
50 * flag will already be clear, so we don't have to worry about
51 * clearing it twice. */
52 if (avc->states & CCore) {
53 avc->states &= ~CCore;
54 #if defined(AFS_SGI_ENV)
55 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
/* NOTE(review): the matching #endif for AFS_SGI_ENV above is among the
 * lines not visible in this excerpt. */
57 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
58 * depending on the flags the file was opened with. So, if you make any
59 * changes to the way the execsOrWriters flag is handled check with the
62 avc->execsOrWriters--;
/* Drop the extra vnode reference and credential that afs_FakeClose took
 * when it set CCore. */
63 AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose) */
64 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
/* Connected path: store the dirty chunks back to the server now. */
68 if (!AFS_IS_DISCONNECTED) {
71 /* Now, send the file back. Used to require 0 writers left, but now do
72 * it on every close for write, since two closes in a row are harmless
73 * since first will clean all chunks, and second will be noop. Note that
74 * this will also save confusion when someone keeps a file open
75 * inadvertently, since with old system, writes to the server would never
78 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
80 * We have to do these after the above store is done: in some systems
81 * like aix they'll need to flush all the vm dirty pages to the disk via
82 * the strategy routine. During that whole procedure (done under no avc
83 * locks) opens, refcounts would be zero, since it didn't reach the
84 * afs_{rd,wr} routines which means the vcache is a perfect candidate
/* Disconnected read-write mode: record the pending write so it can be
 * replayed at reconnection instead of storing it now. */
89 } else if (AFS_IS_DISCON_RW) {
92 if (!avc->ddirty_flags ||
93 (avc->ddirty_flags == VDisconShadowed)) {
94 /* Add to disconnected dirty list. */
95 AFS_DISCON_ADD_DIRTY(avc);
98 /* Set disconnected write flag. */
99 avc->ddirty_flags |= VDisconWriteClose;
101 } /* if not disconnected */
103 #if defined(AFS_SGI_ENV)
104 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
/* Balance the writer count now that the store work has been handled. */
108 avc->execsOrWriters--;
/*
 * afs_MemWrite -- write path used when the cache manager runs with a
 * memory cache: copy user data (auio) into cache chunks, chunk by chunk,
 * via afs_MemWriteUIO, marking each chunk dirty for later storeback.
 *
 * avc    - the vcache being written.
 * auio   - source data / position (uio).
 * aio    - I/O flags (e.g. IO_APPEND).
 * acred  - credentials of the writer.
 * noLock - caller-supplied locking hint (usage not visible in this excerpt).
 *
 * NOTE(review): this is a sampled excerpt; many original lines (brace
 * closures, #else/#endif arms, some declarations and returns) are not
 * visible.  Comments describe only the visible logic.
 */
115 afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
116 struct AFS_UCRED *acred, int noLock)
118 afs_size_t totalLength;
119 afs_size_t transferLength;
121 afs_size_t offset, len;
122 afs_int32 tlen, trimlen;
125 register struct dcache *tdc;
130 #ifdef AFS_DARWIN80_ENV
134 struct uio *tuiop = &tuio;
135 struct iovec *tvec; /* again, should have define */
137 register afs_int32 code;
138 struct vrequest treq;
140 AFS_STATCNT(afs_MemWrite);
/* A previously recorded vnode error short-circuits the write. */
142 return avc->vc_error;
144 startDate = osi_Time();
145 if ((code = afs_InitReq(&treq, acred)))
147 /* otherwise we read */
148 totalLength = AFS_UIO_RESID(auio);
149 filePos = AFS_UIO_OFFSET(auio);
152 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
153 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
154 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
155 ICL_HANDLE_OFFSET(avc->m.Length));
157 afs_MaybeWakeupTruncateDaemon();
158 ObtainWriteLock(&avc->lock, 126);
160 #if defined(AFS_SGI_ENV)
164 * afs_xwrite handles setting m.Length
165 * and handles APPEND mode.
166 * Since we are called via strategy, we need to trim the write to
167 * the actual size of the file
169 osi_Assert(filePos <= avc->m.Length);
170 diff = avc->m.Length - filePos;
171 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
172 totalLength = AFS_UIO_RESID(auio);
175 if (aio & IO_APPEND) {
176 /* append mode, start it at the right spot */
177 #if defined(AFS_SUN56_ENV)
178 auio->uio_loffset = 0;
180 filePos = avc->m.Length;
181 AFS_UIO_SETOFFSET(auio, filePos);
185 * Note that we use startDate rather than calling osi_Time() here.
186 * This is to avoid counting lock-waiting time in file date (for ranlib).
188 avc->m.Date = startDate;
/* HP-UX: enforce the process file-size resource limit before writing. */
190 #if defined(AFS_HPUX_ENV)
191 #if defined(AFS_HPUX101_ENV)
192 if ((totalLength + filePos) >> 9 >
193 (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
195 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
198 ReleaseWriteLock(&avc->lock);
202 #ifdef AFS_VM_RDWR_ENV
204 * If write is implemented via VM, afs_FakeOpen() is called from the
205 * high-level write op.
207 if (avc->execsOrWriters <= 0) {
208 printf("WARNING: afs_ufswr vp=%lx, exOrW=%d\n", (unsigned long)avc,
209 avc->execsOrWriters);
214 avc->states |= CDirty;
215 #ifndef AFS_DARWIN80_ENV
216 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main transfer loop: one cache chunk per iteration until all of the
 * caller's data has been consumed. */
218 while (totalLength > 0) {
220 * The following line is necessary because afs_GetDCache with
221 * flag == 4 expects the length field to be filled. It decides
222 * from this whether it's necessary to fetch data into the chunk
223 * before writing or not (when the whole chunk is overwritten!).
225 len = totalLength; /* write this amount by default */
227 tdc = afs_FindDCache(avc, filePos);
229 ObtainWriteLock(&tdc->lock, 653);
/* Cache nearly full: try to reuse an existing up-to-date chunk, and if
 * that fails, wait for the truncate daemon to drain the cache. */
230 } else if (afs_blocksUsed >
231 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
232 tdc = afs_FindDCache(avc, filePos);
234 ObtainWriteLock(&tdc->lock, 654);
235 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
236 || (tdc->dflags & DFFetching)) {
237 ReleaseWriteLock(&tdc->lock);
243 afs_MaybeWakeupTruncateDaemon();
244 while (afs_blocksUsed >
245 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
/* Drop avc->lock while sleeping so the daemons can make progress. */
246 ReleaseWriteLock(&avc->lock);
247 if (afs_blocksUsed - afs_blocksDiscarded >
248 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
249 afs_WaitForCacheDrain = 1;
250 afs_osi_Sleep(&afs_WaitForCacheDrain);
252 afs_MaybeFreeDiscardedDCache();
253 afs_MaybeWakeupTruncateDaemon();
254 ObtainWriteLock(&avc->lock, 506);
256 avc->states |= CDirty;
257 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
259 ObtainWriteLock(&tdc->lock, 655);
262 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
264 ObtainWriteLock(&tdc->lock, 656);
/* First modification of this chunk: count it in the dirty-chunk stats
 * and pin it in the cache index. */
270 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
271 afs_stats_cmperf.cacheCurrDirtyChunks++;
272 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
274 if (!(tdc->f.states & DWriting)) {
275 /* don't mark entry as mod if we don't have to */
276 tdc->f.states |= DWriting;
277 tdc->dflags |= DFEntryMod;
279 len = totalLength; /* write this amount by default */
280 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
281 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
282 if (max <= len + offset) { /*if we'd go past the end of this chunk */
283 /* it won't all fit in this chunk, so write as much
/* Build a trimmed uio describing just this chunk's portion of the I/O. */
288 #ifdef AFS_DARWIN80_ENV
292 tuiop = afsio_darwin_partialcopy(auio, trimlen);
294 /* mung uio structure to be right for this transfer */
295 afsio_copy(auio, &tuio, tvec);
297 afsio_trim(&tuio, trimlen);
299 AFS_UIO_SETOFFSET(tuiop, offset);
301 code = afs_MemWriteUIO(tdc->f.inode, tuiop);
/* Write failure: discard the chunk entirely (bad data) and undo the
 * dirty-chunk accounting done above. */
303 void *mep; /* XXX in prototype world is struct memCacheEntry * */
305 ZapDCE(tdc); /* bad data */
306 mep = afs_MemCacheOpen(tdc->f.inode);
307 afs_MemCacheTruncate(mep, 0);
308 afs_MemCacheClose(mep);
309 afs_stats_cmperf.cacheCurrDirtyChunks--;
310 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
311 ReleaseWriteLock(&tdc->lock);
315 /* otherwise we've written some, fixup length, etc and continue with next seg */
316 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
318 afsio_skip(auio, tlen); /* advance auio over data written */
319 /* compute new file size */
320 if (offset + len > tdc->f.chunkBytes) {
321 afs_int32 tlength = offset + len;
322 afs_AdjustSize(tdc, tlength);
323 if (tdc->validPos < filePos + len)
324 tdc->validPos = filePos + len;
327 transferLength += len;
329 #if defined(AFS_SGI_ENV)
330 /* afs_xwrite handles setting m.Length */
331 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: grow the cached file length if we wrote past EOF. */
333 if (filePos > avc->m.Length) {
334 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
335 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
336 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
337 ICL_HANDLE_OFFSET(filePos));
338 avc->m.Length = filePos;
341 ReleaseWriteLock(&tdc->lock);
343 #if !defined(AFS_VM_RDWR_ENV)
345 * If write is implemented via VM, afs_DoPartialWrite() is called from
346 * the high-level write op.
349 code = afs_DoPartialWrite(avc, &treq);
357 #ifndef AFS_VM_RDWR_ENV
358 afs_FakeClose(avc, acred);
/* Record the first error on the vcache so later closers can see it. */
360 if (error && !avc->vc_error)
361 avc->vc_error = error;
363 ReleaseWriteLock(&avc->lock);
364 #ifdef AFS_DARWIN80_ENV
367 osi_FreeSmallSpace(tvec);
369 error = afs_CheckCode(error, &treq, 6);
374 /* called on writes */
/*
 * afs_UFSWrite -- write path used when the cache manager runs with an
 * on-disk (UFS) cache: copy user data (auio) into cache chunk files,
 * chunk by chunk, using the platform's vnode write operation, marking
 * each chunk dirty for later storeback.
 *
 * avc    - the vcache being written.
 * auio   - source data / position (uio).
 * aio    - I/O flags (e.g. IO_APPEND, sync flags).
 * acred  - credentials of the writer.
 * noLock - caller-supplied locking hint; also gates the sync-on-close
 *          handling near the end of the visible code.
 *
 * NOTE(review): this is a sampled excerpt; many original lines (brace
 * closures, #else/#endif arms, some declarations and returns) are not
 * visible.  Comments describe only the visible logic.
 */
376 afs_UFSWrite(register struct vcache *avc, struct uio *auio, int aio,
377 struct AFS_UCRED *acred, int noLock)
379 afs_size_t totalLength;
380 afs_size_t transferLength;
382 afs_size_t offset, len;
387 register struct dcache *tdc;
392 #ifdef AFS_DARWIN80_ENV
396 struct uio *tuiop = &tuio;
397 struct iovec *tvec; /* again, should have define */
399 struct osi_file *tfile;
400 register afs_int32 code;
401 struct vrequest treq;
403 AFS_STATCNT(afs_UFSWrite);
/* A previously recorded vnode error short-circuits the write. */
405 return avc->vc_error;
/* Disconnected and not in read-write mode: writes are refused. */
407 if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW)
410 startDate = osi_Time();
411 if ((code = afs_InitReq(&treq, acred)))
413 /* otherwise we read */
414 totalLength = AFS_UIO_RESID(auio);
415 filePos = AFS_UIO_OFFSET(auio);
418 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
419 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
420 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
421 ICL_HANDLE_OFFSET(avc->m.Length));
423 afs_MaybeWakeupTruncateDaemon();
424 ObtainWriteLock(&avc->lock, 556);
426 #if defined(AFS_SGI_ENV)
430 * afs_xwrite handles setting m.Length
431 * and handles APPEND mode.
432 * Since we are called via strategy, we need to trim the write to
433 * the actual size of the file
435 osi_Assert(filePos <= avc->m.Length);
436 diff = avc->m.Length - filePos;
437 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
438 totalLength = AFS_UIO_RESID(auio);
441 if (aio & IO_APPEND) {
442 /* append mode, start it at the right spot */
443 #if defined(AFS_SUN56_ENV)
444 auio->uio_loffset = 0;
446 filePos = avc->m.Length;
447 AFS_UIO_SETOFFSET(auio, avc->m.Length);
451 * Note that we use startDate rather than calling osi_Time() here.
452 * This is to avoid counting lock-waiting time in file date (for ranlib).
454 avc->m.Date = startDate;
/* HP-UX: enforce the process file-size resource limit before writing. */
456 #if defined(AFS_HPUX_ENV)
457 #if defined(AFS_HPUX101_ENV)
458 if ((totalLength + filePos) >> 9 >
459 p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
461 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
464 ReleaseWriteLock(&avc->lock);
468 #ifdef AFS_VM_RDWR_ENV
470 * If write is implemented via VM, afs_FakeOpen() is called from the
471 * high-level write op.
473 if (avc->execsOrWriters <= 0) {
474 printf("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
475 avc->execsOrWriters);
480 avc->states |= CDirty;
481 #ifndef AFS_DARWIN80_ENV
482 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main transfer loop: one cache chunk per iteration until all of the
 * caller's data has been consumed. */
484 while (totalLength > 0) {
486 * The following line is necessary because afs_GetDCache with
487 * flag == 4 expects the length field to be filled. It decides
488 * from this whether it's necessary to fetch data into the chunk
489 * before writing or not (when the whole chunk is overwritten!).
491 len = totalLength; /* write this amount by default */
492 /* read the cached info */
494 tdc = afs_FindDCache(avc, filePos);
496 ObtainWriteLock(&tdc->lock, 657);
/* Cache nearly full: try to reuse an existing up-to-date chunk, and if
 * that fails, wait for the truncate daemon to drain the cache. */
497 } else if (afs_blocksUsed >
498 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
499 tdc = afs_FindDCache(avc, filePos);
501 ObtainWriteLock(&tdc->lock, 658);
502 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
503 || (tdc->dflags & DFFetching)) {
504 ReleaseWriteLock(&tdc->lock);
510 afs_MaybeWakeupTruncateDaemon();
511 while (afs_blocksUsed >
512 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
/* Drop avc->lock while sleeping so the daemons can make progress. */
513 ReleaseWriteLock(&avc->lock);
514 if (afs_blocksUsed - afs_blocksDiscarded >
515 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
516 afs_WaitForCacheDrain = 1;
517 afs_osi_Sleep(&afs_WaitForCacheDrain);
519 afs_MaybeFreeDiscardedDCache();
520 afs_MaybeWakeupTruncateDaemon();
521 ObtainWriteLock(&avc->lock, 509);
523 avc->states |= CDirty;
524 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
526 ObtainWriteLock(&tdc->lock, 659);
529 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
531 ObtainWriteLock(&tdc->lock, 660);
/* First modification of this chunk: count it in the dirty-chunk stats
 * and pin it in the cache index. */
537 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
538 afs_stats_cmperf.cacheCurrDirtyChunks++;
539 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
541 if (!(tdc->f.states & DWriting)) {
542 /* don't mark entry as mod if we don't have to */
543 tdc->f.states |= DWriting;
544 tdc->dflags |= DFEntryMod;
/* Open the on-disk cache file backing this chunk. */
546 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
547 len = totalLength; /* write this amount by default */
548 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
549 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
550 if (max <= len + offset) { /*if we'd go past the end of this chunk */
551 /* it won't all fit in this chunk, so write as much
/* Build a trimmed uio describing just this chunk's portion of the I/O. */
556 #ifdef AFS_DARWIN80_ENV
560 tuiop = afsio_darwin_partialcopy(auio, trimlen);
562 /* mung uio structure to be right for this transfer */
563 afsio_copy(auio, &tuio, tvec);
565 afsio_trim(&tuio, trimlen);
567 AFS_UIO_SETOFFSET(tuiop, offset);
/* Per-platform dispatch: write the trimmed uio into the cache file using
 * whatever vnode write interface this OS provides. */
569 #if defined(AFS_AIX41_ENV)
572 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL,
573 NULL, afs_osi_credp);
575 #elif defined(AFS_AIX32_ENV)
576 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
577 #elif defined(AFS_AIX_ENV)
579 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) & offset,
580 &tuio, NULL, NULL, -1);
581 #elif defined(AFS_SUN5_ENV)
583 #ifdef AFS_SUN510_ENV
587 VOP_RWLOCK(tfile->vnode, 1, &ct);
588 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
589 VOP_RWUNLOCK(tfile->vnode, 1, &ct);
592 VOP_RWLOCK(tfile->vnode, 1);
593 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
594 VOP_RWUNLOCK(tfile->vnode, 1);
599 ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
600 #elif defined(AFS_SGI_ENV)
/* CWritingUFS flags the vcache while we hold the UFS vnode locked. */
602 avc->states |= CWritingUFS;
603 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
604 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
605 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
606 avc->states &= ~CWritingUFS;
608 #elif defined(AFS_OSF_ENV)
/* OSF: temporarily swap in the AFS credential for the duration of the
 * vnode write (restore not visible in this excerpt). */
610 struct ucred *tmpcred = u.u_cred;
611 u.u_cred = afs_osi_credp;
612 tuio.uio_rw = UIO_WRITE;
614 VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, code);
618 #elif defined(AFS_HPUX100_ENV)
621 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
624 #elif defined(AFS_LINUX20_ENV)
626 code = osi_rdwr(tfile, &tuio, UIO_WRITE);
628 #elif defined(AFS_DARWIN80_ENV)
630 code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
632 #elif defined(AFS_DARWIN_ENV)
634 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
635 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
636 VOP_UNLOCK(tfile->vnode, 0, current_proc());
638 #elif defined(AFS_FBSD80_ENV)
640 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
641 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
642 VOP_UNLOCK(tfile->vnode, 0);
644 #elif defined(AFS_FBSD50_ENV)
646 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
647 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
648 VOP_UNLOCK(tfile->vnode, 0, curthread);
650 #elif defined(AFS_XBSD_ENV)
652 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
653 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
654 VOP_UNLOCK(tfile->vnode, 0, curproc);
658 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
660 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
/* Write failure: discard the chunk entirely (bad data), undo the
 * dirty-chunk accounting and close the cache file. */
664 ZapDCE(tdc); /* bad data */
665 osi_UFSTruncate(tfile, 0); /* fake truncate the segment */
666 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
667 afs_stats_cmperf.cacheCurrDirtyChunks--;
668 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
669 afs_CFileClose(tfile);
670 ReleaseWriteLock(&tdc->lock);
674 /* otherwise we've written some, fixup length, etc and continue with next seg */
675 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
677 afsio_skip(auio, tlen); /* advance auio over data written */
678 /* compute new file size */
679 if (offset + len > tdc->f.chunkBytes) {
680 afs_int32 tlength = offset + len;
681 afs_AdjustSize(tdc, tlength);
682 if (tdc->validPos < filePos + len)
683 tdc->validPos = filePos + len;
686 transferLength += len;
688 #if defined(AFS_SGI_ENV)
689 /* afs_xwrite handles setting m.Length */
690 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: grow the cached file length if we wrote past EOF. */
692 if (filePos > avc->m.Length) {
693 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
694 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
695 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
696 ICL_HANDLE_OFFSET(filePos));
697 avc->m.Length = filePos;
701 ReleaseWriteLock(&tdc->lock);
703 #if !defined(AFS_VM_RDWR_ENV)
705 * If write is implemented via VM, afs_DoPartialWrite() is called from
706 * the high-level write op.
709 code = afs_DoPartialWrite(avc, &treq);
717 #ifndef AFS_VM_RDWR_ENV
718 afs_FakeClose(avc, acred);
720 error = afs_CheckCode(error, &treq, 7);
721 /* This set is here so we get the CheckCode. */
/* Record the first error on the vcache so later closers can see it. */
722 if (error && !avc->vc_error)
723 avc->vc_error = error;
725 ReleaseWriteLock(&avc->lock);
726 #ifdef AFS_DARWIN80_ENV
729 osi_FreeSmallSpace(tvec);
731 #ifndef AFS_VM_RDWR_ENV
733 * If write is implemented via VM, afs_fsync() is called from the high-level
/* Synchronous-write handling: each platform exposes the "sync" request
 * differently, so the test for it is platform-specific. */
736 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
737 if (noLock && (aio & IO_SYNC)) {
740 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
741 * we're doing them because the file was opened with O_SYNCIO specified,
742 * we have to look in the u area. No single mechanism here!!
744 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
746 if (noLock && (aio & FSYNC)) {
749 if (!AFS_NFSXLATORREQ(acred))
750 afs_fsync(avc, acred);
/*
 * afs_DoPartialWrite -- if the number of locally dirty cache chunks
 * exceeds the configured maximum, start an asynchronous store of this
 * vcache's dirty data back to the server; otherwise do nothing.
 *
 * NOTE(review): the tail of this function (return of the store result,
 * closing brace) is not visible in this excerpt.
 */
756 /* do partial write if we're low on unmodified chunks */
758 afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
760 register afs_int32 code;
762 if (afs_stats_cmperf.cacheCurrDirtyChunks <=
763 afs_stats_cmperf.cacheMaxDirtyChunks)
764 return 0; /* nothing to do */
765 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
766 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
767 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length))
769 #if defined(AFS_SUN5_ENV)
/* NOTE(review): on Solaris the extra AFS_VMSYNC_INVAL flag presumably
 * also invalidates cached VM pages -- confirm against afs_segments.c. */
770 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
772 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
779 #define vno_close(X) vn_close((X), 0, NOCRED)
780 #elif defined(AFS_DUX40_ENV)
781 #define vno_close vn_close
783 /* We don't need this for AIX since:
784 * (1) aix doesn't use fileops and it calls close directly instead
785 * (where the unlocking should be done) and
786 * (2) temporarily, the aix lockf isn't supported yet.
788 * this stupid routine is used to release the flocks held on a
789 * particular file descriptor. Sun doesn't pass file descr. info
790 * through to the vnode layer, and yet we must unlock flocked files
791 * on the *appropriate* (not first, as in System V) close call. Thus
793 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
794 * file ops structure into any afs file when it gets flocked.
795 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/*
 * afs_closex -- close intercept used to release flocks held on a
 * particular file descriptor (see the comment block above this routine):
 * for AFS vnodes it clears the flock flags, performs the underlying
 * vnode close, and unlocks the file on the appropriate close call.
 *
 * Returns the vnode layer's close result.
 *
 * NOTE(review): several lines (declarations, error-exit branches, brace
 * closures) are not visible in this excerpt.
 */
799 afs_closex(register struct file *afd)
801 struct vrequest treq;
806 struct afs_fakestat_state fakestat;
808 AFS_STATCNT(afs_closex);
809 /* setup the credentials */
810 if ((code = afs_InitReq(&treq, u.u_cred)))
812 afs_InitFakeStat(&fakestat);
815 /* we're the last one. If we're an AFS vnode, clear the flags,
816 * close the file and release the lock when done. Otherwise, just
817 * let the regular close code work. */
818 if (afd->f_type == DTYPE_VNODE) {
819 tvc = VTOAFS(afd->f_data);
820 if (IsAfsVnode(AFSTOV(tvc))) {
/* Resolve fake-stat mount points to the real target vcache. */
821 code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
823 afs_PutFakeStat(&fakestat);
/* Hold the vnode across vno_close so the flock release below still has
 * a valid vcache to work on. */
826 VN_HOLD(AFSTOV(tvc));
827 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
828 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
829 code = vno_close(afd);
831 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
833 AFS_RELE(AFSTOV(tvc));
837 /* now, if close not done, do it */
839 code = vno_close(afd);
841 afs_PutFakeStat(&fakestat);
842 return code; /* return code from vnode layer */
/* handle any closing cleanup stuff */
/*
 * afs_close -- vnode close operation: release file locks held by the
 * closing process, and on a close-for-write either store the dirty data
 * back to the server directly (afs_StoreOnLastReference) or hand the
 * store to an idle background daemon.  Propagates any stored vc_error
 * to the last closer and reports common store failures to the user.
 *
 * The argument list is platform-dependent (see the #if ladder below);
 * acred is the closer's credentials, aflags the open flags.
 *
 * NOTE(review): this is a sampled excerpt; many lines (declarations,
 * #else arms, brace closures, the final return) are not visible.
 */
850 afs_close(OSI_VC_ARG(avc), aflags, lastclose,
851 #if !defined(AFS_SGI65_ENV)
855 #if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
859 lastclose_t lastclose;
860 #if !defined(AFS_SGI65_ENV)
862 #if defined(AFS_SGI64_ENV)
866 #elif defined(AFS_SUN5_ENV)
867 afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
871 afs_close(OSI_VC_ARG(avc), aflags, acred)
875 struct AFS_UCRED *acred;
877 register afs_int32 code;
878 register struct brequest *tb;
879 struct vrequest treq;
883 struct afs_fakestat_state fakestat;
886 AFS_STATCNT(afs_close);
887 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
888 ICL_TYPE_INT32, aflags);
889 code = afs_InitReq(&treq, acred);
892 afs_InitFakeStat(&fakestat);
/* Resolve fake-stat mount points to the real target vcache. */
893 code = afs_EvalFakeStat(&avc, &fakestat, &treq);
895 afs_PutFakeStat(&fakestat);
900 if (avc->flockCount) {
901 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
/* SGI: use the kernel's cleanlocks interface to drop POSIX locks for
 * this pid before releasing the AFS-level flock. */
904 #if defined(AFS_SGI_ENV)
906 afs_PutFakeStat(&fakestat);
910 /* unlock any locks for pid - could be wrong for child .. */
911 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
913 get_current_flid(&flid);
914 cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
915 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
918 cleanlocks((vnode_t *) avc, flp);
919 #else /* AFS_SGI64_ENV */
920 cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
921 #endif /* AFS_SGI64_ENV */
922 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
923 #endif /* AFS_SGI65_ENV */
924 /* afs_chkpgoob will drop and re-acquire the global lock. */
925 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
926 #elif defined(AFS_SUN5_ENV)
928 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
929 afs_PutFakeStat(&fakestat);
933 #else /* AFS_SGI_ENV */
934 if (avc->flockCount) { /* Release Lock */
935 #if defined(AFS_OSF_ENV)
936 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1 /*onlymine */ );
938 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
941 #endif /* AFS_SGI_ENV */
/* Close-for-write: the dirty data must be stored back to the server. */
942 if (aflags & (FWRITE | FTRUNC)) {
943 if (afs_BBusy() || (AFS_NFSXLATORREQ(acred)) || AFS_IS_DISCONNECTED) {
944 /* do it yourself if daemons are all busy */
945 ObtainWriteLock(&avc->lock, 124);
946 code = afs_StoreOnLastReference(avc, &treq);
947 ReleaseWriteLock(&avc->lock);
948 #if defined(AFS_SGI_ENV)
949 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
952 #if defined(AFS_SGI_ENV)
953 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
955 /* at least one daemon is idle, so ask it to do the store.
956 * Also, note that we don't lock it any more... */
957 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
958 (afs_size_t) acred->cr_uid, (afs_size_t) 0,
960 /* sleep waiting for the store to start, then retrieve error code */
961 while ((tb->flags & BUVALID) == 0) {
969 /* VNOVNODE is "acceptable" error code from close, since
970 * may happen when deleting a file on another machine while
971 * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
972 if (code == VNOVNODE || code == ENOENT)
975 /* Ensure last closer gets the error. If another thread caused
976 * DoPartialWrite and this thread does not actually store the data,
977 * it may not see the quota error.
979 ObtainWriteLock(&avc->lock, 406);
982 osi_ReleaseVM(avc, acred);
984 printf("avc->vc_error=%d\n", avc->vc_error);
985 code = avc->vc_error;
988 ReleaseWriteLock(&avc->lock);
990 /* some codes merit specific complaint */
992 afs_warnuser("afs: failed to store file (network problems)\n");
995 else if (code == ENOSPC) {
997 ("afs: failed to store file (over quota or partition full)\n");
/* NOTE(review): the second ENOSPC/EDQUOT arm below appears to be the
 * alternate branch of a conditional whose #if/#else directives are not
 * visible in this excerpt. */
1000 else if (code == ENOSPC) {
1001 afs_warnuser("afs: failed to store file (partition full)\n");
1002 } else if (code == EDQUOT) {
1003 afs_warnuser("afs: failed to store file (over quota)\n");
1007 afs_warnuser("afs: failed to store file (%d)\n", code);
1009 /* finally, we flush any text pages lying around here */
1010 hzero(avc->flushDV);
1013 #if defined(AFS_SGI_ENV)
1014 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1015 osi_Assert(avc->opens > 0);
1017 /* file open for read */
/* Read-only close: no store needed, but still surface any recorded
 * vnode error to the closer. */
1018 ObtainWriteLock(&avc->lock, 411);
1019 if (avc->vc_error) {
1020 #ifdef AFS_AIX32_ENV
1021 osi_ReleaseVM(avc, acred);
1023 code = avc->vc_error;
1027 ReleaseWriteLock(&avc->lock);
/* Last reference to a file unlinked while open: finish the deferred
 * remove now. */
1030 if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
1031 afs_remunlink(avc, 1); /* ignore any return code */
1034 AFS_DISCON_UNLOCK();
1035 afs_PutFakeStat(&fakestat);
1036 code = afs_CheckCode(code, &treq, 5);
1043 afs_fsync(OSI_VC_DECL(avc), int fflags, struct AFS_UCRED *acred, int waitfor)
1044 #else /* AFS_OSF_ENV */
1045 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
1046 afs_fsync(OSI_VC_DECL(avc), int flag, struct AFS_UCRED *acred
1047 #ifdef AFS_SGI65_ENV
1048 , off_t start, off_t stop
1049 #endif /* AFS_SGI65_ENV */
1051 #else /* !OSF && !SUN53 && !SGI */
1052 afs_fsync(OSI_VC_DECL(avc), struct AFS_UCRED *acred)
1056 register afs_int32 code;
1057 struct vrequest treq;
1058 OSI_VC_CONVERT(avc);
1061 return avc->vc_error;
1063 #if defined(AFS_SUN5_ENV)
1064 /* back out if called from NFS server */
1065 if (curthread->t_flag & T_DONTPEND)
1069 AFS_STATCNT(afs_fsync);
1070 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
1071 if ((code = afs_InitReq(&treq, acred)))
1074 #if defined(AFS_SGI_ENV)
1075 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1076 if (flag & FSYNC_INVAL)
1077 osi_VM_FSyncInval(avc);
1078 #endif /* AFS_SGI_ENV */
1080 ObtainSharedLock(&avc->lock, 18);
1082 if (avc->execsOrWriters > 0) {
1084 if (!AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
1085 /* Your average flush. */
1087 /* put the file back */
1088 UpgradeSToWLock(&avc->lock, 41);
1089 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
1090 ConvertWToSLock(&avc->lock);
1092 #if defined(AFS_DISCON_ENV)
1094 /* Disconnected flush. */
1095 ObtainWriteLock(&afs_DDirtyVCListLock, 708);
1097 if (!avc->ddirty_flags ||
1098 (avc->ddirty_flags == VDisconShadowed)) {
1100 /* Add to disconnected dirty list. */
1101 AFS_DISCON_ADD_DIRTY(avc);
1104 UpgradeSToWLock(&avc->lock, 711);
1105 /* Set disconnected write flag. */
1106 avc->ddirty_flags |= VDisconWriteFlush;
1107 ConvertWToSLock(&avc->lock);
1109 ReleaseWriteLock(&afs_DDirtyVCListLock);
1111 } /* if not disconnected */
1112 } /* if (avc->execsOrWriters > 0) */
1114 #if defined(AFS_SGI_ENV)
1115 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1116 if (code == VNOVNODE) {
1117 /* syncing an unlinked file! - non-informative to pass an errno
1118 * 102 (== VNOVNODE) to user
1123 AFS_DISCON_UNLOCK();
1124 code = afs_CheckCode(code, &treq, 33);
1125 ReleaseSharedLock(&avc->lock);