2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "afs/param.h"
26 #include "afs/sysincludes.h" /* Standard vendor system headers */
27 #include "afsincludes.h" /* Afs-based standard headers */
28 #include "afs/afs_stats.h" /* statistics */
29 #include "afs/afs_cbqueue.h"
30 #include "afs/nfsclient.h"
31 #include "afs/afs_osidnlc.h"
34 extern unsigned char *afs_indexFlags;
36 /* Called by all write-on-close routines: regular afs_close,
37 * store via background daemon and store via the
38 * afs_FlushActiveVCaches routine (when CCORE is on).
39 * avc->lock must be write-locked.
 *
 * Pushes the file's dirty chunks back to the fileserver when connected,
 * or records the vcache on the disconnected-dirty list when in
 * disconnected read-write mode, then drops one execsOrWriters reference.
 * Returns the result of afs_StoreAllSegments (0 when nothing needed
 * storing).  NOTE(review): several lines of this routine are elided in
 * this excerpt; the visible code is annotated as-is.
 */
42 afs_StoreOnLastReference(register struct vcache *avc,
43 register struct vrequest *treq)
47 AFS_STATCNT(afs_StoreOnLastReference);
48 /* if CCore flag is set, we clear it and do the extra decrement
49 * ourselves now. If we're called by the CCore clearer, the CCore
50 * flag will already be clear, so we don't have to worry about
51 * clearing it twice. */
52 if (avc->states & CCore) {
53 avc->states &= ~CCore;
54 #if defined(AFS_SGI_ENV)
55 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
57 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
58 * depending on the flags the file was opened with. So, if you make any
59 * changes to the way the execsOrWriters flag is handled check with the
 /* Undo the reference taken when CCore was set in afs_FakeClose. */
62 avc->execsOrWriters--;
63 AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose) */
64 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
 /* Connected operation: write the data back to the fileserver now. */
68 if (!AFS_IS_DISCONNECTED) {
71 /* Now, send the file back. Used to require 0 writers left, but now do
72 * it on every close for write, since two closes in a row are harmless
73 * since first will clean all chunks, and second will be noop. Note that
74 * this will also save confusion when someone keeps a file open
75 * inadvertently, since with old system, writes to the server would never
78 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
80 * We have to do these after the above store in done: in some systems
81 * like aix they'll need to flush all the vm dirty pages to the disk via
82 * the strategy routine. During that all procedure (done under no avc
83 * locks) opens, refcounts would be zero, since it didn't reach the
84 * afs_{rd,wr} routines which means the vcache is a perfect candidate
 /* Disconnected read-write: remember the write locally instead of storing. */
89 } else if (AFS_IS_DISCON_RW) {
92 if (!avc->ddirty_flags ||
93 (avc->ddirty_flags == VDisconShadowed)) {
95 /* Add to disconnected dirty list. */
96 AFS_DISCON_ADD_DIRTY(avc, 1);
99 /* Set disconnected write flag. */
100 avc->ddirty_flags |= VDisconWriteClose;
102 } /* if not disconnected */
104 #if defined(AFS_SGI_ENV)
105 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
 /* Drop the writer reference held across this store. */
109 avc->execsOrWriters--;
 /*
 * afs_MemWrite -- write path for the in-memory cache.  Copies the data
 * described by auio into memory-cache dcache chunks of avc, one chunk per
 * loop iteration, marking each chunk dirty and extending the cached file
 * length as needed.  aio carries the open/IO flags (e.g. IO_APPEND);
 * noLock is set when the caller already holds the needed locks.  Returns
 * 0 on success or an error filtered through afs_CheckCode.
 * NOTE(review): lines are elided in this excerpt; annotations below apply
 * to the visible code only.
 */
116 afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
117 struct AFS_UCRED *acred, int noLock)
119 afs_size_t totalLength;
120 afs_size_t transferLength;
122 afs_size_t offset, len;
123 afs_int32 tlen, trimlen;
126 register struct dcache *tdc;
131 #ifdef AFS_DARWIN80_ENV
135 struct uio *tuiop = &tuio;
136 struct iovec *tvec; /* again, should have define */
138 register afs_int32 code;
139 struct vrequest treq;
141 AFS_STATCNT(afs_MemWrite);
 /* fail fast if the vnode already carries a sticky error */
143 return avc->vc_error;
145 startDate = osi_Time();
146 if ((code = afs_InitReq(&treq, acred)))
148 /* otherwise we read */
149 totalLength = AFS_UIO_RESID(auio);
150 filePos = AFS_UIO_OFFSET(auio);
153 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
154 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
155 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
156 ICL_HANDLE_OFFSET(avc->m.Length));
158 afs_MaybeWakeupTruncateDaemon();
159 ObtainWriteLock(&avc->lock, 126);
161 #if defined(AFS_SGI_ENV)
165 * afs_xwrite handles setting m.Length
166 * and handles APPEND mode.
167 * Since we are called via strategy, we need to trim the write to
168 * the actual size of the file
170 osi_Assert(filePos <= avc->m.Length);
171 diff = avc->m.Length - filePos;
172 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
173 totalLength = AFS_UIO_RESID(auio);
176 if (aio & IO_APPEND) {
177 /* append mode, start it at the right spot */
178 #if defined(AFS_SUN56_ENV)
179 auio->uio_loffset = 0;
181 filePos = avc->m.Length;
182 AFS_UIO_SETOFFSET(auio, filePos);
186 * Note that we use startDate rather than calling osi_Time() here.
187 * This is to avoid counting lock-waiting time in file date (for ranlib).
189 avc->m.Date = startDate;
191 #if defined(AFS_HPUX_ENV)
192 #if defined(AFS_HPUX101_ENV)
 /* enforce the process file-size rlimit; sizes are compared in
 * 512-byte sectors, hence the >> 9 */
193 if ((totalLength + filePos) >> 9 >
194 (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
196 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
199 ReleaseWriteLock(&avc->lock);
203 #ifdef AFS_VM_RDWR_ENV
205 * If write is implemented via VM, afs_FakeOpen() is called from the
206 * high-level write op.
208 if (avc->execsOrWriters <= 0) {
209 printf("WARNING: afs_ufswr vp=%lx, exOrW=%d\n", (unsigned long)avc,
210 avc->execsOrWriters);
215 avc->states |= CDirty;
216 #ifndef AFS_DARWIN80_ENV
217 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
 /* Main loop: consume the caller's data one cache chunk at a time. */
219 while (totalLength > 0) {
221 * The following line is necessary because afs_GetDCache with
222 * flag == 4 expects the length field to be filled. It decides
223 * from this whether it's necessary to fetch data into the chunk
224 * before writing or not (when the whole chunk is overwritten!).
226 len = totalLength; /* write this amount by default */
228 tdc = afs_FindDCache(avc, filePos);
230 ObtainWriteLock(&tdc->lock, 653);
231 } else if (afs_blocksUsed >
232 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
 /* cache nearly full: only reuse an existing chunk that is still
 * current and not mid-fetch; otherwise wait for the cache to drain */
233 tdc = afs_FindDCache(avc, filePos);
235 ObtainWriteLock(&tdc->lock, 654);
236 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
237 || (tdc->dflags & DFFetching)) {
238 ReleaseWriteLock(&tdc->lock);
244 afs_MaybeWakeupTruncateDaemon();
245 while (afs_blocksUsed >
246 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
 /* drop the vcache lock while sleeping so the truncate
 * daemon can make progress */
247 ReleaseWriteLock(&avc->lock);
248 if (afs_blocksUsed - afs_blocksDiscarded >
249 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
250 afs_WaitForCacheDrain = 1;
251 afs_osi_Sleep(&afs_WaitForCacheDrain);
253 afs_MaybeFreeDiscardedDCache();
254 afs_MaybeWakeupTruncateDaemon();
255 ObtainWriteLock(&avc->lock, 506);
257 avc->states |= CDirty;
258 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
260 ObtainWriteLock(&tdc->lock, 655);
263 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
265 ObtainWriteLock(&tdc->lock, 656);
 /* first write into this chunk: account for a new dirty chunk */
271 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
272 afs_stats_cmperf.cacheCurrDirtyChunks++;
273 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
275 if (!(tdc->f.states & DWriting)) {
276 /* don't mark entry as mod if we don't have to */
277 tdc->f.states |= DWriting;
278 tdc->dflags |= DFEntryMod;
280 len = totalLength; /* write this amount by default */
281 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
282 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
283 if (max <= len + offset) { /*if we'd go past the end of this chunk */
284 /* it won't all fit in this chunk, so write as much
289 #ifdef AFS_DARWIN80_ENV
293 tuiop = afsio_darwin_partialcopy(auio, trimlen);
295 /* mung uio structure to be right for this transfer */
296 afsio_copy(auio, &tuio, tvec);
298 afsio_trim(&tuio, trimlen);
300 AFS_UIO_SETOFFSET(tuiop, offset);
 /* the actual copy into the memory-cache entry */
302 code = afs_MemWriteUIO(tdc->f.inode, tuiop);
304 void *mep; /* XXX in prototype world is struct memCacheEntry * */
 /* write failed: discard the chunk's data rather than risk
 * serving a partially written chunk later */
306 ZapDCE(tdc); /* bad data */
307 mep = afs_MemCacheOpen(tdc->f.inode);
308 afs_MemCacheTruncate(mep, 0);
309 afs_MemCacheClose(mep);
310 afs_stats_cmperf.cacheCurrDirtyChunks--;
311 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
312 ReleaseWriteLock(&tdc->lock);
316 /* otherwise we've written some, fixup length, etc and continue with next seg */
317 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
319 afsio_skip(auio, tlen); /* advance auio over data written */
320 /* compute new file size */
321 if (offset + len > tdc->f.chunkBytes) {
322 afs_int32 tlength = offset + len;
323 afs_AdjustSize(tdc, tlength);
324 if (tdc->validPos < filePos + len)
325 tdc->validPos = filePos + len;
328 transferLength += len;
330 #if defined(AFS_SGI_ENV)
331 /* afs_xwrite handles setting m.Length */
332 osi_Assert(filePos <= avc->m.Length);
 /* grow the cached file length if this write extended the file */
334 if (filePos > avc->m.Length) {
335 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
336 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
337 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
338 ICL_HANDLE_OFFSET(filePos));
339 avc->m.Length = filePos;
342 ReleaseWriteLock(&tdc->lock);
344 #if !defined(AFS_VM_RDWR_ENV)
346 * If write is implemented via VM, afs_DoPartialWrite() is called from
347 * the high-level write op.
350 code = afs_DoPartialWrite(avc, &treq);
358 #ifndef AFS_VM_RDWR_ENV
359 afs_FakeClose(avc, acred);
 /* record the first error on the vnode so later closers observe it */
361 if (error && !avc->vc_error)
362 avc->vc_error = error;
364 ReleaseWriteLock(&avc->lock);
365 #ifdef AFS_DARWIN80_ENV
368 osi_FreeSmallSpace(tvec);
370 error = afs_CheckCode(error, &treq, 6);
375 /* called on writes */
 /*
 * afs_UFSWrite -- write path for the on-disk (UFS) cache.  Mirrors
 * afs_MemWrite but writes each segment into the chunk's cache file via
 * the platform-specific vnode/file write primitive selected by the
 * #ifdef ladder below.  aio carries the open/IO flags (e.g. IO_APPEND);
 * noLock is set when the caller already holds the needed locks.  Returns
 * 0 on success or an error filtered through afs_CheckCode.
 * NOTE(review): lines are elided in this excerpt; annotations apply to
 * the visible code only.
 */
377 afs_UFSWrite(register struct vcache *avc, struct uio *auio, int aio,
378 struct AFS_UCRED *acred, int noLock)
380 afs_size_t totalLength;
381 afs_size_t transferLength;
383 afs_size_t offset, len;
388 register struct dcache *tdc;
393 #ifdef AFS_DARWIN80_ENV
397 struct uio *tuiop = &tuio;
398 struct iovec *tvec; /* again, should have define */
400 struct osi_file *tfile;
401 register afs_int32 code;
402 struct vrequest treq;
404 AFS_STATCNT(afs_UFSWrite);
 /* fail fast if the vnode already carries a sticky error */
406 return avc->vc_error;
 /* disconnected and not in read-write mode: writes are not possible */
408 if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW)
411 startDate = osi_Time();
412 if ((code = afs_InitReq(&treq, acred)))
414 /* otherwise we read */
415 totalLength = AFS_UIO_RESID(auio);
416 filePos = AFS_UIO_OFFSET(auio);
419 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
420 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
421 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
422 ICL_HANDLE_OFFSET(avc->m.Length));
424 afs_MaybeWakeupTruncateDaemon();
425 ObtainWriteLock(&avc->lock, 556);
427 #if defined(AFS_SGI_ENV)
431 * afs_xwrite handles setting m.Length
432 * and handles APPEND mode.
433 * Since we are called via strategy, we need to trim the write to
434 * the actual size of the file
436 osi_Assert(filePos <= avc->m.Length);
437 diff = avc->m.Length - filePos;
438 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
439 totalLength = AFS_UIO_RESID(auio);
442 if (aio & IO_APPEND) {
443 /* append mode, start it at the right spot */
444 #if defined(AFS_SUN56_ENV)
445 auio->uio_loffset = 0;
447 filePos = avc->m.Length;
448 AFS_UIO_SETOFFSET(auio, avc->m.Length);
452 * Note that we use startDate rather than calling osi_Time() here.
453 * This is to avoid counting lock-waiting time in file date (for ranlib).
455 avc->m.Date = startDate;
457 #if defined(AFS_HPUX_ENV)
458 #if defined(AFS_HPUX101_ENV)
 /* enforce the process file-size rlimit; sizes are compared in
 * 512-byte sectors, hence the >> 9 */
459 if ((totalLength + filePos) >> 9 >
460 p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
462 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
465 ReleaseWriteLock(&avc->lock);
469 #ifdef AFS_VM_RDWR_ENV
471 * If write is implemented via VM, afs_FakeOpen() is called from the
472 * high-level write op.
474 if (avc->execsOrWriters <= 0) {
475 printf("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
476 avc->execsOrWriters);
481 avc->states |= CDirty;
482 #ifndef AFS_DARWIN80_ENV
483 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
 /* Main loop: consume the caller's data one cache chunk at a time. */
485 while (totalLength > 0) {
487 * The following line is necessary because afs_GetDCache with
488 * flag == 4 expects the length field to be filled. It decides
489 * from this whether it's necessary to fetch data into the chunk
490 * before writing or not (when the whole chunk is overwritten!).
492 len = totalLength; /* write this amount by default */
493 /* read the cached info */
495 tdc = afs_FindDCache(avc, filePos);
497 ObtainWriteLock(&tdc->lock, 657);
498 } else if (afs_blocksUsed >
499 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
 /* cache nearly full: only reuse an existing chunk that is still
 * current and not mid-fetch; otherwise wait for the cache to drain */
500 tdc = afs_FindDCache(avc, filePos);
502 ObtainWriteLock(&tdc->lock, 658);
503 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
504 || (tdc->dflags & DFFetching)) {
505 ReleaseWriteLock(&tdc->lock);
511 afs_MaybeWakeupTruncateDaemon();
512 while (afs_blocksUsed >
513 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
 /* drop the vcache lock while sleeping so the truncate
 * daemon can make progress */
514 ReleaseWriteLock(&avc->lock);
515 if (afs_blocksUsed - afs_blocksDiscarded >
516 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
517 afs_WaitForCacheDrain = 1;
518 afs_osi_Sleep(&afs_WaitForCacheDrain);
520 afs_MaybeFreeDiscardedDCache();
521 afs_MaybeWakeupTruncateDaemon();
522 ObtainWriteLock(&avc->lock, 509);
524 avc->states |= CDirty;
525 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
527 ObtainWriteLock(&tdc->lock, 659);
530 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
532 ObtainWriteLock(&tdc->lock, 660);
 /* first write into this chunk: account for a new dirty chunk */
538 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
539 afs_stats_cmperf.cacheCurrDirtyChunks++;
540 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
542 if (!(tdc->f.states & DWriting)) {
543 /* don't mark entry as mod if we don't have to */
544 tdc->f.states |= DWriting;
545 tdc->dflags |= DFEntryMod;
 /* open the chunk's backing cache file */
547 #if defined(LINUX_USE_FH)
548 tfile = (struct osi_file *)osi_UFSOpen_fh(&tdc->f.fh, tdc->f.fh_type);
550 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
552 len = totalLength; /* write this amount by default */
553 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
554 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
555 if (max <= len + offset) { /*if we'd go past the end of this chunk */
556 /* it won't all fit in this chunk, so write as much
561 #ifdef AFS_DARWIN80_ENV
565 tuiop = afsio_darwin_partialcopy(auio, trimlen);
567 /* mung uio structure to be right for this transfer */
568 afsio_copy(auio, &tuio, tvec);
570 afsio_trim(&tuio, trimlen);
572 AFS_UIO_SETOFFSET(tuiop, offset);
 /* Platform-specific write into the cache file.  Each branch below
 * performs the same logical operation: write tuio into tfile and
 * leave the result in code. */
574 #if defined(AFS_AIX41_ENV)
577 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL,
578 NULL, afs_osi_credp);
580 #elif defined(AFS_AIX32_ENV)
581 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
582 #elif defined(AFS_AIX_ENV)
584 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) & offset,
585 &tuio, NULL, NULL, -1);
586 #elif defined(AFS_SUN5_ENV)
588 #ifdef AFS_SUN510_ENV
592 VOP_RWLOCK(tfile->vnode, 1, &ct);
593 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
594 VOP_RWUNLOCK(tfile->vnode, 1, &ct);
597 VOP_RWLOCK(tfile->vnode, 1);
598 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
599 VOP_RWUNLOCK(tfile->vnode, 1);
604 ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
605 #elif defined(AFS_SGI_ENV)
 /* CWritingUFS marks the vcache as mid-UFS-write for other code paths */
607 avc->states |= CWritingUFS;
608 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
609 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
610 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
611 avc->states &= ~CWritingUFS;
613 #elif defined(AFS_OSF_ENV)
 /* temporarily swap in AFS credentials for the write */
615 struct ucred *tmpcred = u.u_cred;
616 u.u_cred = afs_osi_credp;
617 tuio.uio_rw = UIO_WRITE;
619 VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, code);
623 #elif defined(AFS_HPUX100_ENV)
626 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
629 #elif defined(AFS_LINUX20_ENV)
631 code = osi_rdwr(tfile, &tuio, UIO_WRITE);
633 #elif defined(AFS_DARWIN80_ENV)
635 code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
637 #elif defined(AFS_DARWIN_ENV)
639 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
640 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
641 VOP_UNLOCK(tfile->vnode, 0, current_proc());
643 #elif defined(AFS_FBSD80_ENV)
645 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
646 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
647 VOP_UNLOCK(tfile->vnode, 0);
649 #elif defined(AFS_FBSD50_ENV)
651 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
652 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
653 VOP_UNLOCK(tfile->vnode, 0, curthread);
655 #elif defined(AFS_XBSD_ENV)
657 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
658 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
659 VOP_UNLOCK(tfile->vnode, 0, curproc);
663 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
665 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
 /* write failed: truncate the chunk's cache file so we never
 * serve a partially written chunk later */
669 ZapDCE(tdc); /* bad data */
670 osi_UFSTruncate(tfile, 0); /* fake truncate the segment */
671 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
672 afs_stats_cmperf.cacheCurrDirtyChunks--;
673 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
674 afs_CFileClose(tfile);
675 ReleaseWriteLock(&tdc->lock);
679 /* otherwise we've written some, fixup length, etc and continue with next seg */
680 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
682 afsio_skip(auio, tlen); /* advance auio over data written */
683 /* compute new file size */
684 if (offset + len > tdc->f.chunkBytes) {
685 afs_int32 tlength = offset + len;
686 afs_AdjustSize(tdc, tlength);
687 if (tdc->validPos < filePos + len)
688 tdc->validPos = filePos + len;
691 transferLength += len;
693 #if defined(AFS_SGI_ENV)
694 /* afs_xwrite handles setting m.Length */
695 osi_Assert(filePos <= avc->m.Length);
 /* grow the cached file length if this write extended the file */
697 if (filePos > avc->m.Length) {
698 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
699 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
700 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
701 ICL_HANDLE_OFFSET(filePos));
702 avc->m.Length = filePos;
706 ReleaseWriteLock(&tdc->lock);
708 #if !defined(AFS_VM_RDWR_ENV)
710 * If write is implemented via VM, afs_DoPartialWrite() is called from
711 * the high-level write op.
714 code = afs_DoPartialWrite(avc, &treq);
722 #ifndef AFS_VM_RDWR_ENV
723 afs_FakeClose(avc, acred);
725 error = afs_CheckCode(error, &treq, 7);
726 /* This set is here so we get the CheckCode. */
727 if (error && !avc->vc_error)
728 avc->vc_error = error;
730 ReleaseWriteLock(&avc->lock);
731 #ifdef AFS_DARWIN80_ENV
734 osi_FreeSmallSpace(tvec);
736 #ifndef AFS_VM_RDWR_ENV
738 * If write is implemented via VM, afs_fsync() is called from the high-level
 /* Synchronous-write handling: each platform signals "sync" differently. */
741 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
742 if (noLock && (aio & IO_SYNC)) {
745 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
746 * we're doing them because the file was opened with O_SYNCIO specified,
747 * we have to look in the u area. No single mechanism here!!
749 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
751 if (noLock && (aio & FSYNC)) {
 /* the NFS translator handles its own syncing */
754 if (!AFS_NFSXLATORREQ(acred))
755 afs_fsync(avc, acred);
761 /* do partial write if we're low on unmodified chunks */
 /*
 * afs_DoPartialWrite -- if the count of dirty cache chunks exceeds the
 * configured maximum, asynchronously push this file's data back to the
 * server so clean chunks become available.  Returns 0 when under the
 * threshold; otherwise the afs_StoreAllSegments result.
 */
763 afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
765 register afs_int32 code;
767 if (afs_stats_cmperf.cacheCurrDirtyChunks <=
768 afs_stats_cmperf.cacheMaxDirtyChunks)
769 return 0; /* nothing to do */
770 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
771 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
772 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length))
 /* NOTE(review): trailing semicolon for the Trace2 call is on an elided
 * line in this excerpt. */;
774 #if defined(AFS_SUN5_ENV)
 /* on Solaris also invalidate VM pages so pageout does not redirty them */
775 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
777 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
 /* map vno_close onto each platform's native vnode-close primitive */
784 #define vno_close(X) vn_close((X), 0, NOCRED)
785 #elif defined(AFS_DUX40_ENV)
786 #define vno_close vn_close
788 /* We don't need this for AIX since:
789 * (1) aix doesn't use fileops and it calls close directly instead
790 * (where the unlocking should be done) and
791 * (2) temporarily, the aix lockf isn't supported yet.
793 * this stupid routine is used to release the flocks held on a
794 * particular file descriptor. Sun doesn't pass file descr. info
795 * through to the vnode layer, and yet we must unlock flocked files
796 * on the *appropriate* (not first, as in System V) close call. Thus
798 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
799 * file ops structure into any afs file when it gets flocked.
800 * N.B: Intercepting close syscall doesn't trap aborts or exit system
 /*
 * afs_closex -- close a file descriptor, releasing any flocks held on
 * it for AFS vnodes before (or after) the native vno_close.  Returns
 * the code from the vnode layer's close.
 */
804 afs_closex(register struct file *afd)
806 struct vrequest treq;
811 struct afs_fakestat_state fakestat;
813 AFS_STATCNT(afs_closex);
814 /* setup the credentials */
815 if ((code = afs_InitReq(&treq, u.u_cred)))
817 afs_InitFakeStat(&fakestat);
820 /* we're the last one. If we're an AFS vnode, clear the flags,
821 * close the file and release the lock when done. Otherwise, just
822 * let the regular close code work. */
823 if (afd->f_type == DTYPE_VNODE) {
824 tvc = VTOAFS(afd->f_data);
825 if (IsAfsVnode(AFSTOV(tvc))) {
826 code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
828 afs_PutFakeStat(&fakestat);
 /* hold the vnode across vno_close so it stays valid while we
 * release any flocks held on it */
831 VN_HOLD(AFSTOV(tvc));
832 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
833 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
834 code = vno_close(afd);
836 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
838 AFS_RELE(AFSTOV(tvc));
842 /* now, if close not done, do it */
844 code = vno_close(afd);
846 afs_PutFakeStat(&fakestat);
847 return code; /* return code from vnode layer */
852 /* handle any closing cleanup stuff */
 /*
 * afs_close -- vnode-layer close entry point.  Releases advisory locks
 * held by the closing process, and on a close of a file opened for
 * writing (FWRITE | FTRUNC) stores the dirty data back to the server,
 * either inline via afs_StoreOnLastReference or by queueing a BOP_STORE
 * request to an idle background daemon.  Returns 0 or an error filtered
 * through afs_CheckCode; VNOVNODE/ENOENT from the store are suppressed
 * (file deleted elsewhere while open here).
 * NOTE(review): several lines of this routine are elided in this
 * excerpt; the visible code is annotated as-is.
 *
 * Fixes applied: the AFS_SGI65_ENV signature used the undeclared type
 * "lastclost_t" (the AFS_SGI64_ENV variant two lines below spells it
 * "lastclose_t"), and the AFS_SGI_ENV signature was missing the comma
 * between "lastclose" and "off_t offset".  Both were compile errors in
 * their respective #ifdef branches.
 */
854 #if defined(AFS_SGI65_ENV)
855 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
856 struct AFS_UCRED *acred)
857 #elif defined(AFS_SGI64_ENV)
858 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
859 off_t offset, struct AFS_UCRED *acred, struct flid *flp)
860 #elif defined(AFS_SGI_ENV)
861 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
862 off_t offset, struct AFS_UCRED *acred)
863 #elif defined(AFS_SUN5_ENV)
864 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, int count, offset_t offset,
865 struct AFS_UCRED *acred)
867 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, struct AFS_UCRED *acred)
870 register afs_int32 code;
871 register struct brequest *tb;
872 struct vrequest treq;
876 struct afs_fakestat_state fakestat;
879 AFS_STATCNT(afs_close);
880 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
881 ICL_TYPE_INT32, aflags);
882 code = afs_InitReq(&treq, acred);
885 afs_InitFakeStat(&fakestat);
886 code = afs_EvalFakeStat(&avc, &fakestat, &treq);
888 afs_PutFakeStat(&fakestat);
 /* release any advisory locks the closing process still holds */
893 if (avc->flockCount) {
894 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
897 #if defined(AFS_SGI_ENV)
899 afs_PutFakeStat(&fakestat);
903 /* unlock any locks for pid - could be wrong for child .. */
904 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
906 get_current_flid(&flid);
907 cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
908 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
911 cleanlocks((vnode_t *) avc, flp);
912 #else /* AFS_SGI64_ENV */
913 cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
914 #endif /* AFS_SGI64_ENV */
915 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
916 #endif /* AFS_SGI65_ENV */
917 /* afs_chkpgoob will drop and re-acquire the global lock. */
918 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
919 #elif defined(AFS_SUN5_ENV)
921 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
922 afs_PutFakeStat(&fakestat);
926 #else /* AFS_SGI_ENV */
927 if (avc->flockCount) { /* Release Lock */
928 #if defined(AFS_OSF_ENV)
929 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1 /*onlymine */ );
931 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
934 #endif /* AFS_SGI_ENV */
 /* file was open for write (or truncated): store the data back */
935 if (aflags & (FWRITE | FTRUNC)) {
936 if (afs_BBusy() || (AFS_NFSXLATORREQ(acred)) || AFS_IS_DISCONNECTED) {
937 /* do it yourself if daemons are all busy */
938 ObtainWriteLock(&avc->lock, 124);
939 code = afs_StoreOnLastReference(avc, &treq);
940 ReleaseWriteLock(&avc->lock);
941 #if defined(AFS_SGI_ENV)
942 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
945 #if defined(AFS_SGI_ENV)
946 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
948 /* at least one daemon is idle, so ask it to do the store.
949 * Also, note that we don't lock it any more... */
950 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
951 (afs_size_t) acred->cr_uid, (afs_size_t) 0,
953 /* sleep waiting for the store to start, then retrieve error code */
954 while ((tb->flags & BUVALID) == 0) {
962 /* VNOVNODE is "acceptable" error code from close, since
963 * may happen when deleting a file on another machine while
964 * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
965 if (code == VNOVNODE || code == ENOENT)
968 /* Ensure last closer gets the error. If another thread caused
969 * DoPartialWrite and this thread does not actually store the data,
970 * it may not see the quota error.
972 ObtainWriteLock(&avc->lock, 406);
975 osi_ReleaseVM(avc, acred);
977 printf("avc->vc_error=%d\n", avc->vc_error);
978 code = avc->vc_error;
981 ReleaseWriteLock(&avc->lock);
983 /* some codes merit specific complaint */
985 afs_warnuser("afs: failed to store file (network problems)\n");
988 else if (code == ENOSPC) {
990 ("afs: failed to store file (over quota or partition full)\n");
993 else if (code == ENOSPC) {
994 afs_warnuser("afs: failed to store file (partition full)\n");
995 } else if (code == EDQUOT) {
996 afs_warnuser("afs: failed to store file (over quota)\n");
1000 afs_warnuser("afs: failed to store file (%d)\n", code);
1002 /* finally, we flush any text pages lying around here */
1003 hzero(avc->flushDV);
1006 #if defined(AFS_SGI_ENV)
1007 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1008 osi_Assert(avc->opens > 0);
1010 /* file open for read */
1011 ObtainWriteLock(&avc->lock, 411);
 /* surface any sticky vnode error on a read-only close too */
1012 if (avc->vc_error) {
1013 #ifdef AFS_AIX32_ENV
1014 osi_ReleaseVM(avc, acred);
1016 code = avc->vc_error;
1020 ReleaseWriteLock(&avc->lock);
 /* last user reference to an unlinked-but-open file: finish the remove */
1023 if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
1024 afs_remunlink(avc, 1); /* ignore any return code */
1027 AFS_DISCON_UNLOCK();
1028 afs_PutFakeStat(&fakestat);
1029 code = afs_CheckCode(code, &treq, 5);
 /*
 * afs_fsync -- vnode-layer fsync entry point.  If the file has active
 * writers/executors, synchronously stores all dirty segments to the
 * fileserver (connected mode) or queues the vcache on the
 * disconnected-dirty list with VDisconWriteFlush set (disconnected RW
 * mode).  VNOVNODE from the store is suppressed on SGI (syncing an
 * unlinked file).  NOTE(review): lines are elided in this excerpt and
 * the routine continues past the visible end; annotations apply to the
 * visible code only.
 */
1036 afs_fsync(OSI_VC_DECL(avc), int fflags, struct AFS_UCRED *acred, int waitfor)
1037 #else /* AFS_OSF_ENV */
1038 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
1039 afs_fsync(OSI_VC_DECL(avc), int flag, struct AFS_UCRED *acred
1040 #ifdef AFS_SGI65_ENV
1041 , off_t start, off_t stop
1042 #endif /* AFS_SGI65_ENV */
1044 #else /* !OSF && !SUN53 && !SGI */
1045 afs_fsync(OSI_VC_DECL(avc), struct AFS_UCRED *acred)
1049 register afs_int32 code;
1050 struct vrequest treq;
1051 OSI_VC_CONVERT(avc);
 /* fail fast if the vnode already carries a sticky error */
1054 return avc->vc_error;
1056 #if defined(AFS_SUN5_ENV)
1057 /* back out if called from NFS server */
1058 if (curthread->t_flag & T_DONTPEND)
1062 AFS_STATCNT(afs_fsync);
1063 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
1064 if ((code = afs_InitReq(&treq, acred)))
1067 #if defined(AFS_SGI_ENV)
1068 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1069 if (flag & FSYNC_INVAL)
1070 osi_VM_FSyncInval(avc);
1071 #endif /* AFS_SGI_ENV */
1073 ObtainSharedLock(&avc->lock, 18);
 /* only flush when the file is actually open for write/exec */
1075 if (avc->execsOrWriters > 0) {
1077 if (!AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
1078 /* Your average flush. */
1080 /* put the file back */
1081 UpgradeSToWLock(&avc->lock, 41);
1082 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
1083 ConvertWToSLock(&avc->lock);
1085 #if defined(AFS_DISCON_ENV)
1087 /* Disconnected flush. */
1088 ObtainWriteLock(&afs_DDirtyVCListLock, 708);
1090 if (!avc->ddirty_flags ||
1091 (avc->ddirty_flags == VDisconShadowed)) {
1093 /* Add to disconnected dirty list. */
1094 AFS_DISCON_ADD_DIRTY(avc, 1);
1097 UpgradeSToWLock(&avc->lock, 711);
1098 /* Set disconnected write flag. */
1099 avc->ddirty_flags |= VDisconWriteFlush;
1100 ConvertWToSLock(&avc->lock);
1102 ReleaseWriteLock(&afs_DDirtyVCListLock);
1104 } /* if not disconnected */
1105 } /* if (avc->execsOrWriters > 0) */
1107 #if defined(AFS_SGI_ENV)
1108 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1109 if (code == VNOVNODE) {
1110 /* syncing an unlinked file! - non-informative to pass an errno
1111 * 102 (== VNOVNODE) to user
1116 AFS_DISCON_UNLOCK();
1117 code = afs_CheckCode(code, &treq, 33);
1118 ReleaseSharedLock(&avc->lock);