2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
19 #include <afsconfig.h>
20 #include "afs/param.h"
23 #include "afs/sysincludes.h" /* Standard vendor system headers */
24 #include "afsincludes.h" /* Afs-based standard headers */
25 #include "afs/afs_stats.h" /* statistics */
26 #include "afs/afs_cbqueue.h"
27 #include "afs/nfsclient.h"
28 #include "afs/afs_osidnlc.h"
31 extern unsigned char *afs_indexFlags;
33 /* Called by all write-on-close routines: regular afs_close,
34 * store via background daemon and store via the
35 * afs_FlushActiveVCaches routine (when CCORE is on).
36 * avc->lock must be write-locked.
/*
 * afs_StoreOnLastReference
 *
 * Push dirty data back to the fileserver on a write-close.  Per the
 * comment above this function, it is reached from afs_close, the
 * background store daemon, and afs_FlushActiveVCaches; avc->lock must
 * be held for write by the caller.
 *
 * NOTE(review): this listing is elided (embedded original line numbers
 * are non-contiguous), so return type, some declarations and closing
 * braces are not visible here.
 */
39 afs_StoreOnLastReference(register struct vcache *avc,
40 			 register struct vrequest *treq)
44     AFS_STATCNT(afs_StoreOnLastReference);
45     /* if CCore flag is set, we clear it and do the extra decrement
46      * ourselves now. If we're called by the CCore clearer, the CCore
47      * flag will already be clear, so we don't have to worry about
48      * clearing it twice. */
49     if (avc->f.states & CCore) {
50 	avc->f.states &= ~CCore;
51 #if defined(AFS_SGI_ENV)
52 	osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
54 	/* WARNING: Our linux cm code treats the execsOrWriters counter differently
55 	 * depending on the flags the file was opened with. So, if you make any
56 	 * changes to the way the execsOrWriters flag is handled check with the
	/* Undo the hold/cred taken when CCore was set in afs_FakeClose. */
59 	avc->execsOrWriters--;
60 	AFS_RELE(AFSTOV(avc));	/* VN_HOLD at set CCore(afs_FakeClose) */
61 	crfree((afs_ucred_t *)avc->linkData);	/* "crheld" in afs_FakeClose */
    /* Connected: store all dirty chunks to the server now. */
65     if (!AFS_IS_DISCONNECTED) {
68 	/* Now, send the file back. Used to require 0 writers left, but now do
69 	 * it on every close for write, since two closes in a row are harmless
70 	 * since first will clean all chunks, and second will be noop. Note that
71 	 * this will also save confusion when someone keeps a file open
72 	 * inadvertently, since with old system, writes to the server would never
75 	code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
77      * We have to do these after the above store in done: in some systems
78      * like aix they'll need to flush all the vm dirty pages to the disk via
79      * the strategy routine. During that all procedure (done under no avc
80      * locks) opens, refcounts would be zero, since it didn't reach the
81      * afs_{rd,wr} routines which means the vcache is a perfect candidate
    /* Disconnected read/write: record the close as a dirty op to replay later. */
84     } else if (AFS_IS_DISCON_RW) {
85 	afs_DisconAddDirty(avc, VDisconWriteClose, 0);
86     }		/* if not disconnected */
88 #if defined(AFS_SGI_ENV)
89     osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
    /* Balance the writer count taken when the write reference was opened. */
93     avc->execsOrWriters--;
/*
 * afs_MemWrite
 *
 * Write path for the in-memory cache: copies data from auio into
 * memory-cache chunks via afs_MemWriteUIO, one dcache chunk per loop
 * iteration, marking the vcache CDirty and extending the cached file
 * length (avc->f.m.Length) as the write passes old EOF.  Mirrors
 * afs_UFSWrite below, which writes through osi_file/VOP instead.
 *
 * NOTE(review): elided listing — error/exit paths and several
 * declarations (filePos, error, startDate, max, tuio, diff) are not
 * visible here.
 */
98 afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
99 	     afs_ucred_t *acred, int noLock)
101     afs_size_t totalLength;
102     afs_size_t transferLength;
104     afs_size_t offset, len;
105     afs_int32 tlen, trimlen;
108     register struct dcache *tdc;
113 #ifdef AFS_DARWIN80_ENV
117     struct uio *tuiop = &tuio;
118     struct iovec *tvec;		/* again, should have define */
120     register afs_int32 code;
121     struct vrequest treq;
123     AFS_STATCNT(afs_MemWrite);
    /* Fail early with any sticky per-vnode error. */
125 	return avc->vc_error;
127     startDate = osi_Time();
128     if ((code = afs_InitReq(&treq, acred)))
130     /* otherwise we read */
131     totalLength = AFS_UIO_RESID(auio);
132     filePos = AFS_UIO_OFFSET(auio);
135     afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
136 	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
137 	       ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
138 	       ICL_HANDLE_OFFSET(avc->f.m.Length));
140 	afs_MaybeWakeupTruncateDaemon();
141 	ObtainWriteLock(&avc->lock, 126);
143 #if defined(AFS_SGI_ENV)
147      * afs_xwrite handles setting m.Length
148      * and handles APPEND mode.
149      * Since we are called via strategy, we need to trim the write to
150      * the actual size of the file
152     osi_Assert(filePos <= avc->f.m.Length);
153     diff = avc->f.m.Length - filePos;
154     AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
155     totalLength = AFS_UIO_RESID(auio);
    /* O_APPEND: reposition the write at current cached EOF. */
158     if (aio & IO_APPEND) {
159 	/* append mode, start it at the right spot */
160 #if defined(AFS_SUN56_ENV)
161 	auio->uio_loffset = 0;
163 	filePos = avc->f.m.Length;
164 	AFS_UIO_SETOFFSET(auio, filePos);
168      * Note that we use startDate rather than calling osi_Time() here.
169      * This is to avoid counting lock-waiting time in file date (for ranlib).
171     avc->f.m.Date = startDate;
173 #if defined(AFS_HPUX_ENV)
174 #if defined(AFS_HPUX101_ENV)
    /* HP-UX: enforce the process RLIMIT_FSIZE file-size limit (in 512B units). */
175     if ((totalLength + filePos) >> 9 >
176 	(p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
178     if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
181 	ReleaseWriteLock(&avc->lock);
185 #ifdef AFS_VM_RDWR_ENV
187      * If write is implemented via VM, afs_FakeOpen() is called from the
188      * high-level write op.
190 	if (avc->execsOrWriters <= 0) {
191 	    afs_warn("WARNING: afs_ufswr vp=%lx, exOrW=%d\n", (unsigned long)avc,
192 		     avc->execsOrWriters);
197     avc->f.states |= CDirty;
198 #ifndef AFS_DARWIN80_ENV
199     tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
    /* Main loop: one dcache chunk per iteration until the uio is drained. */
201     while (totalLength > 0) {
202 	tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, &treq,
209 	len = totalLength;	/* write this amount by default */
210 	offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
211 	max = AFS_CHUNKTOSIZE(tdc->f.chunk);	/* max size of this chunk */
212 	if (max <= len + offset) {	/*if we'd go past the end of this chunk */
213 	    /* it won't all fit in this chunk, so write as much
218 #ifdef AFS_DARWIN80_ENV
222 	tuiop = afsio_darwin_partialcopy(auio, trimlen);
224 	/* mung uio structure to be right for this transfer */
225 	afsio_copy(auio, &tuio, tvec);
227 	afsio_trim(&tuio, trimlen);
229 	AFS_UIO_SETOFFSET(tuiop, offset);
    /* Copy this segment into the memcache entry for the chunk. */
231 	code = afs_MemWriteUIO(&tdc->f.inode, tuiop);
	/* Write failed: discard the chunk so we don't cache garbage. */
233 	    void *mep;		/* XXX in prototype world is struct memCacheEntry * */
235 	    ZapDCE(tdc);	/* bad data */
236 	    mep = afs_MemCacheOpen(&tdc->f.inode);
237 	    afs_MemCacheTruncate(mep, 0);
238 	    afs_MemCacheClose(mep);
239 	    afs_stats_cmperf.cacheCurrDirtyChunks--;
240 	    afs_indexFlags[tdc->index] &= ~IFDataMod;	/* so it does disappear */
241 	    ReleaseWriteLock(&tdc->lock);
245 	/* otherwise we've written some, fixup length, etc and continue with next seg */
246 	len = len - AFS_UIO_RESID(tuiop);	/* compute amount really transferred */
248 	afsio_skip(auio, tlen);	/* advance auio over data written */
249 	/* compute new file size */
250 	if (offset + len > tdc->f.chunkBytes) {
251 	    afs_int32 tlength = offset + len;
252 	    afs_AdjustSize(tdc, tlength);
253 	    if (tdc->validPos < filePos + len)
254 		tdc->validPos = filePos + len;
257 	transferLength += len;
259 #if defined(AFS_SGI_ENV)
260 	/* afs_xwrite handles setting m.Length */
261 	osi_Assert(filePos <= avc->f.m.Length);
	/* Write went past cached EOF: grow the cached length. */
263 	if (filePos > avc->f.m.Length) {
264 #if defined(AFS_DISCON_ENV)
265 	    if (AFS_IS_DISCON_RW)
266 		afs_PopulateDCache(avc, filePos, &treq);
268 	    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
269 		       __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
270 		       ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
271 		       ICL_HANDLE_OFFSET(filePos));
272 	    avc->f.m.Length = filePos;
275 	ReleaseWriteLock(&tdc->lock);
277 #if !defined(AFS_VM_RDWR_ENV)
279      * If write is implemented via VM, afs_DoPartialWrite() is called from
280      * the high-level write op.
283 	    code = afs_DoPartialWrite(avc, &treq);
291 #ifndef AFS_VM_RDWR_ENV
292 	afs_FakeClose(avc, acred);
    /* Make the error sticky on the vnode so a later close reports it. */
294     if (error && !avc->vc_error)
295 	avc->vc_error = error;
297 	ReleaseWriteLock(&avc->lock);
298 #ifdef AFS_DARWIN80_ENV
301     osi_FreeSmallSpace(tvec);
303     error = afs_CheckCode(error, &treq, 6);
308 /* called on writes */
/*
 * afs_UFSWrite
 *
 * Write path for the on-disk (UFS-file) cache: copies data from auio
 * into cache files, one dcache chunk per loop iteration, using the
 * platform-specific vnode write operation (the long #elif ladder
 * below), marking the vcache CDirty and extending the cached length
 * as the write passes old EOF.  Structure parallels afs_MemWrite
 * above; only the per-chunk I/O mechanism differs.
 *
 * NOTE(review): elided listing — error/exit paths and several
 * declarations (filePos, error, startDate, max, tuio, diff) are not
 * visible here.
 */
310 afs_UFSWrite(register struct vcache *avc, struct uio *auio, int aio,
311 	     afs_ucred_t *acred, int noLock)
313     afs_size_t totalLength;
314     afs_size_t transferLength;
316     afs_size_t offset, len;
321     register struct dcache *tdc;
326 #ifdef AFS_DARWIN80_ENV
330     struct uio *tuiop = &tuio;
331     struct iovec *tvec;		/* again, should have define */
333     struct osi_file *tfile;
334     register afs_int32 code;
335     struct vrequest treq;
337     AFS_STATCNT(afs_UFSWrite);
339 	return avc->vc_error;
    /* Disconnected and not in disconnected-RW mode: cannot write at all. */
341     if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW)
344     startDate = osi_Time();
345     if ((code = afs_InitReq(&treq, acred)))
347     /* otherwise we read */
348     totalLength = AFS_UIO_RESID(auio);
349     filePos = AFS_UIO_OFFSET(auio);
352     afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
353 	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
354 	       ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
355 	       ICL_HANDLE_OFFSET(avc->f.m.Length));
357 	afs_MaybeWakeupTruncateDaemon();
358 	ObtainWriteLock(&avc->lock, 556);
360 #if defined(AFS_SGI_ENV)
364      * afs_xwrite handles setting m.Length
365      * and handles APPEND mode.
366      * Since we are called via strategy, we need to trim the write to
367      * the actual size of the file
369     osi_Assert(filePos <= avc->f.m.Length);
370     diff = avc->f.m.Length - filePos;
371     AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
372     totalLength = AFS_UIO_RESID(auio);
    /* O_APPEND: reposition the write at current cached EOF. */
375     if (aio & IO_APPEND) {
376 	/* append mode, start it at the right spot */
377 #if defined(AFS_SUN56_ENV)
378 	auio->uio_loffset = 0;
380 	filePos = avc->f.m.Length;
381 	AFS_UIO_SETOFFSET(auio, avc->f.m.Length);
385      * Note that we use startDate rather than calling osi_Time() here.
386      * This is to avoid counting lock-waiting time in file date (for ranlib).
388     avc->f.m.Date = startDate;
390 #if defined(AFS_HPUX_ENV)
391 #if defined(AFS_HPUX101_ENV)
    /* HP-UX: enforce the process RLIMIT_FSIZE file-size limit (in 512B units). */
392     if ((totalLength + filePos) >> 9 >
393 	p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
395     if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
398 	ReleaseWriteLock(&avc->lock);
402 #ifdef AFS_VM_RDWR_ENV
404      * If write is implemented via VM, afs_FakeOpen() is called from the
405      * high-level write op.
407 	if (avc->execsOrWriters <= 0) {
408 	    afs_warn("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
409 		     avc->execsOrWriters);
414     avc->f.states |= CDirty;
415 #ifndef AFS_DARWIN80_ENV
416     tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
    /* Main loop: one dcache chunk per iteration until the uio is drained. */
418     while (totalLength > 0) {
419 	tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, &treq,
	/* Open the underlying cache file that backs this chunk. */
425 	tfile = (struct osi_file *)osi_UFSOpen(&tdc->f.inode);
426 	len = totalLength;	/* write this amount by default */
427 	offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
428 	max = AFS_CHUNKTOSIZE(tdc->f.chunk);	/* max size of this chunk */
429 	if (max <= len + offset) {	/*if we'd go past the end of this chunk */
430 	    /* it won't all fit in this chunk, so write as much
435 #ifdef AFS_DARWIN80_ENV
439 	tuiop = afsio_darwin_partialcopy(auio, trimlen);
441 	/* mung uio structure to be right for this transfer */
442 	afsio_copy(auio, &tuio, tvec);
444 	afsio_trim(&tuio, trimlen);
446 	AFS_UIO_SETOFFSET(tuiop, offset);
	/* Per-platform vnode write of this segment into the cache file. */
448 #if defined(AFS_AIX41_ENV)
451 	    VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL,
452 		      NULL, afs_osi_credp);
454 #elif defined(AFS_AIX32_ENV)
455 	code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
456 #elif defined(AFS_AIX_ENV)
458 	    VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) & offset,
459 		      &tuio, NULL, NULL, -1);
460 #elif defined(AFS_SUN5_ENV)
462 #ifdef AFS_SUN510_ENV
466 	    VOP_RWLOCK(tfile->vnode, 1, &ct);
467 	    code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
468 	    VOP_RWUNLOCK(tfile->vnode, 1, &ct);
471 	    VOP_RWLOCK(tfile->vnode, 1);
472 	    code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
473 	    VOP_RWUNLOCK(tfile->vnode, 1);
478 		("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
479 #elif defined(AFS_SGI_ENV)
	/* SGI: flag the vcache while UFS I/O is in flight. */
481 	avc->f.states |= CWritingUFS;
482 	AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
483 	AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
484 	AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
485 	avc->f.states &= ~CWritingUFS;
487 #elif defined(AFS_HPUX100_ENV)
490 	    code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
493 #elif defined(AFS_LINUX20_ENV)
495 	code = osi_rdwr(tfile, &tuio, UIO_WRITE);
497 #elif defined(AFS_DARWIN80_ENV)
499 	code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
501 #elif defined(AFS_DARWIN_ENV)
503 	VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
504 	code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
505 	VOP_UNLOCK(tfile->vnode, 0, current_proc());
507 #elif defined(AFS_FBSD80_ENV)
509 	VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
510 	code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
511 	VOP_UNLOCK(tfile->vnode, 0);
513 #elif defined(AFS_FBSD50_ENV)
515 	VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
516 	code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
517 	VOP_UNLOCK(tfile->vnode, 0, curthread);
519 #elif defined(AFS_NBSD_ENV)
521 	VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
522 	code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
523 	VOP_UNLOCK(tfile->vnode, 0);
525 #elif defined(AFS_XBSD_ENV)
527 	VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
528 	code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
529 	VOP_UNLOCK(tfile->vnode, 0, curproc);
533 	tuio.uio_fpflags &= ~FSYNCIO;	/* don't do sync io */
535 	code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
	/* Write failed: truncate the cache file and drop the chunk's data. */
539 	    ZapDCE(tdc);	/* bad data */
540 	    osi_UFSTruncate(tfile, 0);	/* fake truncate the segment */
541 	    afs_AdjustSize(tdc, 0);	/* sets f.chunkSize to 0 */
542 	    afs_stats_cmperf.cacheCurrDirtyChunks--;
543 	    afs_indexFlags[tdc->index] &= ~IFDataMod;	/* so it does disappear */
544 	    afs_CFileClose(tfile);
545 	    ReleaseWriteLock(&tdc->lock);
549 	/* otherwise we've written some, fixup length, etc and continue with next seg */
550 	len = len - AFS_UIO_RESID(tuiop);	/* compute amount really transferred */
552 	afsio_skip(auio, tlen);	/* advance auio over data written */
553 	/* compute new file size */
554 	if (offset + len > tdc->f.chunkBytes) {
555 	    afs_int32 tlength = offset + len;
556 	    afs_AdjustSize(tdc, tlength);
557 	    if (tdc->validPos < filePos + len)
558 		tdc->validPos = filePos + len;
561 	transferLength += len;
563 #if defined(AFS_SGI_ENV)
564 	/* afs_xwrite handles setting m.Length */
565 	osi_Assert(filePos <= avc->f.m.Length);
	/* Write went past cached EOF: grow the cached length. */
567 	if (filePos > avc->f.m.Length) {
568 #if defined(AFS_DISCON_ENV)
569 	    if (AFS_IS_DISCON_RW)
570 		afs_PopulateDCache(avc, filePos, &treq);
572 	    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
573 		       __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
574 		       ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
575 		       ICL_HANDLE_OFFSET(filePos));
576 	    avc->f.m.Length = filePos;
580 	ReleaseWriteLock(&tdc->lock);
582 #if !defined(AFS_VM_RDWR_ENV)
584      * If write is implemented via VM, afs_DoPartialWrite() is called from
585      * the high-level write op.
588 	    code = afs_DoPartialWrite(avc, &treq);
596 #ifndef AFS_VM_RDWR_ENV
597 	afs_FakeClose(avc, acred);
599     error = afs_CheckCode(error, &treq, 7);
600     /* This set is here so we get the CheckCode. */
601     if (error && !avc->vc_error)
602 	avc->vc_error = error;
604 	ReleaseWriteLock(&avc->lock);
605 #ifdef AFS_DARWIN80_ENV
608     osi_FreeSmallSpace(tvec);
610 #ifndef AFS_VM_RDWR_ENV
612      * If write is implemented via VM, afs_fsync() is called from the high-level
    /* Synchronous-write request detection differs per platform (see HP note). */
615 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
616     if (noLock && (aio & IO_SYNC)) {
619     /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
620      * we're doing them because the file was opened with O_SYNCIO specified,
621      * we have to look in the u area. No single mechanism here!!
623     if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
625     if (noLock && (aio & FSYNC)) {
628 	if (!AFS_NFSXLATORREQ(acred))
629 	    afs_fsync(avc, acred);
635 /* do partial write if we're low on unmodified chunks */
/*
 * afs_DoPartialWrite
 *
 * Store dirty chunks back to the server asynchronously when the cache
 * holds more dirty chunks than cacheMaxDirtyChunks allows.  Returns 0
 * without doing anything while under the threshold or while
 * disconnected.  On Solaris the store also invalidates VM pages
 * (AFS_VMSYNC_INVAL).
 */
637 afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
639     register afs_int32 code;
641     if (afs_stats_cmperf.cacheCurrDirtyChunks <=
642 	afs_stats_cmperf.cacheMaxDirtyChunks
643 	|| AFS_IS_DISCONNECTED)
644 	return 0;		/* nothing to do */
645     /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
646     afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
647 	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
649 #if defined(AFS_SUN5_ENV)
650     code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
652     code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
657 /* handle any closing cleanup stuff */
/*
 * afs_close
 *
 * VFS close entry point (signature varies per platform, see the #elif
 * ladder).  Releases this caller's advisory locks, and on a close of a
 * file opened for write (FWRITE|FTRUNC) stores dirty data back —
 * either inline via afs_StoreOnLastReference when the background
 * daemons are busy / we are the NFS translator / disconnected, or by
 * queuing a BOP_STORE request to an idle daemon.  Store errors are
 * reported to the last closer and translated to user warnings.
 *
 * NOTE(review): elided listing — several declarations, #else arms and
 * the function's return are not visible here.
 */
#if defined(AFS_SGI65_ENV)
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
#elif defined(AFS_SGI64_ENV)
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
	  off_t offset, afs_ucred_t *acred, struct flid *flp)
#elif defined(AFS_SGI_ENV)
/* NOTE(review): this arm appears to be missing a ',' after `lastclose`
 * (compare the AFS_SGI64_ENV arm above) — confirm against the full file. */
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose
	  off_t offset, afs_ucred_t *acred)
#elif defined(AFS_SUN5_ENV)
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, int count, offset_t offset,
afs_close(OSI_VC_DECL(avc), afs_int32 aflags, afs_ucred_t *acred)
    register afs_int32 code;
    register struct brequest *tb;
    struct vrequest treq;
    struct afs_fakestat_state fakestat;
    AFS_STATCNT(afs_close);
    afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, aflags);
    code = afs_InitReq(&treq, acred);
    /* Resolve fakestat mount points to the real target vcache. */
    afs_InitFakeStat(&fakestat);
    code = afs_EvalFakeStat(&avc, &fakestat, &treq);
	afs_PutFakeStat(&fakestat);
    /* Drop any flock-style locks this closer still holds. */
    if (avc->flockCount) {
	HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
#if defined(AFS_SGI_ENV)
	afs_PutFakeStat(&fakestat);
    /* unlock any locks for pid - could be wrong for child .. */
    AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
# ifdef AFS_SGI65_ENV
    get_current_flid(&flid);
    cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
    HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
# ifdef AFS_SGI64_ENV
    cleanlocks((vnode_t *) avc, flp);
# else /* AFS_SGI64_ENV */
    cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
# endif /* AFS_SGI64_ENV */
    HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
# endif /* AFS_SGI65_ENV */
    /* afs_chkpgoob will drop and re-acquire the global lock. */
    afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
#elif defined(AFS_SUN5_ENV)
    /* The vfs layer may call this repeatedly with higher "count"; only
     * on the last close (i.e. count = 1) we should actually proceed
	afs_PutFakeStat(&fakestat);
    if (avc->flockCount) {	/* Release Lock */
	HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
    /* Write-close: dirty data must be stored back to the fileserver. */
    if (aflags & (FWRITE | FTRUNC)) {
	if (afs_BBusy() || (AFS_NFSXLATORREQ(acred)) || AFS_IS_DISCONNECTED) {
	    /* do it yourself if daemons are all busy */
	    ObtainWriteLock(&avc->lock, 124);
	    code = afs_StoreOnLastReference(avc, &treq);
	    ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
	    AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#if defined(AFS_SGI_ENV)
	    AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
	    /* at least one daemon is idle, so ask it to do the store.
	     * Also, note that we don't lock it any more... */
	    tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
			    (afs_size_t) afs_cr_uid(acred), (afs_size_t) 0,
			    (void *)0, (void *)0, (void *)0);
	    /* sleep waiting for the store to start, then retrieve error code */
	    while ((tb->flags & BUVALID) == 0) {
	/* VNOVNODE is "acceptable" error code from close, since
	 * may happen when deleting a file on another machine while
	 * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
	if (code == VNOVNODE || code == ENOENT)
	/* Ensure last closer gets the error. If another thread caused
	 * DoPartialWrite and this thread does not actually store the data,
	 * it may not see the quota error.
	ObtainWriteLock(&avc->lock, 406);
	    osi_ReleaseVM(avc, acred);
	    /* printf("avc->vc_error=%d\n", avc->vc_error); */
	    code = avc->vc_error;
	ReleaseWriteLock(&avc->lock);
	/* some codes merit specific complaint */
	    afs_warnuser("afs: failed to store file (network problems)\n");
	else if (code == ENOSPC) {
	    ("afs: failed to store file (over quota or partition full)\n");
	else if (code == ENOSPC) {
	    afs_warnuser("afs: failed to store file (partition full)\n");
	} else if (code == EDQUOT) {
	    afs_warnuser("afs: failed to store file (over quota)\n");
	    afs_warnuser("afs: failed to store file (%d)\n", code);
	/* finally, we flush any text pages lying around here */
#if defined(AFS_SGI_ENV)
	AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
	osi_Assert(avc->opens > 0);
	/* file open for read */
	ObtainWriteLock(&avc->lock, 411);
	    osi_ReleaseVM(avc, acred);
	    code = avc->vc_error;
	ReleaseWriteLock(&avc->lock);
    afs_PutFakeStat(&fakestat);
    code = afs_CheckCode(code, &treq, 5);
833 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
834 afs_fsync(OSI_VC_DECL(avc), int flag, afs_ucred_t *acred
835 # ifdef AFS_SGI65_ENV
836 , off_t start, off_t stop
837 # endif /* AFS_SGI65_ENV */
839 #else /* !SUN53 && !SGI */
840 afs_fsync(OSI_VC_DECL(avc), afs_ucred_t *acred)
843 register afs_int32 code;
844 struct vrequest treq;
848 return avc->vc_error;
850 #if defined(AFS_SUN5_ENV)
851 /* back out if called from NFS server */
852 if (curthread->t_flag & T_DONTPEND)
856 AFS_STATCNT(afs_fsync);
857 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
858 if ((code = afs_InitReq(&treq, acred)))
861 #if defined(AFS_SGI_ENV)
862 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
863 if (flag & FSYNC_INVAL)
864 osi_VM_FSyncInval(avc);
865 #endif /* AFS_SGI_ENV */
867 ObtainSharedLock(&avc->lock, 18);
869 if (avc->execsOrWriters > 0) {
871 if (!AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
872 /* Your average flush. */
874 /* put the file back */
875 UpgradeSToWLock(&avc->lock, 41);
876 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
877 ConvertWToSLock(&avc->lock);
879 #if defined(AFS_DISCON_ENV)
882 UpgradeSToWLock(&avc->lock, 711);
883 afs_DisconAddDirty(avc, VDisconWriteFlush, 1);
884 ConvertWToSLock(&avc->lock);
886 } /* if not disconnected */
887 } /* if (avc->execsOrWriters > 0) */
889 #if defined(AFS_SGI_ENV)
890 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
891 if (code == VNOVNODE) {
892 /* syncing an unlinked file! - non-informative to pass an errno
893 * 102 (== VNOVNODE) to user
899 code = afs_CheckCode(code, &treq, 33);
900 ReleaseSharedLock(&avc->lock);