2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "afs/param.h"
26 #include "afs/sysincludes.h" /* Standard vendor system headers */
27 #include "afsincludes.h" /* Afs-based standard headers */
28 #include "afs/afs_stats.h" /* statistics */
29 #include "afs/afs_cbqueue.h"
30 #include "afs/nfsclient.h"
31 #include "afs/afs_osidnlc.h"
34 extern unsigned char *afs_indexFlags;
36 /* Called by all write-on-close routines: regular afs_close,
37 * store via background daemon and store via the
38 * afs_FlushActiveVCaches routine (when CCORE is on).
39 * avc->lock must be write-locked.
/*
 * Store dirty data for a vcache back to the fileserver on last reference.
 * Called from the write-on-close paths (see comment above): regular
 * afs_close, the background store daemon, and afs_FlushActiveVCaches
 * when CCore is set.  Caller must hold avc->lock write-locked.
 *
 * avc  - the vcache whose dirty chunks are to be stored (write-locked).
 * treq - vrequest carrying the credentials for the store RPCs.
 *
 * NOTE(review): this chunked view is missing interior lines (return type,
 * some braces/#endifs); code below is annotated but otherwise untouched.
 */
42 afs_StoreOnLastReference(register struct vcache *avc,
43 register struct vrequest *treq)
47 AFS_STATCNT(afs_StoreOnLastReference);
48 /* if CCore flag is set, we clear it and do the extra decrement
49 * ourselves now. If we're called by the CCore clearer, the CCore
50 * flag will already be clear, so we don't have to worry about
51 * clearing it twice. */
52 if (avc->states & CCore) {
53 avc->states &= ~CCore;
54 #if defined(AFS_SGI_ENV)
55 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
57 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
58 * depending on the flags the file was opened with. So, if you make any
59 * changes to the way the execsOrWriters flag is handled check with the
/* Undo the extra hold/ref taken by afs_FakeClose when CCore was set. */
62 avc->execsOrWriters--;
63 AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose) */
64 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
68 if (!AFS_IS_DISCONNECTED) {
71 /* Now, send the file back. Used to require 0 writers left, but now do
72 * it on every close for write, since two closes in a row are harmless
73 * since first will clean all chunks, and second will be noop. Note that
74 * this will also save confusion when someone keeps a file open
75 * inadvertently, since with old system, writes to the server would never
78 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
80 * We have to do these after the above store is done: on some systems
81 * like aix they'll need to flush all the vm dirty pages to the disk via
82 * the strategy routine. During that whole procedure (done under no avc
83 * locks) opens, refcounts would be zero, since it didn't reach the
84 * afs_{rd,wr} routines which means the vcache is a perfect candidate
89 } else if (AFS_IS_DISCON_RW) {
/* Disconnected read-write mode: defer the store by queueing the
 * vcache on the disconnected dirty list instead of contacting the
 * fileserver. */
92 if (!avc->ddirty_flags ||
93 (avc->ddirty_flags == VDisconShadowed)) {
95 /* Add to disconnected dirty list. */
96 AFS_DISCON_ADD_DIRTY(avc, 1);
99 /* Set disconnected write flag. */
100 avc->ddirty_flags |= VDisconWriteClose;
102 } /* if not disconnected */
104 #if defined(AFS_SGI_ENV)
105 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
/* Drop the writer count taken by the matching open-for-write. */
109 avc->execsOrWriters--;
/*
 * Write path for the in-memory cache backend: copy data from the user's
 * uio into memory-cache dcache chunks (afs_MemWriteUIO), one chunk per
 * loop iteration, updating chunk/file lengths as it goes.
 *
 * avc    - target vcache.
 * auio   - source uio describing the user's buffer, offset and residual.
 * aio    - ioflag bits (e.g. IO_APPEND is honored below).
 * acred  - credentials for the request.
 * noLock - caller already holds the relevant locks when nonzero
 *          (NOTE(review): lock-skipping branches are not visible in this
 *          chunked view — confirm against the full source).
 */
114 afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
115 struct AFS_UCRED *acred, int noLock)
117 afs_size_t totalLength;
118 afs_size_t transferLength;
120 afs_size_t offset, len;
121 afs_int32 tlen, trimlen;
124 register struct dcache *tdc;
129 #ifdef AFS_DARWIN80_ENV
133 struct uio *tuiop = &tuio;
134 struct iovec *tvec; /* again, should have define */
136 register afs_int32 code;
137 struct vrequest treq;
139 AFS_STATCNT(afs_MemWrite);
/* Fail fast if a previous operation already marked the vcache bad. */
141 return avc->vc_error;
143 startDate = osi_Time();
144 if ((code = afs_InitReq(&treq, acred)))
146 /* otherwise we read */
147 totalLength = AFS_UIO_RESID(auio);
148 filePos = AFS_UIO_OFFSET(auio);
151 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
152 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
153 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
154 ICL_HANDLE_OFFSET(avc->m.Length));
156 afs_MaybeWakeupTruncateDaemon();
157 ObtainWriteLock(&avc->lock, 126);
159 #if defined(AFS_SGI_ENV)
/*
 * afs_xwrite handles setting m.Length
 * and handles APPEND mode.
 * Since we are called via strategy, we need to trim the write to
 * the actual size of the file
 */
168 osi_Assert(filePos <= avc->m.Length);
169 diff = avc->m.Length - filePos;
170 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
171 totalLength = AFS_UIO_RESID(auio);
174 if (aio & IO_APPEND) {
175 /* append mode, start it at the right spot */
176 #if defined(AFS_SUN56_ENV)
177 auio->uio_loffset = 0;
179 filePos = avc->m.Length;
180 AFS_UIO_SETOFFSET(auio, filePos);
/*
 * Note that we use startDate rather than calling osi_Time() here.
 * This is to avoid counting lock-waiting time in file date (for ranlib).
 */
187 avc->m.Date = startDate;
189 #if defined(AFS_HPUX_ENV)
190 #if defined(AFS_HPUX101_ENV)
/* HP-UX: enforce the process RLIMIT_FSIZE file-size limit (in 512-byte
 * units, hence the >> 9) before doing any work. */
191 if ((totalLength + filePos) >> 9 >
192 (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
194 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
197 ReleaseWriteLock(&avc->lock);
201 #ifdef AFS_VM_RDWR_ENV
/*
 * If write is implemented via VM, afs_FakeOpen() is called from the
 * high-level write op.
 */
206 if (avc->execsOrWriters <= 0) {
207 printf("WARNING: afs_ufswr vp=%lx, exOrW=%d\n", (unsigned long)avc,
208 avc->execsOrWriters);
214 avc->states |= CDirty;
214 #ifndef AFS_DARWIN80_ENV
215 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main loop: one dcache chunk per iteration until all bytes are moved. */
217 while (totalLength > 0) {
218 tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, &treq,
225 len = totalLength; /* write this amount by default */
226 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
227 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
228 if (max <= len + offset) { /*if we'd go past the end of this chunk */
229 /* it won't all fit in this chunk, so write as much
234 #ifdef AFS_DARWIN80_ENV
238 tuiop = afsio_darwin_partialcopy(auio, trimlen);
240 /* mung uio structure to be right for this transfer */
241 afsio_copy(auio, &tuio, tvec);
243 afsio_trim(&tuio, trimlen);
245 AFS_UIO_SETOFFSET(tuiop, offset);
247 code = afs_MemWriteUIO(tdc->f.inode, tuiop);
/* Error path: the chunk may hold partial data, so invalidate it and
 * throw its contents away rather than risk serving bad data. */
249 void *mep; /* XXX in prototype world is struct memCacheEntry * */
251 ZapDCE(tdc); /* bad data */
252 mep = afs_MemCacheOpen(tdc->f.inode);
253 afs_MemCacheTruncate(mep, 0);
254 afs_MemCacheClose(mep);
255 afs_stats_cmperf.cacheCurrDirtyChunks--;
256 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
257 ReleaseWriteLock(&tdc->lock);
261 /* otherwise we've written some, fixup length, etc and continue with next seg */
262 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
264 afsio_skip(auio, tlen); /* advance auio over data written */
265 /* compute new file size */
266 if (offset + len > tdc->f.chunkBytes) {
267 afs_int32 tlength = offset + len;
268 afs_AdjustSize(tdc, tlength);
269 if (tdc->validPos < filePos + len)
270 tdc->validPos = filePos + len;
273 transferLength += len;
275 #if defined(AFS_SGI_ENV)
276 /* afs_xwrite handles setting m.Length */
277 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: grow the cached file length if we wrote past EOF. */
279 if (filePos > avc->m.Length) {
280 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
281 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
282 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
283 ICL_HANDLE_OFFSET(filePos));
284 avc->m.Length = filePos;
287 ReleaseWriteLock(&tdc->lock);
289 #if !defined(AFS_VM_RDWR_ENV)
/*
 * If write is implemented via VM, afs_DoPartialWrite() is called from
 * the high-level write op.
 */
295 code = afs_DoPartialWrite(avc, &treq);
303 #ifndef AFS_VM_RDWR_ENV
304 afs_FakeClose(avc, acred);
306 if (error && !avc->vc_error)
307 avc->vc_error = error;
309 ReleaseWriteLock(&avc->lock);
310 #ifdef AFS_DARWIN80_ENV
313 osi_FreeSmallSpace(tvec);
315 error = afs_CheckCode(error, &treq, 6);
320 /* called on writes */
/*
 * Write path for the on-disk (UFS) cache backend: copy data from the
 * user's uio into cache files on the local disk, one dcache chunk per
 * loop iteration, using the per-platform vnode write operation.
 *
 * avc    - target vcache.
 * auio   - source uio describing the user's buffer, offset and residual.
 * aio    - ioflag bits (IO_APPEND, IO_SYNC honored below).
 * acred  - credentials for the request.
 * noLock - caller already holds the relevant locks when nonzero
 *          (NOTE(review): lock-skipping branches are not fully visible in
 *          this chunked view — confirm against the full source).
 */
322 afs_UFSWrite(register struct vcache *avc, struct uio *auio, int aio,
323 struct AFS_UCRED *acred, int noLock)
325 afs_size_t totalLength;
326 afs_size_t transferLength;
328 afs_size_t offset, len;
333 register struct dcache *tdc;
338 #ifdef AFS_DARWIN80_ENV
342 struct uio *tuiop = &tuio;
343 struct iovec *tvec; /* again, should have define */
345 struct osi_file *tfile;
346 register afs_int32 code;
347 struct vrequest treq;
349 AFS_STATCNT(afs_UFSWrite);
/* Fail fast if a previous operation already marked the vcache bad. */
351 return avc->vc_error;
/* Disconnected and not in read-write mode: writes cannot be queued. */
353 if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW)
356 startDate = osi_Time();
357 if ((code = afs_InitReq(&treq, acred)))
359 /* otherwise we read */
360 totalLength = AFS_UIO_RESID(auio);
361 filePos = AFS_UIO_OFFSET(auio);
364 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
365 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
366 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
367 ICL_HANDLE_OFFSET(avc->m.Length));
369 afs_MaybeWakeupTruncateDaemon();
370 ObtainWriteLock(&avc->lock, 556);
372 #if defined(AFS_SGI_ENV)
/*
 * afs_xwrite handles setting m.Length
 * and handles APPEND mode.
 * Since we are called via strategy, we need to trim the write to
 * the actual size of the file
 */
381 osi_Assert(filePos <= avc->m.Length);
382 diff = avc->m.Length - filePos;
383 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
384 totalLength = AFS_UIO_RESID(auio);
387 if (aio & IO_APPEND) {
388 /* append mode, start it at the right spot */
389 #if defined(AFS_SUN56_ENV)
390 auio->uio_loffset = 0;
392 filePos = avc->m.Length;
393 AFS_UIO_SETOFFSET(auio, avc->m.Length);
/*
 * Note that we use startDate rather than calling osi_Time() here.
 * This is to avoid counting lock-waiting time in file date (for ranlib).
 */
400 avc->m.Date = startDate;
402 #if defined(AFS_HPUX_ENV)
403 #if defined(AFS_HPUX101_ENV)
/* HP-UX: enforce the process RLIMIT_FSIZE file-size limit (in 512-byte
 * units, hence the >> 9) before doing any work. */
404 if ((totalLength + filePos) >> 9 >
405 p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
407 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
410 ReleaseWriteLock(&avc->lock);
414 #ifdef AFS_VM_RDWR_ENV
/*
 * If write is implemented via VM, afs_FakeOpen() is called from the
 * high-level write op.
 */
419 if (avc->execsOrWriters <= 0) {
420 printf("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
421 avc->execsOrWriters);
426 avc->states |= CDirty;
427 #ifndef AFS_DARWIN80_ENV
428 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main loop: one dcache chunk per iteration until all bytes are moved. */
430 while (totalLength > 0) {
431 tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, &treq,
437 #if defined(LINUX_USE_FH)
438 tfile = (struct osi_file *)osi_UFSOpen_fh(&tdc->f.fh, tdc->f.fh_type);
440 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
442 len = totalLength; /* write this amount by default */
443 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
444 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
445 if (max <= len + offset) { /*if we'd go past the end of this chunk */
446 /* it won't all fit in this chunk, so write as much
451 #ifdef AFS_DARWIN80_ENV
455 tuiop = afsio_darwin_partialcopy(auio, trimlen);
457 /* mung uio structure to be right for this transfer */
458 afsio_copy(auio, &tuio, tvec);
460 afsio_trim(&tuio, trimlen);
462 AFS_UIO_SETOFFSET(tuiop, offset);
/* Per-platform dispatch: write the trimmed uio into the cache file via
 * the local filesystem's vnode write op, taking whatever vnode locking
 * the platform requires around it. */
464 #if defined(AFS_AIX41_ENV)
467 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL,
468 NULL, afs_osi_credp);
470 #elif defined(AFS_AIX32_ENV)
471 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
472 #elif defined(AFS_AIX_ENV)
474 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) & offset,
475 &tuio, NULL, NULL, -1);
476 #elif defined(AFS_SUN5_ENV)
478 #ifdef AFS_SUN510_ENV
482 VOP_RWLOCK(tfile->vnode, 1, &ct);
483 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
484 VOP_RWUNLOCK(tfile->vnode, 1, &ct);
487 VOP_RWLOCK(tfile->vnode, 1);
488 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
489 VOP_RWUNLOCK(tfile->vnode, 1);
494 ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
495 #elif defined(AFS_SGI_ENV)
/* CWritingUFS flags that we are inside the local-FS write, so other
 * code can tell the vnode is busy in UFS. */
497 avc->states |= CWritingUFS;
498 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
499 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
500 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
501 avc->states &= ~CWritingUFS;
503 #elif defined(AFS_OSF_ENV)
/* OSF: temporarily swap in the AFS credentials on the u area. */
505 struct ucred *tmpcred = u.u_cred;
506 u.u_cred = afs_osi_credp;
507 tuio.uio_rw = UIO_WRITE;
509 VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, code);
513 #elif defined(AFS_HPUX100_ENV)
516 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
519 #elif defined(AFS_LINUX20_ENV)
521 code = osi_rdwr(tfile, &tuio, UIO_WRITE);
523 #elif defined(AFS_DARWIN80_ENV)
525 code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
527 #elif defined(AFS_DARWIN_ENV)
529 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
530 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
531 VOP_UNLOCK(tfile->vnode, 0, current_proc());
533 #elif defined(AFS_FBSD80_ENV)
535 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
536 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
537 VOP_UNLOCK(tfile->vnode, 0);
539 #elif defined(AFS_FBSD50_ENV)
541 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
542 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
543 VOP_UNLOCK(tfile->vnode, 0, curthread);
545 #elif defined(AFS_XBSD_ENV)
547 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
548 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
549 VOP_UNLOCK(tfile->vnode, 0, curproc);
553 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
555 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
/* Error path: the chunk may hold partial data, so invalidate it and
 * truncate its backing file rather than risk serving bad data. */
559 ZapDCE(tdc); /* bad data */
560 osi_UFSTruncate(tfile, 0); /* fake truncate the segment */
561 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
562 afs_stats_cmperf.cacheCurrDirtyChunks--;
563 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
564 afs_CFileClose(tfile);
565 ReleaseWriteLock(&tdc->lock);
569 /* otherwise we've written some, fixup length, etc and continue with next seg */
570 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
572 afsio_skip(auio, tlen); /* advance auio over data written */
573 /* compute new file size */
574 if (offset + len > tdc->f.chunkBytes) {
575 afs_int32 tlength = offset + len;
576 afs_AdjustSize(tdc, tlength);
577 if (tdc->validPos < filePos + len)
578 tdc->validPos = filePos + len;
581 transferLength += len;
583 #if defined(AFS_SGI_ENV)
584 /* afs_xwrite handles setting m.Length */
585 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: grow the cached file length if we wrote past EOF. */
587 if (filePos > avc->m.Length) {
588 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
589 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
590 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
591 ICL_HANDLE_OFFSET(filePos));
592 avc->m.Length = filePos;
596 ReleaseWriteLock(&tdc->lock);
598 #if !defined(AFS_VM_RDWR_ENV)
/*
 * If write is implemented via VM, afs_DoPartialWrite() is called from
 * the high-level write op.
 */
604 code = afs_DoPartialWrite(avc, &treq);
612 #ifndef AFS_VM_RDWR_ENV
613 afs_FakeClose(avc, acred);
615 error = afs_CheckCode(error, &treq, 7);
616 /* This set is here so we get the CheckCode. */
617 if (error && !avc->vc_error)
618 avc->vc_error = error;
620 ReleaseWriteLock(&avc->lock);
621 #ifdef AFS_DARWIN80_ENV
624 osi_FreeSmallSpace(tvec);
626 #ifndef AFS_VM_RDWR_ENV
/*
 * If write is implemented via VM, afs_fsync() is called from the high-level
 */
631 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
/* Synchronous write requested: push the data to the server now. */
632 if (noLock && (aio & IO_SYNC)) {
635 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
636 * we're doing them because the file was opened with O_SYNCIO specified,
637 * we have to look in the u area. No single mechanism here!!
639 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
641 if (noLock && (aio & FSYNC)) {
644 if (!AFS_NFSXLATORREQ(acred))
645 afs_fsync(avc, acred);
651 /* do partial write if we're low on unmodified chunks */
/*
 * If the number of dirty cache chunks exceeds the configured maximum,
 * push all of this vcache's dirty segments back to the fileserver
 * asynchronously so clean chunks become available again.  Returns 0
 * immediately when under the threshold, otherwise the store result.
 */
653 afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
655 register afs_int32 code;
657 if (afs_stats_cmperf.cacheCurrDirtyChunks <=
658 afs_stats_cmperf.cacheMaxDirtyChunks)
659 return 0; /* nothing to do */
660 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
661 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
662 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
664 #if defined(AFS_SUN5_ENV)
/* Solaris also invalidates VM pages so the VM system stays coherent. */
665 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
667 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
674 #define vno_close(X) vn_close((X), 0, NOCRED)
675 #elif defined(AFS_DUX40_ENV)
676 #define vno_close vn_close
678 /* We don't need this for AIX since:
679 * (1) aix doesn't use fileops and it calls close directly instead
680 * (where the unlocking should be done) and
681 * (2) temporarily, the aix lockf isn't supported yet.
683 * this stupid routine is used to release the flocks held on a
684 * particular file descriptor. Sun doesn't pass file descr. info
685 * through to the vnode layer, and yet we must unlock flocked files
686 * on the *appropriate* (not first, as in System V) close call. Thus
688 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
689 * file ops structure into any afs file when it gets flocked.
690 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/*
 * Close hook installed on flocked AFS files (see comment above): release
 * any flocks held through this file descriptor on the *last* close of
 * that descriptor, then perform the ordinary vnode close.
 *
 * afd - the file structure being closed.
 * Returns the vnode layer's close result.
 */
694 afs_closex(register struct file *afd)
696 struct vrequest treq;
701 struct afs_fakestat_state fakestat;
703 AFS_STATCNT(afs_closex);
704 /* setup the credentials */
705 if ((code = afs_InitReq(&treq, u.u_cred)))
707 afs_InitFakeStat(&fakestat);
710 /* we're the last one. If we're an AFS vnode, clear the flags,
711 * close the file and release the lock when done. Otherwise, just
712 * let the regular close code work. */
713 if (afd->f_type == DTYPE_VNODE) {
714 tvc = VTOAFS(afd->f_data);
715 if (IsAfsVnode(AFSTOV(tvc))) {
716 code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
718 afs_PutFakeStat(&fakestat);
/* Hold the vnode across the close so HandleFlock can still use it. */
721 VN_HOLD(AFSTOV(tvc));
722 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
723 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
724 code = vno_close(afd);
726 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
728 AFS_RELE(AFSTOV(tvc));
732 /* now, if close not done, do it */
734 code = vno_close(afd);
736 afs_PutFakeStat(&fakestat);
737 return code; /* return code from vnode layer */
742 /* handle any closing cleanup stuff */
/*
 * VFS close entry point for AFS vnodes.  Releases file locks held by the
 * closing process, and on close-for-write either stores dirty data back
 * to the fileserver directly or hands the store to a background daemon
 * (BOP_STORE).  The signature varies per platform, selected below.
 */
744 #if defined(AFS_SGI65_ENV)
745 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
746 struct AFS_UCRED *acred)
747 #elif defined(AFS_SGI64_ENV)
748 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
749 off_t offset, struct AFS_UCRED *acred, struct flid *flp)
750 #elif defined(AFS_SGI_ENV)
751 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose
752 off_t offset, struct AFS_UCRED *acred)
753 #elif defined(AFS_SUN5_ENV)
754 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, int count, offset_t offset,
755 struct AFS_UCRED *acred)
757 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, struct AFS_UCRED *acred)
760 register afs_int32 code;
761 register struct brequest *tb;
762 struct vrequest treq;
766 struct afs_fakestat_state fakestat;
769 AFS_STATCNT(afs_close);
770 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
771 ICL_TYPE_INT32, aflags);
772 code = afs_InitReq(&treq, acred);
775 afs_InitFakeStat(&fakestat);
776 code = afs_EvalFakeStat(&avc, &fakestat, &treq);
778 afs_PutFakeStat(&fakestat);
/* Release any file locks this closer still holds on the vcache. */
783 if (avc->flockCount) {
784 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
787 #if defined(AFS_SGI_ENV)
789 afs_PutFakeStat(&fakestat);
793 /* unlock any locks for pid - could be wrong for child .. */
794 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
796 get_current_flid(&flid);
797 cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
798 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
801 cleanlocks((vnode_t *) avc, flp);
802 #else /* AFS_SGI64_ENV */
803 cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
804 #endif /* AFS_SGI64_ENV */
805 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
806 #endif /* AFS_SGI65_ENV */
807 /* afs_chkpgoob will drop and re-acquire the global lock. */
808 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
809 #elif defined(AFS_SUN5_ENV)
811 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
812 afs_PutFakeStat(&fakestat);
816 #else /* AFS_SGI_ENV */
817 if (avc->flockCount) { /* Release Lock */
818 #if defined(AFS_OSF_ENV)
819 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1 /*onlymine */ );
821 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
824 #endif /* AFS_SGI_ENV */
/* Close after write (or truncate): dirty data must go back to the
 * fileserver.  Do it inline if the background daemons are busy, this
 * is an NFS-translator request, or we are disconnected; otherwise
 * queue a BOP_STORE background request. */
825 if (aflags & (FWRITE | FTRUNC)) {
826 if (afs_BBusy() || (AFS_NFSXLATORREQ(acred)) || AFS_IS_DISCONNECTED) {
827 /* do it yourself if daemons are all busy */
828 ObtainWriteLock(&avc->lock, 124);
829 code = afs_StoreOnLastReference(avc, &treq);
830 ReleaseWriteLock(&avc->lock);
831 #if defined(AFS_SGI_ENV)
832 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
835 #if defined(AFS_SGI_ENV)
836 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
838 /* at least one daemon is idle, so ask it to do the store.
839 * Also, note that we don't lock it any more... */
840 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
841 (afs_size_t) acred->cr_uid, (afs_size_t) 0,
843 /* sleep waiting for the store to start, then retrieve error code */
844 while ((tb->flags & BUVALID) == 0) {
852 /* VNOVNODE is "acceptable" error code from close, since
853 * may happen when deleting a file on another machine while
854 * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
855 if (code == VNOVNODE || code == ENOENT)
858 /* Ensure last closer gets the error. If another thread caused
859 * DoPartialWrite and this thread does not actually store the data,
860 * it may not see the quota error.
862 ObtainWriteLock(&avc->lock, 406);
865 osi_ReleaseVM(avc, acred);
867 printf("avc->vc_error=%d\n", avc->vc_error);
868 code = avc->vc_error;
871 ReleaseWriteLock(&avc->lock);
873 /* some codes merit specific complaint */
875 afs_warnuser("afs: failed to store file (network problems)\n");
878 else if (code == ENOSPC) {
880 ("afs: failed to store file (over quota or partition full)\n");
883 else if (code == ENOSPC) {
884 afs_warnuser("afs: failed to store file (partition full)\n");
885 } else if (code == EDQUOT) {
886 afs_warnuser("afs: failed to store file (over quota)\n");
890 afs_warnuser("afs: failed to store file (%d)\n", code);
892 /* finally, we flush any text pages lying around here */
896 #if defined(AFS_SGI_ENV)
897 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
898 osi_Assert(avc->opens > 0);
900 /* file open for read */
901 ObtainWriteLock(&avc->lock, 411);
904 osi_ReleaseVM(avc, acred);
906 code = avc->vc_error;
910 ReleaseWriteLock(&avc->lock);
/* Last real reference to an unlinked file: finish the delayed unlink. */
913 if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
914 afs_remunlink(avc, 1); /* ignore any return code */
918 afs_PutFakeStat(&fakestat);
919 code = afs_CheckCode(code, &treq, 5);
/*
 * VFS fsync entry point: synchronously store this vcache's dirty data to
 * the fileserver (or, in disconnected read-write mode, queue it on the
 * disconnected dirty list).  Signature varies per platform.
 * NOTE(review): the definition continues past this chunked view; only
 * the visible lines are annotated here.
 */
926 afs_fsync(OSI_VC_DECL(avc), int fflags, struct AFS_UCRED *acred, int waitfor)
927 #else /* AFS_OSF_ENV */
928 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
929 afs_fsync(OSI_VC_DECL(avc), int flag, struct AFS_UCRED *acred
931 , off_t start, off_t stop
932 #endif /* AFS_SGI65_ENV */
934 #else /* !OSF && !SUN53 && !SGI */
935 afs_fsync(OSI_VC_DECL(avc), struct AFS_UCRED *acred)
939 register afs_int32 code;
940 struct vrequest treq;
/* Fail fast if a previous operation already marked the vcache bad. */
944 return avc->vc_error;
946 #if defined(AFS_SUN5_ENV)
947 /* back out if called from NFS server */
948 if (curthread->t_flag & T_DONTPEND)
952 AFS_STATCNT(afs_fsync);
953 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
954 if ((code = afs_InitReq(&treq, acred)))
957 #if defined(AFS_SGI_ENV)
958 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
959 if (flag & FSYNC_INVAL)
960 osi_VM_FSyncInval(avc);
961 #endif /* AFS_SGI_ENV */
963 ObtainSharedLock(&avc->lock, 18);
/* Only store if someone still has the file open for write/exec. */
965 if (avc->execsOrWriters > 0) {
967 if (!AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
968 /* Your average flush. */
970 /* put the file back */
971 UpgradeSToWLock(&avc->lock, 41);
972 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
973 ConvertWToSLock(&avc->lock);
975 #if defined(AFS_DISCON_ENV)
977 /* Disconnected flush. */
978 ObtainWriteLock(&afs_DDirtyVCListLock, 708);
980 if (!avc->ddirty_flags ||
981 (avc->ddirty_flags == VDisconShadowed)) {
983 /* Add to disconnected dirty list. */
984 AFS_DISCON_ADD_DIRTY(avc, 1);
987 UpgradeSToWLock(&avc->lock, 711);
988 /* Set disconnected write flag. */
989 avc->ddirty_flags |= VDisconWriteFlush;
990 ConvertWToSLock(&avc->lock);
992 ReleaseWriteLock(&afs_DDirtyVCListLock);
994 } /* if not disconnected */
995 } /* if (avc->execsOrWriters > 0) */
997 #if defined(AFS_SGI_ENV)
998 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
999 if (code == VNOVNODE) {
1000 /* syncing an unlinked file! - non-informative to pass an errno
1001 * 102 (== VNOVNODE) to user
1006 AFS_DISCON_UNLOCK();
1007 code = afs_CheckCode(code, &treq, 33);
1008 ReleaseSharedLock(&avc->lock);