2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "afs/param.h"
24 #include "afs/sysincludes.h" /* Standard vendor system headers */
25 #include "afsincludes.h" /* Afs-based standard headers */
26 #include "afs/afs_stats.h" /* statistics */
27 #include "afs/afs_cbqueue.h"
28 #include "afs/nfsclient.h"
29 #include "afs/afs_osidnlc.h"
32 extern unsigned char *afs_indexFlags;
34 /* Called by all write-on-close routines: regular afs_close,
35 * store via background daemon and store via the
36 * afs_FlushActiveVCaches routine (when CCORE is on).
37 * avc->lock must be write-locked.
/*
 * afs_StoreOnLastReference: push dirty chunks back to the fileserver when
 * the last writer closes the file.  Called from afs_close, the background
 * store daemon, and afs_FlushActiveVCaches (CCORE path).  The caller must
 * hold avc->lock write-locked.
 * NOTE(review): this extract omits interleaved lines of the original file;
 * only the visible statements are documented here.
 */
40 afs_StoreOnLastReference(register struct vcache *avc,
41 register struct vrequest *treq)
45 AFS_STATCNT(afs_StoreOnLastReference);
46 /* if CCore flag is set, we clear it and do the extra decrement
47 * ourselves now. If we're called by the CCore clearer, the CCore
48 * flag will already be clear, so we don't have to worry about
49 * clearing it twice. */
50 if (avc->f.states & CCore) {
51 avc->f.states &= ~CCore;
52 #if defined(AFS_SGI_ENV)
53 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
/* Undo the extra count and vnode/cred references taken by afs_FakeClose. */
55 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
56 * depending on the flags the file was opened with. So, if you make any
57 * changes to the way the execsOrWriters flag is handled check with the
60 avc->execsOrWriters--;
61 AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose) */
62 crfree((afs_ucred_t *)avc->linkData); /* "crheld" in afs_FakeClose */
66 if (!AFS_IS_DISCONNECTED) {
69 /* Now, send the file back. Used to require 0 writers left, but now do
70 * it on every close for write, since two closes in a row are harmless
71 * since first will clean all chunks, and second will be noop. Note that
72 * this will also save confusion when someone keeps a file open
73 * inadvertently, since with old system, writes to the server would never
76 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
78 * We have to do these after the above store is done: in some systems
79 * like aix they'll need to flush all the vm dirty pages to the disk via
80 * the strategy routine. During that whole procedure (done under no avc
81 * locks) opens, refcounts would be zero, since it didn't reach the
82 * afs_{rd,wr} routines which means the vcache is a perfect candidate
85 } else if (AFS_IS_DISCON_RW) {
/* Disconnected read-write mode: record the dirty close for later replay. */
86 afs_DisconAddDirty(avc, VDisconWriteClose, 0);
87 } /* if not disconnected */
89 #if defined(AFS_SGI_ENV)
90 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
/* Drop the writer count for this real (non-fake) close. */
94 avc->execsOrWriters--;
/*
 * afs_MemWrite: write path for the in-memory cache.  Copies data from the
 * caller's uio into memory-cache dcache chunks, chunk by chunk, updating
 * chunk sizes, validPos and the vcache length as it goes.
 * Mirrors afs_UFSWrite below, but uses afs_MemWriteUIO / afs_MemCache*
 * instead of osi_UFS* file operations.
 * NOTE(review): this extract omits interleaved lines of the original file;
 * only the visible statements are documented here.
 */
99 afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
100 afs_ucred_t *acred, int noLock)
102 afs_size_t totalLength;
103 afs_size_t transferLength;
105 afs_size_t offset, len;
106 afs_int32 tlen, trimlen;
109 register struct dcache *tdc;
114 #ifdef AFS_DARWIN80_ENV
118 struct uio *tuiop = &tuio;
119 struct iovec *tvec; /* again, should have define */
121 register afs_int32 code;
122 struct vrequest treq;
124 AFS_STATCNT(afs_MemWrite);
/* Bail out early if the vcache already carries a sticky error. */
126 return avc->vc_error;
128 startDate = osi_Time();
129 if ((code = afs_InitReq(&treq, acred)))
131 /* otherwise we read */
132 totalLength = AFS_UIO_RESID(auio);
133 filePos = AFS_UIO_OFFSET(auio);
136 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
137 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
138 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
139 ICL_HANDLE_OFFSET(avc->f.m.Length));
141 afs_MaybeWakeupTruncateDaemon();
142 ObtainWriteLock(&avc->lock, 126);
144 #if defined(AFS_SGI_ENV)
148 * afs_xwrite handles setting m.Length
149 * and handles APPEND mode.
150 * Since we are called via strategy, we need to trim the write to
151 * the actual size of the file
153 osi_Assert(filePos <= avc->f.m.Length);
154 diff = avc->f.m.Length - filePos;
155 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
156 totalLength = AFS_UIO_RESID(auio);
159 if (aio & IO_APPEND) {
160 /* append mode, start it at the right spot */
161 #if defined(AFS_SUN56_ENV)
162 auio->uio_loffset = 0;
164 filePos = avc->f.m.Length;
165 AFS_UIO_SETOFFSET(auio, filePos);
169 * Note that we use startDate rather than calling osi_Time() here.
170 * This is to avoid counting lock-waiting time in file date (for ranlib).
172 avc->f.m.Date = startDate;
174 #if defined(AFS_HPUX_ENV)
175 #if defined(AFS_HPUX101_ENV)
/* HP-UX: enforce the process RLIMIT_FSIZE (compared in 512-byte sectors). */
176 if ((totalLength + filePos) >> 9 >
177 (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
179 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
182 ReleaseWriteLock(&avc->lock);
186 #ifdef AFS_VM_RDWR_ENV
188 * If write is implemented via VM, afs_FakeOpen() is called from the
189 * high-level write op.
191 if (avc->execsOrWriters <= 0) {
192 printf("WARNING: afs_ufswr vp=%lx, exOrW=%d\n", (unsigned long)avc,
193 avc->execsOrWriters);
198 avc->f.states |= CDirty;
199 #ifndef AFS_DARWIN80_ENV
200 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main loop: one iteration per cache chunk touched by the write. */
202 while (totalLength > 0) {
203 tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, &treq,
210 len = totalLength; /* write this amount by default */
211 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
212 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
213 if (max <= len + offset) { /*if we'd go past the end of this chunk */
214 /* it won't all fit in this chunk, so write as much
219 #ifdef AFS_DARWIN80_ENV
223 tuiop = afsio_darwin_partialcopy(auio, trimlen);
225 /* mung uio structure to be right for this transfer */
226 afsio_copy(auio, &tuio, tvec);
228 afsio_trim(&tuio, trimlen);
230 AFS_UIO_SETOFFSET(tuiop, offset);
232 code = afs_MemWriteUIO(&tdc->f.inode, tuiop);
/* On write failure: invalidate and empty the chunk so bad data vanishes. */
234 void *mep; /* XXX in prototype world is struct memCacheEntry * */
236 ZapDCE(tdc); /* bad data */
237 mep = afs_MemCacheOpen(&tdc->f.inode);
238 afs_MemCacheTruncate(mep, 0);
239 afs_MemCacheClose(mep);
240 afs_stats_cmperf.cacheCurrDirtyChunks--;
241 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
242 ReleaseWriteLock(&tdc->lock);
246 /* otherwise we've written some, fixup length, etc and continue with next seg */
247 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
249 afsio_skip(auio, tlen); /* advance auio over data written */
250 /* compute new file size */
251 if (offset + len > tdc->f.chunkBytes) {
252 afs_int32 tlength = offset + len;
253 afs_AdjustSize(tdc, tlength);
254 if (tdc->validPos < filePos + len)
255 tdc->validPos = filePos + len;
258 transferLength += len;
260 #if defined(AFS_SGI_ENV)
261 /* afs_xwrite handles setting m.Length */
262 osi_Assert(filePos <= avc->f.m.Length);
/* Write extended the file: grow m.Length (after backfilling when
 * in disconnected read-write mode). */
264 if (filePos > avc->f.m.Length) {
265 #if defined(AFS_DISCON_ENV)
266 if (AFS_IS_DISCON_RW)
267 afs_PopulateDCache(avc, filePos, &treq);
269 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
270 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
271 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
272 ICL_HANDLE_OFFSET(filePos));
273 avc->f.m.Length = filePos;
276 ReleaseWriteLock(&tdc->lock);
278 #if !defined(AFS_VM_RDWR_ENV)
280 * If write is implemented via VM, afs_DoPartialWrite() is called from
281 * the high-level write op.
284 code = afs_DoPartialWrite(avc, &treq);
292 #ifndef AFS_VM_RDWR_ENV
293 afs_FakeClose(avc, acred);
/* Record the first error on the vcache so the last closer sees it. */
295 if (error && !avc->vc_error)
296 avc->vc_error = error;
298 ReleaseWriteLock(&avc->lock);
299 #ifdef AFS_DARWIN80_ENV
302 osi_FreeSmallSpace(tvec);
304 error = afs_CheckCode(error, &treq, 6);
309 /* called on writes */
/*
 * afs_UFSWrite: write path for the on-disk (UFS) cache.  Copies data from
 * the caller's uio into disk-cache chunk files via the platform vnode/file
 * write primitive, chunk by chunk, updating chunk sizes, validPos and the
 * vcache length.  Parallel to afs_MemWrite above.
 * NOTE(review): this extract omits interleaved lines of the original file;
 * only the visible statements are documented here.
 */
311 afs_UFSWrite(register struct vcache *avc, struct uio *auio, int aio,
312 afs_ucred_t *acred, int noLock)
314 afs_size_t totalLength;
315 afs_size_t transferLength;
317 afs_size_t offset, len;
322 register struct dcache *tdc;
327 #ifdef AFS_DARWIN80_ENV
331 struct uio *tuiop = &tuio;
332 struct iovec *tvec; /* again, should have define */
334 struct osi_file *tfile;
335 register afs_int32 code;
336 struct vrequest treq;
338 AFS_STATCNT(afs_UFSWrite);
/* Bail out early on a sticky vcache error. */
340 return avc->vc_error;
/* Disconnected and not in read-write mode: writes cannot proceed. */
342 if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW)
345 startDate = osi_Time();
346 if ((code = afs_InitReq(&treq, acred)))
348 /* otherwise we read */
349 totalLength = AFS_UIO_RESID(auio);
350 filePos = AFS_UIO_OFFSET(auio);
353 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
354 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
355 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
356 ICL_HANDLE_OFFSET(avc->f.m.Length));
358 afs_MaybeWakeupTruncateDaemon();
359 ObtainWriteLock(&avc->lock, 556);
361 #if defined(AFS_SGI_ENV)
365 * afs_xwrite handles setting m.Length
366 * and handles APPEND mode.
367 * Since we are called via strategy, we need to trim the write to
368 * the actual size of the file
370 osi_Assert(filePos <= avc->f.m.Length);
371 diff = avc->f.m.Length - filePos;
372 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
373 totalLength = AFS_UIO_RESID(auio);
376 if (aio & IO_APPEND) {
377 /* append mode, start it at the right spot */
378 #if defined(AFS_SUN56_ENV)
379 auio->uio_loffset = 0;
381 filePos = avc->f.m.Length;
382 AFS_UIO_SETOFFSET(auio, avc->f.m.Length);
386 * Note that we use startDate rather than calling osi_Time() here.
387 * This is to avoid counting lock-waiting time in file date (for ranlib).
389 avc->f.m.Date = startDate;
391 #if defined(AFS_HPUX_ENV)
392 #if defined(AFS_HPUX101_ENV)
/* HP-UX: enforce the process RLIMIT_FSIZE (compared in 512-byte sectors). */
393 if ((totalLength + filePos) >> 9 >
394 p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
396 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
399 ReleaseWriteLock(&avc->lock);
403 #ifdef AFS_VM_RDWR_ENV
405 * If write is implemented via VM, afs_FakeOpen() is called from the
406 * high-level write op.
408 if (avc->execsOrWriters <= 0) {
409 printf("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
410 avc->execsOrWriters);
415 avc->f.states |= CDirty;
416 #ifndef AFS_DARWIN80_ENV
417 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main loop: one iteration per cache chunk touched by the write. */
419 while (totalLength > 0) {
420 tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, &treq,
426 tfile = (struct osi_file *)osi_UFSOpen(&tdc->f.inode);
427 len = totalLength; /* write this amount by default */
428 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
429 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
430 if (max <= len + offset) { /*if we'd go past the end of this chunk */
431 /* it won't all fit in this chunk, so write as much
436 #ifdef AFS_DARWIN80_ENV
440 tuiop = afsio_darwin_partialcopy(auio, trimlen);
442 /* mung uio structure to be right for this transfer */
443 afsio_copy(auio, &tuio, tvec);
445 afsio_trim(&tuio, trimlen);
447 AFS_UIO_SETOFFSET(tuiop, offset);
/* Platform-specific write of the trimmed uio into the chunk file.
 * Each branch performs the same logical operation with that OS's
 * vnode locking and write primitive. */
449 #if defined(AFS_AIX41_ENV)
452 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL,
453 NULL, afs_osi_credp);
455 #elif defined(AFS_AIX32_ENV)
456 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
457 #elif defined(AFS_AIX_ENV)
459 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) & offset,
460 &tuio, NULL, NULL, -1);
461 #elif defined(AFS_SUN5_ENV)
463 #ifdef AFS_SUN510_ENV
467 VOP_RWLOCK(tfile->vnode, 1, &ct);
468 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
469 VOP_RWUNLOCK(tfile->vnode, 1, &ct);
472 VOP_RWLOCK(tfile->vnode, 1);
473 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
474 VOP_RWUNLOCK(tfile->vnode, 1);
479 ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
480 #elif defined(AFS_SGI_ENV)
482 avc->f.states |= CWritingUFS;
483 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
484 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
485 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
486 avc->f.states &= ~CWritingUFS;
488 #elif defined(AFS_OSF_ENV)
490 struct ucred *tmpcred = u.u_cred;
491 u.u_cred = afs_osi_credp;
492 tuio.uio_rw = UIO_WRITE;
494 VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, code);
498 #elif defined(AFS_HPUX100_ENV)
501 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
504 #elif defined(AFS_LINUX20_ENV)
506 code = osi_rdwr(tfile, &tuio, UIO_WRITE);
508 #elif defined(AFS_DARWIN80_ENV)
510 code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
512 #elif defined(AFS_DARWIN_ENV)
514 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
515 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
516 VOP_UNLOCK(tfile->vnode, 0, current_proc());
518 #elif defined(AFS_FBSD80_ENV)
520 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
521 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
522 VOP_UNLOCK(tfile->vnode, 0);
524 #elif defined(AFS_FBSD50_ENV)
526 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
527 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
528 VOP_UNLOCK(tfile->vnode, 0, curthread);
530 #elif defined(AFS_XBSD_ENV)
532 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
533 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
534 VOP_UNLOCK(tfile->vnode, 0, curproc);
538 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
540 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
/* On write failure: invalidate and empty the chunk so bad data vanishes. */
544 ZapDCE(tdc); /* bad data */
545 osi_UFSTruncate(tfile, 0); /* fake truncate the segment */
546 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
547 afs_stats_cmperf.cacheCurrDirtyChunks--;
548 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
549 afs_CFileClose(tfile);
550 ReleaseWriteLock(&tdc->lock);
554 /* otherwise we've written some, fixup length, etc and continue with next seg */
555 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
557 afsio_skip(auio, tlen); /* advance auio over data written */
558 /* compute new file size */
559 if (offset + len > tdc->f.chunkBytes) {
560 afs_int32 tlength = offset + len;
561 afs_AdjustSize(tdc, tlength);
562 if (tdc->validPos < filePos + len)
563 tdc->validPos = filePos + len;
566 transferLength += len;
568 #if defined(AFS_SGI_ENV)
569 /* afs_xwrite handles setting m.Length */
570 osi_Assert(filePos <= avc->f.m.Length);
/* Write extended the file: grow m.Length (after backfilling when
 * in disconnected read-write mode). */
572 if (filePos > avc->f.m.Length) {
573 #if defined(AFS_DISCON_ENV)
574 if (AFS_IS_DISCON_RW)
575 afs_PopulateDCache(avc, filePos, &treq);
577 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
578 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
579 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
580 ICL_HANDLE_OFFSET(filePos));
581 avc->f.m.Length = filePos;
585 ReleaseWriteLock(&tdc->lock);
587 #if !defined(AFS_VM_RDWR_ENV)
589 * If write is implemented via VM, afs_DoPartialWrite() is called from
590 * the high-level write op.
593 code = afs_DoPartialWrite(avc, &treq);
601 #ifndef AFS_VM_RDWR_ENV
602 afs_FakeClose(avc, acred);
604 error = afs_CheckCode(error, &treq, 7);
605 /* This set is here so we get the CheckCode. */
606 if (error && !avc->vc_error)
607 avc->vc_error = error;
609 ReleaseWriteLock(&avc->lock);
610 #ifdef AFS_DARWIN80_ENV
613 osi_FreeSmallSpace(tvec);
615 #ifndef AFS_VM_RDWR_ENV
617 * If write is implemented via VM, afs_fsync() is called from the high-level
/* Synchronous-write handling: each platform flags sync I/O differently. */
620 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
621 if (noLock && (aio & IO_SYNC)) {
624 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
625 * we're doing them because the file was opened with O_SYNCIO specified,
626 * we have to look in the u area. No single mechanism here!!
628 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
630 if (noLock && (aio & FSYNC)) {
633 if (!AFS_NFSXLATORREQ(acred))
634 afs_fsync(avc, acred);
640 /* do partial write if we're low on unmodified chunks */
/*
 * afs_DoPartialWrite: if too many dirty chunks have accumulated, start an
 * asynchronous store of this file's segments to free up clean chunks.
 * No-op (returns 0) while under the dirty-chunk limit or when disconnected.
 * NOTE(review): this extract omits interleaved lines of the original file.
 */
642 afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
644 register afs_int32 code;
646 if (afs_stats_cmperf.cacheCurrDirtyChunks <=
647 afs_stats_cmperf.cacheMaxDirtyChunks
648 || AFS_IS_DISCONNECTED)
649 return 0; /* nothing to do */
650 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
651 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
652 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
/* Solaris also invalidates VM pages during the async store. */
654 #if defined(AFS_SUN5_ENV)
655 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
657 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
664 #define vno_close(X) vn_close((X), 0, NOCRED)
665 #elif defined(AFS_DUX40_ENV)
666 #define vno_close vn_close
668 /* We don't need this for AIX since:
669 * (1) aix doesn't use fileops and it calls close directly instead
670 * (where the unlocking should be done) and
671 * (2) temporarily, the aix lockf isn't supported yet.
673 * this stupid routine is used to release the flocks held on a
674 * particular file descriptor. Sun doesn't pass file descr. info
675 * through to the vnode layer, and yet we must unlock flocked files
676 * on the *appropriate* (not first, as in System V) close call. Thus
678 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
679 * file ops structure into any afs file when it gets flocked.
680 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/*
 * afs_closex: per-file-descriptor close hook used to release flocks held on
 * that descriptor (installed via the AFS_FLOCK file-ops shim; see the
 * comment block above).  For AFS vnodes it strips the lock flags, closes the
 * descriptor, and releases the file locks; otherwise it just closes.
 * NOTE(review): this extract omits interleaved lines of the original file.
 */
684 afs_closex(register struct file *afd)
686 struct vrequest treq;
691 struct afs_fakestat_state fakestat;
693 AFS_STATCNT(afs_closex);
694 /* setup the credentials */
695 if ((code = afs_InitReq(&treq, u.u_cred)))
697 afs_InitFakeStat(&fakestat);
700 /* we're the last one. If we're an AFS vnode, clear the flags,
701 * close the file and release the lock when done. Otherwise, just
702 * let the regular close code work. */
703 if (afd->f_type == DTYPE_VNODE) {
704 tvc = VTOAFS(afd->f_data);
705 if (IsAfsVnode(AFSTOV(tvc))) {
706 code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
708 afs_PutFakeStat(&fakestat);
/* Hold the vnode across vno_close so it cannot disappear before
 * HandleFlock runs. */
711 VN_HOLD(AFSTOV(tvc));
712 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
713 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
714 code = vno_close(afd);
716 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
718 AFS_RELE(AFSTOV(tvc));
722 /* now, if close not done, do it */
724 code = vno_close(afd);
726 afs_PutFakeStat(&fakestat);
727 return code; /* return code from vnode layer */
732 /* handle any closing cleanup stuff */
/*
 * afs_close: vnode-op close.  Releases this process's file locks, and on a
 * write/truncate close either stores dirty data itself (when background
 * daemons are busy, it is an NFS-translator request, or we are disconnected)
 * or queues a BOP_STORE request for a background daemon.  Warns the user on
 * store failures (quota / partition full / network) and propagates any
 * sticky vcache error to the last closer.
 * The #if maze below selects the platform-specific signature.
 * NOTE(review): this extract omits interleaved lines of the original file;
 * only the visible statements are documented here.
 */
734 #if defined(AFS_SGI65_ENV)
735 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
737 #elif defined(AFS_SGI64_ENV)
738 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
739 off_t offset, afs_ucred_t *acred, struct flid *flp)
740 #elif defined(AFS_SGI_ENV)
741 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose
742 off_t offset, afs_ucred_t *acred)
743 #elif defined(AFS_SUN5_ENV)
744 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, int count, offset_t offset,
747 afs_close(OSI_VC_DECL(avc), afs_int32 aflags, afs_ucred_t *acred)
750 register afs_int32 code;
751 register struct brequest *tb;
752 struct vrequest treq;
756 struct afs_fakestat_state fakestat;
759 AFS_STATCNT(afs_close);
760 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
761 ICL_TYPE_INT32, aflags);
762 code = afs_InitReq(&treq, acred);
765 afs_InitFakeStat(&fakestat);
766 code = afs_EvalFakeStat(&avc, &fakestat, &treq);
768 afs_PutFakeStat(&fakestat);
/* Release any file locks held by this process on this vnode. */
773 if (avc->flockCount) {
774 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
777 #if defined(AFS_SGI_ENV)
779 afs_PutFakeStat(&fakestat);
783 /* unlock any locks for pid - could be wrong for child .. */
784 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
786 get_current_flid(&flid);
787 cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
788 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
791 cleanlocks((vnode_t *) avc, flp);
792 #else /* AFS_SGI64_ENV */
793 cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
794 #endif /* AFS_SGI64_ENV */
795 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
796 #endif /* AFS_SGI65_ENV */
797 /* afs_chkpgoob will drop and re-acquire the global lock. */
798 afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
799 #elif defined(AFS_SUN5_ENV)
801 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
802 afs_PutFakeStat(&fakestat);
806 #else /* AFS_SGI_ENV */
807 if (avc->flockCount) { /* Release Lock */
808 #if defined(AFS_OSF_ENV)
809 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1 /*onlymine */ );
811 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
814 #endif /* AFS_SGI_ENV */
/* Close-for-write (or truncate): dirty data must go back to the server. */
815 if (aflags & (FWRITE | FTRUNC)) {
816 if (afs_BBusy() || (AFS_NFSXLATORREQ(acred)) || AFS_IS_DISCONNECTED) {
817 /* do it yourself if daemons are all busy */
818 ObtainWriteLock(&avc->lock, 124);
819 code = afs_StoreOnLastReference(avc, &treq);
820 ReleaseWriteLock(&avc->lock);
821 #if defined(AFS_SGI_ENV)
822 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
825 #if defined(AFS_SGI_ENV)
826 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
828 /* at least one daemon is idle, so ask it to do the store.
829 * Also, note that we don't lock it any more... */
830 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
831 (afs_size_t) acred->cr_uid, (afs_size_t) 0,
833 /* sleep waiting for the store to start, then retrieve error code */
834 while ((tb->flags & BUVALID) == 0) {
842 /* VNOVNODE is "acceptable" error code from close, since
843 * may happen when deleting a file on another machine while
844 * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
845 if (code == VNOVNODE || code == ENOENT)
848 /* Ensure last closer gets the error. If another thread caused
849 * DoPartialWrite and this thread does not actually store the data,
850 * it may not see the quota error.
852 ObtainWriteLock(&avc->lock, 406);
855 osi_ReleaseVM(avc, acred);
857 printf("avc->vc_error=%d\n", avc->vc_error);
858 code = avc->vc_error;
861 ReleaseWriteLock(&avc->lock);
863 /* some codes merit specific complaint */
865 afs_warnuser("afs: failed to store file (network problems)\n");
868 else if (code == ENOSPC) {
870 ("afs: failed to store file (over quota or partition full)\n");
873 else if (code == ENOSPC) {
874 afs_warnuser("afs: failed to store file (partition full)\n");
875 } else if (code == EDQUOT) {
876 afs_warnuser("afs: failed to store file (over quota)\n");
880 afs_warnuser("afs: failed to store file (%d)\n", code);
882 /* finally, we flush any text pages lying around here */
886 #if defined(AFS_SGI_ENV)
887 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
888 osi_Assert(avc->opens > 0);
890 /* file open for read */
891 ObtainWriteLock(&avc->lock, 411);
894 osi_ReleaseVM(avc, acred);
896 code = avc->vc_error;
900 ReleaseWriteLock(&avc->lock);
/* Last significant reference to an unlinked file: finish the remove. */
903 if ((VREFCOUNT(avc) <= 2) && (avc->f.states & CUnlinked)) {
904 afs_remunlink(avc, 1); /* ignore any return code */
908 afs_PutFakeStat(&fakestat);
909 code = afs_CheckCode(code, &treq, 5);
/*
 * afs_fsync: vnode-op fsync.  If the file has active writers, synchronously
 * stores all dirty segments to the server (or records a flush entry when in
 * disconnected read-write mode).  VNOVNODE from syncing an unlinked file is
 * suppressed.  The #if maze below selects the platform-specific signature.
 * NOTE(review): this extract omits interleaved lines of the original file;
 * only the visible statements are documented here.
 */
916 afs_fsync(OSI_VC_DECL(avc), int fflags, afs_ucred_t *acred, int waitfor)
917 #else /* AFS_OSF_ENV */
918 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
919 afs_fsync(OSI_VC_DECL(avc), int flag, afs_ucred_t *acred
921 , off_t start, off_t stop
922 #endif /* AFS_SGI65_ENV */
924 #else /* !OSF && !SUN53 && !SGI */
925 afs_fsync(OSI_VC_DECL(avc), afs_ucred_t *acred)
929 register afs_int32 code;
930 struct vrequest treq;
/* Bail out early on a sticky vcache error. */
934 return avc->vc_error;
936 #if defined(AFS_SUN5_ENV)
937 /* back out if called from NFS server */
938 if (curthread->t_flag & T_DONTPEND)
942 AFS_STATCNT(afs_fsync);
943 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
944 if ((code = afs_InitReq(&treq, acred)))
947 #if defined(AFS_SGI_ENV)
948 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
949 if (flag & FSYNC_INVAL)
950 osi_VM_FSyncInval(avc);
951 #endif /* AFS_SGI_ENV */
953 ObtainSharedLock(&avc->lock, 18);
955 if (avc->execsOrWriters > 0) {
957 if (!AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
958 /* Your average flush. */
960 /* put the file back */
961 UpgradeSToWLock(&avc->lock, 41);
962 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
963 ConvertWToSLock(&avc->lock);
965 #if defined(AFS_DISCON_ENV)
/* Disconnected read-write mode: record the flush for later replay. */
968 UpgradeSToWLock(&avc->lock, 711);
969 afs_DisconAddDirty(avc, VDisconWriteFlush, 1);
970 ConvertWToSLock(&avc->lock);
972 } /* if not disconnected */
973 } /* if (avc->execsOrWriters > 0) */
975 #if defined(AFS_SGI_ENV)
976 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
977 if (code == VNOVNODE) {
978 /* syncing an unlinked file! - non-informative to pass an errno
979 * 102 (== VNOVNODE) to user
985 code = afs_CheckCode(code, &treq, 33);
986 ReleaseSharedLock(&avc->lock);