2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include "../afs/param.h" /* Should be always first */
21 #include "../afs/sysincludes.h" /* Standard vendor system headers */
22 #include "../afs/afsincludes.h" /* Afs-based standard headers */
23 #include "../afs/afs_stats.h" /* statistics */
24 #include "../afs/afs_cbqueue.h"
25 #include "../afs/nfsclient.h"
26 #include "../afs/afs_osidnlc.h"
29 extern unsigned char *afs_indexFlags;
31 /* Called by all write-on-close routines: regular afs_close,
32 * store via background daemon and store via the
33 * afs_FlushActiveVCaches routine (when CCORE is on).
34 * avc->lock must be write-locked.
/*
 * afs_StoreOnLastReference -- flush a file's dirty data back to the
 * fileserver on a write-on-close.  Per the header comment above, this is
 * reached from afs_close, the background store daemon, and
 * afs_FlushActiveVCaches (when CCORE is on); avc->lock must be
 * write-locked by the caller.
 *
 * NOTE(review): this extract elides many original lines (the embedded
 * line numbers jump), so the body below is incomplete as shown --
 * declarations, braces and the return path are not all visible.
 */
36 afs_StoreOnLastReference(avc, treq)
37 register struct vcache *avc;
38 register struct vrequest *treq;
42 AFS_STATCNT(afs_StoreOnLastReference);
43 /* if CCore flag is set, we clear it and do the extra decrement
44 * ourselves now. If we're called by the CCore clearer, the CCore
45 * flag will already be clear, so we don't have to worry about
46 * clearing it twice. */
47 if (avc->states & CCore) {
48 avc->states &= ~CCore;
49 #if defined(AFS_SGI_ENV)
50 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
52 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
53 * depending on the flags the file was opened with. So, if you make any
54 * changes to the way the execsOrWriters flag is handled check with the
57 avc->execsOrWriters--;
58 AFS_RELE((struct vnode *)avc); /* VN_HOLD at set CCore(afs_FakeClose)*/
59 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
60 avc->linkData = (char *)0;
62 /* Now, send the file back. Used to require 0 writers left, but now do
63 * it on every close for write, since two closes in a row are harmless
64 * since first will clean all chunks, and second will be noop. Note that
65 * this will also save confusion when someone keeps a file open
66 * inadvertently, since with old system, writes to the server would never
69 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE/*!sync-to-disk*/);
71 * We have to do these after the above store is done: on some systems like
72 * aix they'll need to flush all the vm dirty pages to the disk via the
73 * strategy routine. During all that procedure (done under no avc locks)
74 * opens, refcounts would be zero, since it didn't reach the afs_{rd,wr}
75 * routines which means the vcache is a perfect candidate for flushing!
77 #if defined(AFS_SGI_ENV)
78 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
81 avc->execsOrWriters--;
/*
 * afs_MemWrite -- write path for the in-memory cache backend.  Copies the
 * caller's uio data into memory-cache chunks (afs_MemWriteUIO), marking
 * each touched dcache entry dirty (IFDataMod/DWriting) and updating the
 * cached file length; a failed chunk write zaps the dcache entry.
 * Throttles against the truncate daemon when the cache is nearly full.
 *
 * NOTE(review): this extract elides many original lines (the embedded
 * line numbers jump); several declarations (tuio, filePos, startDate,
 * diff, max) and closing braces are not visible below.
 */
87 afs_MemWrite(avc, auio, aio, acred, noLock)
88 register struct vcache *avc;
91 struct AFS_UCRED *acred; {
92 afs_int32 totalLength;
93 afs_int32 transferLength;
97 register struct dcache *tdc;
101 afs_int32 offset, len, error;
103 struct iovec *tvec; /* again, should have define */
105 register afs_int32 code;
106 struct vrequest treq;
108 AFS_STATCNT(afs_MemWrite);
110 return avc->vc_error;
112 startDate = osi_Time();
113 if (code = afs_InitReq(&treq, acred)) return code;
114 /* otherwise we write */
115 totalLength = auio->afsio_resid;
116 filePos = auio->afsio_offset;
119 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
120 ICL_TYPE_INT32, filePos, ICL_TYPE_INT32, totalLength,
121 ICL_TYPE_INT32, avc->m.Length);
123 afs_MaybeWakeupTruncateDaemon();
124 ObtainWriteLock(&avc->lock,126);
126 #if defined(AFS_SGI_ENV)
130 * afs_xwrite handles setting m.Length
131 * and handles APPEND mode.
132 * Since we are called via strategy, we need to trim the write to
133 * the actual size of the file
135 osi_Assert(filePos <= avc->m.Length);
136 diff = avc->m.Length - filePos;
137 auio->afsio_resid = MIN(totalLength, diff);
138 totalLength = auio->afsio_resid;
141 if (aio & IO_APPEND) {
142 /* append mode, start it at the right spot */
143 #if defined(AFS_SUN56_ENV)
144 auio->uio_loffset = 0;
146 filePos = auio->afsio_offset = avc->m.Length;
150 * Note that we use startDate rather than calling osi_Time() here.
151 * This is to avoid counting lock-waiting time in file date (for ranlib).
153 avc->m.Date = startDate;
155 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
156 #if defined(AFS_HPUX101_ENV)
157 if ((totalLength + filePos) >> 9 > (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
160 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
162 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
166 ReleaseWriteLock(&avc->lock);
170 #ifdef AFS_VM_RDWR_ENV
172 * If write is implemented via VM, afs_FakeOpen() is called from the
173 * high-level write op.
175 if (avc->execsOrWriters <= 0) {
176 printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
181 avc->states |= CDirty;
182 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
183 while (totalLength > 0) {
184 /* Read the cached info. If we call GetDCache while the cache
185 * truncate daemon is running we risk overflowing the disk cache.
186 * Instead we check for an existing cache slot. If we cannot
187 * find an existing slot we wait for the cache to drain
188 * before calling GetDCache.
191 tdc = afs_FindDCache(avc, filePos);
193 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
194 len = tdc->f.chunkBytes - offset;
196 } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
197 tdc = afs_FindDCache(avc, filePos);
199 if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
200 (tdc->flags & DFFetching)) {
204 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
205 len = tdc->f.chunkBytes - offset;
/* Cache-pressure throttle: drop avc->lock while sleeping so the
 * truncate daemon can make progress draining the cache. */
209 afs_MaybeWakeupTruncateDaemon();
210 while (afs_blocksUsed >
211 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
212 ReleaseWriteLock(&avc->lock);
213 if (afs_blocksUsed - afs_blocksDiscarded >
214 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
215 afs_WaitForCacheDrain = 1;
216 afs_osi_Sleep(&afs_WaitForCacheDrain);
218 afs_MaybeFreeDiscardedDCache();
219 afs_MaybeWakeupTruncateDaemon();
220 ObtainWriteLock(&avc->lock,506);
222 avc->states |= CDirty;
223 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
226 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
232 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
233 afs_stats_cmperf.cacheCurrDirtyChunks++;
234 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
236 if (!(tdc->f.states & DWriting)) {
237 /* don't mark entry as mod if we don't have to */
238 tdc->f.states |= DWriting;
239 tdc->flags |= DFEntryMod;
241 len = totalLength; /* write this amount by default */
242 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
243 if (max <= len + offset) { /*if we'd go past the end of this chunk */
244 /* it won't all fit in this chunk, so write as much
248 /* mung uio structure to be right for this transfer */
249 afsio_copy(auio, &tuio, tvec);
250 afsio_trim(&tuio, len);
251 tuio.afsio_offset = offset;
253 code = afs_MemWriteUIO(tdc->f.inode, &tuio);
/* On a failed chunk write, invalidate the dcache entry so the bad
 * data is never served to readers. */
256 ZapDCE(tdc); /* bad data */
257 afs_MemCacheTruncate(tdc->f.inode, 0);
258 afs_stats_cmperf.cacheCurrDirtyChunks--;
259 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
263 /* otherwise we've written some, fixup length, etc and continue with next seg */
264 len = len - tuio.afsio_resid; /* compute amount really transferred */
265 afsio_skip(auio, len); /* advance auio over data written */
266 /* compute new file size */
267 if (offset + len > tdc->f.chunkBytes)
268 afs_AdjustSize(tdc, offset+len);
270 transferLength += len;
272 #if defined(AFS_SGI_ENV)
273 /* afs_xwrite handles setting m.Length */
274 osi_Assert(filePos <= avc->m.Length);
276 if (filePos > avc->m.Length)
277 avc->m.Length = filePos;
279 #ifndef AFS_VM_RDWR_ENV
281 * If write is implemented via VM, afs_DoPartialWrite() is called from
282 * the high-level write op.
285 code = afs_DoPartialWrite(avc, &treq);
295 #ifndef AFS_VM_RDWR_ENV
296 afs_FakeClose(avc, acred);
298 if (error && !avc->vc_error)
299 avc->vc_error = error;
301 ReleaseWriteLock(&avc->lock);
302 osi_FreeSmallSpace(tvec);
304 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
305 work. GFS is truly a poorly-designed interface! */
306 afs_gfshack((struct gnode *) avc);
308 error = afs_CheckCode(error, &treq, 6);
313 /* called on writes */
/*
 * afs_UFSWrite -- write path for the on-disk (UFS) cache backend.
 * Mirrors afs_MemWrite above, but each chunk is written through the
 * underlying cache file's vnode using the platform-specific write
 * operation selected by the #ifdef maze below (VNOP_RDWR on AIX,
 * VOP_WRITE on Sun/OSF/Darwin, AFS_VOP_WRITE on SGI,
 * osi_file_uio_rdwr on Linux, VOP_RDWR on HP-UX, ...).
 *
 * NOTE(review): this extract elides many original lines (the embedded
 * line numbers jump); declarations (tuio, filePos, startDate, diff,
 * max), several #else/#endif branches and closing braces are missing.
 */
314 afs_UFSWrite(avc, auio, aio, acred, noLock)
315 register struct vcache *avc;
318 struct AFS_UCRED *acred; {
319 afs_int32 totalLength;
320 afs_int32 transferLength;
324 register struct dcache *tdc;
328 afs_int32 offset, len, error;
330 struct iovec *tvec; /* again, should have define */
331 struct osi_file *tfile;
332 register afs_int32 code;
334 struct vrequest treq;
336 AFS_STATCNT(afs_UFSWrite);
338 return avc->vc_error;
340 startDate = osi_Time();
341 if (code = afs_InitReq(&treq, acred)) return code;
342 /* otherwise we write */
343 totalLength = auio->afsio_resid;
344 filePos = auio->afsio_offset;
347 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
348 ICL_TYPE_INT32, filePos, ICL_TYPE_INT32, totalLength,
349 ICL_TYPE_INT32, avc->m.Length);
351 afs_MaybeWakeupTruncateDaemon();
352 ObtainWriteLock(&avc->lock,556);
354 #if defined(AFS_SGI_ENV)
358 * afs_xwrite handles setting m.Length
359 * and handles APPEND mode.
360 * Since we are called via strategy, we need to trim the write to
361 * the actual size of the file
363 osi_Assert(filePos <= avc->m.Length);
364 diff = avc->m.Length - filePos;
365 auio->afsio_resid = MIN(totalLength, diff);
366 totalLength = auio->afsio_resid;
369 if (aio & IO_APPEND) {
370 /* append mode, start it at the right spot */
371 #if defined(AFS_SUN56_ENV)
372 auio->uio_loffset = 0;
374 filePos = auio->afsio_offset = avc->m.Length;
378 * Note that we use startDate rather than calling osi_Time() here.
379 * This is to avoid counting lock-waiting time in file date (for ranlib).
381 avc->m.Date = startDate;
383 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
384 #if defined(AFS_HPUX101_ENV)
385 if ((totalLength + filePos) >> 9 > p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
388 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
390 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
394 ReleaseWriteLock(&avc->lock);
398 #ifdef AFS_VM_RDWR_ENV
400 * If write is implemented via VM, afs_FakeOpen() is called from the
401 * high-level write op.
403 if (avc->execsOrWriters <= 0) {
404 printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
409 avc->states |= CDirty;
410 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
411 while (totalLength > 0) {
412 /* read the cached info */
414 tdc = afs_FindDCache(avc, filePos);
416 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
417 len = tdc->f.chunkBytes - offset;
419 } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
420 tdc = afs_FindDCache(avc, filePos);
422 if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
423 (tdc->flags & DFFetching)) {
427 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
428 len = tdc->f.chunkBytes - offset;
/* Cache-pressure throttle: drop avc->lock while sleeping so the
 * truncate daemon can make progress draining the cache. */
432 afs_MaybeWakeupTruncateDaemon();
433 while (afs_blocksUsed >
434 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
435 ReleaseWriteLock(&avc->lock);
436 if (afs_blocksUsed - afs_blocksDiscarded >
437 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
438 afs_WaitForCacheDrain = 1;
439 afs_osi_Sleep(&afs_WaitForCacheDrain);
441 afs_MaybeFreeDiscardedDCache();
442 afs_MaybeWakeupTruncateDaemon();
443 ObtainWriteLock(&avc->lock,509);
445 avc->states |= CDirty;
446 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
449 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
455 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
456 afs_stats_cmperf.cacheCurrDirtyChunks++;
457 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
459 if (!(tdc->f.states & DWriting)) {
460 /* don't mark entry as mod if we don't have to */
461 tdc->f.states |= DWriting;
462 tdc->flags |= DFEntryMod;
464 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
465 len = totalLength; /* write this amount by default */
466 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
467 if (max <= len + offset) { /*if we'd go past the end of this chunk */
468 /* it won't all fit in this chunk, so write as much
472 /* mung uio structure to be right for this transfer */
473 afsio_copy(auio, &tuio, tvec);
474 afsio_trim(&tuio, len);
475 tuio.afsio_offset = offset;
/* Platform-specific write of &tuio to the cache file's vnode. */
479 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL, NULL, &afs_osi_cred);
483 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
485 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t)&offset, &tuio, NULL, NULL, -1);
487 #endif /* AFS_AIX41_ENV */
488 #else /* AFS_AIX_ENV */
491 VOP_RWLOCK(tfile->vnode, 1);
492 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
493 VOP_RWUNLOCK(tfile->vnode, 1);
495 if (code == ENOSPC) afs_warnuser("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
497 #if defined(AFS_SGI_ENV)
499 avc->states |= CWritingUFS;
500 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
501 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred, code);
502 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
503 avc->states &= ~CWritingUFS;
508 struct ucred *tmpcred = u.u_cred;
509 u.u_cred = &afs_osi_cred;
510 tuio.uio_rw = UIO_WRITE;
512 VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
516 #else /* AFS_OSF_ENV */
517 #if defined(AFS_HPUX100_ENV)
520 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
525 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
527 #if defined(AFS_LINUX20_ENV)
529 code = osi_file_uio_rdwr(tfile, &tuio, UIO_WRITE);
532 #if defined(AFS_DARWIN_ENV)
534 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
535 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
536 VOP_UNLOCK(tfile->vnode, 0, current_proc());
539 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
540 #endif /* AFS_DARWIN_ENV */
541 #endif /* AFS_LINUX20_ENV */
542 #endif /* AFS_HPUX100_ENV */
543 #endif /* AFS_OSF_ENV */
544 #endif /* AFS_SGI_ENV */
545 #endif /* AFS_SUN5_ENV */
546 #endif /* AFS_AIX41_ENV */
/* On a failed chunk write, invalidate the dcache entry and shrink the
 * cache file so the bad data is never served to readers. */
549 ZapDCE(tdc); /* bad data */
550 osi_UFSTruncate(tfile,0); /* fake truncate the segment */
551 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
552 afs_stats_cmperf.cacheCurrDirtyChunks--;
553 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
555 afs_CFileClose(tfile);
558 /* otherwise we've written some, fixup length, etc and continue with next seg */
559 len = len - tuio.afsio_resid; /* compute amount really transferred */
560 afsio_skip(auio, len); /* advance auio over data written */
561 /* compute new file size */
562 if (offset + len > tdc->f.chunkBytes)
563 afs_AdjustSize(tdc, offset+len);
565 transferLength += len;
567 #if defined(AFS_SGI_ENV)
568 /* afs_xwrite handles setting m.Length */
569 osi_Assert(filePos <= avc->m.Length);
571 if (filePos > avc->m.Length) {
572 avc->m.Length = filePos;
576 #ifndef AFS_VM_RDWR_ENV
578 * If write is implemented via VM, afs_DoPartialWrite() is called from
579 * the high-level write op.
582 code = afs_DoPartialWrite(avc, &treq);
592 #ifndef AFS_VM_RDWR_ENV
593 afs_FakeClose(avc, acred);
595 error = afs_CheckCode(error, &treq, 7);
596 /* This set is here so we get the CheckCode. */
597 if (error && !avc->vc_error)
598 avc->vc_error = error;
600 ReleaseWriteLock(&avc->lock);
601 osi_FreeSmallSpace(tvec);
603 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
604 work. GFS is truly a poorly-designed interface! */
605 afs_gfshack((struct gnode *) avc);
607 #ifndef AFS_VM_RDWR_ENV
609 * If write is implemented via VM, afs_fsync() is called from the high-level
612 #ifdef AFS_DARWIN_ENV
613 if (noLock && (aio & IO_SYNC)) {
616 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
617 * we're doing them because the file was opened with O_SYNCIO specified,
618 * we have to look in the u area. No single mechanism here!!
620 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
622 if (noLock && (aio & FSYNC)) {
625 if (!AFS_NFSXLATORREQ(acred))
626 afs_fsync(avc, acred);
632 /* do partial write if we're low on unmodified chunks */
/*
 * afs_DoPartialWrite -- if the number of dirty cache chunks exceeds the
 * configured maximum, asynchronously store the file's segments back to
 * the server to free up clean chunks; otherwise a no-op returning 0.
 *
 * NOTE(review): this extract elides lines (the embedded line numbers
 * jump); the #else/#endif around the two StoreAllSegments calls and the
 * function's return are not visible below.
 */
633 afs_DoPartialWrite(avc, areq)
634 register struct vcache *avc;
635 struct vrequest *areq; {
636 register afs_int32 code;
638 if (afs_stats_cmperf.cacheCurrDirtyChunks <= afs_stats_cmperf.cacheMaxDirtyChunks)
639 return 0; /* nothing to do */
640 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
641 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
642 ICL_TYPE_INT32, avc->m.Length);
643 #if defined(AFS_SUN5_ENV)
644 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
646 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
653 #if !defined (AFS_AIX_ENV) && !defined (AFS_HPUX_ENV) && !defined (AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV)
655 #define vno_close(X) vn_close((X), 0, NOCRED)
656 #elif defined(AFS_DUX40_ENV)
657 #define vno_close vn_close
659 /* We don't need this for AIX since:
660 * (1) aix doesn't use fileops and it calls close directly instead
661 * (where the unlocking should be done) and
662 * (2) temporarily, the aix lockf isn't supported yet.
664 * this stupid routine is used to release the flocks held on a
665 * particular file descriptor. Sun doesn't pass file descr. info
666 * through to the vnode layer, and yet we must unlock flocked files
667 * on the *appropriate* (not first, as in System V) close call. Thus
669 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
670 * file ops structure into any afs file when it gets flocked.
671 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/*
 * afs_closex -- per-file-descriptor close hook used to release flocks on
 * the *appropriate* close (see the comment block above).  For AFS vnodes
 * it strips FSHLOCK/FEXLOCK from the descriptor, closes it, and unlocks
 * any flocks held via HandleFlock before releasing the vnode reference.
 *
 * NOTE(review): this extract elides lines (the embedded line numbers
 * jump) -- the function header line itself and several braces are
 * missing; the body below is incomplete as shown.
 */
675 register struct file *afd; {
676 struct vrequest treq;
677 register struct vcache *tvc;
682 AFS_STATCNT(afs_closex);
683 /* setup the credentials */
684 if (code = afs_InitReq(&treq, u.u_cred)) return code;
687 /* we're the last one. If we're an AFS vnode, clear the flags,
688 * close the file and release the lock when done. Otherwise, just
689 * let the regular close code work. */
690 if (afd->f_type == DTYPE_VNODE) {
691 tvc = (struct vcache *) afd->f_data;
692 if (IsAfsVnode((struct vnode *)tvc)) {
693 VN_HOLD((struct vnode *) tvc);
694 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
695 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
696 code = vno_close(afd);
698 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)
699 HandleFlock(tvc, LOCK_UN, &treq,
700 u.u_procp->p_pid, 1/*onlymine*/);
702 HandleFlock(tvc, LOCK_UN, &treq, 0, 1/*onlymine*/);
705 grele((struct gnode *) tvc);
707 AFS_RELE((struct vnode *) tvc);
712 /* now, if close not done, do it */
714 code = vno_close(afd);
716 return code; /* return code from vnode layer */
721 /* handle any closing cleanup stuff */
/*
 * afs_close -- VOP close entry point.  Releases this process's flocks,
 * and on a close-for-write (FWRITE | FTRUNC) stores the file back to the
 * server, either synchronously via afs_StoreOnLastReference or by
 * queueing a BOP_STORE request to an idle background daemon.  Maps store
 * failures to user warnings (network, quota, partition full) and
 * propagates them through avc->vc_error so the last closer sees them.
 * The many alternative signatures below reflect per-platform VOP
 * calling conventions selected by #ifdef.
 *
 * NOTE(review): this extract elides many original lines (the embedded
 * line numbers jump); the #ifdef alternatives are interleaved without
 * their #else/#endif lines and several braces/returns are missing.
 */
723 afs_close(OSI_VC_ARG(avc), aflags, lastclose,
724 #if !defined(AFS_SGI65_ENV)
728 #if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
732 lastclose_t lastclose;
733 #if !defined(AFS_SGI65_ENV)
735 #if defined(AFS_SGI64_ENV)
740 #if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
742 afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
745 afs_close(OSI_VC_ARG(avc), aflags, count, acred)
749 afs_close(OSI_VC_ARG(avc), aflags, acred)
754 struct AFS_UCRED *acred;
756 register afs_int32 code, initreq=0;
757 register struct brequest *tb;
758 struct vrequest treq;
764 AFS_STATCNT(afs_close);
765 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
766 ICL_TYPE_INT32, aflags);
768 if (avc->flockCount) {
769 if (code = afs_InitReq(&treq, acred)) return code;
771 HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
774 #if defined(AFS_SGI_ENV)
778 #if defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
780 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
788 if (code = afs_InitReq(&treq, acred)) return code;
793 #if defined(AFS_SGI_ENV)
794 /* unlock any locks for pid - could be wrong for child .. */
795 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
797 get_current_flid(&flid);
798 cleanlocks((vnode_t *)avc, flid.fl_pid, flid.fl_sysid);
799 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1/*onlymine*/);
802 cleanlocks((vnode_t *)avc, flp);
803 #else /* AFS_SGI64_ENV */
804 cleanlocks((vnode_t *)avc, u.u_procp->p_epid, u.u_procp->p_sysid);
805 #endif /* AFS_SGI64_ENV */
806 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1/*onlymine*/);
807 #endif /* AFS_SGI65_ENV */
808 /* afs_chkpgoob will drop and re-acquire the global lock. */
809 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
811 if (avc->flockCount) { /* Release Lock */
812 #if defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV)
813 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1/*onlymine*/);
815 HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
/* Close-for-write: push dirty data back to the fileserver. */
820 if (aflags & (FWRITE | FTRUNC)) {
822 /* do it yourself if daemons are all busy */
823 ObtainWriteLock(&avc->lock,124);
824 code = afs_StoreOnLastReference(avc, &treq);
825 ReleaseWriteLock(&avc->lock);
826 #if defined(AFS_SGI_ENV)
827 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
831 #if defined(AFS_SGI_ENV)
832 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
834 /* at least one daemon is idle, so ask it to do the store.
835 Also, note that we don't lock it any more... */
836 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred, (long)acred->cr_uid,
838 /* sleep waiting for the store to start, then retrieve error code */
839 while ((tb->flags & BUVALID) == 0) {
847 /* VNOVNODE is "acceptable" error code from close, since
848 may happen when deleting a file on another machine while
849 it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
850 if (code == VNOVNODE || code == ENOENT)
853 /* Ensure last closer gets the error. If another thread caused
854 * DoPartialWrite and this thread does not actually store the data,
855 * it may not see the quota error.
857 ObtainWriteLock(&avc->lock,406);
860 osi_ReleaseVM(avc, acred);
862 code = avc->vc_error;
865 ReleaseWriteLock(&avc->lock);
867 /* some codes merit specific complaint */
869 afs_warnuser("afs: failed to store file (network problems)\n");
872 else if (code == ENOSPC) {
873 afs_warnuser("afs: failed to store file (over quota or partition full)\n");
876 else if (code == ENOSPC) {
877 afs_warnuser("afs: failed to store file (partition full)\n");
879 else if (code == EDQUOT) {
880 afs_warnuser("afs: failed to store file (over quota)\n");
884 afs_warnuser("afs: failed to store file (%d)\n", code);
886 /* finally, we flush any text pages lying around here */
891 #if defined(AFS_SGI_ENV)
892 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
893 osi_Assert(avc->opens > 0);
895 /* file open for read */
896 ObtainWriteLock(&avc->lock, 411);
899 osi_ReleaseVM(avc, acred);
901 code = avc->vc_error;
905 ReleaseWriteLock(&avc->lock);
908 if ((avc->vrefCount <= 2) && (avc->states & CUnlinked)) {
909 afs_remunlink(avc, 1); /* ignore any return code */
912 code = afs_CheckCode(code, &treq, 5);
/*
 * afs_fsync -- VOP fsync entry point.  If the file has active writers
 * (execsOrWriters > 0), synchronously stores all dirty segments to the
 * fileserver via afs_StoreAllSegments(..., AFS_SYNC) under a write lock.
 * On Solaris it backs out when called in NFS-server context
 * (T_DONTPEND).  The alternative signatures below are per-platform
 * VOP conventions selected by #ifdef.
 *
 * NOTE(review): this extract elides lines (the embedded line numbers
 * jump); some signature branches, braces and the return statement are
 * not visible below.
 */
919 afs_fsync(avc, fflags, acred, waitfor)
922 #else /* AFS_OSF_ENV */
923 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
924 afs_fsync(OSI_VC_ARG(avc), flag, acred
930 afs_fsync(avc, acred)
934 struct AFS_UCRED *acred;
935 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
942 register afs_int32 code;
943 struct vrequest treq;
947 return avc->vc_error;
949 #if defined(AFS_SUN5_ENV)
950 /* back out if called from NFS server */
951 if (curthread->t_flag & T_DONTPEND)
955 AFS_STATCNT(afs_fsync);
956 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
957 if (code = afs_InitReq(&treq, acred)) return code;
959 #if defined(AFS_SGI_ENV)
960 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
961 if (flag & FSYNC_INVAL)
962 osi_VM_FSyncInval(avc);
963 #endif /* AFS_SGI_ENV */
965 ObtainSharedLock(&avc->lock,18);
967 if (avc->execsOrWriters > 0) {
968 /* put the file back */
969 UpgradeSToWLock(&avc->lock,41);
970 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
971 ConvertWToSLock(&avc->lock);
974 #if defined(AFS_SGI_ENV)
975 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
976 if (code == VNOVNODE) {
977 /* syncing an unlinked file! - non-informative to pass an errno
978 * 102 (== VNOVNODE) to user
984 code = afs_CheckCode(code, &treq, 33);
985 ReleaseSharedLock(&avc->lock);