2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include "../afs/param.h" /* Should be always first */
21 #include <afsconfig.h>
25 #include "../afs/sysincludes.h" /* Standard vendor system headers */
26 #include "../afs/afsincludes.h" /* Afs-based standard headers */
27 #include "../afs/afs_stats.h" /* statistics */
28 #include "../afs/afs_cbqueue.h"
29 #include "../afs/nfsclient.h"
30 #include "../afs/afs_osidnlc.h"
33 extern unsigned char *afs_indexFlags;
35 /* Called by all write-on-close routines: regular afs_close,
36 * store via background daemon and store via the
37 * afs_FlushActiveVCaches routine (when CCORE is on).
38 * avc->lock must be write-locked.
/*
 * afs_StoreOnLastReference -- push dirty chunks of a vcache back to the
 * fileserver on a write-on-close.  Per the block comment above, it is
 * called from regular afs_close, the background store daemon, and
 * afs_FlushActiveVCaches (when CCORE is on); avc->lock must be held
 * for write by the caller.
 *
 * NOTE(review): this view of the file is incomplete (interior lines are
 * elided); comments below describe only what is visible here.
 */
40 afs_StoreOnLastReference(avc, treq)
41 register struct vcache *avc;
42 register struct vrequest *treq;
46 AFS_STATCNT(afs_StoreOnLastReference);
47 /* if CCore flag is set, we clear it and do the extra decrement
48 * ourselves now. If we're called by the CCore clearer, the CCore
49 * flag will already be clear, so we don't have to worry about
50 * clearing it twice. */
51 if (avc->states & CCore) {
52 avc->states &= ~CCore;
53 #if defined(AFS_SGI_ENV)
54 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
56 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
57 * depending on the flags the file was opened with. So, if you make any
58 * changes to the way the execsOrWriters flag is handled check with the
/* undo the artificial reference/credential taken in afs_FakeClose */
61 avc->execsOrWriters--;
62 AFS_RELE((struct vnode *)avc); /* VN_HOLD at set CCore(afs_FakeClose)*/
63 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
64 avc->linkData = (char *)0;
66 /* Now, send the file back. Used to require 0 writers left, but now do
67 * it on every close for write, since two closes in a row are harmless
68 * since first will clean all chunks, and second will be noop. Note that
69 * this will also save confusion when someone keeps a file open
70 * inadvertently, since with old system, writes to the server would never
73 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE/*!sync-to-disk*/);
75 * We have to do these after the above store is done: on some systems like
76 * aix they'll need to flush all the vm dirty pages to the disk via the
77 * strategy routine. During that whole procedure (done under no avc locks)
78 * opens, refcounts would be zero, since it didn't reach the afs_{rd,wr}
79 * routines which means the vcache is a perfect candidate for flushing!
81 #if defined(AFS_SGI_ENV)
82 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
85 avc->execsOrWriters--;
/*
 * afs_MemWrite -- write path for the memory-based chunk cache.
 * Copies data from the caller's uio (auio) into cache chunks via
 * afs_MemWriteUIO, marking each touched chunk dirty (IFDataMod /
 * DWriting / DFEntryMod) so later store passes push it to the server.
 * aio carries I/O flags (e.g. IO_APPEND); acred is the caller's
 * credentials.  noLock appears in the signature but its use is not
 * visible in this view -- presumably it suppresses lock acquisition
 * for callers that already hold avc->lock; TODO confirm against the
 * full source.
 *
 * NOTE(review): interior lines of this function are elided from this
 * view; comments describe only the visible code.
 */
91 afs_MemWrite(avc, auio, aio, acred, noLock)
92 register struct vcache *avc;
95 struct AFS_UCRED *acred; {
96 afs_int32 totalLength;
97 afs_int32 transferLength;
101 register struct dcache *tdc;
105 afs_int32 offset, len, error;
107 struct iovec *tvec; /* again, should have define */
109 register afs_int32 code;
110 struct vrequest treq;
112 AFS_STATCNT(afs_MemWrite);
114 return avc->vc_error;
116 startDate = osi_Time();
117 if (code = afs_InitReq(&treq, acred)) return code;
118 /* otherwise we read */
119 totalLength = auio->afsio_resid;
120 filePos = auio->afsio_offset;
123 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
124 ICL_TYPE_INT32, filePos, ICL_TYPE_INT32, totalLength,
125 ICL_TYPE_INT32, avc->m.Length);
127 afs_MaybeWakeupTruncateDaemon();
128 ObtainWriteLock(&avc->lock,126);
130 #if defined(AFS_SGI_ENV)
134 * afs_xwrite handles setting m.Length
135 * and handles APPEND mode.
136 * Since we are called via strategy, we need to trim the write to
137 * the actual size of the file
139 osi_Assert(filePos <= avc->m.Length);
140 diff = avc->m.Length - filePos;
141 auio->afsio_resid = MIN(totalLength, diff);
142 totalLength = auio->afsio_resid;
145 if (aio & IO_APPEND) {
146 /* append mode, start it at the right spot */
147 #if defined(AFS_SUN56_ENV)
148 auio->uio_loffset = 0;
150 filePos = auio->afsio_offset = avc->m.Length;
154 * Note that we use startDate rather than calling osi_Time() here.
155 * This is to avoid counting lock-waiting time in file date (for ranlib).
157 avc->m.Date = startDate;
/* enforce the process RLIMIT_FSIZE file-size limit (per-platform forms) */
159 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
160 #if defined(AFS_HPUX101_ENV)
161 if ((totalLength + filePos) >> 9 > (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
164 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
166 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
170 ReleaseWriteLock(&avc->lock);
174 #ifdef AFS_VM_RDWR_ENV
176 * If write is implemented via VM, afs_FakeOpen() is called from the
177 * high-level write op.
179 if (avc->execsOrWriters <= 0) {
180 printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
185 avc->states |= CDirty;
186 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
/* main loop: one cache chunk per iteration until all bytes are written */
187 while (totalLength > 0) {
188 /* Read the cached info. If we call GetDCache while the cache
189 * truncate daemon is running we risk overflowing the disk cache.
190 * Instead we check for an existing cache slot. If we cannot
191 * find an existing slot we wait for the cache to drain
192 * before calling GetDCache.
195 tdc = afs_FindDCache(avc, filePos);
197 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
198 len = tdc->f.chunkBytes - offset;
200 } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
201 tdc = afs_FindDCache(avc, filePos);
203 if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
204 (tdc->flags & DFFetching)) {
208 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
209 len = tdc->f.chunkBytes - offset;
213 afs_MaybeWakeupTruncateDaemon();
/* cache too full: drop avc->lock and sleep until the truncate
 * daemon drains enough blocks, then re-acquire it */
214 while (afs_blocksUsed >
215 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
216 ReleaseWriteLock(&avc->lock);
217 if (afs_blocksUsed - afs_blocksDiscarded >
218 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
219 afs_WaitForCacheDrain = 1;
220 afs_osi_Sleep(&afs_WaitForCacheDrain);
222 afs_MaybeFreeDiscardedDCache();
223 afs_MaybeWakeupTruncateDaemon();
224 ObtainWriteLock(&avc->lock,506);
/* CDirty may have been cleared while the lock was dropped; reassert it */
226 avc->states |= CDirty;
227 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
230 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
236 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
237 afs_stats_cmperf.cacheCurrDirtyChunks++;
238 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
240 if (!(tdc->f.states & DWriting)) {
241 /* don't mark entry as mod if we don't have to */
242 tdc->f.states |= DWriting;
243 tdc->flags |= DFEntryMod;
245 len = totalLength; /* write this amount by default */
246 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
247 if (max <= len + offset) { /*if we'd go past the end of this chunk */
248 /* it won't all fit in this chunk, so write as much
252 /* mung uio structure to be right for this transfer */
253 afsio_copy(auio, &tuio, tvec);
254 afsio_trim(&tuio, len);
255 tuio.afsio_offset = offset;
257 code = afs_MemWriteUIO(tdc->f.inode, &tuio);
/* on write failure: invalidate the chunk so nobody trusts its data */
260 ZapDCE(tdc); /* bad data */
261 afs_MemCacheTruncate(tdc->f.inode, 0);
262 afs_stats_cmperf.cacheCurrDirtyChunks--;
263 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
267 /* otherwise we've written some, fixup length, etc and continue with next seg */
268 len = len - tuio.afsio_resid; /* compute amount really transferred */
269 afsio_skip(auio, len); /* advance auio over data written */
270 /* compute new file size */
271 if (offset + len > tdc->f.chunkBytes)
272 afs_AdjustSize(tdc, offset+len);
274 transferLength += len;
276 #if defined(AFS_SGI_ENV)
277 /* afs_xwrite handles setting m.Length */
278 osi_Assert(filePos <= avc->m.Length);
280 if (filePos > avc->m.Length)
281 avc->m.Length = filePos;
283 #ifndef AFS_VM_RDWR_ENV
285 * If write is implemented via VM, afs_DoPartialWrite() is called from
286 * the high-level write op.
289 code = afs_DoPartialWrite(avc, &treq);
299 #ifndef AFS_VM_RDWR_ENV
300 afs_FakeClose(avc, acred);
302 if (error && !avc->vc_error)
303 avc->vc_error = error;
305 ReleaseWriteLock(&avc->lock);
306 osi_FreeSmallSpace(tvec);
308 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
309 work. GFS is truly a poorly-designed interface! */
310 afs_gfshack((struct gnode *) avc);
312 error = afs_CheckCode(error, &treq, 6);
317 /* called on writes */
/*
 * afs_UFSWrite -- write path for the disk (UFS-file) chunk cache.
 * Mirrors afs_MemWrite above, but each chunk is backed by a local file:
 * the chunk is opened with osi_UFSOpen and written through the host
 * vnode layer (the large #if ladder below selects the per-platform
 * VOP_WRITE / VNOP_RDWR variant).  noLock appears in the signature but
 * its only visible uses are in the sync-on-close checks at the end --
 * presumably it also suppresses locking elsewhere; TODO confirm
 * against the full source.
 *
 * NOTE(review): interior lines of this function are elided from this
 * view; comments describe only the visible code.
 */
318 afs_UFSWrite(avc, auio, aio, acred, noLock)
319 register struct vcache *avc;
322 struct AFS_UCRED *acred; {
323 afs_int32 totalLength;
324 afs_int32 transferLength;
328 register struct dcache *tdc;
332 afs_int32 offset, len, error;
334 struct iovec *tvec; /* again, should have define */
335 struct osi_file *tfile;
336 register afs_int32 code;
338 struct vrequest treq;
340 AFS_STATCNT(afs_UFSWrite);
342 return avc->vc_error;
344 startDate = osi_Time();
345 if (code = afs_InitReq(&treq, acred)) return code;
346 /* otherwise we read */
347 totalLength = auio->afsio_resid;
348 filePos = auio->afsio_offset;
351 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
352 ICL_TYPE_INT32, filePos, ICL_TYPE_INT32, totalLength,
353 ICL_TYPE_INT32, avc->m.Length);
355 afs_MaybeWakeupTruncateDaemon();
356 ObtainWriteLock(&avc->lock,556);
358 #if defined(AFS_SGI_ENV)
362 * afs_xwrite handles setting m.Length
363 * and handles APPEND mode.
364 * Since we are called via strategy, we need to trim the write to
365 * the actual size of the file
367 osi_Assert(filePos <= avc->m.Length);
368 diff = avc->m.Length - filePos;
369 auio->afsio_resid = MIN(totalLength, diff);
370 totalLength = auio->afsio_resid;
373 if (aio & IO_APPEND) {
374 /* append mode, start it at the right spot */
375 #if defined(AFS_SUN56_ENV)
376 auio->uio_loffset = 0;
378 filePos = auio->afsio_offset = avc->m.Length;
382 * Note that we use startDate rather than calling osi_Time() here.
383 * This is to avoid counting lock-waiting time in file date (for ranlib).
385 avc->m.Date = startDate;
/* enforce the process RLIMIT_FSIZE file-size limit (per-platform forms) */
387 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
388 #if defined(AFS_HPUX101_ENV)
389 if ((totalLength + filePos) >> 9 > p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
392 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
394 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
398 ReleaseWriteLock(&avc->lock);
402 #ifdef AFS_VM_RDWR_ENV
404 * If write is implemented via VM, afs_FakeOpen() is called from the
405 * high-level write op.
407 if (avc->execsOrWriters <= 0) {
408 printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
413 avc->states |= CDirty;
414 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
/* main loop: one cache chunk per iteration until all bytes are written */
415 while (totalLength > 0) {
416 /* read the cached info */
418 tdc = afs_FindDCache(avc, filePos);
420 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
421 len = tdc->f.chunkBytes - offset;
423 } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
424 tdc = afs_FindDCache(avc, filePos);
426 if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
427 (tdc->flags & DFFetching)) {
431 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
432 len = tdc->f.chunkBytes - offset;
436 afs_MaybeWakeupTruncateDaemon();
/* cache too full: drop avc->lock and sleep until the truncate
 * daemon drains enough blocks, then re-acquire it */
437 while (afs_blocksUsed >
438 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
439 ReleaseWriteLock(&avc->lock);
440 if (afs_blocksUsed - afs_blocksDiscarded >
441 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
442 afs_WaitForCacheDrain = 1;
443 afs_osi_Sleep(&afs_WaitForCacheDrain);
445 afs_MaybeFreeDiscardedDCache();
446 afs_MaybeWakeupTruncateDaemon();
447 ObtainWriteLock(&avc->lock,509);
/* CDirty may have been cleared while the lock was dropped; reassert it */
449 avc->states |= CDirty;
450 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
453 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
459 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
460 afs_stats_cmperf.cacheCurrDirtyChunks++;
461 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
463 if (!(tdc->f.states & DWriting)) {
464 /* don't mark entry as mod if we don't have to */
465 tdc->f.states |= DWriting;
466 tdc->flags |= DFEntryMod;
468 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
469 len = totalLength; /* write this amount by default */
470 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
471 if (max <= len + offset) { /*if we'd go past the end of this chunk */
472 /* it won't all fit in this chunk, so write as much
476 /* mung uio structure to be right for this transfer */
477 afsio_copy(auio, &tuio, tvec);
478 afsio_trim(&tuio, len);
479 tuio.afsio_offset = offset;
/* per-platform write of tuio into the local cache file */
483 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL, NULL, &afs_osi_cred);
487 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
489 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t)&offset, &tuio, NULL, NULL, -1);
491 #endif /* AFS_AIX41_ENV */
492 #else /* AFS_AIX_ENV */
495 VOP_RWLOCK(tfile->vnode, 1);
496 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
497 VOP_RWUNLOCK(tfile->vnode, 1);
499 if (code == ENOSPC) afs_warnuser("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
501 #if defined(AFS_SGI_ENV)
503 avc->states |= CWritingUFS;
504 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
505 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred, code);
506 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
507 avc->states &= ~CWritingUFS;
/* temporarily swap in the cache-manager credentials for the write */
512 struct ucred *tmpcred = u.u_cred;
513 u.u_cred = &afs_osi_cred;
514 tuio.uio_rw = UIO_WRITE;
516 VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
520 #else /* AFS_OSF_ENV */
521 #if defined(AFS_HPUX100_ENV)
524 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
529 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
531 #if defined(AFS_LINUX20_ENV)
533 code = osi_file_uio_rdwr(tfile, &tuio, UIO_WRITE);
536 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
538 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
539 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
540 VOP_UNLOCK(tfile->vnode, 0, current_proc());
543 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
544 #endif /* AFS_DARWIN_ENV || AFS_FBSD_ENV */
545 #endif /* AFS_LINUX20_ENV */
546 #endif /* AFS_HPUX100_ENV */
547 #endif /* AFS_OSF_ENV */
548 #endif /* AFS_SGI_ENV */
549 #endif /* AFS_SUN5_ENV */
550 #endif /* AFS_AIX41_ENV */
/* on write failure: invalidate the chunk so nobody trusts its data */
553 ZapDCE(tdc); /* bad data */
554 osi_UFSTruncate(tfile,0); /* fake truncate the segment */
555 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
556 afs_stats_cmperf.cacheCurrDirtyChunks--;
557 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
559 afs_CFileClose(tfile);
562 /* otherwise we've written some, fixup length, etc and continue with next seg */
563 len = len - tuio.afsio_resid; /* compute amount really transferred */
564 afsio_skip(auio, len); /* advance auio over data written */
565 /* compute new file size */
566 if (offset + len > tdc->f.chunkBytes)
567 afs_AdjustSize(tdc, offset+len);
569 transferLength += len;
571 #if defined(AFS_SGI_ENV)
572 /* afs_xwrite handles setting m.Length */
573 osi_Assert(filePos <= avc->m.Length);
575 if (filePos > avc->m.Length) {
576 avc->m.Length = filePos;
580 #ifndef AFS_VM_RDWR_ENV
582 * If write is implemented via VM, afs_DoPartialWrite() is called from
583 * the high-level write op.
586 code = afs_DoPartialWrite(avc, &treq);
596 #ifndef AFS_VM_RDWR_ENV
597 afs_FakeClose(avc, acred);
599 error = afs_CheckCode(error, &treq, 7);
600 /* This set is here so we get the CheckCode. */
601 if (error && !avc->vc_error)
602 avc->vc_error = error;
604 ReleaseWriteLock(&avc->lock);
605 osi_FreeSmallSpace(tvec);
607 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
608 work. GFS is truly a poorly-designed interface! */
609 afs_gfshack((struct gnode *) avc);
/* synchronous writes: fsync now unless the request came from the NFS
 * translator (the per-platform conditions below pick the right flag) */
611 #ifndef AFS_VM_RDWR_ENV
613 * If write is implemented via VM, afs_fsync() is called from the high-level
616 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
617 if (noLock && (aio & IO_SYNC)) {
620 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
621 * we're doing them because the file was opened with O_SYNCIO specified,
622 * we have to look in the u area. No single mechanism here!!
624 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
626 if (noLock && (aio & FSYNC)) {
629 if (!AFS_NFSXLATORREQ(acred))
630 afs_fsync(avc, acred);
636 /* do partial write if we're low on unmodified chunks */
/*
 * afs_DoPartialWrite -- if the count of dirty cache chunks exceeds the
 * configured maximum, store all of avc's segments back to the server
 * asynchronously (with VM sync/invalidate on Solaris) to free up
 * dirty-chunk quota.  Returns 0 immediately when under the limit; the
 * tail of the function (after the store calls) is elided from this view.
 */
637 afs_DoPartialWrite(avc, areq)
638 register struct vcache *avc;
639 struct vrequest *areq; {
640 register afs_int32 code;
642 if (afs_stats_cmperf.cacheCurrDirtyChunks <= afs_stats_cmperf.cacheMaxDirtyChunks)
643 return 0; /* nothing to do */
644 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
645 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
646 ICL_TYPE_INT32, avc->m.Length);
647 #if defined(AFS_SUN5_ENV)
648 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
650 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
657 #if !defined (AFS_AIX_ENV) && !defined (AFS_HPUX_ENV) && !defined (AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_FBSD_ENV)
659 #define vno_close(X) vn_close((X), 0, NOCRED)
660 #elif defined(AFS_DUX40_ENV)
661 #define vno_close vn_close
663 /* We don't need this for AIX since:
664 * (1) aix doesn't use fileops and it calls close directly instead
665 * (where the unlocking should be done) and
666 * (2) temporarily, the aix lockf isn't supported yet.
668 * this stupid routine is used to release the flocks held on a
669 * particular file descriptor. Sun doesn't pass file descr. info
670 * through to the vnode layer, and yet we must unlock flocked files
671 * on the *appropriate* (not first, as in System V) close call. Thus
673 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
674 * file ops structure into any afs file when it gets flocked.
675 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/*
 * afs_closex (K&R parameter declaration below; the function-name line
 * is elided from this view -- identified via the AFS_STATCNT call) --
 * intercepted close() for flocked AFS files.  Per the block comment
 * above, it releases the flocks held on this file descriptor at the
 * appropriate close, then performs the normal vnode-layer close.
 * Returns the vnode layer's close result.
 */
679 register struct file *afd; {
680 struct vrequest treq;
681 register struct vcache *tvc;
686 AFS_STATCNT(afs_closex);
687 /* setup the credentials */
688 if (code = afs_InitReq(&treq, u.u_cred)) return code;
691 /* we're the last one. If we're an AFS vnode, clear the flags,
692 * close the file and release the lock when done. Otherwise, just
693 * let the regular close code work. */
694 if (afd->f_type == DTYPE_VNODE) {
695 tvc = (struct vcache *) afd->f_data;
696 if (IsAfsVnode((struct vnode *)tvc)) {
/* hold the vnode across vno_close so the vcache can't go away
 * before we release its flocks below */
697 VN_HOLD((struct vnode *) tvc);
698 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
699 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
700 code = vno_close(afd);
702 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)
703 HandleFlock(tvc, LOCK_UN, &treq,
704 u.u_procp->p_pid, 1/*onlymine*/);
706 HandleFlock(tvc, LOCK_UN, &treq, 0, 1/*onlymine*/);
709 grele((struct gnode *) tvc);
711 AFS_RELE((struct vnode *) tvc);
716 /* now, if close not done, do it */
718 code = vno_close(afd);
720 return code; /* return code from vnode layer */
725 /* handle any closing cleanup stuff */
/*
 * afs_close -- VFS close entry point; the alternative signatures below
 * (#if variants) match each platform's vnode-ops calling convention.
 * On a close for write (FWRITE | FTRUNC) the dirty data is stored back
 * to the fileserver, either directly via afs_StoreOnLastReference or
 * by queueing a BOP_STORE request to an idle background daemon; store
 * failures (quota, partition full, network) are reported to the user
 * and latched into avc->vc_error so the last closer sees them.
 *
 * NOTE(review): interior lines of this function are elided from this
 * view; comments describe only the visible code.
 */
727 afs_close(OSI_VC_ARG(avc), aflags, lastclose,
728 #if !defined(AFS_SGI65_ENV)
732 #if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
736 lastclose_t lastclose;
737 #if !defined(AFS_SGI65_ENV)
739 #if defined(AFS_SGI64_ENV)
744 #if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
746 afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
749 afs_close(OSI_VC_ARG(avc), aflags, count, acred)
753 afs_close(OSI_VC_ARG(avc), aflags, acred)
758 struct AFS_UCRED *acred;
760 register afs_int32 code, initreq=0;
761 register struct brequest *tb;
762 struct vrequest treq;
768 AFS_STATCNT(afs_close);
769 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
770 ICL_TYPE_INT32, aflags);
772 if (avc->flockCount) {
773 if (code = afs_InitReq(&treq, acred)) return code;
775 HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
778 #if defined(AFS_SGI_ENV)
782 #if defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
784 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
792 if (code = afs_InitReq(&treq, acred)) return code;
797 #if defined(AFS_SGI_ENV)
798 /* unlock any locks for pid - could be wrong for child .. */
799 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
801 get_current_flid(&flid);
802 cleanlocks((vnode_t *)avc, flid.fl_pid, flid.fl_sysid);
803 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1/*onlymine*/);
806 cleanlocks((vnode_t *)avc, flp);
807 #else /* AFS_SGI64_ENV */
808 cleanlocks((vnode_t *)avc, u.u_procp->p_epid, u.u_procp->p_sysid);
809 #endif /* AFS_SGI64_ENV */
810 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1/*onlymine*/);
811 #endif /* AFS_SGI65_ENV */
812 /* afs_chkpgoob will drop and re-acquire the global lock. */
813 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
815 if (avc->flockCount) { /* Release Lock */
816 #if defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV)
817 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1/*onlymine*/);
819 HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
/* close for write: push dirty data back to the fileserver */
824 if (aflags & (FWRITE | FTRUNC)) {
826 /* do it yourself if daemons are all busy */
827 ObtainWriteLock(&avc->lock,124);
828 code = afs_StoreOnLastReference(avc, &treq);
829 ReleaseWriteLock(&avc->lock);
830 #if defined(AFS_SGI_ENV)
831 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
835 #if defined(AFS_SGI_ENV)
836 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
838 /* at least one daemon is idle, so ask it to do the store.
839 Also, note that we don't lock it any more... */
840 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred, (long)acred->cr_uid,
842 /* sleep waiting for the store to start, then retrieve error code */
843 while ((tb->flags & BUVALID) == 0) {
851 /* VNOVNODE is "acceptable" error code from close, since
852 may happen when deleting a file on another machine while
853 it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
854 if (code == VNOVNODE || code == ENOENT)
857 /* Ensure last closer gets the error. If another thread caused
858 * DoPartialWrite and this thread does not actually store the data,
859 * it may not see the quota error.
861 ObtainWriteLock(&avc->lock,406);
864 osi_ReleaseVM(avc, acred);
866 code = avc->vc_error;
869 ReleaseWriteLock(&avc->lock);
871 /* some codes merit specific complaint */
873 afs_warnuser("afs: failed to store file (network problems)\n");
/* per-platform ENOSPC/EDQUOT distinctions (some kernels conflate them) */
876 else if (code == ENOSPC) {
877 afs_warnuser("afs: failed to store file (over quota or partition full)\n");
880 else if (code == ENOSPC) {
881 afs_warnuser("afs: failed to store file (partition full)\n");
883 else if (code == EDQUOT) {
884 afs_warnuser("afs: failed to store file (over quota)\n");
888 afs_warnuser("afs: failed to store file (%d)\n", code);
890 /* finally, we flush any text pages lying around here */
895 #if defined(AFS_SGI_ENV)
896 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
897 osi_Assert(avc->opens > 0);
899 /* file open for read */
900 ObtainWriteLock(&avc->lock, 411);
903 osi_ReleaseVM(avc, acred);
905 code = avc->vc_error;
909 ReleaseWriteLock(&avc->lock);
/* last reference to an unlinked-while-open file: finish the remove */
912 if ((avc->vrefCount <= 2) && (avc->states & CUnlinked)) {
913 afs_remunlink(avc, 1); /* ignore any return code */
916 code = afs_CheckCode(code, &treq, 5);
923 afs_fsync(avc, fflags, acred, waitfor)
926 #else /* AFS_OSF_ENV */
927 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
928 afs_fsync(OSI_VC_ARG(avc), flag, acred
934 afs_fsync(avc, acred)
938 struct AFS_UCRED *acred;
939 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
946 register afs_int32 code;
947 struct vrequest treq;
951 return avc->vc_error;
953 #if defined(AFS_SUN5_ENV)
954 /* back out if called from NFS server */
955 if (curthread->t_flag & T_DONTPEND)
959 AFS_STATCNT(afs_fsync);
960 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
961 if (code = afs_InitReq(&treq, acred)) return code;
963 #if defined(AFS_SGI_ENV)
964 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
965 if (flag & FSYNC_INVAL)
966 osi_VM_FSyncInval(avc);
967 #endif /* AFS_SGI_ENV */
969 ObtainSharedLock(&avc->lock,18);
971 if (avc->execsOrWriters > 0) {
972 /* put the file back */
973 UpgradeSToWLock(&avc->lock,41);
974 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
975 ConvertWToSLock(&avc->lock);
978 #if defined(AFS_SGI_ENV)
979 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
980 if (code == VNOVNODE) {
981 /* syncing an unlinked file! - non-informative to pass an errno
982 * 102 (== VNOVNODE) to user
988 code = afs_CheckCode(code, &treq, 33);
989 ReleaseSharedLock(&avc->lock);