2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "afs/param.h"
26 #include "afs/sysincludes.h" /* Standard vendor system headers */
27 #include "afsincludes.h" /* Afs-based standard headers */
28 #include "afs/afs_stats.h" /* statistics */
29 #include "afs/afs_cbqueue.h"
30 #include "afs/nfsclient.h"
31 #include "afs/afs_osidnlc.h"
34 extern unsigned char *afs_indexFlags;
36 /* Called by all write-on-close routines: regular afs_close,
37 * store via background daemon and store via the
38 * afs_FlushActiveVCaches routine (when CCORE is on).
39 * avc->lock must be write-locked.
/*
 * afs_StoreOnLastReference
 * Push dirty chunks of avc back to the fileserver on the last write-close.
 * Called from afs_close, the background store daemon, and
 * afs_FlushActiveVCaches (CCORE case).  avc->lock must be write-locked.
 * NOTE(review): this extract omits some original lines; comments describe
 * only the code visible here.
 */
42 afs_StoreOnLastReference(register struct vcache *avc,
43 			 register struct vrequest *treq)
47     AFS_STATCNT(afs_StoreOnLastReference);
48     /* if CCore flag is set, we clear it and do the extra decrement
49      * ourselves now. If we're called by the CCore clearer, the CCore
50      * flag will already be clear, so we don't have to worry about
51      * clearing it twice. */
52     if (avc->states & CCore) {
53 	avc->states &= ~CCore;
54 #if defined(AFS_SGI_ENV)
55 	osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
57 	/* WARNING: Our linux cm code treats the execsOrWriters counter differently
58 	 * depending on the flags the file was opened with. So, if you make any
59 	 * changes to the way the execsOrWriters flag is handled check with the
62 	avc->execsOrWriters--;
	/* Drop the extra vnode hold and credential reference that
	 * afs_FakeClose took when it set CCore. */
63 	AFS_RELE(AFSTOV(avc));	/* VN_HOLD at set CCore(afs_FakeClose) */
64 	crfree((struct AFS_UCRED *)avc->linkData);	/* "crheld" in afs_FakeClose */
67     /* Now, send the file back. Used to require 0 writers left, but now do
68      * it on every close for write, since two closes in a row are harmless
69      * since first will clean all chunks, and second will be noop. Note that
70      * this will also save confusion when someone keeps a file open
71      * inadvertently, since with old system, writes to the server would never
	/* AFS_LASTSTORE: store all dirty segments without forcing sync-to-disk */
74     code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
76      * We have to do these after the above store is done: in some systems like
77      * aix they'll need to flush all the vm dirty pages to the disk via the
78      * strategy routine. During that whole procedure (done under no avc locks)
79      * opens, refcounts would be zero, since it didn't reach the afs_{rd,wr}
80      * routines which means the vcache is a perfect candidate for flushing!
82 #if defined(AFS_SGI_ENV)
83     osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
	/* Second decrement: the normal (non-CCore) writer reference. */
86     avc->execsOrWriters--;
/*
 * afs_MemWrite
 * Write path for the in-memory cache: copies data from the caller's uio
 * into memory-cache chunks (dcache entries) one chunk at a time, marking
 * each chunk dirty so the store daemon / close path can ship it to the
 * fileserver later.
 * Parameters (visible here):
 *   avc    - the AFS vcache being written
 *   auio   - source uio describing the user's buffer and file offset
 *   aio    - ioflag bits (IO_APPEND handled below)
 *   acred  - credentials used for the request
 *   noLock - caller-managed-locking flag (checked in elided lines)
 * NOTE(review): this extract omits some original lines (error labels,
 * #else branches, closing braces); comments cover only the visible code.
 */
93 afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
94 	     struct AFS_UCRED *acred, int noLock)
96     afs_size_t totalLength;
97     afs_size_t transferLength;
99     afs_size_t offset, len;
100     afs_int32 tlen, trimlen;
103     register struct dcache *tdc;
109     struct iovec *tvec;		/* again, should have define */
110     register afs_int32 code;
111     struct vrequest treq;
113     AFS_STATCNT(afs_MemWrite);
	/* Fail fast if a previous store already recorded an error on the vnode. */
115 	return avc->vc_error;
117     startDate = osi_Time();
118     if ((code = afs_InitReq(&treq, acred)))
120     /* otherwise we read */
121     totalLength = auio->afsio_resid;
122     filePos = auio->afsio_offset;
125     afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
126 	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
127 	       ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
128 	       ICL_HANDLE_OFFSET(avc->m.Length));
130 	afs_MaybeWakeupTruncateDaemon();
131 	ObtainWriteLock(&avc->lock, 126);
133 #if defined(AFS_SGI_ENV)
137      * afs_xwrite handles setting m.Length
138      * and handles APPEND mode.
139      * Since we are called via strategy, we need to trim the write to
140      * the actual size of the file
142     osi_Assert(filePos <= avc->m.Length);
143     diff = avc->m.Length - filePos;
144     auio->afsio_resid = MIN(totalLength, diff);
145     totalLength = auio->afsio_resid;
148     if (aio & IO_APPEND) {
149 	/* append mode, start it at the right spot */
150 #if defined(AFS_SUN56_ENV)
151 	auio->uio_loffset = 0;
153 	filePos = auio->afsio_offset = avc->m.Length;
157      * Note that we use startDate rather than calling osi_Time() here.
158      * This is to avoid counting lock-waiting time in file date (for ranlib).
160     avc->m.Date = startDate;
162 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
163 #if defined(AFS_HPUX101_ENV)
	/* Enforce the process file-size rlimit (HP-UX/GFS variants; >>9 because
	 * the limit is kept in 512-byte blocks on these platforms). */
164     if ((totalLength + filePos) >> 9 >
165 	(p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
168     if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
170     if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
174 	ReleaseWriteLock(&avc->lock);
178 #ifdef AFS_VM_RDWR_ENV
180      * If write is implemented via VM, afs_FakeOpen() is called from the
181      * high-level write op.
183 	if (avc->execsOrWriters <= 0) {
184 	    printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc,
185 		   avc->execsOrWriters);
190     avc->states |= CDirty;
191     tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
	/* Main transfer loop: one iteration per cache chunk touched. */
192     while (totalLength > 0) {
193 	/* Read the cached info. If we call GetDCache while the cache
194 	 * truncate daemon is running we risk overflowing the disk cache.
195 	 * Instead we check for an existing cache slot. If we cannot
196 	 * find an existing slot we wait for the cache to drain
197 	 * before calling GetDCache.
200 	    tdc = afs_FindDCache(avc, filePos);
202 		ObtainWriteLock(&tdc->lock, 653);
203 	} else if (afs_blocksUsed >
204 		   (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
205 	    tdc = afs_FindDCache(avc, filePos);
207 		ObtainWriteLock(&tdc->lock, 654);
		/* Existing chunk is stale or mid-fetch: drop it and fall
		 * through to the drain-and-GetDCache path. */
208 		if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
209 		    || (tdc->dflags & DFFetching)) {
210 		    ReleaseWriteLock(&tdc->lock);
216 		afs_MaybeWakeupTruncateDaemon();
		/* Block until the cache drains below the high-water mark;
		 * avc->lock is dropped across the sleep and re-acquired. */
217 		while (afs_blocksUsed >
218 		       (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
219 		    ReleaseWriteLock(&avc->lock);
220 		    if (afs_blocksUsed - afs_blocksDiscarded >
221 			(CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
222 			afs_WaitForCacheDrain = 1;
223 			afs_osi_Sleep(&afs_WaitForCacheDrain);
225 		    afs_MaybeFreeDiscardedDCache();
226 		    afs_MaybeWakeupTruncateDaemon();
227 		    ObtainWriteLock(&avc->lock, 506);
		/* Re-assert CDirty: it may have been cleared while avc->lock
		 * was dropped above. */
229 		avc->states |= CDirty;
230 		tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
232 		    ObtainWriteLock(&tdc->lock, 655);
235 	    tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
237 		ObtainWriteLock(&tdc->lock, 656);
	/* Account the chunk as dirty exactly once (IFDataMod guards the
	 * cacheCurrDirtyChunks counter). */
243 	if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
244 	    afs_stats_cmperf.cacheCurrDirtyChunks++;
245 	    afs_indexFlags[tdc->index] |= IFDataMod;	/* so it doesn't disappear */
247 	if (!(tdc->f.states & DWriting)) {
248 	    /* don't mark entry as mod if we don't have to */
249 	    tdc->f.states |= DWriting;
250 	    tdc->dflags |= DFEntryMod;
252 	len = totalLength;	/* write this amount by default */
253 	offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
254 	max = AFS_CHUNKTOSIZE(tdc->f.chunk);	/* max size of this chunk */
255 	if (max <= len + offset) {	/*if we'd go past the end of this chunk */
256 	    /* it won't all fit in this chunk, so write as much
260 	/* mung uio structure to be right for this transfer */
261 	afsio_copy(auio, &tuio, tvec);
263 	afsio_trim(&tuio, trimlen);
264 	tuio.afsio_offset = offset;
	/* Actual data copy into the memory-cache object for this chunk. */
266 	code = afs_MemWriteUIO(tdc->f.inode, &tuio);
268 	    void *mep;		/* XXX in prototype world is struct memCacheEntry * */
	    /* Write failed: invalidate the chunk entirely rather than leave
	     * partially-written data in the cache. */
270 	    ZapDCE(tdc);	/* bad data */
271 	    mep = afs_MemCacheOpen(tdc->f.inode);
272 	    afs_MemCacheTruncate(mep, 0);
273 	    afs_MemCacheClose(mep);
274 	    afs_stats_cmperf.cacheCurrDirtyChunks--;
275 	    afs_indexFlags[tdc->index] &= ~IFDataMod;	/* so it does disappear */
276 	    ReleaseWriteLock(&tdc->lock);
280 	/* otherwise we've written some, fixup length, etc and continue with next seg */
281 	len = len - tuio.afsio_resid;	/* compute amount really transferred */
283 	afsio_skip(auio, tlen);	/* advance auio over data written */
284 	/* compute new file size */
285 	if (offset + len > tdc->f.chunkBytes) {
286 	    afs_int32 tlength = offset + len;
287 	    afs_AdjustSize(tdc, tlength);
288 	    if (tdc->validPos < filePos + len)
289 		tdc->validPos = filePos + len;
292 	transferLength += len;
294 #if defined(AFS_SGI_ENV)
295 	/* afs_xwrite handles setting m.Length */
296 	osi_Assert(filePos <= avc->m.Length);
	/* Extend the cached file length if this write went past EOF. */
298 	if (filePos > avc->m.Length) {
299 	    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
300 		       __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
301 		       ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
302 		       ICL_HANDLE_OFFSET(filePos));
303 	    avc->m.Length = filePos;
306 	ReleaseWriteLock(&tdc->lock);
308 #if !defined(AFS_VM_RDWR_ENV)
310      * If write is implemented via VM, afs_DoPartialWrite() is called from
311      * the high-level write op.
314 	    code = afs_DoPartialWrite(avc, &treq);
322 #ifndef AFS_VM_RDWR_ENV
323 	afs_FakeClose(avc, acred);
325     if (error && !avc->vc_error)
326 	avc->vc_error = error;
328 	ReleaseWriteLock(&avc->lock);
329     osi_FreeSmallSpace(tvec);
331     /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
332      * work. GFS is truly a poorly-designed interface! */
333     afs_gfshack((struct gnode *)avc);
335     error = afs_CheckCode(error, &treq, 6);
340 /* called on writes */
/*
 * afs_UFSWrite
 * Write path for the on-disk (UFS) cache: mirrors afs_MemWrite but writes
 * each chunk through the host OS vnode layer into a local cache file,
 * using the appropriate per-platform VOP_WRITE/VOP_RDWR call.
 * Parameters (visible here):
 *   avc    - the AFS vcache being written
 *   auio   - source uio describing the user's buffer and file offset
 *   aio    - ioflag bits (IO_APPEND / IO_SYNC handled below)
 *   acred  - credentials used for the request
 *   noLock - caller-managed-locking flag (checked in elided lines)
 * NOTE(review): this extract omits some original lines (error labels,
 * #else branches, closing braces); comments cover only the visible code.
 */
342 afs_UFSWrite(register struct vcache *avc, struct uio *auio, int aio,
343 	     struct AFS_UCRED *acred, int noLock)
345     afs_size_t totalLength;
346     afs_size_t transferLength;
348     afs_size_t offset, len;
353     register struct dcache *tdc;
359     struct iovec *tvec;		/* again, should have define */
360     struct osi_file *tfile;
361     register afs_int32 code;
362     struct vrequest treq;
364     AFS_STATCNT(afs_UFSWrite);
	/* Fail fast if a previous store already recorded an error on the vnode. */
366 	return avc->vc_error;
368     startDate = osi_Time();
369     if ((code = afs_InitReq(&treq, acred)))
371     /* otherwise we read */
372     totalLength = auio->afsio_resid;
373     filePos = auio->afsio_offset;
376     afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
377 	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
378 	       ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
379 	       ICL_HANDLE_OFFSET(avc->m.Length));
381 	afs_MaybeWakeupTruncateDaemon();
382 	ObtainWriteLock(&avc->lock, 556);
384 #if defined(AFS_SGI_ENV)
388      * afs_xwrite handles setting m.Length
389      * and handles APPEND mode.
390      * Since we are called via strategy, we need to trim the write to
391      * the actual size of the file
393     osi_Assert(filePos <= avc->m.Length);
394     diff = avc->m.Length - filePos;
395     auio->afsio_resid = MIN(totalLength, diff);
396     totalLength = auio->afsio_resid;
399     if (aio & IO_APPEND) {
400 	/* append mode, start it at the right spot */
401 #if defined(AFS_SUN56_ENV)
402 	auio->uio_loffset = 0;
404 	filePos = auio->afsio_offset = avc->m.Length;
408      * Note that we use startDate rather than calling osi_Time() here.
409      * This is to avoid counting lock-waiting time in file date (for ranlib).
411     avc->m.Date = startDate;
413 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
414 #if defined(AFS_HPUX101_ENV)
	/* Enforce the process file-size rlimit (HP-UX/GFS variants; >>9 because
	 * the limit is kept in 512-byte blocks on these platforms). */
415     if ((totalLength + filePos) >> 9 >
416 	p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
419     if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
421     if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
425 	ReleaseWriteLock(&avc->lock);
429 #ifdef AFS_VM_RDWR_ENV
431      * If write is implemented via VM, afs_FakeOpen() is called from the
432      * high-level write op.
434 	if (avc->execsOrWriters <= 0) {
435 	    printf("WARNING: afs_ufswr vcp=%x, exOrW=%d\n", avc,
436 		   avc->execsOrWriters);
441     avc->states |= CDirty;
442     tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
	/* Main transfer loop: one iteration per cache chunk touched. */
443     while (totalLength > 0) {
445 	 * The following line is necessary because afs_GetDCache with
446 	 * flag == 4 expects the length field to be filled. It decides
447 	 * from this whether it's necessary to fetch data into the chunk
448 	 * before writing or not (when the whole chunk is overwritten!).
450 	len = totalLength;	/* write this amount by default */
451 	/* read the cached info */
453 	    tdc = afs_FindDCache(avc, filePos);
455 		ObtainWriteLock(&tdc->lock, 657);
456 	} else if (afs_blocksUsed >
457 		   (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
458 	    tdc = afs_FindDCache(avc, filePos);
460 		ObtainWriteLock(&tdc->lock, 658);
		/* Existing chunk is stale or mid-fetch: drop it and fall
		 * through to the drain-and-GetDCache path. */
461 		if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
462 		    || (tdc->dflags & DFFetching)) {
463 		    ReleaseWriteLock(&tdc->lock);
469 		afs_MaybeWakeupTruncateDaemon();
		/* Block until the cache drains below the high-water mark;
		 * avc->lock is dropped across the sleep and re-acquired. */
470 		while (afs_blocksUsed >
471 		       (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
472 		    ReleaseWriteLock(&avc->lock);
473 		    if (afs_blocksUsed - afs_blocksDiscarded >
474 			(CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
475 			afs_WaitForCacheDrain = 1;
476 			afs_osi_Sleep(&afs_WaitForCacheDrain);
478 		    afs_MaybeFreeDiscardedDCache();
479 		    afs_MaybeWakeupTruncateDaemon();
480 		    ObtainWriteLock(&avc->lock, 509);
		/* Re-assert CDirty: it may have been cleared while avc->lock
		 * was dropped above. */
482 		avc->states |= CDirty;
483 		tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
485 		    ObtainWriteLock(&tdc->lock, 659);
488 	    tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
490 		ObtainWriteLock(&tdc->lock, 660);
	/* Account the chunk as dirty exactly once (IFDataMod guards the
	 * cacheCurrDirtyChunks counter). */
496 	if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
497 	    afs_stats_cmperf.cacheCurrDirtyChunks++;
498 	    afs_indexFlags[tdc->index] |= IFDataMod;	/* so it doesn't disappear */
500 	if (!(tdc->f.states & DWriting)) {
501 	    /* don't mark entry as mod if we don't have to */
502 	    tdc->f.states |= DWriting;
503 	    tdc->dflags |= DFEntryMod;
	/* Open the local cache file backing this chunk. */
505 	tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
506 	len = totalLength;	/* write this amount by default */
507 	offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
508 	max = AFS_CHUNKTOSIZE(tdc->f.chunk);	/* max size of this chunk */
509 	if (max <= len + offset) {	/*if we'd go past the end of this chunk */
510 	    /* it won't all fit in this chunk, so write as much
514 	/* mung uio structure to be right for this transfer */
515 	afsio_copy(auio, &tuio, tvec);
517 	afsio_trim(&tuio, trimlen);
518 	tuio.afsio_offset = offset;
	/* Per-platform write into the cache file.  Each branch performs the
	 * same logical operation: write tuio into tfile->vnode with
	 * afs_osi_credp, taking whatever vnode locking the OS requires. */
519 #if defined(AFS_AIX41_ENV)
522 	    VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL,
523 		      NULL, afs_osi_credp);
525 #elif defined(AFS_AIX32_ENV)
526 	code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
527 #elif defined(AFS_AIX_ENV)
529 	    VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) & offset,
530 		      &tuio, NULL, NULL, -1);
531 #elif defined(AFS_SUN5_ENV)
533 #ifdef AFS_SUN510_ENV
537 	VOP_RWLOCK(tfile->vnode, 1, &ct);
538 	code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
539 	VOP_RWUNLOCK(tfile->vnode, 1, &ct);
542 	VOP_RWLOCK(tfile->vnode, 1);
543 	code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
544 	VOP_RWUNLOCK(tfile->vnode, 1);
549 		("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
550 #elif defined(AFS_SGI_ENV)
	/* CWritingUFS flags the vcache so concurrent code knows a UFS cache
	 * write is in flight on SGI. */
552 	avc->states |= CWritingUFS;
553 	AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
554 	AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
555 	AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
556 	avc->states &= ~CWritingUFS;
558 #elif defined(AFS_OSF_ENV)
560 	    struct ucred *tmpcred = u.u_cred;
561 	    u.u_cred = afs_osi_credp;
562 	    tuio.uio_rw = UIO_WRITE;
564 	    VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, code);
568 #elif defined(AFS_HPUX100_ENV)
571 	    code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
574 #elif defined(AFS_LINUX20_ENV)
576 	    code = osi_file_uio_rdwr(tfile, &tuio, UIO_WRITE);
578 #elif defined(AFS_DARWIN_ENV)
580 	    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
581 	    code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
582 	    VOP_UNLOCK(tfile->vnode, 0, current_proc());
584 #elif defined(AFS_FBSD50_ENV)
586 	    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
587 	    code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
588 	    VOP_UNLOCK(tfile->vnode, 0, curthread);
590 #elif defined(AFS_XBSD_ENV)
592 	    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
593 	    code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
594 	    VOP_UNLOCK(tfile->vnode, 0, curproc);
598 	tuio.uio_fpflags &= ~FSYNCIO;	/* don't do sync io */
600 	code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
	/* Write failed: invalidate the chunk entirely rather than leave
	 * partially-written data in the cache. */
604 	    ZapDCE(tdc);	/* bad data */
605 	    osi_UFSTruncate(tfile, 0);	/* fake truncate the segment */
606 	    afs_AdjustSize(tdc, 0);	/* sets f.chunkSize to 0 */
607 	    afs_stats_cmperf.cacheCurrDirtyChunks--;
608 	    afs_indexFlags[tdc->index] &= ~IFDataMod;	/* so it does disappear */
609 	    afs_CFileClose(tfile);
610 	    ReleaseWriteLock(&tdc->lock);
614 	/* otherwise we've written some, fixup length, etc and continue with next seg */
615 	len = len - tuio.afsio_resid;	/* compute amount really transferred */
617 	afsio_skip(auio, tlen);	/* advance auio over data written */
618 	/* compute new file size */
619 	if (offset + len > tdc->f.chunkBytes) {
620 	    afs_int32 tlength = offset + len;
621 	    afs_AdjustSize(tdc, tlength);
622 	    if (tdc->validPos < filePos + len)
623 		tdc->validPos = filePos + len;
626 	transferLength += len;
628 #if defined(AFS_SGI_ENV)
629 	/* afs_xwrite handles setting m.Length */
630 	osi_Assert(filePos <= avc->m.Length);
	/* Extend the cached file length if this write went past EOF. */
632 	if (filePos > avc->m.Length) {
633 	    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
634 		       __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
635 		       ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
636 		       ICL_HANDLE_OFFSET(filePos));
637 	    avc->m.Length = filePos;
641 	ReleaseWriteLock(&tdc->lock);
643 #if !defined(AFS_VM_RDWR_ENV)
645      * If write is implemented via VM, afs_DoPartialWrite() is called from
646      * the high-level write op.
649 	    code = afs_DoPartialWrite(avc, &treq);
657 #ifndef AFS_VM_RDWR_ENV
658 	afs_FakeClose(avc, acred);
660     error = afs_CheckCode(error, &treq, 7);
661     /* This set is here so we get the CheckCode. */
662     if (error && !avc->vc_error)
663 	avc->vc_error = error;
665 	ReleaseWriteLock(&avc->lock);
666     osi_FreeSmallSpace(tvec);
668     /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
669      * work. GFS is truly a poorly-designed interface! */
670     afs_gfshack((struct gnode *)avc);
672 #ifndef AFS_VM_RDWR_ENV
674      * If write is implemented via VM, afs_fsync() is called from the high-level
	/* Honor synchronous-write requests: the exact flag checked differs
	 * per platform (IO_SYNC, FSYNCIO in the u-area, or FSYNC). */
677 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
678     if (noLock && (aio & IO_SYNC)) {
681     /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
682      * we're doing them because the file was opened with O_SYNCIO specified,
683      * we have to look in the u area. No single mechanism here!!
685     if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
687     if (noLock && (aio & FSYNC)) {
	/* Skip the fsync for NFS-translator requests. */
690 	if (!AFS_NFSXLATORREQ(acred))
691 	    afs_fsync(avc, acred);
697 /* do partial write if we're low on unmodified chunks */
/*
 * afs_DoPartialWrite
 * If the number of dirty cache chunks exceeds the configured maximum,
 * store the file's dirty segments back to the server asynchronously to
 * free up unmodified chunks; otherwise return 0 without doing anything.
 * NOTE(review): this extract omits some original lines; comments cover
 * only the visible code.
 */
699 afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
701     register afs_int32 code;
703     if (afs_stats_cmperf.cacheCurrDirtyChunks <=
704 	afs_stats_cmperf.cacheMaxDirtyChunks)
705 	return 0;		/* nothing to do */
706     /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
707     afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
708 	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
709 #if defined(AFS_SUN5_ENV)
	/* Solaris additionally invalidates VM pages after the store. */
710     code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
712     code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
719 #if !defined (AFS_AIX_ENV) && !defined (AFS_HPUX_ENV) && !defined (AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
721 #define vno_close(X) vn_close((X), 0, NOCRED)
722 #elif defined(AFS_DUX40_ENV)
723 #define vno_close vn_close
725 /* We don't need this for AIX since:
726  * (1) aix doesn't use fileops and it calls close directly instead
727 * (where the unlocking should be done) and
728 * (2) temporarily, the aix lockf isn't supported yet.
730 * this stupid routine is used to release the flocks held on a
731 * particular file descriptor. Sun doesn't pass file descr. info
732 * through to the vnode layer, and yet we must unlock flocked files
733 * on the *appropriate* (not first, as in System V) close call. Thus
735 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
736 * file ops structure into any afs file when it gets flocked.
737 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/*
 * afs_closex
 * Close-time flock cleanup: invoked (via the substituted AFS file ops)
 * when a flocked file descriptor is closed, so that advisory locks held
 * through this descriptor are released on the correct close call.
 * Returns the vnode layer's close result.
 * NOTE(review): this extract omits some original lines; comments cover
 * only the visible code.
 */
741 afs_closex(register struct file *afd)
743     struct vrequest treq;
748     struct afs_fakestat_state fakestat;
750     AFS_STATCNT(afs_closex);
751     /* setup the credentials */
752     if ((code = afs_InitReq(&treq, u.u_cred)))
754     afs_InitFakeStat(&fakestat);
757     /* we're the last one.  If we're an AFS vnode, clear the flags,
758      * close the file and release the lock when done.  Otherwise, just
759      * let the regular close code work.      */
760     if (afd->f_type == DTYPE_VNODE) {
761 	tvc = VTOAFS(afd->f_data);
762 	if (IsAfsVnode(AFSTOV(tvc))) {
763 	    code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
765 		afs_PutFakeStat(&fakestat);
	    /* Hold the vnode across vno_close so the HandleFlock below
	     * still has a valid vcache to unlock. */
768 	    VN_HOLD(AFSTOV(tvc));
769 	    flags = afd->f_flag & (FSHLOCK | FEXLOCK);
770 	    afd->f_flag &= ~(FSHLOCK | FEXLOCK);
771 	    code = vno_close(afd);
	    /* Release this process's advisory locks; the pid argument is
	     * platform-dependent. */
773 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)
774 		HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
777 		HandleFlock(tvc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
780 	    grele((struct gnode *)tvc);
782 	    AFS_RELE(AFSTOV(tvc));
787     /* now, if close not done, do it */
789 	code = vno_close(afd);
791     afs_PutFakeStat(&fakestat);
792     return code;		/* return code from vnode layer */
797 /* handle any closing cleanup stuff */
/*
 * afs_close
 * VFS close entry point for AFS files.  Releases advisory locks held by
 * the closing process, and on a write-close either stores dirty data
 * synchronously (afs_StoreOnLastReference) or hands the store to an idle
 * background daemon.  Maps store errors to user-visible warnings and
 * propagates them to the last closer via avc->vc_error.
 * The K&R parameter list below varies by platform (SGI adds lastclose,
 * Sun adds count/offset).
 * NOTE(review): this extract omits some original lines; comments cover
 * only the visible code.
 */
800 afs_close(OSI_VC_ARG(avc), aflags, lastclose,
801 #if !defined(AFS_SGI65_ENV)
805 #if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
809 lastclose_t lastclose;
810 #if !defined(AFS_SGI65_ENV)
812 #if defined(AFS_SGI64_ENV)
816 #elif defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
818 afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
821 afs_close(OSI_VC_ARG(avc), aflags, count, acred)
825 afs_close(OSI_VC_ARG(avc), aflags, acred)
829      struct AFS_UCRED *acred;
831     register afs_int32 code;
832     register struct brequest *tb;
833     struct vrequest treq;
837     struct afs_fakestat_state fakestat;
840     AFS_STATCNT(afs_close);
841     afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
842 	       ICL_TYPE_INT32, aflags);
843     code = afs_InitReq(&treq, acred);
846     afs_InitFakeStat(&fakestat);
847     code = afs_EvalFakeStat(&avc, &fakestat, &treq);
849 	afs_PutFakeStat(&fakestat);
853     if (avc->flockCount) {
854 	HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
857 #if defined(AFS_SGI_ENV)
859 	afs_PutFakeStat(&fakestat);
862 #elif defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
864     /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
865 	afs_PutFakeStat(&fakestat);
870 #if defined(AFS_SGI_ENV)
871     /* unlock any locks for pid - could be wrong for child .. */
872     AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
874     get_current_flid(&flid);
875     cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
876     HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
879     cleanlocks((vnode_t *) avc, flp);
880 #else /* AFS_SGI64_ENV */
881     cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
882 #endif /* AFS_SGI64_ENV */
883     HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
884 #endif /* AFS_SGI65_ENV */
885     /* afs_chkpgoob will drop and re-acquire the global lock. */
886     afs_chkpgoob(&avc->v, btoc(avc->m.Length));
887 #else /* AFS_SGI_ENV */
888     if (avc->flockCount) {	/* Release Lock */
889 #if	defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV)
890 	HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1 /*onlymine */ );
892 	HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
895 #endif /* AFS_SGI_ENV */
896 #endif /* AFS_SUN5_ENV */
	/* Write-close: data must be pushed back to the fileserver. */
897     if (aflags & (FWRITE | FTRUNC)) {
899 	    /* do it yourself if daemons are all busy */
900 	    ObtainWriteLock(&avc->lock, 124);
901 	    code = afs_StoreOnLastReference(avc, &treq);
902 	    ReleaseWriteLock(&avc->lock);
903 #if defined(AFS_SGI_ENV)
904 	    AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
907 #if defined(AFS_SGI_ENV)
908 	    AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
910 	    /* at least one daemon is idle, so ask it to do the store.
911 	     * Also, note that we don't lock it any more... */
912 	    tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
913 			    (afs_size_t) acred->cr_uid, (afs_size_t) 0,
915 	    /* sleep waiting for the store to start, then retrieve error code */
916 	    while ((tb->flags & BUVALID) == 0) {
924 	/* VNOVNODE is "acceptable" error code from close, since
925 	 * may happen when deleting a file on another machine while
926 	 * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
927 	if (code == VNOVNODE || code == ENOENT)
930 	/* Ensure last closer gets the error. If another thread caused
931 	 * DoPartialWrite and this thread does not actually store the data,
932 	 * it may not see the quota error.
934 	ObtainWriteLock(&avc->lock, 406);
937 	    osi_ReleaseVM(avc, acred);
939 	    code = avc->vc_error;
942 	ReleaseWriteLock(&avc->lock);
944 	/* some codes merit specific complaint */
946 	    afs_warnuser("afs: failed to store file (network problems)\n");
949 	else if (code == ENOSPC) {
951 		("afs: failed to store file (over quota or partition full)\n");
954 	else if (code == ENOSPC) {
955 	    afs_warnuser("afs: failed to store file (partition full)\n");
956 	} else if (code == EDQUOT) {
957 	    afs_warnuser("afs: failed to store file (over quota)\n");
961 	    afs_warnuser("afs: failed to store file (%d)\n", code);
963 	/* finally, we flush any text pages lying around here */
967 #if defined(AFS_SGI_ENV)
968 	AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
969 	osi_Assert(avc->opens > 0);
971 	/* file open for read */
	/* Read-only close: no store needed, but still surface any queued
	 * vc_error to this closer. */
972 	ObtainWriteLock(&avc->lock, 411);
975 	    osi_ReleaseVM(avc, acred);
977 	    code = avc->vc_error;
981 	ReleaseWriteLock(&avc->lock);
	/* If this was (nearly) the last reference and the file was deleted
	 * while open, finish the silly-rename cleanup now. */
984     if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
985 	    afs_remunlink(avc, 1);	/* ignore any return code */
988     afs_PutFakeStat(&fakestat);
989     code = afs_CheckCode(code, &treq, 5);
/*
 * afs_fsync
 * VFS fsync entry point: if the file has active writers, synchronously
 * store all dirty segments back to the fileserver.  The K&R parameter
 * list varies by platform (OSF adds fflags/waitfor, SGI/Sun53 add flag).
 * NOTE(review): this extract omits some original lines (including the
 * function's tail); comments cover only the visible code.
 */
996 afs_fsync(avc, fflags, acred, waitfor)
999 #else /* AFS_OSF_ENV */
1000 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
1001 afs_fsync(OSI_VC_ARG(avc), flag, acred
1002 #ifdef AFS_SGI65_ENV
1007 afs_fsync(avc, acred)
1011      struct AFS_UCRED *acred;
1012 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
1014 #ifdef AFS_SGI65_ENV
1019     register afs_int32 code;
1020     struct vrequest treq;
	/* Fail fast if a previous store already recorded an error. */
1024 	return avc->vc_error;
1026 #if defined(AFS_SUN5_ENV)
1027     /* back out if called from NFS server */
1028     if (curthread->t_flag & T_DONTPEND)
1032     AFS_STATCNT(afs_fsync);
1033     afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
1034     if ((code = afs_InitReq(&treq, acred)))
1037 #if defined(AFS_SGI_ENV)
1038     AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1039     if (flag & FSYNC_INVAL)
1040 	osi_VM_FSyncInval(avc);
1041 #endif /* AFS_SGI_ENV */
1043     ObtainSharedLock(&avc->lock, 18);
1045     if (avc->execsOrWriters > 0) {
1046 	/* put the file back */
	/* Upgrade to a write lock only for the store, then drop back. */
1047 	UpgradeSToWLock(&avc->lock, 41);
1048 	code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
1049 	ConvertWToSLock(&avc->lock);
1051 #if defined(AFS_SGI_ENV)
1052     AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1053     if (code == VNOVNODE) {
1054 	/* syncing an unlinked file! - non-informative to pass an errno
1055 	 * 102 (== VNOVNODE) to user
1061     code = afs_CheckCode(code, &treq, 33);
1062     ReleaseSharedLock(&avc->lock);