2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "../afs/param.h"
25 #include "../afs/sysincludes.h" /* Standard vendor system headers */
26 #include "../afs/afsincludes.h" /* Afs-based standard headers */
27 #include "../afs/afs_stats.h" /* statistics */
28 #include "../afs/afs_cbqueue.h"
29 #include "../afs/nfsclient.h"
30 #include "../afs/afs_osidnlc.h"
33 extern unsigned char *afs_indexFlags;
35 /* Called by all write-on-close routines: regular afs_close,
36 * store via background daemon and store via the
37 * afs_FlushActiveVCaches routine (when CCORE is on).
38 * avc->lock must be write-locked.
/*
 * afs_StoreOnLastReference -- push a vcache's dirty data back to the
 * fileserver on a write-close.  Reached from afs_close, the background
 * store daemon, and afs_FlushActiveVCaches (per the comment above this
 * function).  Caller must hold avc->lock write-locked.
 *
 * NOTE(review): this listing is a sampled extract; declarations,
 * closing braces and some #else/#endif lines are not visible here.
 */
40 int afs_StoreOnLastReference(register struct vcache *avc, register struct vrequest *treq)
44 AFS_STATCNT(afs_StoreOnLastReference);
45 /* if CCore flag is set, we clear it and do the extra decrement
46 * ourselves now. If we're called by the CCore clearer, the CCore
47 * flag will already be clear, so we don't have to worry about
48 * clearing it twice. */
49 if (avc->states & CCore) {
50 avc->states &= ~CCore;
51 #if defined(AFS_SGI_ENV)
52 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
54 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
55 * depending on the flags the file was opened with. So, if you make any
56 * changes to the way the execsOrWriters flag is handled check with the
59 avc->execsOrWriters--;
60 AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose)*/
61 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
64 /* Now, send the file back. Used to require 0 writers left, but now do
65 * it on every close for write, since two closes in a row are harmless
66 * since first will clean all chunks, and second will be noop. Note that
67 * this will also save confusion when someone keeps a file open
68 * inadvertently, since with old system, writes to the server would never
71 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE/*!sync-to-disk*/);
/* The open/writer counts must only be dropped AFTER the store above. */
73 * We have to do these after the above store is done: on some systems like
74 * aix they'll need to flush all the vm dirty pages to the disk via the
75 * strategy routine. During that whole procedure (done under no avc locks)
76 * opens, refcounts would be zero, since it didn't reach the afs_{rd,wr}
77 * routines which means the vcache is a perfect candidate for flushing!
79 #if defined(AFS_SGI_ENV)
80 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
83 avc->execsOrWriters--;
/*
 * afs_MemWrite -- write path for the in-memory cache backend.
 * Copies data from the caller's uio into memory-cache chunks
 * (afs_MemWriteUIO), marking each dcache entry dirty (IFDataMod /
 * DWriting / DFEntryMod), growing the cached file size via
 * afs_AdjustSize, and finally triggering a partial store
 * (afs_DoPartialWrite) when too many chunks are dirty.
 *
 * avc    - vcache of the file being written (lock taken here)
 * auio   - source data + file offset/resid
 * aio    - IO flags (IO_APPEND handled explicitly below)
 * acred  - caller credentials, used for the vrequest
 * noLock - presumably suppresses locking when set by the caller —
 *          TODO confirm; the lines that test it are not in this extract
 *
 * NOTE(review): sampled extract — declarations (filePos, error, tuio,
 * startDate, max...), closing braces and #else/#endif lines are missing.
 */
89 int afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
90 struct AFS_UCRED *acred, int noLock)
92 afs_size_t totalLength;
93 afs_size_t transferLength;
95 afs_size_t offset, len;
96 afs_int32 tlen, trimlen;
99 register struct dcache *tdc;
105 struct iovec *tvec; /* again, should have define */
107 register afs_int32 code;
108 struct vrequest treq;
110 AFS_STATCNT(afs_MemWrite);
/* Fail fast if a previous store already recorded an error on this vnode. */
112 return avc->vc_error;
114 startDate = osi_Time();
115 if ((code = afs_InitReq(&treq, acred))) return code;
116 /* otherwise we read */
117 totalLength = auio->afsio_resid;
118 filePos = auio->afsio_offset;
121 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
122 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
123 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(totalLength),
124 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
126 afs_MaybeWakeupTruncateDaemon();
127 ObtainWriteLock(&avc->lock,126);
129 #if defined(AFS_SGI_ENV)
133 * afs_xwrite handles setting m.Length
134 * and handles APPEND mode.
135 * Since we are called via strategy, we need to trim the write to
136 * the actual size of the file
138 osi_Assert(filePos <= avc->m.Length);
139 diff = avc->m.Length - filePos;
140 auio->afsio_resid = MIN(totalLength, diff);
141 totalLength = auio->afsio_resid;
144 if (aio & IO_APPEND) {
145 /* append mode, start it at the right spot */
146 #if defined(AFS_SUN56_ENV)
147 auio->uio_loffset = 0;
149 filePos = auio->afsio_offset = avc->m.Length;
153 * Note that we use startDate rather than calling osi_Time() here.
154 * This is to avoid counting lock-waiting time in file date (for ranlib).
156 avc->m.Date = startDate;
/* Enforce the process file-size rlimit; HP-UX variants compare in 512-byte
 * blocks (>> 9), others in bytes. */
158 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
159 #if defined(AFS_HPUX101_ENV)
160 if ((totalLength + filePos) >> 9 > (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
163 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
165 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
169 ReleaseWriteLock(&avc->lock);
173 #ifdef AFS_VM_RDWR_ENV
175 * If write is implemented via VM, afs_FakeOpen() is called from the
176 * high-level write op.
178 if (avc->execsOrWriters <= 0) {
179 printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
184 avc->states |= CDirty;
185 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
/* Main loop: one dcache chunk per iteration until the uio is drained. */
186 while (totalLength > 0) {
187 /* Read the cached info. If we call GetDCache while the cache
188 * truncate daemon is running we risk overflowing the disk cache.
189 * Instead we check for an existing cache slot. If we cannot
190 * find an existing slot we wait for the cache to drain
191 * before calling GetDCache.
194 tdc = afs_FindDCache(avc, filePos);
195 if (tdc) ObtainWriteLock(&tdc->lock, 653);
196 } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
197 tdc = afs_FindDCache(avc, filePos);
199 ObtainWriteLock(&tdc->lock, 654);
/* A stale or in-flight chunk can't be reused; drop it and wait. */
200 if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
201 (tdc->dflags & DFFetching)) {
202 ReleaseWriteLock(&tdc->lock);
208 afs_MaybeWakeupTruncateDaemon();
209 while (afs_blocksUsed >
210 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
/* avc->lock is dropped while sleeping for the cache to drain. */
211 ReleaseWriteLock(&avc->lock);
212 if (afs_blocksUsed - afs_blocksDiscarded >
213 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
214 afs_WaitForCacheDrain = 1;
215 afs_osi_Sleep(&afs_WaitForCacheDrain);
217 afs_MaybeFreeDiscardedDCache();
218 afs_MaybeWakeupTruncateDaemon();
219 ObtainWriteLock(&avc->lock,506);
221 avc->states |= CDirty;
222 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
223 if (tdc) ObtainWriteLock(&tdc->lock, 655);
226 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
227 if (tdc) ObtainWriteLock(&tdc->lock, 656);
/* First dirtying of this chunk: bump the dirty-chunk statistic and pin
 * the index entry so the chunk isn't reclaimed while modified. */
233 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
234 afs_stats_cmperf.cacheCurrDirtyChunks++;
235 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
237 if (!(tdc->f.states & DWriting)) {
238 /* don't mark entry as mod if we don't have to */
239 tdc->f.states |= DWriting;
240 tdc->dflags |= DFEntryMod;
242 len = totalLength; /* write this amount by default */
243 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
244 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
245 if (max <= len + offset) { /*if we'd go past the end of this chunk */
246 /* it won't all fit in this chunk, so write as much
250 /* mung uio structure to be right for this transfer */
251 afsio_copy(auio, &tuio, tvec);
253 afsio_trim(&tuio, trimlen);
254 tuio.afsio_offset = offset;
256 code = afs_MemWriteUIO(tdc->f.inode, &tuio);
/* Error path (not fully visible): discard the now-suspect chunk. */
258 void *mep; /* XXX in prototype world is struct memCacheEntry * */
260 ZapDCE(tdc); /* bad data */
261 mep = afs_MemCacheOpen(tdc->f.inode);
262 afs_MemCacheTruncate(mep, 0);
263 afs_MemCacheClose(mep);
264 afs_stats_cmperf.cacheCurrDirtyChunks--;
265 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
266 ReleaseWriteLock(&tdc->lock);
270 /* otherwise we've written some, fixup length, etc and continue with next seg */
271 len = len - tuio.afsio_resid; /* compute amount really transferred */
273 afsio_skip(auio, tlen); /* advance auio over data written */
274 /* compute new file size */
275 if (offset + len > tdc->f.chunkBytes) {
276 afs_int32 tlength = offset+len;
277 afs_AdjustSize(tdc, tlength);
280 transferLength += len;
282 #if defined(AFS_SGI_ENV)
283 /* afs_xwrite handles setting m.Length */
284 osi_Assert(filePos <= avc->m.Length);
286 if (filePos > avc->m.Length) {
287 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
288 ICL_TYPE_STRING, __FILE__,
289 ICL_TYPE_LONG, __LINE__,
290 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
291 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos));
292 avc->m.Length = filePos;
295 ReleaseWriteLock(&tdc->lock);
297 #if !defined(AFS_VM_RDWR_ENV)
299 * If write is implemented via VM, afs_DoPartialWrite() is called from
300 * the high-level write op.
303 code = afs_DoPartialWrite(avc, &treq);
311 #ifndef AFS_VM_RDWR_ENV
312 afs_FakeClose(avc, acred);
314 if (error && !avc->vc_error)
315 avc->vc_error = error;
317 ReleaseWriteLock(&avc->lock);
318 osi_FreeSmallSpace(tvec);
320 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
321 work. GFS is truly a poorly-designed interface! */
322 afs_gfshack((struct gnode *) avc);
324 error = afs_CheckCode(error, &treq, 6);
329 /* called on writes */
/*
 * afs_UFSWrite -- write path for the disk (UFS) cache backend.
 * Mirrors afs_MemWrite above, but each chunk is backed by a local
 * cache file: the chunk is opened with osi_UFSOpen and written
 * through the platform's vnode write primitive (VNOP_RDWR / VOP_WRITE /
 * VOP_RDWR / osi_file_uio_rdwr, selected by the #ifdef ladder below).
 * Dirty-chunk accounting, file-size growth and the trailing
 * afs_DoPartialWrite are the same as in the memory-cache variant.
 * On some platforms a synchronous close (IO_SYNC/FSYNC) triggers an
 * inline afs_fsync at the end.
 *
 * NOTE(review): sampled extract — declarations (filePos, error, tuio,
 * startDate, max, diff...), braces and some #else/#endif lines are
 * missing; do not restructure from this listing alone.
 */
330 int afs_UFSWrite(register struct vcache *avc, struct uio *auio,
331 int aio, struct AFS_UCRED *acred, int noLock)
333 afs_size_t totalLength;
334 afs_size_t transferLength;
336 afs_size_t offset, len;
341 register struct dcache *tdc;
347 struct iovec *tvec; /* again, should have define */
348 struct osi_file *tfile;
349 register afs_int32 code;
351 struct vrequest treq;
353 AFS_STATCNT(afs_UFSWrite);
/* Fail fast if a previous store already recorded an error on this vnode. */
355 return avc->vc_error;
357 startDate = osi_Time();
358 if ((code = afs_InitReq(&treq, acred))) return code;
359 /* otherwise we read */
360 totalLength = auio->afsio_resid;
361 filePos = auio->afsio_offset;
364 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
365 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
366 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(totalLength),
367 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
369 afs_MaybeWakeupTruncateDaemon();
370 ObtainWriteLock(&avc->lock,556);
372 #if defined(AFS_SGI_ENV)
376 * afs_xwrite handles setting m.Length
377 * and handles APPEND mode.
378 * Since we are called via strategy, we need to trim the write to
379 * the actual size of the file
381 osi_Assert(filePos <= avc->m.Length);
382 diff = avc->m.Length - filePos;
383 auio->afsio_resid = MIN(totalLength, diff);
384 totalLength = auio->afsio_resid;
387 if (aio & IO_APPEND) {
388 /* append mode, start it at the right spot */
389 #if defined(AFS_SUN56_ENV)
390 auio->uio_loffset = 0;
392 filePos = auio->afsio_offset = avc->m.Length;
396 * Note that we use startDate rather than calling osi_Time() here.
397 * This is to avoid counting lock-waiting time in file date (for ranlib).
399 avc->m.Date = startDate;
/* Enforce the process file-size rlimit; HP-UX variants compare in 512-byte
 * blocks (>> 9), others in bytes. */
401 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
402 #if defined(AFS_HPUX101_ENV)
403 if ((totalLength + filePos) >> 9 > p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
406 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
408 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
412 ReleaseWriteLock(&avc->lock);
416 #ifdef AFS_VM_RDWR_ENV
418 * If write is implemented via VM, afs_FakeOpen() is called from the
419 * high-level write op.
421 if (avc->execsOrWriters <= 0) {
422 printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
427 avc->states |= CDirty;
428 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
/* Main loop: one dcache chunk per iteration until the uio is drained. */
429 while (totalLength > 0) {
430 #if 0 /* Can't call without tdc. Can't call later since GetDCache can be
431 called with !tdc. Leaving it out for now. */
433 * The following lines are necessary because afs_GetDCache with
434 * flag == 4 expects the length field to be filled. It decides
435 * from this whether it's necessary to fetch data into the chunk
436 * before writing or not (when the whole chunk is overwritten!).
438 len = totalLength; /* write this amount by default */
439 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
440 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
441 if (max <= len + offset) { /*if we'd go past the end of this chunk */
442 /* it won't all fit in this chunk, so write as much
447 /* read the cached info */
449 tdc = afs_FindDCache(avc, filePos);
450 if (tdc) ObtainWriteLock(&tdc->lock, 657);
451 } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
452 tdc = afs_FindDCache(avc, filePos);
454 ObtainWriteLock(&tdc->lock, 658);
/* A stale or in-flight chunk can't be reused; drop it and wait. */
455 if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
456 (tdc->dflags & DFFetching)) {
457 ReleaseWriteLock(&tdc->lock);
463 afs_MaybeWakeupTruncateDaemon();
464 while (afs_blocksUsed >
465 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
/* avc->lock is dropped while sleeping for the cache to drain. */
466 ReleaseWriteLock(&avc->lock);
467 if (afs_blocksUsed - afs_blocksDiscarded >
468 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
469 afs_WaitForCacheDrain = 1;
470 afs_osi_Sleep(&afs_WaitForCacheDrain);
472 afs_MaybeFreeDiscardedDCache();
473 afs_MaybeWakeupTruncateDaemon();
474 ObtainWriteLock(&avc->lock,509);
476 avc->states |= CDirty;
477 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
478 if (tdc) ObtainWriteLock(&tdc->lock, 659);
481 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
482 if (tdc) ObtainWriteLock(&tdc->lock, 660);
/* First dirtying of this chunk: bump the dirty-chunk statistic and pin
 * the index entry so the chunk isn't reclaimed while modified. */
488 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
489 afs_stats_cmperf.cacheCurrDirtyChunks++;
490 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
492 if (!(tdc->f.states & DWriting)) {
493 /* don't mark entry as mod if we don't have to */
494 tdc->f.states |= DWriting;
495 tdc->dflags |= DFEntryMod;
497 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
498 len = totalLength; /* write this amount by default */
499 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
500 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
501 if (max <= len + offset) { /*if we'd go past the end of this chunk */
502 /* it won't all fit in this chunk, so write as much
506 /* mung uio structure to be right for this transfer */
507 afsio_copy(auio, &tuio, tvec);
509 afsio_trim(&tuio, trimlen);
510 tuio.afsio_offset = offset;
/* Per-platform write of the trimmed uio into the local cache file. */
514 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL, NULL, &afs_osi_cred);
518 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
520 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t)&offset, &tuio, NULL, NULL, -1);
522 #endif /* AFS_AIX41_ENV */
523 #else /* AFS_AIX_ENV */
526 VOP_RWLOCK(tfile->vnode, 1);
527 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
528 VOP_RWUNLOCK(tfile->vnode, 1);
530 if (code == ENOSPC) afs_warnuser("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
532 #if defined(AFS_SGI_ENV)
534 avc->states |= CWritingUFS;
535 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
536 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred, code);
537 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
538 avc->states &= ~CWritingUFS;
/* This platform writes with the osi credential swapped into the u area. */
543 struct ucred *tmpcred = u.u_cred;
544 u.u_cred = &afs_osi_cred;
545 tuio.uio_rw = UIO_WRITE;
547 VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
551 #else /* AFS_OSF_ENV */
552 #if defined(AFS_HPUX100_ENV)
555 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
560 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
562 #if defined(AFS_LINUX20_ENV)
564 code = osi_file_uio_rdwr(tfile, &tuio, UIO_WRITE);
567 #if defined(AFS_DARWIN_ENV)
569 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
570 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
571 VOP_UNLOCK(tfile->vnode, 0, current_proc());
574 #if defined(AFS_FBSD_ENV)
576 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
577 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
578 VOP_UNLOCK(tfile->vnode, 0, curproc);
581 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
582 #endif /* AFS_FBSD_ENV */
583 #endif /* AFS_DARWIN_ENV */
584 #endif /* AFS_LINUX20_ENV */
585 #endif /* AFS_HPUX100_ENV */
586 #endif /* AFS_OSF_ENV */
587 #endif /* AFS_SGI_ENV */
588 #endif /* AFS_SUN5_ENV */
589 #endif /* AFS_AIX41_ENV */
/* Error path (not fully visible): discard the now-suspect chunk. */
592 ZapDCE(tdc); /* bad data */
593 osi_UFSTruncate(tfile,0); /* fake truncate the segment */
594 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
595 afs_stats_cmperf.cacheCurrDirtyChunks--;
596 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
597 afs_CFileClose(tfile);
598 ReleaseWriteLock(&tdc->lock);
602 /* otherwise we've written some, fixup length, etc and continue with next seg */
603 len = len - tuio.afsio_resid; /* compute amount really transferred */
605 afsio_skip(auio, tlen); /* advance auio over data written */
606 /* compute new file size */
607 if (offset + len > tdc->f.chunkBytes) {
608 afs_int32 tlength = offset+len;
609 afs_AdjustSize(tdc, tlength);
612 transferLength += len;
614 #if defined(AFS_SGI_ENV)
615 /* afs_xwrite handles setting m.Length */
616 osi_Assert(filePos <= avc->m.Length);
618 if (filePos > avc->m.Length) {
619 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
620 ICL_TYPE_STRING, __FILE__,
621 ICL_TYPE_LONG, __LINE__,
622 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
623 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos));
624 avc->m.Length = filePos;
628 ReleaseWriteLock(&tdc->lock);
630 #if !defined(AFS_VM_RDWR_ENV)
632 * If write is implemented via VM, afs_DoPartialWrite() is called from
633 * the high-level write op.
636 code = afs_DoPartialWrite(avc, &treq);
644 #ifndef AFS_VM_RDWR_ENV
645 afs_FakeClose(avc, acred);
647 error = afs_CheckCode(error, &treq, 7);
648 /* This set is here so we get the CheckCode. */
649 if (error && !avc->vc_error)
650 avc->vc_error = error;
652 ReleaseWriteLock(&avc->lock);
653 osi_FreeSmallSpace(tvec);
655 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
656 work. GFS is truly a poorly-designed interface! */
657 afs_gfshack((struct gnode *) avc);
659 #ifndef AFS_VM_RDWR_ENV
661 * If write is implemented via VM, afs_fsync() is called from the high-level
/* Synchronous-write detection differs per platform (IO_SYNC vs FSYNCIO
 * in the u area vs FSYNC); see the platform comments below. */
664 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
665 if (noLock && (aio & IO_SYNC)) {
668 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
669 * we're doing them because the file was opened with O_SYNCIO specified,
670 * we have to look in the u area. No single mechanism here!!
672 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
674 if (noLock && (aio & FSYNC)) {
677 if (!AFS_NFSXLATORREQ(acred))
678 afs_fsync(avc, acred);
684 /* do partial write if we're low on unmodified chunks */
/*
 * afs_DoPartialWrite -- if the count of dirty chunks exceeds the
 * configured maximum, asynchronously store all dirty segments back to
 * the server (afs_StoreAllSegments with AFS_ASYNC; Solaris adds
 * AFS_VMSYNC_INVAL).  Returns 0 immediately when under the threshold.
 * NOTE(review): the tail of this function (return of `code`) is not
 * visible in this extract.
 */
685 int afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
687 register afs_int32 code;
689 if (afs_stats_cmperf.cacheCurrDirtyChunks <= afs_stats_cmperf.cacheMaxDirtyChunks)
690 return 0; /* nothing to do */
691 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
692 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
693 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
694 #if defined(AFS_SUN5_ENV)
695 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
697 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
704 #if !defined (AFS_AIX_ENV) && !defined (AFS_HPUX_ENV) && !defined (AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_FBSD_ENV)
706 #define vno_close(X) vn_close((X), 0, NOCRED)
707 #elif defined(AFS_DUX40_ENV)
708 #define vno_close vn_close
710 /* We don't need this for AIX since:
711 * (1) aix doesn't use fileops and it call close directly intead
712 * (where the unlocking should be done) and
713 * (2) temporarily, the aix lockf isn't supported yet.
715 * this stupid routine is used to release the flocks held on a
716 * particular file descriptor. Sun doesn't pass file descr. info
717 * through to the vnode layer, and yet we must unlock flocked files
718 * on the *appropriate* (not first, as in System V) close call. Thus
720 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
721 * file ops structure into any afs file when it gets flocked.
722 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/*
 * afs_closex -- close hook plugged into a file's fileops when it gets
 * flocked (see the comment block above).  Strips the FSHLOCK/FEXLOCK
 * flags, performs the vnode-layer close, and releases any flocks held
 * on this descriptor via HandleFlock.  Returns the vnode layer's close
 * code.  NOTE(review): sampled extract — some declarations (tvc, code,
 * flags) and braces are not visible here.
 */
725 int afs_closex(register struct file *afd)
727 struct vrequest treq;
732 struct afs_fakestat_state fakestat;
734 AFS_STATCNT(afs_closex);
735 /* setup the credentials */
736 if ((code = afs_InitReq(&treq, u.u_cred))) return code;
737 afs_InitFakeStat(&fakestat);
740 /* we're the last one. If we're an AFS vnode, clear the flags,
741 * close the file and release the lock when done. Otherwise, just
742 * let the regular close code work. */
743 if (afd->f_type == DTYPE_VNODE) {
744 tvc = VTOAFS(afd->f_data);
745 if (IsAfsVnode(AFSTOV(tvc))) {
746 code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
748 afs_PutFakeStat(&fakestat);
/* Hold the vnode across the close so it can't vanish before the
 * flock release below. */
751 VN_HOLD(AFSTOV(tvc));
752 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
753 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
754 code = vno_close(afd);
756 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)
757 HandleFlock(tvc, LOCK_UN, &treq,
758 u.u_procp->p_pid, 1/*onlymine*/);
760 HandleFlock(tvc, LOCK_UN, &treq, 0, 1/*onlymine*/);
763 grele((struct gnode *) tvc);
765 AFS_RELE(AFSTOV(tvc));
770 /* now, if close not done, do it */
772 code = vno_close(afd);
774 afs_PutFakeStat(&fakestat);
775 return code; /* return code from vnode layer */
780 /* handle any closing cleanup stuff */
/*
 * afs_close -- vnode-layer close entry point (signature varies per
 * platform, selected by the #ifdef ladder below).  Releases flocks
 * held by the closing process, and on a write/truncate close
 * (FWRITE|FTRUNC) stores dirty data back to the fileserver: either
 * inline via afs_StoreOnLastReference, or by queueing a BOP_STORE
 * request to an idle background daemon.  Common store errors are
 * mapped to afs_warnuser messages; the last closer also picks up any
 * deferred vc_error.  Unlinked files whose last reference drops are
 * cleaned up via afs_remunlink.
 * NOTE(review): sampled extract — several declarations, branches and
 * braces are not visible; the code as listed is not contiguous.
 */
782 afs_close(OSI_VC_ARG(avc), aflags, lastclose,
783 #if !defined(AFS_SGI65_ENV)
787 #if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
791 lastclose_t lastclose;
792 #if !defined(AFS_SGI65_ENV)
794 #if defined(AFS_SGI64_ENV)
799 #if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
801 afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
804 afs_close(OSI_VC_ARG(avc), aflags, count, acred)
808 afs_close(OSI_VC_ARG(avc), aflags, acred)
813 struct AFS_UCRED *acred;
815 register afs_int32 code;
816 register struct brequest *tb;
817 struct vrequest treq;
821 struct afs_fakestat_state fakestat;
824 AFS_STATCNT(afs_close);
825 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
826 ICL_TYPE_INT32, aflags);
827 code = afs_InitReq(&treq, acred);
828 if (code) return code;
829 afs_InitFakeStat(&fakestat);
830 code = afs_EvalFakeStat(&avc, &fakestat, &treq);
832 afs_PutFakeStat(&fakestat);
836 if (avc->flockCount) {
837 HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
840 #if defined(AFS_SGI_ENV)
842 afs_PutFakeStat(&fakestat);
846 #if defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
848 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
849 afs_PutFakeStat(&fakestat);
855 #if defined(AFS_SGI_ENV)
856 /* unlock any locks for pid - could be wrong for child .. */
857 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
859 get_current_flid(&flid);
860 cleanlocks((vnode_t *)avc, flid.fl_pid, flid.fl_sysid);
861 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1/*onlymine*/);
864 cleanlocks((vnode_t *)avc, flp);
865 #else /* AFS_SGI64_ENV */
866 cleanlocks((vnode_t *)avc, u.u_procp->p_epid, u.u_procp->p_sysid);
867 #endif /* AFS_SGI64_ENV */
868 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1/*onlymine*/);
869 #endif /* AFS_SGI65_ENV */
870 /* afs_chkpgoob will drop and re-acquire the global lock. */
871 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
872 if (avc->flockCount) { /* Release Lock */
873 #if defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV)
875 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1/*onlymine*/);
877 HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
/* Write-close: push dirty data back to the server. */
882 if (aflags & (FWRITE | FTRUNC)) {
884 /* do it yourself if daemons are all busy */
885 ObtainWriteLock(&avc->lock,124);
886 code = afs_StoreOnLastReference(avc, &treq);
887 ReleaseWriteLock(&avc->lock);
888 #if defined(AFS_SGI_ENV)
889 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
893 #if defined(AFS_SGI_ENV)
894 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
896 /* at least one daemon is idle, so ask it to do the store.
897 Also, note that we don't lock it any more... */
898 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
899 (afs_size_t) acred->cr_uid, (afs_size_t) 0,
901 /* sleep waiting for the store to start, then retrieve error code */
902 while ((tb->flags & BUVALID) == 0) {
910 /* VNOVNODE is "acceptable" error code from close, since
911 may happen when deleting a file on another machine while
912 it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
913 if (code == VNOVNODE || code == ENOENT)
916 /* Ensure last closer gets the error. If another thread caused
917 * DoPartialWrite and this thread does not actually store the data,
918 * it may not see the quota error.
920 ObtainWriteLock(&avc->lock,406);
923 osi_ReleaseVM(avc, acred);
925 code = avc->vc_error;
928 ReleaseWriteLock(&avc->lock);
930 /* some codes merit specific complaint */
932 afs_warnuser("afs: failed to store file (network problems)\n");
935 else if (code == ENOSPC) {
936 afs_warnuser("afs: failed to store file (over quota or partition full)\n");
939 else if (code == ENOSPC) {
940 afs_warnuser("afs: failed to store file (partition full)\n");
942 else if (code == EDQUOT) {
943 afs_warnuser("afs: failed to store file (over quota)\n");
947 afs_warnuser("afs: failed to store file (%d)\n", code);
949 /* finally, we flush any text pages lying around here */
954 #if defined(AFS_SGI_ENV)
955 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
956 osi_Assert(avc->opens > 0);
958 /* file open for read */
959 ObtainWriteLock(&avc->lock, 411);
962 osi_ReleaseVM(avc, acred);
964 code = avc->vc_error;
968 ReleaseWriteLock(&avc->lock);
/* Last reference to an unlinked file: finish the deferred remove. */
971 if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
972 afs_remunlink(avc, 1); /* ignore any return code */
975 afs_PutFakeStat(&fakestat);
976 code = afs_CheckCode(code, &treq, 5);
/*
 * afs_fsync -- vnode fsync entry point (per-platform signatures below).
 * If there are active writers, synchronously stores all dirty segments
 * back to the fileserver (afs_StoreAllSegments with AFS_SYNC) under a
 * write-upgraded avc->lock.  Solaris backs out when called from the NFS
 * server thread; SGI optionally invalidates VM pages (FSYNC_INVAL).
 * NOTE(review): this extract ends mid-function; the return statement
 * and closing brace are beyond the visible chunk.
 */
983 afs_fsync(avc, fflags, acred, waitfor)
986 #else /* AFS_OSF_ENV */
987 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
988 afs_fsync(OSI_VC_ARG(avc), flag, acred
994 afs_fsync(avc, acred)
998 struct AFS_UCRED *acred;
999 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
1001 #ifdef AFS_SGI65_ENV
1006 register afs_int32 code;
1007 struct vrequest treq;
/* Fail fast if a previous store already recorded an error on this vnode. */
1011 return avc->vc_error;
1013 #if defined(AFS_SUN5_ENV)
1014 /* back out if called from NFS server */
1015 if (curthread->t_flag & T_DONTPEND)
1019 AFS_STATCNT(afs_fsync);
1020 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
1021 if ((code = afs_InitReq(&treq, acred))) return code;
1023 #if defined(AFS_SGI_ENV)
1024 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
1025 if (flag & FSYNC_INVAL)
1026 osi_VM_FSyncInval(avc);
1027 #endif /* AFS_SGI_ENV */
1029 ObtainSharedLock(&avc->lock,18);
1031 if (avc->execsOrWriters > 0) {
1032 /* put the file back */
1033 UpgradeSToWLock(&avc->lock,41);
1034 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
1035 ConvertWToSLock(&avc->lock);
1038 #if defined(AFS_SGI_ENV)
1039 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
1040 if (code == VNOVNODE) {
1041 /* syncing an unlinked file! - non-informative to pass an errno
1042 * 102 (== VNOVNODE) to user
1048 code = afs_CheckCode(code, &treq, 33);
1049 ReleaseSharedLock(&avc->lock);