2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "afs/param.h"
26 #include "afs/sysincludes.h" /* Standard vendor system headers */
27 #include "afsincludes.h" /* Afs-based standard headers */
28 #include "afs/afs_stats.h" /* statistics */
29 #include "afs/afs_cbqueue.h"
30 #include "afs/nfsclient.h"
31 #include "afs/afs_osidnlc.h"
34 extern unsigned char *afs_indexFlags;
36 /* Called by all write-on-close routines: regular afs_close,
37 * store via background daemon and store via the
38 * afs_FlushActiveVCaches routine (when CCORE is on).
39 * avc->lock must be write-locked.
/*
 * afs_StoreOnLastReference -- flush dirty data back to the fileserver on
 * the final write-close of a file (see the header comment above).
 * Caller must hold avc->lock write-locked.
 *
 * NOTE(review): this chunk is a partial extraction -- the embedded
 * original line numbers show gaps (missing declarations, #else/#endif
 * partners and the final return).  Edit only against the complete file.
 */
42 afs_StoreOnLastReference(register struct vcache *avc,
43 register struct vrequest *treq)
47 AFS_STATCNT(afs_StoreOnLastReference);
48 /* if CCore flag is set, we clear it and do the extra decrement
49 * ourselves now. If we're called by the CCore clearer, the CCore
50 * flag will already be clear, so we don't have to worry about
51 * clearing it twice. */
52 if (avc->states & CCore) {
53 avc->states &= ~CCore;
54 #if defined(AFS_SGI_ENV)
55 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
57 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
58 * depending on the flags the file was opened with. So, if you make any
59 * changes to the way the execsOrWriters flag is handled check with the
/* Undo the bookkeeping done when the CCore state was set: drop the
 * writer count, the vnode hold, and the held credential. */
62 avc->execsOrWriters--;
63 AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose) */
64 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
67 /* Now, send the file back. Used to require 0 writers left, but now do
68 * it on every close for write, since two closes in a row are harmless
69 * since first will clean all chunks, and second will be noop. Note that
70 * this will also save confusion when someone keeps a file open
71 * inadvertently, since with old system, writes to the server would never
/* AFS_LASTSTORE: final store of the dirty chunks; per the flag's own
 * comment it does not force sync-to-disk semantics. */
74 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
76 * We have to do these after the above store is done: in some systems like
77 * aix they'll need to flush all the vm dirty pages to the disk via the
78 * strategy routine. During that whole procedure (done under no avc locks)
79 * opens, refcounts would be zero, since it didn't reach the afs_{rd,wr}
80 * routines which means the vcache is a perfect candidate for flushing!
82 #if defined(AFS_SGI_ENV)
83 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
86 avc->execsOrWriters--;
/*
 * afs_MemWrite -- write path for the in-memory cache backend.
 * Walks the caller's uio in chunk-sized segments: locates or creates the
 * dcache entry covering filePos, marks it dirty (IFDataMod/DWriting),
 * copies user data into the memory chunk via afs_MemWriteUIO(), and
 * extends the chunk size / validPos as data lands past the old end.
 * Throttles against the cache-drain watermark before allocating chunks.
 *
 * NOTE(review): partial extraction -- the embedded original line numbers
 * show gaps; declarations such as filePos, error, max, diff and tuio,
 * plus several braces and #else/#endif partners, are not visible here.
 */
93 afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
94 struct AFS_UCRED *acred, int noLock)
96 afs_size_t totalLength;
97 afs_size_t transferLength;
99 afs_size_t offset, len;
100 afs_int32 tlen, trimlen;
103 register struct dcache *tdc;
109 struct iovec *tvec; /* again, should have define */
110 register afs_int32 code;
111 struct vrequest treq;
113 AFS_STATCNT(afs_MemWrite);
115 return avc->vc_error;
117 startDate = osi_Time();
118 if ((code = afs_InitReq(&treq, acred)))
120 /* otherwise we read */
121 totalLength = auio->afsio_resid;
122 filePos = auio->afsio_offset;
125 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
126 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
127 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
128 ICL_HANDLE_OFFSET(avc->m.Length));
130 afs_MaybeWakeupTruncateDaemon();
131 ObtainWriteLock(&avc->lock, 126);
133 #if defined(AFS_SGI_ENV)
137 * afs_xwrite handles setting m.Length
138 * and handles APPEND mode.
139 * Since we are called via strategy, we need to trim the write to
140 * the actual size of the file
142 osi_Assert(filePos <= avc->m.Length);
143 diff = avc->m.Length - filePos;
144 auio->afsio_resid = MIN(totalLength, diff);
145 totalLength = auio->afsio_resid;
148 if (aio & IO_APPEND) {
149 /* append mode, start it at the right spot */
150 #if defined(AFS_SUN56_ENV)
151 auio->uio_loffset = 0;
153 filePos = auio->afsio_offset = avc->m.Length;
157 * Note that we use startDate rather than calling osi_Time() here.
158 * This is to avoid counting lock-waiting time in file date (for ranlib).
160 avc->m.Date = startDate;
/* HP-UX: enforce the process file-size resource limit (RLIMIT_FSIZE,
 * compared in 512-byte units) before writing anything. */
162 #if defined(AFS_HPUX_ENV)
163 #if defined(AFS_HPUX101_ENV)
164 if ((totalLength + filePos) >> 9 >
165 (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
167 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
170 ReleaseWriteLock(&avc->lock);
174 #ifdef AFS_VM_RDWR_ENV
176 * If write is implemented via VM, afs_FakeOpen() is called from the
177 * high-level write op.
179 if (avc->execsOrWriters <= 0) {
180 printf("WARNING: afs_ufswr vp=%lx, exOrW=%d\n", (unsigned long)avc,
181 avc->execsOrWriters);
186 avc->states |= CDirty;
187 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main copy loop: one dcache chunk per iteration until the uio drains. */
188 while (totalLength > 0) {
189 /* Read the cached info. If we call GetDCache while the cache
190 * truncate daemon is running we risk overflowing the disk cache.
191 * Instead we check for an existing cache slot. If we cannot
192 * find an existing slot we wait for the cache to drain
193 * before calling GetDCache.
196 tdc = afs_FindDCache(avc, filePos);
198 ObtainWriteLock(&tdc->lock, 653);
199 } else if (afs_blocksUsed >
200 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
201 tdc = afs_FindDCache(avc, filePos);
203 ObtainWriteLock(&tdc->lock, 654);
/* A stale or in-flight chunk can't be written into; drop it and
 * fall through to the drain-and-GetDCache path. */
204 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
205 || (tdc->dflags & DFFetching)) {
206 ReleaseWriteLock(&tdc->lock);
212 afs_MaybeWakeupTruncateDaemon();
213 while (afs_blocksUsed >
214 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
215 ReleaseWriteLock(&avc->lock);
216 if (afs_blocksUsed - afs_blocksDiscarded >
217 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
218 afs_WaitForCacheDrain = 1;
219 afs_osi_Sleep(&afs_WaitForCacheDrain);
221 afs_MaybeFreeDiscardedDCache();
222 afs_MaybeWakeupTruncateDaemon();
223 ObtainWriteLock(&avc->lock, 506);
225 avc->states |= CDirty;
226 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
228 ObtainWriteLock(&tdc->lock, 655);
231 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
233 ObtainWriteLock(&tdc->lock, 656);
/* First dirtying of this chunk: account it and pin it in the index. */
239 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
240 afs_stats_cmperf.cacheCurrDirtyChunks++;
241 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
243 if (!(tdc->f.states & DWriting)) {
244 /* don't mark entry as mod if we don't have to */
245 tdc->f.states |= DWriting;
246 tdc->dflags |= DFEntryMod;
248 len = totalLength; /* write this amount by default */
249 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
250 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
251 if (max <= len + offset) { /*if we'd go past the end of this chunk */
252 /* it won't all fit in this chunk, so write as much
256 /* mung uio structure to be right for this transfer */
257 afsio_copy(auio, &tuio, tvec);
259 afsio_trim(&tuio, trimlen);
260 tuio.afsio_offset = offset;
262 code = afs_MemWriteUIO(tdc->f.inode, &tuio);
/* Write into the memcache chunk failed: the chunk contents are now
 * suspect, so zap the entry, truncate its backing store to zero, and
 * undo the dirty-chunk accounting. */
264 void *mep; /* XXX in prototype world is struct memCacheEntry * */
266 ZapDCE(tdc); /* bad data */
267 mep = afs_MemCacheOpen(tdc->f.inode);
268 afs_MemCacheTruncate(mep, 0);
269 afs_MemCacheClose(mep);
270 afs_stats_cmperf.cacheCurrDirtyChunks--;
271 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
272 ReleaseWriteLock(&tdc->lock);
276 /* otherwise we've written some, fixup length, etc and continue with next seg */
277 len = len - tuio.afsio_resid; /* compute amount really transferred */
279 afsio_skip(auio, tlen); /* advance auio over data written */
280 /* compute new file size */
281 if (offset + len > tdc->f.chunkBytes) {
282 afs_int32 tlength = offset + len;
283 afs_AdjustSize(tdc, tlength);
284 if (tdc->validPos < filePos + len)
285 tdc->validPos = filePos + len;
288 transferLength += len;
290 #if defined(AFS_SGI_ENV)
291 /* afs_xwrite handles setting m.Length */
292 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: we wrote past EOF, so extend the recorded file length. */
294 if (filePos > avc->m.Length) {
295 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
296 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
297 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
298 ICL_HANDLE_OFFSET(filePos));
299 avc->m.Length = filePos;
302 ReleaseWriteLock(&tdc->lock);
304 #if !defined(AFS_VM_RDWR_ENV)
306 * If write is implemented via VM, afs_DoPartialWrite() is called from
307 * the high-level write op.
310 code = afs_DoPartialWrite(avc, &treq);
318 #ifndef AFS_VM_RDWR_ENV
319 afs_FakeClose(avc, acred);
321 if (error && !avc->vc_error)
322 avc->vc_error = error;
324 ReleaseWriteLock(&avc->lock);
325 osi_FreeSmallSpace(tvec);
326 error = afs_CheckCode(error, &treq, 6);
331 /* called on writes */
/*
 * afs_UFSWrite -- write path for the on-disk (UFS) cache backend.
 * Mirrors afs_MemWrite: walks the uio in chunk-sized segments, marks
 * each dcache entry dirty, opens the chunk's backing cache file with
 * osi_UFSOpen(), and performs the actual write through whichever vnode
 * interface the platform provides (the long #if/#elif chain below).
 * After the loop it may do a partial store, and for synchronous opens
 * (IO_SYNC / FSYNCIO / FSYNC, per platform) it calls afs_fsync().
 *
 * NOTE(review): partial extraction -- the embedded original line numbers
 * show gaps; declarations (filePos, error, max, diff, tuio, ct, flags)
 * and various braces / #else / #endif lines are not visible here.
 */
333 afs_UFSWrite(register struct vcache *avc, struct uio *auio, int aio,
334 struct AFS_UCRED *acred, int noLock)
336 afs_size_t totalLength;
337 afs_size_t transferLength;
339 afs_size_t offset, len;
344 register struct dcache *tdc;
350 struct iovec *tvec; /* again, should have define */
351 struct osi_file *tfile;
352 register afs_int32 code;
353 struct vrequest treq;
355 AFS_STATCNT(afs_UFSWrite);
357 return avc->vc_error;
359 startDate = osi_Time();
360 if ((code = afs_InitReq(&treq, acred)))
362 /* otherwise we read */
363 totalLength = auio->afsio_resid;
364 filePos = auio->afsio_offset;
367 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
368 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
369 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
370 ICL_HANDLE_OFFSET(avc->m.Length));
372 afs_MaybeWakeupTruncateDaemon();
373 ObtainWriteLock(&avc->lock, 556);
375 #if defined(AFS_SGI_ENV)
379 * afs_xwrite handles setting m.Length
380 * and handles APPEND mode.
381 * Since we are called via strategy, we need to trim the write to
382 * the actual size of the file
384 osi_Assert(filePos <= avc->m.Length);
385 diff = avc->m.Length - filePos;
386 auio->afsio_resid = MIN(totalLength, diff);
387 totalLength = auio->afsio_resid;
390 if (aio & IO_APPEND) {
391 /* append mode, start it at the right spot */
392 #if defined(AFS_SUN56_ENV)
393 auio->uio_loffset = 0;
395 filePos = auio->afsio_offset = avc->m.Length;
399 * Note that we use startDate rather than calling osi_Time() here.
400 * This is to avoid counting lock-waiting time in file date (for ranlib).
402 avc->m.Date = startDate;
/* HP-UX: enforce RLIMIT_FSIZE (compared in 512-byte units) up front. */
404 #if defined(AFS_HPUX_ENV)
405 #if defined(AFS_HPUX101_ENV)
406 if ((totalLength + filePos) >> 9 >
407 p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
409 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
412 ReleaseWriteLock(&avc->lock);
416 #ifdef AFS_VM_RDWR_ENV
418 * If write is implemented via VM, afs_FakeOpen() is called from the
419 * high-level write op.
421 if (avc->execsOrWriters <= 0) {
422 printf("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
423 avc->execsOrWriters);
428 avc->states |= CDirty;
429 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main copy loop: one dcache chunk per iteration until the uio drains. */
430 while (totalLength > 0) {
432 * The following line is necessary because afs_GetDCache with
433 * flag == 4 expects the length field to be filled. It decides
434 * from this whether it's necessary to fetch data into the chunk
435 * before writing or not (when the whole chunk is overwritten!).
437 len = totalLength; /* write this amount by default */
438 /* read the cached info */
440 tdc = afs_FindDCache(avc, filePos);
442 ObtainWriteLock(&tdc->lock, 657);
443 } else if (afs_blocksUsed >
444 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
445 tdc = afs_FindDCache(avc, filePos);
447 ObtainWriteLock(&tdc->lock, 658);
/* A stale or in-flight chunk can't be written into; drop it and
 * fall through to the drain-and-GetDCache path. */
448 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
449 || (tdc->dflags & DFFetching)) {
450 ReleaseWriteLock(&tdc->lock);
456 afs_MaybeWakeupTruncateDaemon();
457 while (afs_blocksUsed >
458 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
459 ReleaseWriteLock(&avc->lock);
460 if (afs_blocksUsed - afs_blocksDiscarded >
461 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
462 afs_WaitForCacheDrain = 1;
463 afs_osi_Sleep(&afs_WaitForCacheDrain);
465 afs_MaybeFreeDiscardedDCache();
466 afs_MaybeWakeupTruncateDaemon();
467 ObtainWriteLock(&avc->lock, 509);
469 avc->states |= CDirty;
470 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
472 ObtainWriteLock(&tdc->lock, 659);
475 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
477 ObtainWriteLock(&tdc->lock, 660);
/* First dirtying of this chunk: account it and pin it in the index. */
483 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
484 afs_stats_cmperf.cacheCurrDirtyChunks++;
485 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
487 if (!(tdc->f.states & DWriting)) {
488 /* don't mark entry as mod if we don't have to */
489 tdc->f.states |= DWriting;
490 tdc->dflags |= DFEntryMod;
492 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
493 len = totalLength; /* write this amount by default */
494 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
495 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
496 if (max <= len + offset) { /*if we'd go past the end of this chunk */
497 /* it won't all fit in this chunk, so write as much
501 /* mung uio structure to be right for this transfer */
502 afsio_copy(auio, &tuio, tvec);
504 afsio_trim(&tuio, trimlen);
505 tuio.afsio_offset = offset;
/* Dispatch the actual disk write through the platform's vnode layer.
 * Each branch takes/releases whatever vnode lock that OS requires. */
506 #if defined(AFS_AIX41_ENV)
509 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL,
510 NULL, afs_osi_credp);
512 #elif defined(AFS_AIX32_ENV)
513 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
514 #elif defined(AFS_AIX_ENV)
516 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) & offset,
517 &tuio, NULL, NULL, -1);
518 #elif defined(AFS_SUN5_ENV)
520 #ifdef AFS_SUN510_ENV
524 VOP_RWLOCK(tfile->vnode, 1, &ct);
525 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
526 VOP_RWUNLOCK(tfile->vnode, 1, &ct);
529 VOP_RWLOCK(tfile->vnode, 1);
530 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
531 VOP_RWUNLOCK(tfile->vnode, 1);
536 ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
537 #elif defined(AFS_SGI_ENV)
539 avc->states |= CWritingUFS;
540 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
541 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
542 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
543 avc->states &= ~CWritingUFS;
545 #elif defined(AFS_OSF_ENV)
547 struct ucred *tmpcred = u.u_cred;
548 u.u_cred = afs_osi_credp;
549 tuio.uio_rw = UIO_WRITE;
551 VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, code);
555 #elif defined(AFS_HPUX100_ENV)
558 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
561 #elif defined(AFS_LINUX20_ENV)
563 code = osi_rdwr(tfile, &tuio, UIO_WRITE);
565 #elif defined(AFS_DARWIN_ENV)
567 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
568 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
569 VOP_UNLOCK(tfile->vnode, 0, current_proc());
571 #elif defined(AFS_FBSD50_ENV)
573 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
574 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
575 VOP_UNLOCK(tfile->vnode, 0, curthread);
577 #elif defined(AFS_XBSD_ENV)
579 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
580 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
581 VOP_UNLOCK(tfile->vnode, 0, curproc);
585 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
587 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
/* Disk write failed: the chunk contents are now suspect, so zap the
 * entry, truncate its backing file, and undo the dirty accounting. */
591 ZapDCE(tdc); /* bad data */
592 osi_UFSTruncate(tfile, 0); /* fake truncate the segment */
593 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
594 afs_stats_cmperf.cacheCurrDirtyChunks--;
595 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
596 afs_CFileClose(tfile);
597 ReleaseWriteLock(&tdc->lock);
601 /* otherwise we've written some, fixup length, etc and continue with next seg */
602 len = len - tuio.afsio_resid; /* compute amount really transferred */
604 afsio_skip(auio, tlen); /* advance auio over data written */
605 /* compute new file size */
606 if (offset + len > tdc->f.chunkBytes) {
607 afs_int32 tlength = offset + len;
608 afs_AdjustSize(tdc, tlength);
609 if (tdc->validPos < filePos + len)
610 tdc->validPos = filePos + len;
613 transferLength += len;
615 #if defined(AFS_SGI_ENV)
616 /* afs_xwrite handles setting m.Length */
617 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: we wrote past EOF, so extend the recorded file length. */
619 if (filePos > avc->m.Length) {
620 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
621 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
622 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
623 ICL_HANDLE_OFFSET(filePos));
624 avc->m.Length = filePos;
628 ReleaseWriteLock(&tdc->lock);
630 #if !defined(AFS_VM_RDWR_ENV)
632 * If write is implemented via VM, afs_DoPartialWrite() is called from
633 * the high-level write op.
636 code = afs_DoPartialWrite(avc, &treq);
644 #ifndef AFS_VM_RDWR_ENV
645 afs_FakeClose(avc, acred);
647 error = afs_CheckCode(error, &treq, 7);
648 /* This set is here so we get the CheckCode. */
649 if (error && !avc->vc_error)
650 avc->vc_error = error;
652 ReleaseWriteLock(&avc->lock);
653 osi_FreeSmallSpace(tvec);
654 #ifndef AFS_VM_RDWR_ENV
656 * If write is implemented via VM, afs_fsync() is called from the high-level
/* Per-platform test for "this write must be synchronous"; if it is, and
 * the request isn't from the NFS translator, force an fsync now. */
659 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
660 if (noLock && (aio & IO_SYNC)) {
663 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
664 * we're doing them because the file was opened with O_SYNCIO specified,
665 * we have to look in the u area. No single mechanism here!!
667 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
669 if (noLock && (aio & FSYNC)) {
672 if (!AFS_NFSXLATORREQ(acred))
673 afs_fsync(avc, acred);
679 /* do partial write if we're low on unmodified chunks */
/*
 * afs_DoPartialWrite -- if the count of dirty cache chunks has exceeded
 * the configured maximum, store all dirty segments back asynchronously;
 * otherwise do nothing.  Returns 0 when below the threshold.
 * NOTE(review): partial extraction -- the final return of this function
 * is not visible in this chunk.
 */
681 afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
683 register afs_int32 code;
/* Below the dirty-chunk high-water mark: nothing to flush yet. */
685 if (afs_stats_cmperf.cacheCurrDirtyChunks <=
686 afs_stats_cmperf.cacheMaxDirtyChunks)
687 return 0; /* nothing to do */
688 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
689 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
690 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length))
691 #if defined(AFS_SUN5_ENV)
/* NOTE(review): AFS_VMSYNC_INVAL presumably also invalidates VM pages on
 * Solaris so the store sees current data -- confirm against flag docs. */
692 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
694 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
701 #define vno_close(X) vn_close((X), 0, NOCRED)
702 #elif defined(AFS_DUX40_ENV)
703 #define vno_close vn_close
705 /* We don't need this for AIX since:
706 * (1) aix doesn't use fileops and it calls close directly instead
707 * (where the unlocking should be done) and
708 * (2) temporarily, the aix lockf isn't supported yet.
710 * this stupid routine is used to release the flocks held on a
711 * particular file descriptor. Sun doesn't pass file descr. info
712 * through to the vnode layer, and yet we must unlock flocked files
713 * on the *appropriate* (not first, as in System V) close call. Thus
715 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
716 * file ops structure into any afs file when it gets flocked.
717 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/*
 * afs_closex -- per-file-descriptor close hook, installed so that flocks
 * held on this descriptor are released on the *appropriate* close (see
 * the comment block above this function).  For AFS vnodes it strips the
 * FSHLOCK/FEXLOCK flags, performs the vnode close, and unlocks held
 * flocks; otherwise it just falls through to the regular vnode close.
 * Returns the code from the underlying vnode-layer close.
 *
 * NOTE(review): partial extraction -- declarations (code, flags, tvc)
 * and several braces are not visible in this chunk.
 */
721 afs_closex(register struct file *afd)
723 struct vrequest treq;
728 struct afs_fakestat_state fakestat;
730 AFS_STATCNT(afs_closex);
731 /* setup the credentials */
732 if ((code = afs_InitReq(&treq, u.u_cred)))
734 afs_InitFakeStat(&fakestat);
737 /* we're the last one. If we're an AFS vnode, clear the flags,
738 * close the file and release the lock when done. Otherwise, just
739 * let the regular close code work. */
740 if (afd->f_type == DTYPE_VNODE) {
741 tvc = VTOAFS(afd->f_data);
742 if (IsAfsVnode(AFSTOV(tvc))) {
743 code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
745 afs_PutFakeStat(&fakestat);
/* Hold the vnode across the close so releasing the flock below
 * can't race with the vnode being recycled. */
748 VN_HOLD(AFSTOV(tvc));
749 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
750 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
751 code = vno_close(afd);
753 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
755 AFS_RELE(AFSTOV(tvc));
759 /* now, if close not done, do it */
761 code = vno_close(afd);
763 afs_PutFakeStat(&fakestat);
764 return code; /* return code from vnode layer */
769 /* handle any closing cleanup stuff */
/*
 * afs_close -- vnode close entry point (signature varies per platform;
 * see the #if chain below).  On a write/truncate close it stores dirty
 * data back to the server, either inline via afs_StoreOnLastReference()
 * or by queueing a BOP_STORE request to a background daemon with
 * afs_BQueue().  It also releases this process's file locks, reports
 * store failures to the user via afs_warnuser(), and kicks off any
 * pending silly-delete with afs_remunlink() when the vnode is about to
 * lose its last real reference.
 *
 * NOTE(review): partial extraction -- many original lines (declarations,
 * braces, #else/#endif partners, the final return) are missing here.
 */
772 afs_close(OSI_VC_ARG(avc), aflags, lastclose,
773 #if !defined(AFS_SGI65_ENV)
777 #if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
781 lastclose_t lastclose;
782 #if !defined(AFS_SGI65_ENV)
784 #if defined(AFS_SGI64_ENV)
788 #elif defined(AFS_SUN5_ENV)
789 afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
793 afs_close(OSI_VC_ARG(avc), aflags, acred)
797 struct AFS_UCRED *acred;
799 register afs_int32 code;
800 register struct brequest *tb;
801 struct vrequest treq;
805 struct afs_fakestat_state fakestat;
808 AFS_STATCNT(afs_close);
809 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
810 ICL_TYPE_INT32, aflags);
811 code = afs_InitReq(&treq, acred);
814 afs_InitFakeStat(&fakestat);
815 code = afs_EvalFakeStat(&avc, &fakestat, &treq);
817 afs_PutFakeStat(&fakestat);
821 if (avc->flockCount) {
822 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
825 #if defined(AFS_SGI_ENV)
827 afs_PutFakeStat(&fakestat);
830 #elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
832 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
833 afs_PutFakeStat(&fakestat);
/* Release this pid's byte-range locks and flocks before storing. */
838 #if defined(AFS_SGI_ENV)
839 /* unlock any locks for pid - could be wrong for child .. */
840 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
842 get_current_flid(&flid);
843 cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
844 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
847 cleanlocks((vnode_t *) avc, flp);
848 #else /* AFS_SGI64_ENV */
849 cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
850 #endif /* AFS_SGI64_ENV */
851 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
852 #endif /* AFS_SGI65_ENV */
853 /* afs_chkpgoob will drop and re-acquire the global lock. */
854 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
855 #else /* AFS_SGI_ENV */
856 if (avc->flockCount) { /* Release Lock */
857 #if defined(AFS_OSF_ENV)
858 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1 /*onlymine */ );
860 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
863 #endif /* AFS_SGI_ENV */
864 #endif /* AFS_SUN5_ENV */
/* Write-back path: the file was open for write or truncate. */
865 if (aflags & (FWRITE | FTRUNC)) {
867 /* do it yourself if daemons are all busy */
868 ObtainWriteLock(&avc->lock, 124);
869 code = afs_StoreOnLastReference(avc, &treq);
870 ReleaseWriteLock(&avc->lock);
871 #if defined(AFS_SGI_ENV)
872 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
875 #if defined(AFS_SGI_ENV)
876 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
878 /* at least one daemon is idle, so ask it to do the store.
879 * Also, note that we don't lock it any more... */
880 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
881 (afs_size_t) acred->cr_uid, (afs_size_t) 0,
883 /* sleep waiting for the store to start, then retrieve error code */
884 while ((tb->flags & BUVALID) == 0) {
892 /* VNOVNODE is "acceptable" error code from close, since
893 * may happen when deleting a file on another machine while
894 * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
895 if (code == VNOVNODE || code == ENOENT)
898 /* Ensure last closer gets the error. If another thread caused
899 * DoPartialWrite and this thread does not actually store the data,
900 * it may not see the quota error.
902 ObtainWriteLock(&avc->lock, 406);
905 osi_ReleaseVM(avc, acred);
907 code = avc->vc_error;
910 ReleaseWriteLock(&avc->lock);
912 /* some codes merit specific complaint */
914 afs_warnuser("afs: failed to store file (network problems)\n");
917 else if (code == ENOSPC) {
919 ("afs: failed to store file (over quota or partition full)\n");
922 else if (code == ENOSPC) {
923 afs_warnuser("afs: failed to store file (partition full)\n");
924 } else if (code == EDQUOT) {
925 afs_warnuser("afs: failed to store file (over quota)\n");
929 afs_warnuser("afs: failed to store file (%d)\n", code);
931 /* finally, we flush any text pages lying around here */
935 #if defined(AFS_SGI_ENV)
936 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
937 osi_Assert(avc->opens > 0);
939 /* file open for read */
940 ObtainWriteLock(&avc->lock, 411);
943 osi_ReleaseVM(avc, acred);
945 code = avc->vc_error;
949 ReleaseWriteLock(&avc->lock);
/* Refcount of 2 here means we are about to drop the last real user
 * reference; finish any deferred (silly) unlink now. */
952 if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
953 afs_remunlink(avc, 1); /* ignore any return code */
956 afs_PutFakeStat(&fakestat);
957 code = afs_CheckCode(code, &treq, 5);
964 afs_fsync(OSI_VC_DECL(avc), int fflags, struct AFS_UCRED *acred, int waitfor)
965 #else /* AFS_OSF_ENV */
966 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
967 afs_fsync(OSI_VC_DECL(avc), int flag, struct AFS_UCRED *acred
969 , off_t start, off_t stop
970 #endif /* AFS_SGI65_ENV */
972 #else /* !OSF && !SUN53 && !SGI */
973 afs_fsync(OSI_VC_DECL(avc), struct AFS_UCRED *acred)
977 register afs_int32 code;
978 struct vrequest treq;
982 return avc->vc_error;
984 #if defined(AFS_SUN5_ENV)
985 /* back out if called from NFS server */
986 if (curthread->t_flag & T_DONTPEND)
990 AFS_STATCNT(afs_fsync);
991 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
992 if ((code = afs_InitReq(&treq, acred)))
995 #if defined(AFS_SGI_ENV)
996 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
997 if (flag & FSYNC_INVAL)
998 osi_VM_FSyncInval(avc);
999 #endif /* AFS_SGI_ENV */
1001 ObtainSharedLock(&avc->lock, 18);
1003 if (avc->execsOrWriters > 0) {
1004 /* put the file back */
1005 UpgradeSToWLock(&avc->lock, 41);
1006 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
1007 ConvertWToSLock(&avc->lock);
1009 #if defined(AFS_SGI_ENV)
1010 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1011 if (code == VNOVNODE) {
1012 /* syncing an unlinked file! - non-informative to pass an errno
1013 * 102 (== VNOVNODE) to user
1019 code = afs_CheckCode(code, &treq, 33);
1020 ReleaseSharedLock(&avc->lock);