2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "afs/param.h"
26 #include "afs/sysincludes.h" /* Standard vendor system headers */
27 #include "afsincludes.h" /* Afs-based standard headers */
28 #include "afs/afs_stats.h" /* statistics */
29 #include "afs/afs_cbqueue.h"
30 #include "afs/nfsclient.h"
31 #include "afs/afs_osidnlc.h"
34 extern unsigned char *afs_indexFlags;
36 /* Called by all write-on-close routines: regular afs_close,
37 * store via background daemon and store via the
38 * afs_FlushActiveVCaches routine (when CCORE is on).
39 * avc->lock must be write-locked.
/* NOTE(review): this view of the file is truncated -- the return-type line,
 * several declarations, some #else/#endif arms and the function's tail are
 * not visible.  Comments below describe only the code that is visible. */
42 afs_StoreOnLastReference(register struct vcache *avc,
43 register struct vrequest *treq)
47 AFS_STATCNT(afs_StoreOnLastReference);
48 /* if CCore flag is set, we clear it and do the extra decrement
49 * ourselves now. If we're called by the CCore clearer, the CCore
50 * flag will already be clear, so we don't have to worry about
51 * clearing it twice. */
52 if (avc->states & CCore) {
53 avc->states &= ~CCore;
54 #if defined(AFS_SGI_ENV)
55 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
57 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
58 * depending on the flags the file was opened with. So, if you make any
59 * changes to the way the execsOrWriters flag is handled check with the
/* undo the extra reference/credential hold taken when CCore was set */
62 avc->execsOrWriters--;
63 AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose) */
64 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
67 /* Now, send the file back. Used to require 0 writers left, but now do
68 * it on every close for write, since two closes in a row are harmless
69 * since first will clean all chunks, and second will be noop. Note that
70 * this will also save confusion when someone keeps a file open
71 * inadvertently, since with old system, writes to the server would never
74 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
76 * We have to do these after the above store is done: in some systems like
77 * aix they'll need to flush all the vm dirty pages to the disk via the
78 * strategy routine. During that whole procedure (done under no avc locks)
79 * opens, refcounts would be zero, since it didn't reach the afs_{rd,wr}
80 * routines which means the vcache is a perfect candidate for flushing!
82 #if defined(AFS_SGI_ENV)
83 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
/* drop the writer count only after the store above has completed */
86 avc->execsOrWriters--;
/*
 * afs_MemWrite -- write path for the memory-cache configuration.
 * Copies data from the caller's uio into memory-cache dcache chunks
 * (afs_MemWriteUIO), marking each chunk dirty (IFDataMod/DWriting) so the
 * store-back code pushes it to the fileserver later.
 * NOTE(review): this view is truncated (missing declarations, #else/#endif
 * arms, braces and error paths); comments describe only the visible lines.
 */
93 afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
94 struct AFS_UCRED *acred, int noLock)
96 afs_size_t totalLength;
97 afs_size_t transferLength;
99 afs_size_t offset, len;
100 afs_int32 tlen, trimlen;
103 register struct dcache *tdc;
108 #ifdef AFS_DARWIN80_ENV
112 struct uio *tuiop = &tuio;
113 struct iovec *tvec; /* again, should have define */
115 register afs_int32 code;
116 struct vrequest treq;
118 AFS_STATCNT(afs_MemWrite);
120 return avc->vc_error;
122 startDate = osi_Time();
123 if ((code = afs_InitReq(&treq, acred)))
125 /* otherwise we read */
126 totalLength = AFS_UIO_RESID(auio);
127 filePos = AFS_UIO_OFFSET(auio);
130 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
131 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
132 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
133 ICL_HANDLE_OFFSET(avc->m.Length));
135 afs_MaybeWakeupTruncateDaemon();
136 ObtainWriteLock(&avc->lock, 126);
138 #if defined(AFS_SGI_ENV)
142 * afs_xwrite handles setting m.Length
143 * and handles APPEND mode.
144 * Since we are called via strategy, we need to trim the write to
145 * the actual size of the file
147 osi_Assert(filePos <= avc->m.Length);
148 diff = avc->m.Length - filePos;
149 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
150 totalLength = AFS_UIO_RESID(auio);
153 if (aio & IO_APPEND) {
154 /* append mode, start it at the right spot */
155 #if defined(AFS_SUN56_ENV)
156 auio->uio_loffset = 0;
158 filePos = avc->m.Length;
159 AFS_UIO_SETOFFSET(auio, filePos);
163 * Note that we use startDate rather than calling osi_Time() here.
164 * This is to avoid counting lock-waiting time in file date (for ranlib).
166 avc->m.Date = startDate;
/* HP-UX only: enforce the process RLIMIT_FSIZE file-size resource limit */
168 #if defined(AFS_HPUX_ENV)
169 #if defined(AFS_HPUX101_ENV)
170 if ((totalLength + filePos) >> 9 >
171 (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
173 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
176 ReleaseWriteLock(&avc->lock);
180 #ifdef AFS_VM_RDWR_ENV
182 * If write is implemented via VM, afs_FakeOpen() is called from the
183 * high-level write op.
185 if (avc->execsOrWriters <= 0) {
186 printf("WARNING: afs_ufswr vp=%lx, exOrW=%d\n", (unsigned long)avc,
187 avc->execsOrWriters);
192 avc->states |= CDirty;
193 #ifndef AFS_DARWIN80_ENV
194 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main loop: locate/create one cache chunk per iteration and copy into it
 * until the whole request has been transferred (or an error occurs). */
196 while (totalLength > 0) {
198 * The following line is necessary because afs_GetDCache with
199 * flag == 4 expects the length field to be filled. It decides
200 * from this whether it's necessary to fetch data into the chunk
201 * before writing or not (when the whole chunk is overwritten!).
203 len = totalLength; /* write this amount by default */
205 tdc = afs_FindDCache(avc, filePos);
207 ObtainWriteLock(&tdc->lock, 653);
208 } else if (afs_blocksUsed >
209 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
/* cache nearly full: reuse an up-to-date chunk if possible, otherwise
 * drop locks and wait for the truncate daemon to drain some space */
210 tdc = afs_FindDCache(avc, filePos);
212 ObtainWriteLock(&tdc->lock, 654);
213 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
214 || (tdc->dflags & DFFetching)) {
215 ReleaseWriteLock(&tdc->lock);
221 afs_MaybeWakeupTruncateDaemon();
222 while (afs_blocksUsed >
223 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
224 ReleaseWriteLock(&avc->lock);
225 if (afs_blocksUsed - afs_blocksDiscarded >
226 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
227 afs_WaitForCacheDrain = 1;
228 afs_osi_Sleep(&afs_WaitForCacheDrain);
230 afs_MaybeFreeDiscardedDCache();
231 afs_MaybeWakeupTruncateDaemon();
/* re-acquire avc->lock after sleeping; CDirty may have been cleared */
232 ObtainWriteLock(&avc->lock, 506);
234 avc->states |= CDirty;
235 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
237 ObtainWriteLock(&tdc->lock, 655);
240 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
242 ObtainWriteLock(&tdc->lock, 656);
/* account for the chunk becoming dirty (once per chunk) */
248 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
249 afs_stats_cmperf.cacheCurrDirtyChunks++;
250 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
252 if (!(tdc->f.states & DWriting)) {
253 /* don't mark entry as mod if we don't have to */
254 tdc->f.states |= DWriting;
255 tdc->dflags |= DFEntryMod;
257 len = totalLength; /* write this amount by default */
258 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
259 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
260 if (max <= len + offset) { /*if we'd go past the end of this chunk */
261 /* it won't all fit in this chunk, so write as much
266 #ifdef AFS_DARWIN80_ENV
270 tuiop = afsio_darwin_partialcopy(auio, trimlen);
272 /* mung uio structure to be right for this transfer */
273 afsio_copy(auio, &tuio, tvec);
275 afsio_trim(&tuio, trimlen);
277 AFS_UIO_SETOFFSET(tuiop, offset);
/* copy the data into the memory-cache chunk */
279 code = afs_MemWriteUIO(tdc->f.inode, tuiop);
/* error path (guarding "if (code)" is not visible here -- TODO confirm):
 * discard the chunk's contents and its dirty accounting */
281 void *mep; /* XXX in prototype world is struct memCacheEntry * */
283 ZapDCE(tdc); /* bad data */
284 mep = afs_MemCacheOpen(tdc->f.inode);
285 afs_MemCacheTruncate(mep, 0);
286 afs_MemCacheClose(mep);
287 afs_stats_cmperf.cacheCurrDirtyChunks--;
288 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
289 ReleaseWriteLock(&tdc->lock);
293 /* otherwise we've written some, fixup length, etc and continue with next seg */
294 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
296 afsio_skip(auio, tlen); /* advance auio over data written */
297 /* compute new file size */
298 if (offset + len > tdc->f.chunkBytes) {
299 afs_int32 tlength = offset + len;
300 afs_AdjustSize(tdc, tlength);
301 if (tdc->validPos < filePos + len)
302 tdc->validPos = filePos + len;
305 transferLength += len;
307 #if defined(AFS_SGI_ENV)
308 /* afs_xwrite handles setting m.Length */
309 osi_Assert(filePos <= avc->m.Length);
311 if (filePos > avc->m.Length) {
312 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
313 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
314 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
315 ICL_HANDLE_OFFSET(filePos));
316 avc->m.Length = filePos;
319 ReleaseWriteLock(&tdc->lock);
321 #if !defined(AFS_VM_RDWR_ENV)
323 * If write is implemented via VM, afs_DoPartialWrite() is called from
324 * the high-level write op.
327 code = afs_DoPartialWrite(avc, &treq);
335 #ifndef AFS_VM_RDWR_ENV
336 afs_FakeClose(avc, acred);
338 if (error && !avc->vc_error)
339 avc->vc_error = error;
341 ReleaseWriteLock(&avc->lock);
342 #ifdef AFS_DARWIN80_ENV
345 osi_FreeSmallSpace(tvec);
347 error = afs_CheckCode(error, &treq, 6);
352 /* called on writes */
/*
 * afs_UFSWrite -- write path for the on-disk (UFS) cache configuration.
 * Mirrors afs_MemWrite, but each chunk lives in a local cache file that is
 * written through the host VFS (VOP_WRITE / VNOP_RDWR / osi_rdwr depending
 * on platform).  Chunks are marked dirty so the store-back code pushes them
 * to the fileserver later.
 * NOTE(review): this view is truncated (missing declarations, #else/#endif
 * arms, braces and error paths); comments describe only the visible lines.
 */
354 afs_UFSWrite(register struct vcache *avc, struct uio *auio, int aio,
355 struct AFS_UCRED *acred, int noLock)
357 afs_size_t totalLength;
358 afs_size_t transferLength;
360 afs_size_t offset, len;
365 register struct dcache *tdc;
370 #ifdef AFS_DARWIN80_ENV
374 struct uio *tuiop = &tuio;
375 struct iovec *tvec; /* again, should have define */
377 struct osi_file *tfile;
378 register afs_int32 code;
379 struct vrequest treq;
381 AFS_STATCNT(afs_UFSWrite);
383 return avc->vc_error;
385 startDate = osi_Time();
386 if ((code = afs_InitReq(&treq, acred)))
388 /* otherwise we read */
389 totalLength = AFS_UIO_RESID(auio);
390 filePos = AFS_UIO_OFFSET(auio);
393 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
394 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
395 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
396 ICL_HANDLE_OFFSET(avc->m.Length));
398 afs_MaybeWakeupTruncateDaemon();
399 ObtainWriteLock(&avc->lock, 556);
401 #if defined(AFS_SGI_ENV)
405 * afs_xwrite handles setting m.Length
406 * and handles APPEND mode.
407 * Since we are called via strategy, we need to trim the write to
408 * the actual size of the file
410 osi_Assert(filePos <= avc->m.Length);
411 diff = avc->m.Length - filePos;
412 AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
413 totalLength = AFS_UIO_RESID(auio);
416 if (aio & IO_APPEND) {
417 /* append mode, start it at the right spot */
418 #if defined(AFS_SUN56_ENV)
419 auio->uio_loffset = 0;
421 filePos = avc->m.Length;
422 AFS_UIO_SETOFFSET(auio, avc->m.Length);
426 * Note that we use startDate rather than calling osi_Time() here.
427 * This is to avoid counting lock-waiting time in file date (for ranlib).
429 avc->m.Date = startDate;
/* HP-UX only: enforce the process RLIMIT_FSIZE file-size resource limit */
431 #if defined(AFS_HPUX_ENV)
432 #if defined(AFS_HPUX101_ENV)
433 if ((totalLength + filePos) >> 9 >
434 p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
436 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
439 ReleaseWriteLock(&avc->lock);
443 #ifdef AFS_VM_RDWR_ENV
445 * If write is implemented via VM, afs_FakeOpen() is called from the
446 * high-level write op.
448 if (avc->execsOrWriters <= 0) {
449 printf("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
450 avc->execsOrWriters);
455 avc->states |= CDirty;
456 #ifndef AFS_DARWIN80_ENV
457 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main loop: locate/create one cache chunk per iteration and write into
 * its backing cache file until the whole request has been transferred. */
459 while (totalLength > 0) {
461 * The following line is necessary because afs_GetDCache with
462 * flag == 4 expects the length field to be filled. It decides
463 * from this whether it's necessary to fetch data into the chunk
464 * before writing or not (when the whole chunk is overwritten!).
466 len = totalLength; /* write this amount by default */
467 /* read the cached info */
469 tdc = afs_FindDCache(avc, filePos);
471 ObtainWriteLock(&tdc->lock, 657);
472 } else if (afs_blocksUsed >
473 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
/* cache nearly full: reuse an up-to-date chunk if possible, otherwise
 * drop locks and wait for the truncate daemon to drain some space */
474 tdc = afs_FindDCache(avc, filePos);
476 ObtainWriteLock(&tdc->lock, 658);
477 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
478 || (tdc->dflags & DFFetching)) {
479 ReleaseWriteLock(&tdc->lock);
485 afs_MaybeWakeupTruncateDaemon();
486 while (afs_blocksUsed >
487 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
488 ReleaseWriteLock(&avc->lock);
489 if (afs_blocksUsed - afs_blocksDiscarded >
490 PERCENT(CM_WAITFORDRAINPCT, afs_cacheBlocks)) {
491 afs_WaitForCacheDrain = 1;
492 afs_osi_Sleep(&afs_WaitForCacheDrain);
494 afs_MaybeFreeDiscardedDCache();
495 afs_MaybeWakeupTruncateDaemon();
/* re-acquire avc->lock after sleeping; CDirty may have been cleared */
496 ObtainWriteLock(&avc->lock, 509);
498 avc->states |= CDirty;
499 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
501 ObtainWriteLock(&tdc->lock, 659);
504 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
506 ObtainWriteLock(&tdc->lock, 660);
/* account for the chunk becoming dirty (once per chunk) */
512 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
513 afs_stats_cmperf.cacheCurrDirtyChunks++;
514 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
516 if (!(tdc->f.states & DWriting)) {
517 /* don't mark entry as mod if we don't have to */
518 tdc->f.states |= DWriting;
519 tdc->dflags |= DFEntryMod;
/* open the chunk's backing cache file; closed below via afs_CFileClose
 * on the visible error path (success-path close is not visible here) */
521 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
522 len = totalLength; /* write this amount by default */
523 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
524 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
525 if (max <= len + offset) { /*if we'd go past the end of this chunk */
526 /* it won't all fit in this chunk, so write as much
531 #ifdef AFS_DARWIN80_ENV
535 tuiop = afsio_darwin_partialcopy(auio, trimlen);
537 /* mung uio structure to be right for this transfer */
538 afsio_copy(auio, &tuio, tvec);
540 afsio_trim(&tuio, trimlen);
542 AFS_UIO_SETOFFSET(tuiop, offset);
/* Per-platform dispatch: write tuio into the cache file through whatever
 * vnode/file API this kernel provides. */
544 #if defined(AFS_AIX41_ENV)
547 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL,
548 NULL, afs_osi_credp);
550 #elif defined(AFS_AIX32_ENV)
551 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
552 #elif defined(AFS_AIX_ENV)
554 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) & offset,
555 &tuio, NULL, NULL, -1);
556 #elif defined(AFS_SUN5_ENV)
558 #ifdef AFS_SUN510_ENV
562 VOP_RWLOCK(tfile->vnode, 1, &ct);
563 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
564 VOP_RWUNLOCK(tfile->vnode, 1, &ct);
567 VOP_RWLOCK(tfile->vnode, 1);
568 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
569 VOP_RWUNLOCK(tfile->vnode, 1);
574 ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
575 #elif defined(AFS_SGI_ENV)
577 avc->states |= CWritingUFS;
578 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
579 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
580 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
581 avc->states &= ~CWritingUFS;
583 #elif defined(AFS_OSF_ENV)
585 struct ucred *tmpcred = u.u_cred;
586 u.u_cred = afs_osi_credp;
587 tuio.uio_rw = UIO_WRITE;
589 VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, code);
593 #elif defined(AFS_HPUX100_ENV)
596 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
599 #elif defined(AFS_LINUX20_ENV)
601 code = osi_rdwr(tfile, &tuio, UIO_WRITE);
603 #elif defined(AFS_DARWIN80_ENV)
605 code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
607 #elif defined(AFS_DARWIN_ENV)
609 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
610 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
611 VOP_UNLOCK(tfile->vnode, 0, current_proc());
613 #elif defined(AFS_FBSD50_ENV)
615 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
616 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
617 VOP_UNLOCK(tfile->vnode, 0, curthread);
619 #elif defined(AFS_XBSD_ENV)
621 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
622 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
623 VOP_UNLOCK(tfile->vnode, 0, curproc);
627 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
629 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
/* error path (guarding "if (code)" is not visible here -- TODO confirm):
 * discard the chunk's contents and its dirty accounting */
633 ZapDCE(tdc); /* bad data */
634 osi_UFSTruncate(tfile, 0); /* fake truncate the segment */
635 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
636 afs_stats_cmperf.cacheCurrDirtyChunks--;
637 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
638 afs_CFileClose(tfile);
639 ReleaseWriteLock(&tdc->lock);
643 /* otherwise we've written some, fixup length, etc and continue with next seg */
644 len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
646 afsio_skip(auio, tlen); /* advance auio over data written */
647 /* compute new file size */
648 if (offset + len > tdc->f.chunkBytes) {
649 afs_int32 tlength = offset + len;
650 afs_AdjustSize(tdc, tlength);
651 if (tdc->validPos < filePos + len)
652 tdc->validPos = filePos + len;
655 transferLength += len;
657 #if defined(AFS_SGI_ENV)
658 /* afs_xwrite handles setting m.Length */
659 osi_Assert(filePos <= avc->m.Length);
661 if (filePos > avc->m.Length) {
662 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
663 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
664 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
665 ICL_HANDLE_OFFSET(filePos));
666 avc->m.Length = filePos;
670 ReleaseWriteLock(&tdc->lock);
672 #if !defined(AFS_VM_RDWR_ENV)
674 * If write is implemented via VM, afs_DoPartialWrite() is called from
675 * the high-level write op.
678 code = afs_DoPartialWrite(avc, &treq);
686 #ifndef AFS_VM_RDWR_ENV
687 afs_FakeClose(avc, acred);
689 error = afs_CheckCode(error, &treq, 7);
690 /* This set is here so we get the CheckCode. */
691 if (error && !avc->vc_error)
692 avc->vc_error = error;
694 ReleaseWriteLock(&avc->lock);
695 #ifdef AFS_DARWIN80_ENV
698 osi_FreeSmallSpace(tvec);
700 #ifndef AFS_VM_RDWR_ENV
702 * If write is implemented via VM, afs_fsync() is called from the high-level
/* Synchronous-write handling: whether to fsync depends on per-platform
 * flags (IO_SYNC, HP-UX FSYNCIO in the uio, or FSYNC). */
705 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
706 if (noLock && (aio & IO_SYNC)) {
709 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
710 * we're doing them because the file was opened with O_SYNCIO specified,
711 * we have to look in the u area. No single mechanism here!!
713 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
715 if (noLock && (aio & FSYNC)) {
718 if (!AFS_NFSXLATORREQ(acred))
719 afs_fsync(avc, acred);
725 /* do partial write if we're low on unmodified chunks */
/*
 * afs_DoPartialWrite -- if the number of dirty chunks exceeds the configured
 * maximum, asynchronously store the file's dirty segments back to the
 * fileserver (afs_StoreAllSegments with AFS_ASYNC); otherwise do nothing.
 * Returns 0 when there is nothing to do.
 * NOTE(review): the function's tail (the #else/#endif and return of `code`)
 * is not visible in this view.
 */
727 afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
729 register afs_int32 code;
731 if (afs_stats_cmperf.cacheCurrDirtyChunks <=
732 afs_stats_cmperf.cacheMaxDirtyChunks)
733 return 0; /* nothing to do */
734 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
735 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
736 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
/* Solaris additionally invalidates VM pages during the async store */
737 #if defined(AFS_SUN5_ENV)
738 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
740 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
/* Map vno_close() onto this platform's vn_close(); the opening #if of this
 * preprocessor chain is not visible in this view of the file. */
747 #define vno_close(X) vn_close((X), 0, NOCRED)
748 #elif defined(AFS_DUX40_ENV)
749 #define vno_close vn_close
751 /* We don't need this for AIX since:
752 * (1) aix doesn't use fileops and it calls close directly instead
753 * (where the unlocking should be done) and
754 * (2) temporarily, the aix lockf isn't supported yet.
756 * this stupid routine is used to release the flocks held on a
757 * particular file descriptor. Sun doesn't pass file descr. info
758 * through to the vnode layer, and yet we must unlock flocked files
759 * on the *appropriate* (not first, as in System V) close call. Thus
761 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
762 * file ops structure into any afs file when it gets flocked.
763 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/* NOTE(review): this view is truncated (return type, some declarations,
 * braces and #if/#endif lines are not visible); comments below describe
 * only the visible code. */
767 afs_closex(register struct file *afd)
769 struct vrequest treq;
774 struct afs_fakestat_state fakestat;
776 AFS_STATCNT(afs_closex);
777 /* setup the credentials */
778 if ((code = afs_InitReq(&treq, u.u_cred)))
780 afs_InitFakeStat(&fakestat);
783 /* we're the last one. If we're an AFS vnode, clear the flags,
784 * close the file and release the lock when done. Otherwise, just
785 * let the regular close code work. */
786 if (afd->f_type == DTYPE_VNODE) {
787 tvc = VTOAFS(afd->f_data);
788 if (IsAfsVnode(AFSTOV(tvc))) {
789 code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
791 afs_PutFakeStat(&fakestat);
/* hold the vnode across vno_close/HandleFlock, then release it */
794 VN_HOLD(AFSTOV(tvc));
795 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
796 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
797 code = vno_close(afd);
799 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
801 AFS_RELE(AFSTOV(tvc));
805 /* now, if close not done, do it */
807 code = vno_close(afd);
809 afs_PutFakeStat(&fakestat);
810 return code; /* return code from vnode layer */
815 /* handle any closing cleanup stuff */
/*
 * afs_close -- VNOP close entry point.  Releases this process's file locks,
 * and on a close-for-write either stores the file back itself
 * (afs_StoreOnLastReference) or queues a BOP_STORE request for a background
 * daemon.  Reports store errors (quota/partition-full/network) to the user
 * and finishes deferred unlinks (afs_remunlink) on the last reference.
 * NOTE(review): this view is truncated -- the per-platform argument lists,
 * several declarations, braces and #else/#endif arms are not visible;
 * comments below describe only the visible code.
 */
818 afs_close(OSI_VC_ARG(avc), aflags, lastclose,
819 #if !defined(AFS_SGI65_ENV)
823 #if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
827 lastclose_t lastclose;
828 #if !defined(AFS_SGI65_ENV)
830 #if defined(AFS_SGI64_ENV)
834 #elif defined(AFS_SUN5_ENV)
835 afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
839 afs_close(OSI_VC_ARG(avc), aflags, acred)
843 struct AFS_UCRED *acred;
845 register afs_int32 code;
846 register struct brequest *tb;
847 struct vrequest treq;
851 struct afs_fakestat_state fakestat;
854 AFS_STATCNT(afs_close);
855 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
856 ICL_TYPE_INT32, aflags);
857 code = afs_InitReq(&treq, acred);
860 afs_InitFakeStat(&fakestat);
861 code = afs_EvalFakeStat(&avc, &fakestat, &treq);
863 afs_PutFakeStat(&fakestat);
867 if (avc->flockCount) {
868 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
/* Per-platform release of file locks held by this process */
871 #if defined(AFS_SGI_ENV)
873 afs_PutFakeStat(&fakestat);
876 /* unlock any locks for pid - could be wrong for child .. */
877 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
879 get_current_flid(&flid);
880 cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
881 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
884 cleanlocks((vnode_t *) avc, flp);
885 #else /* AFS_SGI64_ENV */
886 cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
887 #endif /* AFS_SGI64_ENV */
888 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
889 #endif /* AFS_SGI65_ENV */
890 /* afs_chkpgoob will drop and re-acquire the global lock. */
891 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
892 #elif defined(AFS_SUN5_ENV)
894 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
895 afs_PutFakeStat(&fakestat);
898 #else /* AFS_SGI_ENV */
899 if (avc->flockCount) { /* Release Lock */
900 #if defined(AFS_OSF_ENV)
901 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1 /*onlymine */ );
903 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
906 #endif /* AFS_SGI_ENV */
/* Close-for-write: store dirty data back, either inline or via a daemon */
907 if (aflags & (FWRITE | FTRUNC)) {
908 if (afs_BBusy() || (AFS_NFSXLATORREQ(acred))) {
909 /* do it yourself if daemons are all busy */
910 ObtainWriteLock(&avc->lock, 124);
911 code = afs_StoreOnLastReference(avc, &treq);
912 ReleaseWriteLock(&avc->lock);
913 #if defined(AFS_SGI_ENV)
914 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
917 #if defined(AFS_SGI_ENV)
918 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
920 /* at least one daemon is idle, so ask it to do the store.
921 * Also, note that we don't lock it any more... */
922 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
923 (afs_size_t) acred->cr_uid, (afs_size_t) 0,
925 /* sleep waiting for the store to start, then retrieve error code */
926 while ((tb->flags & BUVALID) == 0) {
934 /* VNOVNODE is "acceptable" error code from close, since
935 * may happen when deleting a file on another machine while
936 * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
937 if (code == VNOVNODE || code == ENOENT)
940 /* Ensure last closer gets the error. If another thread caused
941 * DoPartialWrite and this thread does not actually store the data,
942 * it may not see the quota error.
944 ObtainWriteLock(&avc->lock, 406);
947 osi_ReleaseVM(avc, acred);
949 code = avc->vc_error;
952 ReleaseWriteLock(&avc->lock);
953 /* some codes merit specific complaint */
954 /* some codes merit specific complaint */
956 afs_warnuser("afs: failed to store file (network problems)\n");
959 else if (code == ENOSPC) {
961 ("afs: failed to store file (over quota or partition full)\n");
964 else if (code == ENOSPC) {
965 afs_warnuser("afs: failed to store file (partition full)\n");
966 } else if (code == EDQUOT) {
967 afs_warnuser("afs: failed to store file (over quota)\n");
971 afs_warnuser("afs: failed to store file (%d)\n", code);
973 /* finally, we flush any text pages lying around here */
977 #if defined(AFS_SGI_ENV)
978 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
979 osi_Assert(avc->opens > 0);
981 /* file open for read */
982 ObtainWriteLock(&avc->lock, 411);
985 osi_ReleaseVM(avc, acred);
987 code = avc->vc_error;
991 ReleaseWriteLock(&avc->lock);
/* last reference to an unlinked file: finish the deferred remove */
994 if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
995 afs_remunlink(avc, 1); /* ignore any return code */
998 afs_PutFakeStat(&fakestat);
999 code = afs_CheckCode(code, &treq, 5);
1006 afs_fsync(OSI_VC_DECL(avc), int fflags, struct AFS_UCRED *acred, int waitfor)
1007 #else /* AFS_OSF_ENV */
1008 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
1009 afs_fsync(OSI_VC_DECL(avc), int flag, struct AFS_UCRED *acred
1010 #ifdef AFS_SGI65_ENV
1011 , off_t start, off_t stop
1012 #endif /* AFS_SGI65_ENV */
1014 #else /* !OSF && !SUN53 && !SGI */
1015 afs_fsync(OSI_VC_DECL(avc), struct AFS_UCRED *acred)
1019 register afs_int32 code;
1020 struct vrequest treq;
1021 OSI_VC_CONVERT(avc);
1024 return avc->vc_error;
1026 #if defined(AFS_SUN5_ENV)
1027 /* back out if called from NFS server */
1028 if (curthread->t_flag & T_DONTPEND)
1032 AFS_STATCNT(afs_fsync);
1033 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
1034 if ((code = afs_InitReq(&treq, acred)))
1037 #if defined(AFS_SGI_ENV)
1038 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1039 if (flag & FSYNC_INVAL)
1040 osi_VM_FSyncInval(avc);
1041 #endif /* AFS_SGI_ENV */
1043 ObtainSharedLock(&avc->lock, 18);
1045 if (avc->execsOrWriters > 0) {
1046 /* put the file back */
1047 UpgradeSToWLock(&avc->lock, 41);
1048 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
1049 ConvertWToSLock(&avc->lock);
1051 #if defined(AFS_SGI_ENV)
1052 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1053 if (code == VNOVNODE) {
1054 /* syncing an unlinked file! - non-informative to pass an errno
1055 * 102 (== VNOVNODE) to user
1061 code = afs_CheckCode(code, &treq, 33);
1062 ReleaseSharedLock(&avc->lock);