2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "afs/param.h"
26 #include "afs/sysincludes.h" /* Standard vendor system headers */
27 #include "afsincludes.h" /* Afs-based standard headers */
28 #include "afs/afs_stats.h" /* statistics */
29 #include "afs/afs_cbqueue.h"
30 #include "afs/nfsclient.h"
31 #include "afs/afs_osidnlc.h"
/* NOTE(review): this excerpt is a sampled listing -- the function's return
 * type, braces, matching #else/#endif lines and the final return statement
 * are not visible here, so only the visible statements are annotated. */
34 extern unsigned char *afs_indexFlags;

/* Flush all dirty segments of avc back to the fileserver on a write-close.
 * avc  - the vcache being closed; caller must hold avc->lock for write.
 * treq - request/credential context used for the store RPCs.
 * The result comes from afs_StoreAllSegments(); the surrounding error
 * assembly is outside this visible excerpt. */
36 /* Called by all write-on-close routines: regular afs_close,
37 * store via background daemon and store via the
38 * afs_FlushActiveVCaches routine (when CCORE is on).
39 * avc->lock must be write-locked.
42 afs_StoreOnLastReference(register struct vcache *avc,
43 register struct vrequest *treq)
47 AFS_STATCNT(afs_StoreOnLastReference);
48 /* if CCore flag is set, we clear it and do the extra decrement
49 * ourselves now. If we're called by the CCore clearer, the CCore
50 * flag will already be clear, so we don't have to worry about
51 * clearing it twice. */
52 if (avc->states & CCore) {
53 avc->states &= ~CCore;
54 #if defined(AFS_SGI_ENV)
55 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
57 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
58 * depending on the flags the file was opened with. So, if you make any
59 * changes to the way the execsOrWriters flag is handled check with the
/* Undo the reference and credential hold taken by afs_FakeClose when it
 * set CCore (the opens-- that presumably pairs with execsOrWriters-- is
 * on a line not visible in this excerpt -- verify against full source). */
62 avc->execsOrWriters--;
63 AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose) */
64 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
67 /* Now, send the file back. Used to require 0 writers left, but now do
68 * it on every close for write, since two closes in a row are harmless
69 * since first will clean all chunks, and second will be noop. Note that
70 * this will also save confusion when someone keeps a file open
71 * inadvertently, since with old system, writes to the server would never
74 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
76 * We have to do these after the above store in done: in some systems like
77 * aix they'll need to flush all the vm dirty pages to the disk via the
78 * strategy routine. During that all procedure (done under no avc locks)
79 * opens, refcounts would be zero, since it didn't reach the afs_{rd,wr}
80 * routines which means the vcache is a perfect candidate for flushing!
82 #if defined(AFS_SGI_ENV)
83 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
/* Second writer-count decrement: balances the count for this close itself
 * (the CCore branch above balanced the fake close). */
86 avc->execsOrWriters--;
/* Write path for the in-memory cache backend.
 * avc    - target vcache; auio - caller's uio describing the user buffer;
 * aio    - open/IO flags (IO_APPEND checked below);
 * acred  - caller credentials; noLock - assumption: nonzero means the
 *          caller already holds the needed locks (TODO confirm; the
 *          lock-taking lines that would test noLock are not all visible).
 * Copies the uio into cache chunks one dcache entry at a time, marking
 * each chunk dirty, then schedules partial stores when too many dirty
 * chunks accumulate.
 * NOTE(review): sampled listing -- declarations for filePos, startDate,
 * error, max, tuio and several closing braces/#endifs are not visible. */
93 afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
94 struct AFS_UCRED *acred, int noLock)
96 afs_size_t totalLength;
97 afs_size_t transferLength;
99 afs_size_t offset, len;
100 afs_int32 tlen, trimlen;
103 register struct dcache *tdc;
109 struct iovec *tvec; /* again, should have define */
110 register afs_int32 code;
111 struct vrequest treq;
113 AFS_STATCNT(afs_MemWrite);
/* Sticky error on the vnode short-circuits the whole write. */
115 return avc->vc_error;
117 startDate = osi_Time();
118 if ((code = afs_InitReq(&treq, acred)))
120 /* otherwise we read */
121 totalLength = auio->afsio_resid;
122 filePos = auio->afsio_offset;
125 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
126 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
127 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
128 ICL_HANDLE_OFFSET(avc->m.Length));
130 afs_MaybeWakeupTruncateDaemon();
131 ObtainWriteLock(&avc->lock, 126);
133 #if defined(AFS_SGI_ENV)
137 * afs_xwrite handles setting m.Length
138 * and handles APPEND mode.
139 * Since we are called via strategy, we need to trim the write to
140 * the actual size of the file
142 osi_Assert(filePos <= avc->m.Length);
143 diff = avc->m.Length - filePos;
144 auio->afsio_resid = MIN(totalLength, diff);
145 totalLength = auio->afsio_resid;
148 if (aio & IO_APPEND) {
149 /* append mode, start it at the right spot */
150 #if defined(AFS_SUN56_ENV)
151 auio->uio_loffset = 0;
153 filePos = auio->afsio_offset = avc->m.Length;
157 * Note that we use startDate rather than calling osi_Time() here.
158 * This is to avoid counting lock-waiting time in file date (for ranlib).
160 avc->m.Date = startDate;
/* Per-platform RLIMIT_FSIZE enforcement (>>9 converts bytes to 512-byte
 * sectors where the limit is expressed that way). */
162 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
163 #if defined(AFS_HPUX101_ENV)
164 if ((totalLength + filePos) >> 9 >
165 (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
168 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
170 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
174 ReleaseWriteLock(&avc->lock);
178 #ifdef AFS_VM_RDWR_ENV
180 * If write is implemented via VM, afs_FakeOpen() is called from the
181 * high-level write op.
183 if (avc->execsOrWriters <= 0) {
184 printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc,
185 avc->execsOrWriters);
190 avc->states |= CDirty;
191 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main copy loop: one iteration per cache chunk touched by the write. */
192 while (totalLength > 0) {
193 /* Read the cached info. If we call GetDCache while the cache
194 * truncate daemon is running we risk overflowing the disk cache.
195 * Instead we check for an existing cache slot. If we cannot
196 * find an existing slot we wait for the cache to drain
197 * before calling GetDCache.
200 tdc = afs_FindDCache(avc, filePos);
202 ObtainWriteLock(&tdc->lock, 653);
203 } else if (afs_blocksUsed >
204 (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
205 tdc = afs_FindDCache(avc, filePos);
207 ObtainWriteLock(&tdc->lock, 654);
/* A stale or in-flight chunk can't be written into; drop it and wait. */
208 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
209 || (tdc->dflags & DFFetching)) {
210 ReleaseWriteLock(&tdc->lock);
216 afs_MaybeWakeupTruncateDaemon();
/* Block until the cache drains below the high-water mark; avc->lock is
 * dropped while sleeping and retaken afterwards. */
217 while (afs_blocksUsed >
218 (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
219 ReleaseWriteLock(&avc->lock);
220 if (afs_blocksUsed - afs_blocksDiscarded >
221 (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
222 afs_WaitForCacheDrain = 1;
223 afs_osi_Sleep(&afs_WaitForCacheDrain);
225 afs_MaybeFreeDiscardedDCache();
226 afs_MaybeWakeupTruncateDaemon();
227 ObtainWriteLock(&avc->lock, 506);
229 avc->states |= CDirty;
/* Flag 4 tells afs_GetDCache this is a write; it uses `len` to decide
 * whether the chunk must be fetched before being overwritten. */
230 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
232 ObtainWriteLock(&tdc->lock, 655);
235 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
237 ObtainWriteLock(&tdc->lock, 656);
/* First time dirtying this chunk: bump the global dirty count and pin
 * the slot so the truncate daemon won't reclaim it. */
243 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
244 afs_stats_cmperf.cacheCurrDirtyChunks++;
245 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
247 if (!(tdc->f.states & DWriting)) {
248 /* don't mark entry as mod if we don't have to */
249 tdc->f.states |= DWriting;
250 tdc->dflags |= DFEntryMod;
/* Clamp this transfer to the portion that fits in the current chunk. */
252 len = totalLength; /* write this amount by default */
253 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
254 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
255 if (max <= len + offset) { /*if we'd go past the end of this chunk */
256 /* it won't all fit in this chunk, so write as much
260 /* mung uio structure to be right for this transfer */
261 afsio_copy(auio, &tuio, tvec);
263 afsio_trim(&tuio, trimlen);
264 tuio.afsio_offset = offset;
266 code = afs_MemWriteUIO(tdc->f.inode, &tuio);
/* On write failure the chunk contents are suspect: zap the dcache entry,
 * truncate the backing memory segment, and un-pin the slot. */
268 void *mep; /* XXX in prototype world is struct memCacheEntry * */
270 ZapDCE(tdc); /* bad data */
271 mep = afs_MemCacheOpen(tdc->f.inode);
272 afs_MemCacheTruncate(mep, 0);
273 afs_MemCacheClose(mep);
274 afs_stats_cmperf.cacheCurrDirtyChunks--;
275 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
276 ReleaseWriteLock(&tdc->lock);
280 /* otherwise we've written some, fixup length, etc and continue with next seg */
281 len = len - tuio.afsio_resid; /* compute amount really transferred */
283 afsio_skip(auio, tlen); /* advance auio over data written */
284 /* compute new file size */
285 if (offset + len > tdc->f.chunkBytes) {
286 afs_int32 tlength = offset + len;
287 afs_AdjustSize(tdc, tlength);
288 if (tdc->validPos < filePos + len)
289 tdc->validPos = filePos + len;
292 transferLength += len;
294 #if defined(AFS_SGI_ENV)
295 /* afs_xwrite handles setting m.Length */
296 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: extend the cached file length when the write grew the file. */
298 if (filePos > avc->m.Length) {
299 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
300 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
301 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
302 ICL_HANDLE_OFFSET(filePos));
303 avc->m.Length = filePos;
306 ReleaseWriteLock(&tdc->lock);
308 #if !defined(AFS_VM_RDWR_ENV)
310 * If write is implemented via VM, afs_DoPartialWrite() is called from
311 * the high-level write op.
314 code = afs_DoPartialWrite(avc, &treq);
322 #ifndef AFS_VM_RDWR_ENV
323 afs_FakeClose(avc, acred);
325 if (error && !avc->vc_error)
326 avc->vc_error = error;
328 ReleaseWriteLock(&avc->lock);
329 osi_FreeSmallSpace(tvec);
331 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
332 * work. GFS is truly a poorly-designed interface! */
333 afs_gfshack((struct gnode *)avc);
335 error = afs_CheckCode(error, &treq, 6);
340 /* called on writes */
/* Write path for the on-disk (UFS-file-backed) cache backend.  Structure
 * mirrors afs_MemWrite above: same chunk loop, drain logic and dirty
 * accounting, but each chunk write goes through the host OS's vnode/file
 * interface (the big #if ladder below), and the post-loop tail adds
 * per-platform synchronous-write (fsync) handling.
 * avc/auio/aio/acred/noLock: as for afs_MemWrite.
 * NOTE(review): sampled listing -- declarations for filePos, startDate,
 * error, max, tuio and many closing braces/#else/#endif lines are not
 * visible; only the visible statements are annotated. */
342 afs_UFSWrite(register struct vcache *avc, struct uio *auio, int aio,
343 struct AFS_UCRED *acred, int noLock)
345 afs_size_t totalLength;
346 afs_size_t transferLength;
348 afs_size_t offset, len;
353 register struct dcache *tdc;
359 struct iovec *tvec; /* again, should have define */
360 struct osi_file *tfile;
361 register afs_int32 code;
362 struct vrequest treq;
364 AFS_STATCNT(afs_UFSWrite);
/* Sticky error on the vnode short-circuits the whole write. */
366 return avc->vc_error;
368 startDate = osi_Time();
369 if ((code = afs_InitReq(&treq, acred)))
371 /* otherwise we read */
372 totalLength = auio->afsio_resid;
373 filePos = auio->afsio_offset;
376 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
377 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
378 ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
379 ICL_HANDLE_OFFSET(avc->m.Length));
381 afs_MaybeWakeupTruncateDaemon();
382 ObtainWriteLock(&avc->lock, 556);
384 #if defined(AFS_SGI_ENV)
388 * afs_xwrite handles setting m.Length
389 * and handles APPEND mode.
390 * Since we are called via strategy, we need to trim the write to
391 * the actual size of the file
393 osi_Assert(filePos <= avc->m.Length);
394 diff = avc->m.Length - filePos;
395 auio->afsio_resid = MIN(totalLength, diff);
396 totalLength = auio->afsio_resid;
399 if (aio & IO_APPEND) {
400 /* append mode, start it at the right spot */
401 #if defined(AFS_SUN56_ENV)
402 auio->uio_loffset = 0;
404 filePos = auio->afsio_offset = avc->m.Length;
408 * Note that we use startDate rather than calling osi_Time() here.
409 * This is to avoid counting lock-waiting time in file date (for ranlib).
411 avc->m.Date = startDate;
/* Per-platform RLIMIT_FSIZE enforcement (>>9: bytes -> 512-byte units). */
413 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
414 #if defined(AFS_HPUX101_ENV)
415 if ((totalLength + filePos) >> 9 >
416 p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
419 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
421 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
425 ReleaseWriteLock(&avc->lock);
429 #ifdef AFS_VM_RDWR_ENV
431 * If write is implemented via VM, afs_FakeOpen() is called from the
432 * high-level write op.
434 if (avc->execsOrWriters <= 0) {
435 printf("WARNING: afs_ufswr vcp=%x, exOrW=%d\n", avc,
436 avc->execsOrWriters);
441 avc->states |= CDirty;
442 tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
/* Main copy loop: one iteration per cache chunk touched by the write. */
443 while (totalLength > 0) {
445 * The following line is necessary because afs_GetDCache with
446 * flag == 4 expects the length field to be filled. It decides
447 * from this whether it's necessary to fetch data into the chunk
448 * before writing or not (when the whole chunk is overwritten!).
450 len = totalLength; /* write this amount by default */
451 /* read the cached info */
453 tdc = afs_FindDCache(avc, filePos);
455 ObtainWriteLock(&tdc->lock, 657);
456 } else if (afs_blocksUsed >
457 (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
458 tdc = afs_FindDCache(avc, filePos);
460 ObtainWriteLock(&tdc->lock, 658);
/* A stale or in-flight chunk can't be written into; drop it and wait. */
461 if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
462 || (tdc->dflags & DFFetching)) {
463 ReleaseWriteLock(&tdc->lock);
469 afs_MaybeWakeupTruncateDaemon();
/* Block until the cache drains below the high-water mark; avc->lock is
 * dropped while sleeping and retaken afterwards. */
470 while (afs_blocksUsed >
471 (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
472 ReleaseWriteLock(&avc->lock);
473 if (afs_blocksUsed - afs_blocksDiscarded >
474 (CM_WAITFORDRAINPCT * afs_cacheBlocks) / 100) {
475 afs_WaitForCacheDrain = 1;
476 afs_osi_Sleep(&afs_WaitForCacheDrain);
478 afs_MaybeFreeDiscardedDCache();
479 afs_MaybeWakeupTruncateDaemon();
480 ObtainWriteLock(&avc->lock, 509);
482 avc->states |= CDirty;
483 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
485 ObtainWriteLock(&tdc->lock, 659);
488 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
490 ObtainWriteLock(&tdc->lock, 660);
/* First time dirtying this chunk: bump the global dirty count and pin
 * the slot so the truncate daemon won't reclaim it. */
496 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
497 afs_stats_cmperf.cacheCurrDirtyChunks++;
498 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
500 if (!(tdc->f.states & DWriting)) {
501 /* don't mark entry as mod if we don't have to */
502 tdc->f.states |= DWriting;
503 tdc->dflags |= DFEntryMod;
505 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
/* Clamp this transfer to the portion that fits in the current chunk. */
506 len = totalLength; /* write this amount by default */
507 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
508 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
509 if (max <= len + offset) { /*if we'd go past the end of this chunk */
510 /* it won't all fit in this chunk, so write as much
514 /* mung uio structure to be right for this transfer */
515 afsio_copy(auio, &tuio, tvec);
517 afsio_trim(&tuio, trimlen);
518 tuio.afsio_offset = offset;
/* Per-platform write of the chunk to the cache file.  Each arm takes
 * whatever vnode locking its OS requires around the write. */
519 #if defined(AFS_AIX41_ENV)
522 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL,
523 NULL, afs_osi_credp);
525 #elif defined(AFS_AIX32_ENV)
526 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
527 #elif defined(AFS_AIX_ENV)
529 VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) & offset,
530 &tuio, NULL, NULL, -1);
531 #elif defined(AFS_SUN5_ENV)
533 VOP_RWLOCK(tfile->vnode, 1);
534 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
535 VOP_RWUNLOCK(tfile->vnode, 1);
539 ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
540 #elif defined(AFS_SGI_ENV)
/* CWritingUFS guards against recursion back into AFS while we write the
 * local cache file -- presumably checked elsewhere; verify in full source. */
542 avc->states |= CWritingUFS;
543 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
544 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
545 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
546 avc->states &= ~CWritingUFS;
548 #elif defined(AFS_OSF_ENV)
/* Temporarily swap in AFS's credentials for the local write (the saved
 * tmpcred is presumably restored on a line not visible here). */
550 struct ucred *tmpcred = u.u_cred;
551 u.u_cred = afs_osi_credp;
552 tuio.uio_rw = UIO_WRITE;
554 VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp, code);
558 #elif defined(AFS_HPUX100_ENV)
561 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
564 #elif defined(AFS_LINUX20_ENV)
566 code = osi_file_uio_rdwr(tfile, &tuio, UIO_WRITE);
568 #elif defined(AFS_DARWIN_ENV)
570 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
571 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
572 VOP_UNLOCK(tfile->vnode, 0, current_proc());
574 #elif defined(AFS_FBSD50_ENV)
576 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
577 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
578 VOP_UNLOCK(tfile->vnode, 0, curthread);
580 #elif defined(AFS_XBSD_ENV)
582 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
583 code = VOP_WRITE(tfile->vnode, &tuio, 0, afs_osi_credp);
584 VOP_UNLOCK(tfile->vnode, 0, curproc);
588 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
590 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, afs_osi_credp);
/* On write failure the chunk contents are suspect: zap the dcache entry,
 * truncate the cache file, and un-pin the slot. */
594 ZapDCE(tdc); /* bad data */
595 osi_UFSTruncate(tfile, 0); /* fake truncate the segment */
596 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
597 afs_stats_cmperf.cacheCurrDirtyChunks--;
598 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
599 afs_CFileClose(tfile);
600 ReleaseWriteLock(&tdc->lock);
604 /* otherwise we've written some, fixup length, etc and continue with next seg */
605 len = len - tuio.afsio_resid; /* compute amount really transferred */
607 afsio_skip(auio, tlen); /* advance auio over data written */
608 /* compute new file size */
609 if (offset + len > tdc->f.chunkBytes) {
610 afs_int32 tlength = offset + len;
611 afs_AdjustSize(tdc, tlength);
612 if (tdc->validPos < filePos + len)
613 tdc->validPos = filePos + len;
616 transferLength += len;
618 #if defined(AFS_SGI_ENV)
619 /* afs_xwrite handles setting m.Length */
620 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: extend the cached file length when the write grew the file. */
622 if (filePos > avc->m.Length) {
623 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
624 __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
625 ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
626 ICL_HANDLE_OFFSET(filePos));
627 avc->m.Length = filePos;
631 ReleaseWriteLock(&tdc->lock);
633 #if !defined(AFS_VM_RDWR_ENV)
635 * If write is implemented via VM, afs_DoPartialWrite() is called from
636 * the high-level write op.
639 code = afs_DoPartialWrite(avc, &treq);
647 #ifndef AFS_VM_RDWR_ENV
648 afs_FakeClose(avc, acred);
650 error = afs_CheckCode(error, &treq, 7);
651 /* This set is here so we get the CheckCode. */
652 if (error && !avc->vc_error)
653 avc->vc_error = error;
655 ReleaseWriteLock(&avc->lock);
656 osi_FreeSmallSpace(tvec);
658 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
659 * work. GFS is truly a poorly-designed interface! */
660 afs_gfshack((struct gnode *)avc);
/* Tail: per-platform detection of synchronous-write requests (IO_SYNC,
 * FSYNCIO, FSYNC) and an explicit afs_fsync() when one is in effect and
 * the caller is not the NFS translator. */
662 #ifndef AFS_VM_RDWR_ENV
664 * If write is implemented via VM, afs_fsync() is called from the high-level
667 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
668 if (noLock && (aio & IO_SYNC)) {
671 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
672 * we're doing them because the file was opened with O_SYNCIO specified,
673 * we have to look in the u area. No single mechanism here!!
675 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
677 if (noLock && (aio & FSYNC)) {
680 if (!AFS_NFSXLATORREQ(acred))
681 afs_fsync(avc, acred);
687 /* do partial write if we're low on unmodified chunks */
/* If the number of dirty cache chunks exceeds the configured maximum,
 * asynchronously store avc's dirty segments back to the server.
 * avc  - vcache to (possibly) store; areq - request context.
 * Returns 0 when under the dirty-chunk threshold; otherwise the
 * afs_StoreAllSegments() result (return not visible in this excerpt).
 * NOTE(review): sampled listing -- braces/#else/#endif and the final
 * return are not visible. */
689 afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
691 register afs_int32 code;
693 if (afs_stats_cmperf.cacheCurrDirtyChunks <=
694 afs_stats_cmperf.cacheMaxDirtyChunks)
695 return 0; /* nothing to do */
696 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
697 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
698 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
/* Solaris additionally invalidates VM pages after the async store. */
699 #if defined(AFS_SUN5_ENV)
700 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
702 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
/* vno_close shim: map the per-platform close primitive to one name so
 * afs_closex below is platform-independent. */
709 #if !defined (AFS_AIX_ENV) && !defined (AFS_HPUX_ENV) && !defined (AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
711 #define vno_close(X) vn_close((X), 0, NOCRED)
712 #elif defined(AFS_DUX40_ENV)
713 #define vno_close vn_close
715 /* We don't need this for AIX since:
716 * (1) aix doesn't use fileops and it call close directly intead
717 * (where the unlocking should be done) and
718 * (2) temporarily, the aix lockf isn't supported yet.
720 * this stupid routine is used to release the flocks held on a
721 * particular file descriptor. Sun doesn't pass file descr. info
722 * through to the vnode layer, and yet we must unlock flocked files
723 * on the *appropriate* (not first, as in System V) close call. Thus
725 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
726 * file ops structure into any afs file when it gets flocked.
727 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/* Close a flocked file descriptor: release the descriptor's flocks on an
 * AFS vnode, then perform the real close.
 * afd - the file structure being closed.
 * Returns the vnode-layer close code (via afs_closex's own return,
 * assembled on lines not fully visible here).
 * NOTE(review): sampled listing -- some braces/returns are not visible. */
731 afs_closex(register struct file *afd)
733 struct vrequest treq;
738 struct afs_fakestat_state fakestat;
740 AFS_STATCNT(afs_closex);
741 /* setup the credentials */
742 if ((code = afs_InitReq(&treq, u.u_cred)))
744 afs_InitFakeStat(&fakestat);
747 /* we're the last one. If we're an AFS vnode, clear the flags,
748 * close the file and release the lock when done. Otherwise, just
749 * let the regular close code work. */
750 if (afd->f_type == DTYPE_VNODE) {
751 tvc = VTOAFS(afd->f_data);
752 if (IsAfsVnode(AFSTOV(tvc))) {
753 code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
755 afs_PutFakeStat(&fakestat);
/* Hold the vnode across the close so HandleFlock below still has a
 * live reference; the shared/exclusive lock flags are stripped first
 * so vno_close doesn't try to unlock them itself. */
758 VN_HOLD(AFSTOV(tvc));
759 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
760 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
761 code = vno_close(afd);
763 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)
764 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
767 HandleFlock(tvc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
770 grele((struct gnode *)tvc);
772 AFS_RELE(AFSTOV(tvc));
777 /* now, if close not done, do it */
779 code = vno_close(afd);
781 afs_PutFakeStat(&fakestat);
782 return code; /* return code from vnode layer */
787 /* handle any closing cleanup stuff */
/* Vnode close entry point.  Releases flocks for the closing process,
 * stores dirty data on the last write-close (directly or via a
 * background daemon), reports store errors to the last closer, and
 * triggers deferred unlink when the vnode is CUnlinked.
 * Signature varies per platform (SGI, Solaris/SunOS, default) -- the K&R
 * parameter lists below are interleaved by the preprocessor.
 * NOTE(review): sampled listing -- many #else/#endif lines, braces and
 * the enclosing conditions of several statements are not visible; the
 * two `else if (code == ENOSPC)` arms below are presumably in different
 * preprocessor branches (Solaris vs. default) -- verify in full source. */
790 afs_close(OSI_VC_ARG(avc), aflags, lastclose,
791 #if !defined(AFS_SGI65_ENV)
795 #if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
799 lastclose_t lastclose;
800 #if !defined(AFS_SGI65_ENV)
802 #if defined(AFS_SGI64_ENV)
806 #elif defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
808 afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
811 afs_close(OSI_VC_ARG(avc), aflags, count, acred)
815 afs_close(OSI_VC_ARG(avc), aflags, acred)
819 struct AFS_UCRED *acred;
821 register afs_int32 code;
822 register struct brequest *tb;
823 struct vrequest treq;
827 struct afs_fakestat_state fakestat;
830 AFS_STATCNT(afs_close);
831 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
832 ICL_TYPE_INT32, aflags);
833 code = afs_InitReq(&treq, acred);
836 afs_InitFakeStat(&fakestat);
837 code = afs_EvalFakeStat(&avc, &fakestat, &treq);
839 afs_PutFakeStat(&fakestat);
843 if (avc->flockCount) {
844 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
847 #if defined(AFS_SGI_ENV)
849 afs_PutFakeStat(&fakestat);
852 #elif defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
854 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
855 afs_PutFakeStat(&fakestat);
/* SGI: release any record locks and flocks held by this process before
 * proceeding; the per-OS-revision lock-id plumbing differs (SGI65 uses
 * flid, SGI64 uses flp, older uses epid/sysid). */
860 #if defined(AFS_SGI_ENV)
861 /* unlock any locks for pid - could be wrong for child .. */
862 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
864 get_current_flid(&flid);
865 cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
866 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
869 cleanlocks((vnode_t *) avc, flp);
870 #else /* AFS_SGI64_ENV */
871 cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
872 #endif /* AFS_SGI64_ENV */
873 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
874 #endif /* AFS_SGI65_ENV */
875 /* afs_chkpgoob will drop and re-acquire the global lock. */
876 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
877 #else /* AFS_SGI_ENV */
878 if (avc->flockCount) { /* Release Lock */
879 #if defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV)
880 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1 /*onlymine */ );
882 HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
885 #endif /* AFS_SGI_ENV */
886 #endif /* AFS_SUN5_ENV */
/* Write-close: store dirty data back.  Either do the store inline (all
 * daemons busy) or queue a BOP_STORE request for a background daemon. */
887 if (aflags & (FWRITE | FTRUNC)) {
889 /* do it yourself if daemons are all busy */
890 ObtainWriteLock(&avc->lock, 124);
891 code = afs_StoreOnLastReference(avc, &treq);
892 ReleaseWriteLock(&avc->lock);
893 #if defined(AFS_SGI_ENV)
894 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
897 #if defined(AFS_SGI_ENV)
898 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
900 /* at least one daemon is idle, so ask it to do the store.
901 * Also, note that we don't lock it any more... */
902 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
903 (afs_size_t) acred->cr_uid, (afs_size_t) 0,
905 /* sleep waiting for the store to start, then retrieve error code */
906 while ((tb->flags & BUVALID) == 0) {
914 /* VNOVNODE is "acceptable" error code from close, since
915 * may happen when deleting a file on another machine while
916 * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
917 if (code == VNOVNODE || code == ENOENT)
920 /* Ensure last closer gets the error. If another thread caused
921 * DoPartialWrite and this thread does not actually store the data,
922 * it may not see the quota error.
924 ObtainWriteLock(&avc->lock, 406);
927 osi_ReleaseVM(avc, acred);
929 code = avc->vc_error;
932 ReleaseWriteLock(&avc->lock);
934 /* some codes merit specific complaint */
936 afs_warnuser("afs: failed to store file (network problems)\n");
939 else if (code == ENOSPC) {
941 ("afs: failed to store file (over quota or partition full)\n");
944 else if (code == ENOSPC) {
945 afs_warnuser("afs: failed to store file (partition full)\n");
946 } else if (code == EDQUOT) {
947 afs_warnuser("afs: failed to store file (over quota)\n");
951 afs_warnuser("afs: failed to store file (%d)\n", code);
953 /* finally, we flush any text pages lying around here */
957 #if defined(AFS_SGI_ENV)
958 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
959 osi_Assert(avc->opens > 0);
961 /* file open for read */
962 ObtainWriteLock(&avc->lock, 411);
965 osi_ReleaseVM(avc, acred);
967 code = avc->vc_error;
971 ReleaseWriteLock(&avc->lock);
/* Deferred unlink: if this vnode was removed while open and we are the
 * last holders, complete the server-side unlink now. */
974 if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
975 afs_remunlink(avc, 1); /* ignore any return code */
978 afs_PutFakeStat(&fakestat);
979 code = afs_CheckCode(code, &treq, 5);
/* Synchronously store avc's dirty segments back to the fileserver.
 * Signature varies per platform (OSF, SGI/Sun53, default K&R form).
 * Skips work when no writers are active; on Solaris, backs out when
 * invoked from the NFS server thread.
 * NOTE(review): sampled listing -- the return-type lines, some
 * #else/#endif lines, and the final return are not visible. */
986 afs_fsync(avc, fflags, acred, waitfor)
989 #else /* AFS_OSF_ENV */
990 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
991 afs_fsync(OSI_VC_ARG(avc), flag, acred
997 afs_fsync(avc, acred)
1001 struct AFS_UCRED *acred;
1002 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
1004 #ifdef AFS_SGI65_ENV
1009 register afs_int32 code;
1010 struct vrequest treq;
/* Sticky vnode error short-circuits the sync. */
1014 return avc->vc_error;
1016 #if defined(AFS_SUN5_ENV)
1017 /* back out if called from NFS server */
1018 if (curthread->t_flag & T_DONTPEND)
1022 AFS_STATCNT(afs_fsync);
1023 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
1024 if ((code = afs_InitReq(&treq, acred)))
1027 #if defined(AFS_SGI_ENV)
1028 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1029 if (flag & FSYNC_INVAL)
1030 osi_VM_FSyncInval(avc);
1031 #endif /* AFS_SGI_ENV */
1033 ObtainSharedLock(&avc->lock, 18);
/* Only store if someone actually has the file open for write/exec;
 * upgrade to a write lock just for the store, then drop back. */
1035 if (avc->execsOrWriters > 0) {
1036 /* put the file back */
1037 UpgradeSToWLock(&avc->lock, 41);
1038 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
1039 ConvertWToSLock(&avc->lock);
1041 #if defined(AFS_SGI_ENV)
1042 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
1043 if (code == VNOVNODE) {
1044 /* syncing an unlinked file! - non-informative to pass an errno
1045 * 102 (== VNOVNODE) to user
1051 code = afs_CheckCode(code, &treq, 33);
1052 ReleaseSharedLock(&avc->lock);