2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
14 * afs_StoreOnLastReference
20 #include <afsconfig.h>
21 #include "../afs/param.h"
25 #include "../afs/sysincludes.h" /* Standard vendor system headers */
26 #include "../afs/afsincludes.h" /* Afs-based standard headers */
27 #include "../afs/afs_stats.h" /* statistics */
28 #include "../afs/afs_cbqueue.h"
29 #include "../afs/nfsclient.h"
30 #include "../afs/afs_osidnlc.h"
33 extern unsigned char *afs_indexFlags;
35 /* Called by all write-on-close routines: regular afs_close,
36 * store via background daemon and store via the
37 * afs_FlushActiveVCaches routine (when CCORE is on).
38 * avc->lock must be write-locked.
/*
 * afs_StoreOnLastReference -- push a file's dirty chunks back to the
 * fileserver as part of close-for-write processing (via
 * afs_StoreAllSegments below).
 *
 * NOTE(review): this listing is elided -- the embedded original line
 * numbers are non-contiguous, so statements between the visible lines
 * (including the opening brace and local declarations) are not shown.
 * Code below is reproduced byte-identical; only comments were added.
 */
40 afs_StoreOnLastReference(avc, treq)
41 register struct vcache *avc;
42 register struct vrequest *treq;
46 AFS_STATCNT(afs_StoreOnLastReference);
47 /* if CCore flag is set, we clear it and do the extra decrement
48 * ourselves now. If we're called by the CCore clearer, the CCore
49 * flag will already be clear, so we don't have to worry about
50 * clearing it twice. */
51 if (avc->states & CCore) {
52 avc->states &= ~CCore;
53 #if defined(AFS_SGI_ENV)
54 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
56 /* WARNING: Our linux cm code treats the execsOrWriters counter differently
57 * depending on the flags the file was opened with. So, if you make any
58 * changes to the way the execsOrWriters flag is handled check with the
/* Undo the reference counts and credential hold taken when the CCore
 * state was set (see the afs_FakeClose references in the comments below). */
61 avc->execsOrWriters--;
62 AFS_RELE((struct vnode *)avc); /* VN_HOLD at set CCore(afs_FakeClose)*/
63 crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
64 avc->linkData = (char *)0;
66 /* Now, send the file back. Used to require 0 writers left, but now do
67 * it on every close for write, since two closes in a row are harmless
68 * since first will clean all chunks, and second will be noop. Note that
69 * this will also save confusion when someone keeps a file open
70 * inadvertently, since with old system, writes to the server would never
/* AFS_LASTSTORE: final store of the data; comment says not sync-to-disk. */
73 code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE/*!sync-to-disk*/);
75 * We have to do these after the above store in done: in some systems like
76 * aix they'll need to flush all the vm dirty pages to the disk via the
77 * strategy routine. During that all procedure (done under no avc locks)
78 * opens, refcounts would be zero, since it didn't reach the afs_{rd,wr}
79 * routines which means the vcache is a perfect candidate for flushing!
81 #if defined(AFS_SGI_ENV)
82 osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
/* Drop the writer/exec reference now that the store is done.
 * NOTE(review): the function's return statement is elided from this view. */
85 avc->execsOrWriters--;
/*
 * afs_MemWrite -- write path for the in-memory cache backend.  Copies the
 * caller's uio into per-chunk dcache entries via afs_MemWriteUIO, marking
 * each chunk dirty (IFDataMod/DWriting) and growing the vcache length as
 * needed.  Mirrors afs_UFSWrite below, which does the same against a
 * disk-backed cache.
 *
 * NOTE(review): this listing is elided (original line numbers are not
 * contiguous); several declarations, braces, and #else/#endif lines are
 * missing from view.  Code is reproduced byte-identical; comments only.
 */
91 afs_MemWrite(avc, auio, aio, acred, noLock)
92 register struct vcache *avc;
95 struct AFS_UCRED *acred;
97 afs_size_t totalLength;
98 afs_size_t transferLength;
100 afs_size_t offset, len;
101 afs_int32 tlen, trimlen;
104 register struct dcache *tdc;
110 struct iovec *tvec; /* again, should have define */
112 register afs_int32 code;
113 struct vrequest treq;
115 AFS_STATCNT(afs_MemWrite);
/* Fail fast if a previous error was latched on this vcache. */
117 return avc->vc_error;
119 startDate = osi_Time();
120 if (code = afs_InitReq(&treq, acred)) return code;
121 /* otherwise we read */
122 totalLength = auio->afsio_resid;
123 filePos = auio->afsio_offset;
126 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
127 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
128 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(totalLength),
129 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
131 afs_MaybeWakeupTruncateDaemon();
132 ObtainWriteLock(&avc->lock,126);
134 #if defined(AFS_SGI_ENV)
138 * afs_xwrite handles setting m.Length
139 * and handles APPEND mode.
140 * Since we are called via strategy, we need to trim the write to
141 * the actual size of the file
/* SGI: clamp the transfer so it cannot extend past the current EOF. */
143 osi_Assert(filePos <= avc->m.Length);
144 diff = avc->m.Length - filePos;
145 auio->afsio_resid = MIN(totalLength, diff);
146 totalLength = auio->afsio_resid;
149 if (aio & IO_APPEND) {
150 /* append mode, start it at the right spot */
151 #if defined(AFS_SUN56_ENV)
152 auio->uio_loffset = 0;
154 filePos = auio->afsio_offset = avc->m.Length;
158 * Note that we use startDate rather than calling osi_Time() here.
159 * This is to avoid counting lock-waiting time in file date (for ranlib).
161 avc->m.Date = startDate;
163 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
164 #if defined(AFS_HPUX101_ENV)
/* Enforce the process RLIMIT_FSIZE cap (limit is in 512-byte blocks on
 * the >>9 variants).  NOTE(review): the error-return statements inside
 * these branches are elided from this view. */
165 if ((totalLength + filePos) >> 9 > (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
168 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
170 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
174 ReleaseWriteLock(&avc->lock);
178 #ifdef AFS_VM_RDWR_ENV
180 * If write is implemented via VM, afs_FakeOpen() is called from the
181 * high-level write op.
183 if (avc->execsOrWriters <= 0) {
184 printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
189 avc->states |= CDirty;
/* Scratch iovec used by afsio_copy below; freed before returning. */
190 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
191 while (totalLength > 0) {
192 /* Read the cached info. If we call GetDCache while the cache
193 * truncate daemon is running we risk overflowing the disk cache.
194 * Instead we check for an existing cache slot. If we cannot
195 * find an existing slot we wait for the cache to drain
196 * before calling GetDCache.
199 tdc = afs_FindDCache(avc, filePos);
200 if (tdc) ObtainWriteLock(&tdc->lock, 653);
201 } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
202 tdc = afs_FindDCache(avc, filePos);
204 ObtainWriteLock(&tdc->lock, 654);
/* A chunk from a stale data version or one still being fetched is
 * unusable here; drop it and fall through to the drain/GetDCache path. */
205 if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
206 (tdc->dflags & DFFetching)) {
207 ReleaseWriteLock(&tdc->lock);
213 afs_MaybeWakeupTruncateDaemon();
/* Cache is above the drain threshold: release the vcache lock and block
 * until the truncate daemon frees space, then re-acquire. */
214 while (afs_blocksUsed >
215 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
216 ReleaseWriteLock(&avc->lock);
217 if (afs_blocksUsed - afs_blocksDiscarded >
218 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
219 afs_WaitForCacheDrain = 1;
220 afs_osi_Sleep(&afs_WaitForCacheDrain);
222 afs_MaybeFreeDiscardedDCache();
223 afs_MaybeWakeupTruncateDaemon();
224 ObtainWriteLock(&avc->lock,506);
226 avc->states |= CDirty;
227 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
228 if (tdc) ObtainWriteLock(&tdc->lock, 655);
231 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
232 if (tdc) ObtainWriteLock(&tdc->lock, 656);
/* First write into this chunk: count it dirty and pin it in the index. */
238 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
239 afs_stats_cmperf.cacheCurrDirtyChunks++;
240 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
242 if (!(tdc->f.states & DWriting)) {
243 /* don't mark entry as mod if we don't have to */
244 tdc->f.states |= DWriting;
245 tdc->dflags |= DFEntryMod;
247 len = totalLength; /* write this amount by default */
248 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
249 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
250 if (max <= len + offset) { /*if we'd go past the end of this chunk */
251 /* it won't all fit in this chunk, so write as much
255 /* mung uio structure to be right for this transfer */
256 afsio_copy(auio, &tuio, tvec);
258 afsio_trim(&tuio, trimlen);
259 tuio.afsio_offset = offset;
/* Do the actual copy into the memory-cache "inode" for this chunk. */
261 code = afs_MemWriteUIO(tdc->f.inode, &tuio);
263 void *mep; /* XXX in prototype world is struct memCacheEntry * */
/* Write failed: invalidate the chunk so stale data is never served. */
265 ZapDCE(tdc); /* bad data */
266 mep = afs_MemCacheOpen(tdc->f.inode);
267 afs_MemCacheTruncate(mep, 0);
268 afs_MemCacheClose(mep);
269 afs_stats_cmperf.cacheCurrDirtyChunks--;
270 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
271 ReleaseWriteLock(&tdc->lock);
275 /* otherwise we've written some, fixup length, etc and continue with next seg */
276 len = len - tuio.afsio_resid; /* compute amount really transferred */
278 afsio_skip(auio, tlen); /* advance auio over data written */
279 /* compute new file size */
280 if (offset + len > tdc->f.chunkBytes) {
281 afs_int32 tlength = offset+len;
282 afs_AdjustSize(tdc, tlength);
285 transferLength += len;
287 #if defined(AFS_SGI_ENV)
288 /* afs_xwrite handles setting m.Length */
289 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: extend the cached file length if this write passed old EOF. */
291 if (filePos > avc->m.Length) {
292 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
293 ICL_TYPE_STRING, __FILE__,
294 ICL_TYPE_LONG, __LINE__,
295 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
296 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos));
297 avc->m.Length = filePos;
300 #ifndef AFS_VM_RDWR_ENV
302 * If write is implemented via VM, afs_DoPartialWrite() is called from
303 * the high-level write op.
/* Opportunistically store segments back if too many chunks are dirty. */
306 code = afs_DoPartialWrite(avc, &treq);
309 ReleaseWriteLock(&tdc->lock);
315 ReleaseWriteLock(&tdc->lock);
318 #ifndef AFS_VM_RDWR_ENV
319 afs_FakeClose(avc, acred);
/* Latch the first error on the vcache so a later closer sees it. */
321 if (error && !avc->vc_error)
322 avc->vc_error = error;
324 ReleaseWriteLock(&avc->lock);
325 osi_FreeSmallSpace(tvec);
327 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
328 work. GFS is truly a poorly-designed interface! */
329 afs_gfshack((struct gnode *) avc);
/* NOTE(review): the return of `error` is elided from this view. */
331 error = afs_CheckCode(error, &treq, 6);
336 /* called on writes */
/*
 * afs_UFSWrite -- write path for the disk-backed (UFS) cache.  Same chunk
 * loop as afs_MemWrite above, but each chunk is an osi_file opened with
 * osi_UFSOpen and written through the host OS vnode layer; the large
 * #ifdef ladder selects the per-platform write primitive.
 *
 * NOTE(review): this listing is elided (original line numbers are not
 * contiguous); declarations, braces, and some #else/#endif lines are
 * missing from view.  Code is reproduced byte-identical; comments only.
 */
337 afs_UFSWrite(avc, auio, aio, acred, noLock)
338 register struct vcache *avc;
341 struct AFS_UCRED *acred;
343 afs_size_t totalLength;
344 afs_size_t transferLength;
346 afs_size_t offset, len;
351 register struct dcache *tdc;
357 struct iovec *tvec; /* again, should have define */
358 struct osi_file *tfile;
359 register afs_int32 code;
361 struct vrequest treq;
363 AFS_STATCNT(afs_UFSWrite);
/* Fail fast if a previous error was latched on this vcache. */
365 return avc->vc_error;
367 startDate = osi_Time();
368 if (code = afs_InitReq(&treq, acred)) return code;
369 /* otherwise we read */
370 totalLength = auio->afsio_resid;
371 filePos = auio->afsio_offset;
374 afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
375 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
376 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(totalLength),
377 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
379 afs_MaybeWakeupTruncateDaemon();
380 ObtainWriteLock(&avc->lock,556);
382 #if defined(AFS_SGI_ENV)
386 * afs_xwrite handles setting m.Length
387 * and handles APPEND mode.
388 * Since we are called via strategy, we need to trim the write to
389 * the actual size of the file
/* SGI: clamp the transfer so it cannot extend past the current EOF. */
391 osi_Assert(filePos <= avc->m.Length);
392 diff = avc->m.Length - filePos;
393 auio->afsio_resid = MIN(totalLength, diff);
394 totalLength = auio->afsio_resid;
397 if (aio & IO_APPEND) {
398 /* append mode, start it at the right spot */
399 #if defined(AFS_SUN56_ENV)
400 auio->uio_loffset = 0;
402 filePos = auio->afsio_offset = avc->m.Length;
406 * Note that we use startDate rather than calling osi_Time() here.
407 * This is to avoid counting lock-waiting time in file date (for ranlib).
409 avc->m.Date = startDate;
411 #if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
412 #if defined(AFS_HPUX101_ENV)
/* Enforce the process RLIMIT_FSIZE cap (limit in 512-byte blocks on the
 * >>9 variants).  NOTE(review): the error-return statements inside these
 * branches are elided from this view. */
413 if ((totalLength + filePos) >> 9 > p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
416 if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
418 if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
422 ReleaseWriteLock(&avc->lock);
426 #ifdef AFS_VM_RDWR_ENV
428 * If write is implemented via VM, afs_FakeOpen() is called from the
429 * high-level write op.
431 if (avc->execsOrWriters <= 0) {
432 printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
437 avc->states |= CDirty;
/* Scratch iovec used by afsio_copy below; freed before returning. */
438 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
439 while (totalLength > 0) {
440 /* read the cached info */
442 tdc = afs_FindDCache(avc, filePos);
443 if (tdc) ObtainWriteLock(&tdc->lock, 657);
444 } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
445 tdc = afs_FindDCache(avc, filePos);
447 ObtainWriteLock(&tdc->lock, 658);
/* A chunk from a stale data version or one still being fetched is
 * unusable here; drop it and fall through to the drain/GetDCache path. */
448 if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
449 (tdc->dflags & DFFetching)) {
450 ReleaseWriteLock(&tdc->lock);
456 afs_MaybeWakeupTruncateDaemon();
/* Cache is above the drain threshold: release the vcache lock and block
 * until the truncate daemon frees space, then re-acquire. */
457 while (afs_blocksUsed >
458 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
459 ReleaseWriteLock(&avc->lock);
460 if (afs_blocksUsed - afs_blocksDiscarded >
461 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
462 afs_WaitForCacheDrain = 1;
463 afs_osi_Sleep(&afs_WaitForCacheDrain);
465 afs_MaybeFreeDiscardedDCache();
466 afs_MaybeWakeupTruncateDaemon();
467 ObtainWriteLock(&avc->lock,509);
469 avc->states |= CDirty;
470 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
471 if (tdc) ObtainWriteLock(&tdc->lock, 659);
474 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
475 if (tdc) ObtainWriteLock(&tdc->lock, 660);
/* First write into this chunk: count it dirty and pin it in the index. */
481 if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
482 afs_stats_cmperf.cacheCurrDirtyChunks++;
483 afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
485 if (!(tdc->f.states & DWriting)) {
486 /* don't mark entry as mod if we don't have to */
487 tdc->f.states |= DWriting;
488 tdc->dflags |= DFEntryMod;
/* Open the on-disk cache file backing this chunk. */
490 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
491 len = totalLength; /* write this amount by default */
492 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
493 max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
494 if (max <= len + offset) { /*if we'd go past the end of this chunk */
495 /* it won't all fit in this chunk, so write as much
499 /* mung uio structure to be right for this transfer */
500 afsio_copy(auio, &tuio, tvec);
502 afsio_trim(&tuio, trimlen);
503 tuio.afsio_offset = offset;
/* Platform-specific write of tuio into the cache file.  Each branch
 * below is selected by the #ifdef ladder whose guards are partially
 * elided from this view. */
507 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL, NULL, &afs_osi_cred);
511 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
513 code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t)&offset, &tuio, NULL, NULL, -1);
515 #endif /* AFS_AIX41_ENV */
516 #else /* AFS_AIX_ENV */
519 VOP_RWLOCK(tfile->vnode, 1);
520 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
521 VOP_RWUNLOCK(tfile->vnode, 1);
523 if (code == ENOSPC) afs_warnuser("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
525 #if defined(AFS_SGI_ENV)
/* CWritingUFS marks that we are inside the local-cache write, presumably
 * so other paths can tell -- TODO(review) confirm against users of the flag. */
527 avc->states |= CWritingUFS;
528 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
529 AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred, code);
530 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
531 avc->states &= ~CWritingUFS;
/* Temporarily swap in the AFS credential for the write.
 * NOTE(review): the line restoring u.u_cred = tmpcred is elided here. */
536 struct ucred *tmpcred = u.u_cred;
537 u.u_cred = &afs_osi_cred;
538 tuio.uio_rw = UIO_WRITE;
540 VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
544 #else /* AFS_OSF_ENV */
545 #if defined(AFS_HPUX100_ENV)
548 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
553 tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
555 #if defined(AFS_LINUX20_ENV)
557 code = osi_file_uio_rdwr(tfile, &tuio, UIO_WRITE);
560 #if defined(AFS_DARWIN_ENV)
562 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
563 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
564 VOP_UNLOCK(tfile->vnode, 0, current_proc());
567 #if defined(AFS_FBSD_ENV)
569 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
570 code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
571 VOP_UNLOCK(tfile->vnode, 0, curproc);
574 code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
575 #endif /* AFS_FBSD_ENV */
576 #endif /* AFS_DARWIN_ENV */
577 #endif /* AFS_LINUX20_ENV */
578 #endif /* AFS_HPUX100_ENV */
579 #endif /* AFS_OSF_ENV */
580 #endif /* AFS_SGI_ENV */
581 #endif /* AFS_SUN5_ENV */
582 #endif /* AFS_AIX41_ENV */
/* Write failed: invalidate and empty the chunk so stale data is never
 * served, then undo the dirty accounting done above. */
585 ZapDCE(tdc); /* bad data */
586 osi_UFSTruncate(tfile,0); /* fake truncate the segment */
587 afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
588 afs_stats_cmperf.cacheCurrDirtyChunks--;
589 afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
590 afs_CFileClose(tfile);
591 ReleaseWriteLock(&tdc->lock);
595 /* otherwise we've written some, fixup length, etc and continue with next seg */
596 len = len - tuio.afsio_resid; /* compute amount really transferred */
598 afsio_skip(auio, tlen); /* advance auio over data written */
599 /* compute new file size */
600 if (offset + len > tdc->f.chunkBytes) {
601 afs_int32 tlength = offset+len;
602 afs_AdjustSize(tdc, tlength);
605 transferLength += len;
607 #if defined(AFS_SGI_ENV)
608 /* afs_xwrite handles setting m.Length */
609 osi_Assert(filePos <= avc->m.Length);
/* Non-SGI: extend the cached file length if this write passed old EOF. */
611 if (filePos > avc->m.Length) {
612 afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
613 ICL_TYPE_STRING, __FILE__,
614 ICL_TYPE_LONG, __LINE__,
615 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
616 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos));
617 avc->m.Length = filePos;
621 #ifndef AFS_VM_RDWR_ENV
623 * If write is implemented via VM, afs_DoPartialWrite() is called from
624 * the high-level write op.
/* Opportunistically store segments back if too many chunks are dirty. */
627 code = afs_DoPartialWrite(avc, &treq);
630 ReleaseWriteLock(&tdc->lock);
636 ReleaseWriteLock(&tdc->lock);
639 #ifndef AFS_VM_RDWR_ENV
640 afs_FakeClose(avc, acred);
642 error = afs_CheckCode(error, &treq, 7);
643 /* This set is here so we get the CheckCode. */
/* Latch the first error on the vcache so a later closer sees it. */
644 if (error && !avc->vc_error)
645 avc->vc_error = error;
647 ReleaseWriteLock(&avc->lock);
648 osi_FreeSmallSpace(tvec);
650 /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
651 work. GFS is truly a poorly-designed interface! */
652 afs_gfshack((struct gnode *) avc);
654 #ifndef AFS_VM_RDWR_ENV
656 * If write is implemented via VM, afs_fsync() is called from the high-level
/* Per-platform test for "this write must be synchronous"; when true
 * (and the request is not from the NFS translator) push data now. */
659 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
660 if (noLock && (aio & IO_SYNC)) {
663 /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
664 * we're doing them because the file was opened with O_SYNCIO specified,
665 * we have to look in the u area. No single mechanism here!!
667 if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
669 if (noLock && (aio & FSYNC)) {
672 if (!AFS_NFSXLATORREQ(acred))
673 afs_fsync(avc, acred);
679 /* do partial write if we're low on unmodified chunks */
/*
 * afs_DoPartialWrite -- if the count of dirty cache chunks exceeds the
 * configured maximum, asynchronously store the file's segments back to
 * the server; otherwise do nothing and return 0.
 *
 * NOTE(review): this listing is elided; the function's tail (after the
 * afs_StoreAllSegments calls) is not visible here.
 */
680 afs_DoPartialWrite(avc, areq)
681 register struct vcache *avc;
682 struct vrequest *areq; {
683 register afs_int32 code;
685 if (afs_stats_cmperf.cacheCurrDirtyChunks <= afs_stats_cmperf.cacheMaxDirtyChunks)
686 return 0; /* nothing to do */
687 /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
688 afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
689 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
690 #if defined(AFS_SUN5_ENV)
/* Solaris additionally invalidates VM pages during the async store. */
691 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
693 code = afs_StoreAllSegments(avc, areq, AFS_ASYNC);
/* Platform selection for the flock-releasing close hook below; vno_close
 * is aliased to the native vn_close where needed. */
700 #if !defined (AFS_AIX_ENV) && !defined (AFS_HPUX_ENV) && !defined (AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_FBSD_ENV)
702 #define vno_close(X) vn_close((X), 0, NOCRED)
703 #elif defined(AFS_DUX40_ENV)
704 #define vno_close vn_close
706 /* We don't need this for AIX since:
707 * (1) aix doesn't use fileops and it call close directly intead
708 * (where the unlocking should be done) and
709 * (2) temporarily, the aix lockf isn't supported yet.
711 * this stupid routine is used to release the flocks held on a
712 * particular file descriptor. Sun doesn't pass file descr. info
713 * through to the vnode layer, and yet we must unlock flocked files
714 * on the *appropriate* (not first, as in System V) close call. Thus
716 * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
717 * file ops structure into any afs file when it gets flocked.
718 * N.B: Intercepting close syscall doesn't trap aborts or exit system
/*
 * afs_closex(afd) -- close hook installed when an AFS file is flocked:
 * releases the flocks held on this descriptor, then performs the real
 * vnode close.  NOTE(review): this listing is elided -- the function
 * header line itself and several statements are missing from view; code
 * below is reproduced byte-identical, comments only.
 */
722 register struct file *afd; {
723 struct vrequest treq;
724 register struct vcache *tvc;
729 AFS_STATCNT(afs_closex);
730 /* setup the credentials */
731 if (code = afs_InitReq(&treq, u.u_cred)) return code;
734 /* we're the last one. If we're an AFS vnode, clear the flags,
735 * close the file and release the lock when done. Otherwise, just
736 * let the regular close code work. */
737 if (afd->f_type == DTYPE_VNODE) {
738 tvc = (struct vcache *) afd->f_data;
739 if (IsAfsVnode((struct vnode *)tvc)) {
/* Hold the vnode across the close so it cannot go away under us. */
740 VN_HOLD((struct vnode *) tvc);
741 flags = afd->f_flag & (FSHLOCK | FEXLOCK);
742 afd->f_flag &= ~(FSHLOCK | FEXLOCK);
743 code = vno_close(afd);
/* Unlock only this process's locks ("onlymine"); pid source varies
 * per platform. */
745 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)
746 HandleFlock(tvc, LOCK_UN, &treq,
747 u.u_procp->p_pid, 1/*onlymine*/);
749 HandleFlock(tvc, LOCK_UN, &treq, 0, 1/*onlymine*/);
752 grele((struct gnode *) tvc);
754 AFS_RELE((struct vnode *) tvc);
759 /* now, if close not done, do it */
761 code = vno_close(afd);
763 return code; /* return code from vnode layer */
768 /* handle any closing cleanup stuff */
/*
 * afs_close -- vnode-layer close entry point.  Releases file locks held
 * by the closing process, and on a close-for-write either stores the
 * dirty data itself (afs_StoreOnLastReference) or queues a BOP_STORE
 * request to a background daemon.  Maps store errors into user warnings
 * and latches them on the vcache.
 *
 * NOTE(review): this listing is elided -- the alternative K&R headers
 * below are selected by #ifdef guards partially missing from view, and
 * several statements (including the final return) are not visible.
 * Code is reproduced byte-identical; comments only.
 */
770 afs_close(OSI_VC_ARG(avc), aflags, lastclose,
771 #if !defined(AFS_SGI65_ENV)
775 #if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
779 lastclose_t lastclose;
780 #if !defined(AFS_SGI65_ENV)
782 #if defined(AFS_SGI64_ENV)
787 #if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
789 afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
792 afs_close(OSI_VC_ARG(avc), aflags, count, acred)
796 afs_close(OSI_VC_ARG(avc), aflags, acred)
801 struct AFS_UCRED *acred;
803 register afs_int32 code, initreq=0;
804 register struct brequest *tb;
805 struct vrequest treq;
811 AFS_STATCNT(afs_close);
812 afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
813 ICL_TYPE_INT32, aflags);
/* Release any flocks this process still holds on the file. */
815 if (avc->flockCount) {
816 if (code = afs_InitReq(&treq, acred)) return code;
818 HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
821 #if defined(AFS_SGI_ENV)
825 #if defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
827 /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
835 if (code = afs_InitReq(&treq, acred)) return code;
840 #if defined(AFS_SGI_ENV)
841 /* unlock any locks for pid - could be wrong for child .. */
842 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
844 get_current_flid(&flid);
845 cleanlocks((vnode_t *)avc, flid.fl_pid, flid.fl_sysid);
846 HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1/*onlymine*/);
849 cleanlocks((vnode_t *)avc, flp);
850 #else /* AFS_SGI64_ENV */
851 cleanlocks((vnode_t *)avc, u.u_procp->p_epid, u.u_procp->p_sysid);
852 #endif /* AFS_SGI64_ENV */
853 HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1/*onlymine*/);
854 #endif /* AFS_SGI65_ENV */
855 /* afs_chkpgoob will drop and re-acquire the global lock. */
856 afs_chkpgoob(&avc->v, btoc(avc->m.Length));
858 if (avc->flockCount) { /* Release Lock */
859 #if defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV)
860 HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1/*onlymine*/);
862 HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
/* Close-for-write (or truncate): the dirty data must be stored back. */
867 if (aflags & (FWRITE | FTRUNC)) {
869 /* do it yourself if daemons are all busy */
870 ObtainWriteLock(&avc->lock,124);
871 code = afs_StoreOnLastReference(avc, &treq);
872 ReleaseWriteLock(&avc->lock);
873 #if defined(AFS_SGI_ENV)
874 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
878 #if defined(AFS_SGI_ENV)
879 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
881 /* at least one daemon is idle, so ask it to do the store.
882 Also, note that we don't lock it any more... */
883 tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
884 (afs_size_t) acred->cr_uid, (afs_size_t) 0,
886 /* sleep waiting for the store to start, then retrieve error code */
887 while ((tb->flags & BUVALID) == 0) {
895 /* VNOVNODE is "acceptable" error code from close, since
896 may happen when deleting a file on another machine while
897 it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
898 if (code == VNOVNODE || code == ENOENT)
901 /* Ensure last closer gets the error. If another thread caused
902 * DoPartialWrite and this thread does not actually store the data,
903 * it may not see the quota error.
905 ObtainWriteLock(&avc->lock,406);
908 osi_ReleaseVM(avc, acred);
/* Pick up any error latched on the vcache by an earlier writer. */
910 code = avc->vc_error;
913 ReleaseWriteLock(&avc->lock);
915 /* some codes merit specific complaint */
917 afs_warnuser("afs: failed to store file (network problems)\n");
920 else if (code == ENOSPC) {
921 afs_warnuser("afs: failed to store file (over quota or partition full)\n");
924 else if (code == ENOSPC) {
925 afs_warnuser("afs: failed to store file (partition full)\n");
927 else if (code == EDQUOT) {
928 afs_warnuser("afs: failed to store file (over quota)\n");
932 afs_warnuser("afs: failed to store file (%d)\n", code);
934 /* finally, we flush any text pages lying around here */
939 #if defined(AFS_SGI_ENV)
940 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
941 osi_Assert(avc->opens > 0);
943 /* file open for read */
944 ObtainWriteLock(&avc->lock, 411);
947 osi_ReleaseVM(avc, acred);
949 code = avc->vc_error;
953 ReleaseWriteLock(&avc->lock);
/* Last real reference to an unlinked file: finish the deferred remove. */
956 if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
957 afs_remunlink(avc, 1); /* ignore any return code */
/* NOTE(review): the return of `code` is elided from this view. */
960 code = afs_CheckCode(code, &treq, 5);
967 afs_fsync(avc, fflags, acred, waitfor)
970 #else /* AFS_OSF_ENV */
971 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
972 afs_fsync(OSI_VC_ARG(avc), flag, acred
978 afs_fsync(avc, acred)
982 struct AFS_UCRED *acred;
983 #if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
990 register afs_int32 code;
991 struct vrequest treq;
995 return avc->vc_error;
997 #if defined(AFS_SUN5_ENV)
998 /* back out if called from NFS server */
999 if (curthread->t_flag & T_DONTPEND)
1003 AFS_STATCNT(afs_fsync);
1004 afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
1005 if (code = afs_InitReq(&treq, acred)) return code;
1007 #if defined(AFS_SGI_ENV)
1008 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
1009 if (flag & FSYNC_INVAL)
1010 osi_VM_FSyncInval(avc);
1011 #endif /* AFS_SGI_ENV */
1013 ObtainSharedLock(&avc->lock,18);
1015 if (avc->execsOrWriters > 0) {
1016 /* put the file back */
1017 UpgradeSToWLock(&avc->lock,41);
1018 code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
1019 ConvertWToSLock(&avc->lock);
1022 #if defined(AFS_SGI_ENV)
1023 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
1024 if (code == VNOVNODE) {
1025 /* syncing an unlinked file! - non-informative to pass an errno
1026 * 102 (== VNOVNODE) to user
1032 code = afs_CheckCode(code, &treq, 33);
1033 ReleaseSharedLock(&avc->lock);