/*
* Implements:
- * afs_UFSWrite
- * afs_MemWrite
+ * afs_write
+ * afs_UFSWriteUIO
* afs_StoreOnLastReference
* afs_close
- * afs_closex
* afs_fsync
*/
#include <afsconfig.h>
-#include "../afs/param.h"
+#include "afs/param.h"
-RCSID("$Header$");
-#include "../afs/sysincludes.h" /* Standard vendor system headers */
-#include "../afs/afsincludes.h" /* Afs-based standard headers */
-#include "../afs/afs_stats.h" /* statistics */
-#include "../afs/afs_cbqueue.h"
-#include "../afs/nfsclient.h"
-#include "../afs/afs_osidnlc.h"
+#include "afs/sysincludes.h" /* Standard vendor system headers */
+#include "afsincludes.h" /* Afs-based standard headers */
+#include "afs/afs_stats.h" /* statistics */
+#include "afs/afs_cbqueue.h"
+#include "afs/nfsclient.h"
+#include "afs/afs_osidnlc.h"
extern unsigned char *afs_indexFlags;
* afs_FlushActiveVCaches routine (when CCORE is on).
* avc->lock must be write-locked.
*/
-afs_StoreOnLastReference(avc, treq)
-register struct vcache *avc;
-register struct vrequest *treq;
+int
+afs_StoreOnLastReference(struct vcache *avc,
+ struct vrequest *treq)
{
int code = 0;
-
+
AFS_STATCNT(afs_StoreOnLastReference);
/* if CCore flag is set, we clear it and do the extra decrement
* ourselves now. If we're called by the CCore clearer, the CCore
* flag will already be clear, so we don't have to worry about
* clearing it twice. */
- if (avc->states & CCore) {
- avc->states &= ~CCore;
+ if (avc->f.states & CCore) {
+ avc->f.states &= ~CCore;
#if defined(AFS_SGI_ENV)
osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
#endif
* top level code. */
avc->opens--;
avc->execsOrWriters--;
- AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose)*/
- crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
- avc->linkData = (char *)0;
+ AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose) */
+ crfree((afs_ucred_t *)avc->linkData); /* "crheld" in afs_FakeClose */
+ avc->linkData = NULL;
}
- /* Now, send the file back. Used to require 0 writers left, but now do
- * it on every close for write, since two closes in a row are harmless
- * since first will clean all chunks, and second will be noop. Note that
- * this will also save confusion when someone keeps a file open
- * inadvertently, since with old system, writes to the server would never
- * happen again.
- */
- code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE/*!sync-to-disk*/);
- /*
- * We have to do these after the above store in done: in some systems like
- * aix they'll need to flush all the vm dirty pages to the disk via the
- * strategy routine. During that all procedure (done under no avc locks)
- * opens, refcounts would be zero, since it didn't reach the afs_{rd,wr}
- * routines which means the vcache is a perfect candidate for flushing!
- */
+
+ if (!AFS_IS_DISCONNECTED) {
+ /* Connected. */
+
+ /* Now, send the file back. Used to require 0 writers left, but now do
+ * it on every close for write, since two closes in a row are harmless
+ * since first will clean all chunks, and second will be noop. Note that
+ * this will also save confusion when someone keeps a file open
+ * inadvertently, since with old system, writes to the server would never
+ * happen again.
+ */
+ code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
+ /*
+ * We have to do these after the above store is done: in some systems
+ * like aix they'll need to flush all the vm dirty pages to the disk via
+ * the strategy routine. During all of that procedure (done under no avc
+ * locks) opens, refcounts would be zero, since it didn't reach the
+ * afs_{rd,wr} routines which means the vcache is a perfect candidate
+ * for flushing!
+ */
+ } else if (AFS_IS_DISCON_RW) {
+ afs_DisconAddDirty(avc, VDisconWriteClose, 0);
+ } /* if not disconnected */
+
#if defined(AFS_SGI_ENV)
osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
#endif
+
avc->opens--;
avc->execsOrWriters--;
return code;
}
-
-
-afs_MemWrite(avc, auio, aio, acred, noLock)
- register struct vcache *avc;
- struct uio *auio;
- int aio, noLock;
- struct AFS_UCRED *acred;
+int
+afs_UFSWriteUIO(struct vcache *avc, afs_dcache_id_t *inode, struct uio *tuiop)
{
- afs_size_t totalLength;
- afs_size_t transferLength;
- afs_size_t filePos;
- afs_size_t offset, len;
- afs_int32 tlen, trimlen;
- afs_int32 startDate;
- afs_int32 max;
- register struct dcache *tdc;
-#ifdef _HIGHC_
- volatile
-#endif
- afs_int32 error;
- struct uio tuio;
- struct iovec *tvec; /* again, should have define */
- char *tfile;
- register afs_int32 code;
- struct vrequest treq;
-
- AFS_STATCNT(afs_MemWrite);
- if (avc->vc_error)
- return avc->vc_error;
+ struct osi_file *tfile;
+ int code;
+
+ tfile = (struct osi_file *)osi_UFSOpen(inode);
+#if defined(AFS_AIX41_ENV)
+ AFS_GUNLOCK();
+ code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, tuiop, NULL, NULL,
+ NULL, afs_osi_credp);
+ AFS_GLOCK();
+#elif defined(AFS_AIX32_ENV)
+ code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, tuiop, NULL, NULL);
+#elif defined(AFS_AIX_ENV)
+ code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) &offset,
+ tuiop, NULL, NULL, -1);
+#elif defined(AFS_SUN5_ENV)
+ AFS_GUNLOCK();
+# ifdef AFS_SUN510_ENV
+ {
+ caller_context_t ct;
- startDate = osi_Time();
- if (code = afs_InitReq(&treq, acred)) return code;
- /* otherwise we read */
- totalLength = auio->afsio_resid;
- filePos = auio->afsio_offset;
- error = 0;
- transferLength = 0;
- afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(totalLength),
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
- if (!noLock) {
- afs_MaybeWakeupTruncateDaemon();
- ObtainWriteLock(&avc->lock,126);
+ VOP_RWLOCK(tfile->vnode, 1, &ct);
+ code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp, &ct);
+ VOP_RWUNLOCK(tfile->vnode, 1, &ct);
}
-#if defined(AFS_SGI_ENV)
+# else
+ VOP_RWLOCK(tfile->vnode, 1);
+ code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
+ VOP_RWUNLOCK(tfile->vnode, 1);
+# endif
+ AFS_GLOCK();
+ if (code == ENOSPC)
+ afs_warnuser
+ ("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
+#elif defined(AFS_SGI_ENV)
+ AFS_GUNLOCK();
+ avc->f.states |= CWritingUFS;
+ AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
+ AFS_VOP_WRITE(tfile->vnode, tuiop, IO_ISLOCKED, afs_osi_credp, code);
+ AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
+ avc->f.states &= ~CWritingUFS;
+ AFS_GLOCK();
+#elif defined(AFS_HPUX100_ENV)
{
- off_t diff;
- /*
- * afs_xwrite handles setting m.Length
- * and handles APPEND mode.
- * Since we are called via strategy, we need to trim the write to
- * the actual size of the file
- */
- osi_Assert(filePos <= avc->m.Length);
- diff = avc->m.Length - filePos;
- auio->afsio_resid = MIN(totalLength, diff);
- totalLength = auio->afsio_resid;
+ AFS_GUNLOCK();
+ code = VOP_RDWR(tfile->vnode, tuiop, UIO_WRITE, 0, afs_osi_credp);
+ AFS_GLOCK();
}
+#elif defined(AFS_LINUX20_ENV)
+ AFS_GUNLOCK();
+ code = osi_rdwr(tfile, tuiop, UIO_WRITE);
+ AFS_GLOCK();
+#elif defined(AFS_DARWIN80_ENV)
+ AFS_GUNLOCK();
+ code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
+ AFS_GLOCK();
+#elif defined(AFS_DARWIN_ENV)
+ AFS_GUNLOCK();
+ VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
+ code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
+ VOP_UNLOCK(tfile->vnode, 0, current_proc());
+ AFS_GLOCK();
+#elif defined(AFS_FBSD80_ENV)
+ AFS_GUNLOCK();
+ VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
+ code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
+ VOP_UNLOCK(tfile->vnode, 0);
+ AFS_GLOCK();
+#elif defined(AFS_FBSD_ENV)
+ AFS_GUNLOCK();
+ VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
+ code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
+ VOP_UNLOCK(tfile->vnode, 0, curthread);
+ AFS_GLOCK();
+#elif defined(AFS_NBSD_ENV)
+ AFS_GUNLOCK();
+ VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
+ code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
+#if defined(AFS_NBSD60_ENV)
+ VOP_UNLOCK(tfile->vnode);
#else
- if (aio & IO_APPEND) {
- /* append mode, start it at the right spot */
-#if defined(AFS_SUN56_ENV)
- auio->uio_loffset = 0;
-#endif
- filePos = auio->afsio_offset = avc->m.Length;
- }
-#endif
- /*
- * Note that we use startDate rather than calling osi_Time() here.
- * This is to avoid counting lock-waiting time in file date (for ranlib).
- */
- avc->m.Date = startDate;
-
-#if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
-#if defined(AFS_HPUX101_ENV)
- if ((totalLength + filePos) >> 9 > (p_rlimit(u.u_procp))[RLIMIT_FSIZE].rlim_cur) {
+ VOP_UNLOCK(tfile->vnode, 0);
+#endif
+ AFS_GLOCK();
+#elif defined(AFS_XBSD_ENV)
+ AFS_GUNLOCK();
+ VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
+ code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp);
+ VOP_UNLOCK(tfile->vnode, 0, curproc);
+ AFS_GLOCK();
#else
-#ifdef AFS_HPUX_ENV
- if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
-#else
- if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
-#endif
-#endif
- if (!noLock)
- ReleaseWriteLock(&avc->lock);
- return (EFBIG);
- }
+# ifdef AFS_HPUX_ENV
+ tuiop->uio_fpflags &= ~FSYNCIO; /* don't do sync io */
+# endif
+ code = VOP_RDWR(tfile->vnode, tuiop, UIO_WRITE, 0, afs_osi_credp);
#endif
-#ifdef AFS_VM_RDWR_ENV
- /*
- * If write is implemented via VM, afs_FakeOpen() is called from the
- * high-level write op.
- */
- if (avc->execsOrWriters <= 0) {
- printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
- }
-#else
- afs_FakeOpen(avc);
-#endif
- avc->states |= CDirty;
- tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
- while (totalLength > 0) {
- /* Read the cached info. If we call GetDCache while the cache
- * truncate daemon is running we risk overflowing the disk cache.
- * Instead we check for an existing cache slot. If we cannot
- * find an existing slot we wait for the cache to drain
- * before calling GetDCache.
- */
- if (noLock) {
- tdc = afs_FindDCache(avc, filePos);
- if (tdc) ObtainWriteLock(&tdc->lock, 653);
- } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
- tdc = afs_FindDCache(avc, filePos);
- if (tdc) {
- ObtainWriteLock(&tdc->lock, 654);
- if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
- (tdc->dflags & DFFetching)) {
- ReleaseWriteLock(&tdc->lock);
- afs_PutDCache(tdc);
- tdc = NULL;
- }
- }
- if (!tdc) {
- afs_MaybeWakeupTruncateDaemon();
- while (afs_blocksUsed >
- (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
- ReleaseWriteLock(&avc->lock);
- if (afs_blocksUsed - afs_blocksDiscarded >
- (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
- afs_WaitForCacheDrain = 1;
- afs_osi_Sleep(&afs_WaitForCacheDrain);
- }
- afs_MaybeFreeDiscardedDCache();
- afs_MaybeWakeupTruncateDaemon();
- ObtainWriteLock(&avc->lock,506);
- }
- avc->states |= CDirty;
- tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
- if (tdc) ObtainWriteLock(&tdc->lock, 655);
- }
- } else {
- tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
- if (tdc) ObtainWriteLock(&tdc->lock, 656);
- }
- if (!tdc) {
- error = EIO;
- break;
- }
- if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
- afs_stats_cmperf.cacheCurrDirtyChunks++;
- afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
- }
- if (!(tdc->f.states & DWriting)) {
- /* don't mark entry as mod if we don't have to */
- tdc->f.states |= DWriting;
- tdc->dflags |= DFEntryMod;
- }
- len = totalLength; /* write this amount by default */
- offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
- max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
- if (max <= len + offset) { /*if we'd go past the end of this chunk */
- /* it won't all fit in this chunk, so write as much
- as will fit */
- len = max - offset;
- }
- /* mung uio structure to be right for this transfer */
- afsio_copy(auio, &tuio, tvec);
- trimlen = len;
- afsio_trim(&tuio, trimlen);
- tuio.afsio_offset = offset;
+ osi_UFSClose(tfile);
- code = afs_MemWriteUIO(tdc->f.inode, &tuio);
- if (code) {
- void *mep; /* XXX in prototype world is struct memCacheEntry * */
- error = code;
- ZapDCE(tdc); /* bad data */
- mep = afs_MemCacheOpen(tdc->f.inode);
- afs_MemCacheTruncate(mep, 0);
- afs_MemCacheClose(mep);
- afs_stats_cmperf.cacheCurrDirtyChunks--;
- afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
- ReleaseWriteLock(&tdc->lock);
- afs_PutDCache(tdc);
- break;
- }
- /* otherwise we've written some, fixup length, etc and continue with next seg */
- len = len - tuio.afsio_resid; /* compute amount really transferred */
- tlen = len;
- afsio_skip(auio, tlen); /* advance auio over data written */
- /* compute new file size */
- if (offset + len > tdc->f.chunkBytes) {
- afs_int32 tlength = offset+len;
- afs_AdjustSize(tdc, tlength);
- }
- totalLength -= len;
- transferLength += len;
- filePos += len;
-#if defined(AFS_SGI_ENV)
- /* afs_xwrite handles setting m.Length */
- osi_Assert(filePos <= avc->m.Length);
-#else
- if (filePos > avc->m.Length) {
- afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
- ICL_TYPE_STRING, __FILE__,
- ICL_TYPE_LONG, __LINE__,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos));
- avc->m.Length = filePos;
- }
-#endif
- ReleaseWriteLock(&tdc->lock);
- afs_PutDCache(tdc);
-#if !defined(AFS_VM_RDWR_ENV) || defined(AFS_LINUX22_ENV)
- /*
- * If write is implemented via VM, afs_DoPartialWrite() is called from
- * the high-level write op.
- */
- if (!noLock) {
- code = afs_DoPartialWrite(avc, &treq);
- if (code) {
- error = code;
- break;
- }
- }
-#endif
- }
-#ifndef AFS_VM_RDWR_ENV
- afs_FakeClose(avc, acred);
-#endif
- if (error && !avc->vc_error)
- avc->vc_error = error;
- if (!noLock)
- ReleaseWriteLock(&avc->lock);
- osi_FreeSmallSpace(tvec);
-#ifdef AFS_DEC_ENV
- /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
- work. GFS is truly a poorly-designed interface! */
- afs_gfshack((struct gnode *) avc);
-#endif
- error = afs_CheckCode(error, &treq, 6);
- return error;
+ return code;
}
-
/* called on writes */
-afs_UFSWrite(avc, auio, aio, acred, noLock)
- register struct vcache *avc;
- struct uio *auio;
- int aio, noLock;
- struct AFS_UCRED *acred;
+int
+afs_write(struct vcache *avc, struct uio *auio, int aio,
+ afs_ucred_t *acred, int noLock)
{
afs_size_t totalLength;
afs_size_t transferLength;
afs_size_t filePos;
afs_size_t offset, len;
- afs_int32 tlen;
- afs_int32 trimlen;
+ afs_int32 tlen;
+ afs_int32 trimlen;
afs_int32 startDate;
afs_int32 max;
- register struct dcache *tdc;
+ struct dcache *tdc;
#ifdef _HIGHC_
volatile
#endif
afs_int32 error;
- struct uio tuio;
- struct iovec *tvec; /* again, should have define */
- struct osi_file *tfile;
- register afs_int32 code;
- struct vnode *vp;
+#if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
+ struct vnode *vp = AFSTOV(avc);
+#endif
+ struct uio *tuiop = NULL;
+ afs_int32 code;
struct vrequest treq;
- AFS_STATCNT(afs_UFSWrite);
+ AFS_STATCNT(afs_write);
+
if (avc->vc_error)
- return avc->vc_error;
+ return avc->vc_error;
+ if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW)
+ return ENETDOWN;
+
startDate = osi_Time();
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred)))
+ return code;
/* otherwise we read */
- totalLength = auio->afsio_resid;
- filePos = auio->afsio_offset;
+ totalLength = AFS_UIO_RESID(auio);
+ filePos = AFS_UIO_OFFSET(auio);
error = 0;
transferLength = 0;
- afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(totalLength),
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
+ afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(avc->f.m.Length));
if (!noLock) {
afs_MaybeWakeupTruncateDaemon();
- ObtainWriteLock(&avc->lock,556);
+ ObtainWriteLock(&avc->lock, 556);
}
#if defined(AFS_SGI_ENV)
{
- off_t diff;
- /*
- * afs_xwrite handles setting m.Length
- * and handles APPEND mode.
- * Since we are called via strategy, we need to trim the write to
- * the actual size of the file
- */
- osi_Assert(filePos <= avc->m.Length);
- diff = avc->m.Length - filePos;
- auio->afsio_resid = MIN(totalLength, diff);
- totalLength = auio->afsio_resid;
+ off_t diff;
+ /*
+ * afs_xwrite handles setting m.Length
+ * and handles APPEND mode.
+ * Since we are called via strategy, we need to trim the write to
+ * the actual size of the file
+ */
+ osi_Assert(filePos <= avc->f.m.Length);
+ diff = avc->f.m.Length - filePos;
+ AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
+ totalLength = AFS_UIO_RESID(auio);
}
#else
if (aio & IO_APPEND) {
/* append mode, start it at the right spot */
-#if defined(AFS_SUN56_ENV)
- auio->uio_loffset = 0;
+#if defined(AFS_SUN5_ENV)
+ auio->uio_loffset = 0;
#endif
- filePos = auio->afsio_offset = avc->m.Length;
+ filePos = avc->f.m.Length;
+ AFS_UIO_SETOFFSET(auio, avc->f.m.Length);
}
#endif
/*
* Note that we use startDate rather than calling osi_Time() here.
* This is to avoid counting lock-waiting time in file date (for ranlib).
*/
- avc->m.Date = startDate;
+ avc->f.m.Date = startDate;
-#if defined(AFS_HPUX_ENV) || defined(AFS_GFS_ENV)
+#if defined(AFS_HPUX_ENV)
#if defined(AFS_HPUX101_ENV)
- if ((totalLength + filePos) >> 9 > p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
+ if ((totalLength + filePos) >> 9 >
+ p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
#else
-#ifdef AFS_HPUX_ENV
if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
-#else
- if (totalLength + filePos > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
-#endif
#endif
if (!noLock)
ReleaseWriteLock(&avc->lock);
return (EFBIG);
}
#endif
-#ifdef AFS_VM_RDWR_ENV
+#if defined(AFS_VM_RDWR_ENV) && !defined(AFS_FAKEOPEN_ENV)
/*
* If write is implemented via VM, afs_FakeOpen() is called from the
* high-level write op.
*/
if (avc->execsOrWriters <= 0) {
- printf("WARNING: afs_ufswr vp=%x, exOrW=%d\n", avc, avc->execsOrWriters);
+ afs_warn("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
+ avc->execsOrWriters);
}
#else
afs_FakeOpen(avc);
#endif
- avc->states |= CDirty;
- tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
+ avc->f.states |= CDirty;
+
while (totalLength > 0) {
- /* read the cached info */
- if (noLock) {
- tdc = afs_FindDCache(avc, filePos);
- if (tdc) ObtainWriteLock(&tdc->lock, 657);
- } else if (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
- tdc = afs_FindDCache(avc, filePos);
- if (tdc) {
- ObtainWriteLock(&tdc->lock, 658);
- if (!hsame(tdc->f.versionNo, avc->m.DataVersion) ||
- (tdc->dflags & DFFetching)) {
- ReleaseWriteLock(&tdc->lock);
- afs_PutDCache(tdc);
- tdc = NULL;
- }
- }
- if (!tdc) {
- afs_MaybeWakeupTruncateDaemon();
- while (afs_blocksUsed >
- (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
- ReleaseWriteLock(&avc->lock);
- if (afs_blocksUsed - afs_blocksDiscarded >
- (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
- afs_WaitForCacheDrain = 1;
- afs_osi_Sleep(&afs_WaitForCacheDrain);
- }
- afs_MaybeFreeDiscardedDCache();
- afs_MaybeWakeupTruncateDaemon();
- ObtainWriteLock(&avc->lock,509);
- }
- avc->states |= CDirty;
- tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
- if (tdc) ObtainWriteLock(&tdc->lock, 659);
- }
- } else {
- tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 4);
- if (tdc) ObtainWriteLock(&tdc->lock, 660);
- }
+ tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, &treq,
+ noLock);
if (!tdc) {
error = EIO;
break;
}
- if (!(afs_indexFlags[tdc->index] & IFDataMod)) {
- afs_stats_cmperf.cacheCurrDirtyChunks++;
- afs_indexFlags[tdc->index] |= IFDataMod; /* so it doesn't disappear */
- }
- if (!(tdc->f.states & DWriting)) {
- /* don't mark entry as mod if we don't have to */
- tdc->f.states |= DWriting;
- tdc->dflags |= DFEntryMod;
- }
- tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
len = totalLength; /* write this amount by default */
offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
max = AFS_CHUNKTOSIZE(tdc->f.chunk); /* max size of this chunk */
- if (max <= len + offset) { /*if we'd go past the end of this chunk */
+ if (max <= len + offset) { /*if we'd go past the end of this chunk */
/* it won't all fit in this chunk, so write as much
- as will fit */
+ * as will fit */
len = max - offset;
}
- /* mung uio structure to be right for this transfer */
- afsio_copy(auio, &tuio, tvec);
+
+ if (tuiop)
+ afsio_free(tuiop);
trimlen = len;
- afsio_trim(&tuio, trimlen);
- tuio.afsio_offset = offset;
-#ifdef AFS_AIX_ENV
-#ifdef AFS_AIX41_ENV
- AFS_GUNLOCK();
- code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL, NULL, &afs_osi_cred);
- AFS_GLOCK();
-#else
-#ifdef AFS_AIX32_ENV
- code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, &tuio, NULL, NULL);
-#else
- code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t)&offset, &tuio, NULL, NULL, -1);
-#endif
-#endif /* AFS_AIX41_ENV */
-#else /* AFS_AIX_ENV */
-#ifdef AFS_SUN5_ENV
- AFS_GUNLOCK();
- VOP_RWLOCK(tfile->vnode, 1);
- code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
- VOP_RWUNLOCK(tfile->vnode, 1);
- AFS_GLOCK();
- if (code == ENOSPC) afs_warnuser("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
-#else
-#if defined(AFS_SGI_ENV)
- AFS_GUNLOCK();
- avc->states |= CWritingUFS;
- AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
- AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred, code);
- AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
- avc->states &= ~CWritingUFS;
- AFS_GLOCK();
-#else
-#ifdef AFS_OSF_ENV
- {
- struct ucred *tmpcred = u.u_cred;
- u.u_cred = &afs_osi_cred;
- tuio.uio_rw = UIO_WRITE;
- AFS_GUNLOCK();
- VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
- AFS_GLOCK();
- u.u_cred = tmpcred;
- }
-#else /* AFS_OSF_ENV */
-#if defined(AFS_HPUX100_ENV)
- {
- AFS_GUNLOCK();
- code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
- AFS_GLOCK();
- }
-#else
-#ifdef AFS_HPUX_ENV
- tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */
-#endif
-#if defined(AFS_LINUX20_ENV)
- AFS_GUNLOCK();
- code = osi_file_uio_rdwr(tfile, &tuio, UIO_WRITE);
- AFS_GLOCK();
-#else
-#if defined(AFS_DARWIN_ENV)
- AFS_GUNLOCK();
- VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
- code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
- VOP_UNLOCK(tfile->vnode, 0, current_proc());
- AFS_GLOCK();
-#else
-#if defined(AFS_FBSD_ENV)
- AFS_GUNLOCK();
- VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
- code = VOP_WRITE(tfile->vnode, &tuio, 0, &afs_osi_cred);
- VOP_UNLOCK(tfile->vnode, 0, curproc);
- AFS_GLOCK();
-#else
- code = VOP_RDWR(tfile->vnode, &tuio, UIO_WRITE, 0, &afs_osi_cred);
-#endif /* AFS_FBSD_ENV */
-#endif /* AFS_DARWIN_ENV */
-#endif /* AFS_LINUX20_ENV */
-#endif /* AFS_HPUX100_ENV */
-#endif /* AFS_OSF_ENV */
-#endif /* AFS_SGI_ENV */
-#endif /* AFS_SUN5_ENV */
-#endif /* AFS_AIX41_ENV */
+ tuiop = afsio_partialcopy(auio, trimlen);
+ AFS_UIO_SETOFFSET(tuiop, offset);
+
+ code = (*(afs_cacheType->vwriteUIO))(avc, &tdc->f.inode, tuiop);
+
if (code) {
+ void *cfile;
+
error = code;
- ZapDCE(tdc); /* bad data */
- osi_UFSTruncate(tfile,0); /* fake truncate the segment */
- afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
+ ZapDCE(tdc); /* bad data */
+ cfile = afs_CFileOpen(&tdc->f.inode);
+ afs_CFileTruncate(cfile, 0);
+ afs_CFileClose(cfile);
+ afs_AdjustSize(tdc, 0); /* sets f.chunkSize to 0 */
+
afs_stats_cmperf.cacheCurrDirtyChunks--;
- afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
- afs_CFileClose(tfile);
+ afs_indexFlags[tdc->index] &= ~IFDataMod; /* so it does disappear */
ReleaseWriteLock(&tdc->lock);
afs_PutDCache(tdc);
break;
}
/* otherwise we've written some, fixup length, etc and continue with next seg */
- len = len - tuio.afsio_resid; /* compute amount really transferred */
+ len = len - AFS_UIO_RESID(tuiop); /* compute amount really transferred */
tlen = len;
- afsio_skip(auio, tlen); /* advance auio over data written */
+ afsio_skip(auio, tlen); /* advance auio over data written */
/* compute new file size */
if (offset + len > tdc->f.chunkBytes) {
- afs_int32 tlength = offset+len;
+ afs_int32 tlength = offset + len;
afs_AdjustSize(tdc, tlength);
+ if (tdc->validPos < filePos + len)
+ tdc->validPos = filePos + len;
}
totalLength -= len;
transferLength += len;
filePos += len;
#if defined(AFS_SGI_ENV)
- /* afs_xwrite handles setting m.Length */
- osi_Assert(filePos <= avc->m.Length);
+ /* afs_xwrite handles setting m.Length */
+ osi_Assert(filePos <= avc->f.m.Length);
#else
- if (filePos > avc->m.Length) {
- afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
- ICL_TYPE_STRING, __FILE__,
- ICL_TYPE_LONG, __LINE__,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos));
- avc->m.Length = filePos;
+ if (filePos > avc->f.m.Length) {
+ if (AFS_IS_DISCON_RW)
+ afs_PopulateDCache(avc, filePos, &treq);
+ afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
+ __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(filePos));
+ avc->f.m.Length = filePos;
+#if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
+ vnode_pager_setsize(vp, filePos);
+#endif
}
#endif
- osi_UFSClose(tfile);
ReleaseWriteLock(&tdc->lock);
afs_PutDCache(tdc);
-#if !defined(AFS_VM_RDWR_ENV) || defined(AFS_LINUX22_ENV)
+#if !defined(AFS_VM_RDWR_ENV)
/*
* If write is implemented via VM, afs_DoPartialWrite() is called from
* the high-level write op.
}
#endif
}
-#ifndef AFS_VM_RDWR_ENV
+#if !defined(AFS_VM_RDWR_ENV) || defined(AFS_FAKEOPEN_ENV)
afs_FakeClose(avc, acred);
#endif
error = afs_CheckCode(error, &treq, 7);
avc->vc_error = error;
if (!noLock)
ReleaseWriteLock(&avc->lock);
- osi_FreeSmallSpace(tvec);
-#ifdef AFS_DEC_ENV
- /* next, on GFS systems, we update g_size so that lseek's relative to EOF will
- work. GFS is truly a poorly-designed interface! */
- afs_gfshack((struct gnode *) avc);
-#endif
+ if (tuiop)
+ afsio_free(tuiop);
+
#ifndef AFS_VM_RDWR_ENV
/*
* If write is implemented via VM, afs_fsync() is called from the high-level
* write op.
*/
-#if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
- if (noLock && (aio & IO_SYNC)) {
-#else
+#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
+ if (noLock && (aio & IO_SYNC)) {
+#else
#ifdef AFS_HPUX_ENV
/* On hpux on synchronous writes syncio will be set to IO_SYNC. If
* we're doing them because the file was opened with O_SYNCIO specified,
* we have to look in the u area. No single mechanism here!!
*/
- if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
+ if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
#else
if (noLock && (aio & FSYNC)) {
#endif
}
/* do partial write if we're low on unmodified chunks */
-afs_DoPartialWrite(avc, areq)
-register struct vcache *avc;
-struct vrequest *areq; {
- register afs_int32 code;
+int
+afs_DoPartialWrite(struct vcache *avc, struct vrequest *areq)
+{
+ afs_int32 code;
- if (afs_stats_cmperf.cacheCurrDirtyChunks <= afs_stats_cmperf.cacheMaxDirtyChunks)
- return 0; /* nothing to do */
+ if (afs_stats_cmperf.cacheCurrDirtyChunks <=
+ afs_stats_cmperf.cacheMaxDirtyChunks
+ || AFS_IS_DISCONNECTED)
+ return 0; /* nothing to do */
/* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
+
#if defined(AFS_SUN5_ENV)
code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
#else
return code;
}
-
-
-#if !defined (AFS_AIX_ENV) && !defined (AFS_HPUX_ENV) && !defined (AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_FBSD_ENV)
-#ifdef AFS_DUX50_ENV
-#define vno_close(X) vn_close((X), 0, NOCRED)
-#elif defined(AFS_DUX40_ENV)
-#define vno_close vn_close
-#endif
-/* We don't need this for AIX since:
- * (1) aix doesn't use fileops and it call close directly intead
- * (where the unlocking should be done) and
- * (2) temporarily, the aix lockf isn't supported yet.
- *
- * this stupid routine is used to release the flocks held on a
- * particular file descriptor. Sun doesn't pass file descr. info
- * through to the vnode layer, and yet we must unlock flocked files
- * on the *appropriate* (not first, as in System V) close call. Thus
- * this code.
- * How does this code get invoked? The afs AFS_FLOCK plugs in the new afs
- * file ops structure into any afs file when it gets flocked.
- * N.B: Intercepting close syscall doesn't trap aborts or exit system
- * calls.
-*/
-afs_closex(afd)
- register struct file *afd; {
- struct vrequest treq;
- struct vcache *tvc;
- afs_int32 flags;
- int closeDone;
- afs_int32 code = 0;
- struct afs_fakestat_state fakestat;
-
- AFS_STATCNT(afs_closex);
- /* setup the credentials */
- if (code = afs_InitReq(&treq, u.u_cred)) return code;
- afs_InitFakeStat(&fakestat);
-
- closeDone = 0;
- /* we're the last one. If we're an AFS vnode, clear the flags,
- * close the file and release the lock when done. Otherwise, just
- * let the regular close code work. */
- if (afd->f_type == DTYPE_VNODE) {
- tvc = VTOAFS(afd->f_data);
- if (IsAfsVnode(AFSTOV(tvc))) {
- code = afs_EvalFakeStat(&tvc, &fakestat, &treq);
- if (code) {
- afs_PutFakeStat(&fakestat);
- return code;
- }
- VN_HOLD(AFSTOV(tvc));
- flags = afd->f_flag & (FSHLOCK | FEXLOCK);
- afd->f_flag &= ~(FSHLOCK | FEXLOCK);
- code = vno_close(afd);
- if (flags)
-#if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)
- HandleFlock(tvc, LOCK_UN, &treq,
- u.u_procp->p_pid, 1/*onlymine*/);
-#else
- HandleFlock(tvc, LOCK_UN, &treq, 0, 1/*onlymine*/);
-#endif
-#ifdef AFS_DEC_ENV
- grele((struct gnode *) tvc);
-#else
- AFS_RELE(AFSTOV(tvc));
-#endif
- closeDone = 1;
- }
- }
- /* now, if close not done, do it */
- if (!closeDone) {
- code = vno_close(afd);
- }
- afs_PutFakeStat(&fakestat);
- return code; /* return code from vnode layer */
-}
-#endif
-
-
/* handle any closing cleanup stuff */
-#ifdef AFS_SGI_ENV
-afs_close(OSI_VC_ARG(avc), aflags, lastclose,
-#if !defined(AFS_SGI65_ENV)
- offset,
-#endif
- acred
-#if defined(AFS_SGI64_ENV) && !defined(AFS_SGI65_ENV)
- , flp
-#endif
- )
-lastclose_t lastclose;
-#if !defined(AFS_SGI65_ENV)
-off_t offset;
-#if defined(AFS_SGI64_ENV)
-struct flid *flp;
-#endif
-#endif
-#else /* SGI */
-#if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
-#ifdef AFS_SUN5_ENV
-afs_close(OSI_VC_ARG(avc), aflags, count, offset, acred)
- offset_t offset;
-#else
-afs_close(OSI_VC_ARG(avc), aflags, count, acred)
-#endif
-int count;
+int
+#if defined(AFS_SGI65_ENV)
+afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
+ afs_ucred_t *acred)
+#elif defined(AFS_SGI64_ENV)
+afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
+ off_t offset, afs_ucred_t *acred, struct flid *flp)
+#elif defined(AFS_SGI_ENV)
+afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose,
+ off_t offset, afs_ucred_t *acred)
+#elif defined(AFS_SUN5_ENV)
+afs_close(OSI_VC_DECL(avc), afs_int32 aflags, int count, offset_t offset,
+ afs_ucred_t *acred)
#else
-afs_close(OSI_VC_ARG(avc), aflags, acred)
-#endif
+afs_close(OSI_VC_DECL(avc), afs_int32 aflags, afs_ucred_t *acred)
#endif
- OSI_VC_DECL(avc);
- afs_int32 aflags;
- struct AFS_UCRED *acred;
{
- register afs_int32 code;
- register struct brequest *tb;
+ afs_int32 code;
+ struct brequest *tb;
struct vrequest treq;
#ifdef AFS_SGI65_ENV
struct flid flid;
#endif
struct afs_fakestat_state fakestat;
- OSI_VC_CONVERT(avc)
+ OSI_VC_CONVERT(avc);
AFS_STATCNT(afs_close);
afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc,
ICL_TYPE_INT32, aflags);
code = afs_InitReq(&treq, acred);
- if (code) return code;
+ if (code)
+ return code;
afs_InitFakeStat(&fakestat);
code = afs_EvalFakeStat(&avc, &fakestat, &treq);
if (code) {
afs_PutFakeStat(&fakestat);
return code;
}
+ AFS_DISCON_LOCK();
#ifdef AFS_SUN5_ENV
if (avc->flockCount) {
- HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
+ HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
}
#endif
#if defined(AFS_SGI_ENV)
if (!lastclose) {
afs_PutFakeStat(&fakestat);
- return 0;
- }
-#else
-#if defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
- if (count > 1) {
- /* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
- afs_PutFakeStat(&fakestat);
+ AFS_DISCON_UNLOCK();
return 0;
}
-#endif
-#endif
-#ifndef AFS_SUN5_ENV
-#if defined(AFS_SGI_ENV)
/* unlock any locks for pid - could be wrong for child .. */
- AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
-#ifdef AFS_SGI65_ENV
+ AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
+# ifdef AFS_SGI65_ENV
get_current_flid(&flid);
- cleanlocks((vnode_t *)avc, flid.fl_pid, flid.fl_sysid);
- HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1/*onlymine*/);
-#else
-#ifdef AFS_SGI64_ENV
- cleanlocks((vnode_t *)avc, flp);
-#else /* AFS_SGI64_ENV */
- cleanlocks((vnode_t *)avc, u.u_procp->p_epid, u.u_procp->p_sysid);
-#endif /* AFS_SGI64_ENV */
- HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1/*onlymine*/);
-#endif /* AFS_SGI65_ENV */
+ cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid);
+ HandleFlock(avc, LOCK_UN, &treq, flid.fl_pid, 1 /*onlymine */ );
+# else
+# ifdef AFS_SGI64_ENV
+ cleanlocks((vnode_t *) avc, flp);
+# else /* AFS_SGI64_ENV */
+ cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid);
+# endif /* AFS_SGI64_ENV */
+ HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
+# endif /* AFS_SGI65_ENV */
/* afs_chkpgoob will drop and re-acquire the global lock. */
- afs_chkpgoob(&avc->v, btoc(avc->m.Length));
-#else
- if (avc->flockCount) { /* Release Lock */
-#if defined(AFS_OSF_ENV) || defined(AFS_SUN_ENV)
- HandleFlock(avc, LOCK_UN, &treq, u.u_procp->p_pid, 1/*onlymine*/);
+ afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
+#elif defined(AFS_SUN5_ENV)
+ if (count > 1) {
+ /* The vfs layer may call this repeatedly with higher "count"; only
+ * on the last close (i.e. count = 1) we should actually proceed
+ * with the close. */
+ afs_PutFakeStat(&fakestat);
+ AFS_DISCON_UNLOCK();
+ return 0;
+ }
#else
- HandleFlock(avc, LOCK_UN, &treq, 0, 1/*onlymine*/);
-#endif
+ if (avc->flockCount) { /* Release Lock */
+ HandleFlock(avc, LOCK_UN, &treq, 0, 1 /*onlymine */ );
}
#endif
-#endif
if (aflags & (FWRITE | FTRUNC)) {
- if (afs_BBusy()) {
+ if (afs_BBusy() || (AFS_NFSXLATORREQ(acred)) || AFS_IS_DISCONNECTED) {
/* do it yourself if daemons are all busy */
- ObtainWriteLock(&avc->lock,124);
+ ObtainWriteLock(&avc->lock, 124);
code = afs_StoreOnLastReference(avc, &treq);
ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
- AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
+ AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
- }
- else {
+ } else {
#if defined(AFS_SGI_ENV)
- AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
+ AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
/* at least one daemon is idle, so ask it to do the store.
- Also, note that we don't lock it any more... */
- tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
- (afs_size_t) acred->cr_uid, (afs_size_t) 0,
- (void *) 0);
+ * Also, note that we don't lock it any more... */
+ tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
+ (afs_size_t) afs_cr_uid(acred), (afs_size_t) 0,
+ (void *)0, (void *)0, (void *)0);
/* sleep waiting for the store to start, then retrieve error code */
while ((tb->flags & BUVALID) == 0) {
tb->flags |= BUWAIT;
}
/* VNOVNODE is "acceptable" error code from close, since
- may happen when deleting a file on another machine while
- it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
+ * may happen when deleting a file on another machine while
+ * it is open here. We do the same for ENOENT since in afs_CheckCode we map VNOVNODE -> ENOENT */
if (code == VNOVNODE || code == ENOENT)
code = 0;
-
+
/* Ensure last closer gets the error. If another thread caused
* DoPartialWrite and this thread does not actually store the data,
* it may not see the quota error.
*/
- ObtainWriteLock(&avc->lock,406);
+ ObtainWriteLock(&avc->lock, 406);
if (avc->vc_error) {
#ifdef AFS_AIX32_ENV
osi_ReleaseVM(avc, acred);
#endif
+ /* printf("avc->vc_error=%d\n", avc->vc_error); */
code = avc->vc_error;
avc->vc_error = 0;
}
}
#ifdef AFS_SUN5_ENV
else if (code == ENOSPC) {
- afs_warnuser("afs: failed to store file (over quota or partition full)\n");
+ afs_warnuser
+ ("afs: failed to store file (over quota or partition full)\n");
}
#else
else if (code == ENOSPC) {
afs_warnuser("afs: failed to store file (partition full)\n");
- }
- else if (code == EDQUOT) {
+ } else if (code == EDQUOT) {
afs_warnuser("afs: failed to store file (over quota)\n");
}
#endif
/* finally, we flush any text pages lying around here */
hzero(avc->flushDV);
osi_FlushText(avc);
- }
- else {
+ } else {
#if defined(AFS_SGI_ENV)
- AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
+ AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
osi_Assert(avc->opens > 0);
#endif
/* file open for read */
code = avc->vc_error;
avc->vc_error = 0;
}
+#if defined(AFS_FBSD80_ENV)
+ /* XXX */
+ if (!avc->opens) {
+ afs_int32 opens, is_free, is_gone, is_doomed, iflag;
+ struct vnode *vp = AFSTOV(avc);
+ VI_LOCK(vp);
+ is_doomed = vp->v_iflag & VI_DOOMED;
+ is_free = vp->v_iflag & VI_FREE;
+ is_gone = vp->v_iflag & VI_DOINGINACT;
+ iflag = vp->v_iflag;
+ VI_UNLOCK(vp);
+ opens = avc->opens;
+ afs_warn("afs_close avc %p vp %p opens %d free %d doinginact %d doomed %d iflag %d\n",
+ avc, vp, opens, is_free, is_gone, is_doomed, iflag);
+ }
+#endif
avc->opens--;
ReleaseWriteLock(&avc->lock);
}
-#ifdef AFS_OSF_ENV
- if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
- afs_remunlink(avc, 1); /* ignore any return code */
- }
-#endif
+ AFS_DISCON_UNLOCK();
afs_PutFakeStat(&fakestat);
code = afs_CheckCode(code, &treq, 5);
return code;
}
-
-#ifdef AFS_OSF_ENV
-afs_fsync(avc, fflags, acred, waitfor)
-int fflags;
-int waitfor;
-#else /* AFS_OSF_ENV */
-#if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
-afs_fsync(OSI_VC_ARG(avc), flag, acred
-#ifdef AFS_SGI65_ENV
- , start, stop
-#endif
- )
-#else
-afs_fsync(avc, acred)
-#endif
-#endif
- OSI_VC_DECL(avc);
- struct AFS_UCRED *acred;
-#if defined(AFS_SGI_ENV) || defined(AFS_SUN53_ENV)
-int flag;
-#ifdef AFS_SGI65_ENV
-off_t start, stop;
-#endif
-#endif
+int
+#if defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)
+afs_fsync(OSI_VC_DECL(avc), int flag, afs_ucred_t *acred
+# ifdef AFS_SGI65_ENV
+ , off_t start, off_t stop
+# endif /* AFS_SGI65_ENV */
+ )
+#else /* !SUN5 && !SGI */
+afs_fsync(OSI_VC_DECL(avc), afs_ucred_t *acred)
+#endif
{
- register afs_int32 code;
+ afs_int32 code;
struct vrequest treq;
- OSI_VC_CONVERT(avc)
+ OSI_VC_CONVERT(avc);
if (avc->vc_error)
return avc->vc_error;
#if defined(AFS_SUN5_ENV)
- /* back out if called from NFS server */
+ /* back out if called from NFS server */
if (curthread->t_flag & T_DONTPEND)
return 0;
#endif
AFS_STATCNT(afs_fsync);
afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
- if (code = afs_InitReq(&treq, acred)) return code;
-
+ if ((code = afs_InitReq(&treq, acred)))
+ return code;
+ AFS_DISCON_LOCK();
#if defined(AFS_SGI_ENV)
- AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
+ AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
if (flag & FSYNC_INVAL)
osi_VM_FSyncInval(avc);
#endif /* AFS_SGI_ENV */
- ObtainSharedLock(&avc->lock,18);
+ ObtainSharedLock(&avc->lock, 18);
code = 0;
if (avc->execsOrWriters > 0) {
- /* put the file back */
- UpgradeSToWLock(&avc->lock,41);
- code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
- ConvertWToSLock(&avc->lock);
- }
+ if (!AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
+ /* Your average flush. */
+
+ /* put the file back */
+ UpgradeSToWLock(&avc->lock, 41);
+ code = afs_StoreAllSegments(avc, &treq, AFS_SYNC);
+ ConvertWToSLock(&avc->lock);
+ } else {
+ UpgradeSToWLock(&avc->lock, 711);
+ afs_DisconAddDirty(avc, VDisconWriteFlush, 1);
+ ConvertWToSLock(&avc->lock);
+ } /* if not disconnected */
+ } /* if (avc->execsOrWriters > 0) */
#if defined(AFS_SGI_ENV)
- AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
+ AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
if (code == VNOVNODE) {
/* syncing an unlinked file! - non-informative to pass an errno
* 102 (== VNOVNODE) to user
*/
- code = ENOENT;
+ code = ENOENT;
}
#endif
-
+ AFS_DISCON_UNLOCK();
code = afs_CheckCode(code, &treq, 33);
ReleaseSharedLock(&avc->lock);
return code;