/*
* SGI specific vnodeops + other misc interface glue
*/
-#include "../afs/param.h" /* Should be always first */
#include <afsconfig.h>
+#include "afs/param.h"
-RCSID("$Header$");
#ifdef AFS_SGI62_ENV
-#include "../afs/sysincludes.h" /* Standard vendor system headers */
-#include "../afs/afsincludes.h" /* Afs-based standard headers */
-#include "../afs/afs_stats.h" /* statistics */
-#include "../sys/flock.h"
-#include "../afs/nfsclient.h"
+#include "afs/sysincludes.h" /* Standard vendor system headers */
+#include "afsincludes.h" /* Afs-based standard headers */
+#include "afs/afs_stats.h" /* statistics */
+#include "sys/flock.h"
+#include "afs/nfsclient.h"
/* AFSBSIZE must be at least the size of a page, else the client will hang.
* For 64 bit platforms, the page size is more than 8K.
static int afs_xread(), afs_xwrite();
static int afs_xbmap(), afs_map(), afs_reclaim();
#ifndef AFS_SGI65_ENV
-static int afs_addmap(), afs_delmap();
+static int afs_addmap(), afs_delmap();
#endif
extern int afs_open(), afs_close(), afs_ioctl(), afs_getattr(), afs_setattr();
extern int afs_access(), afs_lookup();
extern int afs_create(), afs_remove(), afs_link(), afs_rename();
extern int afs_mkdir(), afs_rmdir(), afs_readdir();
-extern int afs_symlink(), afs_readlink(), afs_fsync(), afs_fid(), afs_frlock();
-static int afs_seek(OSI_VC_DECL(a), off_t b, off_t *c);
+extern int afs_symlink(), afs_readlink(), afs_fsync(), afs_fid(),
+afs_frlock();
+static int afs_seek(OSI_VC_DECL(a), off_t b, off_t * c);
#ifdef AFS_SGI64_ENV
extern int afs_xinactive();
#else
#endif
extern void afs_rwlock(OSI_VN_DECL(vp), AFS_RWLOCK_T b);
-extern void afs_rwunlock(OSI_VN_DECL(vp), AFS_RWLOCK_T b);
+extern void afs_rwunlock(OSI_VN_DECL(vp), AFS_RWLOCK_T b);
extern int afs_fid2();
-static int afsrwvp(register struct vcache *avc,
- register struct uio *uio,
- enum uio_rw rw,
- int ioflag,
+static int afsrwvp(register struct vcache *avc, register struct uio *uio,
+ enum uio_rw rw, int ioflag,
#ifdef AFS_SGI64_ENV
- struct cred *cr,
- struct flid *flp);
+ struct cred *cr, struct flid *flp);
#else
struct cred *cr);
#endif
{
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
- BHV_IDENTITY_INIT_POSITION(VNODE_POSITION_BASE),
+ BHV_IDENTITY_INIT_POSITION(VNODE_POSITION_BASE),
#else
- VNODE_POSITION_BASE,
-#endif
-#endif
- afs_open,
- afs_close,
- afs_xread,
- afs_xwrite,
- afs_ioctl,
- fs_setfl,
- afs_getattr,
- afs_setattr,
- afs_access,
- afs_lookup,
- afs_create,
- afs_remove,
- afs_link,
- afs_rename,
- afs_mkdir,
- afs_rmdir,
- afs_readdir,
- afs_symlink,
- afs_readlink,
- afs_fsync,
- afs_xinactive,
- afs_fid,
- afs_fid2,
- afs_rwlock,
- afs_rwunlock,
- afs_seek,
- fs_cmp,
- afs_frlock,
- fs_nosys, /* realvp */
- afs_xbmap,
- afs_strategy,
- afs_map,
+ VNODE_POSITION_BASE,
+#endif
+#endif
+ afs_open,
+ afs_close,
+ afs_xread,
+ afs_xwrite,
+ afs_ioctl,
+ fs_setfl,
+ afs_getattr,
+ afs_setattr,
+ afs_access,
+ afs_lookup,
+ afs_create,
+ afs_remove,
+ afs_link,
+ afs_rename,
+ afs_mkdir,
+ afs_rmdir,
+ afs_readdir,
+ afs_symlink,
+ afs_readlink,
+ afs_fsync,
+ afs_xinactive,
+ afs_fid,
+ afs_fid2,
+ afs_rwlock,
+ afs_rwunlock,
+ afs_seek,
+ fs_cmp,
+ afs_frlock,
+ fs_nosys, /* realvp */
+ afs_xbmap,
+ afs_strategy,
+ afs_map,
#ifdef AFS_SGI65_ENV
- fs_noerr, /* addmap - devices only */
- fs_noerr, /* delmap - devices only */
+ fs_noerr, /* addmap - devices only */
+ fs_noerr, /* delmap - devices only */
#else
- afs_addmap,
- afs_delmap,
-#endif
- fs_poll, /* poll */
- fs_nosys, /* dump */
- fs_pathconf,
- fs_nosys, /* allocstore */
- fs_nosys, /* fcntl */
- afs_reclaim, /* reclaim */
- fs_nosys, /* attr_get */
- fs_nosys, /* attr_set */
- fs_nosys, /* attr_remove */
- fs_nosys, /* attr_list */
+ afs_addmap,
+ afs_delmap,
+#endif
+ fs_poll, /* poll */
+ fs_nosys, /* dump */
+ fs_pathconf,
+ fs_nosys, /* allocstore */
+ fs_nosys, /* fcntl */
+ afs_reclaim, /* reclaim */
+ fs_nosys, /* attr_get */
+ fs_nosys, /* attr_set */
+ fs_nosys, /* attr_remove */
+ fs_nosys, /* attr_list */
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
- fs_cover,
- (vop_link_removed_t)fs_noval,
- fs_vnode_change,
- fs_tosspages,
- fs_flushinval_pages,
- fs_flush_pages,
- fs_invalfree_pages,
- fs_pages_sethole,
- (vop_commit_t)fs_nosys,
- (vop_readbuf_t)fs_nosys,
- fs_strgetmsg,
- fs_strputmsg,
+ fs_cover,
+ (vop_link_removed_t) fs_noval,
+ fs_vnode_change,
+ fs_tosspages,
+ fs_flushinval_pages,
+ fs_flush_pages,
+ fs_invalfree_pages,
+ fs_pages_sethole,
+ (vop_commit_t) fs_nosys,
+ (vop_readbuf_t) fs_nosys,
+ fs_strgetmsg,
+ fs_strputmsg,
#else
- fs_mount,
+ fs_mount,
#endif
#endif
};
+
#ifndef MP
struct vnodeops *afs_ops = &Afs_vnodeops;
#endif
-int afs_frlock(OSI_VN_DECL(vp), int cmd, struct flock *lfp, int flag,
- off_t offset,
+int
+afs_frlock(OSI_VN_DECL(vp), int cmd, struct flock *lfp, int flag,
+ off_t offset,
#ifdef AFS_SGI65_ENV
- vrwlock_t vrwlock,
+ vrwlock_t vrwlock,
#endif
- cred_t *cr)
+ cred_t * cr)
{
int error;
- OSI_VN_CONVERT(vp)
+ OSI_VN_CONVERT(vp);
#ifdef AFS_SGI65_ENV
struct flid flid;
int pid;
* For GETLK we do a bit more - we first check any byte-wise
* locks - if none then check for full AFS file locks
*/
- if (cmd == F_GETLK || lfp->l_whence != 0 || lfp->l_start != 0 ||
- (lfp->l_len != MAXEND && lfp->l_len != 0)) {
+ if (cmd == F_GETLK || lfp->l_whence != 0 || lfp->l_start != 0
+ || (lfp->l_len != MAXEND && lfp->l_len != 0)) {
AFS_RWLOCK(vp, VRWLOCK_WRITE);
AFS_GUNLOCK();
#ifdef AFS_SGI65_ENV
- error = fs_frlock(OSI_VN_ARG(vp), cmd, lfp, flag, offset,
- vrwlock, cr);
+ error =
+ fs_frlock(OSI_VN_ARG(vp), cmd, lfp, flag, offset, vrwlock, cr);
#else
error = fs_frlock(vp, cmd, lfp, flag, offset, cr);
#endif
/* fall through to check for full AFS file locks */
}
- /* map BSD style to plain - we don't call reclock()
+ /* map BSD style to plain - we don't call reclock()
* and its only there that the difference is important
*/
switch (cmd) {
- case F_GETLK:
- case F_RGETLK:
- break;
- case F_SETLK:
- case F_RSETLK:
- break;
- case F_SETBSDLK:
- cmd = F_SETLK;
- break;
- case F_SETLKW:
- case F_RSETLKW:
- break;
- case F_SETBSDLKW:
- cmd = F_SETLKW;
- break;
- default:
- return EINVAL;
+ case F_GETLK:
+ case F_RGETLK:
+ break;
+ case F_SETLK:
+ case F_RSETLK:
+ break;
+ case F_SETBSDLK:
+ cmd = F_SETLK;
+ break;
+ case F_SETLKW:
+ case F_RSETLKW:
+ break;
+ case F_SETBSDLKW:
+ cmd = F_SETLKW;
+ break;
+ default:
+ return EINVAL;
}
AFS_GUNLOCK();
- error = convoff(vp, lfp, 0, offset
- , SEEKLIMIT
+ error = convoff(vp, lfp, 0, offset, SEEKLIMIT
#ifdef AFS_SGI64_ENV
, OSI_GET_CURRENT_CRED()
#endif /* AFS_SGI64_ENV */
- );
+ );
AFS_GLOCK();
if (!error) {
*/
/* ARGSUSED */
#ifdef AFS_SGI64_ENV
-static int afs_xread(OSI_VC_ARG(avc), uiop, ioflag, cr, flp)
-struct flid *flp;
+static int
+afs_xread(OSI_VC_ARG(avc), uiop, ioflag, cr, flp)
+ struct flid *flp;
#else
-static int afs_xread(OSI_VC_ARG(avc), uiop, ioflag, cr)
+static int
+afs_xread(OSI_VC_ARG(avc), uiop, ioflag, cr)
#endif
- OSI_VC_DECL(avc);
- struct uio *uiop;
- int ioflag;
- struct cred *cr;
+OSI_VC_DECL(avc);
+ struct uio *uiop;
+ int ioflag;
+ struct cred *cr;
{
int code;
- OSI_VC_CONVERT(avc)
+ OSI_VC_CONVERT(avc);
osi_Assert(avc->v.v_count > 0);
if (avc->v.v_type != VREG)
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
if (!(ioflag & IO_ISLOCKED))
- AFS_RWLOCK((vnode_t*)avc, VRWLOCK_READ);
+ AFS_RWLOCK((vnode_t *) avc, VRWLOCK_READ);
#endif
code = afsrwvp(avc, uiop, UIO_READ, ioflag, cr, flp);
#ifdef AFS_SGI65_ENV
if (!(ioflag & IO_ISLOCKED))
- AFS_RWUNLOCK((vnode_t*)avc, VRWLOCK_READ);
+ AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_READ);
#endif
#else
code = afsrwvp(avc, uiop, UIO_READ, ioflag, cr);
/* ARGSUSED */
#ifdef AFS_SGI64_ENV
-static int afs_xwrite(OSI_VC_ARG(avc), uiop, ioflag, cr, flp)
-struct flid *flp;
+static int
+afs_xwrite(OSI_VC_ARG(avc), uiop, ioflag, cr, flp)
+ struct flid *flp;
#else
-static int afs_xwrite(OSI_VC_ARG(avc), uiop, ioflag, cr)
+static int
+afs_xwrite(OSI_VC_ARG(avc), uiop, ioflag, cr)
#endif
- OSI_VC_DECL(avc);
- struct uio *uiop;
- int ioflag;
- struct cred *cr;
+OSI_VC_DECL(avc);
+ struct uio *uiop;
+ int ioflag;
+ struct cred *cr;
{
int code;
- OSI_VC_CONVERT(avc)
+ OSI_VC_CONVERT(avc);
osi_Assert(avc->v.v_count > 0);
if (avc->v.v_type != VREG)
return EISDIR;
if (ioflag & IO_APPEND)
- uiop->uio_offset = avc->m.Length;
+ uiop->uio_offset = avc->f.m.Length;
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
if (!(ioflag & IO_ISLOCKED))
- AFS_RWLOCK(((vnode_t*)avc), VRWLOCK_WRITE);
+ AFS_RWLOCK(((vnode_t *) avc), VRWLOCK_WRITE);
#endif
code = afsrwvp(avc, uiop, UIO_WRITE, ioflag, cr, flp);
#ifdef AFS_SGI65_ENV
if (!(ioflag & IO_ISLOCKED))
- AFS_RWUNLOCK((vnode_t*)avc, VRWLOCK_WRITE);
+ AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
#else
code = afsrwvp(avc, uiop, UIO_WRITE, ioflag, cr);
static int acchk = 0;
static int acdrop = 0;
-static int afsrwvp(register struct vcache *avc,
- register struct uio *uio,
- enum uio_rw rw,
- int ioflag,
+static int
+afsrwvp(register struct vcache *avc, register struct uio *uio, enum uio_rw rw,
+ int ioflag,
#ifdef AFS_SGI64_ENV
- struct cred *cr,
- struct flid *flp)
+ struct cred *cr, struct flid *flp)
#else
- struct cred *cr)
+ struct cred *cr)
#endif
{
- register struct vnode *vp = (struct vnode *)avc;
+ register struct vnode *vp = AFSTOV(avc);
struct buf *bp;
daddr_t bn;
- size_t acnt, cnt;
- int off, newoff;
- ssize_t bsize, rem, len;
+ off_t acnt, cnt;
+ off_t off, newoff;
+ off_t bsize, rem, len;
int error;
struct bmapval bmv[2];
int nmaps, didFakeOpen = 0;
struct dcache *tdc;
int counter = 0;
- osi_Assert((valusema(&avc->vc_rwlock) <= 0) &&
- (OSI_GET_LOCKID() == avc->vc_rwlockid));
+ osi_Assert((valusema(&avc->vc_rwlock) <= 0)
+ && (OSI_GET_LOCKID() == avc->vc_rwlockid));
newoff = uio->uio_resid + uio->uio_offset;
if (uio->uio_resid <= 0) {
return (0);
}
- if (uio->uio_offset < 0
- || (signed long)newoff < 0) {
+ if (uio->uio_offset < 0 || newoff < 0) {
return (EINVAL);
}
if (ioflag & IO_DIRECT)
return EINVAL;
- if (rw == UIO_WRITE && vp->v_type == VREG
- && newoff > uio->uio_limit) {
+ if (rw == UIO_WRITE && vp->v_type == VREG && newoff > uio->uio_limit) {
return (EFBIG);
}
afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, ioflag,
- ICL_TYPE_INT32, rw, ICL_TYPE_INT32, 0);
+ ICL_TYPE_INT32, ioflag, ICL_TYPE_INT32, rw, ICL_TYPE_INT32, 0);
/* get a validated vcache entry */
afs_InitReq(&treq, cr);
error = afs_VerifyVCache(avc, &treq);
- if (error) return afs_CheckCode(error, &treq, 51);
+ if (error)
+ return afs_CheckCode(error, &treq, 51);
/*
* flush any stale pages - this will unmap
osi_FlushPages(avc, cr);
if (cr && AFS_NFSXLATORREQ(cr) && rw == UIO_READ) {
- if (!afs_AccessOK(avc, PRSFS_READ, &treq,
- CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ))
+ if (!afs_AccessOK
+ (avc, PRSFS_READ, &treq,
+ CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ))
return EACCES;
}
/*
* the last writers credentials
*/
if (rw == UIO_WRITE || (rw == UIO_READ && avc->cred == NULL)) {
- ObtainWriteLock(&avc->lock,92);
+ ObtainWriteLock(&avc->lock, 92);
if (avc->cred)
crfree(avc->cred);
crhold(cr);
* because there're no open/close nfs rpc's to call our afs_open/close.
*/
if (root_exported && rw == UIO_WRITE) {
- ObtainWriteLock(&avc->lock,234);
+ ObtainWriteLock(&avc->lock, 234);
if (root_exported) {
didFakeOpen = 1;
afs_FakeOpen(avc);
error = 0;
if (rw == UIO_WRITE) {
- ObtainWriteLock(&avc->lock,330);
- avc->states |= CDirty;
+ ObtainWriteLock(&avc->lock, 330);
+ avc->f.states |= CDirty;
ReleaseWriteLock(&avc->lock);
}
error = avc->vc_error;
break;
}
- bsize = AFSBSIZE; /* why not?? */
+ bsize = AFSBSIZE; /* why not?? */
off = uio->uio_offset % bsize;
bn = BTOBBT(uio->uio_offset - off);
/*
/*
* read/paging in a normal file
*/
- rem = avc->m.Length - (afs_int32)uio->uio_offset;
+ rem = avc->f.m.Length - uio->uio_offset;
if (rem <= 0)
/* EOF */
break;
* do read-ahead on any file that has potentially
* dirty mmap pages.
*/
- if ((avc->lastr + BTOBB(AFSBSIZE) == bn ||
- uio->uio_resid > AFSBSIZE)
+ if ((avc->lastr + BTOBB(AFSBSIZE) == bn
+ || uio->uio_resid > AFSBSIZE)
#ifdef AFS_SGI61_ENV
&& (!AFS_VN_MAPPED(vp))
#else /* AFS_SGI61_ENV */
}
#ifdef DEBUG
else if (prnra)
- printf("NRA:vp 0x%x lastr %d bn %d len %d cnt %d bsize %d rem %d resid %d\n",
- vp, avc->lastr, bn,
- len, cnt, bsize, rem,
- uio->uio_resid);
+ printf
+ ("NRA:vp 0x%x lastr %d bn %d len %d cnt %d bsize %d rem %d resid %d\n",
+ vp, avc->lastr, bn, len, cnt, bsize, rem,
+ uio->uio_resid);
#endif
avc->lastr = bn;
*/
if (counter == 0 || AFS_CHUNKOFFSET(off) == 0) {
AFS_GLOCK();
- ObtainWriteLock(&avc->lock,562);
+ ObtainWriteLock(&avc->lock, 562);
tdc = afs_FindDCache(avc, off);
- if (tdc) {
- if (!(tdc->flags & DFNextStarted))
- afs_PrefetchChunk(avc, tdc, cr, &treq);
+ if (tdc) {
+ if (!(tdc->mflags & DFNextStarted))
+ afs_PrefetchChunk(avc, tdc, cr, &treq);
afs_PutDCache(tdc);
- }
+ }
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
}
*/
if (counter > 0 && AFS_CHUNKOFFSET(uio->uio_offset) == 0) {
AFS_GLOCK();
- ObtainWriteLock(&avc->lock,90);
+ ObtainWriteLock(&avc->lock, 90);
error = afs_DoPartialWrite(avc, &treq);
if (error == 0)
- avc->states |= CDirty;
+ avc->f.states |= CDirty;
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
- if (error) break;
+ if (error)
+ break;
}
counter++;
else
bp = chunkread(vp, bmv, 1, cr);
- avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
+ avc->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
}
if (bp->b_flags & B_ERROR) {
/*
* Since we compile -signed, b_error is a signed
- * char when it should ba an unsigned char.
+ * char when it should be an unsigned char.
* This can cause some errors codes to be interpreted
* as negative #s
*/
osi_Assert(bp->b_error == 0);
if (uio->uio_segflg != UIO_NOSPACE)
- (void) bp_mapin(bp);
+ (void)bp_mapin(bp);
AFS_UIOMOVE(bp->b_un.b_addr + bmv[0].pboff, cnt, rw, uio, error);
if (rw == UIO_READ || error) {
if (bp->b_flags & B_DELWRI) {
* m.Length is the maximum number of bytes known to be in the file.
* Make sure it is at least as high as the last byte we just wrote
* into the buffer.
- */
- if (avc->m.Length < (afs_int32)uio->uio_offset) {
+ */
+ if (avc->f.m.Length < uio->uio_offset) {
AFS_GLOCK();
- ObtainWriteLock(&avc->lock,235);
- avc->m.Length = uio->uio_offset;
- ReleaseWriteLock(&avc->lock);
+ ObtainWriteLock(&avc->lock, 235);
+ avc->f.m.Length = uio->uio_offset;
+ ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
}
if (uio->uio_fmode & FSYNC) {
/*
* Since EIO on an unlinked file is non-intuitive - give some
* explanation
- */
+ */
if (error) {
- if (avc->m.LinkCount == 0)
- cmn_err(CE_WARN,"AFS: Process pid %d write error %d writing to unlinked file.",
+ if (avc->f.m.LinkCount == 0)
+ cmn_err(CE_WARN,
+ "AFS: Process pid %d write error %d writing to unlinked file.",
OSI_GET_CURRENT_PID(), error);
}
}
} while (!error && uio->uio_resid > 0);
- afs_chkpgoob(&avc->v, btoc(avc->m.Length));
+ afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
AFS_GLOCK();
- if (rw == UIO_WRITE && error == 0 && (avc->states & CDirty)) {
+ if (rw == UIO_WRITE && error == 0 && (avc->f.states & CDirty)) {
ObtainWriteLock(&avc->lock, 405);
error = afs_DoPartialWrite(avc, &treq);
ReleaseWriteLock(&avc->lock);
if (!error) {
#ifdef AFS_SGI61_ENV
- if (((ioflag & IO_SYNC) || (ioflag & IO_DSYNC)) && (rw == UIO_WRITE) &&
- !AFS_NFSXLATORREQ(cr)) {
- error = afs_fsync(avc, 0, cr);
+ if (((ioflag & IO_SYNC) || (ioflag & IO_DSYNC)) && (rw == UIO_WRITE)
+ && !AFS_NFSXLATORREQ(cr)) {
+ error = afs_fsync(avc, 0, cr
+#ifdef AFS_SGI65_ENV
+ , 0, 0
+#endif
+ );
}
#else /* AFS_SGI61_ENV */
if ((ioflag & IO_SYNC) && (rw == UIO_WRITE) && !AFS_NFSXLATORREQ(cr)) {
#endif /* AFS_SGI61_ENV */
}
if (didFakeOpen) {
- ObtainWriteLock(&avc->lock,236);
+ ObtainWriteLock(&avc->lock, 236);
afs_FakeClose(avc, cr); /* XXXX For nfs trans XXXX */
ReleaseWriteLock(&avc->lock);
}
afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, ioflag,
- ICL_TYPE_INT32, rw, ICL_TYPE_INT32, error);
+ ICL_TYPE_INT32, ioflag, ICL_TYPE_INT32, rw, ICL_TYPE_INT32,
+ error);
return (error);
}
-int afs_xbmap(OSI_VC_ARG(avc), offset, count, flag, cr, bmv, nbmv)
- OSI_VC_DECL(avc);
- off_t offset;
- ssize_t count;
- int flag;
- struct cred *cr;
- struct bmapval *bmv;
- int *nbmv;
+int
+afs_xbmap(OSI_VC_ARG(avc), offset, count, flag, cr, bmv, nbmv)
+OSI_VC_DECL(avc);
+ off_t offset;
+ ssize_t count;
+ int flag;
+ struct cred *cr;
+ struct bmapval *bmv;
+ int *nbmv;
{
- int bsize; /* server's block size in bytes */
+ int bsize; /* server's block size in bytes */
off_t off;
size_t rem, cnt;
- OSI_VC_CONVERT(avc)
+ OSI_VC_CONVERT(avc);
bsize = AFSBSIZE;
off = offset % bsize; /* offset into block */
bmv->bn = BTOBBT(offset - off);
bmv->offset = bmv->bn;
bmv->pboff = off;
- rem = avc->m.Length - offset;
+ rem = avc->f.m.Length - offset;
if (rem <= 0)
- cnt = 0; /* EOF */
+ cnt = 0; /* EOF */
else
cnt = MIN(bsize - off, rem);
-
+
/*
* It is benign to ignore *nbmv > 1, since it is only for requesting
* readahead.
*/
static void
afs_strategy(OSI_VC_ARG(avc), bp)
- OSI_VC_DECL(avc);
+OSI_VC_DECL(avc);
struct buf *bp;
{
uio_t auio;
iovec_t aiovec;
int error;
struct cred *cr;
- OSI_VC_CONVERT(avc)
- vnode_t *vp = (vnode_t *)avc;
+ OSI_VC_CONVERT(avc);
+ vnode_t *vp = (vnode_t *) avc;
/*
* We can't afford DELWRI buffers for 2 reasons:
* 1) Since we can call underlying EFS, we can require a
- * buffer to flush a buffer. This leads to 2 potential
- * recursions/deadlocks
- * a) if all buffers are DELWRI afs buffers, then
- * ngeteblk -> bwrite -> afs_strategy -> afs_write ->
- * UFS_Write -> efs_write -> ngeteblk .... could
- * recurse a long ways!
- * b) brelse -> chunkhold which can call dchunkpush
- * will look for any DELWRI buffers and call strategy
- * on them. This can then end up via UFS_Write
- * recursing
+ * buffer to flush a buffer. This leads to 2 potential
+ * recursions/deadlocks
+ * a) if all buffers are DELWRI afs buffers, then
+ * ngeteblk -> bwrite -> afs_strategy -> afs_write ->
+ * UFS_Write -> efs_write -> ngeteblk .... could
+ * recurse a long ways!
+ * b) brelse -> chunkhold which can call dchunkpush
+ * will look for any DELWRI buffers and call strategy
+ * on them. This can then end up via UFS_Write
+ * recursing
* Current hack:
- * a) We never do bdwrite(s) on AFS buffers.
- * b) We call pdflush with B_ASYNC
- * c) in chunkhold where it can set a buffer DELWRI
- * we immediatly do a clusterwrite for AFS vp's
+ * a) We never do bdwrite(s) on AFS buffers.
+ * b) We call pdflush with B_ASYNC
+ * c) in chunkhold where it can set a buffer DELWRI
+ *	    we immediately do a clusterwrite for AFS vp's
* XXX Alas, 'c' got dropped in 5.1 so its possible to get DELWRI
- * buffers if someone has mmaped the file and dirtied it then
- * reads/faults it again.
- * Instead - wherever we call chunkread/getchunk we check for a
- * returned bp with DELWRI set, and write it out immediately
+ * buffers if someone has mmaped the file and dirtied it then
+ * reads/faults it again.
+ * Instead - wherever we call chunkread/getchunk we check for a
+ * returned bp with DELWRI set, and write it out immediately
*/
if (CheckLock(&avc->lock) && VN_GET_DBUF(vp)) {
printf("WARN: afs_strategy vp=%x, v_dbuf=%x bp=%x\n", vp,
iodone(bp);
return;
}
- if (bp->b_error != 0)
- printf("WARNING: afs_strategy3 vp=%x, bp=%x, err=%x\n", vp, bp, bp->b_error);
+ if (bp->b_error != 0)
+ printf("WARNING: afs_strategy3 vp=%x, bp=%x, err=%x\n", vp, bp,
+ bp->b_error);
/*
* To get credentials somewhat correct (we may be called from bdflush/
*/
ObtainReadLock(&avc->lock);
if (bp->b_flags & B_READ) {
- if (BBTOB(bp->b_blkno) >= avc->m.Length) {
+ if (BBTOB(bp->b_blkno) >= avc->f.m.Length) {
/* we are responsible for zero'ing the page */
caddr_t c;
c = bp_mapin(bp);
- bzero(c, bp->b_bcount);
+ memset(c, 0, bp->b_bcount);
iodone(bp);
ReleaseReadLock(&avc->lock);
return;
}
- } else if ((avc->states & CWritingUFS) && (bp->b_flags & B_DELWRI)) {
+ } else if ((avc->f.states & CWritingUFS) && (bp->b_flags & B_DELWRI)) {
bp->b_ref = 3;
ReleaseReadLock(&avc->lock);
iodone(bp);
}
/* ARGSUSED */
-static int afs_seek(OSI_VC_ARG(avc), ooff, noffp)
- OSI_VC_DECL(avc);
- off_t ooff;
- off_t *noffp;
+static int
+afs_seek(OSI_VC_ARG(avc), ooff, noffp)
+OSI_VC_DECL(avc);
+ off_t ooff;
+ off_t *noffp;
{
- return *noffp < 0 ? EINVAL : 0;
+ return *noffp < 0 ? EINVAL : 0;
}
#if !defined(AFS_SGI65_ENV)
/* Irix 6.5 uses addmap/delmap only for devices. */
/* ARGSUSED */
-static int afs_addmap(OSI_VC_ARG(avc), off, prp, addr, len, prot, maxprot,
- flags, cr)
- off_t off;
- OSI_VC_DECL(avc);
- struct pregion *prp;
- addr_t addr;
- size_t len;
- u_int prot, maxprot;
- u_int flags;
- struct cred *cr;
+static int
+afs_addmap(OSI_VC_ARG(avc), off, prp, addr, len, prot, maxprot, flags, cr)
+ off_t off;
+OSI_VC_DECL(avc);
+ struct pregion *prp;
+ addr_t addr;
+ size_t len;
+ u_int prot, maxprot;
+ u_int flags;
+ struct cred *cr;
{
- OSI_VC_CONVERT(avc)
- struct vnode *vp = (struct vnode*)avc;
+ OSI_VC_CONVERT(avc);
+ struct vnode *vp = AFSTOV(avc);
if (vp->v_flag & VNOMAP)
return ENOSYS;
AFS_RWLOCK(vp, VRWLOCK_WRITE);
if (avc->mapcnt == 0) {
/* on first mapping add a open reference */
- ObtainWriteLock(&avc->lock,237);
+ ObtainWriteLock(&avc->lock, 237);
avc->execsOrWriters++;
avc->opens++;
ReleaseWriteLock(&avc->lock);
return 0;
}
-/*ARGSUSED*/
-static int afs_delmap(OSI_VC_ARG(avc), off, prp, addr, len, prot, maxprot,
- flags, acred)
- off_t off;
- OSI_VC_DECL(avc);
- struct pregion *prp;
- addr_t addr;
- size_t len;
- u_int prot, maxprot;
- u_int flags;
- struct cred *acred;
+ /*ARGSUSED*/ static int
+afs_delmap(OSI_VC_ARG(avc), off, prp, addr, len, prot, maxprot, flags, acred)
+ off_t off;
+OSI_VC_DECL(avc);
+ struct pregion *prp;
+ addr_t addr;
+ size_t len;
+ u_int prot, maxprot;
+ u_int flags;
+ struct cred *acred;
{
- OSI_VC_CONVERT(avc)
- struct vnode *vp = (struct vnode*)avc;
+ OSI_VC_CONVERT(avc);
+ struct vnode *vp = AFSTOV(avc);
register struct brequest *tb;
struct vrequest treq;
afs_int32 code;
/* on last mapping push back and remove our reference */
osi_Assert(avc->execsOrWriters > 0);
osi_Assert(avc->opens > 0);
- if (avc->m.LinkCount == 0) {
- ObtainWriteLock(&avc->lock,238);
+ if (avc->f.m.LinkCount == 0) {
+ ObtainWriteLock(&avc->lock, 238);
AFS_GUNLOCK();
- PTOSSVP(vp, (off_t)0, (off_t)MAXLONG);
+ PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
AFS_GLOCK();
ReleaseWriteLock(&avc->lock);
}
afs_InitReq(&treq, acred);
if (afs_BBusy()) {
/* do it yourself if daemons are all busy */
- ObtainWriteLock(&avc->lock,239);
+ ObtainWriteLock(&avc->lock, 239);
code = afs_StoreOnLastReference(avc, &treq);
ReleaseWriteLock(&avc->lock);
/* BStore does CheckCode so we should also */
/* VNOVNODE is "acceptable" error code from close, since
- may happen when deleting a file on another machine while
- it is open here. */
+ * may happen when deleting a file on another machine while
+ * it is open here. */
if (code == VNOVNODE)
code = 0;
if (code) {
- afs_StoreWarn(code, avc->fid.Fid.Volume, /* /dev/console */ 1);
+ afs_StoreWarn(code, avc->f.fid.Fid.Volume, /* /dev/console */
+ 1);
}
code = afs_CheckCode(code, &treq, 52);
AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
/* at least one daemon is idle, so ask it to do the store.
* Also, note that we don't lock it any more... */
- tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred, (long)acred->cr_uid,
- 0L, 0L, 0L);
+ tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred,
+ (afs_size_t) afs_cr_uid(acred), 0L, (void *)0);
/* sleep waiting for the store to start, then retrieve error code */
while ((tb->flags & BUVALID) == 0) {
tb->flags |= BUWAIT;
* ever having been 'opened'
*/
#ifdef AFS_SGI65_ENV
-static int afs_map(OSI_VC_ARG(avc), off, len, prot, flags, cr, vpp)
+static int
+afs_map(OSI_VC_ARG(avc), off, len, prot, flags, cr, vpp)
off_t off;
- OSI_VC_DECL(avc);
+OSI_VC_DECL(avc);
size_t len;
mprot_t prot;
u_int flags;
struct cred *cr;
vnode_t **vpp;
#else
-static int afs_map(OSI_VC_ARG(avc), off, prp, addrp, len, prot, maxprot,
- flags, cr)
- off_t off;
- OSI_VC_DECL(avc);
- struct pregion *prp;
- addr_t *addrp;
- size_t len;
- u_int prot, maxprot;
- u_int flags;
- struct cred *cr;
+static int
+afs_map(OSI_VC_ARG(avc), off, prp, addrp, len, prot, maxprot, flags, cr)
+ off_t off;
+OSI_VC_DECL(avc);
+ struct pregion *prp;
+ addr_t *addrp;
+ size_t len;
+ u_int prot, maxprot;
+ u_int flags;
+ struct cred *cr;
#endif
{
- OSI_VC_CONVERT(avc)
- struct vnode *vp = (struct vnode*)avc;
+ OSI_VC_CONVERT(avc);
+ struct vnode *vp = AFSTOV(avc);
struct vrequest treq;
int error;
/* get a validated vcache entry */
afs_InitReq(&treq, cr);
error = afs_VerifyVCache(avc, &treq);
- if (error) return afs_CheckCode(error, &treq, 53);
+ if (error)
+ return afs_CheckCode(error, &treq, 53);
osi_FlushPages(avc, cr); /* ensure old pages are gone */
#ifdef AFS_SGI65_ENV
AFS_RWLOCK(vp, VRWLOCK_WRITE);
ObtainWriteLock(&avc->lock, 501);
if (avc->execsOrWriters > 0) {
- avc->execsOrWriters ++;
- avc->opens ++;
- avc->mapcnt ++; /* count eow's due to mappings. */
+ avc->execsOrWriters++;
+ avc->opens++;
+ avc->mapcnt++; /* count eow's due to mappings. */
}
ReleaseWriteLock(&avc->lock);
AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
#else
AFS_RWLOCK(vp, VRWLOCK_WRITE);
AFS_GUNLOCK();
- error = fs_map_subr(vp, (off_t) avc->m.Length, (u_int)avc->m.Mode, off, prp,
- *addrp, len, prot, maxprot, flags, cr);
+ error =
+ fs_map_subr(vp, (off_t) avc->f.m.Length, (u_int) avc->f.m.Mode, off, prp,
+ *addrp, len, prot, maxprot, flags, cr);
AFS_GLOCK();
AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
#endif /* AFS_SGI65_ENV */
#ifdef AFS_SGI64_ENV
int
#else
-void
+void
#endif
afs_xinactive(OSI_VC_ARG(avc), acred)
- OSI_VC_DECL(avc);
- struct ucred *acred;
+OSI_VC_DECL(avc);
+ struct ucred *acred;
{
int s;
- OSI_VC_CONVERT(avc)
- vnode_t *vp = (vnode_t *)avc;
- int mapcnt = avc->mapcnt; /* We just clear off this many. */
-
+ OSI_VC_CONVERT(avc);
+ vnode_t *vp = (vnode_t *) avc;
+ int mapcnt = avc->mapcnt; /* We just clear off this many. */
+
AFS_STATCNT(afs_inactive);
s = VN_LOCK(vp);
/* In Irix 6.5, the last unmap of a dirty mmap'd file does not
* get an explicit vnode op. Instead we only find out at VOP_INACTIVE.
*/
- if (!afs_rwlock_nowait((vnode_t*)avc, VRWLOCK_WRITE)) {
+ if (!afs_rwlock_nowait((vnode_t *) avc, VRWLOCK_WRITE)) {
return VN_INACTIVE_CACHE;
}
if (NBObtainWriteLock(&avc->lock, 502)) {
AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
return VN_INACTIVE_CACHE;
}
- if (avc->states & CUnlinked) {
- if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
- avc->states |= CUnlinkedDel;
+ if (avc->f.states & CUnlinked) {
+ if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
+ avc->f.states |= CUnlinkedDel;
ReleaseWriteLock(&avc->lock);
AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
} else {
}
return VN_INACTIVE_CACHE;
}
- if ((avc->states & CDirty) || (avc->execsOrWriters > 0)) {
+ if ((avc->f.states & CDirty) || (avc->execsOrWriters > 0)) {
/* File either already has dirty chunks (CDirty) or was mapped at
* time in its life with the potential for being written into.
* Note that afs_close defers storebacks if the vnode's ref count
code = 0;
if (code) {
if (mapcnt) {
- cmn_err(CE_WARN, "AFS: Failed to store FID (%x:%lu.%lu.%lu) in VOP_INACTIVE, error = %d\n",
- (int)(avc->fid.Cell) & 0xffffffff,
- avc->fid.Fid.Volume,
- avc->fid.Fid.Vnode, avc->fid.Fid.Unique,
- code);
+ cmn_err(CE_WARN,
+ "AFS: Failed to store FID (%x:%lu.%lu.%lu) in VOP_INACTIVE, error = %d\n",
+ (int)(avc->f.fid.Cell) & 0xffffffff,
+ avc->f.fid.Fid.Volume, avc->f.fid.Fid.Vnode,
+ avc->f.fid.Fid.Unique, code);
}
- afs_InvalidateAllSegments(avc, 1);
+ afs_InvalidateAllSegments(avc);
}
s = VN_LOCK(vp);
- vp->v_count --;
+ vp->v_count--;
code = (vp->v_count == 0);
VN_UNLOCK(vp, s);
/* If the vnode is now in use by someone else, return early. */
}
#endif
- osi_Assert((avc->states & (CCore|CMAPPED)) == 0);
+ osi_Assert((avc->f.states & (CCore | CMAPPED)) == 0);
if (avc->cred) {
crfree(avc->cred);
* If someone unlinked a file and this is the last hurrah -
* nuke all the pages.
*/
- if (avc->m.LinkCount == 0) {
+ if (avc->f.m.LinkCount == 0) {
AFS_GUNLOCK();
- PTOSSVP(vp, (off_t)0, (off_t)MAXLONG);
+ PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
AFS_GLOCK();
}
-
#ifndef AFS_SGI65_ENV
osi_Assert(avc->mapcnt == 0);
- afs_chkpgoob(&avc->v, btoc(avc->m.Length));
+ afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
- avc->states &= ~CDirty; /* Give up on store-backs */
- if (avc->states & CUnlinked) {
- if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
- avc->states |= CUnlinkedDel;
+ avc->f.states &= ~CDirty; /* Give up on store-backs */
+ if (avc->f.states & CUnlinked) {
+ if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
+ avc->f.states |= CUnlinkedDel;
} else {
afs_remunlink(avc, 1); /* ignore any return code */
}
afs_reclaim(OSI_VC_DECL(avc), int flag)
{
#ifdef AFS_SGI64_ENV
- /* Get's called via VOP_RELCAIM in afs_FlushVCache to clear repl_vnodeops*/
+    /* Gets called via VOP_RECLAIM in afs_FlushVCache to clear repl_vnodeops */
return 0;
#else
panic("afs_reclaim");
#endif
}
-void afs_rwlock(OSI_VN_DECL(vp), AFS_RWLOCK_T flag)
+void
+afs_rwlock(OSI_VN_DECL(vp), AFS_RWLOCK_T flag)
{
- OSI_VN_CONVERT(vp)
- struct vcache *avc = (struct vcache *)vp;
+ OSI_VN_CONVERT(vp);
+ struct vcache *avc = VTOAFS(vp);
if (OSI_GET_LOCKID() == avc->vc_rwlockid) {
avc->vc_locktrips++;
avc->vc_rwlockid = OSI_GET_LOCKID();
}
-void afs_rwunlock(OSI_VN_DECL(vp), AFS_RWLOCK_T flag)
+void
+afs_rwunlock(OSI_VN_DECL(vp), AFS_RWLOCK_T flag)
{
- OSI_VN_CONVERT(vp)
- struct vcache *avc = (struct vcache *)vp;
+ OSI_VN_CONVERT(vp);
+ struct vcache *avc = VTOAFS(vp);
AFS_ASSERT_GLOCK();
osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
* does not match the corresponding lock flag. But they may start using this
* flag for a real rw lock at some time.
*/
-int afs_rwlock_nowait(vnode_t *vp, AFS_RWLOCK_T flag)
+int
+afs_rwlock_nowait(vnode_t * vp, AFS_RWLOCK_T flag)
{
- struct vcache *avc = (struct vcache *)vp;
+ struct vcache *avc = VTOAFS(vp);
AFS_ASSERT_GLOCK();
if (OSI_GET_LOCKID() == avc->vc_rwlockid) {
}
if (cpsema(&avc->vc_rwlock)) {
avc->vc_rwlockid = OSI_GET_LOCKID();
- return 1;
+ return 1;
}
return 0;
}
-#if defined(AFS_SGI64_ENV) && defined(CKPT)
-int afs_fid2(OSI_VC_DECL(avc), struct fid *fidp)
+#if defined(AFS_SGI64_ENV) && defined(CKPT) && !defined(_R5000_CVT_WAR)
+int
+afs_fid2(OSI_VC_DECL(avc), struct fid *fidp)
{
struct cell *tcell;
- afs_fid2_t *afid = (afs_fid2_t*)fidp;
- OSI_VC_CONVERT(avc)
+ afs_fid2_t *afid = (afs_fid2_t *) fidp;
+ OSI_VC_CONVERT(avc);
osi_Assert(sizeof(fid_t) >= sizeof(afs_fid2_t));
afid->af_len = sizeof(afs_fid2_t) - sizeof(afid->af_len);
- tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
+ tcell = afs_GetCell(avc->f.fid.Cell, READ_LOCK);
afid->af_cell = tcell->cellIndex & 0xffff;
afs_PutCell(tcell, READ_LOCK);
-
- afid->af_volid = avc->fid.Fid.Volume;
- afid->af_vno = avc->fid.Fid.Vnode;
- afid->af_uniq = avc->fid.Fid.Unique;
+
+ afid->af_volid = avc->f.fid.Fid.Volume;
+ afid->af_vno = avc->f.fid.Fid.Vnode;
+ afid->af_uniq = avc->f.fid.Fid.Unique;
return 0;
}
* return of ENOSYS would make the code fail over to VOP_FID. We can't let
* that happen, since we do a VN_HOLD there in the expectation that
* posthandle will be called to release the vnode.
+ *
+ * afs_fid2 is used to support the R5000 workarounds (_R5000_CVT_WAR)
*/
-int afs_fid2(OSI_VC_DECL(avc), struct fid *fidp)
+int
+afs_fid2(OSI_VC_DECL(avc), struct fid *fidp)
{
+#if defined(_R5000_CVT_WAR)
+ extern int R5000_cvt_war;
+
+ if (R5000_cvt_war)
+ return ENOSYS;
+ else
+ return EINVAL;
+#else
return EINVAL;
+#endif
}
#endif /* AFS_SGI64_ENV && CKPT */
* Drop the global lock here, since we may not actually do the call.
*/
void
-afs_chkpgoob(vnode_t *vp, pgno_t pgno)
+afs_chkpgoob(vnode_t * vp, pgno_t pgno)
{
#undef PGDEBUG
#ifdef PGDEBUG
#endif
#ifdef AFS_SGI64_ENV
-int mp_afs_open(bhv_desc_t *bhp, vnode_t **a, mode_t b, struct cred *c)
+int
+mp_afs_open(bhv_desc_t * bhp, vnode_t ** a, mode_t b, struct cred *c)
#else
-int mp_afs_open(vnode_t **a, mode_t b, struct cred *c)
+int
+mp_afs_open(vnode_t ** a, mode_t b, struct cred *c)
#endif
{
- int rv;
- AFS_GLOCK();
+ int rv;
+ AFS_GLOCK();
#ifdef AFS_SGI64_ENV
- rv = afs_lockedvnodeops.vop_open(bhp, a, b, c);
+ rv = afs_lockedvnodeops.vop_open(bhp, a, b, c);
#else
- rv = afs_lockedvnodeops.vop_open(a, b, c);
+ rv = afs_lockedvnodeops.vop_open(a, b, c);
#endif
- AFS_GUNLOCK();
- return rv;
+ AFS_GUNLOCK();
+ return rv;
}
#if defined(AFS_SGI64_ENV)
#if defined(AFS_SGI65_ENV)
-int mp_afs_close(AFS_MP_VC_ARG (*a), int b, lastclose_t c, struct cred *d)
+int
+mp_afs_close(AFS_MP_VC_ARG(*a), int b, lastclose_t c, struct cred *d)
#else
-int mp_afs_close(AFS_MP_VC_ARG (*a), int b, lastclose_t c, off_t d,
- struct cred *e, struct flid *f)
+int
+mp_afs_close(AFS_MP_VC_ARG(*a), int b, lastclose_t c, off_t d, struct cred *e,
+ struct flid *f)
#endif
#else
-int mp_afs_close(AFS_MP_VC_ARG (*a), int b, lastclose_t c, off_t d,
- struct cred *e)
+int
+mp_afs_close(AFS_MP_VC_ARG(*a), int b, lastclose_t c, off_t d, struct cred *e)
#endif
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_close(a, b, c, d
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_close(a, b, c, d
#if !defined(AFS_SGI65_ENV)
- , e
+ , e
#if defined(AFS_SGI64_ENV)
- , f
+ , f
#endif
#endif
- );
-
- AFS_GUNLOCK();
- return rv;
+ );
+
+ AFS_GUNLOCK();
+ return rv;
}
#ifdef AFS_SGI64_ENV
-int mp_afs_read(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d,
- struct flid *f)
+int
+mp_afs_read(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d,
+ struct flid *f)
#else
-int mp_afs_read(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d)
+int
+mp_afs_read(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d)
#endif
{
- int rv;
- AFS_GLOCK();
+ int rv;
+ AFS_GLOCK();
#ifdef AFS_SGI64_ENV
- rv = afs_lockedvnodeops.vop_read(a, b, c, d, f);
+ rv = afs_lockedvnodeops.vop_read(a, b, c, d, f);
#else
- rv = afs_lockedvnodeops.vop_read(a, b, c, d);
+ rv = afs_lockedvnodeops.vop_read(a, b, c, d);
#endif
- AFS_GUNLOCK();
- return rv;
+ AFS_GUNLOCK();
+ return rv;
}
#ifdef AFS_SGI64_ENV
-int mp_afs_write(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d,
- struct flid *f)
+int
+mp_afs_write(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d,
+ struct flid *f)
#else
-int mp_afs_write(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d)
+int
+mp_afs_write(AFS_MP_VC_ARG(*a), struct uio *b, int c, struct cred *d)
#endif
{
- int rv;
- AFS_GLOCK();
+ int rv;
+ AFS_GLOCK();
#ifdef AFS_SGI64_ENV
- rv = afs_lockedvnodeops.vop_write(a, b, c, d, f);
+ rv = afs_lockedvnodeops.vop_write(a, b, c, d, f);
#else
- rv = afs_lockedvnodeops.vop_write(a, b, c, d);
+ rv = afs_lockedvnodeops.vop_write(a, b, c, d);
#endif
- AFS_GUNLOCK();
- return rv;
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_ioctl(AFS_MP_VC_ARG(*a), int b, void *c, int d, struct cred *e,
- int *f
+int
+mp_afs_ioctl(AFS_MP_VC_ARG(*a), int b, void *c, int d, struct cred *e, int *f
#ifdef AFS_SGI65_ENV
- , struct vopbd *vbds
+ , struct vopbd *vbds
#endif
- )
+ )
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_ioctl(a, b, c, d, e, f
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_ioctl(a, b, c, d, e, f
#ifdef AFS_SGI65_ENV
- , vbds
+ , vbds
#endif
- );
- AFS_GUNLOCK();
- return rv;
+ );
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_fs_setfl(AFS_MP_VC_ARG(*a), int b, int c, struct cred *d)
+int
+mp_fs_setfl(AFS_MP_VC_ARG(*a), int b, int c, struct cred *d)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_setfl(a, b, c, d);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_setfl(a, b, c, d);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_getattr(AFS_MP_VC_ARG(*a), struct vattr *b, int c, struct cred *d)
+
+int
+mp_afs_getattr(AFS_MP_VC_ARG(*a), struct vattr *b, int c, struct cred *d)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_getattr(a, b, c, d);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_getattr(a, b, c, d);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_setattr(AFS_MP_VC_ARG(*a), struct vattr *b, int c, struct cred *d)
+
+int
+mp_afs_setattr(AFS_MP_VC_ARG(*a), struct vattr *b, int c, struct cred *d)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_setattr(a, b, c, d);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_setattr(a, b, c, d);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_access(AFS_MP_VC_ARG(*a), int b,
+int
+mp_afs_access(AFS_MP_VC_ARG(*a), int b,
#ifndef AFS_SGI65_ENV
- int c,
+ int c,
#endif
- struct cred *d)
+ struct cred *d)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_access(a, b,
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_access(a, b,
#ifndef AFS_SGI65_ENV
- c,
+ c,
#endif
- d);
- AFS_GUNLOCK();
- return rv;
+ d);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_lookup(AFS_MP_VC_ARG(*a), char *b, vnode_t **c, struct pathname *d,
- int e, vnode_t *f, struct cred *g)
+int
+mp_afs_lookup(AFS_MP_VC_ARG(*a), char *b, vnode_t ** c, struct pathname *d,
+ int e, vnode_t * f, struct cred *g)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_lookup(a, b, c, d, e, f, g);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_lookup(a, b, c, d, e, f, g);
+ AFS_GUNLOCK();
+ return rv;
}
+
#ifdef AFS_SGI64_ENV
-int mp_afs_create(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, int d, int e,
- vnode_t **f, struct cred *g)
+int
+mp_afs_create(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, int d, int e,
+ vnode_t ** f, struct cred *g)
#else
-int mp_afs_create(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, enum vcexcl d,
- int e, vnode_t **f, struct cred *g)
+int
+mp_afs_create(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, enum vcexcl d,
+ int e, vnode_t ** f, struct cred *g)
#endif
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_create(a, b, c, d, e, f, g);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_create(a, b, c, d, e, f, g);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_remove(AFS_MP_VC_ARG(*a), char *b, struct cred *c)
+
+int
+mp_afs_remove(AFS_MP_VC_ARG(*a), char *b, struct cred *c)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_remove(a, b, c);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_remove(a, b, c);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_link(AFS_MP_VC_ARG(*a), vnode_t *b, char *c, struct cred *d)
+
+int
+mp_afs_link(AFS_MP_VC_ARG(*a), vnode_t * b, char *c, struct cred *d)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_link(a, b, c, d);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_link(a, b, c, d);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_rename(AFS_MP_VC_ARG(*a), char *b, vnode_t *c, char *d,
- struct pathname *e, struct cred *f)
+
+int
+mp_afs_rename(AFS_MP_VC_ARG(*a), char *b, vnode_t * c, char *d,
+ struct pathname *e, struct cred *f)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_rename(a, b, c, d, e, f);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_rename(a, b, c, d, e, f);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_mkdir(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, vnode_t **d,
- struct cred *e)
+
+int
+mp_afs_mkdir(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, vnode_t ** d,
+ struct cred *e)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_mkdir(a, b, c, d, e);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_mkdir(a, b, c, d, e);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_rmdir(AFS_MP_VC_ARG(*a), char *b, vnode_t *c, struct cred *d)
+
+int
+mp_afs_rmdir(AFS_MP_VC_ARG(*a), char *b, vnode_t * c, struct cred *d)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_rmdir(a, b, c, d);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_rmdir(a, b, c, d);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_readdir(AFS_MP_VC_ARG(*a), struct uio *b, struct cred *c, int *d)
+
+int
+mp_afs_readdir(AFS_MP_VC_ARG(*a), struct uio *b, struct cred *c, int *d)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_readdir(a, b, c, d);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_readdir(a, b, c, d);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_symlink(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, char *d,
- struct cred *e)
+
+int
+mp_afs_symlink(AFS_MP_VC_ARG(*a), char *b, struct vattr *c, char *d,
+ struct cred *e)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_symlink(a, b, c, d, e);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_symlink(a, b, c, d, e);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_readlink(AFS_MP_VC_ARG(*a), struct uio *b, struct cred *c)
+
+int
+mp_afs_readlink(AFS_MP_VC_ARG(*a), struct uio *b, struct cred *c)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_readlink(a, b, c);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_readlink(a, b, c);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_fsync(AFS_MP_VC_ARG(*a), int b, struct cred *c
+int
+mp_afs_fsync(AFS_MP_VC_ARG(*a), int b, struct cred *c
#ifdef AFS_SGI65_ENV
- , off_t start, off_t stop
+ , off_t start, off_t stop
#endif
- )
+ )
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_fsync(a, b, c
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_fsync(a, b, c
#ifdef AFS_SGI65_ENV
- , start, stop
+ , start, stop
#endif
- );
- AFS_GUNLOCK();
- return rv;
+ );
+ AFS_GUNLOCK();
+ return rv;
}
-void mp_afs_inactive(AFS_MP_VC_ARG(*a), struct cred *b)
+void
+mp_afs_inactive(AFS_MP_VC_ARG(*a), struct cred *b)
{
- AFS_GLOCK();
- afs_lockedvnodeops.vop_inactive(a, b);
- AFS_GUNLOCK();
- return;
+ AFS_GLOCK();
+ afs_lockedvnodeops.vop_inactive(a, b);
+ AFS_GUNLOCK();
+ return;
}
-int mp_afs_fid(AFS_MP_VC_ARG(*a), struct fid **b)
+
+int
+mp_afs_fid(AFS_MP_VC_ARG(*a), struct fid **b)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_fid(a, b);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_fid(a, b);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_fid2(AFS_MP_VC_ARG(*a), struct fid *b)
+
+int
+mp_afs_fid2(AFS_MP_VC_ARG(*a), struct fid *b)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_fid2(a, b);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_fid2(a, b);
+ AFS_GUNLOCK();
+ return rv;
}
-void mp_afs_rwlock(AFS_MP_VC_ARG(*a), AFS_RWLOCK_T b)
+
+void
+mp_afs_rwlock(AFS_MP_VC_ARG(*a), AFS_RWLOCK_T b)
{
- AFS_GLOCK();
- afs_rwlock(a, VRWLOCK_WRITE);
- AFS_GUNLOCK();
+ AFS_GLOCK();
+ afs_rwlock(a, VRWLOCK_WRITE);
+ AFS_GUNLOCK();
}
-void mp_afs_rwunlock(AFS_MP_VC_ARG(*a), AFS_RWLOCK_T b)
+
+void
+mp_afs_rwunlock(AFS_MP_VC_ARG(*a), AFS_RWLOCK_T b)
{
- AFS_GLOCK();
- afs_rwunlock(a, VRWLOCK_WRITE);
- AFS_GUNLOCK();
+ AFS_GLOCK();
+ afs_rwunlock(a, VRWLOCK_WRITE);
+ AFS_GUNLOCK();
}
-int mp_afs_seek(AFS_MP_VC_ARG(*a), off_t b, off_t*c)
+
+int
+mp_afs_seek(AFS_MP_VC_ARG(*a), off_t b, off_t * c)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_seek(a, b, c);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_seek(a, b, c);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_fs_cmp(AFS_MP_VC_ARG(*a), vnode_t *b)
+
+int
+mp_fs_cmp(AFS_MP_VC_ARG(*a), vnode_t * b)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_cmp(a, b);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_cmp(a, b);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_frlock(AFS_MP_VC_ARG(*a), int b, struct flock *c, int d, off_t e,
+int
+mp_afs_frlock(AFS_MP_VC_ARG(*a), int b, struct flock *c, int d, off_t e,
#ifdef AFS_SGI65_ENV
- vrwlock_t vrwlock,
+ vrwlock_t vrwlock,
#endif
- struct cred *f)
+ struct cred *f)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_frlock(a, b, c, d, e,
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_frlock(a, b, c, d, e,
#ifdef AFS_SGI65_ENV
- vrwlock,
+ vrwlock,
#endif
- f);
- AFS_GUNLOCK();
- return rv;
+ f);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_realvp(AFS_MP_VC_ARG(*a), vnode_t **b)
+int
+mp_afs_realvp(AFS_MP_VC_ARG(*a), vnode_t ** b)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_realvp(a, b);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_realvp(a, b);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_bmap(AFS_MP_VC_ARG(*a), off_t b, ssize_t c, int d, struct cred *e,
- struct bmapval *f, int *g)
+
+int
+mp_afs_bmap(AFS_MP_VC_ARG(*a), off_t b, ssize_t c, int d, struct cred *e,
+ struct bmapval *f, int *g)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_bmap(a, b, c, d, e, f, g);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_bmap(a, b, c, d, e, f, g);
+ AFS_GUNLOCK();
+ return rv;
}
-void mp_afs_strategy(AFS_MP_VC_ARG(*a), struct buf *b)
+void
+mp_afs_strategy(AFS_MP_VC_ARG(*a), struct buf *b)
{
- int rv;
- AFS_GLOCK();
- afs_lockedvnodeops.vop_strategy(a, b);
- AFS_GUNLOCK();
- return;
+ int rv;
+ AFS_GLOCK();
+ afs_lockedvnodeops.vop_strategy(a, b);
+ AFS_GUNLOCK();
+ return;
}
#ifdef AFS_SGI65_ENV
-int mp_afs_map(AFS_MP_VC_ARG(*a), off_t b, size_t c, mprot_t d,
- u_int e, struct cred *f, vnode_t **g)
+int
+mp_afs_map(AFS_MP_VC_ARG(*a), off_t b, size_t c, mprot_t d, u_int e,
+ struct cred *f, vnode_t ** g)
#else
-int mp_afs_map(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, char ** d,
- size_t e, u_int f, u_int g, u_int h, struct cred *i)
+int
+mp_afs_map(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, char **d, size_t e,
+ u_int f, u_int g, u_int h, struct cred *i)
#endif
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_map(a, b, c, d, e, f, g
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_map(a, b, c, d, e, f, g
#ifndef AFS_SGI65_ENV
- , h, i
+ , h, i
#endif
- );
- AFS_GUNLOCK();
- return rv;
+ );
+ AFS_GUNLOCK();
+ return rv;
}
#ifndef AFS_SGI65_ENV
/* As of Irix 6.5, addmap and delmap are only for devices */
-int mp_afs_addmap(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, addr_t d,
- size_t e, u_int f, u_int g, u_int h, struct cred *i)
+int
+mp_afs_addmap(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, addr_t d,
+ size_t e, u_int f, u_int g, u_int h, struct cred *i)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_addmap(a, b, c, d, e, f, g, h, i);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_addmap(a, b, c, d, e, f, g, h, i);
+ AFS_GUNLOCK();
+ return rv;
}
-int mp_afs_delmap(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, addr_t d,
- size_t e, u_int f, u_int g, u_int h, struct cred *i)
+int
+mp_afs_delmap(AFS_MP_VC_ARG(*a), off_t b, struct pregion *c, addr_t d,
+ size_t e, u_int f, u_int g, u_int h, struct cred *i)
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_delmap(a, b, c, d, e, f, g, h, i);
- AFS_GUNLOCK();
- return rv;
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_delmap(a, b, c, d, e, f, g, h, i);
+ AFS_GUNLOCK();
+ return rv;
}
#endif /* ! AFS_SGI65_ENV */
-int mp_fs_poll(AFS_MP_VC_ARG(*a), short b, int c, short *d,
- struct pollhead **e
+int
+mp_fs_poll(AFS_MP_VC_ARG(*a), short b, int c, short *d, struct pollhead **e
#ifdef AFS_SGI65_ENV
- , unsigned int *f
+ , unsigned int *f
#endif
- )
+ )
{
- int rv;
- AFS_GLOCK();
- rv = afs_lockedvnodeops.vop_poll(a, b, c, d, e
+ int rv;
+ AFS_GLOCK();
+ rv = afs_lockedvnodeops.vop_poll(a, b, c, d, e
#ifdef AFS_SGI65_ENV
- , f
+ , f
#endif
- );
- AFS_GUNLOCK();
- return rv;
+ );
+ AFS_GUNLOCK();
+ return rv;
}
struct vnodeops Afs_vnodeops = {
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
- BHV_IDENTITY_INIT_POSITION(VNODE_POSITION_BASE),
+ BHV_IDENTITY_INIT_POSITION(VNODE_POSITION_BASE),
#else
- VNODE_POSITION_BASE,
-#endif
-#endif
- mp_afs_open,
- mp_afs_close,
- mp_afs_read,
- mp_afs_write,
- mp_afs_ioctl,
- mp_fs_setfl,
- mp_afs_getattr,
- mp_afs_setattr,
- mp_afs_access,
- mp_afs_lookup,
- mp_afs_create,
- mp_afs_remove,
- mp_afs_link,
- mp_afs_rename,
- mp_afs_mkdir,
- mp_afs_rmdir,
- mp_afs_readdir,
- mp_afs_symlink,
- mp_afs_readlink,
- mp_afs_fsync,
- mp_afs_inactive,
- mp_afs_fid,
- mp_afs_fid2,
- mp_afs_rwlock,
- mp_afs_rwunlock,
- mp_afs_seek,
- mp_fs_cmp,
- mp_afs_frlock,
- fs_nosys, /* realvp */
- mp_afs_bmap,
- mp_afs_strategy,
- mp_afs_map,
+ VNODE_POSITION_BASE,
+#endif
+#endif
+ mp_afs_open,
+ mp_afs_close,
+ mp_afs_read,
+ mp_afs_write,
+ mp_afs_ioctl,
+ mp_fs_setfl,
+ mp_afs_getattr,
+ mp_afs_setattr,
+ mp_afs_access,
+ mp_afs_lookup,
+ mp_afs_create,
+ mp_afs_remove,
+ mp_afs_link,
+ mp_afs_rename,
+ mp_afs_mkdir,
+ mp_afs_rmdir,
+ mp_afs_readdir,
+ mp_afs_symlink,
+ mp_afs_readlink,
+ mp_afs_fsync,
+ mp_afs_inactive,
+ mp_afs_fid,
+ mp_afs_fid2,
+ mp_afs_rwlock,
+ mp_afs_rwunlock,
+ mp_afs_seek,
+ mp_fs_cmp,
+ mp_afs_frlock,
+ fs_nosys, /* realvp */
+ mp_afs_bmap,
+ mp_afs_strategy,
+ mp_afs_map,
#ifdef AFS_SGI65_ENV
- fs_noerr, /* addmap - devices only */
- fs_noerr, /* delmap - devices only */
+ fs_noerr, /* addmap - devices only */
+ fs_noerr, /* delmap - devices only */
#else
- mp_afs_addmap,
- mp_afs_delmap,
-#endif
- mp_fs_poll, /* poll */
- fs_nosys, /* dump */
- fs_pathconf,
- fs_nosys, /* allocstore */
- fs_nosys, /* fcntl */
- afs_reclaim, /* reclaim */
- fs_nosys, /* attr_get */
- fs_nosys, /* attr_set */
- fs_nosys, /* attr_remove */
- fs_nosys, /* attr_list */
+ mp_afs_addmap,
+ mp_afs_delmap,
+#endif
+ mp_fs_poll, /* poll */
+ fs_nosys, /* dump */
+ fs_pathconf,
+ fs_nosys, /* allocstore */
+ fs_nosys, /* fcntl */
+ afs_reclaim, /* reclaim */
+ fs_nosys, /* attr_get */
+ fs_nosys, /* attr_set */
+ fs_nosys, /* attr_remove */
+ fs_nosys, /* attr_list */
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
- fs_cover,
- (vop_link_removed_t)fs_noval,
- fs_vnode_change,
- fs_tosspages,
- fs_flushinval_pages,
- fs_flush_pages,
- fs_invalfree_pages,
- fs_pages_sethole,
- (vop_commit_t)fs_nosys,
- (vop_readbuf_t)fs_nosys,
- fs_strgetmsg,
- fs_strputmsg,
+ fs_cover,
+ (vop_link_removed_t) fs_noval,
+ fs_vnode_change,
+ fs_tosspages,
+ fs_flushinval_pages,
+ fs_flush_pages,
+ fs_invalfree_pages,
+ fs_pages_sethole,
+ (vop_commit_t) fs_nosys,
+ (vop_readbuf_t) fs_nosys,
+ fs_strgetmsg,
+ fs_strputmsg,
#else
- fs_mount,
+ fs_mount,
#endif
#endif
};
#endif /* MP */
-#if defined(AFS_SGI62_ENV) && defined(AFS_SGI_DUAL_FS_CACHE)
-/* Support for EFS and XFS caches. The assumption here is that the size of
+/* Support for XFS caches. The assumption here is that the size of
* a cache file also does not exceed 32 bits.
*/
/* Initialized in osi_InitCacheFSType(). Used to determine inode type. */
-int afs_CacheFSType = -1;
-vnodeops_t *afs_efs_vnodeopsp;
vnodeops_t *afs_xfs_vnodeopsp;
-vnode_t * (*afs_IGetVnode)(ino_t);
-
-extern vnode_t *afs_EFSIGetVnode(ino_t); /* defined in osi_file.c */
-extern vnode_t *afs_XFSIGetVnode(ino_t); /* defined in osi_file.c */
-
-extern afs_lock_t afs_xosi; /* lock is for tvattr */
-
-/* Initialize the cache operations. Called while initializing cache files. */
-void afs_InitDualFSCacheOps(struct vnode *vp)
-{
- static int inited = 0;
- struct vfssw *swp;
- int found = 0;
-
- if (inited)
- return;
- inited = 1;
+extern afs_lock_t afs_xosi; /* lock is for tvattr */
-#ifdef AFS_SGI_EFS_IOPS_ENV
- swp = vfs_getvfssw("efs");
- if (swp) {
- afs_efs_vnodeopsp = swp->vsw_vnodeops;
- if(vp && vp->v_op == afs_efs_vnodeopsp) {
- afs_CacheFSType = AFS_SGI_EFS_CACHE;
- afs_IGetVnode = afs_EFSIGetVnode;
- found = 1;
- }
- }
-#endif /* AFS_SGI_EFS_IOPS_ENV */
-
- swp = vfs_getvfssw("xfs");
- if (swp) {
- afs_xfs_vnodeopsp = swp->vsw_vnodeops;
- if (!found) {
- if (vp &&vp->v_op == afs_xfs_vnodeopsp) {
- afs_CacheFSType = AFS_SGI_XFS_CACHE;
- afs_IGetVnode = afs_XFSIGetVnode;
- found = 1;
- }
- }
- }
-
- if (vp && !found)
- osi_Panic("osi_InitCacheFSType: Can't find fstype for vnode 0x%llx\n",
- vp);
-}
-
-ino_t VnodeToIno(vnode_t *vp)
+ino_t
+VnodeToIno(vnode_t * vp)
{
int code;
struct vattr vattr;
- MObtainWriteLock(&afs_xosi,579);
- vattr.va_mask = AT_FSID|AT_NODEID; /* quick return using this mask. */
+ ObtainWriteLock(&afs_xosi, 579);
+ vattr.va_mask = AT_FSID | AT_NODEID; /* quick return using this mask. */
AFS_GUNLOCK();
AFS_VOP_GETATTR(vp, &vattr, 0, OSI_GET_CURRENT_CRED(), code);
AFS_GLOCK();
if (code) {
osi_Panic("VnodeToIno");
}
- MReleaseWriteLock(&afs_xosi);
+ ReleaseWriteLock(&afs_xosi);
return vattr.va_nodeid;
}
-dev_t VnodeToDev(vnode_t *vp)
+dev_t
+VnodeToDev(vnode_t * vp)
{
int code;
struct vattr vattr;
- MObtainWriteLock(&afs_xosi,580);
- vattr.va_mask = AT_FSID|AT_NODEID; /* quick return using this mask. */
+ ObtainWriteLock(&afs_xosi, 580);
+ vattr.va_mask = AT_FSID | AT_NODEID; /* quick return using this mask. */
AFS_GUNLOCK();
AFS_VOP_GETATTR(vp, &vattr, 0, OSI_GET_CURRENT_CRED(), code);
AFS_GLOCK();
if (code) {
osi_Panic("VnodeToDev");
}
- MReleaseWriteLock(&afs_xosi);
- return (dev_t)vattr.va_fsid;
+ ReleaseWriteLock(&afs_xosi);
+ return (dev_t) vattr.va_fsid;
}
-off_t VnodeToSize(vnode_t *vp)
+off_t
+VnodeToSize(vnode_t * vp)
{
int code;
struct vattr vattr;
- MObtainWriteLock(&afs_xosi,581);
+ ObtainWriteLock(&afs_xosi, 581);
vattr.va_mask = AT_SIZE;
AFS_GUNLOCK();
AFS_VOP_GETATTR(vp, &vattr, 0, OSI_GET_CURRENT_CRED(), code);
if (code) {
osi_Panic("VnodeToSize");
}
- MReleaseWriteLock(&afs_xosi);
+ ReleaseWriteLock(&afs_xosi);
return vattr.va_size;
}
-#endif /* 6.2 and dual fs cache */
#endif /* AFS_SGI62_ENV */