Try to test builds using gmake -j # MAKE="gmake -j #"; it seems like a good
way to find missing or order-dependent dependency rules. (Is there a better
way to do this?)
+
+-- Prototyping and Style --
+Prototypes for all source files in a given dir DDD should be placed
+in the file DDD/DDD_prototypes.h. All externally used routines and
+variables (either part of the API or used by other source files)
+should be prototyped in this file.
+
+Each prototype should be a full prototype, with argument and return
+types. (It should not generate a warning with gcc -Wstrict-prototypes.)
+
+Format of the prototype files should look like:
+
+ Standard Copyright Notice
+
+ #ifndef AFS_SRC_DDD_PROTO_H
+ #define AFS_SRC_DDD_PROTO_H
+
+ /* filename.c */
+ prototypes
+
+ /* filename.c */
+ prototypes
+
+ #endif /* AFS_SRC_DDD_PROTO_H */
+
+In most of the existing prototype files, the define is DDD_PROTOTYPES_H,
+which is probably OK as well.
+
+Routines should be declared in ANSI style. If, at some later date, it
+is determined that prototypes don't work properly on some platform, we
+can use ansi2knr during the compile.
+
+ rettype routine(argtype arg)
+ {
+
+ }
+
+All routines should have an explicit return type (void if nothing is
+returned) and should declare (void) if no arguments are taken.
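+
+For example (an illustrative sketch only; the routine name is made up,
+not part of the tree):
+
+    void example_Shutdown(void)
+    {
+        /* explicit void return type, explicit (void) argument list */
+    }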
+
+Header files should not contain macros or other definitions unless they
+are used across multiple source files.
+
+All routines should be declared static if they are not used outside that
+source file.
+
+Compiles on machines using gcc should strive to build cleanly with
+-Wstrict-prototypes -Werror. (This may take a while.)
+
+Routines shall be defined in the source prior to use where possible;
+static routines should be prototyped in a block at the top of the file.
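+
+For example (a sketch only; the routine name is made up):
+
+    /* prototyped in a block at the top of the file, since it is static */
+    static int example_CountRefs(int arefs);
+
+    static int example_CountRefs(int arefs)
+    {
+        return arefs + 1;
+    }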
+
+If you make a routine or variable static, be sure to remove it from
+the AIX .exp files.
+
+Suggested compiler flags:
+ gcc: -Wall -Wstrict-prototypes
+ Solaris Workshop CC: -fd -v
+ (You might not want the -fd; it isn't really useful, since it just
+ complains about K&R-style functions, but -v gives useful info.)
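+
+For example (an illustrative invocation only):
+
+    gcc -Wall -Wstrict-prototypes -c filename.c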
#undef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
#undef STRUCT_INODE_HAS_I_DEVICES
+#undef HAVE_STRUCT_BUF
+
/* glue for RedHat kernel bug */
#undef ENABLE_REDHAT_BUILDSYS
DARWIN_INFOFILE=afs.${AFS_SYSNAME}.plist
;;
esac
+
+AC_MSG_CHECKING(for definition of struct buf)
+AC_CACHE_VAL(ac_cv_have_struct_buf, [
+ ac_cv_have_struct_buf=no
+ AC_TRY_COMPILE(
+ [#include <sys/buf.h>],
+ [struct buf x;
+ printf("%d\n", sizeof(x));],
+ ac_cv_have_struct_buf=yes,)
+ ]
+)
+AC_MSG_RESULT($ac_cv_have_struct_buf)
+if test "$ac_cv_have_struct_buf" = yes; then
+ AC_DEFINE(HAVE_STRUCT_BUF)
+fi
+
+
AC_CACHE_VAL(ac_cv_sockaddr_len,
[
AC_MSG_CHECKING([if struct sockaddr has sa_len field])
extern struct vfs *afs_cacheVfsp;
-void *osi_UFSOpen(ainode)
- afs_int32 ainode;
+void *osi_UFSOpen(afs_int32 ainode)
{
struct inode *ip;
register struct osi_file *afile = NULL;
extern struct vfs *rootvfs;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
extern int cacheDiskType;
afs_int32 code = 0;
int dummy;
return (void *)afile;
}
-afs_osi_Stat(afile, astat)
- register struct osi_file *afile;
- register struct osi_stat *astat; {
+int afs_osi_Stat(register struct osi_file *afile, register struct osi_stat *astat)
+{
register afs_int32 code;
struct vattr tvattr;
AFS_STATCNT(osi_Stat);
return code;
}
-osi_UFSClose(afile)
- register struct osi_file *afile;
+int osi_UFSClose(register struct osi_file *afile)
{
AFS_STATCNT(osi_Close);
if(afile->vnode) {
return 0;
}
-osi_UFSTruncate(afile, asize)
- register struct osi_file *afile;
- afs_int32 asize; {
+int osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
struct vattr tvattr;
register afs_int32 code;
return code;
}
-void osi_DisableAtimes(avp)
-struct vnode *avp;
+void osi_DisableAtimes(struct vnode *avp)
{
struct inode *ip = VTOIP(avp);
ip->i_flag &= ~IACC;
/* Generic read interface */
-afs_osi_Read(afile, offset, aptr, asize)
- register struct osi_file *afile;
- int offset;
- char *aptr;
- afs_int32 asize; {
+int afs_osi_Read(register struct osi_file *afile, int offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
unsigned int resid;
register afs_int32 code;
}
/* Generic write interface */
-afs_osi_Write(afile, offset, aptr, asize)
- register struct osi_file *afile;
- char *aptr;
- afs_int32 offset;
- afs_int32 asize; {
+int afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
unsigned int resid;
register afs_int32 code;
/* This work should be handled by physstrat in ca/machdep.c.
This routine written from the RT NFS port strategy routine.
It has been generalized a bit, but should still be pretty clear. */
-int afs_osi_MapStrategy(aproc, bp)
- int (*aproc)();
- register struct buf *bp;
+int afs_osi_MapStrategy(int (*aproc)(), register struct buf *bp)
{
afs_int32 returnCode;
-void
-shutdown_osifile()
+void shutdown_osifile(void)
{
extern int afs_cold_shutdown;
AFS_STATCNT(devtovfs);
a.dev = dev;
- a.ans = (struct vfs *)0;
+ a.ans = NULL;
vfs_search(devtovfs_func, &a);
return a.ans;
}
SYSENT(icreate, (dev, near_inode, param1, param2, param3, param4), ) {
struct inode *ip, *newip, *pip;
register int err, rval1, rc=0;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
extern struct vfs *rootvfs;
register struct vfs *vfsp;
struct vfs *nvfsp = NULL;
SYSENT(iopen, (dev, inode, usrmod), ) {
struct file *fp;
register struct inode *ip;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
extern struct fileops vnodefops;
register struct vfs *vfsp;
int fd;
SYSENT(iincdec, (dev, inode, inode_p1, amount), ) {
register struct inode *ip;
char error;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
int dummy;
AFS_STATCNT(afs_syscall_iincdec);
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
-void afs_osi_Wakeup(char *event);
-void afs_osi_Sleep(char *event);
static char waitV;
#define relevent(evp) ((evp)->refcount--)
-void afs_osi_Sleep(char *event)
+void afs_osi_Sleep(void *event)
{
struct afs_event *evp;
int seq;
relevent(evp);
}
-int afs_osi_SleepSig(char *event)
+int afs_osi_SleepSig(void *event)
{
afs_osi_Sleep(event);
return 0;
}
-void afs_osi_Wakeup(char *event)
+void afs_osi_Wakeup(void *event)
{
struct afs_event *evp;
credp = crref();
if (!(code = afs_InitReq(&treq, credp)) &&
!(code = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
/* we really want this to stay around */
if (tvp) {
afs_globalVp = tvp;
* is not dropped and re-acquired for any platform. It may be that *slept is
* therefore obsolescent.
*/
-int
-osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+int osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
if (avc->vrefCount != 0)
return EBUSY;
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
-void
-osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+void osi_VM_StoreAllSegments(struct vcache *avc)
{
if (avc->vmh) {
/*
avc->execsOrWriters--;
AFS_RELE(AFSTOV(avc));
crfree((struct ucred *)avc->linkData);
- avc->linkData = (char *)0;
+ avc->linkData = NULL;
}
}
}
* Since we drop and re-obtain the lock, we can't guarantee that there won't
* be some pages around when we return, newly created by concurrent activity.
*/
-void
-osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred,
+ int sync)
{
if (avc->segid) {
ReleaseWriteLock(&avc->lock);
*
* Locking: No lock is held, not even the global lock.
*/
-void
-osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
{
if (avc->segid) {
vm_flushp(avc->segid, 0, MAXFSIZE/PAGESIZE - 1);
* activeV is raised. This is supposed to block pageins, but at present
* it only works on Solaris.
*/
-void
-osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
if (avc->segid) {
int firstpage = (alen + PAGESIZE-1)/PAGESIZE;
#include "../afs/icl.h"
#include "../afs/prs_fs.h"
#include "../h/flock.h"
+#include "../afs/afsincludes.h"
/*
crhold(cred);
if (vcp->credp) {
struct ucred *crp = vcp->credp;
- vcp->credp = (struct ucred *)0;
+ vcp->credp = NULL;
crfree(crp);
}
vcp->credp = cred;
struct ucred *crp;
UpgradeSToWLock(&vcp->lock, 508);
crp = vcp->credp;
- vcp->credp = (struct ucred *)0;
+ vcp->credp = NULL;
ConvertWToSLock(&vcp->lock);
crfree(crp);
}
* Ensure that all comparable buffers are grouped contiguously.
* Later on, we'll merge adjacent buffers into a single request.
*/
- firstComparable = (struct buf *) 0;
+ firstComparable = NULL;
lbp = &afs_asyncbuf;
for(qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
if (EFS_COMPARABLE(tbp, qbp)) {
/* do the insert before qbp now */
tbp->av_forw = *lbp;
*lbp = tbp;
- if (firstComparable == (struct buf *) 0) {
+ if (firstComparable == NULL) {
/* next we're going to do all sorts of buffer merging tricks, but
* here we know we're the only COMPARABLE block in the
* afs_asyncbuf list, so we just skip that and continue with
osi_Panic("VnodeToDev called before cacheops initialized\n");
}
-void *osi_UFSOpen(ainode)
- afs_int32 ainode;
+void *osi_UFSOpen(afs_int32 ainode)
{
struct vnode *vp;
struct vattr va;
return (void *)afile;
}
-afs_osi_Stat(afile, astat)
- register struct osi_file *afile;
- register struct osi_stat *astat; {
+int afs_osi_Stat(register struct osi_file *afile, register struct osi_stat *astat)
+{
register afs_int32 code;
struct vattr tvattr;
AFS_STATCNT(osi_Stat);
return code;
}
-osi_UFSClose(afile)
- register struct osi_file *afile;
- {
+int osi_UFSClose(register struct osi_file *afile)
+{
AFS_STATCNT(osi_Close);
if(afile->vnode) {
AFS_RELE(afile->vnode);
osi_FreeSmallSpace(afile);
return 0;
- }
+}
-osi_UFSTruncate(afile, asize)
- register struct osi_file *afile;
- afs_int32 asize; {
+int osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
struct vattr tvattr;
register afs_int32 code;
return code;
}
-void osi_DisableAtimes(avp)
-struct vnode *avp;
+void osi_DisableAtimes(struct vnode *avp)
{
/* Generic read interface */
-afs_osi_Read(afile, offset, aptr, asize)
- register struct osi_file *afile;
- int offset;
- char *aptr;
- afs_int32 asize; {
+int afs_osi_Read(register struct osi_file *afile, int offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
unsigned int resid;
register afs_int32 code;
}
/* Generic write interface */
-afs_osi_Write(afile, offset, aptr, asize)
- register struct osi_file *afile;
- char *aptr;
- afs_int32 offset;
- afs_int32 asize; {
+int afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
unsigned int resid;
register afs_int32 code;
-void
-shutdown_osifile()
+void shutdown_osifile(void)
{
extern int afs_cold_shutdown;
{
struct file *fp;
struct inode *ip;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
int dummy;
int fd;
extern struct fileops vnops;
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
-void afs_osi_Wakeup(char *event);
-void afs_osi_Sleep(char *event);
static char waitV;
#define relevent(evp) ((evp)->refcount--)
-void afs_osi_Sleep(char *event)
+void afs_osi_Sleep(void *event)
{
struct afs_event *evp;
int seq;
relevent(evp);
}
-int afs_osi_SleepSig(char *event)
+int afs_osi_SleepSig(void *event)
{
afs_osi_Sleep(event);
return 0;
}
-void afs_osi_Wakeup(char *event)
+void afs_osi_Wakeup(void *event)
{
struct afs_event *evp;
if (!(error = afs_InitReq(&treq, &cr)) &&
!(error = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
/* we really want this to stay around */
if (tvp) {
afs_globalVp = tvp;
*
* OSF/1 Locking: VN_LOCK has been called.
*/
-int
-osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+int osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
struct vnode *vp=AFSTOV(avc);
#ifdef AFS_DARWIN14_ENV
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
-void
-osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+void osi_VM_StoreAllSegments(struct vcache *avc)
{
struct vnode *vp=AFSTOV(avc);
ReleaseWriteLock(&avc->lock);
* Since we drop and re-obtain the lock, we can't guarantee that there won't
* be some pages around when we return, newly created by concurrent activity.
*/
-void
-osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred,
+ int sync)
{
struct vnode *vp=AFSTOV(avc);
void *object;
/* XXX this seems to not be referenced anywhere. *somebody* ought to be calling
this, and also making sure that ubc's idea of the filesize is right more
often */
-void
-osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
{
struct vnode *vp=AFSTOV(avc);
void *object;
* activeV is raised. This is supposed to block pageins, but at present
* it only works on Solaris.
*/
-void
-osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
struct vnode *vp=AFSTOV(avc);
if (UBCINFOEXISTS(vp)) {
}
}
-extern struct AFS_UCRED afs_osi_cred;
-extern afs_rwlock_t afs_xvcache;
/* vnreclaim and vinactive are probably not aggressive enough to keep
enough afs vcaches free, so we try to do some of it ourselves */
/* XXX there's probably not nearly enough locking here */
-void osi_VM_TryReclaim(avc, slept)
- struct vcache *avc;
- int *slept;
+void osi_VM_TryReclaim(struct vcache *avc, int *slept)
{
struct proc *p=current_proc();
struct vnode *vp=AFSTOV(avc);
ObtainReadLock(&afs_xvcache);
}
-void osi_VM_NukePages(struct vnode *vp, off_t offset, off_t size) {
-
+void osi_VM_NukePages(struct vnode *vp, off_t offset, off_t size)
+{
void *object;
struct vcache *avc = VTOAFS(vp);
ubc_setsize(vp, size);
#endif
#endif
-
}
-int osi_VM_Setup(struct vcache *avc) {
+
+int osi_VM_Setup(struct vcache *avc)
+{
int error;
struct vnode *vp=AFSTOV(avc);
extern struct mount *afs_cacheVfsp;
-void *osi_UFSOpen(ainode)
- afs_int32 ainode;
+void *osi_UFSOpen(afs_int32 ainode)
{
static struct vnode *tags_vnode = NULL;
struct inode *ip;
return afile;
}
-afs_osi_Stat(afile, astat)
- register struct osi_file *afile;
- register struct osi_stat *astat; {
+int afs_osi_Stat(register struct osi_file *afile, register struct osi_stat *astat)
+{
register afs_int32 code;
struct vattr tvattr;
AFS_STATCNT(osi_Stat);
return code;
}
-osi_UFSClose(afile)
- register struct osi_file *afile;
- {
+int osi_UFSClose(register struct osi_file *afile)
+{
AFS_STATCNT(osi_Close);
if(afile->vnode) {
AFS_RELE(afile->vnode);
osi_FreeSmallSpace(afile);
return 0;
- }
+}
-osi_UFSTruncate(afile, asize)
- register struct osi_file *afile;
- afs_int32 asize; {
+int osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
struct vattr tvattr;
register afs_int32 code;
return code;
}
-void osi_DisableAtimes(avp)
-struct vnode *avp;
+void osi_DisableAtimes(struct vnode *avp)
{
struct inode *ip;
assert(avp->v_tag == VT_UFS);
/* Generic read interface */
-afs_osi_Read(afile, offset, aptr, asize)
- register struct osi_file *afile;
- int offset;
- char *aptr;
- afs_int32 asize; {
+int afs_osi_Read(register struct osi_file *afile, int offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
unsigned int resid;
register afs_int32 code;
}
/* Generic write interface */
-afs_osi_Write(afile, offset, aptr, asize)
- register struct osi_file *afile;
- char *aptr;
- afs_int32 offset;
- afs_int32 asize; {
+int afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
unsigned int resid;
register afs_int32 code;
/* This work should be handled by physstrat in ca/machdep.c.
This routine written from the RT NFS port strategy routine.
It has been generalized a bit, but should still be pretty clear. */
-int afs_osi_MapStrategy(aproc, bp)
- int (*aproc)();
- register struct buf *bp;
+int afs_osi_MapStrategy(int (*aproc)(), register struct buf *bp)
{
afs_int32 returnCode;
-void
-shutdown_osifile()
+void shutdown_osifile(void)
{
extern int afs_cold_shutdown;
{
struct file *fp;
struct inode *ip;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
int dummy;
int fd;
extern struct fileops vnops;
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
-void afs_osi_Wakeup(char *event);
-void afs_osi_Sleep(char *event);
static char waitV;
#define relevent(evp) ((evp)->refcount--)
-void afs_osi_Sleep(char *event)
+void afs_osi_Sleep(void *event)
{
struct afs_event *evp;
int seq;
relevent(evp);
}
-int afs_osi_SleepSig(char *event)
+int afs_osi_SleepSig(void *event)
{
afs_osi_Sleep(event);
return 0;
}
-void afs_osi_Wakeup(char *event)
+void afs_osi_Wakeup(void *event)
{
struct afs_event *evp;
if (!(code = afs_InitReq(&treq, u.u_cred)) &&
!(code = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
/* we really want this to stay around */
if (tvp) {
afs_globalVp = tvp;
*
* OSF/1 Locking: VN_LOCK has been called.
*/
-int
-osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+int osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
if (avc->vrefCount > 1)
return EBUSY;
*
* Called with the global lock NOT held.
*/
-void
-osi_ubc_flush_dirty_and_wait(vp, flags)
-struct vnode *vp;
-int flags; {
+static void osi_ubc_flush_dirty_and_wait(struct vnode *vp, int flags)
+{
int retry;
vm_page_t pp;
int first;
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
-void
-osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+void osi_VM_StoreAllSegments(struct vcache *avc)
{
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
* Since we drop and re-obtain the lock, we can't guarantee that there won't
* be some pages around when we return, newly created by concurrent activity.
*/
-void
-osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred, int sync)
{
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
*
* Locking: No lock is held, not even the global lock.
*/
-void
-osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
{
ubc_flush_dirty(AFSTOV(avc)->v_object, 0);
ubc_invalidate(AFSTOV(avc)->v_object, 0, 0, B_INVAL);
* activeV is raised. This is supposed to block pageins, but at present
* it only works on Solaris.
*/
-void
-osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
ubc_invalidate(AFSTOV(avc)->v_object, alen,
MAXINT - alen, B_INVAL);
ndp->ni_segflg = seg;
ndp->ni_dirp = namep;
error = namei(ndp);
- if (dvpp != (struct vnode **)0)
+ if (dvpp != NULL)
*dvpp = ndp->ni_dvp;
- if (cvpp != (struct vnode **)0)
+ if (cvpp != NULL)
*cvpp = ndp->ni_vp;
return(error);
}
extern struct mount *afs_cacheVfsp;
-void *osi_UFSOpen(ainode)
- afs_int32 ainode;
+void *osi_UFSOpen(afs_int32 ainode)
{
struct inode *ip;
register struct osi_file *afile = NULL;
return (void *)afile;
}
-afs_osi_Stat(afile, astat)
- register struct osi_file *afile;
- register struct osi_stat *astat; {
+int afs_osi_Stat(register struct osi_file *afile, register struct osi_stat *astat)
+{
register afs_int32 code;
struct vattr tvattr;
AFS_STATCNT(osi_Stat);
return code;
}
-osi_UFSClose(afile)
- register struct osi_file *afile;
+int osi_UFSClose(register struct osi_file *afile)
{
AFS_STATCNT(osi_Close);
if(afile->vnode) {
return 0;
}
-osi_UFSTruncate(afile, asize)
- register struct osi_file *afile;
- afs_int32 asize; {
+int osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
+{
struct vattr tvattr;
register afs_int32 code;
struct osi_stat tstat;
return code;
}
-void osi_DisableAtimes(avp)
-struct vnode *avp;
+void osi_DisableAtimes(struct vnode *avp)
{
struct inode *ip = VTOI(avp);
ip->i_flag &= ~IN_ACCESS;
/* Generic read interface */
-afs_osi_Read(afile, offset, aptr, asize)
- register struct osi_file *afile;
- int offset;
- char *aptr;
- afs_int32 asize; {
+int afs_osi_Read(register struct osi_file *afile, int offset, void *aptr, afs_int32 asize)
+{
unsigned int resid;
register afs_int32 code;
register afs_int32 cnt1=0;
}
/* Generic write interface */
-afs_osi_Write(afile, offset, aptr, asize)
- register struct osi_file *afile;
- char *aptr;
- afs_int32 offset;
- afs_int32 asize; {
+int afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr, afs_int32 asize)
+{
unsigned int resid;
register afs_int32 code;
AFS_STATCNT(osi_Write);
/* This work should be handled by physstrat in ca/machdep.c.
This routine written from the RT NFS port strategy routine.
It has been generalized a bit, but should still be pretty clear. */
-int afs_osi_MapStrategy(aproc, bp)
- int (*aproc)();
- register struct buf *bp;
+int afs_osi_MapStrategy(int (*aproc)(), register struct buf *bp)
{
afs_int32 returnCode;
-void
-shutdown_osifile()
+void shutdown_osifile(void)
{
extern int afs_cold_shutdown;
{
struct file *fp;
struct inode *ip;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
int dummy;
int fd;
extern struct fileops vnops;
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
-void afs_osi_Wakeup(char *event);
-void afs_osi_Sleep(char *event);
static char waitV;
#define relevent(evp) ((evp)->refcount--)
-void afs_osi_Sleep(char *event)
+void afs_osi_Sleep(void *event)
{
struct afs_event *evp;
int seq;
relevent(evp);
}
-int afs_osi_SleepSig(char *event)
+int afs_osi_SleepSig(void *event)
{
afs_osi_Sleep(event);
return 0;
}
-void afs_osi_Wakeup(char *event)
+void afs_osi_Wakeup(void *event)
{
struct afs_event *evp;
if (!(error = afs_InitReq(&treq, &cr)) &&
!(error = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
/* we really want this to stay around */
if (tvp) {
afs_globalVp = tvp;
*
* OSF/1 Locking: VN_LOCK has been called.
*/
-int
-osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+int osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
struct vm_object *obj;
struct vnode *vp;
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
-void
-osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+void osi_VM_StoreAllSegments(struct vcache *avc)
{
struct vnode *vp;
struct vm_object *obj;
* Since we drop and re-obtain the lock, we can't guarantee that there won't
* be some pages around when we return, newly created by concurrent activity.
*/
-void
-osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred, int sync)
{
struct vnode *vp;
struct vm_object *obj;
*
* Locking: No lock is held, not even the global lock.
*/
-void
-osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
{
struct vnode *vp;
struct vm_object *obj;
* activeV is raised. This is supposed to block pageins, but at present
* it only works on Solaris.
*/
-void
-osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
vnode_pager_setsize(AFSTOV(avc), alen);
}
extern struct vfs *afs_cacheVfsp;
-void *osi_UFSOpen(ainode)
- afs_int32 ainode;
+void *osi_UFSOpen(afs_int32 ainode)
{
struct inode *ip;
register struct osi_file *afile = NULL;
return (void *)afile;
}
-afs_osi_Stat(afile, astat)
- register struct osi_file *afile;
- register struct osi_stat *astat; {
+int afs_osi_Stat(register struct osi_file *afile, register struct osi_stat *astat)
+{
register afs_int32 code;
struct vattr tvattr;
AFS_STATCNT(osi_Stat);
return code;
}
-osi_UFSClose(afile)
- register struct osi_file *afile;
- {
+int osi_UFSClose(register struct osi_file *afile)
+{
AFS_STATCNT(osi_Close);
if(afile->vnode) {
AFS_RELE(afile->vnode);
osi_FreeSmallSpace(afile);
return 0;
- }
+}
-osi_UFSTruncate(afile, asize)
- register struct osi_file *afile;
- afs_int32 asize; {
+int osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
struct vattr tvattr;
register afs_int32 code;
return code;
}
-void osi_DisableAtimes(avp)
-struct vnode *avp;
+void osi_DisableAtimes(struct vnode *avp)
{
struct inode *ip = VTOI(avp);
ip->i_flag &= ~IACC;
/* Generic read interface */
-afs_osi_Read(afile, offset, aptr, asize)
- register struct osi_file *afile;
- int offset;
- char *aptr;
- afs_int32 asize; {
+int afs_osi_Read(register struct osi_file *afile, int offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
long resid;
register afs_int32 code;
}
/* Generic write interface */
-afs_osi_Write(afile, offset, aptr, asize)
- register struct osi_file *afile;
- char *aptr;
- afs_int32 offset;
- afs_int32 asize; {
+int afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
long resid;
register afs_int32 code;
}
-void
-shutdown_osifile()
+void shutdown_osifile(void)
{
extern int afs_cold_shutdown;
if (!mp && !(mp = getmp(dev))) {
u.u_error = ENXIO;
- return((struct inode *)0);
+ return(NULL);
}
pip=iget(dev,mp,inode);
if(!pip)
{
struct file *fp;
struct inode *ip;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
int dummy;
extern struct fileops vnodefops;
register int code;
return code;
}
-int afs_osi_SleepSig(char *event)
+int afs_osi_SleepSig(void *event)
{
afs_osi_Sleep(event);
return 0;
if (!(code = afs_InitReq(&treq, p_cred(u.u_procp))) &&
!(code = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
/* we really want this to stay around */
if (tvp) {
afs_globalVp = tvp;
* is not dropped and re-acquired for any platform. It may be that *slept is
* therefore obsolescent.
*/
-int
-osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+int osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
if (avc->vrefCount > 1)
return EBUSY;
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
-void
-osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+void osi_VM_StoreAllSegments(struct vcache *avc)
{
; /* Nothing here yet */
}
* Locking: the vcache entry's lock is held. It may be dropped and
* re-obtained.
*/
-void
-osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred, int sync)
{
struct vnode *vp = AFSTOV(avc);
*
* Locking: No lock is held, not even the global lock.
*/
-void
-osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
{
; /* Nothing here yet */
}
* activeV is raised. This is supposed to block pageins, but at present
* it only works on Solaris.
*/
-void
-osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
; /* Nothing here yet */
}
return (void *)afile;
}
-afs_osi_Stat(afile, astat)
- register struct osi_file *afile;
- register struct osi_stat *astat; {
+int afs_osi_Stat(register struct osi_file *afile, register struct osi_stat *astat)
+{
register afs_int32 code;
struct vattr tvattr;
AFS_STATCNT(osi_Stat);
return code;
}
-osi_UFSClose(afile)
- register struct osi_file *afile;
- {
+int osi_UFSClose(register struct osi_file *afile)
+{
AFS_STATCNT(osi_Close);
if(afile->vnode) {
VN_RELE(afile->vnode);
osi_FreeSmallSpace(afile);
return 0;
- }
+}
-osi_UFSTruncate(afile, asize)
- register struct osi_file *afile;
- afs_int32 asize; {
+int osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
struct vattr tvattr;
register afs_int32 code;
}
#ifdef AFS_SGI_EFS_IOPS_ENV
-void osi_DisableAtimes(avp)
-struct vnode *avp;
+void osi_DisableAtimes(struct vnode *avp)
{
if (afs_CacheFSType == AFS_SGI_EFS_CACHE)
{
/* Generic read interface */
-afs_osi_Read(afile, offset, aptr, asize)
- register struct osi_file *afile;
- int offset;
- char *aptr;
- afs_int32 asize; {
+int afs_osi_Read(register struct osi_file *afile, int offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
ssize_t resid;
register afs_int32 code;
}
/* Generic write interface */
-afs_osi_Write(afile, offset, aptr, asize)
- register struct osi_file *afile;
- char *aptr;
- afs_int32 offset;
- afs_int32 asize; {
+int afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
ssize_t resid;
register afs_int32 code;
/* This work should be handled by physstrat in ca/machdep.c.
This routine written from the RT NFS port strategy routine.
It has been generalized a bit, but should still be pretty clear. */
-int afs_osi_MapStrategy(aproc, bp)
- int (*aproc)();
- register struct buf *bp;
+int afs_osi_MapStrategy(int (*aproc)(), register struct buf *bp)
{
afs_int32 returnCode;
-void
-shutdown_osifile()
+void shutdown_osifile(void)
{
extern int afs_cold_shutdown;
if (params[1] == INODESPECIAL)
AFS_LOCK_VOL_CREATE();
- code = gop_lookupname(path, AFS_UIOSYS, FOLLOW, (struct vnode **) 0, &dvp);
+ code = gop_lookupname(path, AFS_UIOSYS, FOLLOW, NULL, &dvp);
if (code == ENOENT) {
/* Maybe it's an old directory name format. */
AFS_COPYINSTR((char*)datap, name, AFS_PNAME_SIZE-1, &junk, unused);
strcat(name, "/.");
strcat(name, int_to_base64(stmp1, rw_vno));
- code = gop_lookupname(name, AFS_UIOSYS, FOLLOW, (struct vnode **) 0,
+ code = gop_lookupname(name, AFS_UIOSYS, FOLLOW, NULL,
&dvp);
if (!code) {
/* Use old name format. */
if (code) {
if (code == EEXIST) {
/* someone beat us to it? */
- code = gop_lookupname(path, AFS_UIOSYS, 0, (struct vnode **) 0,
+ code = gop_lookupname(path, AFS_UIOSYS, 0, NULL,
&dvp);
}
if (code) {
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
-void afs_osi_Wakeup(char *event);
-void afs_osi_Sleep(char *event);
static char waitV;
#define relevent(evp) ((evp)->refcount--)
-void afs_osi_Sleep(char *event)
+void afs_osi_Sleep(void *event)
{
struct afs_event *evp;
int seq;
relevent(evp);
}
-int afs_osi_SleepSig(char *event)
+int afs_osi_SleepSig(void *event)
{
afs_osi_Sleep(event);
return 0;
}
-void afs_osi_Wakeup(char *event)
+void afs_osi_Wakeup(void *event)
{
struct afs_event *evp;
if (!(code = afs_InitReq(&treq, OSI_GET_CURRENT_CRED())) &&
!(code = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
/* we really want this to stay around */
if (tvp) {
afs_globalVp = tvp;
if (code = afs_InitReq(&treq, OSI_GET_CURRENT_CRED()))
goto out;
- *avcp = (vnode_t*) afs_GetVCache(&vfid, &treq, (afs_int32 *)0,
- (struct vcache*)0, 0);
+ *avcp = (vnode_t*) afs_GetVCache(&vfid, &treq, NULL, (struct vcache*)0);
if (! *avcp) {
code = ENOENT;
}
* is not dropped and re-acquired for any platform. It may be that *slept is
* therefore obsolescent.
*/
-int
-osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+int osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
int s, code;
vnode_t *vp = &avc->v;
* Since we drop and re-obtain the lock, we can't guarantee that there won't
* be some pages around when we return, newly created by concurrent activity.
*/
-void
-osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred, int sync)
{
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
*
* Locking: only the global lock is held.
*/
-void
-osi_VM_FSyncInval(avc)
- struct vcache *avc;
+void osi_VM_FSyncInval(struct vcache *avc)
{
AFS_GUNLOCK();
PFLUSHINVALVP((vnode_t *)avc, (off_t)0, (off_t)avc->m.Length);
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
-void
-osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+void osi_VM_StoreAllSegments(struct vcache *avc)
{
int error;
osi_Assert(valusema(&avc->vc_rwlock) <= 0);
*
* Locking: No lock is held, not even the global lock.
*/
-void
-osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
{
vnode_t *vp = (vnode_t *)avc;
* activeV is raised. This is supposed to block pageins, but at present
* it only works on Solaris.
*/
-void
-osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
PTOSSVP(&avc->v, (off_t)alen, (off_t)MAXLONG);
}
lmem.chunk = addr;
/* remove this chunk from our hash table */
- if ( lmp = (struct osi_linux_mem *)afs_lhash_remove(lh_mem_htab, hash_chunk(addr), &lmem)) {
+ if ((lmp = (struct osi_linux_mem *)afs_lhash_remove(lh_mem_htab, hash_chunk(addr), &lmem))) {
linux_free(lmp->chunk); /* this contains the piggybacked type info*/
afs_atomlist_put(al_mem_pool, lmp); /* return osi_linux_mem struct to pool*/
afs_linux_cur_allocs--;
extern struct osi_dev cacheDev;
extern struct super_block *afs_cacheSBp;
-void *osi_UFSOpen(ainode)
- afs_int32 ainode;
+void *osi_UFSOpen(afs_int32 ainode)
{
struct inode *ip;
register struct osi_file *afile = NULL;
return (void *)afile;
}
-afs_osi_Stat(afile, astat)
- register struct osi_file *afile;
- register struct osi_stat *astat; {
+int afs_osi_Stat(register struct osi_file *afile, register struct osi_stat *astat)
+{
register afs_int32 code;
AFS_STATCNT(osi_Stat);
MObtainWriteLock(&afs_xosi,320);
return code;
}
-osi_UFSClose(afile)
- register struct osi_file *afile;
+int osi_UFSClose(register struct osi_file *afile)
{
AFS_STATCNT(osi_Close);
if (afile) {
return 0;
}
-osi_UFSTruncate(afile, asize)
- register struct osi_file *afile;
- afs_int32 asize; {
+int osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
register afs_int32 code;
struct osi_stat tstat;
/* Generic read interface */
-afs_osi_Read(afile, offset, aptr, asize)
- register struct osi_file *afile;
- int offset;
- char *aptr;
- afs_int32 asize; {
+int afs_osi_Read(register struct osi_file *afile, int offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
size_t resid;
register afs_int32 code;
}
/* Generic write interface */
-afs_osi_Write(afile, offset, aptr, asize)
- register struct osi_file *afile;
- char *aptr;
- afs_int32 offset;
- afs_int32 asize; {
+int afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
size_t resid;
register afs_int32 code;
/* This work should be handled by physstrat in ca/machdep.c.
This routine written from the RT NFS port strategy routine.
It has been generalized a bit, but should still be pretty clear. */
-int afs_osi_MapStrategy(aproc, bp)
- int (*aproc)();
- register struct buf *bp;
+int afs_osi_MapStrategy(int (*aproc)(), register struct buf *bp)
{
afs_int32 returnCode;
return returnCode;
}
-void
-shutdown_osifile()
+void shutdown_osifile(void)
{
extern int afs_cold_shutdown;
#define AFS_GUNLOCK() \
do { \
if (!ISAFS_GLOCK()) \
- osi_Panic("afs global lock not held"); \
+ osi_Panic("afs global lock not held at %s:%d", __FILE__, __LINE__); \
afs_global_owner = 0; \
up(&afs_global_lock); \
} while (0)
#else
if (ip->i_count > 1)
#endif
- printf("afs_put_inode: ino %d (0x%x) has count %d\n", ip->i_ino, ip);
+ printf("afs_put_inode: ino %d (0x%x) has count %d\n", ip->i_ino, ip, ip->i_count);
afs_InactiveVCache(vcp, credp);
ObtainWriteLock(&vcp->lock, 504);
struct task_struct *rxk_ListenerTask;
-void osi_linux_mask() {
+void osi_linux_mask(void)
+{
spin_lock_irq(¤t->sigmask_lock);
sigfillset(¤t->blocked);
recalc_sigpending(current);
spin_unlock_irq(¤t->sigmask_lock);
}
-void osi_linux_unmask() {
+void osi_linux_unmask(void)
+{
spin_lock_irq(&rxk_ListenerTask->sigmask_lock);
sigemptyset(&rxk_ListenerTask->blocked);
flush_signals(rxk_ListenerTask);
spin_unlock_irq(&rxk_ListenerTask->sigmask_lock);
}
-void osi_linux_rxkreg() {
+void osi_linux_rxkreg(void)
+{
rxk_ListenerTask = current;
}
#include "../afs/afsincludes.h" /* Afs-based standard headers */
#include "../afs/afs_stats.h" /* afs statistics */
-
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
-void afs_osi_Wakeup(char *event);
-void afs_osi_Sleep(char *event);
static char waitV, dummyV;
-
void afs_osi_InitWaitHandle(struct afs_osi_WaitHandle *achandle)
{
AFS_STATCNT(osi_InitWaitHandle);
*/
int afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok)
{
- int code;
- afs_int32 endTime, tid;
+ afs_int32 endTime;
struct timer_list *timer = NULL;
+ int code;
AFS_STATCNT(osi_Wait);
endTime = osi_Time() + (ams/1000);
ahandle->proc = (caddr_t) current;
do {
- AFS_ASSERT_GLOCK();
- code = 0;
- code = osi_TimedSleep(&waitV, ams, aintok);
-
- if (code) break;
+ AFS_ASSERT_GLOCK();
+ code = osi_TimedSleep(&waitV, ams, 1);
+ if (code) break;
if (ahandle && (ahandle->proc == (caddr_t) 0)) {
/* we've been signalled */
break;
* Waits for an event to be notified, returning early if a signal
* is received. Returns EINTR if signaled, and 0 otherwise.
*/
-int afs_osi_SleepSig(char *event)
+int afs_osi_SleepSig(void *event)
{
struct afs_event *evp;
int seq, retval;
* caller that the wait has been interrupted and the stack should be cleaned
* up preparatory to signal delivery
*/
-void afs_osi_Sleep(char *event)
+void afs_osi_Sleep(void *event)
{
sigset_t saved_set;
}
-void afs_osi_Wakeup(char *event)
+void afs_osi_Wakeup(void *event)
{
struct afs_event *evp;
if (!(code = afs_InitReq(&treq, credp)) &&
!(code = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
if (tvp) {
extern struct inode_operations afs_dir_iops;
#if defined(AFS_LINUX24_ENV)
if (!dirpos)
break;
- de = (struct DirEntry*)afs_dir_GetBlob(&tdc->f.inode, dirpos);
+ de = afs_dir_GetBlob(&tdc->f.inode, dirpos);
if (!de)
break;
if ((avc->states & CForeign) == 0 &&
(ntohl(de->fid.vnode) & 1)) {
type=DT_DIR;
- } else if ((tvc=afs_FindVCache(&afid,0,0,0,0))) {
+ } else if ((tvc=afs_FindVCache(&afid,0,0))) {
if (tvc->mvstat) {
type=DT_DIR;
} else if (((tvc->states) & (CStatd|CTruth))) {
/* what other types does AFS support? */
}
/* clean up from afs_FindVCache */
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
}
code = (*filldir)(dirbuf, de->name, len, offset, ino, type);
}
if (afs_fakestat_enable && vcp->mvstat == 1 && vcp->mvid &&
(vcp->states & CMValid) && (vcp->states & CStatd)) {
ObtainSharedLock(&afs_xvcache, 680);
- rootvp = afs_FindVCache(vcp->mvid, 0, 0, 0, 0);
+ rootvp = afs_FindVCache(vcp->mvid, 0, 0);
ReleaseSharedLock(&afs_xvcache);
}
#ifdef AFS_LINUX24_ENV
unlock_kernel();
#endif
- if (rootvp) afs_PutVCache(rootvp, 0);
+ if (rootvp) afs_PutVCache(rootvp);
AFS_GUNLOCK();
return 0;
}
goto done;
}
- if (code = afs_InitReq(&treq, credp))
+ if ((code = afs_InitReq(&treq, credp)))
goto done;
Check_AtSys(parentvcp, dp->d_name.name, &sysState, &treq);
name = sysState.name;
/* First try looking up the DNLC */
- if (lookupvcp = osi_dnlc_lookup(parentvcp, name, WRITE_LOCK)) {
+ if ((lookupvcp = osi_dnlc_lookup(parentvcp, name, WRITE_LOCK))) {
/* Verify that the dentry does not point to an old inode */
if (vcp != lookupvcp)
goto done;
done:
/* Clean up */
if (lookupvcp)
- afs_PutVCache(lookupvcp, WRITE_LOCK);
+ afs_PutVCache(lookupvcp);
if (sysState.allocked)
osi_FreeLargeSpace(name);
extern struct mount *afs_cacheVfsp;
-void *osi_UFSOpen(ainode)
- afs_int32 ainode;
+void *osi_UFSOpen(afs_int32 ainode)
{
struct inode *ip;
register struct osi_file *afile = NULL;
return (void *)afile;
}
-afs_osi_Stat(afile, astat)
- register struct osi_file *afile;
- register struct osi_stat *astat; {
+int afs_osi_Stat(register struct osi_file *afile, register struct osi_stat *astat)
+{
register afs_int32 code;
struct vattr tvattr;
AFS_STATCNT(osi_Stat);
return code;
}
-osi_UFSClose(afile)
- register struct osi_file *afile;
+int osi_UFSClose(register struct osi_file *afile)
{
AFS_STATCNT(osi_Close);
if(afile->vnode) {
return 0;
}
-osi_UFSTruncate(afile, asize)
- register struct osi_file *afile;
- afs_int32 asize; {
+int osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
struct vattr tvattr;
register afs_int32 code;
return code;
}
-void osi_DisableAtimes(avp)
-struct vnode *avp;
+void osi_DisableAtimes(struct vnode *avp)
{
struct inode *ip = VTOI(avp);
ip->i_flag &= ~IACC;
/* Generic read interface */
-afs_osi_Read(afile, offset, aptr, asize)
- register struct osi_file *afile;
- int offset;
- char *aptr;
- afs_int32 asize; {
+int afs_osi_Read(register struct osi_file *afile, int offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
unsigned int resid;
register afs_int32 code;
}
/* Generic write interface */
-afs_osi_Write(afile, offset, aptr, asize)
- register struct osi_file *afile;
- char *aptr;
- afs_int32 offset;
- afs_int32 asize; {
+int afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
unsigned int resid;
register afs_int32 code;
/* This work should be handled by physstrat in ca/machdep.c.
This routine written from the RT NFS port strategy routine.
It has been generalized a bit, but should still be pretty clear. */
-int afs_osi_MapStrategy(aproc, bp)
- int (*aproc)();
- register struct buf *bp;
+int afs_osi_MapStrategy(int (*aproc)(), register struct buf *bp)
{
afs_int32 returnCode;
-void
-shutdown_osifile()
+void shutdown_osifile(void)
{
extern int afs_cold_shutdown;
{
struct file *fp;
struct inode *ip;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
int dummy;
int fd;
extern struct fileops vnops;
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
-void afs_osi_Wakeup(char *event);
-void afs_osi_Sleep(char *event);
-
static char waitV;
#define relevent(evp) ((evp)->refcount--)
-void afs_osi_Sleep(char *event)
+void afs_osi_Sleep(void *event)
{
struct afs_event *evp;
int seq;
relevent(evp);
}
-int afs_osi_SleepSig(char *event)
+int afs_osi_SleepSig(void *event)
{
afs_osi_Sleep(event);
return 0;
}
-void afs_osi_Wakeup(char *event)
+void afs_osi_Wakeup(void *event)
{
struct afs_event *evp;
if (!(code = afs_InitReq(&treq, cred)) &&
!(code = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
/* we really want this to stay around */
if (tvp) {
afs_globalVp = tvp;
*
* OSF/1 Locking: VN_LOCK has been called.
*/
-int
-osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+int osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
#ifdef SECRETLY_OSF1
if (avc->vrefCount > 1)
*
* Called with the global lock NOT held.
*/
-void
-osi_ubc_flush_dirty_and_wait(vp, flags)
-struct vnode *vp;
-int flags; {
+static void osi_ubc_flush_dirty_and_wait(struct vnode *vp, int flags)
+{
int retry;
vm_page_t pp;
int first;
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
-void
-osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+void osi_VM_StoreAllSegments(struct vcache *avc)
{
#ifdef SECRETLY_OSF1
ReleaseWriteLock(&avc->lock);
* Since we drop and re-obtain the lock, we can't guarantee that there won't
* be some pages around when we return, newly created by concurrent activity.
*/
-void
-osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred, int sync)
{
#ifdef SECRETLY_OSF1
ReleaseWriteLock(&avc->lock);
*
* Locking: No lock is held, not even the global lock.
*/
-void
-osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
{
#ifdef SECRETLY_OSF1
ubc_flush_dirty(((struct vnode *)avc)->v_object, 0);
* activeV is raised. This is supposed to block pageins, but at present
* it only works on Solaris.
*/
-void
-osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
#ifdef SECRETLY_OSF1
ubc_invalidate(((struct vnode *)avc)->v_object, alen,
ndp->ni_segflg = seg;
ndp->ni_dirp = namep;
error = namei(ndp);
- if (dvpp != (struct vnode **)0)
+ if (dvpp != NULL)
*dvpp = ndp->ni_dvp;
- if (cvpp != (struct vnode **)0)
+ if (cvpp != NULL)
*cvpp = ndp->ni_vp;
return(error);
}
return (afs_int32)(vattr.va_size);
}
-void *osi_VxfsOpen(ainode)
- afs_int32 ainode;
+void *osi_VxfsOpen(afs_int32 ainode)
{
struct vnode *vp;
register struct osi_file *afile = NULL;
}
#endif /* AFS_HAVE_VXFS */
-void *osi_UfsOpen(ainode)
- afs_int32 ainode;
+void *osi_UfsOpen(afs_int32 ainode)
{
struct inode *ip;
register struct osi_file *afile = NULL;
#if defined(AFS_SUN57_64BIT_ENV)
void *osi_UFSOpen(ino_t ainode)
#else
-void *osi_UFSOpen(ainode)
- afs_int32 ainode;
+void *osi_UFSOpen(afs_int32 ainode)
#endif
{
extern int cacheDiskType;
return osi_UfsOpen(ainode);
}
-afs_osi_Stat(afile, astat)
- register struct osi_file *afile;
- register struct osi_stat *astat; {
+int afs_osi_Stat(register struct osi_file *afile, register struct osi_stat *astat)
+{
register afs_int32 code;
struct vattr tvattr;
AFS_STATCNT(osi_Stat);
return code;
}
-osi_UFSClose(afile)
- register struct osi_file *afile;
+int osi_UFSClose(register struct osi_file *afile)
{
AFS_STATCNT(osi_Close);
if(afile->vnode) {
return 0;
}
-osi_UFSTruncate(afile, asize)
- register struct osi_file *afile;
- afs_int32 asize; {
+int osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
struct vattr tvattr;
register afs_int32 code;
return code;
}
-void osi_DisableAtimes(avp)
-struct vnode *avp;
+void osi_DisableAtimes(struct vnode *avp)
{
if (afs_CacheFSType == AFS_SUN_UFS_CACHE) {
struct inode *ip = VTOI(avp);
/* Generic read interface */
-afs_osi_Read(afile, offset, aptr, asize)
- register struct osi_file *afile;
- int offset;
- char *aptr;
- afs_int32 asize; {
+int afs_osi_Read(register struct osi_file *afile, int offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
#if defined(AFS_SUN57_ENV)
ssize_t resid;
}
/* Generic write interface */
-afs_osi_Write(afile, offset, aptr, asize)
- register struct osi_file *afile;
- char *aptr;
- afs_int32 offset;
- afs_int32 asize; {
+int afs_osi_Write(register struct osi_file *afile, afs_int32 offset, void *aptr, afs_int32 asize)
+{
struct AFS_UCRED *oldCred;
#if defined(AFS_SUN57_ENV)
ssize_t resid;
/* This work should be handled by physstrat in ca/machdep.c.
This routine written from the RT NFS port strategy routine.
It has been generalized a bit, but should still be pretty clear. */
-int afs_osi_MapStrategy(aproc, bp)
- int (*aproc)();
- register struct buf *bp;
+int afs_osi_MapStrategy(int (*aproc)(), register struct buf *bp)
{
afs_int32 returnCode;
-void
-shutdown_osifile()
+void shutdown_osifile(void)
{
extern int afs_cold_shutdown;
{
struct file *fp;
struct inode *ip;
- struct vnode *vp = (struct vnode *)0;
+ struct vnode *vp = NULL;
int dummy;
int fd;
register int code;
#include "../afs/afs_stats.h" /* afs statistics */
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
-void afs_osi_Wakeup(char *event);
-void afs_osi_Sleep(char *event);
static char waitV;
#define relevent(evp) ((evp)->refcount--)
-void afs_osi_Sleep(char *event)
+void afs_osi_Sleep(void *event)
{
struct afs_event *evp;
int seq;
relevent(evp);
}
-int afs_osi_SleepSig(char *event)
+int afs_osi_SleepSig(void *event)
{
struct afs_event *evp;
int seq, code = 0;
}
-void afs_osi_Wakeup(char *event)
+void afs_osi_Wakeup(void *event)
{
struct afs_event *evp;
if (!(code = afs_InitReq(&treq, proc->p_cred)) &&
!(code = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
/* we really want this to stay around */
if (tvp) {
afs_globalVp = tvp;
*
* Locking: only the global lock is held on entry.
*/
-int
-osi_VM_GetDownD(avc, adc)
- struct vcache *avc;
- struct dcache *adc;
+int osi_VM_GetDownD(struct vcache *avc, struct dcache *adc)
{
int code;
* is not dropped and re-acquired for any platform. It may be that *slept is
* therefore obsolescent.
*/
-int
-osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+int osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
if (avc->vrefCount != 0)
return EBUSY;
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
-void
-osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+void osi_VM_StoreAllSegments(struct vcache *avc)
{
AFS_GUNLOCK();
#if defined(AFS_SUN56_ENV)
* Locking: the vcache entry's lock is held. It may be dropped and
* re-obtained.
*/
-void
-osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred, int sync)
{
AFS_GUNLOCK();
#if defined(AFS_SUN56_ENV)
*
* Locking: No lock is held, not even the global lock.
*/
-void
-osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
{
extern int afs_pvn_vptrunc;
* The caller will raise activeV (to prevent pageins), but this function must
* be called first, since it causes a pagein.
*/
-void
-osi_VM_PreTruncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_PreTruncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
page_t *pp;
int pageOffset = (alen & PAGEOFFSET);
* Locking: no lock is held, not even the global lock.
* Pageins are blocked (activeV is raised).
*/
-void
-osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
/*
* It's OK to specify afs_putapage here, even though we aren't holding
} /* while (1) ... */
AFS_GLOCK();
- pl[slot] = (struct page *) 0;
+ pl[slot] = NULL;
ReleaseReadLock(&tdc->lock);
/* Prefetch next chunk if we're at a chunk boundary */
/*
* Never call afs_brelse
*/
-int ufs_brelse(vp, bp)
-struct usr_vnode *vp;
-struct usr_buf *bp;
+int ufs_brelse(struct usr_vnode *vp, struct usr_buf *bp)
{
usr_assert(0);
}
/*
* I am not sure what to do with these, they assert for now
*/
-int iodone(bp)
-struct usr_buf *bp;
+int iodone(struct usr_buf *bp)
{
usr_assert(0);
}
-struct usr_file *getf(fd)
-int fd;
+struct usr_file *getf(int fd)
{
usr_assert(0);
}
/*
* Every user is a super user
*/
-int afs_osi_suser(credp)
-struct usr_ucred *credp;
+int afs_osi_suser(void *credp)
{
return 1;
}
-int afs_suser(credp)
-struct usr_ucred *credp;
+int afs_suser(void *credp)
{
return 1;
}
* These are no-ops in user space
*/
-int afs_osi_SetTime(atv)
-osi_timeval_t *atv;
+int afs_osi_SetTime(osi_timeval_t *atv)
{
return 0;
}
* xflock should never fall through, the only files we know
* about are AFS files
*/
-int usr_flock()
+int usr_flock(void)
{
usr_assert(0);
}
* ioctl should never fall through, the only files we know
* about are AFS files
*/
-int usr_ioctl()
+int usr_ioctl(void)
{
usr_assert(0);
}
/*
* We do not support the inode related system calls
*/
-int afs_syscall_icreate()
+int afs_syscall_icreate(void)
{
usr_assert(0);
}
-int afs_syscall_iincdec()
+int afs_syscall_iincdec(void)
{
usr_assert(0);
}
-int afs_syscall_iopen()
+int afs_syscall_iopen(void)
{
usr_assert(0);
}
-int afs_syscall_ireadwrite()
+int afs_syscall_ireadwrite(void)
{
usr_assert(0);
}
* these routines are referenced in the vfsops structure, but
* should never get called
*/
-int vno_close()
+int vno_close(void)
{
usr_assert(0);
}
-int vno_ioctl()
+int vno_ioctl(void)
{
usr_assert(0);
}
-int vno_rw()
+int vno_rw(void)
{
usr_assert(0);
}
-int vno_select()
+int vno_select(void)
{
usr_assert(0);
}
/*
* uiomove copies data between kernel buffers and uio buffers
*/
-int usr_uiomove(kbuf, n, rw, uio)
-char *kbuf;
-int n;
-int rw;
-struct usr_uio *uio;
+int usr_uiomove(char *kbuf, int n, int rw, struct usr_uio *uio)
{
int nio;
int len;
/*
* routines to manage user credentials
*/
-struct usr_ucred *usr_crcopy(credp)
-struct usr_ucred *credp;
+struct usr_ucred *usr_crcopy(struct usr_ucred *credp)
{
struct usr_ucred *newcredp;
return newcredp;
}
-struct usr_ucred *usr_crget()
+struct usr_ucred *usr_crget(void)
{
struct usr_ucred *newcredp;
return newcredp;
}
-int usr_crfree(credp)
-struct usr_ucred *credp;
+int usr_crfree(struct usr_ucred *credp)
{
credp->cr_ref--;
if (credp->cr_ref == 0) {
}
}
-int usr_crhold(credp)
-struct usr_ucred *credp;
+int usr_crhold(struct usr_ucred *credp)
{
credp->cr_ref++;
}
-void usr_vattr_null(vap)
-struct usr_vattr *vap;
+void usr_vattr_null(struct usr_vattr *vap)
{
int n;
char *cp;
* kernel environment for each thread. The user structure
* is stored in the thread specific data.
*/
-void uafs_InitThread()
+void uafs_InitThread(void)
{
int st;
struct usr_user *uptr;
* this routine is used to implement the global 'u' structure. Initializes
* the thread if needed.
*/
-struct usr_user *get_user_struct()
+struct usr_user *get_user_struct(void)
{
struct usr_user *uptr;
int st;
/*
* Sleep on an event
*/
-int afs_osi_Sleep(x)
-caddr_t x;
+void afs_osi_Sleep(void *x)
{
int index;
osi_wait_t *waitp;
}
}
-int afs_osi_SleepSig(x)
- caddr_t x;
+int afs_osi_SleepSig(void *x)
{
afs_osi_Sleep(x);
return 0;
}
-int afs_osi_Wakeup(x)
-caddr_t x;
+void afs_osi_Wakeup(void *x)
{
int index;
osi_wait_t *waitp;
usr_mutex_unlock(&osi_waitq_lock);
}
-int afs_osi_Wait(msec, handle, intok)
-afs_int32 msec;
-struct afs_osi_WaitHandle *handle;
-int intok;
+int afs_osi_Wait(afs_int32 msec, struct afs_osi_WaitHandle *handle, int intok)
{
int index;
osi_wait_t *waitp;
return ret;
}
-void afs_osi_CancelWait(handle)
-struct afs_osi_WaitHandle *handle;
+void afs_osi_CancelWait(struct afs_osi_WaitHandle *handle)
{
- afs_osi_Wakeup((caddr_t)handle);
+ afs_osi_Wakeup(handle);
}
/*
* Netscape NSAPI doesn't have a cond_timed_wait, so we need
* to explicitly signal cond_timed_waits when their timers expire
*/
-int afs_osi_CheckTimedWaits()
+int afs_osi_CheckTimedWaits(void)
{
time_t curTime;
osi_wait_t *waitp;
* Allocate a slot in the file table if there is not one there already,
* copy in the file name and kludge up the vnode and inode structures
*/
-int lookupname(fnamep, segflg, followlink, dirvpp, compvpp)
-char *fnamep;
-int segflg;
-int followlink;
-struct usr_vnode **dirvpp;
-struct usr_vnode **compvpp;
+int lookupname(char *fnamep, int segflg, int followlink,
+ struct usr_vnode **dirvpp, struct usr_vnode **compvpp)
{
int i;
int code;
/*
* open a file given its i-node number
*/
-void *osi_UFSOpen(ino)
-int ino;
+void *osi_UFSOpen(afs_int32 ino)
{
int rc;
struct osi_file *fp;
return fp;
}
-int osi_UFSClose(fp)
-struct osi_file *fp;
+int osi_UFSClose(struct osi_file *fp)
{
int rc;
return 0;
}
-int osi_UFSTruncate(fp, len)
-struct osi_file *fp;
-int len;
+int osi_UFSTruncate(struct osi_file *fp, afs_int32 len)
{
int rc;
return 0;
}
-int afs_osi_Read(fp, offset, buf, len)
-struct osi_file *fp;
-int offset;
-char *buf;
-int len;
+int afs_osi_Read(struct osi_file *fp, int offset, char *buf, afs_int32 len)
{
int rc, ret;
int code;
return ret;
}
-int afs_osi_Write(fp, offset, buf, len)
-struct osi_file *fp;
-int offset;
-char *buf;
-int len;
+int afs_osi_Write(struct osi_file *fp, afs_int32 offset, char *buf, afs_int32 len)
{
int rc, ret;
int code;
return ret;
}
-int afs_osi_Stat(fp, stp)
-struct osi_file *fp;
-struct osi_stat *stp;
+int afs_osi_Stat(struct osi_file *fp, struct osi_stat *stp)
{
int rc;
struct stat st;
afs_osi_Free(ptr, 0);
}
-void shutdown_osi()
+void shutdown_osi(void)
{
AFS_STATCNT(shutdown_osi);
return;
}
-void shutdown_osinet()
+void shutdown_osinet(void)
{
AFS_STATCNT(shutdown_osinet);
return;
}
-void shutdown_osifile()
+void shutdown_osifile(void)
{
AFS_STATCNT(shutdown_osifile);
return;
}
-int afs_nfsclient_init()
+int afs_nfsclient_init(void)
{
return 0;
}
-void shutdown_nfsclnt()
+void shutdown_nfsclnt(void)
{
return;
}
-int afs_osi_Invisible()
+void afs_osi_Invisible(void)
{
- return 0;
+ return;
}
-int osi_GetTime(tv)
-struct timeval *tv;
+int osi_GetTime(struct timeval *tv)
{
gettimeofday(tv, NULL);
return 0;
}
-int osi_SetTime(tv)
-struct timeval *tv;
+int osi_SetTime(struct timeval *tv)
{
return 0;
}
-int osi_Active(avc)
-struct vcache *avc;
+int osi_Active(struct vcache *avc)
{
AFS_STATCNT(osi_Active);
if (avc->opens > 0) return(1);
return 0;
}
-int afs_osi_MapStrategy(aproc, bp)
-int (*aproc)();
-struct usr_buf *bp;
+int afs_osi_MapStrategy(int (*aproc)(), struct usr_buf *bp)
{
afs_int32 returnCode;
returnCode = (*aproc)(bp);
return returnCode;
}
-osi_FlushPages(avc, credp)
- register struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_FlushPages(register struct vcache *avc, struct AFS_UCRED *credp)
{
ObtainSharedLock(&avc->lock,555);
if ((hcmp((avc->m.DataVersion), (avc->mapDV)) <= 0) ||
return;
}
-osi_FlushText_really(vp)
- register struct vcache *vp;
+void osi_FlushText_really(register struct vcache *vp)
{
if (hcmp(vp->m.DataVersion, vp->flushDV) > 0) {
hset(vp->flushDV, vp->m.DataVersion);
return;
}
-int osi_SyncVM(avc)
-struct vcache *avc;
+int osi_SyncVM(struct vcache *avc)
{
return 0;
}
-void osi_ReleaseVM(avc, len, credp)
-struct vcache *avc;
-int len;
-struct usr_ucred *credp;
+void osi_ReleaseVM(struct vcache *avc, int len, struct usr_ucred *credp)
{
return;
}
-void osi_Init()
+void osi_Init(void)
{
int i;
int rc;
* None.
*------------------------------------------------------------------------*/
-int GetVFileNumber(fname)
- char *fname;
+int GetVFileNumber(char *fname)
{
int computedVNumber; /*The computed file number we return*/
int filenameLen; /*Number of chars in filename*/
* As described.
*------------------------------------------------------------------------*/
-int CreateCacheFile(fname)
- char *fname;
+int CreateCacheFile(char *fname)
{
static char rn[] = "CreateCacheFile"; /*Routine name*/
int cfd; /*File descriptor to AFS cache file*/
* delete files as explained above.
*------------------------------------------------------------------------*/
-int SweepAFSCache(vFilesFound)
- int *vFilesFound;
+int SweepAFSCache(int *vFilesFound)
{
static char rn[] = "SweepAFSCache"; /*Routine name*/
char fullpn_FileToDelete[1024]; /*File to be deleted from cache*/
if (cacheFlags & AFSCALL_INIT_MEMCACHE) {
if (afsd_debug)
- printf("%s: Memory Cache, no cache sweep done\n");
+ printf("%s: Memory Cache, no cache sweep done\n", rn);
*vFilesFound = 0;
return 0;
}
return(0);
}
-static ConfigCell(aci, arock, adir)
-register struct afsconf_cell *aci;
-char *arock;
-struct afsconf_dir *adir; {
+static int ConfigCell(register struct afsconf_cell *aci, char *arock, struct afsconf_dir *adir)
+{
register int isHomeCell;
register int i;
afs_int32 cellFlags;
/*
* Set the UDP port number RX uses for UDP datagrams
*/
-void uafs_SetRxPort(
- int port)
+void uafs_SetRxPort(int port)
{
usr_assert(usr_rx_port == 0);
usr_rx_port = port;
memset(pathname_for_V, 0, (cacheFiles * sizeof(char *)));
if (afsd_debug)
printf("%s: %d pathname_for_V entries at 0x%x, %d bytes\n",
- rn, cacheFiles, (cacheFiles * sizeof(AFSD_INO_T)));
+ rn, cacheFiles, pathname_for_V, (cacheFiles * sizeof(AFSD_INO_T)));
/*
* Set up all the pathnames we'll need for later.
*/
lookingForHomeCell = 1;
- afsconf_CellApply(afs_cdir, ConfigCell, (char *) 0);
+ afsconf_CellApply(afs_cdir, ConfigCell, NULL);
/*
* If we're still looking for the home cell after the whole cell
return;
}
-void uafs_Shutdown()
+void uafs_Shutdown(void)
{
int rc;
/*
* Donate the current thread to the RX server pool.
*/
-void uafs_RxServerProc()
+void uafs_RxServerProc(void)
{
osi_socket sock;
int threadID;
};
#ifdef NETSCAPE_NSAPI
-void syscallThread(argp)
+void syscallThread(void *argp)
#else /* NETSCAPE_NSAPI */
-void *syscallThread(argp)
+void *syscallThread(void *argp)
#endif /* NETSCAPE_NSAPI */
-void *argp;
{
int i;
struct usr_ucred *crp;
return 0;
}
-int uafs_RPCStatsEnableProc()
+int uafs_RPCStatsEnableProc(void)
{
int rc;
struct afs_ioctl iob;
return rc;
}
-int uafs_RPCStatsDisableProc()
+int uafs_RPCStatsDisableProc(void)
{
int rc;
struct afs_ioctl iob;
return rc;
}
-int uafs_RPCStatsClearProc()
+int uafs_RPCStatsClearProc(void)
{
int rc;
struct afs_ioctl iob;
return rc;
}
-int uafs_RPCStatsEnablePeer()
+int uafs_RPCStatsEnablePeer(void)
{
int rc;
struct afs_ioctl iob;
return rc;
}
-int uafs_RPCStatsDisablePeer()
+int uafs_RPCStatsDisablePeer(void)
{
int rc;
struct afs_ioctl iob;
return rc;
}
-int uafs_RPCStatsClearPeer()
+int uafs_RPCStatsClearPeer(void)
{
int rc;
struct afs_ioctl iob;
/*
* Set the working directory.
*/
-int uafs_chdir(
- char *path)
+int uafs_chdir(char *path)
{
int retval;
AFS_GLOCK();
return retval;
}
-int uafs_chdir_r(
- char *path)
+int uafs_chdir_r(char *path)
{
int code;
struct vnode *dirP;
/*
* Create a directory.
*/
-int uafs_mkdir(
- char *path,
- int mode)
+int uafs_mkdir(char *path, int mode)
{
int retval;
AFS_GLOCK();
return retval;
}
-int uafs_mkdir_r(
- char *path,
- int mode)
+int uafs_mkdir_r(char *path, int mode)
{
int code;
char *nameP;
* Open a file
* Note: file name may not end in a slash.
*/
-int uafs_open(
- char *path,
- int flags,
- int mode)
+int uafs_open(char *path, int flags, int mode)
{
int retval;
AFS_GLOCK();
return retval;
}
-int uafs_open_r(
- char *path,
- int flags,
- int mode)
+int uafs_open_r(char *path, int flags, int mode)
{
int fd;
int code;
/*
* Create a file
*/
-int uafs_creat(
- char *path,
- int mode)
+int uafs_creat(char *path, int mode)
{
int rc;
rc = uafs_open(path, O_CREAT|O_WRONLY|O_TRUNC, mode);
return rc;
}
-int uafs_creat_r(
- char *path,
- int mode)
+int uafs_creat_r(char *path, int mode)
{
int rc;
rc = uafs_open_r(path, O_CREAT|O_WRONLY|O_TRUNC, mode);
/*
* Write to a file
*/
-int uafs_write(
- int fd,
- char *buf,
- int len)
+int uafs_write(int fd, char *buf, int len)
{
int retval;
AFS_GLOCK();
return retval;
}
-int uafs_write_r(
- int fd,
- char *buf,
- int len)
+int uafs_write_r(int fd, char *buf, int len)
{
int code;
struct usr_uio uio;
/*
* Read from a file
*/
-int uafs_read(
- int fd,
- char *buf,
- int len)
+int uafs_read(int fd, char *buf, int len)
{
int retval;
AFS_GLOCK();
return retval;
}
-int uafs_read_r(
- int fd,
- char *buf,
- int len)
+int uafs_read_r(int fd, char *buf, int len)
{
int code;
struct usr_uio uio;
*
* NOTE: Caller must hold the global AFS lock.
*/
-int uafs_GetAttr(
- struct usr_vnode *vp,
- struct stat *stats)
+int uafs_GetAttr(struct usr_vnode *vp, struct stat *stats)
{
int code;
struct usr_vattr attrs;
/*
* Destroy AFS credentials from the kernel cache
*/
-int uafs_unlog()
+int uafs_unlog(void)
{
int code;
return code;
}
-int uafs_unlog_r()
+int uafs_unlog_r(void)
{
int retval;
AFS_GUNLOCK();
#include "../afs/dir.h"
#include "../afs/afs_axscache.h"
#include "../afs/icl.h"
+#include "../afs/afs_stats.h"
#include "../afs/afs_prototypes.h"
}
*newpag = (pagvalue == -1 ? genpag(): pagvalue);
afs_get_groups_from_pag(*newpag, &gidset[0], &gidset[1]);
- if (code = afs_setgroups(cred, ngroups, gidset, change_parent)) {
+ if ((code = afs_setgroups(cred, ngroups, gidset, change_parent))) {
osi_FreeSmallSpace((char *)gidset);
return (code);
}
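
The doubled parentheses in tests like the one above are deliberate: gcc -Wall
warns when a plain assignment is used as a truth value, and the extra pair is
the conventional way of saying the '=' is intended. A standalone illustration
(do_thing is made up here, not a routine in the tree):

  #include <stdio.h>

  static int do_thing(void) { return 42; }   /* stand-in for any call returning a code */

  int main(void)
  {
      int code;
      /* "if (code = do_thing())" draws a -Wall warning about assignment as truth value */
      if ((code = do_thing()))   /* extra parens: the assignment is intentional */
          printf("code = %d\n", code);
      return 0;
  }
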
if (!(code = afs_InitReq(&treq, u.u_cred)) &&
!(code = afs_CheckInit())) {
- tvp = afs_GetVCache(&afs_rootFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
/* we really want this to stay around */
if (tvp) {
afs_globalVp = tvp;
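
The vcache routines lose their lock-type and hold-type arguments throughout.
The calls above and below imply interfaces roughly like the following; the
argument names and exact types are read off the call sites, not copied from
the headers, so treat them as approximate:

  struct vcache *afs_GetVCache(struct VenusFid *afid, struct vrequest *areq,
                               afs_int32 *cached, struct vcache *avc);
  struct vcache *afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
                                  afs_int32 *cached, struct vcache *adp, char *aname);
  struct vcache *afs_FindVCache(struct VenusFid *afid, int *retry, int flags);
  struct vcache *afs_NewVCache(struct VenusFid *afid, struct server *serverp);
  void afs_PutVCache(struct vcache *avc);

so WRITE_LOCK/READ_LOCK/DO_STATS-style arguments only survive where they still
mean something (for instance the flags word of afs_FindVCache).
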
#include "../afs/afsincludes.h" /* Afs-based standard headers */
#include "../afs/afs_stats.h" /* statistics */
-void osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+void osi_VM_Truncate(struct vcache *avc, int alen, struct AFS_UCRED *acred)
{
return;
}
-int osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+int osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
return 0;
}
-void osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+void osi_VM_StoreAllSegments(struct vcache *avc)
{
return;
}
-void osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+void osi_VM_TryToSmush(struct vcache *avc, struct AFS_UCRED *acred, int sync)
{
return;
}
-void osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+void osi_VM_FlushPages(struct vcache *avc, struct AFS_UCRED *credp)
{
return;
}
#define u_rval1 u_r.r_val1
#define u (*(get_user_struct()))
-extern struct usr_user *get_user_struct();
+extern struct usr_user *get_user_struct(void);
#define USR_DIRSIZE 2048
};
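
Since u expands to (*(get_user_struct())), what looks like a reference to the
traditional global user area is really a call into the per-thread data on
every use; for example (u_error is only illustrative, whatever fields
struct usr_user actually has):

  u.u_error = 0;                         /* what the source says */
  (*(get_user_struct())).u_error = 0;    /* what the preprocessor produces */

which is why get_user_struct() now carries a proper (void) prototype.
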
/* avc must be held. Returns bit map of mode bits. Ignores file mode bits */
-afs_int32 afs_GetAccessBits (avc, arights, areq)
- register struct vcache *avc;
- register afs_int32 arights;
- register struct vrequest *areq;
+afs_int32 afs_GetAccessBits(register struct vcache *avc, register afs_int32 arights,
+ register struct vrequest *areq)
{
AFS_STATCNT(afs_GetAccessBits);
/* see if anyuser has the required access bits */
/* the new access ok function. AVC must be held but not locked. if avc is a
* file, its parent need not be held, and should not be locked. */
-afs_AccessOK(avc, arights, areq, check_mode_bits)
-struct vcache *avc;
-afs_int32 arights, check_mode_bits;
-struct vrequest *areq; {
+int afs_AccessOK(struct vcache *avc, afs_int32 arights, struct vrequest *areq,
+ afs_int32 check_mode_bits)
+{
register struct vcache *tvc;
struct VenusFid dirFid;
register afs_int32 mask;
dirFid.Fid.Vnode = avc->parentVnode;
dirFid.Fid.Unique = avc->parentUnique;
/* Avoid this GetVCache call */
- tvc = afs_GetVCache(&dirFid, areq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvc = afs_GetVCache(&dirFid, areq, NULL, NULL);
if (tvc) {
dirBits = afs_GetAccessBits(tvc, arights, areq);
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
}
}
else
}
-#if defined(AFS_SUN5_ENV) || (defined(AFS_SGI_ENV) && !defined(AFS_SGI65_ENV))
-afs_access(OSI_VC_ARG(avc), amode, flags, acred)
- int flags;
+#if defined(AFS_SUN5_ENV) || (defined(AFS_SGI_ENV) && !defined(AFS_SGI65_ENV))
+int afs_access(OSI_VC_DECL(avc), register afs_int32 amode, int flags, struct AFS_UCRED *acred)
#else
-afs_access(OSI_VC_ARG(avc), amode, acred)
+int afs_access(OSI_VC_DECL(avc), register afs_int32 amode, struct AFS_UCRED *acred)
#endif
- OSI_VC_DECL(avc);
- register afs_int32 amode;
- struct AFS_UCRED *acred; {
+{
register afs_int32 code;
struct vrequest treq;
struct afs_fakestat_state fakestate;
ICL_TYPE_INT32, amode,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
afs_InitFakeStat(&fakestate);
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
code = afs_EvalFakeStat(&avc, &fakestate, &treq);
if (code) {
* afs_getRights
* This function is just an interface to afs_GetAccessBits
*/
-int afs_getRights(OSI_VC_ARG(avc), arights, acred)
- OSI_VC_DECL(avc);
- register afs_int32 arights;
- struct AFS_UCRED *acred;
+int afs_getRights(OSI_VC_DECL(avc), register afs_int32 arights, struct AFS_UCRED *acred)
{
register afs_int32 code;
struct vrequest treq;
extern struct vcache *afs_globalVp;
/* copy out attributes from cache entry */
-afs_CopyOutAttrs(avc, attrs)
- register struct vattr *attrs;
- register struct vcache *avc;
+int afs_CopyOutAttrs(register struct vcache *avc, register struct vattr *attrs)
{
register struct volume *tvp;
register struct cell *tcell;
#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
-afs_getattr(OSI_VC_ARG(avc), attrs, flags, acred)
- int flags;
+int afs_getattr(OSI_VC_DECL(avc), struct vattr *attrs, int flags, struct AFS_UCRED *acred)
#else
-afs_getattr(OSI_VC_ARG(avc), attrs, acred)
+int afs_getattr(OSI_VC_DECL(avc), struct vattr *attrs, struct AFS_UCRED *acred)
#endif
- OSI_VC_DECL(avc);
- struct vattr *attrs;
- struct AFS_UCRED *acred;
{
afs_int32 code;
struct vrequest treq;
if (afs_nfsexporter) {
if (!inited) {
- if (code = afs_InitReq(&treq, acred))
+ if ((code = afs_InitReq(&treq, acred)))
return code;
inited = 1;
}
#endif
}
}
- if (au = afs_FindUser(treq.uid, -1, READ_LOCK)) {
+ if ((au = afs_FindUser(treq.uid, -1, READ_LOCK))) {
register struct afs_exporter *exporter = au->exporter;
if (exporter && !(afs_nfsexporter->exp_states & EXP_UNIXMODE)) {
}
/* convert a Unix request into a status store request */
-afs_VAttrToAS(avc, av, as)
-register struct vcache *avc;
-register struct vattr *av;
-register struct AFSStoreStatus *as; {
+int afs_VAttrToAS(register struct vcache *avc, register struct vattr *av,
+ register struct AFSStoreStatus *as)
+{
register int mask;
mask = 0;
AFS_STATCNT(afs_VAttrToAS);
/* We don't set CDirty bit in avc->states because setattr calls WriteVCache
* synchronously, therefore, it's not needed.
*/
-#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
-afs_setattr(OSI_VC_ARG(avc), attrs, flags, acred)
- int flags;
+#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
+int afs_setattr(OSI_VC_DECL(avc), register struct vattr *attrs, int flags, struct AFS_UCRED *acred)
#else
-afs_setattr(avc, attrs, acred)
+int afs_setattr(OSI_VC_DECL(avc), register struct vattr *attrs, struct AFS_UCRED *acred)
#endif
- OSI_VC_DECL(avc);
- register struct vattr *attrs;
- struct AFS_UCRED *acred; {
+{
struct vrequest treq;
struct AFSStoreStatus astat;
register afs_int32 code;
AFS_STATCNT(afs_setattr);
afs_Trace2(afs_iclSetp, CM_TRACE_SETATTR, ICL_TYPE_POINTER, avc,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
afs_InitFakeStat(&fakestate);
code = afs_EvalFakeStat(&avc, &fakestate, &treq);
#include "../afs/nfsclient.h"
#include "../afs/afs_osidnlc.h"
-extern afs_rwlock_t afs_xvcache;
-extern afs_rwlock_t afs_xcbhash;
-
/* question: does afs_create need to set CDirty in the adp or the avc?
* I think we can get away without it, but I'm not sure. Note that
* afs_setattr is called in here for truncation.
*/
#ifdef AFS_OSF_ENV
-afs_create(ndp, attrs)
- struct nameidata *ndp;
- struct vattr *attrs; {
+int afs_create(struct nameidata *ndp, struct vattr *attrs)
+#else /* AFS_OSF_ENV */
+#ifdef AFS_SGI64_ENV
+int afs_create(OSI_VC_DECL(adp), char *aname, struct vattr *attrs, int flags,
+ int amode, struct vcache **avcp, struct AFS_UCRED *acred)
+#else /* AFS_SGI64_ENV */
+int afs_create(OSI_VC_DECL(adp), char *aname, struct vattr *attrs, enum vcexcl aexcl,
+ int amode, struct vcache **avcp, struct AFS_UCRED *acred)
+#endif /* AFS_SGI64_ENV */
+#endif /* AFS_OSF_ENV */
+{
+#ifdef AFS_OSF_ENV
register struct vcache *adp = VTOAFS(ndp->ni_dvp);
char *aname = ndp->ni_dent.d_name;
enum vcexcl aexcl = NONEXCL; /* XXX - create called properly */
int amode = 0; /* XXX - checked in higher level */
struct vcache **avcp = (struct vcache **)&(ndp->ni_vp);
struct ucred *acred = ndp->ni_cred;
-#else /* AFS_OSF_ENV */
-#ifdef AFS_SGI64_ENV
-afs_create(OSI_VC_ARG(adp), aname, attrs, flags, amode, avcp, acred)
- int flags;
-#else /* AFS_SGI64_ENV */
-afs_create(OSI_VC_ARG(adp), aname, attrs, aexcl, amode, avcp, acred)
- enum vcexcl aexcl;
-#endif /* AFS_SGI64_ENV */
- OSI_VC_DECL(adp);
- char *aname;
- struct vattr *attrs;
- int amode;
- struct vcache **avcp;
- struct AFS_UCRED *acred; {
-#endif /* AFS_OSF_ENV */
+#endif
+
afs_int32 origCBs, origZaps, finalZaps;
struct vrequest treq;
register afs_int32 code;
AFS_STATCNT(afs_create);
- if (code = afs_InitReq(&treq, acred))
+ if ((code = afs_InitReq(&treq, acred)))
goto done2;
afs_Trace3(afs_iclSetp, CM_TRACE_CREATE, ICL_TYPE_POINTER, adp,
/* found the file, so use it */
newFid.Cell = adp->fid.Cell;
newFid.Fid.Volume = adp->fid.Fid.Volume;
- tvc = (struct vcache *)0;
+ tvc = NULL;
if (newFid.Fid.Unique == 0) {
- tvc = afs_LookupVCache(&newFid, &treq, (afs_int32 *)0,
- WRITE_LOCK, adp, aname);
+ tvc = afs_LookupVCache(&newFid, &treq, NULL, adp, aname);
}
if (!tvc) /* lookup failed or wasn't called */
- tvc = afs_GetVCache(&newFid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvc = afs_GetVCache(&newFid, &treq, NULL, NULL);
if (tvc) {
/* if the thing exists, we need the right access to open it.
*/
if ((amode & VREAD) &&
!afs_AccessOK(tvc, PRSFS_READ, &treq, CHECK_MODE_BITS)) {
- afs_PutVCache(tvc, READ_LOCK);
+ afs_PutVCache(tvc);
code = EACCES;
goto done;
}
tvc->parentUnique = adp->fid.Fid.Unique;
/* need write mode for these guys */
if (!afs_AccessOK(tvc, PRSFS_WRITE, &treq, CHECK_MODE_BITS)) {
- afs_PutVCache(tvc, READ_LOCK);
+ afs_PutVCache(tvc);
code = EACCES;
goto done;
}
#endif
{
if (vType(tvc) != VREG) {
- afs_PutVCache(tvc, READ_LOCK);
+ afs_PutVCache(tvc);
code = EISDIR;
goto done;
}
tvc->states &= ~CCreating;
ReleaseWriteLock(&tvc->lock);
if (code) {
- afs_PutVCache(tvc, 0);
+ afs_PutVCache(tvc);
goto done;
}
}
hostp = tc->srvr->server; /* remember for callback processing */
now = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_CREATEFILE);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_CreateFile(tc->id, (struct AFSFid *) &adp->fid.Fid,
aname, &InStatus, (struct AFSFid *)
&newFid.Fid, &OutFidStatus,
&OutDirStatus, &CallBack, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
CallBack.ExpirationTime += now;
}
else code = -1;
} while
(afs_Analyze(tc, code, &adp->fid, &treq,
- AFS_STATS_FS_RPCIDX_CREATEFILE, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_CREATEFILE, SHARED_LOCK, NULL));
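
The #ifdef RX_ENABLE_LOCKS fences around the RPCs are gone in favor of
RX_AFS_GUNLOCK()/RX_AFS_GLOCK(). The real definitions are not quoted in this
hunk, but they presumably come down to something like this, which keeps the
call sites free of preprocessor clutter:

  #ifdef RX_ENABLE_LOCKS
  #define RX_AFS_GLOCK()          AFS_GLOCK()
  #define RX_AFS_GUNLOCK()        AFS_GUNLOCK()
  #else
  #define RX_AFS_GLOCK()
  #define RX_AFS_GUNLOCK()
  #endif
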
#if defined(AFS_OSF_ENV) || defined(AFS_DARWIN_ENV)
if (code == EEXIST && aexcl == NONEXCL) {
#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
#if defined(AFS_SGI64_ENV)
code = afs_lookup(VNODE_TO_FIRST_BHV((vnode_t*)adp), aname, avcp,
- (struct pathname *)0, 0,
- (struct vnode *)0, acred);
+ NULL, 0,
+ NULL, acred);
#else
- code = afs_lookup(adp, aname, avcp, (struct pathname *)0, 0,
- (struct vnode *)0, acred);
+ code = afs_lookup(adp, aname, avcp, NULL, 0,
+ NULL, acred);
#endif /* AFS_SGI64_ENV */
#else /* SUN5 || SGI */
code = afs_lookup(adp, aname, avcp, acred);
freeing of the vnode will change evenZaps. Don't need to update the VLRU
queue, since the find will only succeed in the event of a create race, and
then the vcache will be at the front of the VLRU queue anyway... */
- if (!(tvc = afs_FindVCache(&newFid, 0, WRITE_LOCK,
- 0, DO_STATS))) {
- tvc = afs_NewVCache(&newFid, hostp, 0, WRITE_LOCK);
+ if (!(tvc = afs_FindVCache(&newFid, 0, DO_STATS))) {
+ tvc = afs_NewVCache(&newFid, hostp);
if (tvc) {
int finalCBs;
ObtainWriteLock(&tvc->lock,139);
done2:
#ifdef AFS_OSF_ENV
- afs_PutVCache(adp, 0);
+ afs_PutVCache(adp);
#endif /* AFS_OSF_ENV */
return code;
* This routine must be called with the stat cache entry write-locked,
* and dcache entry write-locked.
*/
-afs_LocalHero(avc, adc, astat, aincr)
- register struct vcache *avc;
- register AFSFetchStatus *astat;
- register struct dcache *adc;
- register int aincr; {
+int afs_LocalHero(register struct vcache *avc, register struct dcache *adc,
+ register AFSFetchStatus *astat, register int aincr)
+{
register afs_int32 ok;
afs_hyper_t avers;
afs_Trace2(afs_iclSetp, CM_TRACE_MKDIR, ICL_TYPE_POINTER, adp,
ICL_TYPE_STRING, aname);
- if (code = afs_InitReq(&treq, acred))
+ if ((code = afs_InitReq(&treq, acred)))
goto done2;
afs_InitFakeStat(&fakestate);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_MAKEDIR);
now = osi_Time();
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_MakeDir(tc->id, (struct AFSFid *) &adp->fid.Fid, aname,
&InStatus, (struct AFSFid *) &newFid.Fid,
&OutFidStatus, &OutDirStatus, &CallBack, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
CallBack.ExpirationTime += now;
/* DON'T forget to Set the callback value... */
else code = -1;
} while
(afs_Analyze(tc, code, &adp->fid, &treq,
- AFS_STATS_FS_RPCIDX_MAKEDIR, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_MAKEDIR, SHARED_LOCK, NULL));
if (code) {
if (code < 0) {
newFid.Fid.Volume = adp->fid.Fid.Volume;
ReleaseWriteLock(&adp->lock);
/* now we're done with parent dir, create the real dir's cache entry */
- tvc = afs_GetVCache(&newFid, &treq, (afs_int32 *)0, (struct vcache*)0, 0);
+ tvc = afs_GetVCache(&newFid, &treq, NULL, NULL);
if (tvc) {
code = 0;
*avcp = tvc;
#endif
struct vrequest treq;
register struct dcache *tdc;
- register struct vcache *tvc = (struct vcache *)0;
+ register struct vcache *tvc = NULL;
register afs_int32 code;
register struct conn *tc;
afs_size_t offset, len;
afs_Trace2(afs_iclSetp, CM_TRACE_RMDIR, ICL_TYPE_POINTER, adp,
ICL_TYPE_STRING, aname);
- if (code = afs_InitReq(&treq, acred))
+ if ((code = afs_InitReq(&treq, acred)))
goto done2;
afs_InitFakeStat(&fakestate);
unlinkFid.Cell = adp->fid.Cell;
unlinkFid.Fid.Volume = adp->fid.Fid.Volume;
if (unlinkFid.Fid.Unique == 0) {
- tvc = afs_LookupVCache(&unlinkFid, &treq, &cached,
- WRITE_LOCK, adp, aname);
+ tvc = afs_LookupVCache(&unlinkFid, &treq, &cached, adp, aname);
} else {
ObtainReadLock(&afs_xvcache);
- tvc = afs_FindVCache(&unlinkFid, 1, WRITE_LOCK,
- 0, 1/* do xstats */);
+ tvc = afs_FindVCache(&unlinkFid, 0, 1/* do xstats */);
ReleaseReadLock(&afs_xvcache);
}
}
tc = afs_Conn(&adp->fid, &treq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_REMOVEDIR);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_RemoveDir(tc->id, (struct AFSFid *) &adp->fid.Fid,
aname, &OutDirStatus, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
} while
(afs_Analyze(tc, code, &adp->fid, &treq,
- AFS_STATS_FS_RPCIDX_REMOVEDIR, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_REMOVEDIR, SHARED_LOCK, NULL));
if (code) {
if (tdc) {
ObtainWriteLock(&tvc->lock,155);
tvc->states &= ~CUnique; /* For the dfs xlator */
ReleaseWriteLock(&tvc->lock);
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
}
ReleaseWriteLock(&adp->lock);
/* don't worry about link count since dirs can not be hardlinked */
code = afs_CheckCode(code, &treq, 27);
done2:
#ifdef AFS_OSF_ENV
- afs_PutVCache(adp, 0);
- afs_PutVCache(ndp->ni_vp, 0);
+ afs_PutVCache(adp);
+ afs_PutVCache(ndp->ni_vp);
#endif /* AFS_OSF_ENV */
return code;
}
#include "../afs/nfsclient.h"
#include "../afs/afs_osidnlc.h"
-#if defined(AFS_HPUX102_ENV)
-#define AFS_FLOCK k_flock
-#else
-#if defined(AFS_SUN56_ENV) || defined(AFS_LINUX24_ENV)
-#define AFS_FLOCK flock64
-#else
-#define AFS_FLOCK flock
-#endif /* AFS_SUN65_ENV */
-#endif /* AFS_HPUX102_ENV */
-
+/* Static prototypes */
+static int HandleGetLock(register struct vcache *avc,
+ register struct AFS_FLOCK *af, register struct vrequest *areq, int clid);
static int GetFlockCount(struct vcache *avc, struct vrequest *areq);
+static int lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
+ register struct SimpleLocks *alp, int onlymine, int clid);
+static void DoLockWarning(void);
-void lockIdSet(flock, slp, clid)
- int clid; /* non-zero on SGI, OSF, SunOS, Darwin, xBSD *//* XXX ptr type */
- struct SimpleLocks *slp;
- struct AFS_FLOCK *flock;
+/* clid - non-zero on SGI, OSF, SunOS, Darwin, xBSD (XXX: ptr type) */
+void lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
{
#if defined(AFS_SUN5_ENV)
register proc_t *procp = ttoproc(curthread);
* to p_ppid? Especially in the context of the lower loop, where
* the repeated comparison doesn't make much sense...
*/
-static int lockIdcmp2(flock1, vp, alp, onlymine, clid)
- struct AFS_FLOCK *flock1;
- struct vcache *vp;
- register struct SimpleLocks *alp;
- int onlymine; /* don't match any locks which are held by my */
- /* parent */
- int clid; /* Only Irix 6.5 for now. */
+/* onlymine - don't match any locks which are held by my parent */
+/* clid - Only Irix 6.5 for now */
+
+static int lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
+ register struct SimpleLocks *alp, int onlymine, int clid)
{
register struct SimpleLocks *slp;
#if defined(AFS_SUN5_ENV)
#endif /* AFS_SGI64_ENV */
#endif
#endif
- int code = 0;
if (alp) {
#if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
file, I guess we'll permit it. however, we don't want simple,
innocent closes by children to unlock files in the parent process.
*/
-HandleFlock(avc, acom, areq, clid, onlymine)
- pid_t clid; /* non-zero on SGI, SunOS, OSF1 only */
- register struct vcache *avc;
- struct vrequest *areq;
- int onlymine;
- int acom; {
+/* clid - non-zero on SGI, SunOS, OSF1 only */
+int HandleFlock(register struct vcache *avc, int acom,
+ struct vrequest *areq, pid_t clid, int onlymine)
+{
struct conn *tc;
struct SimpleLocks *slp, *tlp, **slpp;
afs_int32 code;
AFS_STATCNT(HandleFlock);
code = 0; /* default when we don't make any network calls */
- lockIdSet(&flock, (struct SimpleLocks *)0, clid);
+ lockIdSet(&flock, NULL, clid);
#if defined(AFS_SGI_ENV)
osi_Assert(valusema(&avc->vc_rwlock) <= 0);
}
}
#endif
- if (lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, onlymine, clid)) {
+ if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
ReleaseWriteLock(&avc->lock);
return 0;
}
tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_ReleaseLock(tc->id, (struct AFSFid *)
&avc->fid.Fid, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
} while
(afs_Analyze(tc, code, &avc->fid, areq,
AFS_STATS_FS_RPCIDX_RELEASELOCK,
- SHARED_LOCK, (struct cell *)0));
+ SHARED_LOCK, NULL));
}
}
else {
tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_ReleaseLock(tc->id,
(struct AFSFid *) &avc->fid.Fid,
&tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
} while
(afs_Analyze(tc, code, &avc->fid, areq,
AFS_STATS_FS_RPCIDX_RELEASELOCK,
- SHARED_LOCK, (struct cell *)0));
+ SHARED_LOCK, NULL));
}
} else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
- if (lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
+ if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
code = EWOULDBLOCK;
} else
code = 0;
tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_SetLock(tc->id, (struct AFSFid *)
&avc->fid.Fid, lockType, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
} while
(afs_Analyze(tc, code, &avc->fid, areq,
AFS_STATS_FS_RPCIDX_SETLOCK,
- SHARED_LOCK, (struct cell *)0));
+ SHARED_LOCK, NULL));
}
else code = 0; /* otherwise, pretend things worked */
}
#endif
slp->type = LockWrite;
- slp->next = (struct SimpleLocks *)0;
+ slp->next = NULL;
avc->slocks = slp;
avc->flockCount = -1;
} else {
/* warn a user that a lock has been ignored */
-afs_int32 lastWarnTime = 0;
-static void DoLockWarning() {
+afs_int32 lastWarnTime = 0; /* this is used elsewhere */
+static void DoLockWarning(void)
+{
register afs_int32 now;
now = osi_Time();
#ifdef AFS_OSF_ENV
-afs_lockctl(avc, af, flag, acred, clid, offset)
-struct eflock *af;
-int flag;
-pid_t clid;
-off_t offset;
+int afs_lockctl(struct vcache *avc, struct eflock *af, int flag,
+ struct AFS_UCRED *acred, pid_t clid, off_t offset)
#else
#if defined(AFS_SGI_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)) || defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
-afs_lockctl(avc, af, acmd, acred, clid)
+int afs_lockctl(struct vcache *avc, struct AFS_FLOCK *af, int acmd, struct AFS_UCRED *acred, pid_t clid)
-pid_t clid;
#else
u_int clid=0;
-afs_lockctl(avc, af, acmd, acred)
+int afs_lockctl(struct vcache *avc, struct AFS_FLOCK *af, int acmd, struct AFS_UCRED *acred)
#endif
-struct AFS_FLOCK *af;
-int acmd;
#endif
-struct vcache *avc;
-struct AFS_UCRED *acred; {
+{
struct vrequest treq;
afs_int32 code;
#ifdef AFS_OSF_ENV
struct afs_fakestat_state fakestate;
AFS_STATCNT(afs_lockctl);
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
afs_InitFakeStat(&fakestate);
code = afs_EvalFakeStat(&avc, &fakestate, &treq);
if (code) {
* PID has the file read locked.
*/
#ifndef AFS_OSF_ENV /* getlock is a no-op for osf (for now) */
-HandleGetLock(avc, af, areq, clid)
- int clid; /* not used by some OSes */
- register struct vcache *avc;
- register struct vrequest *areq;
- register struct AFS_FLOCK *af;
+static int HandleGetLock(register struct vcache *avc,
+ register struct AFS_FLOCK *af, register struct vrequest *areq, int clid)
{
register afs_int32 code;
struct AFS_FLOCK flock;
- lockIdSet(&flock, (struct SimpleLocks *)0, clid);
+ lockIdSet(&flock, NULL, clid);
ObtainWriteLock(&avc->lock,122);
if (avc->flockCount == 0) {
* write lock, say it is unlocked.
*/
if (avc->flockCount > 0 || /* only read locks */
- !lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
+ !lockIdcmp2(&flock, avc, NULL, 1, clid)) {
af->l_type = F_UNLCK;
goto unlck_leave;
}
* already, and it is not this process, we fail.
*/
if (avc->flockCount < 0) {
- if (lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
+ if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
af->l_type = F_WRLCK;
if (avc->slocks) {
af->l_pid = avc->slocks->pid;
* If there is more than one, or it isn't us, we cannot lock.
*/
if ((avc->flockCount > 1)
- || lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
+ || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
struct SimpleLocks *slp;
af->l_type = F_RDLCK;
#endif
/* find a pid that isn't our own */
for (slp = avc->slocks; slp; slp = slp->next) {
- if (lockIdcmp2(&flock, (struct vcache *)0, slp, 1, clid)) {
+ if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
af->l_pid = slp->pid;
#if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
af->l_sysid = avc->slocks->sysid;
* already, and it is not this process, we fail.
*/
if (avc->flockCount < 0) {
- if (lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
+ if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
af->l_type = F_WRLCK;
if (avc->slocks) {
af->l_pid = avc->slocks->pid;
* If there is more than one, or it isn't us, we cannot lock.
*/
if ((avc->flockCount > 1)
- || lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
+ || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
struct SimpleLocks *slp;
af->l_type = F_RDLCK;
af->l_pid = 0;
#endif
/* find a pid that isn't our own */
for (slp = avc->slocks; slp; slp = slp->next) {
- if (lockIdcmp2(&flock, (struct vcache *)0, slp, 1, clid)) {
+ if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
af->l_pid = slp->pid;
#if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
af->l_sysid = avc->slocks->sysid;
tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
if (tc){
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_FetchStatus(tc->id, (struct AFSFid *) &avc->fid.Fid,
&OutStatus, &CallBack, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
} else code = -1;
} while
(afs_Analyze(tc, code, &avc->fid, areq,
AFS_STATS_FS_RPCIDX_FETCHSTATUS,
- SHARED_LOCK, (struct cell *)0));
+ SHARED_LOCK, NULL));
if (temp)
areq->flags &= ~O_NONBLOCK;
/* Flock not support on System V systems */
#ifdef AFS_OSF_ENV
extern struct fileops afs_fileops;
-afs_xflock (p, args, retval)
- struct proc *p;
- void *args;
- int *retval;
-{
+
+int afs_xflock (struct proc *p, void *args, int *retval)
#else /* AFS_OSF_ENV */
-afs_xflock () {
+int afs_xflock (void)
#endif
+{
int code = 0;
struct a {
int fd;
afs_Trace3(afs_iclSetp, CM_TRACE_LINK, ICL_TYPE_POINTER, adp,
ICL_TYPE_POINTER, avc, ICL_TYPE_STRING, aname);
/* create a hard link; new entry is aname in dir adp */
- if (code = afs_InitReq(&treq, acred))
+ if ((code = afs_InitReq(&treq, acred)))
goto done2;
afs_InitFakeStat(&vfakestate);
tc = afs_Conn(&adp->fid, &treq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_LINK);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_Link(tc->id, (struct AFSFid *) &adp->fid.Fid, aname,
(struct AFSFid *) &avc->fid.Fid, &OutFidStatus,
&OutDirStatus, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
} while
(afs_Analyze(tc, code, &adp->fid, &treq,
- AFS_STATS_FS_RPCIDX_LINK, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_LINK, SHARED_LOCK, NULL));
if (code) {
if (tdc) afs_PutDCache(tdc);
afs_PutFakeStat(&dfakestate);
done2:
#ifdef AFS_OSF_ENV
- afs_PutVCache(adp, WRITE_LOCK);
+ afs_PutVCache(adp);
#endif /* AFS_OSF_ENV */
return code;
}
* what "@sys" is in binary... */
#define AFS_EQ_ATSYS(name) (((name)[0]=='@')&&((name)[1]=='s')&&((name)[2]=='y')&&((name)[3]=='s')&&(!(name)[4]))
-char *
-afs_strcat(s1, s2)
- register char *s1, *s2;
+char *afs_strcat(register char *s1, register char *s2)
{
register char *os1;
while (*s1++)
;
--s1;
- while (*s1++ = *s2++)
+ while ((*s1++ = *s2++))
;
return (os1);
}
-char *afs_index(a, c)
- register char *a, c; {
+char *afs_index(register char *a, register char c)
+{
register char tc;
AFS_STATCNT(afs_index);
- while (tc = *a) {
+ while ((tc = *a)) {
if (tc == c) return a;
else a++;
}
- return (char *) 0;
+ return NULL;
}
/* call under write lock, evaluate mvid field from a mt pt.
*
* NOTE: this function returns a held volume structure in *volpp if it returns 0!
*/
-EvalMountPoint(avc, advc, avolpp, areq)
- register struct vcache *avc;
- struct volume **avolpp;
- struct vcache *advc; /* the containing dir */
- register struct vrequest *areq;
+int EvalMountPoint(register struct vcache *avc, struct vcache *advc,
+ struct volume **avolpp, register struct vrequest *areq)
{
afs_int32 code;
struct volume *tvp = 0;
#ifdef notdef
if (avc->mvid && (avc->states & CMValid)) return 0; /* done while racing */
#endif
- *avolpp = (struct volume *)0;
+ *avolpp = NULL;
code = afs_HandleLink(avc, areq);
if (code) return code;
* without calling afs_EvalFakeStat is legal, as long as this
* function is called.
*/
-
-void
-afs_InitFakeStat(state)
- struct afs_fakestat_state *state;
+void afs_InitFakeStat(struct afs_fakestat_state *state)
{
state->valid = 1;
state->did_eval = 0;
*
* Only issues RPCs if canblock is non-zero.
*/
-static int
-afs_EvalFakeStat_int(avcp, state, areq, canblock)
- struct vcache **avcp;
- struct afs_fakestat_state *state;
- struct vrequest *areq;
- int canblock;
+static int afs_EvalFakeStat_int(struct vcache **avcp, struct afs_fakestat_state *state,
+ struct vrequest *areq, int canblock)
{
struct vcache *tvc, *root_vp;
struct volume *tvolp = NULL;
do {
retry = 0;
ObtainWriteLock(&afs_xvcache, 597);
- root_vp = afs_FindVCache(tvc->mvid, 0, 0, &retry, 0);
+ root_vp = afs_FindVCache(tvc->mvid, &retry, 0);
if (root_vp && retry) {
ReleaseWriteLock(&afs_xvcache);
- afs_PutVCache(root_vp, 0);
+ afs_PutVCache(root_vp);
}
} while (root_vp && retry);
ReleaseWriteLock(&afs_xvcache);
} else {
- root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL, WRITE_LOCK);
+ root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL);
}
if (!root_vp) {
code = canblock ? ENOENT : 0;
* something goes wrong and the error code should be returned to the user.
*/
int
-afs_EvalFakeStat(avcp, state, areq)
- struct vcache **avcp;
- struct afs_fakestat_state *state;
- struct vrequest *areq;
+afs_EvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
+ struct vrequest *areq)
{
return afs_EvalFakeStat_int(avcp, state, areq, 1);
}
* Returns 0 if everything succeeds and *avcp points to a valid
* vcache entry (possibly evaluated).
*/
-int
-afs_TryEvalFakeStat(avcp, state, areq)
- struct vcache **avcp;
- struct afs_fakestat_state *state;
- struct vrequest *areq;
+int afs_TryEvalFakeStat(struct vcache **avcp, struct afs_fakestat_state *state,
+ struct vrequest *areq)
{
return afs_EvalFakeStat_int(avcp, state, areq, 0);
}
* Perform any necessary cleanup at the end of a vnode op, given that
* afs_InitFakeStat was previously called with this state.
*/
-void
-afs_PutFakeStat(state)
- struct afs_fakestat_state *state;
+void afs_PutFakeStat(struct afs_fakestat_state *state)
{
osi_Assert(state->valid == 1);
if (state->need_release)
- afs_PutVCache(state->root_vp, 0);
+ afs_PutVCache(state->root_vp);
state->valid = 0;
}
-afs_ENameOK(aname)
- register char *aname; {
+int afs_ENameOK(register char *aname)
+{
register char tc;
register int tlen;
return 1;
}
-afs_getsysname(areq, adp, bufp)
- register struct vrequest *areq;
- register struct vcache *adp;
- register char *bufp;
+int afs_getsysname(register struct vrequest *areq, register struct vcache *adp,
+ register char *bufp)
{
static char sysname[MAXSYSNAME];
register struct unixuser *au;
au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
afs_PutUser(au, 0);
if (au->exporter) {
- error = EXP_SYSNAME(au->exporter, (char *)0, bufp);
+ error = EXP_SYSNAME(au->exporter, NULL, bufp);
if (error)
strcpy(bufp, "@sys");
return -1;
}
}
-Check_AtSys(avc, aname, state, areq)
- register struct vcache *avc;
- char *aname;
- struct sysname_info *state;
- struct vrequest *areq;
+int Check_AtSys(register struct vcache *avc, const char *aname,
+ struct sysname_info *state, struct vrequest *areq)
{
if (AFS_EQ_ATSYS(aname)) {
state->offset = 0;
}
}
-Next_AtSys(avc, areq, state)
- register struct vcache *avc;
- struct vrequest *areq;
- struct sysname_info *state;
+int Next_AtSys(register struct vcache *avc, struct vrequest *areq,
+ struct sysname_info *state)
{
if (state->index == -1)
return 0; /* No list */
* ensure that vcaches created for failed RPC's to older servers have the
* CForeign bit set.
*/
-struct vcache * BStvc = (struct vcache *) 0;
-int afs_DoBulkStat(adp, dirCookie, areqp)
- struct vcache *adp;
- long dirCookie;
- struct vrequest *areqp;
+static struct vcache *BStvc = NULL;
+
+int afs_DoBulkStat(struct vcache *adp, long dirCookie, struct vrequest *areqp)
{
int nentries; /* # of entries to prefetch */
int nskip; /* # of slots in the LRU queue to skip */
do {
retry = 0;
ObtainWriteLock(&afs_xvcache, 130);
- tvcp = afs_FindVCache(&tfid, 0, 0, &retry, 0 /* no stats | LRU */);
+ tvcp = afs_FindVCache(&tfid, &retry, 0 /* no stats | LRU */);
if (tvcp && retry) {
ReleaseWriteLock(&afs_xvcache);
- afs_PutVCache(tvcp, 0);
+ afs_PutVCache(tvcp);
}
} while (tvcp && retry);
if (!tvcp) { /* otherwise, create manually */
- tvcp = afs_NewVCache(&tfid, hostp, 0, 0);
+ tvcp = afs_NewVCache(&tfid, hostp);
ObtainWriteLock(&tvcp->lock, 505);
ReleaseWriteLock(&afs_xvcache);
afs_RemoveVCB(&tfid);
tvcp->m.Length = statSeqNo;
fidIndex++;
}
- afs_PutVCache(tvcp, 0);
+ afs_PutVCache(tvcp);
} /* if dir vnode has non-zero entry */
/* move to the next dir entry by adding in the # of entries
if (tcp) {
hostp = tcp->srvr->server;
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
&volSync);
}
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
} while (afs_Analyze(tcp, code, &adp->fid, areqp,
- AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL));
/* now, if we didnt get the info, bail out. */
if (code) goto done;
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0/* !stats&!lru*/);
+ tvcp = afs_FindVCache(&afid, &retry, 0/* !stats&!lru*/);
ReleaseReadLock(&afs_xvcache);
} while (tvcp && retry);
if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
- afs_PutVCache(tvcp, 0);
+ afs_PutVCache(tvcp);
continue;
}
}
if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
|| (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent4");
+ { refpanic ("Bulkstat VLRU inconsistent4"); }
if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
|| (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent5");
+ { refpanic ("Bulkstat VLRU inconsistent5"); }
if (tvcp != lruvcp) { /* if they are == don't move it, don't corrupt vlru */
QRemove(&tvcp->vlruq);
}
if ((QNext(QPrev(&tvcp->vlruq)) != &tvcp->vlruq)
|| (QPrev(QNext(&tvcp->vlruq)) != &tvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent5");
+ { refpanic ("Bulkstat VLRU inconsistent5"); }
if ((QNext(QPrev(&lruvcp->vlruq)) != &lruvcp->vlruq)
|| (QPrev(QNext(&lruvcp->vlruq)) != &lruvcp->vlruq))
- refpanic ("Bulkstat VLRU inconsistent6");
+ { refpanic ("Bulkstat VLRU inconsistent6"); }
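
The braces added around the refpanic() calls matter because refpanic is a
macro that, on at least some platforms, expands to an if/else of its own;
hanging it bare off another if earns gcc's "suggest explicit braces to avoid
ambiguous else" warning and is fragile if an else is ever added. A contrived
sketch with a made-up PANIC macro, not the real refpanic definition:

  #define PANIC(msg)  if (panic_disabled) note(msg); else real_panic(msg)

  if (queue_is_inconsistent(q))
      PANIC("VLRU inconsistent");        /* nested if/else: -Wall complains */

  if (queue_is_inconsistent(q))
      { PANIC("VLRU inconsistent"); }    /* the macro's else is safely contained */
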
ReleaseWriteLock(&afs_xvcache);
ObtainWriteLock(&afs_xcbhash, 494);
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
ReleaseWriteLock(&afs_xcbhash);
- afs_PutVCache(tvcp, 0);
+ afs_PutVCache(tvcp);
continue;
}
ReleaseWriteLock(&tvcp->lock);
/* finally, we're done with the entry */
- afs_PutVCache(tvcp, 0);
+ afs_PutVCache(tvcp);
} /* for all files we got back */
/* finally return the pointer into the LRU queue */
- afs_PutVCache(lruvcp, 0);
+ afs_PutVCache(lruvcp);
done:
/* Be sure to turn off the CBulkFetching flags */
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvcp = afs_FindVCache(&afid, 1, 0, &retry, 0/* !stats&!lru*/);
+ tvcp = afs_FindVCache(&afid, &retry, 0/* !stats&!lru*/);
ReleaseReadLock(&afs_xvcache);
} while (tvcp && retry);
if (tvcp != NULL
tvcp->states &= ~CBulkFetching;
}
if (tvcp != NULL) {
- afs_PutVCache(tvcp, 0);
+ afs_PutVCache(tvcp);
}
}
if ( volp )
if ((&statsp[0])->errorCode) {
afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
- (struct cell *)0);
+ NULL);
code = (&statsp[0])->errorCode;
}
} else {
}
/* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
-int AFSDOBULK = 1;
+static int AFSDOBULK = 1;
#ifdef AFS_OSF_ENV
afs_lookup(adp, ndp)
struct AFS_UCRED *acred; {
#endif
struct vrequest treq;
- char *tname = (char *)0;
+ char *tname = NULL;
register struct vcache *tvc=0;
register afs_int32 code;
register afs_int32 bulkcode = 0;
AFS_STATCNT(afs_lookup);
afs_InitFakeStat(&fakestate);
- if (code = afs_InitReq(&treq, acred))
+ if ((code = afs_InitReq(&treq, acred)))
goto done;
#ifdef AFS_OSF_ENV
if (code)
goto done;
- *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
+ *avcp = NULL; /* Since some callers don't initialize it */
/* come back to here if we encounter a non-existent object in a read-only
volume's directory */
redo:
- *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
+ *avcp = NULL; /* Since some callers don't initialize it */
bulkcode = 0;
if (!(adp->states & CStatd)) {
- if (code = afs_VerifyVCache2(adp, &treq)) {
+ if ((code = afs_VerifyVCache2(adp, &treq))) {
goto done;
}
}
goto done;
}
/* otherwise we have the fid here, so we use it */
- tvc = afs_GetVCache(adp->mvid, &treq, (afs_int32 *)0,
- (struct vcache*)0, 0);
+ tvc = afs_GetVCache(adp->mvid, &treq, NULL, NULL);
afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT,
ICL_TYPE_FID, adp->mvid, ICL_TYPE_POINTER, tvc,
ICL_TYPE_INT32, code);
/* now check the access */
if (treq.uid != adp->last_looker) {
if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
- *avcp = (struct vcache *)0;
+ *avcp = NULL;
code = EACCES;
goto done;
}
if (tvc) {
if (no_read_access && vType(tvc) != VDIR && vType(tvc) != VLNK) {
/* need read access on dir to stat non-directory / non-link */
- afs_PutVCache(tvc, WRITE_LOCK);
- *avcp = (struct vcache *)0;
+ afs_PutVCache(tvc);
+ *avcp = NULL;
code = EACCES;
goto done;
}
/* now we have to lookup the next fid */
tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
if (!tdc) {
- *avcp = (struct vcache *)0; /* redundant, but harmless */
+ *avcp = NULL; /* redundant, but harmless */
code = EIO;
goto done;
}
do {
retry = 0;
ObtainReadLock(&afs_xvcache);
- tvc = afs_FindVCache(&tfid, 1, 0, &retry, 0/* !stats,!lru */);
+ tvc = afs_FindVCache(&tfid, &retry, 0/* !stats,!lru */);
ReleaseReadLock(&afs_xvcache);
} while (tvc && retry);
/* if the vcache isn't usable, release it */
if (tvc && !(tvc->states & CStatd)) {
- afs_PutVCache(tvc, 0);
- tvc = (struct vcache *) 0;
+ afs_PutVCache(tvc);
+ tvc = NULL;
}
} else {
- tvc = (struct vcache *) 0;
+ tvc = NULL;
bulkcode = 0;
}
if (!tvc) {
afs_int32 cached = 0;
if (!tfid.Fid.Unique && (adp->states & CForeign)) {
- tvc = afs_LookupVCache(&tfid, &treq, &cached, WRITE_LOCK,
- adp, tname);
+ tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
}
if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
- tvc = afs_GetVCache(&tfid, &treq, &cached, (struct vcache*)0,
- WRITE_LOCK);
+ tvc = afs_GetVCache(&tfid, &treq, &cached, NULL);
}
} /* if !tvc */
} /* sub-block just to reduce stack usage */
ReleaseWriteLock(&tvc->lock);
if (code) {
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
goto done;
}
if (tvolp && (tvolp->states & VForeign)) {
/* XXXX tvolp has ref cnt on but not locked! XXX */
- tvc = afs_GetRootVCache(tvc->mvid, &treq, (afs_int32 *)0, tvolp, WRITE_LOCK);
+ tvc = afs_GetRootVCache(tvc->mvid, &treq, NULL, tvolp);
} else {
- tvc = afs_GetVCache(tvc->mvid, &treq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvc = afs_GetVCache(tvc->mvid, &treq, NULL, NULL);
}
- afs_PutVCache(uvc, WRITE_LOCK); /* we're done with it */
+ afs_PutVCache(uvc); /* we're done with it */
if (!tvc) {
code = ENOENT;
* ptr to point back to the appropriate place */
if (tvolp) {
ObtainWriteLock(&tvc->lock,134);
- if (tvc->mvid == (struct VenusFid *) 0) {
+ if (tvc->mvid == NULL) {
tvc->mvid = (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
}
/* setup backpointer */
}
}
else {
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
code = ENOENT;
if (tvolp) afs_PutVolume(tvolp, WRITE_LOCK);
goto done;
/* Handle RENAME; only need to check rename "." */
if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
if (!FidCmp(&(tvc->fid), &(adp->fid))) {
- afs_PutVCache(*avcp, WRITE_LOCK);
+ afs_PutVCache(*avcp);
*avcp = NULL;
afs_PutFakeStat(&fakestate);
return afs_CheckCode(EISDIR, &treq, 18);
/* If there is an error, make sure *avcp is null.
* Alphas panic otherwise - defect 10719.
*/
- *avcp = (struct vcache *)0;
+ *avcp = NULL;
}
afs_PutFakeStat(&fakestate);
struct afs_fakestat_state fakestate;
AFS_STATCNT(afs_open);
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
#ifdef AFS_SGI64_ENV
/* avcpp can be, but is not necesarily, bhp's vnode. */
tvc = VTOAFS(BHV_TO_VNODE(bhv));
crhold(acred);
if (tvc->credp) {
struct ucred *crp = tvc->credp;
- tvc->credp = (struct ucred *)0;
+ tvc->credp = NULL;
crfree(crp);
}
tvc->credp = acred;
void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
struct AFS_UCRED *acred, struct vrequest *areq);
-afs_MemRead(avc, auio, acred, albn, abpp, noLock)
- register struct vcache *avc;
- struct uio *auio;
- struct AFS_UCRED *acred;
- daddr_t albn;
- int noLock;
- struct buf **abpp;
+int afs_MemRead(register struct vcache *avc, struct uio *auio, struct AFS_UCRED *acred,
+ daddr_t albn, struct buf **abpp, int noLock)
{
afs_size_t totalLength;
afs_size_t transferLength;
return EIO;
/* check that we have the latest status info in the vnode cache */
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
if (!noLock) {
code = afs_VerifyVCache(avc, &treq);
if (code) {
/* don't have current data, so get it below */
ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
- tdc = (struct dcache *) 0;
+ tdc = NULL;
}
}
* Also need to worry about DFFetching, and IFFree, I think. */
static struct dcache *savedc = 0;
-afs_UFSReadFast(avc, auio, acred, albn, abpp, noLock)
- register struct vcache *avc;
- struct uio *auio;
- struct AFS_UCRED *acred;
- int noLock;
- daddr_t albn;
- struct buf **abpp;
+int afs_UFSReadFast(register struct vcache *avc, struct uio *auio,
+ struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp, int noLock)
{
struct vrequest treq;
int offDiff;
return afs_UFSRead(avc, auio, acred, albn, abpp, noLock);
}
-afs_UFSRead(avc, auio, acred, albn, abpp, noLock)
- struct vcache *avc;
- struct uio *auio;
- struct AFS_UCRED *acred;
- daddr_t albn;
- int noLock;
- struct buf **abpp;
+int afs_UFSRead(register struct vcache *avc, struct uio *auio,
+ struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp, int noLock)
{
afs_size_t totalLength;
afs_size_t transferLength;
return EIO;
/* check that we have the latest status info in the vnode cache */
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
if (!noLock) {
if (!avc)
osi_Panic ("null avc in afs_UFSRead");
/* don't have current data, so get it below */
ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
- tdc = (struct dcache *) 0;
+ tdc = NULL;
}
}
if ((avc->states & CForeign) == 0 &&
(ntohl(ade->fid.vnode) & 1)) {
return DT_DIR;
- } else if ((tvc=afs_FindVCache(&tfid,0,0,0,0))) {
+ } else if ((tvc=afs_FindVCache(&tfid,0,0))) {
if (tvc->mvstat) {
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
return DT_DIR;
} else if (((tvc->states) & (CStatd|CTruth))) {
/* CTruth will be set if the object has
*ever* been statd */
vtype=vType(tvc);
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
if (vtype == VDIR)
return DT_DIR;
else if (vtype == VREG)
type=DT_LNK; */
/* what other types does AFS support? */
} else
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
}
return DT_UNKNOWN;
}
|| AfsLargeFileSize(auio->uio_offset, auio->uio_resid) )
return EFBIG;
- if (code = afs_InitReq(&treq, acred)) {
+ if ((code = afs_InitReq(&treq, acred))) {
#ifdef AFS_HPUX_ENV
osi_FreeSmallSpace((char *)sdirEntry);
#endif
#else
afs1_readdir(avc, auio, acred)
#endif
- register struct vcache *avc;
+ struct vcache *avc;
struct uio *auio;
struct AFS_UCRED *acred; {
struct vrequest treq;
}
#endif /* AFS_OSF_ENV */
-afsremove(adp, tdc, tvc, aname, acred, treqp)
- register struct vcache *adp;
- register struct dcache *tdc;
- register struct vcache *tvc;
- char *aname;
- struct vrequest *treqp;
- struct AFS_UCRED *acred; {
+int afsremove(register struct vcache *adp, register struct dcache *tdc,
+ register struct vcache *tvc, char *aname, struct AFS_UCRED *acred,
+ struct vrequest *treqp)
+{
register afs_int32 code;
register struct conn *tc;
struct AFSFetchStatus OutDirStatus;
tc = afs_Conn(&adp->fid, treqp, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_REMOVEFILE);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_RemoveFile(tc->id, (struct AFSFid *) &adp->fid.Fid,
aname, &OutDirStatus, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
} while
(afs_Analyze(tc, code, &adp->fid, treqp,
- AFS_STATS_FS_RPCIDX_REMOVEFILE, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_REMOVEFILE, SHARED_LOCK, NULL));
osi_dnlc_remove (adp, aname, tvc);
if (tvc) afs_symhint_inval(tvc); /* XXX: don't really need to be so extreme */
ReleaseSharedLock(&tdc->lock);
afs_PutDCache(tdc);
}
- if (tvc) afs_PutVCache(tvc, WRITE_LOCK);
+ if (tvc) afs_PutVCache(tvc);
if (code < 0) {
ObtainWriteLock(&afs_xcbhash, 497);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
}
return (0);
}
-static char *newname() {
+static char *newname(void)
+{
char *name, *sp, *p = ".__afs";
afs_int32 rd = afs_random() & 0xffff;
tvc = (struct vcache *)ndp->ni_vp; /* should never be null */
#endif
- if (code = afs_InitReq(&treq, acred)) {
+ if ((code = afs_InitReq(&treq, acred))) {
#ifdef AFS_OSF_ENV
- afs_PutVCache(adp, 0);
- afs_PutVCache(tvc, 0);
+ afs_PutVCache(adp);
+ afs_PutVCache(tvc);
#endif
return code;
}
if (code) {
afs_PutFakeStat(&fakestate);
#ifdef AFS_OSF_ENV
- afs_PutVCache(adp, 0);
- afs_PutVCache(tvc, 0);
+ afs_PutVCache(adp);
+ afs_PutVCache(tvc);
#endif
return code;
}
code = afs_DynrootVOPRemove(adp, acred, aname);
afs_PutFakeStat(&fakestate);
#ifdef AFS_OSF_ENV
- afs_PutVCache(adp, 0);
- afs_PutVCache(tvc, 0);
+ afs_PutVCache(adp);
+ afs_PutVCache(tvc);
#endif
return code;
}
if (strlen(aname) > AFSNAMEMAX) {
afs_PutFakeStat(&fakestate);
#ifdef AFS_OSF_ENV
- afs_PutVCache(adp, 0);
- afs_PutVCache(tvc, 0);
+ afs_PutVCache(adp);
+ afs_PutVCache(tvc);
#endif
return ENAMETOOLONG;
}
#ifdef AFS_OSF_ENV
tvc = VTOAFS(ndp->ni_vp); /* should never be null */
if (code) {
- afs_PutVCache(adp, 0);
- afs_PutVCache(tvc, 0);
+ afs_PutVCache(adp);
+ afs_PutVCache(tvc);
afs_PutFakeStat(&fakestate);
return afs_CheckCode(code, &treq, 22);
}
#else /* AFS_OSF_ENV */
- tvc = (struct vcache *) 0;
+ tvc = NULL;
if (code) {
code = afs_CheckCode(code, &treq, 23);
afs_PutFakeStat(&fakestate);
*/
if ( adp->states & CRO ) {
#ifdef AFS_OSF_ENV
- afs_PutVCache(adp, 0);
- afs_PutVCache(tvc, 0);
+ afs_PutVCache(adp);
+ afs_PutVCache(tvc);
#endif
code = EROFS;
afs_PutFakeStat(&fakestate);
unlinkFid.Cell = adp->fid.Cell;
unlinkFid.Fid.Volume = adp->fid.Fid.Volume;
if (unlinkFid.Fid.Unique == 0) {
- tvc = afs_LookupVCache(&unlinkFid, &treq, &cached,
- WRITE_LOCK, adp, aname);
+ tvc = afs_LookupVCache(&unlinkFid, &treq, &cached, adp, aname);
} else {
ObtainReadLock(&afs_xvcache);
- tvc = afs_FindVCache(&unlinkFid, 1, WRITE_LOCK,
- 0 , DO_STATS );
+ tvc = afs_FindVCache(&unlinkFid, 0, DO_STATS);
ReleaseReadLock(&afs_xvcache);
}
}
}
if ( tdc )
afs_PutDCache(tdc);
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
} else {
code = afsremove(adp, tdc, tvc, aname, acred, &treq);
}
#ifdef AFS_OSF_ENV
- afs_PutVCache(adp, WRITE_LOCK);
+ afs_PutVCache(adp);
#endif /* AFS_OSF_ENV */
afs_PutFakeStat(&fakestate);
return code;
*
* CAUTION -- may be called with avc unheld. */
-afs_remunlink(avc, doit)
- register struct vcache *avc;
- register int doit;
+int afs_remunlink(register struct vcache *avc, register int doit)
{
struct AFS_UCRED *cred;
char *unlname;
return 0;
if (avc->mvid && (doit || (avc->states & CUnlinkedDel))) {
- if (code = afs_InitReq(&treq, avc->uncred)) {
+ if ((code = afs_InitReq(&treq, avc->uncred))) {
ReleaseWriteLock(&avc->lock);
}
else {
dirFid.Fid.Volume = avc->fid.Fid.Volume;
dirFid.Fid.Vnode = avc->parentVnode;
dirFid.Fid.Unique = avc->parentUnique;
- adp = afs_GetVCache(&dirFid, &treq, (afs_int32 *)0,
- (struct vcache *)0, WRITE_LOCK);
+ adp = afs_GetVCache(&dirFid, &treq, NULL, NULL);
if (adp) {
tdc = afs_FindDCache(adp, 0);
/* afsremove releases the adp & tdc locks, and does vn_rele(avc) */
code = afsremove(adp, tdc, avc, unlname, cred, &treq);
- afs_PutVCache(adp, WRITE_LOCK);
+ afs_PutVCache(adp);
} else {
/* we failed - and won't be back to try again. */
- afs_PutVCache(avc, WRITE_LOCK);
+ afs_PutVCache(avc);
}
osi_FreeSmallSpace(unlname);
crfree(cred);
tc = afs_Conn(&aodp->fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RENAME);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_Rename(tc->id, (struct AFSFid *) &aodp->fid.Fid, aname1,
(struct AFSFid *) &andp->fid.Fid, aname2,
&OutOldDirStatus, &OutNewDirStatus, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
} else code = -1;
} while
(afs_Analyze(tc, code, &andp->fid, areq,
- AFS_STATS_FS_RPCIDX_RENAME, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_RENAME, SHARED_LOCK, NULL));
returnCode = code; /* remember for later */
if (unlinkFid.Fid.Vnode) {
unlinkFid.Fid.Volume = aodp->fid.Fid.Volume;
unlinkFid.Cell = aodp->fid.Cell;
- tvc = (struct vcache *)0;
+ tvc = NULL;
if (!unlinkFid.Fid.Unique) {
- tvc = afs_LookupVCache(&unlinkFid, areq, (afs_int32 *)0, WRITE_LOCK,
- aodp, aname1);
+ tvc = afs_LookupVCache(&unlinkFid, areq, NULL, aodp, aname1);
}
if (!tvc) /* lookup failed or wasn't called */
- tvc = afs_GetVCache(&unlinkFid, areq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvc = afs_GetVCache(&unlinkFid, areq, NULL, NULL);
if (tvc) {
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
}
}
fileFid.Fid.Volume = aodp->fid.Fid.Volume;
fileFid.Cell = aodp->fid.Cell;
if (!fileFid.Fid.Unique)
- tvc = afs_LookupVCache(&fileFid, areq, (afs_int32 *)0, WRITE_LOCK, andp, aname2);
+ tvc = afs_LookupVCache(&fileFid, areq, NULL, andp, aname2);
else
- tvc = afs_GetVCache(&fileFid, areq, (afs_int32 *)0,
- (struct vcache*)0, WRITE_LOCK);
+ tvc = afs_GetVCache(&fileFid, areq, NULL, NULL);
if (tvc && (vType(tvc) == VDIR)) {
ObtainWriteLock(&tvc->lock,152);
tdc1 = afs_FindDCache(tvc, 0);
}
osi_dnlc_remove(tvc, "..", 0);
ReleaseWriteLock(&tvc->lock);
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
} else if (tvc) {
/* True we shouldn't come here since tvc SHOULD be a dir, but we
* 'syntactically' need to unless we change the 'if' above...
*/
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
}
}
code = returnCode;
afs_Trace2(afs_iclSetp, CM_TRACE_SYMLINK, ICL_TYPE_POINTER, adp,
ICL_TYPE_STRING, aname);
- if (code = afs_InitReq(&treq, acred))
+ if ((code = afs_InitReq(&treq, acred)))
goto done2;
afs_InitFakeStat(&fakestate);
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SYMLINK);
if (adp->states & CForeign) {
now = osi_Time();
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_DFSSymlink(tc->id, (struct AFSFid *) &adp->fid.Fid, aname,
atargetName, &InStatus, (struct AFSFid *) &newFid.Fid,
&OutFidStatus, &OutDirStatus, &CallBack, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
} else {
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_Symlink(tc->id, (struct AFSFid *) &adp->fid.Fid, aname,
atargetName, &InStatus, (struct AFSFid *) &newFid.Fid,
&OutFidStatus, &OutDirStatus, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
}
XSTATS_END_TIME;
}
else code = -1;
} while
(afs_Analyze(tc, code, &adp->fid, &treq,
- AFS_STATS_FS_RPCIDX_SYMLINK, SHARED_LOCK, (struct cell *)0));
+ AFS_STATS_FS_RPCIDX_SYMLINK, SHARED_LOCK, NULL));
UpgradeSToWLock(&afs_xvcache,40);
if (code) {
/* now we're done with parent dir, create the link's entry. Note that
* no one can get a pointer to the new cache entry until we release
* the xvcache lock. */
- tvc = afs_NewVCache(&newFid, hostp, 1, WRITE_LOCK);
+ tvc = afs_NewVCache(&newFid, hostp);
ObtainWriteLock(&tvc->lock,157);
ObtainWriteLock(&afs_xcbhash, 500);
tvc->states |= CStatd; /* have valid info */
}
ReleaseWriteLock(&tvc->lock);
ReleaseWriteLock(&afs_xvcache);
- afs_PutVCache(tvc, WRITE_LOCK);
+ afs_PutVCache(tvc);
code = 0;
done:
afs_PutFakeStat(&fakestate);
return code;
}
-afs_MemHandleLink(avc, areq)
- register struct vcache *avc;
- struct vrequest *areq;
- {
+int afs_MemHandleLink(register struct vcache *avc, struct vrequest *areq)
+{
register struct dcache *tdc;
register char *tp, *rbuf;
afs_size_t offset, len;
return 0;
}
-afs_UFSHandleLink(avc, areq)
- register struct vcache *avc;
- struct vrequest *areq;
+int afs_UFSHandleLink(register struct vcache *avc, struct vrequest *areq)
{
register struct dcache *tdc;
register char *tp, *rbuf;
AFS_STATCNT(afs_readlink);
afs_Trace1(afs_iclSetp, CM_TRACE_READLINK, ICL_TYPE_POINTER, avc);
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
afs_InitFakeStat(&fakestat);
code = afs_EvalFakeStat(&avc, &fakestat, &treq);
if (code) goto done;
* afs_FlushActiveVCaches routine (when CCORE is on).
* avc->lock must be write-locked.
*/
-afs_StoreOnLastReference(avc, treq)
-register struct vcache *avc;
-register struct vrequest *treq;
+int afs_StoreOnLastReference(register struct vcache *avc, register struct vrequest *treq)
{
int code = 0;
avc->execsOrWriters--;
AFS_RELE(AFSTOV(avc)); /* VN_HOLD at set CCore(afs_FakeClose)*/
crfree((struct AFS_UCRED *)avc->linkData); /* "crheld" in afs_FakeClose */
- avc->linkData = (char *)0;
+ avc->linkData = NULL;
}
/* Now, send the file back. Used to require 0 writers left, but now do
* it on every close for write, since two closes in a row are harmless
-afs_MemWrite(avc, auio, aio, acred, noLock)
- register struct vcache *avc;
- struct uio *auio;
- int aio, noLock;
- struct AFS_UCRED *acred;
+int afs_MemWrite(register struct vcache *avc, struct uio *auio, int aio,
+ struct AFS_UCRED *acred, int noLock)
{
afs_size_t totalLength;
afs_size_t transferLength;
return avc->vc_error;
startDate = osi_Time();
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
/* otherwise we read */
totalLength = auio->afsio_resid;
filePos = auio->afsio_offset;
/* called on writes */
-afs_UFSWrite(avc, auio, aio, acred, noLock)
- register struct vcache *avc;
- struct uio *auio;
- int aio, noLock;
- struct AFS_UCRED *acred;
+int afs_UFSWrite(register struct vcache *avc, struct uio *auio,
+ int aio, struct AFS_UCRED *acred, int noLock)
{
afs_size_t totalLength;
afs_size_t transferLength;
return avc->vc_error;
startDate = osi_Time();
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
/* otherwise we read */
totalLength = auio->afsio_resid;
filePos = auio->afsio_offset;
}
/* do partial write if we're low on unmodified chunks */
-afs_DoPartialWrite(avc, areq)
-register struct vcache *avc;
-struct vrequest *areq; {
+int afs_DoPartialWrite(register struct vcache *avc, struct vrequest *areq)
+{
register afs_int32 code;
if (afs_stats_cmperf.cacheCurrDirtyChunks <= afs_stats_cmperf.cacheMaxDirtyChunks)
* N.B: Intercepting close syscall doesn't trap aborts or exit system
* calls.
*/
-afs_closex(afd)
- register struct file *afd; {
+int afs_closex(register struct file *afd)
+{
struct vrequest treq;
struct vcache *tvc;
afs_int32 flags;
AFS_STATCNT(afs_closex);
/* setup the credentials */
- if (code = afs_InitReq(&treq, u.u_cred)) return code;
+ if ((code = afs_InitReq(&treq, u.u_cred))) return code;
afs_InitFakeStat(&fakestat);
closeDone = 0;
AFS_STATCNT(afs_fsync);
afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
- if (code = afs_InitReq(&treq, acred)) return code;
+ if ((code = afs_InitReq(&treq, acred))) return code;
#if defined(AFS_SGI_ENV)
AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
#endif
#endif
+/* Moved from VNOPS/afs_vnop_flocks so it can be used in prototypes */
+#if defined(AFS_HPUX102_ENV)
+#define AFS_FLOCK k_flock
+#else
+#if defined(AFS_SUN56_ENV) || defined(AFS_LINUX24_ENV)
+#define AFS_FLOCK flock64
+#else
+#define AFS_FLOCK flock
+#endif /* AFS_SUN56_ENV || AFS_LINUX24_ENV */
+#endif /* AFS_HPUX102_ENV */
+
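
For illustration only (not part of the diff): with AFS_FLOCK available in
this header, a prototype file can declare lock arguments generically and have
them resolve to flock, flock64 or k_flock on each platform. The routine name
below is made up:

    /* hypothetical prototype -- shows the AFS_FLOCK macro in use */
    extern int afs_SampleSetLock(struct vcache *avc, struct AFS_FLOCK *af);
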
/* The following are various levels of afs debugging */
#define AFSDEB_GENERAL 1 /* Standard debugging */
#define AFSDEB_NETWORK 2 /* low level afs networking */
#define SHash(aserv) ((ntohl(aserv)) & (NSERVERS-1))
#define FVHash(acell,avol) (((avol)+(acell)) & (NFENTRIES-1))
-extern struct cell *afs_GetCell();
-extern struct cell *afs_GetCellNoLock();
-extern struct cell *afs_GetCellByName();
-extern struct cell *afs_FindCellByName();
-extern struct cell *afs_GetCellByIndex();
-extern struct unixuser *afs_GetUser();
-extern struct volume *afs_GetVolume();
-extern struct volume *afs_GetVolumeByName();
-extern struct conn *afs_Conn();
-extern struct conn *afs_ConnByHost();
-extern struct conn *afs_ConnByMHosts();
-extern afs_int32 afs_NewCell();
-extern struct dcache *afs_GetDCache();
-extern struct dcache *afs_FindDCache();
-extern struct dcache *afs_NewDCache();
-extern struct dcache *afs_GetDSlot();
-extern struct vcache *afs_GetVCache();
-extern struct brequest *afs_BQueue();
-
-/* afs_cache.c */
-extern int afs_CacheInit();
-extern void afs_StoreWarn();
-extern void afs_AdjustSize();
-extern void afs_ComputeCacheParms();
-extern void afs_FlushDCache();
-extern void afs_FlushActiveVcaches();
-extern void afs_StuffVcache();
-extern void afs_PutVCache();
-extern void afs_TryToSmush();
-extern void afs_ProcessFS();
-extern void afs_WriteThroughDSlots();
-extern void shutdown_cache();
-/* afs_call.c */
-extern void afs_shutdown();
-/* afs_osifile.c */
-extern void shutdown_osifile();
-
-/* afs_dynroot.c */
-extern int afs_IsDynrootFid();
-extern void afs_GetDynrootFid();
-extern int afs_IsDynroot();
-extern void afs_RefreshDynroot();
-extern void afs_GetDynroot();
-extern void afs_PutDynroot();
-extern int afs_DynrootNewVnode();
-extern int afs_SetDynrootEnable();
-extern int afs_GetDynrootEnable();
-extern int afs_DynrootVOPSymlink();
-extern int afs_DynrootVOPRemove();
-
-
/* Performance hack - we could replace VerifyVCache2 with the appropriate
* GetVCache incantation, and could eliminate even this code from afs_UFSRead
* by making intentionally invalidating quick.stamp in the various callbacks
#define afs_nlrdwr(avc, uio, rw, io, cred) \
(((rw) == UIO_WRITE) ? afs_write(avc, uio, io, cred, 1) : afs_read(avc, uio, cred, 0, 0, 1))
-extern afs_int32 afs_blocksUsed, afs_blocksDiscarded;
-extern afs_int32 afs_discardDCCount, afs_freeDCCount;
-extern afs_int32 afs_bulkStatsDone, afs_bulkStatsLost;
-extern int afs_TruncateDaemonRunning;
-extern int afs_CacheTooFull;
/* Cache size truncation uses the following low and high water marks:
* If the cache is more than 95% full (CM_DCACHECOUNTFREEPCT), the cache
* truncation daemon is awakened and will free up space until the cache is 85%
* afs_GetDownD wakes those processes once the cache is 95% full
* (CM_CACHESIZEDRAINEDPCT).
*/
-extern void afs_MaybeWakeupTruncateDaemon();
-extern void afs_CacheTruncateDaemon();
-extern int afs_WaitForCacheDrain;
#define CM_MAXDISCARDEDCHUNKS 16 /* # of chunks */
#define CM_DCACHECOUNTFREEPCT 95 /* max pct of chunks in use */
#define CM_DCACHESPACEFREEPCT 90 /* max pct of space in use */
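
For illustration only (not part of the diff): the water marks above amount to
a pair of percentage checks before waking the truncation daemon. The counter
names below are invented; the real client tracks these counts in its own
statistics structures:

    /* illustrative sketch -- wake the daemon if either resource is past its mark */
    static int cache_over_high_water(int used_chunks, int total_chunks,
                                     int used_blocks, int total_blocks)
    {
        return (100 * used_chunks > CM_DCACHECOUNTFREEPCT * total_chunks) ||
               (100 * used_blocks > CM_DCACHESPACEFREEPCT * total_blocks);
    }
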
/* Handy max length of a numeric string. */
#define CVBS 12 /* max afs_int32 is 2^32 ~ 4*10^9, +1 for NULL, +luck */
-extern int afs_norefpanic;
#define refpanic(foo) if (afs_norefpanic) \
{ printf( foo ); afs_norefpanic++;} else osi_Panic( foo )
-
-
/*
** these are defined in the AIX source code sys/fs_locks.h but are not
** defined anywhere in the /usr/include directory
#endif /* AFS_SGI62_ENV */
#endif
+
+/* Note: this should agree with the definition in kdump.c */
+#if defined(AFS_OSF_ENV)
+#if !defined(UKERNEL)
+#define AFS_USEBUFFERS 1
+#endif
+#endif
+
+#if !defined(UKERNEL) && !defined(HAVE_STRUCT_BUF)
+/* declare something so that prototypes don't flip out */
+/* This is really a sick hack, but it appears that struct buf is only ever passed
+around as a pointer, except with libuafs, in which case it is actually defined. */
+
+struct buf {
+ int bogus;
+};
+#endif
+
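For illustration only (not part of the diff): the dummy definition only has to
satisfy prototypes that handle struct buf by pointer. A bare forward
declaration is already enough for that; the names below are made up:

    struct buf;                                /* incomplete type suffices...   */
    extern int osi_SampleBufOp(struct buf *);  /* ...for pointer-only prototypes */

The patch supplies a one-member dummy instead of a bare declaration, but for
pointer-only callers the effect is the same.
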
/* fakestat support: opaque storage for afs_EvalFakeStat to remember
* what vcache should be released.
*/
extern int afs_fakestat_enable;
+struct buffer {
+ ino_t fid[1]; /* Unique cache key + i/o addressing */
+ afs_int32 page;
+ afs_int32 accesstime;
+ struct buffer *hashNext;
+ char *data;
+ char lockers;
+ char dirty;
+ char hashIndex;
+#if AFS_USEBUFFERS
+ struct buf *bufp;
+#endif
+ afs_rwlock_t lock; /* the lock for this structure */
+};
+
+/* afs_memcache.c */
+struct memCacheEntry {
+ int size; /* # of valid bytes in this entry */
+ int dataSize; /* size of allocated data area */
+ afs_lock_t afs_memLock;
+ char *data; /* bytes */
+};
+
#endif /* _AFS_H_ */
#include "../afs/afsincludes.h" /* Afs-based standard headers */
#include "../afs/afs_stats.h" /* afs statistics */
#include "../afs/afs_util.h"
-#include "../afs/afs_prototypes.h"
#if defined(AFS_SUN56_ENV)
#include <inet/led.h>
int afs_BusyWaitPeriod = 15; /* poll every 15 seconds */
-
afs_int32 hm_retry_RO=0; /* don't wait */
afs_int32 hm_retry_RW=0; /* don't wait */
afs_int32 hm_retry_int=0; /* don't wait */
-void afs_CopyError(afrom, ato)
- register struct vrequest *afrom;
- register struct vrequest *ato;
-
+void afs_CopyError(register struct vrequest *afrom, register struct vrequest *ato)
{
AFS_STATCNT(afs_CopyError);
if (!afrom->initd)
if (afrom->permWriteError)
ato->permWriteError = 1;
-} /*afs_CopyError*/
-
-
-void afs_FinalizeReq(areq)
- register struct vrequest *areq;
+}
+void afs_FinalizeReq(register struct vrequest *areq)
{
AFS_STATCNT(afs_FinalizeReq);
if (areq->initd)
areq->permWriteError = 0;
areq->initd = 1;
-} /*afs_FinalizeReq*/
-
-
-afs_CheckCode(acode, areq, where)
- afs_int32 acode;
- struct vrequest *areq;
- int where;
+}
+int afs_CheckCode(afs_int32 acode, struct vrequest *areq, int where)
{
AFS_STATCNT(afs_CheckCode);
if (acode) {
#define DIFFERENT 0
#define SAME 1
#define DUNNO 2
-static int VLDB_Same (afid, areq)
- struct VenusFid *afid;
- struct vrequest *areq;
+static int VLDB_Same (struct VenusFid *afid, struct vrequest *areq)
{
struct vrequest treq;
struct conn *tconn;
AFS_STATCNT(CheckVLDB);
afs_FinalizeReq(areq);
- if (i = afs_InitReq(&treq, &afs_osi_cred)) return DUNNO;
+ if ((i = afs_InitReq(&treq, &afs_osi_cred))) return DUNNO;
tcell = afs_GetCell(afid->Cell, READ_LOCK);
bp = afs_cv2string(&tbuf[CVBS], afid->Fid.Volume);
do {
}
} else
i = -1;
- } while (afs_Analyze(tconn, i, (struct VenusFid *) 0, &treq,
+ } while (afs_Analyze(tconn, i, NULL, &treq,
-1, /* no op code for this */
SHARED_LOCK, tcell));
* The retry return value is used by afs_StoreAllSegments to determine
* if this is a temporary or permanent error.
*------------------------------------------------------------------------*/
-int afs_Analyze(aconn, acode, afid, areq, op, locktype, cellp)
- register struct conn *aconn;
- afs_int32 acode;
- register struct vrequest *areq;
- struct VenusFid *afid;
- int op;
- afs_int32 locktype;
- struct cell *cellp;
-{ /*afs_Analyze*/
-
+int afs_Analyze(register struct conn *aconn, afs_int32 acode,
+ struct VenusFid *afid, register struct vrequest *areq, int op,
+ afs_int32 locktype, struct cell *cellp)
+{
afs_int32 i, code;
struct srvAddr *sa;
struct server *tsp;