Credit: shadow@dementia.org contributed portions of these changes.
powerpc-apple-darwin7.5*)
AFS_SYSNAME="ppc_darwin_70"
;;
+ powerpc-apple-darwin8.0*)
+ AFS_SYSNAME="ppc_darwin_80"
+ ;;
sparc-sun-solaris2.5*)
AFS_SYSNAME="sun4x_55"
enable_login="yes"
#endif
#endif
+#ifdef AFS_DARWIN80_ENV
+#include <kern/locks.h>
+#else
#include <sys/lock.h>
+#endif
#include <kern/thread.h>
#include <sys/user.h>
+#ifdef AFS_DARWIN80_ENV
+#define getpid() proc_selfpid()
+#define getppid() proc_selfppid()
+#else
#define getpid() current_proc()->p_pid
#define getppid() current_proc()->p_pptr->p_pid
+#endif
#undef gop_lookupname
#define gop_lookupname osi_lookupname
/* vcexcl - used only by afs_create */
enum vcexcl { EXCL, NONEXCL };
+#ifdef AFS_DARWIN80_ENV
+#define vrele vnode_rele
+#define vput vnode_put
+#define vref vnode_ref
+#endif
+
/*
* Time related macros
*/
#ifdef KERNEL
extern thread_t afs_global_owner;
/* simple locks cannot be used since sleep can happen at any time */
+#ifdef AFS_DARWIN80_ENV
+/* mach locks still don't have an exported try, but we are forced to use them */
+extern lck_mtx_t *afs_global_lock;
+/* Take the global AFS lock and record ownership.  Note: the xnu KPI is
+ * lck_mtx_lock/lck_mtx_unlock ("lck_", not "lk_"); the previous spelling
+ * did not exist and would not link. */
+#define AFS_GLOCK() \
+ do { \
+ lck_mtx_lock(afs_global_lock); \
+ osi_Assert(afs_global_owner == 0); \
+ afs_global_owner = current_thread(); \
+ } while (0)
+/* Release the global AFS lock; caller must be the recorded owner. */
+#define AFS_GUNLOCK() \
+ do { \
+ osi_Assert(afs_global_owner == current_thread()); \
+ afs_global_owner = 0; \
+ lck_mtx_unlock(afs_global_lock); \
+ } while(0)
+#else
/* Should probably use mach locks rather than bsd locks, since we use the
mach thread control api's elsewhere (mach locks not used for consistency
with rx, since rx needs lock_write_try() in order to use mach locks
afs_global_owner = 0; \
lockmgr(&afs_global_lock, LK_RELEASE, 0, current_proc()); \
} while(0)
+#endif
#define ISAFS_GLOCK() (afs_global_owner == current_thread())
#define SPLVAR
#ifdef KERNEL_FUNNEL
sysent[AFS_SYSCALL].sy_funnel = KERNEL_FUNNEL;
#endif
+#ifdef AFS_DARWIN80_ENV
+ /* Create the "openafs" lock group first, then allocate the global
+  * lock from it.  The xnu allocation KPI is lck_mtx_alloc_init()
+  * (there is no lck_mtx_alloc); LCK_ATTR_NULL requests default
+  * attributes. */
+ MUTEX_SETUP();
+ afs_global_lock = lck_mtx_alloc_init(openafs_lck_grp, LCK_ATTR_NULL);
+#endif
return KERN_SUCCESS;
}
/* give up the stolen syscall entry */
sysent[AFS_SYSCALL].sy_narg = 0;
sysent[AFS_SYSCALL].sy_call = nosys;
+#ifdef AFS_DARWIN80_ENV
+ /* Free the global lock back to its group *before* MUTEX_FINISH()
+  * releases the group; lck_mtx_free() requires the owning group as
+  * its second argument. */
+ lck_mtx_free(afs_global_lock, openafs_lck_grp);
+ MUTEX_FINISH();
+#endif
return KERN_SUCCESS;
}
if (flags & MNT_FORCE) {
if (afs_globalVp) {
AFS_GUNLOCK();
+#ifdef AFS_DARWIN80_ENV
+ vnode_rele(AFSTOV(afs_globalVp));
+#else
vrele(AFSTOV(afs_globalVp));
+#endif
AFS_GLOCK();
}
afs_globalVp = NULL;
vprint("bad usecount", vp);
panic("afs_vget");
}
+#ifdef AFS_DARWIN80_ENV
+ error = vnode_get(vp);
+#else
error = vget(vp, lfl, current_proc());
+#endif
if (!error)
insmntque(vp, mp); /* take off free list */
return error;
ourselves during vop_inactive, except we also need to not reinst
the ubc... so we just call VREF there now anyway. */
+#ifdef AFS_DARWIN80_ENV
+ if (vnode_isinuse(vp))
+ vnode_ref(vp);
+ else
+ vnode_get(vp);
+#else
if (VREFCOUNT(tvc) > 0)
VREF(((struct vnode *)(vp)));
else
afs_vget(afs_globalVFS, 0, (vp));
+#endif
if (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp)) {
ubc_info_init(vp);
*avcp = tvc;
code = (tvc ? 0 : ENOENT);
hit = 1;
+#ifdef AFS_DARWIN80_ENV
+ if (tvc && !vnode_isinuse(AFSTOV(tvc))) {
+ osi_Panic("TT1");
+ }
+#else
if (tvc && !VREFCOUNT(tvc)) {
osi_Panic("TT1");
}
+#endif
if (code) {
/*printf("LOOKUP GETVCDOTDOT -> %d\n", code); */
}
code = 0;
*avcp = tvc = adp;
hit = 1;
+#ifdef AFS_DARWIN80_ENV
+ if (adp && !vnode_isinuse(AFSTOV(adp))) {
+ osi_Panic("TT2");
+ }
+#else
if (adp && !VREFCOUNT(adp)) {
osi_Panic("TT2");
}
+#endif
goto done;
}
}
}
*avcp = tvc;
+#ifdef AFS_DARWIN80_ENV
+ if (tvc && !vnode_isinuse(AFSTOV(tvc))) {
+ osi_Panic("TT3");
+ }
+#else
if (tvc && !VREFCOUNT(tvc)) {
osi_Panic("TT3");
}
+#endif
code = 0;
} else {
/* if we get here, we found something in a directory that couldn't
afs_symhint_inval(tvc);
Tadp1 = adp;
+#ifndef AFS_DARWIN80_ENV
Tadpr = VREFCOUNT(adp);
+#endif
Ttvc = tvc;
Tnam = aname;
Tnam1 = 0;
if (tvc)
+#ifndef AFS_DARWIN80_ENV
Ttvcr = VREFCOUNT(tvc);
#ifdef AFS_AIX_ENV
if (tvc && (VREFCOUNT(tvc) > 2) && tvc->opens > 0
if (tvc && (VREFCOUNT(tvc) > 1) && tvc->opens > 0
&& !(tvc->states & CUnlinked))
#endif
+#else
+ if (tvc && (vnode_isinuse(AFSTOV(tvc))) && tvc->opens > 0
+ && !(tvc->states & CUnlinked))
+#endif
{
char *unlname = afs_newname();
#define MakeStamp() (++afs_stampValue)
#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
+#ifdef AFS_DARWIN80_ENV
+#define VTOAFS(v) ((struct vcache *)vnode_fsnode((v)))
+#else
#define VTOAFS(v) ((struct vcache *)(v)->v_data)
+#endif
#define AFSTOV(vc) ((vc)->v)
#else
#define VTOAFS(V) ((struct vcache *)(V))
int ownslock; /* pid of owner of excl lock, else 0 - defect 3083 */
#endif
#ifdef AFS_DARWIN_ENV
+#ifdef AFS_DARWIN80_ENV
+ lck_mtx_t *rwlock;
+#else
struct lock__bsd__ rwlock;
#endif
+#endif
#ifdef AFS_XBSD_ENV
struct lock rwlock;
#endif
a_result->DataVersion = hgetlo(tvc->m.DataVersion);
a_result->callback = afs_data_pointer_to_int32(tvc->callback); /* XXXX Now a pointer; change it XXXX */
a_result->cbExpires = tvc->cbExpires;
+#ifdef AFS_DARWIN80_ENV
+ a_result->refCount = vnode_isinuse(AFSTOV(tvc))?1:0; /* XXX fix */
+#else
a_result->refCount = VREFCOUNT(tvc);
+#endif
a_result->opens = tvc->opens;
a_result->writers = tvc->execsOrWriters;
a_result->mvstat = tvc->mvstat;
a_result->DataVersion = hgetlo(tvc->m.DataVersion);
a_result->callback = afs_data_pointer_to_int32(tvc->callback); /* XXXX Now a pointer; change it XXXX */
a_result->cbExpires = tvc->cbExpires;
+#ifdef AFS_DARWIN80_ENV
+ a_result->refCount = vnode_isinuse(AFSTOV(tvc))?1:0; /* XXX fix */
+#else
a_result->refCount = VREFCOUNT(tvc);
+#endif
a_result->opens = tvc->opens;
a_result->writers = tvc->execsOrWriters;
a_result->mvstat = tvc->mvstat;
#elif defined(AFS_FBSD50_ENV)
mtx_init(&afs_global_mtx, "AFS global lock", NULL, MTX_DEF);
#elif defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
+#if !defined(AFS_DARWIN80_ENV)
lockinit(&afs_global_lock, PLOCK, "afs global lock", 0, 0);
+#endif
afs_global_owner = 0;
#elif defined(AFS_AIX41_ENV)
lock_alloc((void *)&afs_global_lock, LOCK_ALLOC_PIN, 1, 1);
/* ARCH/osi_misc.c */
+#ifdef AFS_LINUX20_ENV
extern void osi_iput(struct inode *ip);
+#endif
extern void afs_osi_SetTime(osi_timeval_t * atv);
/* LINUX/osi_misc.c */
#ifdef AFS_OSF_ENV
if (VREFCOUNT(tvc) > 1)
#else /* AFS_OSF_ENV */
+#ifdef AFS_DARWIN80_ENV
+ if (vnode_isinuse(AFSTOV(tvc)))
+#else
if (VREFCOUNT(tvc))
#endif
+#endif
afs_warn("Stat cache entry at %x is held\n", tvc);
if (CheckLock(&tvc->lock))
afs_warn("Stat entry at %x is locked\n", tvc);
/* This should put it back on the vnode free list since usecount is 1 */
afs_vcount--;
vSetType(avc, VREG);
+#ifdef AFS_DARWIN80_ENV
+ if (vnode_isinuse(AFSTOV(avc))) {
+#else
if (VREFCOUNT(avc) > 0) {
+#endif
VN_UNLOCK(AFSTOV(avc));
AFS_RELE(AFSTOV(avc));
} else {
refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
else if (QNext(uq) != tq)
refpanic("VLRU inconsistent");
+#ifdef AFS_DARWIN80_ENV
+ else if (!vnode_isinuse(AFSTOV(tvc)))
+#else
else if (VREFCOUNT(tvc) < 1)
+#endif
refpanic("refcnt 0 on VLRU");
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
+#ifdef AFS_DARWIN80_ENV
+ if (vnode_isinuse(AFSTOV(tvc)) &&
+#else
+ if (VREFCOUNT(tvc) == 1 &&
+#endif
+ tvc->opens == 0
&& (tvc->states & CUnlinkedDel) == 0) {
code = afs_FlushVCache(tvc, &fv_slept);
if (code == 0) {
}
#endif
+#ifdef AFS_DARWIN80_ENV
+ /* Extra '(' and closing ')' so this branch leaves the same two
+  * parentheses open as the legacy branch below; both are closed by
+  * the shared ") && tvc->opens == 0 ..." continuation line. */
+ if ((!vnode_isinuse(AFSTOV(tvc))
+#else
if (((VREFCOUNT(tvc) == 0)
#if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
|| ((VREFCOUNT(tvc) == 1) &&
(UBCINFOEXISTS(AFSTOV(tvc))))
#endif
+#endif
) && tvc->opens == 0 && (tvc->states & CUnlinkedDel) == 0) {
#if defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
/*
AFS_GUNLOCK();
afs_darwin_getnewvnode(tvc); /* includes one refcount */
AFS_GLOCK();
+#ifdef AFS_DARWIN80_ENV
+ LOCKINIT(tvc->rwlock);
+#else
lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
#endif
+#endif
#ifdef AFS_FBSD_ENV
{
struct vnode *vp;
#define MyPidxx current->pid
#else
#if defined(AFS_DARWIN_ENV)
+#if defined(AFS_DARWIN80_ENV)
+#define MyPidxx (proc_selfpid())
+#else
#define MyPidxx (current_proc()->p_pid )
+#endif
#else
#if defined(AFS_FBSD_ENV)
#define MyPidxx (curproc->p_pid )
# include "h/dir.h"
#endif /* SGI || SUN || HPUX */
-#if !defined(AFS_SGI64_ENV) && !defined(AFS_FBSD_ENV)
+#if !defined(AFS_SGI64_ENV) && !defined(AFS_FBSD_ENV) && !defined(AFS_DARWIN80_ENV)
#include "h/user.h"
#endif /* AFS_SGI64_ENV */
#define MACH_USER_API 1
# include <sys/uio.h>
# include <sys/mount.h>
# include <sys/namei.h>
+#ifdef AFS_DARWIN80_ENV
+# include <h/vnode.h>
+#else
# include <sys/vnode.h>
+#endif
# include <sys/queue.h>
# include <sys/malloc.h>
#ifndef AFS_FBSD_ENV
#undef timeout_fcn_t
#define _DIR_H_
#define doff_t int32_t
+#ifndef AFS_DARWIN80_ENV
# include <ufs/ufs/quota.h>
# include <ufs/ufs/inode.h>
# include <ufs/ffs/fs.h>
+#endif
#else
# include "h/vfs.h"
# include "h/vnode.h"
RCSID
("$Header$");
+#ifndef AFS_DARWIN80_ENV
/*
 * Currently everything is implemented in rx_kmutex.h
 */
+#else
+/* Darwin 8.0+: one lock group shared by every lck_mtx the module
+ * allocates (see the MUTEX_* macros in rx_kmutex.h). */
+lck_grp_t * openafs_lck_grp;
+static lck_grp_attr_t * openafs_lck_grp_attr;
+/* One-time setup, invoked via MUTEX_SETUP(): create the "openafs" lock
+ * group with statistics enabled.  The attribute object is only needed
+ * during creation and is freed immediately afterwards. */
+void rx_kmutex_setup(void) {
+ openafs_lck_grp_attr= lck_grp_attr_alloc_init();
+ lck_grp_attr_setstat(openafs_lck_grp_attr);
+
+ openafs_lck_grp = lck_grp_alloc_init("openafs", openafs_lck_grp_attr);
+ lck_grp_attr_free(openafs_lck_grp_attr);
+
+}
+
+/* Teardown, invoked via MUTEX_FINISH(): drop the group reference.
+ * NOTE(review): all mutexes allocated from the group are presumably
+ * freed before this runs — verify against module unload order. */
+void rx_kmutex_finish(void) {
+ lck_grp_free(openafs_lck_grp);
+}
+
+#endif
+
+
+
+
#define CV_BROADCAST(cv) thread_wakeup((event_t)(cv))
#endif
+#ifdef AFS_DARWIN80_ENV
typedef struct {
-    struct lock__bsd__ lock;
+    lck_mtx_t *lock;
thread_t owner;
} afs_kmutex_t;
typedef int afs_kcondvar_t;
-#define osi_rxWakeup(cv) thread_wakeup((event_t)(cv))
+extern lck_grp_t * openafs_lck_grp;
+
+#define MUTEX_SETUP() rx_kmutex_setup()
+#define MUTEX_FINISH() rx_kmutex_finish()
+#define LOCKINIT(a) \
+ do { \
+ lck_attr_t * openafs_lck_attr = lck_attr_alloc_init(); \
+ (a) = lck_mtx_alloc_init(openafs_lck_grp, openafs_lck_attr); \
+ lck_attr_free(openafs_lck_attr); \
+ } while(0);
+#define MUTEX_INIT(a,b,c,d) \
+ do { \
+ lck_attr_t * openafs_lck_attr = lck_attr_alloc_init(); \
+ (a)->lock = lck_mtx_alloc_init(openafs_lck_grp, openafs_lck_attr); \
+ lck_attr_free(openafs_lck_attr); \
+ (a)->owner = (thread_t)0; \
+ } while(0);
+/* (a)->lock came from lck_mtx_alloc_init(), so it must be released with
+ * lck_mtx_free() (destroy + deallocate); plain lck_mtx_destroy() would
+ * leak the allocation. */
+#define MUTEX_DESTROY(a) \
+ do { \
+ lck_mtx_free((a)->lock, openafs_lck_grp); \
+ (a)->owner = (thread_t)-1; \
+ } while(0);
+/* (a)->lock is already a lck_mtx_t *; taking &(a)->lock would pass a
+ * lck_mtx_t ** to the lock KPIs and corrupt memory. */
+#define MUTEX_ENTER(a) \
+ do { \
+ lck_mtx_lock((a)->lock); \
+ osi_Assert((a)->owner == (thread_t)0); \
+ (a)->owner = current_thread(); \
+ } while(0);
+#define MUTEX_TRYENTER(a) \
+ (lck_mtx_try_lock((a)->lock) ? ((a)->owner = current_thread(), 1) : 0)
+#define MUTEX_EXIT(a) \
+ do { \
+ osi_Assert((a)->owner == current_thread()); \
+ (a)->owner = (thread_t)0; \
+ lck_mtx_unlock((a)->lock); \
+ } while(0);
+
+#undef MUTEX_ISMINE
+#define MUTEX_ISMINE(a) (((afs_kmutex_t *)(a))->owner == current_thread())
+#else
+typedef struct {
+ struct lock__bsd__ lock;
+ thread_t owner;
+} afs_kmutex_t;
+typedef int afs_kcondvar_t;
#define LOCK_INIT(a,b) \
do { \
} while(0);
#define MUTEX_TRYENTER(a) \
( lockmgr(&(a)->lock, LK_EXCLUSIVE|LK_NOWAIT, 0, current_proc()) ? 0 : ((a)->owner = current_thread(), 1) )
-#define xMUTEX_TRYENTER(a) \
- ( osi_Assert((a)->owner == (thread_t)0), (a)->owner = current_thread(), 1)
#define MUTEX_EXIT(a) \
do { \
osi_Assert((a)->owner == current_thread()); \
#undef MUTEX_ISMINE
#define MUTEX_ISMINE(a) (((afs_kmutex_t *)(a))->owner == current_thread())
+#endif
#undef osirx_AssertMine
extern void osirx_AssertMine(afs_kmutex_t * lockaddr, char *msg);
#ifndef _XDR_PROTOTYPES_H
#define _XDR_PROTOTYPES_H
-/* I don't like this, but some of these defs depend on rx.h */
-#if defined(KERNEL) && defined(UKERNEL)
-#include "afs/sysincludes.h"
-#include "rx/rx.h"
-#else
-#include "rx/rx.h"
-#endif
+struct rx_call;
/* xdr_afsuuid.c */
extern int xdr_afsUUID(XDR * xdrs, afsUUID * objp);