#include <roken.h>
#include <afs/opr.h>
+
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
+
#include <ubik.h>
-#include <lock.h>
#include <afs/audit.h>
#include "database.h"
if (dumpSyncPtr->ds_readerStatus == DS_WAITING) {
dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&dumpSyncPtr->ds_readerStatus_cond);
+ opr_cv_broadcast(&dumpSyncPtr->ds_readerStatus_cond);
#else
code = LWP_SignalProcess(&dumpSyncPtr->ds_readerStatus);
if (code)
ReleaseWriteLock(&dumpSyncPtr->ds_lock);
#ifdef AFS_PTHREAD_ENV
MUTEX_ENTER(&dumpSyncPtr->ds_writerStatus_mutex);
- CV_WAIT(&dumpSyncPtr->ds_writerStatus_cond, &dumpSyncPtr->ds_writerStatus_mutex);
+ opr_cv_wait(&dumpSyncPtr->ds_writerStatus_cond, &dumpSyncPtr->ds_writerStatus_mutex);
MUTEX_EXIT(&dumpSyncPtr->ds_writerStatus_mutex);
#else
LWP_WaitProcess(&dumpSyncPtr->ds_writerStatus);
if (dumpSyncPtr->ds_readerStatus == DS_WAITING) {
dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&dumpSyncPtr->ds_readerStatus_cond);
+ opr_cv_broadcast(&dumpSyncPtr->ds_readerStatus_cond);
#else
code = LWP_SignalProcess(&dumpSyncPtr->ds_readerStatus);
if (code)
ReleaseWriteLock(&dumpSyncPtr->ds_lock);
#ifdef AFS_PTHREAD_ENV
MUTEX_ENTER(&dumpSyncPtr->ds_writerStatus_mutex);
- CV_WAIT(&dumpSyncPtr->ds_writerStatus_cond, &dumpSyncPtr->ds_writerStatus_mutex);
+ opr_cv_wait(&dumpSyncPtr->ds_writerStatus_cond, &dumpSyncPtr->ds_writerStatus_mutex);
MUTEX_EXIT(&dumpSyncPtr->ds_writerStatus_mutex);
#else
LWP_WaitProcess(&dumpSyncPtr->ds_writerStatus);
dumpSyncPtr->ds_writerStatus = DS_DONE;
dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&dumpSyncPtr->ds_readerStatus_cond);
+ opr_cv_broadcast(&dumpSyncPtr->ds_readerStatus_cond);
#else
code = LWP_NoYieldSignal(&dumpSyncPtr->ds_readerStatus);
if (code)
#include <roken.h>
#include <afs/opr.h>
-#include <lock.h>
+
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
+
#include <ubik.h>
#include <lwp.h>
#include <rx/rx.h>
/* Initialize the condition variables and the mutexes we use
* to signal and synchronize the reader and writer threads.
*/
- CV_INIT(&dumpSyncPtr->ds_readerStatus_cond, "reader cond", CV_DEFAULT, 0);
- CV_INIT(&dumpSyncPtr->ds_writerStatus_cond, "writer cond", CV_DEFAULT, 0);
- MUTEX_INIT(&dumpSyncPtr->ds_readerStatus_mutex, "reader", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&dumpSyncPtr->ds_writerStatus_mutex, "writer", MUTEX_DEFAULT, 0);
+ opr_cv_init(&dumpSyncPtr->ds_readerStatus_cond);
+ opr_cv_init(&dumpSyncPtr->ds_writerStatus_cond);
+ opr_mutex_init(&dumpSyncPtr->ds_readerStatus_mutex);
+ opr_mutex_init(&dumpSyncPtr->ds_writerStatus_mutex);
/* Initialize the thread attributes and launch the thread */
LogDebug(6, "wakup writer\n");
dumpSyncPtr->ds_writerStatus = 0;
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&dumpSyncPtr->ds_writerStatus_cond);
+ opr_cv_broadcast(&dumpSyncPtr->ds_writerStatus_cond);
#else
code = LWP_SignalProcess(&dumpSyncPtr->ds_writerStatus);
if (code)
dumpSyncPtr->ds_readerStatus = DS_WAITING;
ReleaseWriteLock(&dumpSyncPtr->ds_lock);
#ifdef AFS_PTHREAD_ENV
- MUTEX_ENTER(&dumpSyncPtr->ds_readerStatus_mutex);
- CV_WAIT(&dumpSyncPtr->ds_readerStatus_cond, &dumpSyncPtr->ds_readerStatus_mutex);
- MUTEX_EXIT(&dumpSyncPtr->ds_readerStatus_mutex);
+ opr_mutex_enter(&dumpSyncPtr->ds_readerStatus_mutex);
+ opr_cv_wait(&dumpSyncPtr->ds_readerStatus_cond, &dumpSyncPtr->ds_readerStatus_mutex);
+ opr_mutex_exit(&dumpSyncPtr->ds_readerStatus_mutex);
#else
LWP_WaitProcess(&dumpSyncPtr->ds_readerStatus);
#endif
if (dumpSyncPtr->ds_writerStatus == DS_WAITING) {
dumpSyncPtr->ds_writerStatus = 0;
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&dumpSyncPtr->ds_writerStatus_cond);
+ opr_cv_broadcast(&dumpSyncPtr->ds_writerStatus_cond);
#else
code = LWP_SignalProcess(&dumpSyncPtr->ds_writerStatus);
if (code)
lock->wait_states = 0;
lock->num_waiting = 0;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_mutex_init(&lock->mutex, NULL) == 0);
- opr_Verify(pthread_cond_init(&lock->read_cv, NULL) == 0);
- opr_Verify(pthread_cond_init(&lock->write_cv, NULL) == 0);
+ opr_mutex_init(&lock->mutex);
+ opr_cv_init(&lock->read_cv);
+ opr_cv_init(&lock->write_cv);
#endif /* AFS_PTHREAD_ENV */
}
Lock_Destroy(struct Lock *lock)
{
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_mutex_destroy(&lock->mutex) == 0);
- opr_Verify(pthread_cond_destroy(&lock->read_cv) == 0);
- opr_Verify(pthread_cond_destroy(&lock->write_cv) == 0);
+ opr_mutex_destroy(&lock->mutex);
+ opr_cv_destroy(&lock->read_cv);
+ opr_cv_destroy(&lock->write_cv);
#endif /* AFS_PTHREAD_ENV */
}
do {
lock->wait_states |= READ_LOCK;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_cond_wait(&lock->read_cv, &lock->mutex) == 0);
+ opr_cv_wait(&lock->read_cv, &lock->mutex);
#else /* AFS_PTHREAD_ENV */
LWP_WaitProcess(&lock->readers_reading);
#endif /* AFS_PTHREAD_ENV */
do {
lock->wait_states |= WRITE_LOCK;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_cond_wait(&lock->write_cv, &lock->mutex) == 0);
+ opr_cv_wait(&lock->write_cv, &lock->mutex);
#else /* AFS_PTHREAD_ENV */
LWP_WaitProcess(&lock->excl_locked);
#endif /* AFS_PTHREAD_ENV */
do {
lock->wait_states |= SHARED_LOCK;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_cond_wait(&lock->write_cv, &lock->mutex) == 0);
+ opr_cv_wait(&lock->write_cv, &lock->mutex);
#else /* AFS_PTHREAD_ENV */
LWP_WaitProcess(&lock->excl_locked);
#endif /* AFS_PTHREAD_ENV */
do {
lock->wait_states |= WRITE_LOCK;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_cond_wait(&lock->write_cv, &lock->mutex) == 0);
+ opr_cv_wait(&lock->write_cv, &lock->mutex);
#else /* AFS_PTHREAD_ENV */
LWP_WaitProcess(&lock->excl_locked);
#endif /* AFS_PTHREAD_ENV */
if (lock->wait_states & READ_LOCK) {
lock->wait_states &= ~READ_LOCK;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_cond_broadcast(&lock->read_cv) == 0);
+ opr_cv_broadcast(&lock->read_cv);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(&lock->readers_reading);
#endif /* AFS_PTHREAD_ENV */
if (lock->wait_states & READ_LOCK) {
lock->wait_states &= ~READ_LOCK;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_cond_broadcast(&lock->read_cv) == 0);
+ opr_cv_broadcast(&lock->read_cv);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(&lock->readers_reading);
#endif /* AFS_PTHREAD_ENV */
} else {
lock->wait_states &= ~EXCL_LOCKS;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_cond_broadcast(&lock->write_cv) == 0);
+ opr_cv_broadcast(&lock->write_cv);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(&lock->excl_locked);
#endif /* AFS_PTHREAD_ENV */
if (lock->wait_states & EXCL_LOCKS) {
lock->wait_states &= ~EXCL_LOCKS;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_cond_broadcast(&lock->write_cv) == 0);
+ opr_cv_broadcast(&lock->write_cv);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(&lock->excl_locked);
#endif /* AFS_PTHREAD_ENV */
} else {
lock->wait_states &= ~READ_LOCK;
#ifdef AFS_PTHREAD_ENV
- opr_Verify(pthread_cond_broadcast(&lock->read_cv) == 0);
+ opr_cv_broadcast(&lock->read_cv);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(&lock->readers_reading);
#endif /* AFS_PTHREAD_ENV */
#define ENDMAC } while (0)
#ifdef AFS_PTHREAD_ENV
-#include <pthread.h>
#include <afs/opr.h>
-#define LOCK_LOCK(A) opr_Verify(pthread_mutex_lock(&(A)->mutex) == 0);
-#define LOCK_UNLOCK(A) opr_Verify(pthread_mutex_unlock(&(A)->mutex) == 0);
+#include <opr/lock.h>
+#define LOCK_LOCK(A) opr_mutex_enter(&(A)->mutex);
+#define LOCK_UNLOCK(A) opr_mutex_exit(&(A)->mutex);
#else /* AFS_PTHREAD_ENV */
#define LOCK_LOCK(A)
#define LOCK_UNLOCK(A)
HEADERS = $(TOP_INCDIR)/afs/opr.h \
$(TOP_INCDIR)/afs/opr_assert.h \
$(TOP_INCDIR)/opr/jhash.h \
+ $(TOP_INCDIR)/opr/lock.h \
+ $(TOP_INCDIR)/opr/lockstub.h \
$(TOP_INCDIR)/opr/queue.h \
$(TOP_INCDIR)/opr/rbtree.h \
$(TOP_INCDIR)/opr/time.h \
$(TOP_INCDIR)/opr/jhash.h: ${srcdir}/jhash.h
$(INSTALL_DATA) $? $@
+$(TOP_INCDIR)/opr/lockstub.h: ${srcdir}/lockstub.h
+ $(INSTALL_DATA) $? $@
+
$(TOP_INCDIR)/opr/queue.h: ${srcdir}/queue.h
$(INSTALL_DATA) $? $@
$(TOP_INCDIR)/opr/rbtree.h: ${srcdir}/rbtree.h
$(INSTALL_DATA) $? $@
+$(TOP_INCDIR)/opr/lock.h: ${srcdir}/opr_lock.h
+ $(INSTALL_DATA) $? $@
+
$(TOP_INCDIR)/opr/time.h: ${srcdir}/opr_time.h
$(INSTALL_DATA) $? $@
$(DESTDIR)\include\opr\queue.h \
$(DESTDIR)\include\opr\rbtree.h \
$(DESTDIR)\include\opr\time.h \
+ $(DESTDIR)\include\opr\lock.h \
+ $(DESTDIR)\include\opr\lockstub.h \
$(DESTDIR)\include\opr\uuid.h
$(DESTDIR)\include\opr\time.h: opr_time.h
$(COPY) $** $@
+$(DESTDIR)\include\opr\lock.h: opr_lock.h
+ $(COPY) $** $@
+
LIBFILE = $(DESTDIR)\lib\opr.lib
LIBOBJS = \
--- /dev/null
+/*
+ * Copyright (c) 2012 Your File System Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*!
+ * A set of stub definitions that LWP (non-pthread) code can include to
+ * replace the pthread-based opr locking macros and typedefs with no-op
+ * equivalents.
+ *
+ * NOTE(review): only opr_cv_t is given a stub typedef here, while the
+ * pthread header also defines opr_mutex_t — confirm LWP code never
+ * declares opr_mutex_t members.
+ */
+
+#ifndef OPENAFS_OPR_LOCKSTUB_H
+#define OPENAFS_OPR_LOCKSTUB_H 1
+
+# ifdef AFS_PTHREAD_ENV
+# error "Do not use the opr/lockstub.h header with pthreaded code"
+# endif
+
+typedef int opr_cv_t;
+
+# define opr_mutex_init(mutex)
+# define opr_mutex_destroy(mutex)
+# define opr_mutex_enter(mutex)
+# define opr_mutex_exit(mutex)
+# define opr_mutex_tryenter(mutex) (1)
+# define opr_cv_init(condvar)
+# define opr_cv_destroy(condvar)
+# define opr_cv_wait(condvar, mutex)
+# define opr_cv_timedwait(condvar, mutex, timeout)
+# define opr_cv_signal(condvar)
+# define opr_cv_broadcast(condvar)
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2012 Your File System Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef OPENAFS_OPR_LOCK_H
+#define OPENAFS_OPR_LOCK_H 1
+
+#include <pthread.h>
+
+typedef pthread_mutex_t opr_mutex_t;
+
+# define opr_mutex_init(mutex) \
+ opr_Verify(pthread_mutex_init(mutex, NULL) == 0)
+
+# define opr_mutex_destroy(mutex) \
+ opr_Verify(pthread_mutex_destroy(mutex) == 0)
+
+# define opr_mutex_enter(mutex) \
+ opr_Verify(pthread_mutex_lock(mutex) == 0)
+
+# define opr_mutex_exit(mutex) \
+ opr_Verify(pthread_mutex_unlock(mutex) == 0)
+
+# define opr_mutex_tryenter(mutex) \
+ (pthread_mutex_trylock(mutex) ? 0: 1)
+
+typedef pthread_cond_t opr_cv_t;
+
+# define opr_cv_init(condvar) \
+ opr_Verify(pthread_cond_init(condvar, NULL) == 0)
+
+# define opr_cv_destroy(condvar) \
+ opr_Verify(pthread_cond_destroy(condvar) == 0)
+
+# define opr_cv_wait(condvar, mutex) \
+ opr_Verify(pthread_cond_wait(condvar, mutex) == 0)
+
+# define opr_cv_timedwait(condvar, mutex, timeout) \
+ pthread_cond_timedwait(condvar, mutex, timeout)
+
+# define opr_cv_signal(condvar) \
+ opr_Verify(pthread_cond_signal(condvar) == 0)
+
+# define opr_cv_broadcast(condvar) \
+ opr_Verify(pthread_cond_broadcast(condvar) == 0)
+
+#endif /* OPENAFS_OPR_LOCK_H */
/* Block signals to child threads. */
#include <afs/pthread_nosigs.h>
#include <afs/opr.h>
+#include <opr/lock.h>
#ifdef AFS_NT40_ENV
#include <wtypes.h>
extern void osirx_AssertMine(afs_kmutex_t * lockaddr, char *msg);
-#ifdef AFS_PTHREAD_ENV
-#ifdef MUTEX_INIT
-#undef MUTEX_INIT
-#endif
-#define MUTEX_INIT(a, b, c, d) opr_Verify(pthread_mutex_init(a, NULL) == 0)
-
-#ifdef MUTEX_DESTROY
-#undef MUTEX_DESTROY
-#endif
-#define MUTEX_DESTROY(l) opr_Verify(pthread_mutex_destroy(l) == 0)
-
-#ifdef MUTEX_ENTER
-#undef MUTEX_ENTER
-#endif
-#define MUTEX_ENTER(l) opr_Verify(pthread_mutex_lock(l) == 0)
-
-#ifdef MUTEX_TRYENTER
-#undef MUTEX_TRYENTER
-#endif
-#define MUTEX_TRYENTER(l) pthread_mutex_trylock(l) ? 0 : 1
-
-#ifdef MUTEX_EXIT
-#undef MUTEX_EXIT
-#endif
-#define MUTEX_EXIT(l) opr_Verify(pthread_mutex_unlock(l) == 0)
-
-#ifdef CV_INIT
-#undef CV_INIT
-#endif
-#define CV_INIT(cv, a, b, c) opr_Verify(pthread_cond_init(cv, NULL) == 0)
-
-#ifdef CV_DESTROY
-#undef CV_DESTROY
-#endif
-#define CV_DESTROY(cv) opr_Verify(pthread_cond_destroy(cv) == 0)
-
-#ifdef CV_WAIT
-#undef CV_WAIT
-#endif
-#define CV_WAIT(cv, l) opr_Verify(pthread_cond_wait(cv, l) == 0)
-
-#ifdef CV_TIMEDWAIT
-#undef CV_TIMEDWAIT
-#endif
-#define CV_TIMEDWAIT(cv, l, t) pthread_cond_timedwait(cv, l, t)
-
-#ifdef CV_SIGNAL
-#undef CV_SIGNAL
-#endif
-#define CV_SIGNAL(cv) opr_Verify(pthread_cond_signal(cv) == 0)
-
-#ifdef CV_BROADCAST
-#undef CV_BROADCAST
-#endif
-#define CV_BROADCAST(cv) opr_Verify(pthread_cond_broadcast(cv) == 0)
-
-#endif /* AFS_PTHREAD_ENV */
-
+#define MUTEX_INIT(a, b, c, d) opr_mutex_init(a)
+#define MUTEX_DESTROY(l) opr_mutex_destroy(l)
+#define MUTEX_ENTER(l) opr_mutex_enter(l)
+#define MUTEX_TRYENTER(l) opr_mutex_tryenter(l)
+#define MUTEX_EXIT(l) opr_mutex_exit(l)
+#define CV_INIT(cv, a, b, c) opr_cv_init(cv)
+#define CV_DESTROY(cv) opr_cv_destroy(cv)
+#define CV_WAIT(cv, l) opr_cv_wait(cv, l)
+#define CV_TIMEDWAIT(cv, l, t) opr_cv_timedwait(cv, l, t)
+#define CV_SIGNAL(cv) opr_cv_signal(cv)
+#define CV_BROADCAST(cv) opr_cv_broadcast(cv)
#endif /* RX_PTHREAD_H */
f_print(fout, "#include <afs/param.h>\n");
f_print(fout, "#include <roken.h>\n");
f_print(fout, "#include <afs/opr.h>\n");
+ f_print(fout, "#ifdef AFS_PTHREAD_ENV\n");
+ f_print(fout, "# include <opr/lock.h>\n");
+ f_print(fout, "#endif\n");
f_print(fout, "#include \"%s\"\n\n", include);
}
free(include);
#include <roken.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#else
+# include <opr/lockstub.h>
+#endif
+
#include <lock.h>
#include <rx/rx.h>
#include <rx/rxkad.h>
#include <roken.h>
#include <afs/opr.h>
-#include <lock.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#else
+# include <opr/lockstub.h>
+#endif
#define UBIK_INTERNALS
#include "ubik.h"
UBIK_VERSION_LOCK;
dbase->version.counter++; /* bump commit count */
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&dbase->version_cond);
+ opr_cv_broadcast(&dbase->version_cond);
#else
LWP_NoYieldSignal(&dbase->version);
#endif
/* Wakeup any writers waiting in BeginTrans() */
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&dbase->flags_cond);
+ opr_cv_broadcast(&dbase->flags_cond);
#else
LWP_NoYieldSignal(&dbase->flags);
#endif
#include <roken.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
#include <lock.h>
#define UBIK_INTERNALS 1
#include <roken.h>
#include <afs/opr.h>
-#include <lock.h>
-#include <rx/xdr.h>
+
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#else
+# include <opr/lockstub.h>
+#endif
+
#include <rx/rx.h>
#include <afs/afsutil.h>
#include <afs/cellconfig.h>
+
#define UBIK_INTERNALS
#include "ubik.h"
#include "ubik_int.h"
(*adbase->setlabel) (adbase, 0, &adbase->version);
}
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&adbase->version_cond);
+ opr_cv_broadcast(&adbase->version_cond);
#else
LWP_NoYieldSignal(&adbase->version);
#endif
}
udisk_Invalidate(ubik_dbase, 0); /* data has changed */
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&ubik_dbase->version_cond);
+ opr_cv_broadcast(&ubik_dbase->version_cond);
#else
LWP_NoYieldSignal(&ubik_dbase->version);
#endif
UBIK_VERSION_UNLOCK;
udisk_Invalidate(ubik_dbase, 0); /* data may have changed */
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&ubik_dbase->version_cond);
+ opr_cv_broadcast(&ubik_dbase->version_cond);
#else
LWP_NoYieldSignal(&ubik_dbase->version);
#endif
#include <assert.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#else
+# include <opr/lockstub.h>
+#endif
+
#include <lock.h>
#include <rx/xdr.h>
#include <rx/rx.h>
#include <roken.h>
+
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#else
+# include <opr/lockstub.h>
+#endif
+
#include <lock.h>
#include <rx/rx.h>
#include <afs/cellconfig.h>
+
#define UBIK_INTERNALS
#include "ubik.h"
#include "ubik_int.h"
memset(&tdb->version, 0, sizeof(struct ubik_version));
memset(&tdb->cachedVersion, 0, sizeof(struct ubik_version));
#ifdef AFS_PTHREAD_ENV
- MUTEX_INIT(&tdb->versionLock, "version lock", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&beacon_globals.beacon_lock, "beacon lock", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&vote_globals.vote_lock, "vote lock", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&addr_globals.addr_lock, "address lock", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&version_globals.version_lock, "version lock", MUTEX_DEFAULT, 0);
+ opr_mutex_init(&tdb->versionLock);
+ opr_mutex_init(&beacon_globals.beacon_lock);
+ opr_mutex_init(&vote_globals.vote_lock);
+ opr_mutex_init(&addr_globals.addr_lock);
+ opr_mutex_init(&version_globals.version_lock);
#else
Lock_Init(&tdb->versionLock);
#endif
ubik_dbase = tdb; /* for now, only one db per server; can fix later when we have names for the other dbases */
#ifdef AFS_PTHREAD_ENV
- CV_INIT(&tdb->version_cond, "version", CV_DEFAULT, 0);
- CV_INIT(&tdb->flags_cond, "flags", CV_DEFAULT, 0);
+ opr_cv_init(&tdb->version_cond);
+ opr_cv_init(&tdb->flags_cond);
#endif /* AFS_PTHREAD_ENV */
/* initialize RX */
/* if we're writing already, wait */
while (dbase->flags & DBWRITING) {
#ifdef AFS_PTHREAD_ENV
- CV_WAIT(&dbase->flags_cond, &dbase->versionLock);
+ opr_cv_wait(&dbase->flags_cond, &dbase->versionLock);
#else
DBRELE(dbase);
LWP_WaitProcess(&dbase->flags);
return 0;
}
#ifdef AFS_PTHREAD_ENV
- CV_WAIT(&adatabase->version_cond, &adatabase->versionLock);
+ opr_cv_wait(&adatabase->version_cond, &adatabase->versionLock);
#else
DBRELE(adatabase);
LWP_WaitProcess(&adatabase->version); /* same vers, just wait */
};
#ifdef AFS_PTHREAD_ENV
-#define LOCK_UBIK_CLIENT(client) MUTEX_ENTER(&client->cm)
-#define UNLOCK_UBIK_CLIENT(client) MUTEX_EXIT(&client->cm)
+#define LOCK_UBIK_CLIENT(client) opr_mutex_enter(&client->cm)
+#define UNLOCK_UBIK_CLIENT(client) opr_mutex_exit(&client->cm)
#else
#define LOCK_UBIK_CLIENT(client)
#define UNLOCK_UBIK_CLIENT(client)
/*! \name hold and release functions on a database */
#ifdef AFS_PTHREAD_ENV
-# define DBHOLD(a) MUTEX_ENTER(&((a)->versionLock))
-# define DBRELE(a) MUTEX_EXIT(&((a)->versionLock))
+# define DBHOLD(a) opr_mutex_enter(&((a)->versionLock))
+# define DBRELE(a) opr_mutex_exit(&((a)->versionLock))
#else /* !AFS_PTHREAD_ENV */
# define DBHOLD(a) ObtainWriteLock(&((a)->versionLock))
# define DBRELE(a) ReleaseWriteLock(&((a)->versionLock))
afs_int32 syncSiteUntil; /*!< valid only if amSyncSite */
};
-#define UBIK_BEACON_LOCK MUTEX_ENTER(&beacon_globals.beacon_lock)
-#define UBIK_BEACON_UNLOCK MUTEX_EXIT(&beacon_globals.beacon_lock)
+#define UBIK_BEACON_LOCK opr_mutex_enter(&beacon_globals.beacon_lock)
+#define UBIK_BEACON_UNLOCK opr_mutex_exit(&beacon_globals.beacon_lock)
/*!
* \brief Global vote data. All values are protected by vote_lock
afs_int32 syncHost;
};
-#define UBIK_VOTE_LOCK MUTEX_ENTER(&vote_globals.vote_lock)
-#define UBIK_VOTE_UNLOCK MUTEX_EXIT(&vote_globals.vote_lock)
+#define UBIK_VOTE_LOCK opr_mutex_enter(&vote_globals.vote_lock)
+#define UBIK_VOTE_UNLOCK opr_mutex_exit(&vote_globals.vote_lock)
/*!
* \brief Server address data. All values are protected by addr_lock
struct rx_securityClass *ubikSecClass;
};
-#define UBIK_ADDR_LOCK MUTEX_ENTER(&addr_globals.addr_lock)
-#define UBIK_ADDR_UNLOCK MUTEX_EXIT(&addr_globals.addr_lock)
+#define UBIK_ADDR_LOCK opr_mutex_enter(&addr_globals.addr_lock)
+#define UBIK_ADDR_UNLOCK opr_mutex_exit(&addr_globals.addr_lock)
/*!
* \brief The version lock protects the structure member, as well as
afs_int32 ubik_epochTime; /* time when this site started */
};
-#define UBIK_VERSION_LOCK MUTEX_ENTER(&version_globals.version_lock)
-#define UBIK_VERSION_UNLOCK MUTEX_EXIT(&version_globals.version_lock)
+#define UBIK_VERSION_LOCK opr_mutex_enter(&version_globals.version_lock)
+#define UBIK_VERSION_UNLOCK opr_mutex_exit(&version_globals.version_lock)
/* phys.c */
extern int uphys_stat(struct ubik_dbase *adbase, afs_int32 afid,
#include <roken.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
#ifdef IGNORE_SOME_GCC_WARNINGS
# pragma GCC diagnostic warning "-Wstrict-prototypes"
#include <roken.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#else
+# include <opr/lockstub.h>
+#endif
#include <lock.h>
#include <rx/rx.h>
#include <afs/afsutil.h>
struct afs_thread_pool * pool = worker->pool;
/* register worker with pool */
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
queue_Append(&pool->thread_list, worker);
pool->nthreads++;
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
/* call high-level entry point */
worker->ret = (*pool->entry)(pool, worker, pool->work_queue, pool->rock);
/* adjust pool live thread count */
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
opr_Assert(pool->nthreads);
queue_Remove(worker);
pool->nthreads--;
if (!pool->nthreads) {
- CV_BROADCAST(&pool->shutdown_cv);
+ opr_cv_broadcast(&pool->shutdown_cv);
pool->state = AFS_TP_STATE_STOPPED;
}
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
_afs_tp_worker_free(worker);
}
pool = *pool_out;
- MUTEX_INIT(&pool->lock, "pool", MUTEX_DEFAULT, 0);
- CV_INIT(&pool->shutdown_cv, "pool shutdown", CV_DEFAULT, 0);
+ opr_mutex_init(&pool->lock);
+ opr_cv_init(&pool->shutdown_cv);
queue_Init(&pool->thread_list);
pool->work_queue = queue;
pool->entry = &_afs_tp_worker_default;
{
int ret = 0;
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
switch (pool->state) {
case AFS_TP_STATE_INIT:
case AFS_TP_STATE_STOPPED:
default:
ret = AFS_TP_ERROR;
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
}
return ret;
{
int ret = 0;
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
if (pool->state != AFS_TP_STATE_INIT) {
ret = AFS_TP_ERROR;
} else {
pool->max_threads = threads;
}
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
return ret;
}
{
int ret = 0;
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
if (pool->state != AFS_TP_STATE_INIT) {
ret = AFS_TP_ERROR;
} else {
pool->entry = entry;
pool->rock = rock;
}
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
return ret;
}
struct afs_thread_pool_worker * worker;
afs_uint32 i;
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
if (pool->state != AFS_TP_STATE_INIT) {
ret = AFS_TP_ERROR;
goto done_sync;
}
pool->state = AFS_TP_STATE_STARTING;
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
for (i = 0; i < pool->max_threads; i++) {
code = _afs_tp_worker_start(pool, &worker);
}
}
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
pool->state = AFS_TP_STATE_RUNNING;
done_sync:
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
return ret;
}
int ret = 0;
struct afs_thread_pool_worker * worker, *nn;
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
if (pool->state == AFS_TP_STATE_STOPPED
|| pool->state == AFS_TP_STATE_STOPPING) {
goto done_stopped;
pool->state = AFS_TP_STATE_STOPPED;
}
/* need to drop lock to get a membar here */
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
ret = afs_wq_shutdown(pool->work_queue);
if (ret) {
goto error;
}
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
done_stopped:
if (block) {
while (pool->nthreads) {
- CV_WAIT(&pool->shutdown_cv, &pool->lock);
+ opr_cv_wait(&pool->shutdown_cv, &pool->lock);
}
}
done_sync:
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
error:
return ret;
{
int ret;
- MUTEX_ENTER(&pool->lock);
+ opr_mutex_enter(&pool->lock);
ret = (pool->state == AFS_TP_STATE_RUNNING);
- MUTEX_EXIT(&pool->lock);
+ opr_mutex_exit(&pool->lock);
return ret;
}
#include <roken.h>
#include <afs/opr.h>
+#include <opr/lock.h>
#include <sys/file.h>
-#include <lock.h>
-
#define __AFS_WORK_QUEUE_IMPL 1
#include "work_queue.h"
#include "work_queue_impl.h"
old_state = node->state;
node->state = new_state;
- CV_BROADCAST(&node->state_cv);
+ opr_cv_broadcast(&node->state_cv);
return old_state;
}
_afs_wq_node_state_wait_busy(struct afs_work_queue_node * node)
{
while (node->state == AFS_WQ_NODE_STATE_BUSY) {
- CV_WAIT(&node->state_cv, &node->lock);
+ opr_cv_wait(&node->state_cv, &node->lock);
}
return 0;
}
}
- code = MUTEX_TRYENTER(&ml->nodes[1].node->lock);
+ code = opr_mutex_tryenter(&ml->nodes[1].node->lock);
if (code) {
/* success */
goto done;
}
/* setup for main loop */
- MUTEX_EXIT(&ml->nodes[0].node->lock);
+ opr_mutex_exit(&ml->nodes[0].node->lock);
}
/*
delay.tv_nsec = 500 + rand() % 500;
while (1) {
- MUTEX_ENTER(&ml->nodes[first].node->lock);
+ opr_mutex_enter(&ml->nodes[first].node->lock);
if ((first != 0) || !ml->nodes[0].busy_held) {
ret = _afs_wq_node_state_wait_busy(ml->nodes[first].node);
if (ret) {
/* cleanup */
if (!ml->nodes[0].lock_held || first) {
- MUTEX_EXIT(&ml->nodes[first].node->lock);
+ opr_mutex_exit(&ml->nodes[first].node->lock);
if (ml->nodes[0].lock_held) {
/* on error, return with locks in same state as before call */
- MUTEX_ENTER(&ml->nodes[0].node->lock);
+ opr_mutex_enter(&ml->nodes[0].node->lock);
}
}
goto error;
* a non-blocking state check. if we meet any contention,
* we must drop back and start again.
*/
- code = MUTEX_TRYENTER(&ml->nodes[second].node->lock);
+ code = opr_mutex_tryenter(&ml->nodes[second].node->lock);
if (code) {
if (((second == 0) && (ml->nodes[0].busy_held)) ||
!_afs_wq_node_state_is_busy(ml->nodes[second].node)) {
/* success */
break;
} else {
- MUTEX_EXIT(&ml->nodes[second].node->lock);
+ opr_mutex_exit(&ml->nodes[second].node->lock);
}
}
* drop locks, use exponential backoff,
* try acquiring in the opposite order
*/
- MUTEX_EXIT(&ml->nodes[first].node->lock);
+ opr_mutex_exit(&ml->nodes[first].node->lock);
nanosleep(&delay, NULL);
if (delay.tv_nsec <= 65536000) { /* max backoff delay of ~131ms */
delay.tv_nsec <<= 1;
afs_wq_node_list_id_t id)
{
queue_Init(&list->list);
- MUTEX_INIT(&list->lock, "list", MUTEX_DEFAULT, 0);
- CV_INIT(&list->cv, "list", CV_DEFAULT, 0);
+ opr_mutex_init(&list->lock);
+ opr_cv_init(&list->cv);
list->qidx = id;
list->shutdown = 0;
goto error;
}
- MUTEX_DESTROY(&list->lock);
- CV_DESTROY(&list->cv);
+ opr_mutex_destroy(&list->lock);
+ opr_cv_destroy(&list->cv);
error:
return ret;
int ret = 0;
struct afs_work_queue_node *node, *nnode;
- MUTEX_ENTER(&list->lock);
+ opr_mutex_enter(&list->lock);
list->shutdown = 1;
for (queue_Scan(&list->list, node, nnode, afs_work_queue_node)) {
}
}
- CV_BROADCAST(&list->cv);
- MUTEX_EXIT(&list->lock);
+ opr_cv_broadcast(&list->cv);
+ opr_mutex_exit(&list->lock);
return ret;
}
}
/* deal with lock inversion */
- code = MUTEX_TRYENTER(&list->lock);
+ code = opr_mutex_tryenter(&list->lock);
if (!code) {
/* contended */
_afs_wq_node_state_change(node, AFS_WQ_NODE_STATE_BUSY);
- MUTEX_EXIT(&node->lock);
- MUTEX_ENTER(&list->lock);
- MUTEX_ENTER(&node->lock);
+ opr_mutex_exit(&node->lock);
+ opr_mutex_enter(&list->lock);
+ opr_mutex_enter(&node->lock);
/* assert state of the world (we set busy, so this should never happen) */
opr_Assert(queue_IsNotOnQueue(node));
opr_Assert(node->qidx == AFS_WQ_NODE_LIST_NONE);
if (queue_IsEmpty(&list->list)) {
/* wakeup a dequeue thread */
- CV_SIGNAL(&list->cv);
+ opr_cv_signal(&list->cv);
}
queue_Append(&list->list, node);
node->qidx = list->qidx;
_afs_wq_node_state_change(node, state);
error_unlock:
- MUTEX_EXIT(&node->lock);
- MUTEX_EXIT(&list->lock);
+ opr_mutex_exit(&node->lock);
+ opr_mutex_exit(&list->lock);
error:
return ret;
int ret = 0;
struct afs_work_queue_node * node;
- MUTEX_ENTER(&list->lock);
+ opr_mutex_enter(&list->lock);
if (list->shutdown) {
*node_out = NULL;
ret = EINTR;
goto done_sync;
}
- CV_WAIT(&list->cv, &list->lock);
+ opr_cv_wait(&list->cv, &list->lock);
}
*node_out = node = queue_First(&list->list, afs_work_queue_node);
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
queue_Remove(node);
node->qidx = AFS_WQ_NODE_LIST_NONE;
_afs_wq_node_state_change(node, state);
done_sync:
- MUTEX_EXIT(&list->lock);
+ opr_mutex_exit(&list->lock);
return ret;
}
}
if (list) {
- code = MUTEX_TRYENTER(&list->lock);
+ code = opr_mutex_tryenter(&list->lock);
if (!code) {
/* contended */
_afs_wq_node_state_change(node,
AFS_WQ_NODE_STATE_BUSY);
- MUTEX_EXIT(&node->lock);
- MUTEX_ENTER(&list->lock);
- MUTEX_ENTER(&node->lock);
+ opr_mutex_exit(&node->lock);
+ opr_mutex_enter(&list->lock);
+ opr_mutex_enter(&node->lock);
if (node->qidx == AFS_WQ_NODE_LIST_NONE) {
/* raced */
_afs_wq_node_state_change(node, next_state);
done_sync:
- MUTEX_EXIT(&list->lock);
+ opr_mutex_exit(&list->lock);
}
error:
nd,
afs_work_queue_dep_node)) {
- MUTEX_ENTER(&dep->child->lock);
+ opr_mutex_enter(&dep->child->lock);
node_unlock = dep->child;
/* We need to get a ref on child here, since _afs_wq_dep_unlink_r may
if (node_put) {
_afs_wq_node_put_r(node_put, 1);
} else if (node_unlock) {
- MUTEX_EXIT(&node_unlock->lock);
+ opr_mutex_exit(&node_unlock->lock);
}
node_put = node_unlock = NULL;
/* skip unscheduled nodes */
if (dep->child->queue == NULL) {
- MUTEX_EXIT(&dep->child->lock);
+ opr_mutex_exit(&dep->child->lock);
continue;
}
ret = _afs_wq_node_list_remove(dep->child,
AFS_WQ_NODE_STATE_BUSY);
if (ret) {
- MUTEX_EXIT(&dep->child->lock);
+ opr_mutex_exit(&dep->child->lock);
goto error;
}
dep->child,
cns);
if (ret) {
- MUTEX_EXIT(&dep->child->lock);
+ opr_mutex_exit(&dep->child->lock);
goto error;
}
}
- MUTEX_EXIT(&dep->child->lock);
+ opr_mutex_exit(&dep->child->lock);
}
error:
static void
_afs_wq_dec_running_count(struct afs_work_queue *queue)
{
- MUTEX_ENTER(&queue->lock);
+ opr_mutex_enter(&queue->lock);
queue->running_count--;
if (queue->shutdown && queue->running_count == 0) {
/* if we've shut down, someone may be waiting for the running count
* to drop to 0 */
- CV_BROADCAST(&queue->running_cv);
+ opr_cv_broadcast(&queue->running_cv);
}
- MUTEX_EXIT(&queue->lock);
+ opr_mutex_exit(&queue->lock);
}
/**
* _afs_wq_node_list_dequeue should return immediately with EINTR,
* in which case we'll dec running_count, so it's as if we never inc'd it
* in the first place. */
- MUTEX_ENTER(&queue->lock);
+ opr_mutex_enter(&queue->lock);
if (queue->shutdown) {
- MUTEX_EXIT(&queue->lock);
+ opr_mutex_exit(&queue->lock);
return EINTR;
}
queue->running_count++;
- MUTEX_EXIT(&queue->lock);
+ opr_mutex_exit(&queue->lock);
ret = _afs_wq_node_list_dequeue(&queue->ready_list,
&node,
detached = node->detached;
if (cbf != NULL) {
- MUTEX_EXIT(&node->lock);
+ opr_mutex_exit(&node->lock);
code = (*cbf)(queue, node, queue->rock, node_rock, rock);
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
if (code == 0) {
next_state = AFS_WQ_NODE_STATE_DONE;
ql = &queue->done_list;
if ((next_state == AFS_WQ_NODE_STATE_DONE) ||
(next_state == AFS_WQ_NODE_STATE_ERROR)) {
- MUTEX_ENTER(&queue->lock);
+ opr_mutex_enter(&queue->lock);
if (queue->drain && queue->pend_count == queue->opts.pend_lothresh) {
/* signal other threads if we're about to below the low
* pending-tasks threshold */
queue->drain = 0;
- CV_SIGNAL(&queue->pend_cv);
+ opr_cv_signal(&queue->pend_cv);
}
if (queue->pend_count == 1) {
/* signal other threads if we're about to become 'empty' */
- CV_BROADCAST(&queue->empty_cv);
+ opr_cv_broadcast(&queue->empty_cv);
}
queue->pend_count--;
- MUTEX_EXIT(&queue->lock);
+ opr_mutex_exit(&queue->lock);
}
ret = _afs_wq_node_state_wait_busy(node);
queue->pend_count = 0;
queue->running_count = 0;
- MUTEX_INIT(&queue->lock, "queue", MUTEX_DEFAULT, 0);
- CV_INIT(&queue->pend_cv, "queue pending", CV_DEFAULT, 0);
- CV_INIT(&queue->empty_cv, "queue empty", CV_DEFAULT, 0);
- CV_INIT(&queue->running_cv, "queue running", CV_DEFAULT, 0);
+ opr_mutex_init(&queue->lock);
+ opr_cv_init(&queue->pend_cv);
+ opr_cv_init(&queue->empty_cv);
+ opr_cv_init(&queue->running_cv);
error:
return ret;
{
int ret = 0;
- MUTEX_ENTER(&queue->lock);
+ opr_mutex_enter(&queue->lock);
if (queue->shutdown) {
/* already shutdown, do nothing */
- MUTEX_EXIT(&queue->lock);
+ opr_mutex_exit(&queue->lock);
goto error;
}
queue->shutdown = 1;
/* signal everyone that could be waiting, since these conditions will
* generally fail to signal on their own if we're shutdown, since no
* progress is being made */
- CV_BROADCAST(&queue->pend_cv);
- CV_BROADCAST(&queue->empty_cv);
- MUTEX_EXIT(&queue->lock);
+ opr_cv_broadcast(&queue->pend_cv);
+ opr_cv_broadcast(&queue->empty_cv);
+ opr_mutex_exit(&queue->lock);
error:
return ret;
node->refcount = 1;
node->block_count = 0;
node->error_count = 0;
- MUTEX_INIT(&node->lock, "node", MUTEX_DEFAULT, 0);
- CV_INIT(&node->state_cv, "node state", CV_DEFAULT, 0);
+ opr_mutex_init(&node->lock);
+ opr_cv_init(&node->state_cv);
node->state = AFS_WQ_NODE_STATE_INIT;
queue_Init(&node->dep_children);
goto error;
}
- MUTEX_DESTROY(&node->lock);
- CV_DESTROY(&node->state_cv);
+ opr_mutex_destroy(&node->lock);
+ opr_cv_destroy(&node->state_cv);
if (node->rock_dtor) {
(*node->rock_dtor) (node->rock);
int
afs_wq_node_get(struct afs_work_queue_node * node)
{
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
node->refcount++;
- MUTEX_EXIT(&node->lock);
+ opr_mutex_exit(&node->lock);
return 0;
}
opr_Assert(node->refcount > 0);
refc = --node->refcount;
if (drop) {
- MUTEX_EXIT(&node->lock);
+ opr_mutex_exit(&node->lock);
}
if (!refc) {
opr_Assert(node->qidx == AFS_WQ_NODE_LIST_NONE);
int
afs_wq_node_put(struct afs_work_queue_node * node)
{
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
return _afs_wq_node_put_r(node, 1);
}
afs_wq_callback_func_t * cbf,
void * rock, afs_wq_callback_dtor_t *dtor)
{
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
node->cbf = cbf;
node->rock = rock;
node->rock_dtor = dtor;
- MUTEX_EXIT(&node->lock);
+ opr_mutex_exit(&node->lock);
return 0;
}
int
afs_wq_node_set_detached(struct afs_work_queue_node * node)
{
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
node->detached = 1;
- MUTEX_EXIT(&node->lock);
+ opr_mutex_exit(&node->lock);
return 0;
}
done:
if (held) {
- MUTEX_EXIT(&child->lock);
- MUTEX_EXIT(&parent->lock);
+ opr_mutex_exit(&child->lock);
+ opr_mutex_exit(&parent->lock);
}
return ret;
error:
if (held) {
- MUTEX_EXIT(&child->lock);
- MUTEX_EXIT(&parent->lock);
+ opr_mutex_exit(&child->lock);
+ opr_mutex_exit(&parent->lock);
}
return ret;
}
int ret = 0;
int start;
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
ret = _afs_wq_node_state_wait_busy(node);
if (ret) {
goto error_sync;
}
error_sync:
- MUTEX_EXIT(&node->lock);
+ opr_mutex_exit(&node->lock);
return ret;
}
int ret = 0;
int end;
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
ret = _afs_wq_node_state_wait_busy(node);
if (ret) {
goto error_sync;
}
error_sync:
- MUTEX_EXIT(&node->lock);
+ opr_mutex_exit(&node->lock);
return ret;
}
force = opts->force;
retry:
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
ret = _afs_wq_node_state_wait_busy(node);
if (ret) {
ret = 0;
- MUTEX_ENTER(&queue->lock);
+ opr_mutex_enter(&queue->lock);
if (queue->shutdown) {
ret = EINTR;
- MUTEX_EXIT(&queue->lock);
- MUTEX_EXIT(&node->lock);
+ opr_mutex_exit(&queue->lock);
+ opr_mutex_exit(&node->lock);
goto error;
}
if (queue->drain) {
if (block) {
- MUTEX_EXIT(&node->lock);
- CV_WAIT(&queue->pend_cv, &queue->lock);
+ opr_mutex_exit(&node->lock);
+ opr_cv_wait(&queue->pend_cv, &queue->lock);
if (queue->shutdown) {
ret = EINTR;
} else {
- MUTEX_EXIT(&queue->lock);
+ opr_mutex_exit(&queue->lock);
waited_for_drain = 1;
}
if (waited_for_drain) {
/* signal another thread that may have been waiting for drain */
- CV_SIGNAL(&queue->pend_cv);
+ opr_cv_signal(&queue->pend_cv);
}
- MUTEX_EXIT(&queue->lock);
+ opr_mutex_exit(&queue->lock);
if (ret) {
goto error;
{
int ret = 0;
- MUTEX_ENTER(&queue->lock);
+ opr_mutex_enter(&queue->lock);
while (queue->pend_count > 0 && !queue->shutdown) {
- CV_WAIT(&queue->empty_cv, &queue->lock);
+ opr_cv_wait(&queue->empty_cv, &queue->lock);
}
if (queue->shutdown) {
* running e.g. in the middle of their callback. ensure they have
* stopped before we return. */
while (queue->running_count > 0) {
- CV_WAIT(&queue->running_cv, &queue->lock);
+ opr_cv_wait(&queue->running_cv, &queue->lock);
}
ret = EINTR;
goto done;
}
done:
- MUTEX_EXIT(&queue->lock);
+ opr_mutex_exit(&queue->lock);
/* technically this doesn't really guarantee that the work queue is empty
* after we return, but we do guarantee that it was empty at some point */
{
int ret = 0;
- MUTEX_ENTER(&node->lock);
+ opr_mutex_enter(&node->lock);
if (node->state == AFS_WQ_NODE_STATE_INIT) {
/* not sure what to do in this case */
goto done_sync;
while ((node->state != AFS_WQ_NODE_STATE_DONE) &&
(node->state != AFS_WQ_NODE_STATE_ERROR)) {
- CV_WAIT(&node->state_cv, &node->lock);
+ opr_cv_wait(&node->state_cv, &node->lock);
}
- if (retcode) {
+ if (retcode) {
*retcode = node->retcode;
}
AFS_WQ_NODE_STATE_INIT);
done_sync:
- MUTEX_EXIT(&node->lock);
+ opr_mutex_exit(&node->lock);
return ret;
}
#include <afs/opr.h>
#include <rx/rx_queue.h>
+#include <opr/lock.h>
#include <afs/nfs.h>
-#include <lwp.h>
-#include <lock.h>
#include <afs/afsint.h>
#include <afs/vldbint.h>
#include <afs/errors.h>
H_LOCK;
while (client->host->hostFlags & HCPS_INPROGRESS) {
client->host->hostFlags |= HCPS_WAITING; /* I am waiting */
- CV_WAIT(&client->host->cond, &host_glock_mutex);
+ opr_cv_wait(&client->host->cond, &host_glock_mutex);
}
if (!client->host->hcps.prlist_len || !client->host->hcps.prlist_val) {
#endif
#include <afs/opr.h>
+#include <opr/lock.h>
#include <afs/nfs.h> /* yuck. This is an abomination. */
-#include <lwp.h>
#include <rx/rx.h>
#include <rx/rx_queue.h>
#include <afs/afscbint.h>
#include <afs/afsutil.h>
-#include <lock.h>
#include <afs/ihandle.h>
-#include <afs/vnode.h>
-#include <afs/volume.h>
#include "viced_prototypes.h"
#include "viced.h"
ViceLog(25, ("Fsync thread wakeup\n"));
FSYNC_LOCK;
- CV_BROADCAST(&fsync_cond);
+ opr_cv_broadcast(&fsync_cond);
FSYNC_UNLOCK;
return 0;
}
#include <roken.h>
#include <afs/opr.h>
+#include <opr/lock.h>
#include <afs/afsint.h>
#include <afs/ihandle.h>
#include <afs/nfs.h>
#include <roken.h>
#include <afs/opr.h>
+#include <opr/lock.h>
#ifdef HAVE_SYS_FILE_H
#include <sys/file.h>
#endif
-#include <rx/xdr.h>
-#include <lwp.h>
-#include <lock.h>
#include <afs/afsint.h>
#define FSINT_COMMON_XG
#include <afs/afscbint.h>
ShutDownAndCore(PANIC);
}
for (i = 0; i < (h_HTSPERBLOCK); i++)
- CV_INIT(&block->entry[i].cond, "block entry", CV_DEFAULT, 0);
+ opr_cv_init(&block->entry[i].cond);
for (i = 0; i < (h_HTSPERBLOCK); i++)
Lock_Init(&block->entry[i].lock);
for (i = 0; i < (h_HTSPERBLOCK - 1); i++)
while (host->hostFlags & HCPS_INPROGRESS) {
slept = 1; /* I did sleep */
host->hostFlags |= HCPS_WAITING; /* I am sleeping now */
- CV_WAIT(&host->cond, &host_glock_mutex);
+ opr_cv_wait(&host->cond, &host_glock_mutex);
}
/* signal all who are waiting */
if (host->hostFlags & HCPS_WAITING) { /* somebody is waiting */
host->hostFlags &= ~HCPS_WAITING;
- CV_BROADCAST(&host->cond);
+ opr_cv_broadcast(&host->cond);
}
}
memset(&nulluuid, 0, sizeof(afsUUID));
rxcon_ident_key = rx_KeyCreate((rx_destructor_t) free);
rxcon_client_key = rx_KeyCreate((rx_destructor_t) 0);
- MUTEX_INIT(&host_glock_mutex, "host glock", MUTEX_DEFAULT, 0);
+ opr_mutex_init(&host_glock_mutex);
}
static int
#include <rx/rx_globals.h>
#include <pthread.h>
extern pthread_mutex_t host_glock_mutex;
-#define H_LOCK MUTEX_ENTER(&host_glock_mutex);
-#define H_UNLOCK MUTEX_EXIT(&host_glock_mutex);
+#define H_LOCK opr_mutex_enter(&host_glock_mutex);
+#define H_UNLOCK opr_mutex_exit(&host_glock_mutex);
extern pthread_key_t viced_uclient_key;
#define h_MAXHOSTTABLEENTRIES 1000
#include <afs/stds.h>
#include <afs/opr.h>
-#include <lwp.h>
-#include <lock.h>
+#include <opr/lock.h>
#include <afs/afsint.h>
#include <afs/rxgen_consts.h>
#include <afs/nfs.h>
#include <afs/nfs.h>
#include <rx/rx_queue.h>
#include <lwp.h>
-#include <lock.h>
+#include <opr/lock.h>
#include <afs/cmd.h>
#include <afs/ptclient.h>
#include <afs/afsint.h>
fs_state.options.fs_state_verify_before_save = 1;
fs_state.options.fs_state_verify_after_restore = 1;
- CV_INIT(&fs_state.worker_done_cv, "worker done", CV_DEFAULT, 0);
+ opr_cv_init(&fs_state.worker_done_cv);
opr_Verify(pthread_rwlock_init(&fs_state.state_lock, NULL) == 0);
}
# endif /* AFS_NT40_ENV */
#ifdef AFS_DEMAND_ATTACH_FS
fs_state.FiveMinuteLWP_tranquil = 1;
FS_LOCK;
- CV_BROADCAST(&fs_state.worker_done_cv);
+ opr_cv_broadcast(&fs_state.worker_done_cv);
FS_UNLOCK;
FS_STATE_UNLOCK;
#endif
#ifdef AFS_DEMAND_ATTACH_FS
fs_state.HostCheckLWP_tranquil = 1;
FS_LOCK;
- CV_BROADCAST(&fs_state.worker_done_cv);
+ opr_cv_broadcast(&fs_state.worker_done_cv);
FS_UNLOCK;
FS_STATE_UNLOCK;
#endif
fsync_next.tv_nsec = 0;
fsync_next.tv_sec = time(0) + fiveminutes;
- code = CV_TIMEDWAIT(&fsync_cond, &fsync_glock_mutex,
+ code = opr_cv_timedwait(&fsync_cond, &fsync_glock_mutex,
&fsync_next);
if (code != 0 && code != ETIMEDOUT)
ViceLog(0, ("pthread_cond_timedwait returned %d\n", code));
#ifdef AFS_DEMAND_ATTACH_FS
fs_state.FsyncCheckLWP_tranquil = 1;
FS_LOCK;
- CV_BROADCAST(&fs_state.worker_done_cv);
+ opr_cv_broadcast(&fs_state.worker_done_cv);
FS_UNLOCK;
FS_STATE_UNLOCK;
#endif /* AFS_DEMAND_ATTACH_FS */
FS_LOCK;
FS_STATE_UNLOCK;
ViceLog(0, ("waiting for background host/callback threads to quiesce before saving fileserver state...\n"));
- CV_WAIT(&fs_state.worker_done_cv, &fileproc_glock_mutex);
+ opr_cv_wait(&fs_state.worker_done_cv, &fileproc_glock_mutex);
FS_UNLOCK;
FS_STATE_RDLOCK;
}
if (ParseArgs(argc, argv)) {
exit(-1);
}
- MUTEX_INIT(&fileproc_glock_mutex, "fileproc", MUTEX_DEFAULT, 0);
+ opr_mutex_init(&fileproc_glock_mutex);
#ifdef AFS_SGI_VNODE_GLUE
if (afs_init_kernel_config(-1) < 0) {
/* allow super users to manage RX statistics */
rx_SetRxStatUserOk(viced_SuperUser);
- CV_INIT(&fsync_cond, "fsync", CV_DEFAULT, 0);
- MUTEX_INIT(&fsync_glock_mutex, "fsync", MUTEX_DEFAULT, 0);
+ opr_cv_init(&fsync_cond);
+ opr_mutex_init(&fsync_glock_mutex);
#if !defined(AFS_DEMAND_ATTACH_FS)
/*
* HostCheck, Signal, min 2 for RXSTATS */
#include <pthread.h>
extern pthread_mutex_t fileproc_glock_mutex;
-#define FS_LOCK MUTEX_ENTER(&fileproc_glock_mutex);
-#define FS_UNLOCK MUTEX_EXIT(&fileproc_glock_mutex);
+#define FS_LOCK opr_mutex_enter(&fileproc_glock_mutex);
+#define FS_UNLOCK opr_mutex_exit(&fileproc_glock_mutex);
extern pthread_mutex_t fsync_glock_mutex;
-#define FSYNC_LOCK MUTEX_ENTER(&fsync_glock_mutex);
-#define FSYNC_UNLOCK MUTEX_EXIT(&fsync_glock_mutex);
+#define FSYNC_LOCK opr_mutex_enter(&fsync_glock_mutex);
+#define FSYNC_UNLOCK opr_mutex_exit(&fsync_glock_mutex);
#ifdef AFS_DEMAND_ATTACH_FS
#include <roken.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
+
#include <afs/afsint.h>
#include <rx/rx_queue.h>
#include <afs/errors.h>
#ifdef AFS_PTHREAD_ENV
static pthread_mutex_t vol_fsync_mutex;
static volatile int vol_fsync_mutex_init = 0;
-#define VFSYNC_LOCK MUTEX_ENTER(&vol_fsync_mutex)
-#define VFSYNC_UNLOCK MUTEX_EXIT(&vol_fsync_mutex)
+#define VFSYNC_LOCK opr_mutex_enter(&vol_fsync_mutex)
+#define VFSYNC_UNLOCK opr_mutex_exit(&vol_fsync_mutex)
#else
#define VFSYNC_LOCK
#define VFSYNC_UNLOCK
#ifdef AFS_PTHREAD_ENV
/* this is safe since it gets called with VOL_LOCK held, or before we go multithreaded */
if (!vol_fsync_mutex_init) {
- MUTEX_INIT(&vol_fsync_mutex, "vol fsync", MUTEX_DEFAULT, 0);
+ opr_mutex_init(&vol_fsync_mutex);
vol_fsync_mutex_init = 1;
}
#endif
#endif
#include <afs/opr.h>
+#include <opr/lock.h>
#include <afs/afsint.h>
#ifndef AFS_NT40_ENV
#include <rx/rx_queue.h>
#include "nfs.h"
-#include "lwp.h"
#include "lock.h"
#include "ihandle.h"
#include "vnode.h"
#include <roken.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
#include <afs/afsint.h>
#include <rx/rx_queue.h>
#include "nfs.h"
#ifdef AFS_DEMAND_ATTACH_FS
queue_Init(&fsync_salv.head);
- CV_INIT(&fsync_salv.cv, "fsync salv", CV_DEFAULT, 0);
+ opr_cv_init(&fsync_salv.cv);
opr_Verify(pthread_create(&tid, &tattr, FSYNC_salvageThread, NULL) == 0);
#endif /* AFS_DEMAND_ATTACH_FS */
}
}
queue_Append(&fsync_salv.head, node);
- CV_BROADCAST(&fsync_salv.cv);
+ opr_cv_broadcast(&fsync_salv.cv);
}
#endif /* AFS_DEMAND_ATTACH_FS */
#endif
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
#include <afs/afsint.h>
#include <afs/afssyscalls.h>
#include <afs/afsutil.h>
void
ih_glock_init(void)
{
- MUTEX_INIT(&ih_glock_mutex, "ih glock", MUTEX_DEFAULT, 0);
+ opr_mutex_init(&ih_glock_mutex);
}
#endif /* AFS_PTHREAD_ENV */
extern void ih_glock_init(void);
#define IH_LOCK \
do { opr_Verify(pthread_once(&ih_glock_once, ih_glock_init) == 0); \
- MUTEX_ENTER(&ih_glock_mutex); \
+ opr_mutex_enter(&ih_glock_mutex); \
} while (0)
-#define IH_UNLOCK MUTEX_EXIT(&ih_glock_mutex)
+#define IH_UNLOCK opr_mutex_exit(&ih_glock_mutex)
#else /* AFS_PTHREAD_ENV */
#define IH_LOCK
#define IH_UNLOCK
#include <afs/opr.h>
#include <rx/rx_queue.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
#include <lock.h>
#include <afs/afsutil.h>
#include <lwp.h>
#ifdef AFS_PTHREAD_ENV
/* XXX do static initializers work for WINNT/pthread? */
pthread_mutex_t _namei_glc_lock = PTHREAD_MUTEX_INITIALIZER;
-#define NAMEI_GLC_LOCK MUTEX_ENTER(&_namei_glc_lock)
-#define NAMEI_GLC_UNLOCK MUTEX_EXIT(&_namei_glc_lock)
+#define NAMEI_GLC_LOCK opr_mutex_enter(&_namei_glc_lock)
+#define NAMEI_GLC_UNLOCK opr_mutex_exit(&_namei_glc_lock)
#else /* !AFS_PTHREAD_ENV */
#define NAMEI_GLC_LOCK
#define NAMEI_GLC_UNLOCK
#endif
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
#include <afs/afsint.h>
#include <rx/rx_queue.h>
#include "nfs.h"
#endif
#include <afs/opr.h>
+#include <opr/lock.h>
#include <afs/afsint.h>
#include <rx/rx_queue.h>
DInit(10);
queue_Init(&pending_q);
queue_Init(&log_cleanup_queue);
- MUTEX_INIT(&worker_lock, "worker", MUTEX_DEFAULT, 0);
- CV_INIT(&worker_cv, "worker", CV_DEFAULT, 0);
- CV_INIT(&log_cleanup_queue.queue_change_cv, "queuechange", CV_DEFAULT, 0);
+ opr_mutex_init(&worker_lock);
+ opr_cv_init(&worker_cv);
+ opr_cv_init(&log_cleanup_queue.queue_change_cv);
opr_Verify(pthread_attr_init(&attrs) == 0);
/* start up the reaper and log cleaner threads */
node->pid = pid;
VOL_UNLOCK;
- MUTEX_ENTER(&worker_lock);
+ opr_mutex_enter(&worker_lock);
current_workers++;
/* let the reaper thread know another worker was spawned */
- CV_BROADCAST(&worker_cv);
+ opr_cv_broadcast(&worker_cv);
/* if we're overquota, wait for the reaper */
while (current_workers >= Parallel) {
- CV_WAIT(&worker_cv, &worker_lock);
+ opr_cv_wait(&worker_cv, &worker_lock);
}
- MUTEX_EXIT(&worker_lock);
+ opr_mutex_exit(&worker_lock);
}
}
}
int slot, pid, status;
struct log_cleanup_node * cleanup;
- MUTEX_ENTER(&worker_lock);
+ opr_mutex_enter(&worker_lock);
/* loop reaping our children */
while (1) {
/* wait() won't block unless we have children, so
* block on the cond var if we're childless */
while (current_workers == 0) {
- CV_WAIT(&worker_cv, &worker_lock);
+ opr_cv_wait(&worker_cv, &worker_lock);
}
- MUTEX_EXIT(&worker_lock);
+ opr_mutex_exit(&worker_lock);
cleanup = malloc(sizeof(struct log_cleanup_node));
SALVSYNC_doneWorkByPid(pid, status);
- MUTEX_ENTER(&worker_lock);
+ opr_mutex_enter(&worker_lock);
if (cleanup) {
cleanup->pid = pid;
queue_Append(&log_cleanup_queue, cleanup);
- CV_SIGNAL(&log_cleanup_queue.queue_change_cv);
+ opr_cv_signal(&log_cleanup_queue.queue_change_cv);
}
/* ok, we've reaped a child */
current_workers--;
- CV_BROADCAST(&worker_cv);
+ opr_cv_broadcast(&worker_cv);
}
return NULL;
{
struct log_cleanup_node * cleanup;
- MUTEX_ENTER(&worker_lock);
+ opr_mutex_enter(&worker_lock);
while (1) {
while (queue_IsEmpty(&log_cleanup_queue)) {
- CV_WAIT(&log_cleanup_queue.queue_change_cv, &worker_lock);
+ opr_cv_wait(&log_cleanup_queue.queue_change_cv, &worker_lock);
}
while (queue_IsNotEmpty(&log_cleanup_queue)) {
cleanup = queue_First(&log_cleanup_queue, log_cleanup_node);
queue_Remove(cleanup);
- MUTEX_EXIT(&worker_lock);
+ opr_mutex_exit(&worker_lock);
SalvageLogCleanup(cleanup->pid);
free(cleanup);
- MUTEX_ENTER(&worker_lock);
+ opr_mutex_enter(&worker_lock);
}
}
- MUTEX_EXIT(&worker_lock);
+ opr_mutex_exit(&worker_lock);
return NULL;
}
{
struct log_cleanup_node *cleanup, *next;
- MUTEX_ENTER(&worker_lock);
+ opr_mutex_enter(&worker_lock);
for (queue_Scan(log_watch_queue, cleanup, next, log_cleanup_node)) {
/* if a process is still running, assume it's the salvage process
if (kill(cleanup->pid, 0) < 0 && errno == ESRCH) {
queue_Remove(cleanup);
queue_Append(&log_cleanup_queue, cleanup);
- CV_SIGNAL(&log_cleanup_queue.queue_change_cv);
+ opr_cv_signal(&log_cleanup_queue.queue_change_cv);
}
}
- MUTEX_EXIT(&worker_lock);
+ opr_mutex_exit(&worker_lock);
}
#include <roken.h>
#include <afs/opr.h>
+#include <opr/lock.h>
#include <afs/afsint.h>
#include <rx/rx_queue.h>
#include "nfs.h"
#include <afs/errors.h>
#include "salvsync.h"
-#include "lwp.h"
#include "lock.h"
#include <afs/afssyscalls.h>
#include "ihandle.h"
#include <stddef.h>
#include <afs/opr.h>
+#include <opr/lock.h>
#include <afs/afsint.h>
#include <rx/rx_queue.h>
#include "nfs.h"
#include <afs/errors.h>
#include "salvsync.h"
-#include "lwp.h"
#include "lock.h"
#include <afs/afssyscalls.h>
#include "ihandle.h"
#include <afs/opr.h>
#include <rx/rx_queue.h>
+#include <opr/lock.h>
#include <lock.h>
#include <afs/afsutil.h>
-#include <lwp.h>
#include "nfs.h"
#include <afs/afsint.h>
#include "ihandle.h"
#include <afs/opr.h>
#include <rx/rx_queue.h>
+#include <opr/lock.h>
#include <lock.h>
#include <afs/afsutil.h>
-#include <lwp.h>
#include "nfs.h"
#include <afs/afsint.h>
#include "ihandle.h"
#endif
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+#include <opr/lock.h>
+#endif
#include "rx/rx_queue.h"
#include <afs/afsint.h>
#include "nfs.h"
VnState old_state = Vn_state(vnp);
Vn_state(vnp) = new_state;
- CV_BROADCAST(&Vn_stateCV(vnp));
+ opr_cv_broadcast(&Vn_stateCV(vnp));
return old_state;
}
opr_Assert(Vn_readers(vnp) > 0);
Vn_readers(vnp)--;
if (!Vn_readers(vnp)) {
- CV_BROADCAST(&Vn_stateCV(vnp));
+ opr_cv_broadcast(&Vn_stateCV(vnp));
VnChangeState_r(vnp, VN_STATE_ONLINE);
}
}
#define WCOREDUMP(x) ((x) & 0200)
#endif
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
+
#include <afs/afsint.h>
#if !defined(AFS_SGI_ENV) && !defined(AFS_NT40_ENV)
#if defined(AFS_VFSINCL_ENV)
#include <sys/file.h>
#endif
-#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#else
+# include <opr/lockstub.h>
+#endif
+
#include <afs/afsint.h>
#include <rx/rx_queue.h>
VSetVInit_r(int value)
{
VInit = value;
- CV_BROADCAST(&vol_vinit_cond);
+ opr_cv_broadcast(&vol_vinit_cond);
}
static_inline void
opr_Verify(pthread_key_create(&VThread_key, NULL) == 0);
#endif
- MUTEX_INIT(&vol_glock_mutex, "vol glock", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&vol_trans_mutex, "vol trans", MUTEX_DEFAULT, 0);
- CV_INIT(&vol_put_volume_cond, "vol put", CV_DEFAULT, 0);
- CV_INIT(&vol_sleep_cond, "vol sleep", CV_DEFAULT, 0);
- CV_INIT(&vol_init_attach_cond, "vol init attach", CV_DEFAULT, 0);
- CV_INIT(&vol_vinit_cond, "vol init", CV_DEFAULT, 0);
+ opr_mutex_init(&vol_glock_mutex);
+ opr_mutex_init(&vol_trans_mutex);
+ opr_cv_init(&vol_put_volume_cond);
+ opr_cv_init(&vol_sleep_cond);
+ opr_cv_init(&vol_init_attach_cond);
+ opr_cv_init(&vol_vinit_cond);
#ifndef AFS_PTHREAD_ENV
IOMGR_Initialize();
#endif /* AFS_PTHREAD_ENV */
srandom(time(0)); /* For VGetVolumeInfo */
#ifdef AFS_DEMAND_ATTACH_FS
- MUTEX_INIT(&vol_salvsync_mutex, "salvsync", MUTEX_DEFAULT, 0);
+ opr_mutex_init(&vol_salvsync_mutex);
#endif /* AFS_DEMAND_ATTACH_FS */
/* Ok, we have done enough initialization that fileserver can
pthread_t tid;
pthread_attr_t attrs;
- CV_INIT(&params.thread_done_cv, "thread done", CV_DEFAULT, 0);
+ opr_cv_init(&params.thread_done_cv);
queue_Init(&params);
params.n_threads_complete = 0;
VInitVolumePackageThread(&params);
}
- CV_DESTROY(&params.thread_done_cv);
+ opr_cv_destroy(&params.thread_done_cv);
}
VOL_LOCK;
VSetVInit_r(2); /* Initialized, and all volumes have been attached */
- CV_BROADCAST(&vol_init_attach_cond);
+ opr_cv_broadcast(&vol_init_attach_cond);
VOL_UNLOCK;
return 0;
}
done:
params->n_threads_complete++;
- CV_SIGNAL(&params->thread_done_cv);
+ opr_cv_signal(&params->thread_done_cv);
VOL_UNLOCK;
return NULL;
}
/* create partition work queue */
queue_Init(&pq);
- CV_INIT(&(pq.cv), "partq", CV_DEFAULT, 0);
- MUTEX_INIT(&(pq.mutex), "partq", MUTEX_DEFAULT, 0);
+ opr_cv_init(&pq.cv);
+ opr_mutex_init(&pq.mutex);
for (parts = 0, diskP = DiskPartitionList; diskP; diskP = diskP->next, parts++) {
struct diskpartition_queue_t *dp;
dp = malloc(sizeof(struct diskpartition_queue_t));
/* create volume work queue */
queue_Init(&vq);
- CV_INIT(&(vq.cv), "volq", CV_DEFAULT, 0);
- MUTEX_INIT(&(vq.mutex), "volq", MUTEX_DEFAULT, 0);
+ opr_cv_init(&vq.cv);
+ opr_mutex_init(&vq.mutex);
opr_Verify(pthread_attr_init(&attrs) == 0);
opr_Verify(pthread_attr_setdetachstate(&attrs,
VInitPreAttachVolumes(threads, &vq);
opr_Verify(pthread_attr_destroy(&attrs) == 0);
- CV_DESTROY(&pq.cv);
- MUTEX_DESTROY(&pq.mutex);
- CV_DESTROY(&vq.cv);
- MUTEX_DESTROY(&vq.mutex);
+ opr_cv_destroy(&pq.cv);
+ opr_mutex_destroy(&pq.mutex);
+ opr_cv_destroy(&vq.cv);
+ opr_mutex_destroy(&vq.mutex);
}
VOL_LOCK;
VSetVInit_r(2); /* Initialized, and all volumes have been attached */
- CV_BROADCAST(&vol_init_attach_cond);
+ opr_cv_broadcast(&vol_init_attach_cond);
VOL_UNLOCK;
return 0;
vp->hashid = vid;
queue_Init(&vp->vnode_list);
queue_Init(&vp->rx_call_list);
- CV_INIT(&V_attachCV(vp), "partattach", CV_DEFAULT, 0);
+ opr_cv_init(&V_attachCV(vp));
vb->batch[vb->size++] = vp;
if (vb->size == VINIT_BATCH_MAX_SIZE) {
- MUTEX_ENTER(&vq->mutex);
+ opr_mutex_enter(&vq->mutex);
queue_Append(vq, vb);
- CV_BROADCAST(&vq->cv);
- MUTEX_EXIT(&vq->mutex);
+ opr_cv_broadcast(&vq->cv);
+ opr_mutex_exit(&vq->mutex);
vb = malloc(sizeof(struct volume_init_batch));
opr_Assert(vb);
}
vb->last = 1;
- MUTEX_ENTER(&vq->mutex);
+ opr_mutex_enter(&vq->mutex);
queue_Append(vq, vb);
- CV_BROADCAST(&vq->cv);
- MUTEX_EXIT(&vq->mutex);
+ opr_cv_broadcast(&vq->cv);
+ opr_mutex_exit(&vq->mutex);
Log("Partition scan thread %d of %d ended\n", params->thread, params->nthreads);
free(params);
}
/* get next partition to scan */
- MUTEX_ENTER(&pq->mutex);
+ opr_mutex_enter(&pq->mutex);
if (queue_IsEmpty(pq)) {
- MUTEX_EXIT(&pq->mutex);
+ opr_mutex_exit(&pq->mutex);
return NULL;
}
dp = queue_First(pq, diskpartition_queue_t);
queue_Remove(dp);
- MUTEX_EXIT(&pq->mutex);
+ opr_mutex_exit(&pq->mutex);
opr_Assert(dp);
opr_Assert(dp->diskP);
while (nthreads) {
/* dequeue next volume */
- MUTEX_ENTER(&vq->mutex);
+ opr_mutex_enter(&vq->mutex);
if (queue_IsEmpty(vq)) {
- CV_WAIT(&vq->cv, &vq->mutex);
+ opr_cv_wait(&vq->cv, &vq->mutex);
}
vb = queue_First(vq, volume_init_batch);
queue_Remove(vb);
- MUTEX_EXIT(&vq->mutex);
+ opr_mutex_exit(&vq->mutex);
if (vb->size) {
VOL_LOCK;
if (vol_attach_threads > 1) {
/* prepare for parallel shutdown */
params.n_threads = vol_attach_threads;
- MUTEX_INIT(&params.lock, "params", MUTEX_DEFAULT, 0);
- CV_INIT(&params.cv, "params", CV_DEFAULT, 0);
- CV_INIT(&params.master_cv, "params master", CV_DEFAULT, 0);
+ opr_mutex_init(&params.lock);
+ opr_cv_init(&params.cv);
+ opr_cv_init(&params.master_cv);
opr_Verify(pthread_attr_init(&attrs) == 0);
opr_Verify(pthread_attr_setdetachstate(&attrs,
PTHREAD_CREATE_DETACHED) == 0);
vol_attach_threads, params.n_parts, params.n_parts > 1 ? "s" : "" );
/* do pass 0 shutdown */
- MUTEX_ENTER(&params.lock);
+ opr_mutex_enter(&params.lock);
for (i=0; i < params.n_threads; i++) {
opr_Verify(pthread_create(&tid, &attrs, &VShutdownThread,
&params) == 0);
}
params.n_threads_complete = 0;
params.pass = 1;
- CV_BROADCAST(&params.cv);
- MUTEX_EXIT(&params.lock);
+ opr_cv_broadcast(&params.cv);
+ opr_mutex_exit(&params.lock);
Log("VShutdown: pass 0 completed using the 1 thread per partition algorithm\n");
Log("VShutdown: starting passes 1 through 3 using finely-granular mp-fast algorithm\n");
}
opr_Verify(pthread_attr_destroy(&attrs) == 0);
- CV_DESTROY(&params.cv);
- CV_DESTROY(&params.master_cv);
- MUTEX_DESTROY(&params.lock);
+ opr_cv_destroy(&params.cv);
+ opr_cv_destroy(&params.master_cv);
+ opr_mutex_destroy(&params.lock);
/* drop the VByPList exclusive reservations */
for (diskP = DiskPartitionList; diskP; diskP = diskP->next) {
params = (vshutdown_thread_t *) args;
/* acquire the shutdown pass 0 lock */
- MUTEX_ENTER(&params->lock);
+ opr_mutex_enter(&params->lock);
/* if there's still pass 0 work to be done,
* get a work entry, and do a pass 0 shutdown */
if (queue_IsNotEmpty(params)) {
dpq = queue_First(params, diskpartition_queue_t);
queue_Remove(dpq);
- MUTEX_EXIT(&params->lock);
+ opr_mutex_exit(&params->lock);
diskP = dpq->diskP;
free(dpq);
id = diskP->index;
while (ShutdownVolumeWalk_r(diskP, 0, &params->part_pass_head[id]))
count++;
params->stats[0][diskP->index] = count;
- MUTEX_ENTER(&params->lock);
+ opr_mutex_enter(&params->lock);
}
params->n_threads_complete++;
if (params->n_threads_complete == params->n_threads) {
/* notify control thread that all workers have completed pass 0 */
- CV_SIGNAL(&params->master_cv);
+ opr_cv_signal(&params->master_cv);
}
while (params->pass == 0) {
- CV_WAIT(&params->cv, &params->lock);
+ opr_cv_wait(&params->cv, &params->lock);
}
/* switch locks */
- MUTEX_EXIT(&params->lock);
+ opr_mutex_exit(&params->lock);
VOL_LOCK;
pass = params->pass;
ShutdownCreateSchedule(params);
/* wake up all the workers */
- CV_BROADCAST(&params->cv);
+ opr_cv_broadcast(&params->cv);
VOL_UNLOCK;
Log("VShutdown: pass %d completed using %d threads on %d partitions\n",
opr_Assert(vp != NULL);
queue_Init(&vp->vnode_list);
queue_Init(&vp->rx_call_list);
- CV_INIT(&V_attachCV(vp), "vp attach", CV_DEFAULT, 0);
+ opr_cv_init(&V_attachCV(vp));
}
/* link the volume with its associated vice partition */
queue_Init(&vp->vnode_list);
queue_Init(&vp->rx_call_list);
#ifdef AFS_DEMAND_ATTACH_FS
- CV_INIT(&V_attachCV(vp), "vp attach", CV_DEFAULT, 0);
+ opr_cv_init(&V_attachCV(vp));
#endif /* AFS_DEMAND_ATTACH_FS */
}
#endif /* AFS_DEMAND_ATTACH_FS */
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&vol_put_volume_cond);
+ opr_cv_broadcast(&vol_put_volume_cond);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(VPutVolume);
#endif /* AFS_PTHREAD_ENV */
VCheckSalvage(vp);
ReallyFreeVolume(vp);
if (programType == fileServer) {
- CV_BROADCAST(&vol_put_volume_cond);
+ opr_cv_broadcast(&vol_put_volume_cond);
}
}
return ret;
ReallyFreeVolume(vp);
if (programType == fileServer) {
#if defined(AFS_PTHREAD_ENV)
- CV_BROADCAST(&vol_put_volume_cond);
+ opr_cv_broadcast(&vol_put_volume_cond);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(VPutVolume);
#endif /* AFS_PTHREAD_ENV */
}
FreeVolumeHeader(vp);
#ifdef AFS_PTHREAD_ENV
- CV_BROADCAST(&vol_put_volume_cond);
+ opr_cv_broadcast(&vol_put_volume_cond);
#else /* AFS_PTHREAD_ENV */
LWP_NoYieldSignal(VPutVolume);
#endif /* AFS_PTHREAD_ENV */
queue_Init(&volume_LRU.q[i]);
volume_LRU.q[i].len = 0;
volume_LRU.q[i].busy = 0;
- CV_INIT(&volume_LRU.q[i].cv, "vol lru", CV_DEFAULT, 0);
+ opr_cv_init(&volume_LRU.q[i].cv);
}
/* setup the timing constants */
/* start up the VLRU scanner */
volume_LRU.scanner_state = VLRU_SCANNER_STATE_OFFLINE;
if (programType == fileServer) {
- CV_INIT(&volume_LRU.cv, "vol lru", CV_DEFAULT, 0);
+ opr_cv_init(&volume_LRU.cv);
opr_Verify(pthread_attr_init(&attrs) == 0);
opr_Verify(pthread_attr_setdetachstate(&attrs,
PTHREAD_CREATE_DETACHED) == 0);
/* check to see if we've been asked to pause */
if (volume_LRU.scanner_state == VLRU_SCANNER_STATE_PAUSING) {
volume_LRU.scanner_state = VLRU_SCANNER_STATE_PAUSED;
- CV_BROADCAST(&volume_LRU.cv);
+ opr_cv_broadcast(&volume_LRU.cv);
do {
VOL_CV_WAIT(&volume_LRU.cv);
} while (volume_LRU.scanner_state == VLRU_SCANNER_STATE_PAUSED);
/* signal that scanner is down */
volume_LRU.scanner_state = VLRU_SCANNER_STATE_OFFLINE;
- CV_BROADCAST(&volume_LRU.cv);
+ opr_cv_broadcast(&volume_LRU.cv);
VOL_UNLOCK;
return NULL;
}
{
opr_Assert(q->busy);
q->busy = 0;
- CV_BROADCAST(&q->cv);
+ opr_cv_broadcast(&q->cv);
}
/* wait for another thread to end exclusive access on VLRU */
for (i=0; i < VolumeHashTable.Size; i++) {
queue_Init(&VolumeHashTable.Table[i]);
#ifdef AFS_DEMAND_ATTACH_FS
- CV_INIT(&VolumeHashTable.Table[i].chain_busy_cv, "vhash busy", CV_DEFAULT, 0);
+ opr_cv_init(&VolumeHashTable.Table[i].chain_busy_cv);
#endif /* AFS_DEMAND_ATTACH_FS */
}
}
{
opr_Assert(head->busy);
head->busy = 0;
- CV_BROADCAST(&head->chain_busy_cv);
+ opr_cv_broadcast(&head->chain_busy_cv);
}
/**
{
opr_Assert(dp->vol_list.busy);
dp->vol_list.busy = 0;
- CV_BROADCAST(&dp->vol_list.cv);
+ opr_cv_broadcast(&dp->vol_list.cv);
}
/**
extern pthread_t vol_glock_holder;
#define VOL_LOCK \
do { \
- MUTEX_ENTER(&vol_glock_mutex); \
+ opr_mutex_enter(&vol_glock_mutex); \
VOL_LOCK_ASSERT_UNHELD; \
_VOL_LOCK_SET_HELD; \
} while (0)
do { \
VOL_LOCK_ASSERT_HELD; \
_VOL_LOCK_SET_UNHELD; \
- MUTEX_EXIT(&vol_glock_mutex); \
+ opr_mutex_exit(&vol_glock_mutex); \
} while (0)
#define VOL_CV_WAIT(cv) \
do { \
VOL_LOCK_DBG_CV_WAIT_BEGIN; \
- CV_WAIT((cv), &vol_glock_mutex); \
+ opr_cv_wait((cv), &vol_glock_mutex); \
VOL_LOCK_DBG_CV_WAIT_END; \
} while (0)
#else /* !VOL_LOCK_DEBUG */
-#define VOL_LOCK MUTEX_ENTER(&vol_glock_mutex)
-#define VOL_UNLOCK MUTEX_EXIT(&vol_glock_mutex)
-#define VOL_CV_WAIT(cv) CV_WAIT((cv), &vol_glock_mutex)
+#define VOL_LOCK opr_mutex_enter(&vol_glock_mutex)
+#define VOL_UNLOCK opr_mutex_exit(&vol_glock_mutex)
+#define VOL_CV_WAIT(cv) opr_cv_wait((cv), &vol_glock_mutex)
#endif /* !VOL_LOCK_DEBUG */
-#define VSALVSYNC_LOCK MUTEX_ENTER(&vol_salvsync_mutex)
-#define VSALVSYNC_UNLOCK MUTEX_EXIT(&vol_salvsync_mutex)
-#define VTRANS_LOCK MUTEX_ENTER(&vol_trans_mutex)
-#define VTRANS_UNLOCK MUTEX_EXIT(&vol_trans_mutex)
+#define VSALVSYNC_LOCK opr_mutex_enter(&vol_salvsync_mutex)
+#define VSALVSYNC_UNLOCK opr_mutex_exit(&vol_salvsync_mutex)
+#define VTRANS_LOCK opr_mutex_enter(&vol_trans_mutex)
+#define VTRANS_UNLOCK opr_mutex_exit(&vol_trans_mutex)
#else /* AFS_PTHREAD_ENV */
#define VOL_LOCK
#define VOL_UNLOCK
return;
}
VOL_LOCK_DBG_CV_WAIT_BEGIN;
- code = CV_TIMEDWAIT(cv, &vol_glock_mutex, ts);
+ code = opr_cv_timedwait(cv, &vol_glock_mutex, ts);
VOL_LOCK_DBG_CV_WAIT_END;
if (code == ETIMEDOUT) {
code = 0;
VStats.state_levels[new_state]++;
V_attachState(vp) = new_state;
- CV_BROADCAST(&V_attachCV(vp));
+ opr_cv_broadcast(&V_attachCV(vp));
return old_state;
}
#include <sys/lockf.h>
#endif
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#else
+# include <opr/lockstub.h>
+#endif
+
#include <afs/opr.h>
#include <rx/rx_queue.h>
#include <rx/xdr.h>
memset(lf, 0, sizeof(*lf));
lf->path = strdup(path);
lf->fd = INVALID_FD;
- MUTEX_INIT(&lf->mutex, "vlockfile", MUTEX_DEFAULT, 0);
+ opr_mutex_init(&lf->mutex);
}
#ifdef AFS_NT40_ENV
void
VLockFileReinit(struct VLockFile *lf)
{
- MUTEX_ENTER(&lf->mutex);
+ opr_mutex_enter(&lf->mutex);
if (lf->fd != INVALID_FD) {
_VCloseFd(lf->fd);
lf->refcount = 0;
- MUTEX_EXIT(&lf->mutex);
+ opr_mutex_exit(&lf->mutex);
}
/**
opr_Assert(locktype == READ_LOCK || locktype == WRITE_LOCK);
- MUTEX_ENTER(&lf->mutex);
+ opr_mutex_enter(&lf->mutex);
if (lf->fd == INVALID_FD) {
lf->fd = _VOpenPath(lf->path);
if (lf->fd == INVALID_FD) {
- MUTEX_EXIT(&lf->mutex);
+ opr_mutex_exit(&lf->mutex);
return EIO;
}
}
lf->refcount++;
- MUTEX_EXIT(&lf->mutex);
+ opr_mutex_exit(&lf->mutex);
code = _VLockFd(lf->fd, offset, locktype, nonblock);
if (code) {
- MUTEX_ENTER(&lf->mutex);
+ opr_mutex_enter(&lf->mutex);
if (--lf->refcount < 1) {
_VCloseFd(lf->fd);
lf->fd = INVALID_FD;
}
- MUTEX_EXIT(&lf->mutex);
+ opr_mutex_exit(&lf->mutex);
}
return code;
void
VLockFileUnlock(struct VLockFile *lf, afs_uint32 offset)
{
- MUTEX_ENTER(&lf->mutex);
+ opr_mutex_enter(&lf->mutex);
opr_Assert(lf->fd != INVALID_FD);
_VUnlockFd(lf->fd, offset);
}
- MUTEX_EXIT(&lf->mutex);
+ opr_mutex_exit(&lf->mutex);
}
#ifdef AFS_DEMAND_ATTACH_FS
opr_Assert(lf);
memset(dl, 0, sizeof(*dl));
Lock_Init(&dl->rwlock);
- MUTEX_INIT(&dl->mutex, "disklock", MUTEX_DEFAULT, 0);
- CV_INIT(&dl->cv, "disklock cv", CV_DEFAULT, 0);
+ opr_mutex_init(&dl->mutex);
+ opr_cv_init(&dl->cv);
dl->lockfile = lf;
dl->offset = offset;
}
ObtainWriteLock(&dl->rwlock);
}
- MUTEX_ENTER(&dl->mutex);
+ opr_mutex_enter(&dl->mutex);
if ((dl->flags & VDISKLOCK_ACQUIRING)) {
/* Some other thread is waiting to acquire an fs lock. If nonblock=1,
code = EBUSY;
} else {
while ((dl->flags & VDISKLOCK_ACQUIRING)) {
- CV_WAIT(&dl->cv, &dl->mutex);
+ opr_cv_wait(&dl->cv, &dl->mutex);
}
}
}
/* mark that we are waiting on the fs lock */
dl->flags |= VDISKLOCK_ACQUIRING;
- MUTEX_EXIT(&dl->mutex);
+ opr_mutex_exit(&dl->mutex);
code = VLockFileLock(dl->lockfile, dl->offset, locktype, nonblock);
- MUTEX_ENTER(&dl->mutex);
+ opr_mutex_enter(&dl->mutex);
dl->flags &= ~VDISKLOCK_ACQUIRING;
dl->flags |= VDISKLOCK_ACQUIRED;
}
- CV_BROADCAST(&dl->cv);
+ opr_cv_broadcast(&dl->cv);
}
}
++dl->lockers;
}
- MUTEX_EXIT(&dl->mutex);
+ opr_mutex_exit(&dl->mutex);
return code;
}
{
opr_Assert(locktype == READ_LOCK || locktype == WRITE_LOCK);
- MUTEX_ENTER(&dl->mutex);
+ opr_mutex_enter(&dl->mutex);
opr_Assert(dl->lockers > 0);
if (--dl->lockers < 1) {
dl->flags &= ~VDISKLOCK_ACQUIRED;
}
- MUTEX_EXIT(&dl->mutex);
+ opr_mutex_exit(&dl->mutex);
if (locktype == READ_LOCK) {
ReleaseReadLock(&dl->rwlock);
#include <roken.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
#ifdef AFS_NT40_ENV
#include <windows.h>
#include <WINNT/afsevent.h>
#endif
-#include <rx/xdr.h>
#include <rx/rx_queue.h>
#include <afs/afsint.h>
#include <afs/prs_fs.h>
#include <roken.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
+
#include <rx/rx.h>
#include <rx/rxkad.h>
#include <rx/rx_queue.h>
#ifdef AFS_PTHREAD_ENV
#define VTRANS_OBJ_LOCK_INIT(tt) \
- MUTEX_INIT(&((tt)->lock), "vtrans obj", MUTEX_DEFAULT, 0);
+ opr_mutex_init(&((tt)->lock));
#define VTRANS_OBJ_LOCK_DESTROY(tt) \
- MUTEX_DESTROY(&((tt)->lock))
+ opr_mutex_destroy(&((tt)->lock))
#define VTRANS_OBJ_LOCK(tt) \
- MUTEX_ENTER(&((tt)->lock))
+ opr_mutex_enter(&((tt)->lock))
#define VTRANS_OBJ_UNLOCK(tt) \
- MUTEX_EXIT(&((tt)->lock))
+ opr_mutex_exit(&((tt)->lock))
#else
#define VTRANS_OBJ_LOCK_INIT(tt)
#define VTRANS_OBJ_LOCK_DESTROY(tt)
#include <roken.h>
#include <afs/opr.h>
+#ifdef AFS_PTHREAD_ENV
+# include <opr/lock.h>
+#endif
#ifdef AFS_NT40_ENV
#include <afs/afsutil.h>