return l->owner == current->pid;
}
-
-static inline void afs_mutex_init(afs_kmutex_t *l)
-{
-#if defined(AFS_LINUX24_ENV)
- init_MUTEX(&l->sem);
-#else
- l->sem = MUTEX;
-#endif
- l->owner = 0;
-}
-#define MUTEX_INIT(a,b,c,d) afs_mutex_init(a)
-
+#define MUTEX_INIT(a,b,c,d) afs_mutex_init(a)
#define MUTEX_DESTROY(a)
-
-static inline void MUTEX_ENTER(afs_kmutex_t *l)
-{
- down(&l->sem);
- if (l->owner)
- osi_Panic("mutex_enter: 0x%x held by %d", l, l->owner);
- l->owner = current->pid;
-}
-
-/* And how to do a good tryenter? */
-static inline int MUTEX_TRYENTER(afs_kmutex_t *l)
-{
- if (!l->owner) {
- MUTEX_ENTER(l);
- return 1;
- }
- else
- return 0;
-}
-
-static inline void MUTEX_EXIT(afs_kmutex_t *l)
-{
- if (l->owner != current->pid)
- osi_Panic("mutex_exit: 0x%x held by %d",
- l, l->owner);
- l->owner = 0;
- up(&l->sem);
-}
+#define MUTEX_ENTER afs_mutex_enter
+#define MUTEX_TRYENTER afs_mutex_tryenter
+#define MUTEX_EXIT afs_mutex_exit
#if defined(AFS_LINUX24_ENV)
-#define CV_INIT(cv,b,c,d) init_waitqueue_head((wait_queue_head_t *)(cv))
+#define CV_INIT(cv,b,c,d) init_waitqueue_head((wait_queue_head_t *)(cv))
#else
-#define CV_INIT(cv,b,c,d) init_waitqueue((struct wait_queue**)(cv))
+#define CV_INIT(cv,b,c,d) init_waitqueue((struct wait_queue**)(cv))
#endif
#define CV_DESTROY(cv)
-
-/* CV_WAIT and CV_TIMEDWAIT rely on the fact that the Linux kernel has
- * a global lock. Thus we can safely drop our locks before calling the
- * kernel sleep services.
- */
-static inline int CV_WAIT(afs_kcondvar_t *cv, afs_kmutex_t *l)
-{
- int isAFSGlocked = ISAFS_GLOCK();
- sigset_t saved_set;
-
- if (isAFSGlocked) AFS_GUNLOCK();
- MUTEX_EXIT(l);
-
- spin_lock_irq(&current->sigmask_lock);
- saved_set = current->blocked;
- sigfillset(&current->blocked);
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
-
-#if defined(AFS_LINUX24_ENV)
- interruptible_sleep_on((wait_queue_head_t *)cv);
-#else
- interruptible_sleep_on((struct wait_queue**)cv);
-#endif
-
- spin_lock_irq(&current->sigmask_lock);
- current->blocked = saved_set;
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
-
- MUTEX_ENTER(l);
- if (isAFSGlocked) AFS_GLOCK();
-
- return 0;
-}
-
-static inline int CV_TIMEDWAIT(afs_kcondvar_t *cv, afs_kmutex_t *l, int waittime)
-{
- int isAFSGlocked = ISAFS_GLOCK();
- long t = waittime * HZ / 1000;
- sigset_t saved_set;
-
- if (isAFSGlocked) AFS_GUNLOCK();
- MUTEX_EXIT(l);
-
- spin_lock_irq(&current->sigmask_lock);
- saved_set = current->blocked;
- sigfillset(&current->blocked);
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
-
-#if defined(AFS_LINUX24_ENV)
- t = interruptible_sleep_on_timeout((wait_queue_head_t *)cv, t);
-#else
- t = interruptible_sleep_on_timeout((struct wait_queue**)cv, t);
-#endif
-
- spin_lock_irq(&current->sigmask_lock);
- current->blocked = saved_set;
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
-
- MUTEX_ENTER(l);
- if (isAFSGlocked) AFS_GLOCK();
-
- return 0;
-}
+#define CV_WAIT_SIG(cv, m) afs_cv_wait(cv, m, 1)
+#define CV_WAIT(cv, m) afs_cv_wait(cv, m, 0)
+#define CV_TIMEDWAIT afs_cv_timedwait
#if defined(AFS_LINUX24_ENV)
-#define CV_SIGNAL(cv) wake_up((wait_queue_head_t *)cv)
-#define CV_BROADCAST(cv) wake_up((wait_queue_head_t *)cv)
+#define CV_SIGNAL(cv) wake_up((wait_queue_head_t *)cv)
+#define CV_BROADCAST(cv) wake_up((wait_queue_head_t *)cv)
#else
-#define CV_SIGNAL(cv) wake_up((struct wait_queue**)cv)
-#define CV_BROADCAST(cv) wake_up((struct wait_queue**)cv)
+#define CV_SIGNAL(cv) wake_up((struct wait_queue**)cv)
+#define CV_BROADCAST(cv) wake_up((struct wait_queue**)cv)
#endif
#else