void
afs_mutex_init(afs_kmutex_t * l)
{
+/* Initialise the kernel lock backing this AFS mutex.  On 2.6.16+ kernels
+ * use a real struct mutex; on 2.4.x fall back to the semaphore-based
+ * emulation via init_MUTEX(); older kernels assign the MUTEX initialiser
+ * directly.  NOTE(review): '+'/'-' are unified-diff markers, not C. */
-#if defined(AFS_LINUX24_ENV)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ mutex_init(&l->mutex);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
init_MUTEX(&l->sem);
#else
l->sem = MUTEX;
void
afs_mutex_enter(afs_kmutex_t * l)
{
+/* Blocking acquire: struct mutex on 2.6.16+ kernels, the old semaphore
+ * path otherwise.  A non-zero l->owner after acquisition means someone
+ * tried to re-enter the lock recursively, which is fatal. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ mutex_lock(&l->mutex);
+#else
down(&l->sem);
+#endif
if (l->owner)
- osi_Panic("mutex_enter: 0x%x held by %d", l, l->owner);
+/* %x with a pointer argument is undefined on 64-bit; print via an
+ * unsigned long cast instead. */
+ osi_Panic("mutex_enter: 0x%lx held by %d", (unsigned long)l, l->owner);
l->owner = current->pid;
}
int
afs_mutex_tryenter(afs_kmutex_t * l)
{
+/* Non-blocking acquire.  Returns 1 and records the owner pid on success,
+ * 0 if the lock is busy.  The two primitives have opposite conventions:
+ * mutex_trylock() returns 1 on success / 0 on failure, whereas
+ * down_trylock() returns 0 on success / non-zero on failure -- hence the
+ * differing tests in the two branches below. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ if (mutex_trylock(&l->mutex) == 0)
+#else
if (down_trylock(&l->sem))
+#endif
return 0;
l->owner = current->pid;
return 1;
afs_mutex_exit(afs_kmutex_t * l)
{
+/* Release the lock.  Only the recorded owner may unlock; anything else
+ * indicates a locking bug and is fatal. */
if (l->owner != current->pid)
- osi_Panic("mutex_exit: 0x%x held by %d", l, l->owner);
+ osi_Panic("mutex_exit: 0x%lx held by %d", (unsigned long)l, l->owner);
+/* Clear ownership BEFORE unlocking so a racing acquirer never observes a
+ * stale pid after it has taken the lock. */
l->owner = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ mutex_unlock(&l->mutex);
+#else
up(&l->sem);
+#endif
}
/* CV_WAIT and CV_TIMEDWAIT sleep until the specified event occurs, or, in the
#else
struct wait_queue wait = { current, NULL };
#endif
-
+ sigemptyset(&saved_set);
seq = cv->seq;
set_current_state(TASK_INTERRUPTIBLE);
#ifdef PF_FREEZE
current->flags & PF_FREEZE
#else
+#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
!current->todo
+#else
+#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
+ test_ti_thread_flag(current->thread_info, TIF_FREEZE)
+#else
+ test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
+#endif
+#endif
#endif
)
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE