linux-updates-20060309
[openafs.git] / src/rx/LINUX/rx_kmutex.c
index eecddc5..dbd3b32 100644
--- a/src/rx/LINUX/rx_kmutex.c
+++ b/src/rx/LINUX/rx_kmutex.c
  */
 
 #include <afsconfig.h>
-#include "../afs/param.h"
+#include "afs/param.h"
 
-RCSID("$Header$");
+RCSID
+    ("$Header$");
 
-#include "../rx/rx_kcommon.h"
-#include "../rx/rx_kmutex.h"
-#include "../rx/rx_kernel.h"
+#include "rx/rx_kcommon.h"
+#include "rx_kmutex.h"
+#include "rx/rx_kernel.h"
 
-#ifdef CONFIG_SMP
-
-void afs_mutex_init(afs_kmutex_t *l)
+void
+afs_mutex_init(afs_kmutex_t * l)
 {
-#if defined(AFS_LINUX24_ENV)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+    mutex_init(&l->mutex);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
     init_MUTEX(&l->sem);
 #else
     l->sem = MUTEX;
 #endif
@@ -34,40 +36,57 @@ void afs_mutex_init(afs_kmutex_t *l)
     l->owner = 0;
 }
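
The afs_kmutex_t handled here is defined in rx_kmutex.h, outside this diff. Judging from the fields the wrappers touch (l->mutex, l->sem, l->owner), its definition presumably looks roughly like this sketch:

    /* Sketch only; the real definition lives in rx_kmutex.h. */
    typedef struct afs_kmutex {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
        struct mutex mutex;      /* 2.6.16+: native kernel mutex */
    #else
        struct semaphore sem;    /* older kernels: semaphore used as a mutex */
    #endif
        int owner;               /* pid of the holder, 0 when free */
    } afs_kmutex_t;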
 
-void afs_mutex_enter(afs_kmutex_t *l)
+void
+afs_mutex_enter(afs_kmutex_t * l)
 {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+    mutex_lock(&l->mutex);
+#else
     down(&l->sem);
+#endif
     if (l->owner)
        osi_Panic("mutex_enter: 0x%x held by %d", l, l->owner);
     l->owner = current->pid;
 }
-                                                             
-int afs_mutex_tryenter(afs_kmutex_t *l)
+
+int
+afs_mutex_tryenter(afs_kmutex_t * l)
 {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+    if (mutex_trylock(&l->mutex) == 0)
+#else
     if (down_trylock(&l->sem))
+#endif
        return 0;
     l->owner = current->pid;
     return 1;
 }
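
The #ifdef above bridges two opposite return conventions: mutex_trylock() returns 1 when the lock is acquired and 0 on contention, whereas down_trylock() returns 0 on success and nonzero on contention. The same logic, normalized in one hypothetical helper (illustration only, not part of the patch):

    /* Hypothetical: nonzero return means the lock was acquired. */
    static int afs_trylock_raw(afs_kmutex_t *l)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
        return mutex_trylock(&l->mutex);   /* 1 = acquired, 0 = contended */
    #else
        return !down_trylock(&l->sem);     /* 0 = acquired, so invert */
    #endif
    }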
 
-void afs_mutex_exit(afs_kmutex_t *l)
+void
+afs_mutex_exit(afs_kmutex_t * l)
 {
     if (l->owner != current->pid)
-       osi_Panic("mutex_exit: 0x%x held by %d",
-                 l, l->owner);
+       osi_Panic("mutex_exit: 0x%x held by %d", l, l->owner);
     l->owner = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+    mutex_unlock(&l->mutex);
+#else
     up(&l->sem);
+#endif
 }
 
-/*
- * CV_WAIT and CV_TIMEDWAIT rely on the fact that the Linux kernel has
- * a global lock. Thus we can safely drop our locks before calling the
- * kernel sleep services.
- * Or not.
+/* CV_WAIT and CV_TIMEDWAIT sleep until the specified event occurs, or, in the
+ * case of CV_TIMEDWAIT, until the specified timeout occurs.
+ * - NOTE: on Linux there are circumstances in which a task sleeping in
+ *   TASK_INTERRUPTIBLE can be woken even if all signals are blocked
+ * - TODO: handle signals correctly by passing an indication back to the
+ *   caller that the wait has been interrupted and the stack should be cleaned
+ *   up preparatory to signal delivery
  */
-int afs_cv_wait(afs_kcondvar_t *cv, afs_kmutex_t *l, int sigok)
+int
+afs_cv_wait(afs_kcondvar_t * cv, afs_kmutex_t * l, int sigok)
 {
-    int isAFSGlocked = ISAFS_GLOCK();
+    int seq, isAFSGlocked = ISAFS_GLOCK();
     sigset_t saved_set;
 #ifdef DECLARE_WAITQUEUE
     DECLARE_WAITQUEUE(wait, current);
@@ -75,57 +94,90 @@ int afs_cv_wait(afs_kcondvar_t *cv, afs_kmutex_t *l, int sigok)
     struct wait_queue wait = { current, NULL };
 #endif
 
-    add_wait_queue(cv, &wait);
+    seq = cv->seq;
+    
     set_current_state(TASK_INTERRUPTIBLE);
+    add_wait_queue(&cv->waitq, &wait);
 
-    if (isAFSGlocked) AFS_GUNLOCK();
+    if (isAFSGlocked)
+       AFS_GUNLOCK();
     MUTEX_EXIT(l);
 
     if (!sigok) {
-       spin_lock_irq(&current->sigmask_lock);
+       SIG_LOCK(current);
        saved_set = current->blocked;
        sigfillset(&current->blocked);
-       recalc_sigpending(current);
-       spin_unlock_irq(&current->sigmask_lock);
+       RECALC_SIGPENDING(current);
+       SIG_UNLOCK(current);
+    }
+
+    while (seq == cv->seq) {
+       schedule();
+#ifdef AFS_LINUX26_ENV
+#ifdef CONFIG_PM
+       if (
+#ifdef PF_FREEZE
+           current->flags & PF_FREEZE
+#else
+           !current->todo
+#endif
+           )
+#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
+           refrigerator(PF_FREEZE);
+#else
+           refrigerator();
+#endif
+           set_current_state(TASK_INTERRUPTIBLE);
+#endif
+#endif
     }
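
With the guards resolved for a 2.6 kernel that has CONFIG_PM, defines PF_FREEZE, and whose refrigerator() takes no arguments, the wait loop above reduces to the sketch below. Note that set_current_state() is unconditional despite its indentation in the patch; the re-arm matters, because a task left in TASK_RUNNING after a spurious wakeup would turn the loop into a yield-spin.

    while (seq == cv->seq) {
        schedule();                      /* sleep until woken */
        if (current->flags & PF_FREEZE)  /* suspend wants this task parked */
            refrigerator();              /* block here until the system thaws */
        set_current_state(TASK_INTERRUPTIBLE);  /* re-arm before re-checking */
    }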
 
-    schedule();
-    remove_wait_queue(cv, &wait);
+    remove_wait_queue(&cv->waitq, &wait);
+    set_current_state(TASK_RUNNING);
 
     if (!sigok) {
-       spin_lock_irq(&current->sigmask_lock);
+       SIG_LOCK(current);
        current->blocked = saved_set;
-       recalc_sigpending(current);
-       spin_unlock_irq(&current->sigmask_lock);
+       RECALC_SIGPENDING(current);
+       SIG_UNLOCK(current);
     }
 
-    if (isAFSGlocked) AFS_GLOCK();
+    if (isAFSGlocked)
+       AFS_GLOCK();
     MUTEX_ENTER(l);
 
     return (sigok && signal_pending(current)) ? EINTR : 0;
 }
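
The seq counter is the core of the new wakeup protocol: a waiter samples cv->seq before sleeping and loops until it changes, so the spurious TASK_INTERRUPTIBLE wakeups called out in the comment above can no longer produce a false return. The signalling side is not in this diff; given the fields used here (cv->seq, cv->waitq), it presumably amounts to something like:

    /* Sketch of the assumed condvar type and signal side (rx_kmutex.h). */
    typedef struct afs_kcondvar {
        int seq;                  /* bumped on every signal/broadcast */
        wait_queue_head_t waitq;  /* tasks blocked in afs_cv_wait() */
    } afs_kcondvar_t;

    #define CV_SIGNAL(cv)    do { ++(cv)->seq; wake_up(&(cv)->waitq); } while (0)
    #define CV_BROADCAST(cv) do { ++(cv)->seq; wake_up(&(cv)->waitq); } while (0)

For this to be race-free, the signaller must bump seq while holding the associated mutex, since the waiter samples seq and joins the wait queue before dropping it.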
 
-void afs_cv_timedwait(afs_kcondvar_t *cv, afs_kmutex_t *l, int waittime)
+void
+afs_cv_timedwait(afs_kcondvar_t * cv, afs_kmutex_t * l, int waittime)
 {
-    int isAFSGlocked = ISAFS_GLOCK();
+    int seq, isAFSGlocked = ISAFS_GLOCK();
     long t = waittime * HZ / 1000;
 #ifdef DECLARE_WAITQUEUE
     DECLARE_WAITQUEUE(wait, current);
 #else
     struct wait_queue wait = { current, NULL };
 #endif
+    seq = cv->seq;
 
-    add_wait_queue(cv, &wait);
     set_current_state(TASK_INTERRUPTIBLE);
+    add_wait_queue(&cv->waitq, &wait);
 
-    if (isAFSGlocked) AFS_GUNLOCK();
+    if (isAFSGlocked)
+       AFS_GUNLOCK();
     MUTEX_EXIT(l);
+
+    while (seq == cv->seq) {
+       t = schedule_timeout(t);
+       if (!t)         /* timeout */
+           break;
+    }
     
-    t = schedule_timeout(t);
-    remove_wait_queue(cv, &wait);
-    
-    if (isAFSGlocked) AFS_GLOCK();
+    remove_wait_queue(&cv->waitq, &wait);
+    set_current_state(TASK_RUNNING);
+
+    if (isAFSGlocked)
+       AFS_GLOCK();
     MUTEX_ENTER(l);
 }
-
-#endif
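
For reference, a caller-side sketch (hypothetical names; CV_WAIT here stands for rx's usual wrapper around afs_cv_wait with sigok = 0). The predicate must be re-tested under the mutex after every wakeup:

    MUTEX_ENTER(&work_lock);
    while (work_head == NULL)            /* re-test the predicate each wakeup */
        CV_WAIT(&work_cv, &work_lock);   /* drops work_lock while sleeping,
                                          * and the AFS glock if held */
    job = work_head;
    work_head = job->next;
    MUTEX_EXIT(&work_lock);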