* directory or online at http://www.openafs.org/dl/license10.html
*/
-#include "../afs/param.h" /* Should be always first */
#include <afsconfig.h>
+#include "afs/param.h"
-RCSID("$Header$");
-#include "../afs/sysincludes.h" /* Standard vendor system headers */
-#include "../afs/afsincludes.h" /* Afs-based standard headers */
-#include "../afs/afs_stats.h" /* afs statistics */
-
-
-
-#if defined(AFS_GLOBAL_SUNLOCK)
-static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
+#include "afs/sysincludes.h" /* Standard vendor system headers */
+#include "afsincludes.h" /* Afs-based standard headers */
+#include "afs/afs_stats.h" /* afs statistics */
+#if defined(HAVE_LINUX_FREEZER_H)
+#include <linux/freezer.h>
#endif
-void afs_osi_Wakeup(char *event);
-void afs_osi_Sleep(char *event);
-
-static char waitV;
-
-#if ! defined(AFS_GLOBAL_SUNLOCK)
-
-/* call procedure aproc with arock as an argument, in ams milliseconds */
-static struct timer_list *afs_osi_CallProc(void *aproc, void *arock, int ams)
-{
- struct timer_list *timer = NULL;
-
- timer = (struct timer_list*)osi_Alloc(sizeof(struct timer_list));
- if (timer) {
- init_timer(timer);
- timer->expires = (ams*afs_hz)/1000 + 1;
- timer->data = (unsigned long)arock;
- timer->function = aproc;
- add_timer(timer);
- }
- return timer;
-}
-
-/* cancel a timeout, whether or not it has already occurred */
-static int afs_osi_CancelProc(struct timer_list *timer)
-{
- if (timer) {
- del_timer(timer);
- osi_Free(timer, sizeof(struct timer_list));
- }
- return 0;
-}
-
-static AfsWaitHack()
-{
- AFS_STATCNT(WaitHack);
- wakeup(&waitV);
-}
+static char waitV, dummyV;
-#endif
-
-void afs_osi_InitWaitHandle(struct afs_osi_WaitHandle *achandle)
+void
+afs_osi_InitWaitHandle(struct afs_osi_WaitHandle *achandle)
{
AFS_STATCNT(osi_InitWaitHandle);
achandle->proc = (caddr_t) 0;
}
/* cancel osi_Wait */
-void afs_osi_CancelWait(struct afs_osi_WaitHandle *achandle)
+void
+afs_osi_CancelWait(struct afs_osi_WaitHandle *achandle)
{
caddr_t proc;
AFS_STATCNT(osi_CancelWait);
proc = achandle->proc;
- if (proc == 0) return;
- achandle->proc = (caddr_t) 0; /* so dude can figure out he was signalled */
+ if (proc == 0)
+ return;
+ achandle->proc = (caddr_t) 0; /* so dude can figure out he was signalled */
afs_osi_Wakeup(&waitV);
}
* Waits for data on ahandle, or ams ms later. ahandle may be null.
* Returns 0 if timeout and EINTR if signalled.
*/
-int afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok)
+int
+afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok)
{
+ afs_int32 endTime;
int code;
- afs_int32 endTime, tid;
- struct timer_list *timer = NULL;
AFS_STATCNT(osi_Wait);
- endTime = osi_Time() + (ams/1000);
+ endTime = osi_Time() + (ams / 1000);
if (ahandle)
ahandle->proc = (caddr_t) current;
do {
AFS_ASSERT_GLOCK();
- code = 0;
-#if defined(AFS_GLOBAL_SUNLOCK)
- code = osi_TimedSleep(&waitV, ams, 1);
- if (code) {
- if (aintok) break;
- flush_signals(current);
- code = 0;
- }
-#else
- timer = afs_osi_CallProc(AfsWaitHack, (char *) current, ams);
- afs_osi_Sleep(&waitV);
- afs_osi_CancelProc(timer);
-#endif /* AFS_GLOBAL_SUNLOCK */
+ code = afs_osi_TimedSleep(&waitV, ams, 1);
+ if (code)
+ break;
if (ahandle && (ahandle->proc == (caddr_t) 0)) {
/* we've been signalled */
break;
char *event; /* lwp event: an address */
int refcount; /* Is it in use? */
int seq; /* Sequence number: this is incremented
- by wakeup calls; wait will not return until
- it changes */
-#if defined(AFS_LINUX24_ENV)
+ * by wakeup calls; wait will not return until
+ * it changes */
wait_queue_head_t cond;
-#else
- struct wait_queue *cond;
-#endif
} afs_event_t;
#define HASHSIZE 128
-afs_event_t *afs_evhasht[HASHSIZE];/* Hash table for events */
+afs_event_t *afs_evhasht[HASHSIZE]; /* Hash table for events */
#define afs_evhash(event) (afs_uint32) ((((long)event)>>2) & (HASHSIZE-1));
int afs_evhashcnt = 0;
/* Get and initialize event structure corresponding to lwp event (i.e. address)
* */
-static afs_event_t *afs_getevent(char *event)
+static afs_event_t *
+afs_getevent(char *event)
{
afs_event_t *evp, *newp = 0;
int hashcode;
newp = evp;
evp = evp->next;
}
- if (!newp) {
- newp = (afs_event_t *) osi_AllocSmallSpace(sizeof (afs_event_t));
- afs_evhashcnt++;
- newp->next = afs_evhasht[hashcode];
- afs_evhasht[hashcode] = newp;
-#if defined(AFS_LINUX24_ENV)
- init_waitqueue_head(&newp->cond);
-#else
- init_waitqueue(&newp->cond);
-#endif
- newp->seq = 0;
- }
+ if (!newp)
+ return NULL;
+
newp->event = event;
newp->refcount = 1;
return newp;
}
+/* afs_addevent -- allocates a new event for the address. It isn't returned;
+ * instead, afs_getevent should be called again. Thus, the real effect of
+ * this routine is to add another event to the hash bucket for this
+ * address.
+ *
+ * Locks:
+ * Called with GLOCK held. However the function might drop
+ * GLOCK when it calls osi_AllocSmallSpace for allocating
+ * a new event (In Linux, the allocator drops GLOCK to avoid
+ * a deadlock).
+ */
+
+static void
+afs_addevent(char *event)
+{
+ int hashcode;
+ afs_event_t *newp;
+
+ AFS_ASSERT_GLOCK();
+ hashcode = afs_evhash(event);
+ newp = osi_linux_alloc(sizeof(afs_event_t), 0);
+ afs_evhashcnt++;
+ newp->next = afs_evhasht[hashcode];
+ afs_evhasht[hashcode] = newp;
+ init_waitqueue_head(&newp->cond);
+ newp->seq = 0;
+ newp->event = &dummyV; /* Dummy address for new events */
+ newp->refcount = 0;
+}
+
+#ifndef set_current_state
+#define set_current_state(x) current->state = (x);
+#endif
+
/* Release the specified event */
#define relevent(evp) ((evp)->refcount--)
-
-void afs_osi_Sleep(char *event)
+/* afs_osi_SleepSig
+ *
+ * Waits for an event to be notified, returning early if a signal
+ * is received. Returns EINTR if signaled, and 0 otherwise.
+ */
+int
+afs_osi_SleepSig(void *event)
{
struct afs_event *evp;
- int seq;
+ int seq, retval;
+#ifdef DECLARE_WAITQUEUE
+ DECLARE_WAITQUEUE(wait, current);
+#else
+ struct wait_queue wait = { current, NULL };
+#endif
evp = afs_getevent(event);
+ if (!evp) {
+ afs_addevent(event);
+ evp = afs_getevent(event);
+ }
+
seq = evp->seq;
+ retval = 0;
+
+ add_wait_queue(&evp->cond, &wait);
while (seq == evp->seq) {
+ set_current_state(TASK_INTERRUPTIBLE);
AFS_ASSERT_GLOCK();
AFS_GUNLOCK();
- interruptible_sleep_on(&evp->cond);
+ schedule();
+#ifdef CONFIG_PM
+ if (
+#ifdef PF_FREEZE
+ current->flags & PF_FREEZE
+#else
+#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
+ !current->todo
+#else
+#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
+ test_ti_thread_flag(current->thread_info, TIF_FREEZE)
+#else
+ test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
+#endif
+#endif
+#endif
+ )
+#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
+ refrigerator(PF_FREEZE);
+#else
+ refrigerator();
+#endif
+#endif
AFS_GLOCK();
+ if (signal_pending(current)) {
+ retval = EINTR;
+ break;
+ }
}
+ remove_wait_queue(&evp->cond, &wait);
+ set_current_state(TASK_RUNNING);
+
relevent(evp);
+ return retval;
+}
+
+/* afs_osi_Sleep -- waits for an event to be notified, ignoring signals.
+ * - NOTE: that on Linux, there are circumstances in which TASK_INTERRUPTIBLE
+ * can wake up, even if all signals are blocked
+ * - TODO: handle signals correctly by passing an indication back to the
+ * caller that the wait has been interrupted and the stack should be cleaned
+ * up preparatory to signal delivery
+ */
+void
+afs_osi_Sleep(void *event)
+{
+ sigset_t saved_set;
+
+ SIG_LOCK(current);
+ saved_set = current->blocked;
+    sigfillset(&current->blocked);
+ RECALC_SIGPENDING(current);
+ SIG_UNLOCK(current);
+
+ afs_osi_SleepSig(event);
+
+ SIG_LOCK(current);
+ current->blocked = saved_set;
+ RECALC_SIGPENDING(current);
+ SIG_UNLOCK(current);
}
-/* osi_TimedSleep
+/* afs_osi_TimedSleep
*
* Arguments:
* event - event to sleep on
* ams --- max sleep time in milliseconds
* aintok - 1 if should sleep interruptibly
*
- * Returns 0 if timeout and EINTR if signalled.
- *
- * While the Linux kernel still has a global lock, we can use the standard
- * sleep calls and drop our locks early. The kernel lock will protect us
- * until we get to sleep.
+ * Returns 0 if timeout, EINTR if signalled, and EAGAIN if it might
+ * have raced.
*/
-static int osi_TimedSleep(char *event, afs_int32 ams, int aintok)
+int
+afs_osi_TimedSleep(void *event, afs_int32 ams, int aintok)
{
- long t = ams * HZ / 1000;
+ int code = 0;
+ long ticks = (ams * HZ / 1000) + 1;
struct afs_event *evp;
+#ifdef DECLARE_WAITQUEUE
+ DECLARE_WAITQUEUE(wait, current);
+#else
+ struct wait_queue wait = { current, NULL };
+#endif
evp = afs_getevent(event);
+ if (!evp) {
+ afs_addevent(event);
+ evp = afs_getevent(event);
+ }
+ add_wait_queue(&evp->cond, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ /* always sleep TASK_INTERRUPTIBLE to keep load average
+     * from artificially increasing. */
AFS_GUNLOCK();
- if (aintok)
- t = interruptible_sleep_on_timeout(&evp->cond, t);
- else
- t = sleep_on_timeout(&evp->cond, t);
+
+ if (aintok) {
+ if (schedule_timeout(ticks))
+ code = EINTR;
+ } else
+ schedule_timeout(ticks);
+#ifdef CONFIG_PM
+ if (
+#ifdef PF_FREEZE
+ current->flags & PF_FREEZE
+#else
+#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
+ !current->todo
+#else
+#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
+ test_ti_thread_flag(current->thread_info, TIF_FREEZE)
+#else
+ test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
+#endif
+#endif
+#endif
+ )
+#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
+ refrigerator(PF_FREEZE);
+#else
+ refrigerator();
+#endif
+#endif
+
AFS_GLOCK();
+ remove_wait_queue(&evp->cond, &wait);
+ set_current_state(TASK_RUNNING);
+
+ relevent(evp);
- return t ? EINTR : 0;
+ return code;
}
-void afs_osi_Wakeup(char *event)
+int
+afs_osi_Wakeup(void *event)
{
+ int ret = 2;
struct afs_event *evp;
evp = afs_getevent(event);
+ if (!evp) /* No sleepers */
+ return 1;
+
if (evp->refcount > 1) {
- evp->seq++;
+ evp->seq++;
wake_up(&evp->cond);
+ ret = 0;
}
relevent(evp);
+ return ret;
}